Merge branch 'develop' into adaface
@@ -7,6 +7,12 @@
PaddleClas is an image recognition toolkit, built on PaddlePaddle for industry and academia, that helps users train better vision models and deploy them in real applications.

**Recent updates**

- 🔥️ 2022.5.26 [PaddlePaddle industry practice live course](http://aglc.cn/v-c4FAR): a walkthrough of the ultra-lightweight solution for managing people entering and leaving key areas. Sign up to join the discussion.

<div align="center">
<img src="https://user-images.githubusercontent.com/80816848/170166458-767a01ca-1429-437f-a628-dd184732ef53.png" width = "150" />
</div>

- 2022.5.23 Added the [personnel access management demo](https://aistudio.baidu.com/aistudio/projectdetail/4094475), which can be tried out on AI Studio.
- 2022.5.20 Released [PP-HGNet](./docs/zh_CN/models/PP-HGNet.md) and [PP-LCNet v2](./docs/zh_CN/models/PP-LCNetV2.md).
- 2022.4.21 Added [code](https://github.com/PaddlePaddle/PaddleClas/pull/1820/files) for the CVPR 2022 oral paper [MixFormer](https://arxiv.org/pdf/2204.02557.pdf).
- 2022.1.27 Fully upgraded the documentation; added [PaddleServing C++ pipeline deployment](./deploy/paddleserving) and an [18M image recognition Android demo](./deploy/lite_shitu).
- 2021.11.1 Released the [PP-ShiTu technical report](https://arxiv.org/pdf/2111.00775.pdf) and added a beverage recognition demo.
@@ -0,0 +1,36 @@
Global:
  infer_imgs: "./images/PULC/person/objects365_02035329.jpg"
  inference_model_dir: "./models/person_cls_infer"
  batch_size: 1
  use_gpu: True
  enable_mkldnn: False
  cpu_num_threads: 10
  enable_benchmark: True
  use_fp16: False
  ir_optim: True
  use_tensorrt: False
  gpu_mem: 8000
  enable_profile: False

PreProcess:
  transform_ops:
    - ResizeImage:
        resize_short: 256
    - CropImage:
        size: 224
    - NormalizeImage:
        scale: 0.00392157
        mean: [0.485, 0.456, 0.406]
        std: [0.229, 0.224, 0.225]
        order: ''
        channel_num: 3
    - ToCHWImage:

PostProcess:
  main_indicator: ThreshOutput
  ThreshOutput:
    threshold: 0.9
    label_0: nobody
    label_1: someone
  SavePreLabel:
    save_dir: ./pre_label/
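For readers less familiar with these operators, the following is a hedged NumPy/OpenCV sketch of what the `PreProcess` pipeline above computes. It is illustrative rather than the PaddleClas operator implementations, and it assumes the input image arrives in BGR order (as OpenCV reads it) and that the mean/std statistics apply in RGB order.

```python
# Illustrative re-implementation (assumed semantics) of the transform_ops above.
import cv2
import numpy as np

def preprocess(img_bgr):
    img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)  # assumption: stats are RGB
    # ResizeImage: scale the short side to 256, keeping the aspect ratio.
    h, w = img.shape[:2]
    scale = 256.0 / min(h, w)
    img = cv2.resize(img, (int(round(w * scale)), int(round(h * scale))))
    # CropImage: 224 x 224 center crop.
    h, w = img.shape[:2]
    top, left = (h - 224) // 2, (w - 224) // 2
    img = img[top:top + 224, left:left + 224]
    # NormalizeImage: scale 0.00392157 == 1/255, then per-channel standardize.
    img = img.astype("float32") * 0.00392157
    img = (img - np.array([0.485, 0.456, 0.406], "float32")) / \
          np.array([0.229, 0.224, 0.225], "float32")
    # ToCHWImage: HWC -> CHW, the layout the inference engine expects.
    return img.transpose((2, 0, 1))
```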
@@ -8,7 +8,7 @@ Global:
  image_shape: [3, 640, 640]
  threshold: 0.2
  max_det_results: 5
-  labe_list:
+  label_list:
    - foreground

  use_gpu: True
@@ -5,7 +5,7 @@ Global:
  image_shape: [3, 640, 640]
  threshold: 0.2
  max_det_results: 1
-  labe_list:
+  label_list:
    - foreground

  # inference engine config
@@ -8,7 +8,7 @@ Global:
  image_shape: [3, 640, 640]
  threshold: 0.2
  max_det_results: 5
-  labe_list:
+  label_list:
    - foreground

  use_gpu: True
@@ -8,7 +8,7 @@ Global:
  image_shape: [3, 640, 640]
  threshold: 0.2
  max_det_results: 5
-  labe_list:
+  label_list:
    - foreground

  use_gpu: True
@@ -8,7 +8,7 @@ Global:
  image_shape: [3, 640, 640]
  threshold: 0.2
  max_det_results: 5
-  labe_list:
+  label_list:
    - foreground

  use_gpu: True
@@ -8,7 +8,7 @@ Global:
  image_shape: [3, 640, 640]
  threshold: 0.2
  max_det_results: 5
-  labe_list:
+  label_list:
    - foreground

  use_gpu: True
@@ -8,7 +8,7 @@ Global:
  image_shape: [3, 640, 640]
  threshold: 0.2
  max_det_results: 5
-  labe_list:
+  label_list:
    - foreground

  # inference engine config
@@ -8,7 +8,7 @@ Global:
  image_shape: [3, 640, 640]
  threshold: 0.2
  max_det_results: 5
-  labe_list:
+  label_list:
    - foreground

  use_gpu: True
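The same one-letter key rename (`labe_list` → `label_list`) repeats across all of these detector configs. A minimal hedged sketch of why it matters downstream (the file path is illustrative):

```python
# Any consumer reading the corrected key raises a KeyError against a config
# that still uses the old misspelled "labe_list".
import yaml

with open("inference_det.yaml") as f:     # illustrative path
    cfg = yaml.safe_load(f)
labels = cfg["Global"]["label_list"]      # KeyError if the file says "labe_list"
print(labels)                             # ['foreground']
```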
@@ -33,106 +33,106 @@ using namespace paddle_infer;

namespace Detection {
// Object Detection Result
struct ObjectResult {
  // Rectangle coordinates of detected object: left, right, top, down
  std::vector<int> rect;
  // Class id of detected object
  int class_id;
  // Confidence of detected object
  float confidence;
};

// Generate visualization colormap for each class
std::vector<int> GenerateColorMap(int num_class);

// Visualize detection result
cv::Mat VisualizeResult(const cv::Mat &img,
-                        const std::vector <ObjectResult> &results,
-                        const std::vector <std::string> &lables,
+                        const std::vector<ObjectResult> &results,
+                        const std::vector<std::string> &lables,
                        const std::vector<int> &colormap, const bool is_rbox);

class ObjectDetector {
public:
  explicit ObjectDetector(const YAML::Node &config_file) {
    this->use_gpu_ = config_file["Global"]["use_gpu"].as<bool>();
    if (config_file["Global"]["gpu_id"].IsDefined())
      this->gpu_id_ = config_file["Global"]["gpu_id"].as<int>();
    this->gpu_mem_ = config_file["Global"]["gpu_mem"].as<int>();
    this->cpu_math_library_num_threads_ =
        config_file["Global"]["cpu_num_threads"].as<int>();
    this->use_mkldnn_ = config_file["Global"]["enable_mkldnn"].as<bool>();
    this->use_tensorrt_ = config_file["Global"]["use_tensorrt"].as<bool>();
    this->use_fp16_ = config_file["Global"]["use_fp16"].as<bool>();
    this->model_dir_ =
        config_file["Global"]["det_inference_model_dir"].as<std::string>();
    this->threshold_ = config_file["Global"]["threshold"].as<float>();
    this->max_det_results_ = config_file["Global"]["max_det_results"].as<int>();
    this->image_shape_ =
-        config_file["Global"]["image_shape"].as < std::vector < int >> ();
+        config_file["Global"]["image_shape"].as<std::vector<int>>();
    this->label_list_ =
-        config_file["Global"]["labe_list"].as < std::vector < std::string >> ();
+        config_file["Global"]["label_list"].as<std::vector<std::string>>();
    this->ir_optim_ = config_file["Global"]["ir_optim"].as<bool>();
    this->batch_size_ = config_file["Global"]["batch_size"].as<int>();

    preprocessor_.Init(config_file["DetPreProcess"]["transform_ops"]);
    LoadModel(model_dir_, batch_size_, run_mode);
  }

  // Load Paddle inference model
  void LoadModel(const std::string &model_dir, const int batch_size = 1,
                 const std::string &run_mode = "fluid");

  // Run predictor
-  void Predict(const std::vector <cv::Mat> imgs, const int warmup = 0,
+  void Predict(const std::vector<cv::Mat> imgs, const int warmup = 0,
               const int repeats = 1,
-               std::vector <ObjectResult> *result = nullptr,
+               std::vector<ObjectResult> *result = nullptr,
               std::vector<int> *bbox_num = nullptr,
               std::vector<double> *times = nullptr);

-  const std::vector <std::string> &GetLabelList() const {
+  const std::vector<std::string> &GetLabelList() const {
    return this->label_list_;
  }

  const float &GetThreshold() const { return this->threshold_; }

private:
  bool use_gpu_ = true;
  int gpu_id_ = 0;
  int gpu_mem_ = 800;
  int cpu_math_library_num_threads_ = 6;
  std::string run_mode = "fluid";
  bool use_mkldnn_ = false;
  bool use_tensorrt_ = false;
  bool batch_size_ = 1;
  bool use_fp16_ = false;
  std::string model_dir_;
  float threshold_ = 0.5;
  float max_det_results_ = 5;
  std::vector<int> image_shape_ = {3, 640, 640};
-  std::vector <std::string> label_list_;
+  std::vector<std::string> label_list_;
  bool ir_optim_ = true;
  bool det_permute_ = true;
  bool det_postprocess_ = true;
  int min_subgraph_size_ = 30;
  bool use_dynamic_shape_ = false;
  int trt_min_shape_ = 1;
  int trt_max_shape_ = 1280;
  int trt_opt_shape_ = 640;
  bool trt_calib_mode_ = false;

  // Preprocess image and copy data to input buffer
  void Preprocess(const cv::Mat &image_mat);

  // Postprocess result
-  void Postprocess(const std::vector <cv::Mat> mats,
-                   std::vector <ObjectResult> *result, std::vector<int> bbox_num,
+  void Postprocess(const std::vector<cv::Mat> mats,
+                   std::vector<ObjectResult> *result, std::vector<int> bbox_num,
                   bool is_rbox);

-  std::shared_ptr <Predictor> predictor_;
+  std::shared_ptr<Predictor> predictor_;
  Preprocessor preprocessor_;
  ImageBlob inputs_;
  std::vector<float> output_data_;
  std::vector<int> out_bbox_num_data_;
};

}  // namespace Detection
@@ -95,7 +95,7 @@ def main():
    config_json["Global"]["det_model_path"] = args.det_model_path
    config_json["Global"]["rec_model_path"] = args.rec_model_path
    config_json["Global"]["rec_label_path"] = args.rec_label_path
-    config_json["Global"]["label_list"] = config_yaml["Global"]["labe_list"]
+    config_json["Global"]["label_list"] = config_yaml["Global"]["label_list"]
    config_json["Global"]["rec_nms_thresold"] = config_yaml["Global"][
        "rec_nms_thresold"]
    config_json["Global"]["max_det_results"] = config_yaml["Global"][
@@ -53,6 +53,26 @@ class PostProcesser(object):
        return rtn


+class ThreshOutput(object):
+    def __init__(self, threshold, label_0="0", label_1="1"):
+        self.threshold = threshold
+        self.label_0 = label_0
+        self.label_1 = label_1
+
+    def __call__(self, x, file_names=None):
+        y = []
+        for idx, probs in enumerate(x):
+            score = probs[1]
+            if score < self.threshold:
+                result = {"class_ids": [0], "scores": [1 - score], "label_names": [self.label_0]}
+            else:
+                result = {"class_ids": [1], "scores": [score], "label_names": [self.label_1]}
+            if file_names is not None:
+                result["file_name"] = file_names[idx]
+            y.append(result)
+        return y
+
+
class Topk(object):
    def __init__(self, topk=1, class_id_map_file=None):
        assert isinstance(topk, (int, ))
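A quick, hedged usage sketch of the new `ThreshOutput` post-processor. The inputs are illustrative softmax rows of the form `[p(label_0), p(label_1)]`:

```python
import numpy as np

post = ThreshOutput(threshold=0.9, label_0="nobody", label_1="someone")
x = np.array([[0.97, 0.03],   # score for label_1 is 0.03 < 0.9  -> class 0
              [0.02, 0.98]])  # score for label_1 is 0.98 >= 0.9 -> class 1
print(post(x, file_names=["a.jpg", "b.jpg"]))
# [{'class_ids': [0], 'scores': [0.97], 'label_names': ['nobody'], 'file_name': 'a.jpg'},
#  {'class_ids': [1], 'scores': [0.98], 'label_names': ['someone'], 'file_name': 'b.jpg'}]
```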
@@ -49,10 +49,15 @@ class ClsPredictor(Predictor):
            pid = os.getpid()
            size = config["PreProcess"]["transform_ops"][1]["CropImage"][
                "size"]
+            if config["Global"].get("use_int8", False):
+                precision = "int8"
+            elif config["Global"].get("use_fp16", False):
+                precision = "fp16"
+            else:
+                precision = "fp32"
            self.auto_logger = auto_log.AutoLogger(
                model_name=config["Global"].get("model_name", "cls"),
-                model_precision='fp16'
-                if config["Global"]["use_fp16"] else 'fp32',
+                model_precision=precision,
                batch_size=config["Global"].get("batch_size", 1),
                data_shape=[3, size, size],
                save_path=config["Global"].get("save_log_path",
@@ -128,13 +128,10 @@ class DetPredictor(Predictor):
        results = []
        if reduce(lambda x, y: x * y, np_boxes.shape) < 6:
            print('[WARNNING] No object detected.')
            results = np.array([])
        else:
            results = np_boxes

-        results = self.parse_det_results(results,
-                                         self.config["Global"]["threshold"],
-                                         self.config["Global"]["labe_list"])
+        results = self.parse_det_results(
+            np_boxes, self.config["Global"]["threshold"],
+            self.config["Global"]["label_list"])
        return results
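As a side note on the emptiness check above: each detection row carries 6 values (assumed layout: class_id, score, x1, y1, x2, y2), so a total element count below 6 means no complete detection came back. A tiny sketch:

```python
from functools import reduce
import numpy as np

np_boxes = np.zeros((0, 6))                        # detector returned nothing
n_elems = reduce(lambda x, y: x * y, np_boxes.shape)
print(n_elems < 6)                                 # True -> "No object detected."
```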
@@ -42,8 +42,22 @@ class Predictor(object):
    def create_paddle_predictor(self, args, inference_model_dir=None):
        if inference_model_dir is None:
            inference_model_dir = args.inference_model_dir
-        params_file = os.path.join(inference_model_dir, "inference.pdiparams")
-        model_file = os.path.join(inference_model_dir, "inference.pdmodel")
+        if "inference_int8.pdiparams" in os.listdir(inference_model_dir):
+            params_file = os.path.join(inference_model_dir,
+                                       "inference_int8.pdiparams")
+            model_file = os.path.join(inference_model_dir,
+                                      "inference_int8.pdmodel")
+            assert args.get(
+                "use_fp16", False
+            ) is False, "fp16 mode is not supported for int8 model inference, please set use_fp16 as False during inference."
+        else:
+            params_file = os.path.join(inference_model_dir,
+                                       "inference.pdiparams")
+            model_file = os.path.join(inference_model_dir, "inference.pdmodel")
+            assert args.get(
+                "use_int8", False
+            ) is False, "int8 mode is not supported for fp32 model inference, please set use_int8 as False during inference."

        config = Config(model_file, params_file)

        if args.use_gpu:
@@ -63,12 +77,18 @@ class Predictor(object):
        config.disable_glog_info()
        config.switch_ir_optim(args.ir_optim)  # default true
        if args.use_tensorrt:
+            precision = Config.Precision.Float32
+            if args.get("use_int8", False):
+                precision = Config.Precision.Int8
+            elif args.get("use_fp16", False):
+                precision = Config.Precision.Half
+
            config.enable_tensorrt_engine(
-                precision_mode=Config.Precision.Half
-                if args.use_fp16 else Config.Precision.Float32,
+                precision_mode=precision,
                max_batch_size=args.batch_size,
                workspace_size=1 << 30,
-                min_subgraph_size=30)
+                min_subgraph_size=30,
+                use_calib_mode=False)

        config.enable_memory_optim()
        # use zero copy
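Putting the two hunks together, the precision plumbing ends up in a Paddle Inference `Config` roughly like the hedged sketch below. File names and sizes are illustrative; `Config` and `create_predictor` come from `paddle.inference`, as in the surrounding code:

```python
from paddle.inference import Config, create_predictor

config = Config("inference.pdmodel", "inference.pdiparams")  # illustrative paths
config.enable_use_gpu(8000, 0)             # 8000 MB GPU memory pool, device 0
config.enable_tensorrt_engine(
    precision_mode=Config.Precision.Int8,  # or .Half / .Float32 via use_int8/use_fp16
    max_batch_size=1,
    workspace_size=1 << 30,
    min_subgraph_size=30,
    use_calib_mode=False)
config.enable_memory_optim()
predictor = create_predictor(config)
```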
@@ -0,0 +1,332 @@
# Building a Human/No-human Classification Case with PaddleClas

This tutorial shows how to use PaddleClas to quickly build a lightweight, high-accuracy, deployable human/no-human classification model. Based on data from human/no-human scenarios, it combines the lightweight backbone PPLCNet, SSLD pretrained weights, the EDA data augmentation strategy, the SKL-UGI knowledge distillation strategy, and the SHAS hyperparameter search strategy to obtain a binary classification model that is accurate, fast, and easy to deploy.

------

## Contents
- [1. Environment setup](#1)
- [2. Inference for the human/no-human scenario](#2)
  - [2.1 Download the model](#2.1)
  - [2.2 Inference with the model](#2.2)
    - [2.2.1 Predict a single image](#2.2.1)
    - [2.2.2 Batch prediction over a folder](#2.2.2)
- [3. Training for the human/no-human scenario](#3)
  - [3.1 Data preparation](#3.1)
  - [3.2 Model training](#3.2)
    - [3.2.1 Training with the default hyperparameters](#3.2.1)
      - [3.2.1.1 Training the lightweight model with default hyperparameters](#3.2.1.1)
      - [3.2.1.2 Training the teacher model with default hyperparameters](#3.2.1.2)
      - [3.2.1.3 Distillation training with default hyperparameters](#3.2.1.3)
    - [3.2.2 Training with hyperparameter search](#3.2.2)
- [4. Model evaluation and inference](#4)
  - [4.1 Model evaluation](#4.1)
  - [4.2 Model prediction](#4.2)
  - [4.3 Inference with the inference model](#4.3)
    - [4.3.1 Export the inference model](#4.3.1)
    - [4.3.2 Inference with the inference model](#4.3.2)

<a name="1"></a>
|
||||
|
||||
## 1. 环境配置
|
||||
|
||||
* 安装:请先参考 [Paddle 安装教程](../installation/install_paddle.md) 以及 [PaddleClas 安装教程](../installation/install_paddleclas.md) 配置 PaddleClas 运行环境。
|
||||
|
||||
<a name="2"></a>
|
||||
|
||||
## 2. 有人/无人场景推理预测
|
||||
|
||||
<a name="2.1"></a>
|
||||
|
||||
### 2.1 下载模型
|
||||
|
||||
* 进入 `deploy` 运行目录。
|
||||
|
||||
```
|
||||
cd deploy
|
||||
```
|
||||
|
||||
下载有人/无人分类的模型。
|
||||
|
||||
```
|
||||
mkdir models
|
||||
cd models
|
||||
# 下载inference 模型并解压
|
||||
wget https://paddleclas.bj.bcebos.com/models/PULC/person_cls_infer.tar && tar -xf person_cls_infer.tar
|
||||
```
|
||||
|
||||
解压完毕后,`models` 文件夹下应有如下文件结构:
|
||||
|
||||
```
|
||||
├── person_cls_infer
|
||||
│ ├── inference.pdiparams
|
||||
│ ├── inference.pdiparams.info
|
||||
│ └── inference.pdmodel
|
||||
```
|
||||
|
||||
<a name="2.2"></a>
|
||||
|
||||
### 2.2 模型推理预测
|
||||
|
||||
<a name="2.2.1"></a>
|
||||
|
||||
#### 2.2.1 预测单张图像
|
||||
|
||||
返回 `deploy` 目录:
|
||||
|
||||
```
|
||||
cd ../
|
||||
```
|
||||
|
||||
运行下面的命令,对图像 `./images/PULC/person/objects365_02035329.jpg` 进行有人/无人分类。
|
||||
|
||||
```shell
|
||||
# 使用下面的命令使用 GPU 进行预测
|
||||
python3.7 python/predict_cls.py -c configs/PULC/person/inference_person_cls.yaml -o PostProcess.ThreshOutput.threshold=0.9794
|
||||
# 使用下面的命令使用 CPU 进行预测
|
||||
python3.7 python/predict_cls.py -c configs/PULC/person/inference_person_cls.yaml -o PostProcess.ThreshOutput.threshold=0.9794 -o Global.use_gpu=False
|
||||
```
|
||||
|
||||
输出结果如下。
|
||||
|
||||
```
|
||||
objects365_02035329.jpg: class id(s): [1], score(s): [1.00], label_name(s): ['someone']
|
||||
```
|
||||
|
||||
|
||||
**备注:** 真实场景中往往需要在假正类率(Fpr)小于某一个指标下求真正类率(Tpr),该场景中的`val`数据集在千分之一Fpr下得到的最佳Tpr所得到的阈值为`0.9794`,故此处的`threshold`为`0.9794`。该阈值的确定方法可以参考[3.2节](#3.2)
|
||||
|
||||
<a name="2.2.2"></a>
|
||||
|
||||
#### 2.2.2 基于文件夹的批量预测
|
||||
|
||||
如果希望预测文件夹内的图像,可以直接修改配置文件中的 `Global.infer_imgs` 字段,也可以通过下面的 `-o` 参数修改对应的配置。
|
||||
|
||||
```shell
|
||||
# 使用下面的命令使用 GPU 进行预测,如果希望使用 CPU 预测,可以在命令后面添加 -o Global.use_gpu=False
|
||||
python3.7 python/predict_cls.py -c configs/PULC/person/inference_person_cls.yaml -o Global.infer_imgs="./images/PULC/person/"
|
||||
```
|
||||
|
||||
终端中会输出该文件夹内所有图像的分类结果,如下所示。
|
||||
|
||||
```
|
||||
objects365_01780782.jpg: class id(s): [0], score(s): [1.00], label_name(s): ['nobody']
|
||||
objects365_02035329.jpg: class id(s): [1], score(s): [1.00], label_name(s): ['someone']
|
||||
```
|
||||
|
||||
其中,`someone` 表示该图里存在人,`nobody` 表示该图里不存在人。
|
||||
|
||||
<a name="3"></a>
|
||||
|
||||
## 3.有人/无人场景训练
|
||||
|
||||
<a name="3.1"></a>
|
||||
|
||||
### 3.1 数据准备
|
||||
|
||||
进入 PaddleClas 目录。
|
||||
|
||||
```
|
||||
cd path_to_PaddleClas
|
||||
```
|
||||
|
||||
进入 `dataset/` 目录,下载并解压有人/无人场景的数据。
|
||||
|
||||
```shell
|
||||
cd dataset
|
||||
wget https://paddleclas.bj.bcebos.com/data/cls_demo/person.tar
|
||||
tar -xf person.tar
|
||||
cd ../
|
||||
```
|
||||
|
||||
执行上述命令后,`dataset/`下存在`person`目录,该目录中具有以下数据:
|
||||
|
||||
```
|
||||
|
||||
├── train
|
||||
│ ├── 000000000009.jpg
|
||||
│ ├── 000000000025.jpg
|
||||
...
|
||||
├── val
|
||||
│ ├── objects365_01780637.jpg
|
||||
│ ├── objects365_01780640.jpg
|
||||
...
|
||||
├── ImageNet_val
|
||||
│ ├── ILSVRC2012_val_00000001.JPEG
|
||||
│ ├── ILSVRC2012_val_00000002.JPEG
|
||||
...
|
||||
├── train_list.txt
|
||||
├── train_list.txt.debug
|
||||
├── train_list_for_distill.txt
|
||||
├── val_list.txt
|
||||
└── val_list.txt.debug
|
||||
```
|
||||
|
||||
其中`train/`和`val/`分别为训练集和验证集。`train_list.txt`和`val_list.txt`分别为训练集和验证集的标签文件,`train_list.txt.debug`和`val_list.txt.debug`分别为训练集和验证集的`debug`标签文件,其分别是`train_list.txt`和`val_list.txt`的子集,用该文件可以快速体验本案例的流程。`ImageNet_val/`是ImageNet的验证集,该集合和`train`集合的混合数据用于本案例的`SKL-UGI知识蒸馏策略`,对应的训练标签文件为`train_list_for_distill.txt`。
|
||||
|
||||
* **注意**:
|
||||
|
||||
* 本案例中所使用的所有数据集均为开源数据,`train`集合为[MS-COCO数据](https://cocodataset.org/#overview)的训练集的子集,`val`集合为[Object365数据](https://www.objects365.org/overview.html)的训练集的子集,`ImageNet_val`为[ImageNet数据](https://www.image-net.org/)的验证集。数据集的筛选流程可以参考[有人/无人场景数据集筛选方法]()。
|
||||
|
||||
<a name="3.2"></a>
|
||||
|
||||
### 3.2 模型训练
|
||||
|
||||
<a name="3.2.1"></a>
|
||||
|
||||
#### 3.2.1 基于默认超参数训练
|
||||
|
||||
<a name="3.2.1.1"></a>
|
||||
|
||||
##### 3.2.1.1 基于默认超参数训练轻量级模型
|
||||
|
||||
在`ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0.yaml`中提供了基于该场景的训练配置,可以通过如下脚本启动训练:
|
||||
|
||||
```shell
|
||||
export CUDA_VISIBLE_DEVICES=0,1,2,3
|
||||
python3 -m paddle.distributed.launch \
|
||||
--gpus="0,1,2,3" \
|
||||
tools/train.py \
|
||||
-c ./ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0.yaml
|
||||
```
|
||||
|
||||
验证集的最佳指标在0.94-0.95之间(数据集较小,容易造成波动)。
|
||||
|
||||
**备注:**
|
||||
|
||||
* 此时使用的指标为Tpr,该指标描述了在假正类率(Fpr)小于某一个指标时的真正类率(Tpr),是产业中二分类问题常用的指标之一。在本案例中,Fpr为千分之一。关于Fpr和Tpr的更多介绍,可以参考[这里](https://baike.baidu.com/item/AUC/19282953)。
|
||||
|
||||
* 在eval时,会打印出来当前最佳的TprAtFpr指标,具体地,其会打印当前的`Fpr`、`Tpr`值,以及当前的`threshold`值,`Tpr`值反映了在当前`Fpr`值下的召回率,该值越高,代表模型越好。`threshold` 表示当前最佳`Fpr`所对应的分类阈值,可用于后续模型部署落地等。
|
||||
|
||||
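The following is a hedged sketch of that metric, written with scikit-learn rather than the PaddleClas implementation; the helper name `best_tpr_at_fpr` and the labels/scores are illustrative toy values:

```python
import numpy as np
from sklearn.metrics import roc_curve

def best_tpr_at_fpr(labels, scores, max_fpr=0.001):
    """Highest Tpr whose Fpr stays under max_fpr, plus its threshold."""
    fpr, tpr, thresholds = roc_curve(labels, scores)
    valid = fpr <= max_fpr          # operating points within the Fpr budget
    idx = int(np.argmax(tpr[valid]))
    return tpr[valid][idx], thresholds[valid][idx]

labels = np.array([0, 0, 0, 1, 1, 1])
scores = np.array([0.10, 0.40, 0.55, 0.35, 0.80, 0.95])
print(best_tpr_at_fpr(labels, scores, max_fpr=0.0))  # toy data: Fpr budget 0
```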
<a name="3.2.1.2"></a>
|
||||
|
||||
##### 3.2.1.2 基于默认超参数训练教师模型
|
||||
|
||||
复用`ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0.yaml`中的超参数,训练教师模型,训练脚本如下:
|
||||
|
||||
```shell
|
||||
export CUDA_VISIBLE_DEVICES=0,1,2,3
|
||||
python3 -m paddle.distributed.launch \
|
||||
--gpus="0,1,2,3" \
|
||||
tools/train.py \
|
||||
-c ./ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0.yaml \
|
||||
-o Arch.name=ResNet101_vd
|
||||
```
|
||||
|
||||
验证集的最佳指标为0.96-0.98之间,当前教师模型最好的权重保存在`output/ResNet101_vd/best_model.pdparams`。
|
||||
|
||||
<a name="3.2.1.3"></a>
|
||||
|
||||
##### 3.2.1.3 基于默认超参数进行蒸馏训练
|
||||
|
||||
配置文件`ppcls/configs/PULC/PULC/Distillation/PPLCNet_x1_0_distillation.yaml`提供了`SKL-UGI知识蒸馏策略`的配置。该配置将`ResNet101_vd`当作教师模型,`PPLCNet_x1_0`当作学生模型,使用ImageNet数据集的验证集作为新增的无标签数据。训练脚本如下:
|
||||
|
||||
```shell
|
||||
export CUDA_VISIBLE_DEVICES=0,1,2,3
|
||||
python3 -m paddle.distributed.launch \
|
||||
--gpus="0,1,2,3" \
|
||||
tools/train.py \
|
||||
-c ./ppcls/configs/PULC/person/Distillation/PPLCNet_x1_0_distillation.yaml \
|
||||
-o Arch.models.0.Teacher.pretrained=output/ResNet101_vd/best_model
|
||||
```
|
||||
|
||||
验证集的最佳指标为0.95-0.97之间,当前模型最好的权重保存在`output/DistillationModel/best_model_student.pdparams`。
|
||||
|
||||
<a name="3.2.2"></a>
|
||||
|
||||
#### 3.2.2 超参数搜索训练
|
||||
|
||||
[3.2 小节](#3.2) 提供了在已经搜索并得到的超参数上进行了训练,此部分内容提供了搜索的过程,此过程是为了得到更好的训练超参数。
|
||||
|
||||
* 搜索运行脚本如下:
|
||||
|
||||
```shell
|
||||
python tools/search_strategy.py -c ppcls/configs/StrategySearch/person.yaml
|
||||
```
|
||||
|
||||
在`ppcls/configs/StrategySearch/person.yaml`中指定了具体的 GPU id 号和搜索配置, 默认搜索的训练日志和模型存放于`output/search_person`中,最终的蒸馏模型存放于`output/search_person/search_res/DistillationModel/best_model_student.pdparams`。
|
||||
|
||||
* **注意**:
|
||||
|
||||
* 3.1小节提供的默认配置已经经过了搜索,所以此过程不是必要的过程,如果自己的训练数据集有变化,可以尝试此过程。
|
||||
|
||||
* 此过程基于当前数据集在 V100 4 卡上大概需要耗时 10 小时,如果缺少机器资源,希望体验搜索过程,可以将`ppcls/configs/cls_demo/person/PPLCNet/PPLCNet_x1_0_search.yaml`中的`train_list.txt`和`val_list.txt`分别替换为`train_list.txt.debug`和`val_list.txt.debug`。替换list只是为了加速跑通整个搜索过程,由于数据量较小,其搜素的结果没有参考性。另外,搜索空间可以根据当前的机器资源来调整,如果机器资源有限,可以尝试缩小搜索空间,如果机器资源较充足,可以尝试扩大搜索空间。
|
||||
|
||||
* 如果此过程搜索的得到的超参数与[3.2.1小节](#3.2.1)提供的超参数不一致,主要是由于训练数据较小造成的波动导致,可以忽略。
|
||||
|
||||
|
||||
<a name="4"></a>
|
||||
|
||||
## 4. 模型评估与推理
|
||||
|
||||
|
||||
<a name="4.1"></a>
|
||||
|
||||
### 4.1 模型评估
|
||||
|
||||
训练好模型之后,可以通过以下命令实现对模型指标的评估。
|
||||
|
||||
```bash
|
||||
python3 tools/eval.py \
|
||||
-c ./ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0.yaml \
|
||||
-o Global.pretrained_model="output/DistillationModel/best_model_student"
|
||||
```
|
||||
|
||||
<a name="4.2"></a>
|
||||
|
||||
### 4.2 模型预测
|
||||
|
||||
模型训练完成之后,可以加载训练得到的预训练模型,进行模型预测。在模型库的 `tools/infer.py` 中提供了完整的示例,只需执行下述命令即可完成模型预测:
|
||||
|
||||
```python
|
||||
python3 tools/infer.py \
|
||||
-c ./ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0.yaml \
|
||||
-o Infer.infer_imgs=./dataset/person/val/objects365_01780637.jpg \
|
||||
-o Global.pretrained_model=output/DistillationModel/best_model_student \
|
||||
-o Global.pretrained_model=Infer.PostProcess.threshold=0.9794
|
||||
```
|
||||
|
||||
输出结果如下:
|
||||
|
||||
```
|
||||
[{'class_ids': [0], 'scores': [0.9878496769815683], 'label_names': ['nobody'], 'file_name': './dataset/person/val/objects365_01780637.jpg'}]
|
||||
```
|
||||
|
||||
**备注:** 这里的`Infer.PostProcess.threshold`的值需要根据实际场景来确定,此处的`0.9794`是在该场景中的`val`数据集在千分之一Fpr下得到的最佳Tpr所得到的。
|
||||
|
||||
<a name="4.3"></a>
|
||||
|
||||
### 4.3 使用 inference 模型进行推理
|
||||
|
||||
<a name="4.3.1"></a>
|
||||
|
||||
### 4.3.1 导出 inference 模型
|
||||
|
||||
通过导出 inference 模型,PaddlePaddle 支持使用预测引擎进行预测推理。接下来介绍如何用预测引擎进行推理:
|
||||
首先,对训练好的模型进行转换:
|
||||
|
||||
```bash
|
||||
python3 tools/export_model.py \
|
||||
-c ./ppcls/configs/cls_demo/PULC/PPLCNet/PPLCNet_x1_0.yaml \
|
||||
-o Global.pretrained_model=output/DistillationModel/best_model_student \
|
||||
-o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_person
|
||||
```
|
||||
执行完该脚本后会在`deploy/models/`下生成`PPLCNet_x1_0_person`文件夹,该文件夹中的模型与 2.2 节下载的推理预测模型格式一致。
|
||||
|
||||
<a name="4.3.2"></a>
|
||||
|
||||
### 4.3.2 基于 inference 模型推理预测
|
||||
推理预测的脚本为:
|
||||
|
||||
```
|
||||
python3.7 python/predict_cls.py -c configs/PULC/person/inference_person_cls.yaml -o Global.inference_model_dir="models/PPLCNet_x1_0_person" -o PostProcess.ThreshOutput.threshold=0.9794
|
||||
```
|
||||
|
||||
**备注:**
|
||||
|
||||
- 此处的`PostProcess.ThreshOutput.threshold`由eval时的最佳`threshold`来确定。
|
||||
- 更多关于推理的细节,可以参考[2.2节](#2.2)。
|
||||
|
|
@@ -5,40 +5,41 @@

## Contents

-- [1. Model zoo overview](#1)
-- [2. SSLD knowledge distillation pretrained models](#2)
-  - [2.1 Server-side knowledge distillation models](#2.1)
-  - [2.2 Mobile-side knowledge distillation models](#2.2)
-  - [2.3 Intel CPU knowledge distillation models](#2.3)
-- [3. PP-LCNet series](#3)
-- [4. ResNet series](#4)
-- [5. Mobile series](#5)
-- [6. SEResNeXt and Res2Net series](#6)
-- [7. DPN and DenseNet series](#7)
-- [8. HRNet series](#8)
-- [9. Inception series](#9)
-- [10. EfficientNet and ResNeXt101_wsl series](#10)
-- [11. ResNeSt and RegNet series](#11)
-- [12. ViT_and_DeiT series](#12)
-- [13. RepVGG series](#13)
-- [14. MixNet series](#14)
-- [15. ReXNet series](#15)
-- [16. SwinTransformer series](#16)
-- [17. LeViT series](#17)
-- [18. Twins series](#18)
-- [19. HarDNet series](#19)
-- [20. DLA series](#20)
-- [21. RedNet series](#21)
-- [22. TNT series](#22)
-- [23. CSwinTransformer series](#23)
-- [24. PVTV2 series](#24)
-- [25. MobileViT series](#25)
-- [26. Other models](#26)
+- [Model zoo overview](#Overview)
+- [SSLD knowledge distillation pretrained models](#SSLD)
+  - [Server-side knowledge distillation models](#SSLD_server)
+  - [Mobile-side knowledge distillation models](#SSLD_mobile)
+  - [Intel CPU knowledge distillation models](#SSLD_intel_cpu)
+- [PP-LCNet & PP-LCNetV2 series](#PPLCNet)
+- [PP-HGNet series](#PPHGNet)
+- [ResNet series](#ResNet)
+- [Mobile series](#Mobile)
+- [SEResNeXt and Res2Net series](#SEResNeXt_Res2Net)
+- [DPN and DenseNet series](#DPN&DenseNet)
+- [HRNet series](#HRNet)
+- [Inception series](#Inception)
+- [EfficientNet and ResNeXt101_wsl series](#EfficientNet&ResNeXt101_wsl)
+- [ResNeSt and RegNet series](#ResNeSt&RegNet)
+- [ViT_and_DeiT series](#ViT&DeiT)
+- [RepVGG series](#RepVGG)
+- [MixNet series](#MixNet)
+- [ReXNet series](#ReXNet)
+- [SwinTransformer series](#SwinTransformer)
+- [LeViT series](#LeViT)
+- [Twins series](#Twins)
+- [HarDNet series](#HarDNet)
+- [DLA series](#DLA)
+- [RedNet series](#RedNet)
+- [TNT series](#TNT)
+- [CSwinTransformer series](#CSwinTransformer)
+- [PVTV2 series](#PVTV2)
+- [MobileViT series](#MobileViT)
+- [Other models](#Others)
+- [References](#reference)

-<a name="1"></a>
+<a name="Overview"></a>

-## 1. Model Zoo Overview
+## Model Zoo Overview

Based on the ImageNet1k classification dataset, PaddleClas supports 37 series of classification network architectures and their 217 pretrained image classification models. Training tricks and a brief introduction and performance evaluation of each series are presented in the corresponding sections. All speed metrics below were evaluated as follows:
* The Arm CPU evaluation environment is based on a Snapdragon 855 (SD855).
@@ -58,14 +59,14 @@

![](../../images/models/V100_benchmark/v100.fp32.bs1.visiontransformer.png)

-<a name="2"></a>
+<a name="SSLD"></a>

-## 2. SSLD Knowledge Distillation Pretrained Models
+## SSLD Knowledge Distillation Pretrained Models
The pretrained models based on SSLD knowledge distillation are listed below. For more on the SSLD knowledge distillation scheme, please refer to: [SSLD knowledge distillation documentation](./knowledge_distillation.md).

-<a name="2.1"></a>
+<a name="SSLD_server"></a>

-### 2.1 Server-side Knowledge Distillation Models
+### Server-side Knowledge Distillation Models

| Model | Top-1 Acc | Reference<br>Top-1 Acc | Acc gain | time(ms)<br>bs=1 | time(ms)<br>bs=4 | time(ms)<br/>bs=8 | FLOPs(G) | Params(M) | Pretrained model | Inference model |
|---------------------|-----------|-----------|---------------|----------------|-----------|----------|-----------|-----------------------------------|-----------------------------------|-----------------------------------|
@@ -78,10 +79,12 @@

| HRNet_W18_C_ssld | 0.812 | 0.769 | 0.043 | 6.66 | 8.94 | 11.95 | 4.32 | 21.35 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W18_C_ssld_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/HRNet_W18_C_ssld_infer.tar) |
| HRNet_W48_C_ssld | 0.836 | 0.790 | 0.046 | 11.07 | 17.06 | 27.28 | 17.34 | 77.57 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W48_C_ssld_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/HRNet_W48_C_ssld_infer.tar) |
| SE_HRNet_W64_C_ssld | 0.848 | - | - | 17.11 | 26.87 | 43.24 | 29.00 | 129.12 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/SE_HRNet_W64_C_ssld_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SE_HRNet_W64_C_ssld_infer.tar) |
+| PPHGNet_tiny_ssld | 0.8195 | 0.7983 | 0.021 | 1.77 | - | - | 4.54 | 14.75 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_ssld_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_tiny_ssld_infer.tar) |
+| PPHGNet_small_ssld | 0.8382 | 0.8151 | 0.023 | 2.52 | - | - | 8.53 | 24.38 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_ssld_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_ssld_infer.tar) |

-<a name="2.2"></a>
+<a name="SSLD_mobile"></a>

-### 2.2 Mobile-side Knowledge Distillation Models
+### Mobile-side Knowledge Distillation Models

| Model | Top-1 Acc | Reference<br>Top-1 Acc | Acc gain | SD855 time(ms)<br>bs=1, thread=1 | SD855 time(ms)<br/>bs=1, thread=2 | SD855 time(ms)<br/>bs=1, thread=4 | FLOPs(M) | Params(M) | <span style="white-space:nowrap;">Model size(M)</span> | Pretrained model | Inference model |
|---------------------|-----------|-----------|---------------|----------------|-----------|----------|-----------|-----------------------------------|-----------------------------------|-----------------------------------|-----------------------------------|
@@ -92,9 +95,9 @@

| MobileNetV3_small_x1_0_ssld | 0.713 | 0.682 | 0.031 | 5.63 | 3.65 | 2.60 | 63.67 | 2.95 | 12 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x1_0_ssld_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileNetV3_small_x1_0_ssld_infer.tar) |
| GhostNet_x1_3_ssld | 0.794 | 0.757 | 0.037 | 19.16 | 12.25 | 9.40 | 236.89 | 7.38 | 29 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x1_3_ssld_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/GhostNet_x1_3_ssld_infer.tar) |

-<a name="2.3"></a>
+<a name="SSLD_intel_cpu"></a>

-### 2.3 Intel CPU Knowledge Distillation Models
+### Intel CPU Knowledge Distillation Models

| Model | Top-1 Acc | Reference<br>Top-1 Acc | Acc gain | Intel-Xeon-Gold-6148 time(ms)<br>bs=1 | FLOPs(M) | Params(M) | Pretrained model | Inference model |
|---------------------|-----------|-----------|---------------|----------------|----------|-----------|-----------------------------------|-----------------------------------|
@@ -104,26 +107,44 @@

* Note: `Reference Top-1 Acc` means the accuracy of the pretrained model that PaddleClas trained on the ImageNet1k dataset.

-<a name="3"></a>
+<a name="PPLCNet"></a>

-## 3. PP-LCNet Series <sup>[[28](#ref28)]</sup>
+## PP-LCNet & PP-LCNetV2 Series <sup>[[28](#ref28)]</sup>

-The accuracy and speed metrics of the PP-LCNet series models are shown in the table below. For more details about this series, please refer to: [PP-LCNet series model documentation](../models/PP-LCNet.md).
+The accuracy and speed metrics of the PP-LCNet series models are shown in the table below. For more details about this series, please refer to: [PP-LCNet series model documentation](../models/PP-LCNet.md), [PP-LCNetV2 series model documentation](../models/PP-LCNetV2.md).

-| Model | Top-1 Acc | Top-5 Acc | Intel-Xeon-Gold-6148 time(ms)<br>bs=1 | FLOPs(M) | Params(M) | Pretrained model | Inference model |
+| Model | Top-1 Acc | Top-5 Acc | time(ms)<sup>*</sup><br>bs=1 | FLOPs(M) | Params(M) | Pretrained model | Inference model |
|:--:|:--:|:--:|:--:|----|----|----|:--:|
-| PPLCNet_x0_25 |0.5186 | 0.7565 | 1.61785 | 18.25 | 1.52 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_25_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_25_infer.tar) |
-| PPLCNet_x0_35 |0.5809 | 0.8083 | 2.11344 | 29.46 | 1.65 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_35_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_35_infer.tar) |
-| PPLCNet_x0_5 |0.6314 | 0.8466 | 2.72974 | 47.28 | 1.89 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_5_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_5_infer.tar) |
-| PPLCNet_x0_75 |0.6818 | 0.8830 | 4.51216 | 98.82 | 2.37 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_75_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_75_infer.tar) |
-| PPLCNet_x1_0 |0.7132 | 0.9003 | 6.49276 | 160.81 | 2.96 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_0_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_0_infer.tar) |
-| PPLCNet_x1_5 |0.7371 | 0.9153 | 12.2601 | 341.86 | 4.52 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_5_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_5_infer.tar) |
-| PPLCNet_x2_0 |0.7518 | 0.9227 | 20.1667 | 590 | 6.54 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_0_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_0_infer.tar) |
-| PPLCNet_x2_5 |0.7660 | 0.9300 | 29.595 | 906 | 9.04 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_5_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_5_infer.tar) |
+| PPLCNet_x0_25 |0.5186 | 0.7565 | 1.74 | 18.25 | 1.52 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_25_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_25_infer.tar) |
+| PPLCNet_x0_35 |0.5809 | 0.8083 | 1.92 | 29.46 | 1.65 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_35_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_35_infer.tar) |
+| PPLCNet_x0_5 |0.6314 | 0.8466 | 2.05 | 47.28 | 1.89 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_5_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_5_infer.tar) |
+| PPLCNet_x0_75 |0.6818 | 0.8830 | 2.29 | 98.82 | 2.37 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_75_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_75_infer.tar) |
+| PPLCNet_x1_0 |0.7132 | 0.9003 | 2.46 | 160.81 | 2.96 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_0_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_0_infer.tar) |
+| PPLCNet_x1_5 |0.7371 | 0.9153 | 3.19 | 341.86 | 4.52 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_5_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_5_infer.tar) |
+| PPLCNet_x2_0 |0.7518 | 0.9227 | 4.27 | 590 | 6.54 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_0_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_0_infer.tar) |
+| PPLCNet_x2_5 |0.7660 | 0.9300 | 5.39 | 906 | 9.04 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_5_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_5_infer.tar) |
+
+| Model | Top-1 Acc | Top-5 Acc | time(ms)<sup>**</sup><br>bs=1 | FLOPs(M) | Params(M) | Pretrained model | Inference model |
+|:--:|:--:|:--:|:--:|----|----|----|:--:|
+| PPLCNetV2_base | 77.04 | 93.27 | 4.32 | 604 | 6.6 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNetV2_base_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNetV2_base_infer.tar) |
+
+*: based on the Intel-Xeon-Gold-6148 hardware platform with the PaddlePaddle inference engine.
+
+**: based on the Intel-Xeon-Gold-6271C hardware platform with the OpenVINO 2021.4.2 inference engine.
+
+## PP-HGNet Series
+
+The accuracy and speed metrics of the PP-HGNet series models are shown in the table below. For more details about this series, please refer to: [PP-HGNet series model documentation](../models/PP-HGNet.md).
+
+| Model | Top-1 Acc | Top-5 Acc | time(ms)<br>bs=1 | time(ms)<br>bs=4 | time(ms)<br/>bs=8 | FLOPs(G) | Params(M) | Pretrained model | Inference model |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| PPHGNet_tiny | 0.7983 | 0.9504 | 1.77 | - | - | 4.54 | 14.75 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_tiny_infer.tar) |
+| PPHGNet_small | 0.8151 | 0.9582 | 2.52 | - | - | 8.53 | 24.38 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_infer.tar) |

-<a name="4"></a>
+<a name="ResNet"></a>

-## 4. ResNet Series <sup>[[1](#ref1)]</sup>
+## ResNet Series <sup>[[1](#ref1)]</sup>

The accuracy and speed metrics of the ResNet and ResNet_vd series models are shown in the table below. For more details about this series, please refer to: [ResNet and vd series model documentation](../models/ResNet_and_vd.md).
@@ -145,9 +166,9 @@ ResNet and its Vd series

| ResNet50_vd_<br>ssld | 0.8300 | 0.9640 | 2.60 | 4.86 | 7.63 | 4.35 | 25.63 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_ssld_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_ssld_infer.tar) |
| ResNet101_vd_<br>ssld | 0.8373 | 0.9669 | 4.43 | 8.25 | 12.60 | 8.08 | 44.67 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_vd_ssld_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet101_vd_ssld_infer.tar) |

-<a name="5"></a>
+<a name="Mobile"></a>

-## 5. Mobile Series <sup>[[3](#ref3)][[4](#ref4)][[5](#ref5)][[6](#ref6)][[23](#ref23)]</sup>
+## Mobile Series <sup>[[3](#ref3)][[4](#ref4)][[5](#ref5)][[6](#ref6)][[23](#ref23)]</sup>

The accuracy and speed metrics of the mobile series models are shown in the table below. For more details about this series, please refer to: [Mobile series model documentation](../models/Mobile.md).
@@ -194,9 +215,9 @@ ResNet and its Vd series

| ESNet_x0_75 | 0.7224 | 0.9045 |9.59|6.28|4.52| 123.74 | 3.87 | 15 |[Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x0_75_pretrained.pdparams) |[Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ESNet_x0_75_infer.tar) |
| ESNet_x1_0 | 0.7392 | 0.9140 |13.67|8.71|5.97| 197.33 | 4.64 | 18 |[Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x1_0_pretrained.pdparams) |[Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ESNet_x1_0_infer.tar) |

-<a name="6"></a>
+<a name="SEResNeXt&Res2Net"></a>

-## 6. SEResNeXt and Res2Net Series <sup>[[7](#ref7)][[8](#ref8)][[9](#ref9)]</sup>
+## SEResNeXt and Res2Net Series <sup>[[7](#ref7)][[8](#ref8)][[9](#ref9)]</sup>

The accuracy and speed metrics of the SEResNeXt and Res2Net series models are shown in the table below. For more details about this series, please refer to: [SEResNeXt and Res2Net series model documentation](../models/SEResNext_and_Res2Net.md).
@@ -229,9 +250,9 @@ SEResNeXt and Res2Net series

| SE_ResNeXt101_<br>32x4d | 0.7939 | 0.9443 | 13.31 | 21.85 | 28.77 | 8.03 | 49.09 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt101_32x4d_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SE_ResNeXt101_32x4d_infer.tar) |
| SENet154_vd | 0.8140 | 0.9548 | 34.83 | 51.22 | 69.74 | 24.45 | 122.03 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SENet154_vd_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SENet154_vd_infer.tar) |

-<a name="7"></a>
+<a name="DPN&DenseNet"></a>

-## 7. DPN and DenseNet Series <sup>[[14](#ref14)][[15](#ref15)]</sup>
+## DPN and DenseNet Series <sup>[[14](#ref14)][[15](#ref15)]</sup>

The accuracy and speed metrics of the DPN and DenseNet series models are shown in the table below. For more details about this series, please refer to: [DPN and DenseNet series model documentation](../models/DPN_DenseNet.md).
@@ -249,9 +270,9 @@ DPN and DenseNet series

| DPN107 | 0.8089 | 0.9532 | 19.46 | 35.62 | 50.22 | 18.38 | 87.13 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN107_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/DPN107_infer.tar) |
| DPN131 | 0.8070 | 0.9514 | 19.64 | 34.60 | 47.42 | 16.09 | 79.48 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN131_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/DPN131_infer.tar) |

-<a name="8"></a>
+<a name="HRNet"></a>

-## 8. HRNet Series <sup>[[13](#ref13)]</sup>
+## HRNet Series <sup>[[13](#ref13)]</sup>

The accuracy and speed metrics of the HRNet series models are shown in the table below. For more details about this series, please refer to: [HRNet series model documentation](../models/HRNet.md).
@@ -268,9 +289,9 @@ HRNet series

| HRNet_W64_C | 0.7930 | 0.9461 | 13.82 | 21.15 | 35.51 | 28.97 | 128.18 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W64_C_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/HRNet_W64_C_infer.tar) |
| SE_HRNet_W64_C_ssld | 0.8475 | 0.9726 | 17.11 | 26.87 | 43.24 | 29.00 | 129.12 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/SE_HRNet_W64_C_ssld_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SE_HRNet_W64_C_ssld_infer.tar) |

-<a name="9"></a>
+<a name="Inception"></a>

-## 9. Inception Series <sup>[[10](#ref10)][[11](#ref11)][[12](#ref12)][[26](#ref26)]</sup>
+## Inception Series <sup>[[10](#ref10)][[11](#ref11)][[12](#ref12)][[26](#ref26)]</sup>

The accuracy and speed metrics of the Inception series models are shown in the table below. For more details about this series, please refer to: [Inception series model documentation](../models/Inception.md).
@@ -285,9 +306,9 @@ Inception series

| InceptionV3 | 0.7914 | 0.9459 | 4.78 | 8.53 | 12.28 | 5.73 | 23.87 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/InceptionV3_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/InceptionV3_infer.tar) |
| InceptionV4 | 0.8077 | 0.9526 | 8.93 | 15.17 | 21.56 | 12.29 | 42.74 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/InceptionV4_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/InceptionV4_infer.tar) |

-<a name="10"></a>
+<a name="EfficientNet&ResNeXt101_wsl"></a>

-## 10. EfficientNet and ResNeXt101_wsl Series <sup>[[16](#ref16)][[17](#ref17)]</sup>
+## EfficientNet and ResNeXt101_wsl Series <sup>[[16](#ref16)][[17](#ref17)]</sup>

The accuracy and speed metrics of the EfficientNet and ResNeXt101_wsl series models are shown in the table below. For more details about this series, please refer to: [EfficientNet and ResNeXt101_wsl series model documentation](../models/EfficientNet_and_ResNeXt101_wsl.md).
@@ -308,9 +329,9 @@ EfficientNet and ResNeXt101_wsl series

| EfficientNetB7 | 0.8430 | 0.9689 | 25.91 | 71.23 | 128.20 | 38.45 | 66.66 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB7_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/EfficientNetB7_infer.tar) |
| EfficientNetB0_<br>small | 0.7580 | 0.9258 | 1.24 | 2.59 | 3.92 | 0.40 | 4.69 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB0_small_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/EfficientNetB0_small_infer.tar) |

-<a name="11"></a>
+<a name="ResNeSt&RegNet"></a>

-## 11. ResNeSt and RegNet Series <sup>[[24](#ref24)][[25](#ref25)]</sup>
+## ResNeSt and RegNet Series <sup>[[24](#ref24)][[25](#ref25)]</sup>

The accuracy and speed metrics of the ResNeSt and RegNet series models are shown in the table below. For more details about this series, please refer to: [ResNeSt and RegNet series model documentation](../models/ResNeSt_RegNet.md).
@@ -320,9 +341,9 @@ ResNeSt and RegNet series

| ResNeSt50 | 0.8083 | 0.9542 | 7.36 | 10.23 | 13.84 | 5.40 | 27.54 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeSt50_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNeSt50_infer.tar) |
| RegNetX_4GF | 0.785 | 0.9416 | 6.46 | 8.48 | 11.45 | 4.00 | 22.23 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetX_4GF_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/RegNetX_4GF_infer.tar) |

-<a name="12"></a>
+<a name="ViT&DeiT"></a>

-## 12. ViT_and_DeiT Series <sup>[[31](#ref31)][[32](#ref32)]</sup>
+## ViT_and_DeiT Series <sup>[[31](#ref31)][[32](#ref32)]</sup>

The accuracy and speed metrics of the ViT (Vision Transformer) and DeiT (Data-efficient Image Transformers) series models are shown in the table below. For more details about this series, please refer to: [ViT_and_DeiT series model documentation](../models/ViT_and_DeiT.md).
@@ -347,9 +368,9 @@ ViT and DeiT series

| DeiT_base_<br>distilled_patch16_224 | 0.831 | 0.964 | 6.17 | 14.94 | 28.58 | 16.93 | 87.18 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_distilled_patch16_224_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/DeiT_base_distilled_patch16_224_infer.tar) |
| DeiT_base_<br>distilled_patch16_384 | 0.851 | 0.973 | 14.12 | 48.76 | 97.09 | 49.43 | 87.18 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_distilled_patch16_384_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/DeiT_base_distilled_patch16_384_infer.tar) |

-<a name="13"></a>
+<a name="RepVGG"></a>

-## 13. RepVGG Series <sup>[[36](#ref36)]</sup>
+## RepVGG Series <sup>[[36](#ref36)]</sup>

The accuracy and speed metrics of the RepVGG series models are shown in the table below. For more details, please refer to: [RepVGG series model documentation](../models/RepVGG.md).
@@ -366,9 +387,9 @@ ViT and DeiT series

| RepVGG_B2g4 | 0.7881 | 0.9448 | | | | 11.34 | 55.78 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B2g4_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/RepVGG_B2g4_infer.tar) |
| RepVGG_B3g4 | 0.7965 | 0.9485 | | | | 16.07 | 75.63 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B3g4_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/RepVGG_B3g4_infer.tar) |

-<a name="14"></a>
+<a name="MixNet"></a>

-## 14. MixNet Series <sup>[[29](#ref29)]</sup>
+## MixNet Series <sup>[[29](#ref29)]</sup>

The accuracy and speed metrics of the MixNet series models are shown in the table below. For more details, please refer to: [MixNet series model documentation](../models/MixNet.md).
@@ -378,9 +399,9 @@ ViT and DeiT series

| MixNet_M | 0.7767 | 0.9364 | 2.84 | 4.60 | 6.62 | 357.119 | 5.065 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_M_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MixNet_M_infer.tar) |
| MixNet_L | 0.7860 | 0.9437 | 3.16 | 5.55 | 8.03 | 579.017 | 7.384 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_L_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MixNet_L_infer.tar) |

-<a name="15"></a>
+<a name="ReXNet"></a>

-## 15. ReXNet Series <sup>[[30](#ref30)]</sup>
+## ReXNet Series <sup>[[30](#ref30)]</sup>

The accuracy and speed metrics of the ReXNet series models are shown in the table below. For more details, please refer to: [ReXNet series model documentation](../models/ReXNet.md).
@@ -392,9 +413,9 @@ ViT and DeiT series

| ReXNet_2_0 | 0.8122 | 0.9536 | 4.30 | 6.54 | 9.19 | 1.56 | 16.45 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_2_0_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ReXNet_2_0_infer.tar) |
| ReXNet_3_0 | 0.8209 | 0.9612 | 5.74 | 9.49 | 13.62 | 3.44 | 34.83 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_3_0_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ReXNet_3_0_infer.tar) |

-<a name="16"></a>
+<a name="SwinTransformer"></a>

-## 16. SwinTransformer Series <sup>[[27](#ref27)]</sup>
+## SwinTransformer Series <sup>[[27](#ref27)]</sup>

The accuracy and speed metrics of the SwinTransformer series models are shown in the table below. For more details, please refer to: [SwinTransformer series model documentation](../models/SwinTransformer.md).
@@ -411,9 +432,9 @@ ViT and DeiT series

[1]: pretrained on the ImageNet22k dataset and then obtained by transfer learning on the ImageNet1k dataset.

-<a name="17"></a>
+<a name="LeViT"></a>

-## 17. LeViT Series <sup>[[33](#ref33)]</sup>
+## LeViT Series <sup>[[33](#ref33)]</sup>

The accuracy and speed metrics of the LeViT series models are shown in the table below. For more details, please refer to: [LeViT series model documentation](../models/LeViT.md).
@@ -427,9 +448,9 @@ ViT and DeiT series

**Note**: the accuracy gap versus Reference comes from differences in data preprocessing and from not using the distilled head as output.

-<a name="18"></a>
+<a name="Twins"></a>

-## 18. Twins Series <sup>[[34](#ref34)]</sup>
+## Twins Series <sup>[[34](#ref34)]</sup>

The accuracy and speed metrics of the Twins series models are shown in the table below. For more details, please refer to: [Twins series model documentation](../models/Twins.md).
@@ -444,9 +465,9 @@ ViT and DeiT series

**Note**: the accuracy gap versus Reference comes from differences in data preprocessing.

-<a name="19"></a>
+<a name="HarDNet"></a>

-## 19. HarDNet Series <sup>[[37](#ref37)]</sup>
+## HarDNet Series <sup>[[37](#ref37)]</sup>

The accuracy and speed metrics of the HarDNet series models are shown in the table below. For more details, please refer to: [HarDNet series model documentation](../models/HarDNet.md).
@@ -457,9 +478,9 @@ ViT and DeiT series

| HarDNet68| 0.7546 | 0.9265 | 3.58 | 8.53 | 11.58 | 4.26 | 17.58 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet68_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/HarDNet68_infer.tar) |
| HarDNet85 | 0.7744 | 0.9355 | 6.24 | 14.85 | 20.57 | 9.09 | 36.69 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet85_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/HarDNet85_infer.tar) |

-<a name="20"></a>
+<a name="DLA"></a>

-## 20. DLA Series <sup>[[38](#ref38)]</sup>
+## DLA Series <sup>[[38](#ref38)]</sup>

The accuracy and speed metrics of the DLA series models are shown in the table below. For more details, please refer to: [DLA series model documentation](../models/DLA.md).
@@ -475,9 +496,9 @@ ViT and DeiT series

| DLA60x_c | 0.6645 | 0.8754 | 1.79 | 3.68 | 5.19 | 0.59 | 1.33 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60x_c_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/DLA60x_c_infer.tar) |
| DLA60x | 0.7753 | 0.9378 | 5.98 | 9.24 | 12.52 | 3.54 | 17.41 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60x_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/DLA60x_infer.tar) |

-<a name="21"></a>
+<a name="RedNet"></a>

-## 21. RedNet Series <sup>[[39](#ref39)]</sup>
+## RedNet Series <sup>[[39](#ref39)]</sup>

The accuracy and speed metrics of the RedNet series models are shown in the table below. For more details, please refer to: [RedNet series model documentation](../models/RedNet.md).
@@ -489,9 +510,9 @@ ViT and DeiT series

| RedNet101 | 0.7894 | 0.9436 | 13.07 | 44.12 | 83.28 | 4.59 | 25.76 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet101_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/RedNet101_infer.tar) |
| RedNet152 | 0.7917 | 0.9440 | 18.66 | 63.27 | 119.48 | 6.57 | 34.14 | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet152_pretrained.pdparams) | [Download](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/RedNet152_infer.tar) |

-<a name="22"></a>
+<a name="TNT"></a>

-## 22. TNT Series <sup>[[35](#ref35)]</sup>
+## TNT Series <sup>[[35](#ref35)]</sup>

The accuracy and speed metrics of the TNT series models are shown in the table below. For more details, please refer to: [TNT series model documentation](../models/TNT.md).
@ -501,9 +522,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模
|
|||
|
||||
**Note**: In the data preprocessing for TNT models, both `mean` and `std` in `NormalizeImage` are set to 0.5.
|
||||
|
||||
<a name="23"></a>
|
||||
<a name="CSWinTransformer"></a>
|
||||
|
||||
## 23. CSWinTransformer Series <sup>[[40](#ref40)]</sup>
|
||||
## CSWinTransformer Series <sup>[[40](#ref40)]</sup>
|
||||
|
||||
The accuracy and speed metrics of the CSWinTransformer series models are shown in the table below. For more details, see the [CSWinTransformer series model documentation](../models/CSWinTransformer.md).
|
||||
|
||||
|
@ -517,9 +538,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模
|
|||
| CSWinTransformer_large_384 | 0.8748 | 0.9833 | - | - | - | 94.7 | 173.3 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_large_384_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/CSWinTransformer_large_384_infer.tar) |
|
||||
|
||||
|
||||
<a name="24"></a>
|
||||
<a name="PVTV2"></a>
|
||||
|
||||
## 24. PVTV2 Series <sup>[[41](#ref41)]</sup>
|
||||
## PVTV2 Series <sup>[[41](#ref41)]</sup>
|
||||
|
||||
The accuracy and speed metrics of the PVTV2 series models are shown in the table below. For more details, see the [PVTV2 series model documentation](../models/PVTV2.md).
|
||||
|
||||
|
@ -534,9 +555,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模
|
|||
| PVT_V2_B5 | 0.837 | 0.966 | - | - | - | 11.4 | 82.0 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B5_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PVT_V2_B5_infer.tar) |
|
||||
|
||||
|
||||
<a name="25"></a>
|
||||
<a name="MobileViT"></a>
|
||||
|
||||
## 25. MobileViT Series <sup>[[42](#ref42)]</sup>
|
||||
## MobileViT Series <sup>[[42](#ref42)]</sup>
|
||||
|
||||
The accuracy and speed metrics of the MobileViT series models are shown in the table below. For more details, see the [MobileViT series model documentation](../models/MobileViT.md).
|
||||
|
||||
|
@ -546,9 +567,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模
|
|||
| MobileViT_XS | 0.7454 | 0.9227 | - | - | - | 930.75 | 2.33 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_XS_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_XS_infer.tar) |
|
||||
| MobileViT_S | 0.7814 | 0.9413 | - | - | - | 337.24 | 1.28 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_S_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_S_infer.tar) |
|
||||
|
||||
<a name="26"></a>
|
||||
<a name="Others"></a>
|
||||
|
||||
## 26. Other Models
|
||||
## Other Models
|
||||
|
||||
The accuracy and speed metrics of AlexNet <sup>[[18](#ref18)]</sup>, the SqueezeNet series <sup>[[19](#ref19)]</sup>, the VGG series <sup>[[20](#ref20)]</sup>, DarkNet53 <sup>[[21](#ref21)]</sup>, and other models are shown in the table below. For more details, see the [documentation for other models](../models/Others.md).
|
||||
|
||||
|
|
|
@ -0,0 +1,51 @@
|
|||
# PP-HGNet Series
|
||||
---
|
||||
## Contents
|
||||
|
||||
* [1. Overview](#1)
|
||||
* [2. Architecture](#2)
|
||||
* [3. Experimental Results](#3)
|
||||
|
||||
<a name='1'></a>
|
||||
|
||||
## 1. Overview
|
||||
|
||||
PP-HGNet (High Performance GPU Net) is a high-performance backbone network developed by the Baidu PaddlePaddle vision team for GPU platforms. Built on VOVNet, it uses a learnable downsampling layer (LDS Layer) and combines strengths of models such as ResNet_vd and PPLCNet. At the same speed on GPU, it achieves higher accuracy than other SOTA models: 3.8 percentage points above ResNet34-D and 2.4 points above ResNet50-D, rising to 4.7 points above ResNet50-D when Baidu's SSLD distillation strategy is used. At the same accuracy, its inference speed also far exceeds that of mainstream Vision Transformers.
|
||||
|
||||
<a name='2'></a>
|
||||
|
||||
## 2. Architecture
|
||||
|
||||
Targeting GPU devices, the PP-HGNet authors analyzed and summarized today's GPU-friendly networks and used standard 3x3 convolutions (which have the highest compute density) as much as possible. Taking VOVNet as the baseline, they folded in the main improvements that benefit GPU inference, obtaining a backbone whose accuracy, at the same speed, substantially surpasses other CNN and Vision Transformer models.
|
||||
|
||||
The overall structure of the PP-HGNet backbone is as follows:
|
||||
|
||||

|
||||
|
||||
PP-HGNet is composed of multiple HG-Blocks; the details of an HG-Block are as follows:
|
||||
|
||||

|
||||
|
||||
<a name='3'></a>
|
||||
|
||||
## 3. Experimental Results
|
||||
|
||||
The comparison between PP-HGNet and other models is shown below. The test machine is an NVIDIA® Tesla® V100 with the TensorRT engine enabled and FP32 precision. At the same speed, PP-HGNet outperforms all other SOTA CNN models; compared with SwinTransformer, it is both more accurate and more than twice as fast.
|
||||
|
||||
| Model | Top-1 Acc(\%) | Top-5 Acc(\%) | Latency(ms) |
|
||||
|-------|---------------|---------------|-------------|
|
||||
| ResNet34 | 74.57 | 92.14 | 1.97 |
|
||||
| ResNet34_vd | 75.98 | 92.98 | 2.00 |
|
||||
| EfficientNetB0 | 77.38 | 93.31 | 1.96 |
|
||||
| <b>PPHGNet_tiny</b> | <b>79.83</b> | <b>95.04</b> | <b>1.77</b> |
|
||||
| <b>PPHGNet_tiny_ssld</b> | <b>81.95</b> | <b>96.12</b> | <b>1.77</b> |
|
||||
| ResNet50 | 76.50 | 93.00 | 2.54 |
|
||||
| ResNet50_vd | 79.12 | 94.44 | 2.60 |
|
||||
| ResNet50_rsb | 80.40 | | 2.54 |
|
||||
| EfficientNetB1 | 79.15 | 94.41 | 2.88 |
|
||||
| SwinTransformer_tiny | 81.2 | 95.5 | 6.59 |
|
||||
| <b>PPHGNet_small</b> | <b>81.51</b> | <b>95.82</b> | <b>2.52</b> |
|
||||
| <b>PPHGNet_small_ssld</b> | <b>83.82</b> | <b>96.81</b> | <b>2.52</b> |
|
||||
|
||||
|
||||
More details on PP-HGNet and its performance on downstream tasks are coming soon.
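In the meantime, a minimal usage sketch (assuming PaddlePaddle is installed and this PaddleClas checkout is on `PYTHONPATH`; the import path matches the backbone registration in this PR):

```python
import paddle
from ppcls.arch.backbone.legendary_models.pp_hgnet import PPHGNet_tiny

# Build the backbone without downloading pretrained weights.
model = PPHGNet_tiny(pretrained=False, class_num=1000)
model.eval()

x = paddle.randn([1, 3, 224, 224])  # NCHW dummy input
logits = model(x)
print(logits.shape)  # [1, 1000]
```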
|
|
@ -0,0 +1,53 @@
|
|||
# PP-LCNetV2
|
||||
|
||||
---
|
||||
|
||||
## 1. Overview
|
||||
|
||||
The impact of the backbone on downstream computer-vision tasks goes without saying: it largely determines both the accuracy and the efficiency of downstream models. Yet most existing backbones are not efficient in real-world applications, and backbones optimized for Intel CPU platforms are particularly scarce. We benchmarked today's mainstream lightweight models and found their efficiency on Intel CPUs unsatisfactory, even though Intel CPU platforms still serve a large number of industrial use cases. We therefore proposed the PP-LCNet family of models; PP-LCNetV2 improves upon [PP-LCNetV1](./PP-LCNet.md).
|
||||
|
||||
## 2. Design Details
|
||||
|
||||

|
||||
|
||||
The overall network structure of PP-LCNetV2 is shown above. PP-LCNetV2 is an optimization of PP-LCNetV1: it mainly uses a re-parameterization strategy to combine depthwise convolutions with different kernel sizes, and it also optimizes the pointwise convolutions, shortcuts, and so on.
|
||||
|
||||
### 2.1 Rep Strategy
|
||||
|
||||
The kernel size determines the receptive field of a convolutional layer, and combining kernels of different sizes captures features at different scales. In Stage3 and Stage4, PPLCNetV2 therefore combines DW convolutions with kernel sizes 5, 3, and 1 in the same layer; to avoid hurting model efficiency, the same-layer DW convolutions are fused using a re-parameterization (Rep) strategy, as shown below.
|
||||
|
||||

|
||||
|
||||
### 2.2 PW Convolution
|
||||
|
||||
A depthwise separable convolution usually consists of one DW convolution followed by one PW convolution and replaces a standard convolution. To give it stronger fitting capacity, we tried stacking two PW convolutions; to keep efficiency unaffected, the first compresses the feature map along the channel dimension and the second expands it back, as shown below. Experiments show this strategy significantly improves accuracy; to balance its efficiency cost, PPLCNetV2 applies it only in Stage4 and Stage5.
|
||||
|
||||

|
||||
|
||||
### 2.3 Shortcut
|
||||
|
||||
Since its introduction, the residual structure has been widely adopted, but in lightweight CNNs the element-wise addition it introduces slows the model down. In PP-LCNetV2 we experimented with the residual structure stage by stage and found that it does not always improve accuracy, so PPLCNetV2 uses a residual structure only in the last stage, adding a shortcut inside the block as shown below.
|
||||
|
||||

|
||||
|
||||
### 2.4 Activation Function
|
||||
|
||||
ReLU and Hard-Swish are the most common activation functions in today's lightweight CNNs. Although Hard-Swish usually gives better accuracy, we found that some inference platforms optimize it poorly, so for generality PP-LCNetV2 uses ReLU by default; our tests show that ReLU has little impact on the accuracy of larger models.
|
||||
|
||||
### 2.5 SE Module
|
||||
|
||||
Although the SE module noticeably improves accuracy, its cost in speed cannot be ignored. In PP-LCNetV1 we found that placing SE modules in the middle-to-late part of the network maximizes the benefit. While optimizing PP-LCNetV2 we experimented further with SE placement stage by stage and found that using it in Stage3 gives the best trade-off.
|
||||
|
||||
## 3. Experimental Results
|
||||
|
||||
Without extra data, the PPLCNetV2_base model achieves over 77% Top-1 accuracy on ImageNet classification with an inference time under 4.4 ms on an Intel CPU platform, as shown in the table below. Inference times were measured on an Intel(R) Xeon(R) Gold 6271C CPU @ 2.60GHz with the OpenVINO inference engine.
|
||||
|
||||
| Model | Params(M) | FLOPs(M) | Top-1 Acc(\%) | Top-5 Acc(\%) | Latency(ms) |
|
||||
|-------|-----------|----------|---------------|---------------|-------------|
|
||||
| MobileNetV3_Large_x1_25 | 7.4 | 714 | 76.4 | 93.00 | 5.19 |
|
||||
| PPLCNetV2_x2_5 | 9 | 906 | 76.60 | 93.00 | 7.25 |
|
||||
| <b>PPLCNetV2_base</b> | <b>6.6</b> | <b>604</b> | <b>77.04</b> | <b>93.27</b> | <b>4.32</b> |
|
||||
|
||||
|
||||
|
||||
Stay tuned for more information about the PP-LCNetV2 model.
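Until then, a minimal usage sketch (assuming PaddlePaddle is installed and this PaddleClas checkout is on `PYTHONPATH`):

```python
import paddle
from ppcls.arch.backbone.legendary_models.pp_lcnet_v2 import PPLCNetV2_base

model = PPLCNetV2_base(pretrained=False, class_num=1000)
model.eval()

# Optionally fuse the parallel DW branches for deployment (see Section 2.1);
# each re-parameterizable block in this PR exposes a rep() method.
for layer in model.sublayers():
    if hasattr(layer, "rep"):
        layer.rep()

logits = model(paddle.randn([1, 3, 224, 224]))
print(logits.shape)  # [1, 1000]
```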
|
|
@ -1 +0,0 @@
|
|||
|
|
@ -0,0 +1,16 @@
|
|||
## Personnel Entry and Exit Management
|
||||
|
||||
In recent years, AI vision technology has played a pivotal role in the intelligent upgrading of industries such as security and manufacturing. Access control is a key scenario across industries with urgent application demand: home burglary prevention, server-room control, and danger alerts in scenic areas all require timely detection of unauthorized targets (people, vehicles, or other objects) entering restricted areas. Deep-learning vision can identify intrusions promptly and accurately and raise alerts, effectively protecting lives and property. Compared with traditional manual monitoring, it not only provides uninterrupted 24/7 coverage but also greatly reduces management costs and frees up labor.
|
||||
|
||||
In real industrial settings, however, achieving high-accuracy entry/exit recognition is not easy; practical scenarios present a variety of problems:
|
||||
|
||||
**Camera images are affected by occlusion from buildings, machinery, vehicles, and so on**
|
||||
|
||||
**Weather varies widely; the system must handle daytime, nighttime, fog, rain, and more**
|
||||
|
||||
For these scenarios, this release of the PaddlePaddle industrial-practice example library provides a key-area personnel access-control example: a reusable end-to-end pipeline covering data preparation, technical design, model training and optimization, and deployment. It effectively solves image classification in complex outdoor environments with varying lighting and weather, greatly reduces annotation and compute costs, and applies to factory inspection, home security, scenic-area management, and other industrial applications.
|
||||
|
||||
|
||||

|
||||
|
||||
**Note**: To run the code online on AI Studio, see [Personnel Entry and Exit Management](https://aistudio.baidu.com/aistudio/projectdetail/4094475).
|
After Width: | Height: | Size: 734 KiB |
|
@ -32,14 +32,19 @@ from ppcls.arch.distill.afd_attention import LinearTransformStudent, LinearTrans
|
|||
__all__ = ["build_model", "RecModel", "DistillationModel", "AttentionModel"]
|
||||
|
||||
|
||||
def build_model(config):
|
||||
def build_model(config, mode="train"):
|
||||
arch_config = copy.deepcopy(config["Arch"])
|
||||
model_type = arch_config.pop("name")
|
||||
use_sync_bn = arch_config.pop("use_sync_bn", False)
|
||||
mod = importlib.import_module(__name__)
|
||||
arch = getattr(mod, model_type)(**arch_config)
|
||||
if use_sync_bn:
|
||||
arch = nn.SyncBatchNorm.convert_sync_batchnorm(arch)
|
||||
|
||||
if isinstance(arch, TheseusLayer):
|
||||
prune_model(config, arch)
|
||||
quantize_model(config, arch)
|
||||
quantize_model(config, arch, mode)
|
||||
|
||||
return arch
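# Hedged usage note: the new `mode` argument lets inference/export builds
# adjust slim behavior (see quantize_model). For example, with a parsed
# PaddleClas config dict:
#     model = build_model(config)                # training graph (default)
#     model = build_model(config, mode="infer")  # inference/export graph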
|
||||
|
||||
|
||||
|
@ -50,6 +55,7 @@ def apply_to_static(config, model):
|
|||
specs = None
|
||||
if 'image_shape' in config['Global']:
|
||||
specs = [InputSpec([None] + config['Global']['image_shape'])]
|
||||
specs[0].stop_gradient = True
|
||||
model = to_static(model, input_spec=specs)
|
||||
logger.info("Successfully to apply @to_static with specs: {}".format(
|
||||
specs))
|
||||
|
|
|
@ -22,7 +22,9 @@ from ppcls.arch.backbone.legendary_models.vgg import VGG11, VGG13, VGG16, VGG19
|
|||
from ppcls.arch.backbone.legendary_models.inception_v3 import InceptionV3
|
||||
from ppcls.arch.backbone.legendary_models.hrnet import HRNet_W18_C, HRNet_W30_C, HRNet_W32_C, HRNet_W40_C, HRNet_W44_C, HRNet_W48_C, HRNet_W60_C, HRNet_W64_C, SE_HRNet_W64_C
|
||||
from ppcls.arch.backbone.legendary_models.pp_lcnet import PPLCNet_x0_25, PPLCNet_x0_35, PPLCNet_x0_5, PPLCNet_x0_75, PPLCNet_x1_0, PPLCNet_x1_5, PPLCNet_x2_0, PPLCNet_x2_5
|
||||
from ppcls.arch.backbone.legendary_models.pp_lcnet_v2 import PPLCNetV2_base
|
||||
from ppcls.arch.backbone.legendary_models.esnet import ESNet_x0_25, ESNet_x0_5, ESNet_x0_75, ESNet_x1_0
|
||||
from ppcls.arch.backbone.legendary_models.pp_hgnet import PPHGNet_tiny, PPHGNet_small, PPHGNet_base
|
||||
|
||||
from ppcls.arch.backbone.model_zoo.resnet_vc import ResNet50_vc
|
||||
from ppcls.arch.backbone.model_zoo.resnext import ResNeXt50_32x4d, ResNeXt50_64x4d, ResNeXt101_32x4d, ResNeXt101_64x4d, ResNeXt152_32x4d, ResNeXt152_64x4d
|
||||
|
@ -50,7 +52,7 @@ from ppcls.arch.backbone.model_zoo.darknet import DarkNet53
|
|||
from ppcls.arch.backbone.model_zoo.regnet import RegNetX_200MF, RegNetX_4GF, RegNetX_32GF, RegNetY_200MF, RegNetY_4GF, RegNetY_32GF
|
||||
from ppcls.arch.backbone.model_zoo.vision_transformer import ViT_small_patch16_224, ViT_base_patch16_224, ViT_base_patch16_384, ViT_base_patch32_384, ViT_large_patch16_224, ViT_large_patch16_384, ViT_large_patch32_384
|
||||
from ppcls.arch.backbone.model_zoo.distilled_vision_transformer import DeiT_tiny_patch16_224, DeiT_small_patch16_224, DeiT_base_patch16_224, DeiT_tiny_distilled_patch16_224, DeiT_small_distilled_patch16_224, DeiT_base_distilled_patch16_224, DeiT_base_patch16_384, DeiT_base_distilled_patch16_384
|
||||
from ppcls.arch.backbone.model_zoo.swin_transformer import SwinTransformer_tiny_patch4_window7_224, SwinTransformer_small_patch4_window7_224, SwinTransformer_base_patch4_window7_224, SwinTransformer_base_patch4_window12_384, SwinTransformer_large_patch4_window7_224, SwinTransformer_large_patch4_window12_384
|
||||
from ppcls.arch.backbone.legendary_models.swin_transformer import SwinTransformer_tiny_patch4_window7_224, SwinTransformer_small_patch4_window7_224, SwinTransformer_base_patch4_window7_224, SwinTransformer_base_patch4_window12_384, SwinTransformer_large_patch4_window7_224, SwinTransformer_large_patch4_window12_384
|
||||
from ppcls.arch.backbone.model_zoo.cswin_transformer import CSWinTransformer_tiny_224, CSWinTransformer_small_224, CSWinTransformer_base_224, CSWinTransformer_large_224, CSWinTransformer_base_384, CSWinTransformer_large_384
|
||||
from ppcls.arch.backbone.model_zoo.mixnet import MixNet_S, MixNet_M, MixNet_L
|
||||
from ppcls.arch.backbone.model_zoo.rexnet import ReXNet_1_0, ReXNet_1_3, ReXNet_1_5, ReXNet_2_0, ReXNet_3_0
|
||||
|
|
|
@ -0,0 +1,372 @@
|
|||
# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import paddle
|
||||
import paddle.nn as nn
|
||||
import paddle.nn.functional as F
|
||||
from paddle.nn.initializer import KaimingNormal, Constant
|
||||
from paddle.nn import Conv2D, BatchNorm2D, ReLU, AdaptiveAvgPool2D, MaxPool2D
|
||||
from paddle.regularizer import L2Decay
|
||||
from paddle import ParamAttr
|
||||
|
||||
from ppcls.arch.backbone.base.theseus_layer import TheseusLayer
|
||||
from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
|
||||
|
||||
MODEL_URLS = {
|
||||
"PPHGNet_tiny":
|
||||
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_pretrained.pdparams",
|
||||
"PPHGNet_small":
|
||||
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_pretrained.pdparams"
|
||||
}
|
||||
|
||||
__all__ = list(MODEL_URLS.keys())
|
||||
|
||||
kaiming_normal_ = KaimingNormal()
|
||||
zeros_ = Constant(value=0.)
|
||||
ones_ = Constant(value=1.)
|
||||
|
||||
|
||||
class ConvBNAct(TheseusLayer):
|
||||
def __init__(self,
|
||||
in_channels,
|
||||
out_channels,
|
||||
kernel_size,
|
||||
stride,
|
||||
groups=1,
|
||||
use_act=True):
|
||||
super().__init__()
|
||||
self.use_act = use_act
|
||||
self.conv = Conv2D(
|
||||
in_channels,
|
||||
out_channels,
|
||||
kernel_size,
|
||||
stride,
|
||||
padding=(kernel_size - 1) // 2,
|
||||
groups=groups,
|
||||
bias_attr=False)
|
||||
self.bn = BatchNorm2D(
|
||||
out_channels,
|
||||
weight_attr=ParamAttr(regularizer=L2Decay(0.0)),
|
||||
bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
|
||||
if self.use_act:
|
||||
self.act = ReLU()
|
||||
|
||||
def forward(self, x):
|
||||
x = self.conv(x)
|
||||
x = self.bn(x)
|
||||
if self.use_act:
|
||||
x = self.act(x)
|
||||
return x
|
||||
|
||||
|
||||
class ESEModule(TheseusLayer):
|
||||
def __init__(self, channels):
|
||||
super().__init__()
|
||||
self.avg_pool = AdaptiveAvgPool2D(1)
|
||||
self.conv = Conv2D(
|
||||
in_channels=channels,
|
||||
out_channels=channels,
|
||||
kernel_size=1,
|
||||
stride=1,
|
||||
padding=0)
|
||||
self.sigmoid = nn.Sigmoid()
|
||||
|
||||
def forward(self, x):
|
||||
identity = x
|
||||
x = self.avg_pool(x)
|
||||
x = self.conv(x)
|
||||
x = self.sigmoid(x)
|
||||
return paddle.multiply(x=identity, y=x)
|
||||
|
||||
|
||||
class HG_Block(TheseusLayer):
|
||||
def __init__(
|
||||
self,
|
||||
in_channels,
|
||||
mid_channels,
|
||||
out_channels,
|
||||
layer_num,
|
||||
identity=False, ):
|
||||
super().__init__()
|
||||
self.identity = identity
|
||||
|
||||
self.layers = nn.LayerList()
|
||||
self.layers.append(
|
||||
ConvBNAct(
|
||||
in_channels=in_channels,
|
||||
out_channels=mid_channels,
|
||||
kernel_size=3,
|
||||
stride=1))
|
||||
for _ in range(layer_num - 1):
|
||||
self.layers.append(
|
||||
ConvBNAct(
|
||||
in_channels=mid_channels,
|
||||
out_channels=mid_channels,
|
||||
kernel_size=3,
|
||||
stride=1))
|
||||
|
||||
# feature aggregation
|
||||
total_channels = in_channels + layer_num * mid_channels
|
||||
self.aggregation_conv = ConvBNAct(
|
||||
in_channels=total_channels,
|
||||
out_channels=out_channels,
|
||||
kernel_size=1,
|
||||
stride=1)
|
||||
self.att = ESEModule(out_channels)
|
||||
|
||||
def forward(self, x):
|
||||
identity = x
|
||||
output = []
|
||||
output.append(x)
|
||||
for layer in self.layers:
|
||||
x = layer(x)
|
||||
output.append(x)
|
||||
x = paddle.concat(output, axis=1)
|
||||
x = self.aggregation_conv(x)
|
||||
x = self.att(x)
|
||||
if self.identity:
|
||||
x += identity
|
||||
return x
|
||||
|
||||
|
||||
class HG_Stage(TheseusLayer):
|
||||
def __init__(self,
|
||||
in_channels,
|
||||
mid_channels,
|
||||
out_channels,
|
||||
block_num,
|
||||
layer_num,
|
||||
downsample=True):
|
||||
super().__init__()
|
||||
self.downsample = downsample
|
||||
if downsample:
|
||||
self.downsample = ConvBNAct(
|
||||
in_channels=in_channels,
|
||||
out_channels=in_channels,
|
||||
kernel_size=3,
|
||||
stride=2,
|
||||
groups=in_channels,
|
||||
use_act=False)
|
||||
|
||||
blocks_list = []
|
||||
blocks_list.append(
|
||||
HG_Block(
|
||||
in_channels,
|
||||
mid_channels,
|
||||
out_channels,
|
||||
layer_num,
|
||||
identity=False))
|
||||
for _ in range(block_num - 1):
|
||||
blocks_list.append(
|
||||
HG_Block(
|
||||
out_channels,
|
||||
mid_channels,
|
||||
out_channels,
|
||||
layer_num,
|
||||
identity=True))
|
||||
self.blocks = nn.Sequential(*blocks_list)
|
||||
|
||||
def forward(self, x):
|
||||
if self.downsample:
|
||||
x = self.downsample(x)
|
||||
x = self.blocks(x)
|
||||
return x
|
||||
|
||||
|
||||
class PPHGNet(TheseusLayer):
|
||||
"""
|
||||
PPHGNet
|
||||
Args:
|
||||
stem_channels: list. Stem channel list of PPHGNet.
|
||||
stage_config: dict. The configuration of each stage of PPHGNet. such as the number of channels, stride, etc.
|
||||
layer_num: int. Number of layers of HG_Block.
|
||||
use_last_conv: boolean. Whether to use a 1x1 convolutional layer before the classification layer.
|
||||
class_expand: int=2048. Number of channels for the last 1x1 convolutional layer.
|
||||
dropout_prob: float. Parameters of dropout, 0.0 means dropout is not used.
|
||||
class_num: int=1000. The number of classes.
|
||||
Returns:
|
||||
model: nn.Layer. Specific PPHGNet model depends on args.
|
||||
"""
|
||||
def __init__(self,
|
||||
stem_channels,
|
||||
stage_config,
|
||||
layer_num,
|
||||
use_last_conv=True,
|
||||
class_expand=2048,
|
||||
dropout_prob=0.0,
|
||||
class_num=1000):
|
||||
super().__init__()
|
||||
self.use_last_conv = use_last_conv
|
||||
self.class_expand = class_expand
|
||||
|
||||
# stem
|
||||
stem_channels.insert(0, 3)
|
||||
self.stem = nn.Sequential(* [
|
||||
ConvBNAct(
|
||||
in_channels=stem_channels[i],
|
||||
out_channels=stem_channels[i + 1],
|
||||
kernel_size=3,
|
||||
stride=2 if i == 0 else 1) for i in range(
|
||||
len(stem_channels) - 1)
|
||||
])
|
||||
self.pool = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
|
||||
|
||||
# stages
|
||||
self.stages = nn.LayerList()
|
||||
for k in stage_config:
|
||||
in_channels, mid_channels, out_channels, block_num, downsample = stage_config[
|
||||
k]
|
||||
self.stages.append(
|
||||
HG_Stage(in_channels, mid_channels, out_channels, block_num,
|
||||
layer_num, downsample))
|
||||
|
||||
self.avg_pool = AdaptiveAvgPool2D(1)
|
||||
if self.use_last_conv:
|
||||
self.last_conv = Conv2D(
|
||||
in_channels=out_channels,
|
||||
out_channels=self.class_expand,
|
||||
kernel_size=1,
|
||||
stride=1,
|
||||
padding=0,
|
||||
bias_attr=False)
|
||||
self.act = nn.ReLU()
|
||||
self.dropout = nn.Dropout(
|
||||
p=dropout_prob, mode="downscale_in_infer")
|
||||
|
||||
self.flatten = nn.Flatten(start_axis=1, stop_axis=-1)
|
||||
self.fc = nn.Linear(self.class_expand
|
||||
if self.use_last_conv else out_channels, class_num)
|
||||
|
||||
self._init_weights()
|
||||
|
||||
def _init_weights(self):
|
||||
for m in self.sublayers():
|
||||
if isinstance(m, nn.Conv2D):
|
||||
kaiming_normal_(m.weight)
|
||||
elif isinstance(m, (nn.BatchNorm2D)):
|
||||
ones_(m.weight)
|
||||
zeros_(m.bias)
|
||||
elif isinstance(m, nn.Linear):
|
||||
zeros_(m.bias)
|
||||
|
||||
def forward(self, x):
|
||||
x = self.stem(x)
|
||||
x = self.pool(x)
|
||||
|
||||
for stage in self.stages:
|
||||
x = stage(x)
|
||||
|
||||
x = self.avg_pool(x)
|
||||
if self.use_last_conv:
|
||||
x = self.last_conv(x)
|
||||
x = self.act(x)
|
||||
x = self.dropout(x)
|
||||
x = self.flatten(x)
|
||||
x = self.fc(x)
|
||||
return x
|
||||
|
||||
|
||||
def _load_pretrained(pretrained, model, model_url, use_ssld):
|
||||
if pretrained is False:
|
||||
pass
|
||||
elif pretrained is True:
|
||||
load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
|
||||
elif isinstance(pretrained, str):
|
||||
load_dygraph_pretrain(model, pretrained)
|
||||
else:
|
||||
raise RuntimeError(
|
||||
"pretrained type is not available. Please use `string` or `boolean` type."
|
||||
)
|
||||
|
||||
|
||||
def PPHGNet_tiny(pretrained=False, use_ssld=False, **kwargs):
|
||||
"""
|
||||
PPHGNet_tiny
|
||||
Args:
|
||||
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
|
||||
If str, means the path of the pretrained model.
|
||||
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
|
||||
Returns:
|
||||
model: nn.Layer. Specific `PPHGNet_tiny` model depends on args.
|
||||
"""
|
||||
stage_config = {
|
||||
# in_channels, mid_channels, out_channels, blocks, downsample
|
||||
"stage1": [96, 96, 224, 1, False],
|
||||
"stage2": [224, 128, 448, 1, True],
|
||||
"stage3": [448, 160, 512, 2, True],
|
||||
"stage4": [512, 192, 768, 1, True],
|
||||
}
|
||||
|
||||
model = PPHGNet(
|
||||
stem_channels=[48, 48, 96],
|
||||
stage_config=stage_config,
|
||||
layer_num=5,
|
||||
**kwargs)
|
||||
_load_pretrained(pretrained, model, MODEL_URLS["PPHGNet_tiny"], use_ssld)
|
||||
return model
|
||||
|
||||
|
||||
def PPHGNet_small(pretrained=False, use_ssld=False, **kwargs):
|
||||
"""
|
||||
PPHGNet_small
|
||||
Args:
|
||||
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
|
||||
If str, means the path of the pretrained model.
|
||||
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
|
||||
Returns:
|
||||
model: nn.Layer. Specific `PPHGNet_small` model depends on args.
|
||||
"""
|
||||
stage_config = {
|
||||
# in_channels, mid_channels, out_channels, blocks, downsample
|
||||
"stage1": [128, 128, 256, 1, False],
|
||||
"stage2": [256, 160, 512, 1, True],
|
||||
"stage3": [512, 192, 768, 2, True],
|
||||
"stage4": [768, 224, 1024, 1, True],
|
||||
}
|
||||
|
||||
model = PPHGNet(
|
||||
stem_channels=[64, 64, 128],
|
||||
stage_config=stage_config,
|
||||
layer_num=6,
|
||||
**kwargs)
|
||||
_load_pretrained(pretrained, model, MODEL_URLS["PPHGNet_small"], use_ssld)
|
||||
return model
|
||||
|
||||
|
||||
def PPHGNet_base(pretrained=False, use_ssld=False, **kwargs):
|
||||
"""
|
||||
PPHGNet_base
|
||||
Args:
|
||||
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
|
||||
If str, means the path of the pretrained model.
|
||||
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
|
||||
Returns:
|
||||
model: nn.Layer. Specific `PPHGNet_base` model depends on args.
|
||||
"""
|
||||
stage_config = {
|
||||
# in_channels, mid_channels, out_channels, blocks, downsample
|
||||
"stage1": [160, 192, 320, 1, False],
|
||||
"stage2": [320, 224, 640, 2, True],
|
||||
"stage3": [640, 256, 960, 3, True],
|
||||
"stage4": [960, 288, 1280, 2, True],
|
||||
}
|
||||
|
||||
model = PPHGNet(
|
||||
stem_channels=[96, 96, 160],
|
||||
stage_config=stage_config,
|
||||
layer_num=7,
|
||||
dropout_prob=0.2,
|
||||
**kwargs)
|
||||
_load_pretrained(pretrained, model, MODEL_URLS["PPHGNet_base"], use_ssld)
|
||||
return model
|
|
@ -17,7 +17,7 @@ from __future__ import absolute_import, division, print_function
|
|||
import paddle
|
||||
import paddle.nn as nn
|
||||
from paddle import ParamAttr
|
||||
from paddle.nn import AdaptiveAvgPool2D, BatchNorm, Conv2D, Dropout, Linear
|
||||
from paddle.nn import AdaptiveAvgPool2D, BatchNorm2D, Conv2D, Dropout, Linear
|
||||
from paddle.regularizer import L2Decay
|
||||
from paddle.nn.initializer import KaimingNormal
|
||||
from ppcls.arch.backbone.base.theseus_layer import TheseusLayer
|
||||
|
@ -83,7 +83,8 @@ class ConvBNLayer(TheseusLayer):
|
|||
filter_size,
|
||||
num_filters,
|
||||
stride,
|
||||
num_groups=1):
|
||||
num_groups=1,
|
||||
lr_mult=1.0):
|
||||
super().__init__()
|
||||
|
||||
self.conv = Conv2D(
|
||||
|
@ -93,13 +94,13 @@ class ConvBNLayer(TheseusLayer):
|
|||
stride=stride,
|
||||
padding=(filter_size - 1) // 2,
|
||||
groups=num_groups,
|
||||
weight_attr=ParamAttr(initializer=KaimingNormal()),
|
||||
weight_attr=ParamAttr(initializer=KaimingNormal(), learning_rate=lr_mult),
|
||||
bias_attr=False)
|
||||
|
||||
self.bn = BatchNorm(
|
||||
self.bn = BatchNorm2D(
|
||||
num_filters,
|
||||
param_attr=ParamAttr(regularizer=L2Decay(0.0)),
|
||||
bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
|
||||
weight_attr=ParamAttr(regularizer=L2Decay(0.0), learning_rate=lr_mult),
|
||||
bias_attr=ParamAttr(regularizer=L2Decay(0.0), learning_rate=lr_mult))
|
||||
self.hardswish = nn.Hardswish()
|
||||
|
||||
def forward(self, x):
|
||||
|
@ -115,7 +116,8 @@ class DepthwiseSeparable(TheseusLayer):
|
|||
num_filters,
|
||||
stride,
|
||||
dw_size=3,
|
||||
use_se=False):
|
||||
use_se=False,
|
||||
lr_mult=1.0):
|
||||
super().__init__()
|
||||
self.use_se = use_se
|
||||
self.dw_conv = ConvBNLayer(
|
||||
|
@ -123,14 +125,17 @@ class DepthwiseSeparable(TheseusLayer):
|
|||
num_filters=num_channels,
|
||||
filter_size=dw_size,
|
||||
stride=stride,
|
||||
num_groups=num_channels)
|
||||
num_groups=num_channels,
|
||||
lr_mult=lr_mult)
|
||||
if use_se:
|
||||
self.se = SEModule(num_channels)
|
||||
self.se = SEModule(num_channels,
|
||||
lr_mult=lr_mult)
|
||||
self.pw_conv = ConvBNLayer(
|
||||
num_channels=num_channels,
|
||||
filter_size=1,
|
||||
num_filters=num_filters,
|
||||
stride=1)
|
||||
stride=1,
|
||||
lr_mult=lr_mult)
|
||||
|
||||
def forward(self, x):
|
||||
x = self.dw_conv(x)
|
||||
|
@ -141,7 +146,7 @@ class DepthwiseSeparable(TheseusLayer):
|
|||
|
||||
|
||||
class SEModule(TheseusLayer):
|
||||
def __init__(self, channel, reduction=4):
|
||||
def __init__(self, channel, reduction=4, lr_mult=1.0):
|
||||
super().__init__()
|
||||
self.avg_pool = AdaptiveAvgPool2D(1)
|
||||
self.conv1 = Conv2D(
|
||||
|
@ -149,14 +154,18 @@ class SEModule(TheseusLayer):
|
|||
out_channels=channel // reduction,
|
||||
kernel_size=1,
|
||||
stride=1,
|
||||
padding=0)
|
||||
padding=0,
|
||||
weight_attr=ParamAttr(learning_rate=lr_mult),
|
||||
bias_attr=ParamAttr(learning_rate=lr_mult))
|
||||
self.relu = nn.ReLU()
|
||||
self.conv2 = Conv2D(
|
||||
in_channels=channel // reduction,
|
||||
out_channels=channel,
|
||||
kernel_size=1,
|
||||
stride=1,
|
||||
padding=0)
|
||||
padding=0,
|
||||
weight_attr=ParamAttr(learning_rate=lr_mult),
|
||||
bias_attr=ParamAttr(learning_rate=lr_mult))
|
||||
self.hardsigmoid = nn.Hardsigmoid()
|
||||
|
||||
def forward(self, x):
|
||||
|
@ -177,17 +186,32 @@ class PPLCNet(TheseusLayer):
|
|||
class_num=1000,
|
||||
dropout_prob=0.2,
|
||||
class_expand=1280,
|
||||
lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
|
||||
use_last_conv=True,
|
||||
return_patterns=None,
|
||||
return_stages=None):
|
||||
super().__init__()
|
||||
self.scale = scale
|
||||
self.class_expand = class_expand
|
||||
self.lr_mult_list = lr_mult_list
|
||||
self.use_last_conv = use_last_conv
|
||||
if isinstance(self.lr_mult_list, str):
|
||||
self.lr_mult_list = eval(self.lr_mult_list)
|
||||
|
||||
assert isinstance(self.lr_mult_list, (
|
||||
list, tuple
|
||||
)), "lr_mult_list should be in (list, tuple) but got {}".format(
|
||||
type(self.lr_mult_list))
|
||||
assert len(self.lr_mult_list
|
||||
) == 6, "lr_mult_list length should be 6 but got {}".format(
|
||||
len(self.lr_mult_list))
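# Hedged example: the six multipliers scale the learning rates of the stem
# conv and blocks2..blocks6 respectively, e.g. lr_mult_list=[0.0, 0.0, 0.2,
# 0.4, 0.6, 1.0] freezes the first two groups and fine-tunes the rest.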
|
||||
|
||||
self.conv1 = ConvBNLayer(
|
||||
num_channels=3,
|
||||
filter_size=3,
|
||||
num_filters=make_divisible(16 * scale),
|
||||
stride=2)
|
||||
stride=2,
|
||||
lr_mult=self.lr_mult_list[0])
|
||||
|
||||
self.blocks2 = nn.Sequential(* [
|
||||
DepthwiseSeparable(
|
||||
|
@ -195,7 +219,8 @@ class PPLCNet(TheseusLayer):
|
|||
num_filters=make_divisible(out_c * scale),
|
||||
dw_size=k,
|
||||
stride=s,
|
||||
use_se=se)
|
||||
use_se=se,
|
||||
lr_mult=self.lr_mult_list[1])
|
||||
for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks2"])
|
||||
])
|
||||
|
||||
|
@ -205,7 +230,8 @@ class PPLCNet(TheseusLayer):
|
|||
num_filters=make_divisible(out_c * scale),
|
||||
dw_size=k,
|
||||
stride=s,
|
||||
use_se=se)
|
||||
use_se=se,
|
||||
lr_mult=self.lr_mult_list[2])
|
||||
for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks3"])
|
||||
])
|
||||
|
||||
|
@ -215,7 +241,8 @@ class PPLCNet(TheseusLayer):
|
|||
num_filters=make_divisible(out_c * scale),
|
||||
dw_size=k,
|
||||
stride=s,
|
||||
use_se=se)
|
||||
use_se=se,
|
||||
lr_mult=self.lr_mult_list[3])
|
||||
for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks4"])
|
||||
])
|
||||
|
||||
|
@ -225,7 +252,8 @@ class PPLCNet(TheseusLayer):
|
|||
num_filters=make_divisible(out_c * scale),
|
||||
dw_size=k,
|
||||
stride=s,
|
||||
use_se=se)
|
||||
use_se=se,
|
||||
lr_mult=self.lr_mult_list[4])
|
||||
for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks5"])
|
||||
])
|
||||
|
||||
|
@ -235,25 +263,26 @@ class PPLCNet(TheseusLayer):
|
|||
num_filters=make_divisible(out_c * scale),
|
||||
dw_size=k,
|
||||
stride=s,
|
||||
use_se=se)
|
||||
use_se=se,
|
||||
lr_mult=self.lr_mult_list[5])
|
||||
for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks6"])
|
||||
])
|
||||
|
||||
self.avg_pool = AdaptiveAvgPool2D(1)
|
||||
|
||||
self.last_conv = Conv2D(
|
||||
in_channels=make_divisible(NET_CONFIG["blocks6"][-1][2] * scale),
|
||||
out_channels=self.class_expand,
|
||||
kernel_size=1,
|
||||
stride=1,
|
||||
padding=0,
|
||||
bias_attr=False)
|
||||
|
||||
self.hardswish = nn.Hardswish()
|
||||
self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer")
|
||||
if self.use_last_conv:
|
||||
self.last_conv = Conv2D(
|
||||
in_channels=make_divisible(NET_CONFIG["blocks6"][-1][2] * scale),
|
||||
out_channels=self.class_expand,
|
||||
kernel_size=1,
|
||||
stride=1,
|
||||
padding=0,
|
||||
bias_attr=False)
|
||||
self.hardswish = nn.Hardswish()
|
||||
self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer")
|
||||
else:
|
||||
self.last_conv = None
|
||||
self.flatten = nn.Flatten(start_axis=1, stop_axis=-1)
|
||||
|
||||
self.fc = Linear(self.class_expand, class_num)
|
||||
self.fc = Linear(self.class_expand if self.use_last_conv else NET_CONFIG["blocks6"][-1][2], class_num)
|
||||
|
||||
super().init_res(
|
||||
stages_pattern,
|
||||
|
@ -270,9 +299,10 @@ class PPLCNet(TheseusLayer):
|
|||
x = self.blocks6(x)
|
||||
|
||||
x = self.avg_pool(x)
|
||||
x = self.last_conv(x)
|
||||
x = self.hardswish(x)
|
||||
x = self.dropout(x)
|
||||
if self.last_conv is not None:
|
||||
x = self.last_conv(x)
|
||||
x = self.hardswish(x)
|
||||
x = self.dropout(x)
|
||||
x = self.flatten(x)
|
||||
x = self.fc(x)
|
||||
return x
|
||||
|
|
|
@ -0,0 +1,352 @@
|
|||
# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
import paddle
|
||||
import paddle.nn as nn
|
||||
import paddle.nn.functional as F
|
||||
from paddle import ParamAttr
|
||||
from paddle.nn import AdaptiveAvgPool2D, BatchNorm2D, Conv2D, Dropout, Linear
|
||||
from paddle.regularizer import L2Decay
|
||||
from paddle.nn.initializer import KaimingNormal
|
||||
from ppcls.arch.backbone.base.theseus_layer import TheseusLayer
|
||||
from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
|
||||
|
||||
MODEL_URLS = {
|
||||
"PPLCNetV2_base":
|
||||
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNetV2_base_pretrained.pdparams",
|
||||
}
|
||||
|
||||
__all__ = list(MODEL_URLS.keys())
|
||||
|
||||
NET_CONFIG = {
|
||||
# in_channels, kernel_size, split_pw, use_rep, use_se, use_shortcut
|
||||
"stage1": [64, 3, False, False, False, False],
|
||||
"stage2": [128, 3, False, False, False, False],
|
||||
"stage3": [256, 5, True, True, True, False],
|
||||
"stage4": [512, 5, False, True, False, True],
|
||||
}
|
||||
|
||||
|
||||
def make_divisible(v, divisor=8, min_value=None):
|
||||
if min_value is None:
|
||||
min_value = divisor
|
||||
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
|
||||
if new_v < 0.9 * v:
|
||||
new_v += divisor
|
||||
return new_v
|
||||
|
||||
|
||||
class ConvBNLayer(TheseusLayer):
|
||||
def __init__(self,
|
||||
in_channels,
|
||||
out_channels,
|
||||
kernel_size,
|
||||
stride,
|
||||
groups=1,
|
||||
use_act=True):
|
||||
super().__init__()
|
||||
self.use_act = use_act
|
||||
self.conv = Conv2D(
|
||||
in_channels=in_channels,
|
||||
out_channels=out_channels,
|
||||
kernel_size=kernel_size,
|
||||
stride=stride,
|
||||
padding=(kernel_size - 1) // 2,
|
||||
groups=groups,
|
||||
weight_attr=ParamAttr(initializer=KaimingNormal()),
|
||||
bias_attr=False)
|
||||
|
||||
self.bn = BatchNorm2D(
|
||||
out_channels,
|
||||
weight_attr=ParamAttr(regularizer=L2Decay(0.0)),
|
||||
bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
|
||||
if self.use_act:
|
||||
self.act = nn.ReLU()
|
||||
|
||||
def forward(self, x):
|
||||
x = self.conv(x)
|
||||
x = self.bn(x)
|
||||
if self.use_act:
|
||||
x = self.act(x)
|
||||
return x
|
||||
|
||||
|
||||
class SEModule(TheseusLayer):
|
||||
def __init__(self, channel, reduction=4):
|
||||
super().__init__()
|
||||
self.avg_pool = AdaptiveAvgPool2D(1)
|
||||
self.conv1 = Conv2D(
|
||||
in_channels=channel,
|
||||
out_channels=channel // reduction,
|
||||
kernel_size=1,
|
||||
stride=1,
|
||||
padding=0)
|
||||
self.relu = nn.ReLU()
|
||||
self.conv2 = Conv2D(
|
||||
in_channels=channel // reduction,
|
||||
out_channels=channel,
|
||||
kernel_size=1,
|
||||
stride=1,
|
||||
padding=0)
|
||||
self.hardsigmoid = nn.Sigmoid()  # note: a plain Sigmoid is used here despite the attribute name
|
||||
|
||||
def forward(self, x):
|
||||
identity = x
|
||||
x = self.avg_pool(x)
|
||||
x = self.conv1(x)
|
||||
x = self.relu(x)
|
||||
x = self.conv2(x)
|
||||
x = self.hardsigmoid(x)
|
||||
x = paddle.multiply(x=identity, y=x)
|
||||
return x
|
||||
|
||||
|
||||
class RepDepthwiseSeparable(TheseusLayer):
|
||||
def __init__(self,
|
||||
in_channels,
|
||||
out_channels,
|
||||
stride,
|
||||
dw_size=3,
|
||||
split_pw=False,
|
||||
use_rep=False,
|
||||
use_se=False,
|
||||
use_shortcut=False):
|
||||
super().__init__()
|
||||
self.is_repped = False
|
||||
|
||||
self.dw_size = dw_size
|
||||
self.split_pw = split_pw
|
||||
self.use_rep = use_rep
|
||||
self.use_se = use_se
|
||||
self.use_shortcut = True if use_shortcut and stride == 1 and in_channels == out_channels else False
|
||||
|
||||
if self.use_rep:
|
||||
self.dw_conv_list = nn.LayerList()
|
||||
for kernel_size in range(self.dw_size, 0, -2):
|
||||
if kernel_size == 1 and stride != 1:
|
||||
continue
|
||||
dw_conv = ConvBNLayer(
|
||||
in_channels=in_channels,
|
||||
out_channels=in_channels,
|
||||
kernel_size=kernel_size,
|
||||
stride=stride,
|
||||
groups=in_channels,
|
||||
use_act=False)
|
||||
self.dw_conv_list.append(dw_conv)
|
||||
self.dw_conv = nn.Conv2D(
|
||||
in_channels=in_channels,
|
||||
out_channels=in_channels,
|
||||
kernel_size=dw_size,
|
||||
stride=stride,
|
||||
padding=(dw_size - 1) // 2,
|
||||
groups=in_channels)
|
||||
else:
|
||||
self.dw_conv = ConvBNLayer(
|
||||
in_channels=in_channels,
|
||||
out_channels=in_channels,
|
||||
kernel_size=dw_size,
|
||||
stride=stride,
|
||||
groups=in_channels)
|
||||
|
||||
self.act = nn.ReLU()
|
||||
|
||||
if use_se:
|
||||
self.se = SEModule(in_channels)
|
||||
|
||||
if self.split_pw:
|
||||
pw_ratio = 0.5
|
||||
self.pw_conv_1 = ConvBNLayer(
|
||||
in_channels=in_channels,
|
||||
kernel_size=1,
|
||||
out_channels=int(out_channels * pw_ratio),
|
||||
stride=1)
|
||||
self.pw_conv_2 = ConvBNLayer(
|
||||
in_channels=int(out_channels * pw_ratio),
|
||||
kernel_size=1,
|
||||
out_channels=out_channels,
|
||||
stride=1)
|
||||
else:
|
||||
self.pw_conv = ConvBNLayer(
|
||||
in_channels=in_channels,
|
||||
kernel_size=1,
|
||||
out_channels=out_channels,
|
||||
stride=1)
|
||||
|
||||
def forward(self, x):
|
||||
if self.use_rep:
|
||||
input_x = x
|
||||
if self.is_repped:
|
||||
x = self.act(self.dw_conv(x))
|
||||
else:
|
||||
y = self.dw_conv_list[0](x)
|
||||
for dw_conv in self.dw_conv_list[1:]:
|
||||
y += dw_conv(x)
|
||||
x = self.act(y)
|
||||
else:
|
||||
x = self.dw_conv(x)
|
||||
|
||||
if self.use_se:
|
||||
x = self.se(x)
|
||||
if self.split_pw:
|
||||
x = self.pw_conv_1(x)
|
||||
x = self.pw_conv_2(x)
|
||||
else:
|
||||
x = self.pw_conv(x)
|
||||
if self.use_shortcut:
|
||||
x = x + input_x
|
||||
return x
|
||||
|
||||
def rep(self):
|
||||
if self.use_rep:
|
||||
self.is_repped = True
|
||||
kernel, bias = self._get_equivalent_kernel_bias()
|
||||
self.dw_conv.weight.set_value(kernel)
|
||||
self.dw_conv.bias.set_value(bias)
|
||||
|
||||
def _get_equivalent_kernel_bias(self):
|
||||
kernel_sum = 0
|
||||
bias_sum = 0
|
||||
for dw_conv in self.dw_conv_list:
|
||||
kernel, bias = self._fuse_bn_tensor(dw_conv)
|
||||
kernel = self._pad_tensor(kernel, to_size=self.dw_size)
|
||||
kernel_sum += kernel
|
||||
bias_sum += bias
|
||||
return kernel_sum, bias_sum
|
||||
|
||||
def _fuse_bn_tensor(self, branch):
|
||||
kernel = branch.conv.weight
|
||||
running_mean = branch.bn._mean
|
||||
running_var = branch.bn._variance
|
||||
gamma = branch.bn.weight
|
||||
beta = branch.bn.bias
|
||||
eps = branch.bn._epsilon
|
||||
std = (running_var + eps).sqrt()
|
||||
t = (gamma / std).reshape((-1, 1, 1, 1))
|
||||
return kernel * t, beta - running_mean * gamma / std
|
||||
|
||||
def _pad_tensor(self, tensor, to_size):
|
||||
from_size = tensor.shape[-1]
|
||||
if from_size == to_size:
|
||||
return tensor
|
||||
pad = (to_size - from_size) // 2
|
||||
return F.pad(tensor, [pad, pad, pad, pad])
|
||||
|
||||
|
||||
class PPLCNetV2(TheseusLayer):
|
||||
def __init__(self,
|
||||
scale,
|
||||
depths,
|
||||
class_num=1000,
|
||||
dropout_prob=0,
|
||||
use_last_conv=True,
|
||||
class_expand=1280):
|
||||
super().__init__()
|
||||
self.scale = scale
|
||||
self.use_last_conv = use_last_conv
|
||||
self.class_expand = class_expand
|
||||
|
||||
self.stem = nn.Sequential(* [
|
||||
ConvBNLayer(
|
||||
in_channels=3,
|
||||
kernel_size=3,
|
||||
out_channels=make_divisible(32 * scale),
|
||||
stride=2), RepDepthwiseSeparable(
|
||||
in_channels=make_divisible(32 * scale),
|
||||
out_channels=make_divisible(64 * scale),
|
||||
stride=1,
|
||||
dw_size=3)
|
||||
])
|
||||
|
||||
# stages
|
||||
self.stages = nn.LayerList()
|
||||
for depth_idx, k in enumerate(NET_CONFIG):
|
||||
in_channels, kernel_size, split_pw, use_rep, use_se, use_shortcut = NET_CONFIG[
|
||||
k]
|
||||
self.stages.append(
|
||||
nn.Sequential(* [
|
||||
RepDepthwiseSeparable(
|
||||
in_channels=make_divisible((in_channels if i == 0 else
|
||||
in_channels * 2) * scale),
|
||||
out_channels=make_divisible(in_channels * 2 * scale),
|
||||
stride=2 if i == 0 else 1,
|
||||
dw_size=kernel_size,
|
||||
split_pw=split_pw,
|
||||
use_rep=use_rep,
|
||||
use_se=use_se,
|
||||
use_shortcut=use_shortcut)
|
||||
for i in range(depths[depth_idx])
|
||||
]))
|
||||
|
||||
self.avg_pool = AdaptiveAvgPool2D(1)
|
||||
|
||||
if self.use_last_conv:
|
||||
self.last_conv = Conv2D(
|
||||
in_channels=make_divisible(NET_CONFIG["stage4"][0] * 2 *
|
||||
scale),
|
||||
out_channels=self.class_expand,
|
||||
kernel_size=1,
|
||||
stride=1,
|
||||
padding=0,
|
||||
bias_attr=False)
|
||||
self.act = nn.ReLU()
|
||||
self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer")
|
||||
|
||||
self.flatten = nn.Flatten(start_axis=1, stop_axis=-1)
|
||||
in_features = self.class_expand if self.use_last_conv else NET_CONFIG[
|
||||
"stage4"][0] * 2 * scale
|
||||
self.fc = Linear(in_features, class_num)
|
||||
|
||||
def forward(self, x):
|
||||
x = self.stem(x)
|
||||
for stage in self.stages:
|
||||
x = stage(x)
|
||||
x = self.avg_pool(x)
|
||||
if self.use_last_conv:
|
||||
x = self.last_conv(x)
|
||||
x = self.act(x)
|
||||
x = self.dropout(x)
|
||||
x = self.flatten(x)
|
||||
x = self.fc(x)
|
||||
return x
|
||||
|
||||
|
||||
def _load_pretrained(pretrained, model, model_url, use_ssld):
|
||||
if pretrained is False:
|
||||
pass
|
||||
elif pretrained is True:
|
||||
load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
|
||||
elif isinstance(pretrained, str):
|
||||
load_dygraph_pretrain(model, pretrained)
|
||||
else:
|
||||
raise RuntimeError(
|
||||
"pretrained type is not available. Please use `string` or `boolean` type."
|
||||
)
|
||||
|
||||
|
||||
def PPLCNetV2_base(pretrained=False, use_ssld=False, **kwargs):
|
||||
"""
|
||||
PPLCNetV2_base
|
||||
Args:
|
||||
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
|
||||
If str, means the path of the pretrained model.
|
||||
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
|
||||
Returns:
|
||||
model: nn.Layer. Specific `PPLCNetV2_base` model depends on args.
|
||||
"""
|
||||
model = PPLCNetV2(
|
||||
scale=1.0, depths=[2, 2, 6, 2], dropout_prob=0.2, **kwargs)
|
||||
_load_pretrained(pretrained, model, MODEL_URLS["PPLCNetV2_base"], use_ssld)
|
||||
return model
|
|
@ -20,9 +20,10 @@ import numpy as np
|
|||
import paddle
|
||||
from paddle import ParamAttr
|
||||
import paddle.nn as nn
|
||||
from paddle.nn import Conv2D, BatchNorm, Linear
|
||||
from paddle.nn import Conv2D, BatchNorm, Linear, BatchNorm2D
|
||||
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
|
||||
from paddle.nn.initializer import Uniform
|
||||
from paddle.regularizer import L2Decay
|
||||
import math
|
||||
|
||||
from ppcls.arch.backbone.base.theseus_layer import TheseusLayer
|
||||
|
@ -132,11 +133,12 @@ class ConvBNLayer(TheseusLayer):
|
|||
weight_attr=ParamAttr(learning_rate=lr_mult),
|
||||
bias_attr=False,
|
||||
data_format=data_format)
|
||||
self.bn = BatchNorm(
|
||||
num_filters,
|
||||
param_attr=ParamAttr(learning_rate=lr_mult),
|
||||
bias_attr=ParamAttr(learning_rate=lr_mult),
|
||||
data_layout=data_format)
|
||||
|
||||
weight_attr = ParamAttr(learning_rate=lr_mult, trainable=True)
|
||||
bias_attr = ParamAttr(learning_rate=lr_mult, trainable=True)
|
||||
|
||||
self.bn = BatchNorm2D(
|
||||
num_filters, weight_attr=weight_attr, bias_attr=bias_attr)
|
||||
self.relu = nn.ReLU()
|
||||
|
||||
def forward(self, x):
|
||||
|
@ -192,6 +194,7 @@ class BottleneckBlock(TheseusLayer):
|
|||
is_vd_mode=False if if_first else True,
|
||||
lr_mult=lr_mult,
|
||||
data_format=data_format)
|
||||
|
||||
self.relu = nn.ReLU()
|
||||
self.shortcut = shortcut
|
||||
|
||||
|
@ -312,7 +315,7 @@ class ResNet(TheseusLayer):
|
|||
[[input_image_channel, 32, 3, 2], [32, 32, 3, 1], [32, 64, 3, 1]]
|
||||
}
|
||||
|
||||
self.stem = nn.Sequential(*[
|
||||
self.stem = nn.Sequential(* [
|
||||
ConvBNLayer(
|
||||
num_channels=in_c,
|
||||
num_filters=out_c,
|
||||
|
|
|
@ -21,8 +21,8 @@ import paddle.nn as nn
|
|||
import paddle.nn.functional as F
|
||||
from paddle.nn.initializer import TruncatedNormal, Constant
|
||||
|
||||
from .vision_transformer import trunc_normal_, zeros_, ones_, to_2tuple, DropPath, Identity
|
||||
|
||||
from ppcls.arch.backbone.base.theseus_layer import TheseusLayer
|
||||
from ppcls.arch.backbone.model_zoo.vision_transformer import trunc_normal_, zeros_, ones_, to_2tuple, DropPath, Identity
|
||||
from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
|
||||
|
||||
MODEL_URLS = {
|
||||
|
@ -589,7 +589,7 @@ class PatchEmbed(nn.Layer):
|
|||
return flops
|
||||
|
||||
|
||||
class SwinTransformer(nn.Layer):
|
||||
class SwinTransformer(TheseusLayer):
|
||||
""" Swin Transformer
|
||||
A PaddlePaddle impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
|
||||
https://arxiv.org/pdf/2103.14030
|
|
@ -124,13 +124,7 @@ class RepVGGBlock(nn.Layer):
|
|||
groups=groups)
|
||||
|
||||
def forward(self, inputs):
|
||||
if not self.training and not self.is_repped:
|
||||
self.rep()
|
||||
self.is_repped = True
|
||||
if self.training and self.is_repped:
|
||||
self.is_repped = False
|
||||
|
||||
if not self.training:
|
||||
if self.is_repped:
|
||||
return self.nonlinearity(self.rbr_reparam(inputs))
|
||||
|
||||
if self.rbr_identity is None:
|
||||
|
@ -154,6 +148,7 @@ class RepVGGBlock(nn.Layer):
|
|||
kernel, bias = self.get_equivalent_kernel_bias()
|
||||
self.rbr_reparam.weight.set_value(kernel)
|
||||
self.rbr_reparam.bias.set_value(bias)
|
||||
self.is_repped = True
|
||||
|
||||
def get_equivalent_kernel_bias(self):
|
||||
kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense)
|
||||
|
|
|
@ -40,12 +40,14 @@ QUANT_CONFIG = {
|
|||
}
|
||||
|
||||
|
||||
def quantize_model(config, model):
|
||||
def quantize_model(config, model, mode="train"):
|
||||
if config.get("Slim", False) and config["Slim"].get("quant", False):
|
||||
from paddleslim.dygraph.quant import QAT
|
||||
assert config["Slim"]["quant"]["name"].lower(
|
||||
) == 'pact', 'Only PACT quantization method is supported now'
|
||||
QUANT_CONFIG["activation_preprocess_type"] = "PACT"
|
||||
if mode in ["infer", "export"]:
|
||||
QUANT_CONFIG['activation_preprocess_type'] = None
|
||||
model.quanter = QAT(config=QUANT_CONFIG)
|
||||
model.quanter.quantize(model)
|
||||
logger.info("QAT model summary:")
|
||||
|
|
|
@ -0,0 +1,114 @@
|
|||
# global configs
|
||||
Global:
|
||||
checkpoints: null
|
||||
pretrained_model: null
|
||||
output_dir: "./output/"
|
||||
device: "gpu"
|
||||
save_interval: 5
|
||||
eval_during_train: True
|
||||
eval_interval: 1
|
||||
epochs: 30
|
||||
print_batch_step: 20
|
||||
use_visualdl: False
|
||||
# used for static mode and model export
|
||||
image_shape: [3, 256, 192]
|
||||
save_inference_dir: "./inference"
|
||||
use_multilabel: True
|
||||
|
||||
# model architecture
|
||||
Arch:
|
||||
name: "ResNet50"
|
||||
pretrained: True
|
||||
class_num: 26
|
||||
|
||||
# loss function config for traing/eval process
|
||||
Loss:
|
||||
Train:
|
||||
- MultiLabelLoss:
|
||||
weight: 1.0
|
||||
weight_ratio: True
|
||||
size_sum: True
|
||||
Eval:
|
||||
- MultiLabelLoss:
|
||||
weight: 1.0
|
||||
weight_ratio: True
|
||||
size_sum: True
|
||||
|
||||
Optimizer:
|
||||
name: Adam
|
||||
lr:
|
||||
name: Piecewise
|
||||
decay_epochs: [12, 18, 24, 28]
|
||||
values: [0.0001, 0.00001, 0.000001, 0.0000001]
|
||||
regularizer:
|
||||
name: 'L2'
|
||||
coeff: 0.0005
|
||||
clip_norm: 10
|
||||
|
||||
# data loader for train and eval
|
||||
DataLoader:
|
||||
Train:
|
||||
dataset:
|
||||
name: MultiLabelDataset
|
||||
image_root: "dataset/attribute/data/"
|
||||
cls_label_path: "dataset/attribute/trainval.txt"
|
||||
label_ratio: True
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
size: [192, 256]
|
||||
- Padv2:
|
||||
size: [212, 276]
|
||||
pad_mode: 1
|
||||
fill_value: 0
|
||||
- RandomCropImage:
|
||||
size: [192, 256]
|
||||
- RandFlipImage:
|
||||
flip_code: 1
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 64
|
||||
drop_last: True
|
||||
shuffle: True
|
||||
loader:
|
||||
num_workers: 4
|
||||
use_shared_memory: True
|
||||
Eval:
|
||||
dataset:
|
||||
name: MultiLabelDataset
|
||||
image_root: "dataset/attribute/data/"
|
||||
cls_label_path: "dataset/attribute/test.txt"
|
||||
label_ratio: True
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
size: [192, 256]
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 64
|
||||
drop_last: False
|
||||
shuffle: False
|
||||
loader:
|
||||
num_workers: 4
|
||||
use_shared_memory: True
|
||||
|
||||
|
||||
Metric:
|
||||
Eval:
|
||||
- ATTRMetric:
|
||||
|
||||
|
|
@ -0,0 +1,155 @@
|
|||
# global configs
|
||||
Global:
|
||||
checkpoints: null
|
||||
pretrained_model: null
|
||||
output_dir: "./output/"
|
||||
device: "gpu"
|
||||
save_interval: 1
|
||||
eval_during_train: True
|
||||
eval_interval: 1
|
||||
epochs: 100
|
||||
print_batch_step: 10
|
||||
use_visualdl: False
|
||||
# used for static mode and model export
|
||||
image_shape: [3, 224, 224]
|
||||
save_inference_dir: "./inference"
|
||||
|
||||
# model architecture
|
||||
Arch:
|
||||
name: "DistillationModel"
|
||||
# if not null, its lengths should be same as models
|
||||
pretrained_list:
|
||||
# if not null, its lengths should be same as models
|
||||
freeze_params_list:
|
||||
- True
|
||||
- False
|
||||
models:
|
||||
- Teacher:
|
||||
name: ResNet34
|
||||
pretrained: True
|
||||
|
||||
- Student:
|
||||
name: ResNet18
|
||||
pretrained: False
|
||||
|
||||
infer_model_name: "Student"
|
||||
|
||||
|
||||
# loss function config for traing/eval process
|
||||
Loss:
|
||||
Train:
|
||||
- DistillationGTCELoss:
|
||||
weight: 1.0
|
||||
model_names: ["Student"]
|
||||
- DistillationDKDLoss:
|
||||
weight: 1.0
|
||||
model_name_pairs: [["Student", "Teacher"]]
|
||||
temperature: 1
|
||||
alpha: 1.0
|
||||
beta: 1.0
|
||||
Eval:
|
||||
- CELoss:
|
||||
weight: 1.0
|
||||
|
||||
|
||||
Optimizer:
|
||||
name: Momentum
|
||||
momentum: 0.9
|
||||
weight_decay: 1e-4
|
||||
lr:
|
||||
name: MultiStepDecay
|
||||
learning_rate: 0.2
|
||||
milestones: [30, 60, 90]
|
||||
step_each_epoch: 1
|
||||
gamma: 0.1
|
||||
|
||||
|
||||
# data loader for train and eval
|
||||
DataLoader:
|
||||
Train:
|
||||
dataset:
|
||||
name: ImageNetDataset
|
||||
image_root: "./dataset/ILSVRC2012/"
|
||||
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- RandCropImage:
|
||||
size: 224
|
||||
- RandFlipImage:
|
||||
flip_code: 1
|
||||
- NormalizeImage:
|
||||
scale: 0.00392157
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 128
|
||||
drop_last: False
|
||||
shuffle: True
|
||||
loader:
|
||||
num_workers: 8
|
||||
use_shared_memory: True
|
||||
|
||||
Eval:
|
||||
dataset:
|
||||
name: ImageNetDataset
|
||||
image_root: "./dataset/ILSVRC2012/"
|
||||
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 256
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 0.00392157
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 64
|
||||
drop_last: False
|
||||
shuffle: False
|
||||
loader:
|
||||
num_workers: 4
|
||||
use_shared_memory: True
|
||||
|
||||
Infer:
|
||||
infer_imgs: "docs/images/inference_deployment/whl_demo.jpg"
|
||||
batch_size: 10
|
||||
transforms:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 256
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
- ToCHWImage:
|
||||
PostProcess:
|
||||
name: DistillationPostProcess
|
||||
func: Topk
|
||||
topk: 5
|
||||
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
|
||||
|
||||
Metric:
|
||||
Train:
|
||||
- DistillationTopkAcc:
|
||||
model_key: "Student"
|
||||
topk: [1, 5]
|
||||
Eval:
|
||||
- DistillationTopkAcc:
|
||||
model_key: "Student"
|
||||
topk: [1, 5]
|
|
@ -0,0 +1,164 @@
|
|||
# global configs
|
||||
Global:
|
||||
checkpoints: null
|
||||
pretrained_model: null
|
||||
output_dir: ./output/
|
||||
device: gpu
|
||||
save_interval: 1
|
||||
eval_during_train: True
|
||||
eval_interval: 1
|
||||
epochs: 600
|
||||
print_batch_step: 10
|
||||
use_visualdl: False
|
||||
# used for static mode and model export
|
||||
image_shape: [3, 224, 224]
|
||||
save_inference_dir: ./inference
|
||||
# training model under @to_static
|
||||
to_static: False
|
||||
use_dali: False
|
||||
|
||||
# mixed precision training
|
||||
AMP:
|
||||
scale_loss: 128.0
|
||||
use_dynamic_loss_scaling: True
|
||||
# O1: mixed fp16
|
||||
level: O1
|
||||
|
||||
# model architecture
|
||||
Arch:
|
||||
name: PPHGNet_small
|
||||
class_num: 1000
|
||||
|
||||
# loss function config for traing/eval process
|
||||
Loss:
|
||||
Train:
|
||||
- CELoss:
|
||||
weight: 1.0
|
||||
epsilon: 0.1
|
||||
Eval:
|
||||
- CELoss:
|
||||
weight: 1.0
|
||||
|
||||
|
||||
Optimizer:
|
||||
name: Momentum
|
||||
momentum: 0.9
|
||||
lr:
|
||||
name: Cosine
|
||||
learning_rate: 0.5
|
||||
warmup_epoch: 5
|
||||
regularizer:
|
||||
name: 'L2'
|
||||
coeff: 0.00004
|
||||
|
||||
|
||||
# data loader for train and eval
|
||||
DataLoader:
|
||||
Train:
|
||||
dataset:
|
||||
name: ImageNetDataset
|
||||
image_root: ./dataset/ILSVRC2012/
|
||||
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- RandCropImage:
|
||||
size: 224
|
||||
interpolation: bicubic
|
||||
backend: pil
|
||||
- RandFlipImage:
|
||||
flip_code: 1
|
||||
- TimmAutoAugment:
|
||||
config_str: rand-m7-mstd0.5-inc1
|
||||
interpolation: bicubic
|
||||
img_size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
- RandomErasing:
|
||||
EPSILON: 0.25
|
||||
sl: 0.02
|
||||
sh: 1.0/3.0
|
||||
r1: 0.3
|
||||
attempt: 10
|
||||
use_log_aspect: True
|
||||
mode: pixel
|
||||
batch_transform_ops:
|
||||
- OpSampler:
|
||||
MixupOperator:
|
||||
alpha: 0.2
|
||||
prob: 0.5
|
||||
CutmixOperator:
|
||||
alpha: 1.0
|
||||
prob: 0.5
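# Hedged note: OpSampler applies at most one of the listed batch operators
# per batch according to the given probabilities; with 0.5 + 0.5, every
# batch receives either Mixup or Cutmix.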
|
||||
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 128
|
||||
drop_last: False
|
||||
shuffle: True
|
||||
loader:
|
||||
num_workers: 16
|
||||
use_shared_memory: True
|
||||
|
||||
Eval:
|
||||
dataset:
|
||||
name: ImageNetDataset
|
||||
image_root: ./dataset/ILSVRC2012/
|
||||
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 236
|
||||
interpolation: bicubic
|
||||
backend: pil
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 128
|
||||
drop_last: False
|
||||
shuffle: False
|
||||
loader:
|
||||
num_workers: 16
|
||||
use_shared_memory: True
|
||||
|
||||
Infer:
|
||||
infer_imgs: docs/images/inference_deployment/whl_demo.jpg
|
||||
batch_size: 10
|
||||
transforms:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 236
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
- ToCHWImage:
|
||||
PostProcess:
|
||||
name: Topk
|
||||
topk: 5
|
||||
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
|
||||
|
||||
Metric:
|
||||
Train:
|
||||
- TopkAcc:
|
||||
topk: [1, 5]
|
||||
Eval:
|
||||
- TopkAcc:
|
||||
topk: [1, 5]
|
|
@ -0,0 +1,164 @@
|
|||
# global configs
|
||||
Global:
|
||||
checkpoints: null
|
||||
pretrained_model: null
|
||||
output_dir: ./output/
|
||||
device: gpu
|
||||
save_interval: 1
|
||||
eval_during_train: True
|
||||
eval_interval: 1
|
||||
epochs: 600
|
||||
print_batch_step: 10
|
||||
use_visualdl: False
|
||||
# used for static mode and model export
|
||||
image_shape: [3, 224, 224]
|
||||
save_inference_dir: ./inference
|
||||
# training model under @to_static
|
||||
to_static: False
|
||||
use_dali: False
|
||||
|
||||
# mixed precision training
|
||||
AMP:
|
||||
scale_loss: 128.0
|
||||
use_dynamic_loss_scaling: True
|
||||
# O1: mixed fp16
|
||||
level: O1
|
||||
|
||||
# model architecture
|
||||
Arch:
|
||||
name: PPHGNet_tiny
|
||||
class_num: 1000
|
||||
|
||||
# loss function config for training/eval process
|
||||
Loss:
|
||||
Train:
|
||||
- CELoss:
|
||||
weight: 1.0
|
||||
epsilon: 0.1
|
||||
Eval:
|
||||
- CELoss:
|
||||
weight: 1.0
|
||||
|
||||
|
||||
Optimizer:
|
||||
name: Momentum
|
||||
momentum: 0.9
|
||||
lr:
|
||||
name: Cosine
|
||||
learning_rate: 0.5
|
||||
warmup_epoch: 5
|
||||
regularizer:
|
||||
name: 'L2'
|
||||
coeff: 0.00004
|
||||
|
||||
|
||||
# data loader for train and eval
|
||||
DataLoader:
|
||||
Train:
|
||||
dataset:
|
||||
name: ImageNetDataset
|
||||
image_root: ./dataset/ILSVRC2012/
|
||||
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- RandCropImage:
|
||||
size: 224
|
||||
interpolation: bicubic
|
||||
backend: pil
|
||||
- RandFlipImage:
|
||||
flip_code: 1
|
||||
- TimmAutoAugment:
|
||||
config_str: rand-m7-mstd0.5-inc1
|
||||
interpolation: bicubic
|
||||
img_size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
- RandomErasing:
|
||||
EPSILON: 0.25
|
||||
sl: 0.02
|
||||
sh: 1.0/3.0
|
||||
r1: 0.3
|
||||
attempt: 10
|
||||
use_log_aspect: True
|
||||
mode: pixel
|
||||
batch_transform_ops:
|
||||
- OpSampler:
|
||||
MixupOperator:
|
||||
alpha: 0.2
|
||||
prob: 0.5
|
||||
CutmixOperator:
|
||||
alpha: 1.0
|
||||
prob: 0.5
|
||||
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 128
|
||||
drop_last: False
|
||||
shuffle: True
|
||||
loader:
|
||||
num_workers: 16
|
||||
use_shared_memory: True
|
||||
|
||||
Eval:
|
||||
dataset:
|
||||
name: ImageNetDataset
|
||||
image_root: ./dataset/ILSVRC2012/
|
||||
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 232
|
||||
interpolation: bicubic
|
||||
backend: pil
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 128
|
||||
drop_last: False
|
||||
shuffle: False
|
||||
loader:
|
||||
num_workers: 16
|
||||
use_shared_memory: True
|
||||
|
||||
Infer:
|
||||
infer_imgs: docs/images/inference_deployment/whl_demo.jpg
|
||||
batch_size: 10
|
||||
transforms:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 232
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
- ToCHWImage:
|
||||
PostProcess:
|
||||
name: Topk
|
||||
topk: 5
|
||||
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
|
||||
|
||||
Metric:
|
||||
Train:
|
||||
- TopkAcc:
|
||||
topk: [1, 5]
|
||||
Eval:
|
||||
- TopkAcc:
|
||||
topk: [1, 5]
|
|
@ -0,0 +1,133 @@
|
|||
# global configs
|
||||
Global:
|
||||
checkpoints: null
|
||||
pretrained_model: null
|
||||
output_dir: ./output/
|
||||
device: gpu
|
||||
save_interval: 1
|
||||
eval_during_train: True
|
||||
eval_interval: 1
|
||||
epochs: 480
|
||||
print_batch_step: 10
|
||||
use_visualdl: False
|
||||
# used for static mode and model export
|
||||
image_shape: [3, 224, 224]
|
||||
save_inference_dir: ./inference
|
||||
|
||||
# model architecture
|
||||
Arch:
|
||||
name: PPLCNetV2_base
|
||||
class_num: 1000
|
||||
|
||||
# loss function config for training/eval process
|
||||
Loss:
|
||||
Train:
|
||||
- CELoss:
|
||||
weight: 1.0
|
||||
epsilon: 0.1
|
||||
Eval:
|
||||
- CELoss:
|
||||
weight: 1.0
|
||||
|
||||
Optimizer:
|
||||
name: Momentum
|
||||
momentum: 0.9
|
||||
lr:
|
||||
name: Cosine
|
||||
learning_rate: 0.8
|
||||
warmup_epoch: 5
|
||||
regularizer:
|
||||
name: 'L2'
|
||||
coeff: 0.00004
|
||||
|
||||
# data loader for train and eval
|
||||
DataLoader:
|
||||
Train:
|
||||
dataset:
|
||||
name: MultiScaleDataset
|
||||
image_root: ./dataset/ILSVRC2012/
|
||||
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- RandCropImage:
|
||||
size: 224
|
||||
- RandFlipImage:
|
||||
flip_code: 1
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
|
||||
# support to specify width and height respectively:
|
||||
# scales: [(160,160), (192,192), (224,224), (288,288), (320,320)]
|
||||
sampler:
|
||||
name: MultiScaleSampler
|
||||
scales: [160, 192, 224, 288, 320]
|
||||
# first_bs: batch size for the first image resolution in the scales list
|
||||
# divided_factor: to ensure the width and height dimensions can be divided by the downsampling multiple
|
||||
first_bs: 500
|
||||
divided_factor: 32
|
||||
is_training: True
|
||||
loader:
|
||||
num_workers: 4
|
||||
use_shared_memory: True
|
||||
|
||||
Eval:
|
||||
dataset:
|
||||
name: ImageNetDataset
|
||||
image_root: ./dataset/ILSVRC2012/
|
||||
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 256
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 64
|
||||
drop_last: False
|
||||
shuffle: False
|
||||
loader:
|
||||
num_workers: 4
|
||||
use_shared_memory: True
|
||||
|
||||
Infer:
|
||||
infer_imgs: docs/images/inference_deployment/whl_demo.jpg
|
||||
batch_size: 10
|
||||
transforms:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 256
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
- ToCHWImage:
|
||||
PostProcess:
|
||||
name: Topk
|
||||
topk: 5
|
||||
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
|
||||
|
||||
Metric:
|
||||
Train:
|
||||
- TopkAcc:
|
||||
topk: [1, 5]
|
||||
Eval:
|
||||
- TopkAcc:
|
||||
topk: [1, 5]
|
|
@ -105,7 +105,6 @@ DataLoader:
|
|||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
output_fp16: True
|
||||
channel_num: *image_channel
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
|
@ -132,7 +131,6 @@ Infer:
|
|||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
output_fp16: True
|
||||
channel_num: *image_channel
|
||||
- ToCHWImage:
|
||||
PostProcess:
|
||||
|
|
|
@ -15,6 +15,13 @@ Global:
|
|||
image_shape: [*image_channel, 224, 224]
|
||||
save_inference_dir: ./inference
|
||||
|
||||
# mixed precision training
|
||||
AMP:
|
||||
scale_loss: 128.0
|
||||
use_dynamic_loss_scaling: True
|
||||
# O2: pure fp16
|
||||
level: O2
|
||||
|
||||
# model architecture
|
||||
Arch:
|
||||
name: SE_ResNeXt101_32x4d
|
||||
|
@ -32,13 +39,6 @@ Loss:
|
|||
- CELoss:
|
||||
weight: 1.0
|
||||
|
||||
# mixed precision training
|
||||
AMP:
|
||||
scale_loss: 128.0
|
||||
use_dynamic_loss_scaling: True
|
||||
# O2: pure fp16
|
||||
level: O2
|
||||
|
||||
Optimizer:
|
||||
name: Momentum
|
||||
momentum: 0.9
|
||||
|
@ -99,10 +99,9 @@ DataLoader:
|
|||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
output_fp16: True
|
||||
channel_num: *image_channel
|
||||
sampler:
|
||||
name: BatchSampler
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 64
|
||||
drop_last: False
|
||||
shuffle: False
|
||||
|
@ -126,7 +125,6 @@ Infer:
|
|||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
output_fp16: True
|
||||
channel_num: *image_channel
|
||||
- ToCHWImage:
|
||||
PostProcess:
|
||||
|
|
|
@ -0,0 +1,168 @@
|
|||
# global configs
|
||||
Global:
|
||||
checkpoints: null
|
||||
pretrained_model: null
|
||||
output_dir: ./output
|
||||
device: gpu
|
||||
save_interval: 1
|
||||
eval_during_train: True
|
||||
start_eval_epoch: 1
|
||||
eval_interval: 1
|
||||
epochs: 20
|
||||
print_batch_step: 10
|
||||
use_visualdl: False
|
||||
# used for static mode and model export
|
||||
image_shape: [3, 224, 224]
|
||||
save_inference_dir: ./inference
|
||||
# training model under @to_static
|
||||
to_static: False
|
||||
use_dali: False
|
||||
|
||||
# model architecture
|
||||
Arch:
|
||||
name: "DistillationModel"
|
||||
class_num: &class_num 2
|
||||
# if not null, its length should be the same as models
|
||||
pretrained_list:
|
||||
# if not null, its length should be the same as models
|
||||
freeze_params_list:
|
||||
- True
|
||||
- False
|
||||
use_sync_bn: True
|
||||
models:
|
||||
- Teacher:
|
||||
name: ResNet101_vd
|
||||
class_num: *class_num
|
||||
- Student:
|
||||
name: PPLCNet_x1_0
|
||||
class_num: *class_num
|
||||
pretrained: True
|
||||
use_ssld: True
|
||||
|
||||
infer_model_name: "Student"
|
||||
|
||||
# loss function config for training/eval process
|
||||
Loss:
|
||||
Train:
|
||||
- DistillationDMLLoss:
|
||||
weight: 1.0
|
||||
model_name_pairs:
|
||||
- ["Student", "Teacher"]
|
||||
Eval:
|
||||
- CELoss:
|
||||
weight: 1.0
|
||||
|
||||
|
||||
Optimizer:
|
||||
name: Momentum
|
||||
momentum: 0.9
|
||||
lr:
|
||||
name: Cosine
|
||||
learning_rate: 0.01
|
||||
warmup_epoch: 5
|
||||
regularizer:
|
||||
name: 'L2'
|
||||
coeff: 0.00004
|
||||
|
||||
|
||||
# data loader for train and eval
|
||||
DataLoader:
|
||||
Train:
|
||||
dataset:
|
||||
name: ImageNetDataset
|
||||
image_root: ./dataset/person/
|
||||
cls_label_path: ./dataset/person/train_list_for_distill.txt
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- RandCropImage:
|
||||
size: 192
|
||||
- RandFlipImage:
|
||||
flip_code: 1
|
||||
- TimmAutoAugment:
|
||||
prob: 0.0
|
||||
config_str: rand-m9-mstd0.5-inc1
|
||||
interpolation: bicubic
|
||||
img_size: 192
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
- RandomErasing:
|
||||
EPSILON: 0.1
|
||||
sl: 0.02
|
||||
sh: 1.0/3.0
|
||||
r1: 0.3
|
||||
attempt: 10
|
||||
use_log_aspect: True
|
||||
mode: pixel
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 64
|
||||
drop_last: False
|
||||
shuffle: True
|
||||
loader:
|
||||
num_workers: 16
|
||||
use_shared_memory: True
|
||||
|
||||
Eval:
|
||||
dataset:
|
||||
name: ImageNetDataset
|
||||
image_root: ./dataset/person/
|
||||
cls_label_path: ./dataset/person/val_list.txt
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 256
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 64
|
||||
drop_last: False
|
||||
shuffle: False
|
||||
loader:
|
||||
num_workers: 4
|
||||
use_shared_memory: True
|
||||
|
||||
Infer:
|
||||
infer_imgs: docs/images/inference_deployment/whl_demo.jpg
|
||||
batch_size: 10
|
||||
transforms:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 256
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
- ToCHWImage:
|
||||
PostProcess:
|
||||
name: ThreshOutput
|
||||
threshold: 0.9
|
||||
label_0: nobody
|
||||
label_1: someone
|
||||
|
||||
Metric:
|
||||
Train:
|
||||
- DistillationTopkAcc:
|
||||
model_key: "Student"
|
||||
topk: [1, 2]
|
||||
Eval:
|
||||
- TprAtFpr:
|
||||
- TopkAcc:
|
||||
topk: [1, 2]
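The DistillationDMLLoss entry above trains the Student and Teacher to match each other's predictions. A hedged sketch of what a DML (deep mutual learning) term computes for the ["Student", "Teacher"] pair, assuming the usual symmetric-KL formulation; this helper is illustrative, not the repository's implementation:

import paddle
import paddle.nn.functional as F

def dml_loss(student_logits, teacher_logits, eps=1e-10):
    # Symmetric KL divergence between the two softmax distributions,
    # averaged over the batch.
    p_s = F.softmax(student_logits, axis=-1)
    p_t = F.softmax(teacher_logits, axis=-1)
    kl_st = (p_s * (paddle.log(p_s + eps) - paddle.log(p_t + eps))).sum(-1).mean()
    kl_ts = (p_t * (paddle.log(p_t + eps) - paddle.log(p_s + eps))).sum(-1).mean()
    return (kl_st + kl_ts) / 2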
|
|
@ -0,0 +1,145 @@
|
|||
# global configs
|
||||
Global:
|
||||
checkpoints: null
|
||||
pretrained_model: null
|
||||
output_dir: ./output/
|
||||
device: gpu
|
||||
save_interval: 1
|
||||
eval_during_train: True
|
||||
eval_interval: 1
|
||||
start_eval_epoch: 10
|
||||
epochs: 20
|
||||
print_batch_step: 10
|
||||
use_visualdl: False
|
||||
# used for static mode and model export
|
||||
image_shape: [3, 224, 224]
|
||||
save_inference_dir: ./inference
|
||||
# training model under @to_static
|
||||
to_static: False
|
||||
use_dali: False
|
||||
|
||||
# mixed precision training
|
||||
AMP:
|
||||
scale_loss: 128.0
|
||||
use_dynamic_loss_scaling: True
|
||||
# O1: mixed fp16
|
||||
level: O1
|
||||
|
||||
# model architecture
|
||||
Arch:
|
||||
name: MobileNetV3_large_x1_0
|
||||
class_num: 2
|
||||
pretrained: True
|
||||
use_sync_bn: True
|
||||
|
||||
# loss function config for training/eval process
|
||||
Loss:
|
||||
Train:
|
||||
- CELoss:
|
||||
weight: 1.0
|
||||
epsilon: 0.1
|
||||
Eval:
|
||||
- CELoss:
|
||||
weight: 1.0
|
||||
|
||||
|
||||
Optimizer:
|
||||
name: Momentum
|
||||
momentum: 0.9
|
||||
lr:
|
||||
name: Cosine
|
||||
learning_rate: 0.13
|
||||
warmup_epoch: 5
|
||||
regularizer:
|
||||
name: 'L2'
|
||||
coeff: 0.00002
|
||||
|
||||
|
||||
# data loader for train and eval
|
||||
DataLoader:
|
||||
Train:
|
||||
dataset:
|
||||
name: ImageNetDataset
|
||||
image_root: ./dataset/person/
|
||||
cls_label_path: ./dataset/person/train_list.txt
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- RandCropImage:
|
||||
size: 224
|
||||
- RandFlipImage:
|
||||
flip_code: 1
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 512
|
||||
drop_last: False
|
||||
shuffle: True
|
||||
loader:
|
||||
num_workers: 8
|
||||
use_shared_memory: True
|
||||
|
||||
Eval:
|
||||
dataset:
|
||||
name: ImageNetDataset
|
||||
image_root: ./dataset/person/
|
||||
cls_label_path: ./dataset/person/val_list.txt
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 256
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 64
|
||||
drop_last: False
|
||||
shuffle: False
|
||||
loader:
|
||||
num_workers: 4
|
||||
use_shared_memory: True
|
||||
|
||||
Infer:
|
||||
infer_imgs: docs/images/inference_deployment/whl_demo.jpg
|
||||
batch_size: 10
|
||||
transforms:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 256
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
- ToCHWImage:
|
||||
PostProcess:
|
||||
name: ThreshOutput
|
||||
threshold: 0.9
|
||||
label_0: nobody
|
||||
label_1: someone
|
||||
|
||||
Metric:
|
||||
Train:
|
||||
- TopkAcc:
|
||||
topk: [1, 2]
|
||||
Eval:
|
||||
- TprAtFpr:
|
||||
- TopkAcc:
|
||||
topk: [1, 2]
|
|
@ -0,0 +1,168 @@
|
|||
# global configs
|
||||
Global:
|
||||
checkpoints: null
|
||||
pretrained_model: null
|
||||
output_dir: ./output/
|
||||
device: gpu
|
||||
save_interval: 1
|
||||
eval_during_train: True
|
||||
eval_interval: 1
|
||||
start_eval_epoch: 10
|
||||
epochs: 20
|
||||
print_batch_step: 10
|
||||
use_visualdl: False
|
||||
# used for static mode and model export
|
||||
image_shape: [3, 224, 224]
|
||||
save_inference_dir: ./inference
|
||||
# training model under @to_static
|
||||
to_static: False
|
||||
use_dali: False
|
||||
|
||||
# mixed precision training
|
||||
AMP:
|
||||
scale_loss: 128.0
|
||||
use_dynamic_loss_scaling: True
|
||||
# O1: mixed fp16
|
||||
level: O1
|
||||
|
||||
# model architecture
|
||||
Arch:
|
||||
name: SwinTransformer_tiny_patch4_window7_224
|
||||
class_num: 2
|
||||
pretrained: True
|
||||
|
||||
# loss function config for training/eval process
|
||||
Loss:
|
||||
Train:
|
||||
- CELoss:
|
||||
weight: 1.0
|
||||
epsilon: 0.1
|
||||
Eval:
|
||||
- CELoss:
|
||||
weight: 1.0
|
||||
|
||||
Optimizer:
|
||||
name: AdamW
|
||||
beta1: 0.9
|
||||
beta2: 0.999
|
||||
epsilon: 1e-8
|
||||
weight_decay: 0.05
|
||||
no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm
|
||||
one_dim_param_no_weight_decay: True
|
||||
lr:
|
||||
name: Cosine
|
||||
learning_rate: 1e-4
|
||||
eta_min: 2e-6
|
||||
warmup_epoch: 5
|
||||
warmup_start_lr: 2e-7
|
||||
|
||||
|
||||
# data loader for train and eval
|
||||
DataLoader:
|
||||
Train:
|
||||
dataset:
|
||||
name: ImageNetDataset
|
||||
image_root: ./dataset/person/
|
||||
cls_label_path: ./dataset/person/train_list.txt
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- RandCropImage:
|
||||
size: 224
|
||||
interpolation: bicubic
|
||||
backend: pil
|
||||
- RandFlipImage:
|
||||
flip_code: 1
|
||||
- TimmAutoAugment:
|
||||
config_str: rand-m9-mstd0.5-inc1
|
||||
interpolation: bicubic
|
||||
img_size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
- RandomErasing:
|
||||
EPSILON: 0.25
|
||||
sl: 0.02
|
||||
sh: 1.0/3.0
|
||||
r1: 0.3
|
||||
attempt: 10
|
||||
use_log_aspect: True
|
||||
mode: pixel
|
||||
batch_transform_ops:
|
||||
- OpSampler:
|
||||
MixupOperator:
|
||||
alpha: 0.8
|
||||
prob: 0.5
|
||||
CutmixOperator:
|
||||
alpha: 1.0
|
||||
prob: 0.5
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 128
|
||||
drop_last: False
|
||||
shuffle: True
|
||||
loader:
|
||||
num_workers: 8
|
||||
use_shared_memory: True
|
||||
|
||||
Eval:
|
||||
dataset:
|
||||
name: ImageNetDataset
|
||||
image_root: ./dataset/person/
|
||||
cls_label_path: ./dataset/person/val_list.txt
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 256
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 64
|
||||
drop_last: False
|
||||
shuffle: False
|
||||
loader:
|
||||
num_workers: 8
|
||||
use_shared_memory: True
|
||||
|
||||
Infer:
|
||||
infer_imgs: docs/images/inference_deployment/whl_demo.jpg
|
||||
batch_size: 10
|
||||
transforms:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 256
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
- ToCHWImage:
|
||||
PostProcess:
|
||||
name: ThreshOutput
|
||||
threshold: 0.9
|
||||
label_0: nobody
|
||||
label_1: someone
|
||||
|
||||
Metric:
|
||||
Train:
|
||||
- TopkAcc:
|
||||
topk: [1, 2]
|
||||
Eval:
|
||||
- TprAtFpr:
|
||||
- TopkAcc:
|
||||
topk: [1, 2]
|
|
@ -0,0 +1,151 @@
|
|||
# global configs
|
||||
Global:
|
||||
checkpoints: null
|
||||
pretrained_model: null
|
||||
output_dir: ./output/
|
||||
device: gpu
|
||||
save_interval: 1
|
||||
eval_during_train: True
|
||||
eval_interval: 1
|
||||
start_eval_epoch: 10
|
||||
epochs: 20
|
||||
print_batch_step: 10
|
||||
use_visualdl: False
|
||||
# used for static mode and model export
|
||||
image_shape: [3, 224, 224]
|
||||
save_inference_dir: ./inference
|
||||
# training model under @to_static
|
||||
to_static: False
|
||||
use_dali: False
|
||||
|
||||
|
||||
# model architecture
|
||||
Arch:
|
||||
name: PPLCNet_x1_0
|
||||
class_num: 2
|
||||
pretrained: True
|
||||
use_ssld: True
|
||||
use_sync_bn: True
|
||||
|
||||
# loss function config for training/eval process
|
||||
Loss:
|
||||
Train:
|
||||
- CELoss:
|
||||
weight: 1.0
|
||||
Eval:
|
||||
- CELoss:
|
||||
weight: 1.0
|
||||
|
||||
|
||||
Optimizer:
|
||||
name: Momentum
|
||||
momentum: 0.9
|
||||
lr:
|
||||
name: Cosine
|
||||
learning_rate: 0.01
|
||||
warmup_epoch: 5
|
||||
regularizer:
|
||||
name: 'L2'
|
||||
coeff: 0.00004
|
||||
|
||||
|
||||
# data loader for train and eval
|
||||
DataLoader:
|
||||
Train:
|
||||
dataset:
|
||||
name: ImageNetDataset
|
||||
image_root: ./dataset/person/
|
||||
cls_label_path: ./dataset/person/train_list.txt
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- RandCropImage:
|
||||
size: 192
|
||||
- RandFlipImage:
|
||||
flip_code: 1
|
||||
- TimmAutoAugment:
|
||||
prob: 0.0
|
||||
config_str: rand-m9-mstd0.5-inc1
|
||||
interpolation: bicubic
|
||||
img_size: 192
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
- RandomErasing:
|
||||
EPSILON: 0.1
|
||||
sl: 0.02
|
||||
sh: 1.0/3.0
|
||||
r1: 0.3
|
||||
attempt: 10
|
||||
use_log_aspect: True
|
||||
mode: pixel
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 64
|
||||
drop_last: False
|
||||
shuffle: True
|
||||
loader:
|
||||
num_workers: 8
|
||||
use_shared_memory: True
|
||||
|
||||
Eval:
|
||||
dataset:
|
||||
name: ImageNetDataset
|
||||
image_root: ./dataset/person/
|
||||
cls_label_path: ./dataset/person/val_list.txt
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 256
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 64
|
||||
drop_last: False
|
||||
shuffle: False
|
||||
loader:
|
||||
num_workers: 4
|
||||
use_shared_memory: True
|
||||
|
||||
Infer:
|
||||
infer_imgs: docs/images/inference_deployment/whl_demo.jpg
|
||||
batch_size: 10
|
||||
transforms:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 256
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
- ToCHWImage:
|
||||
PostProcess:
|
||||
name: ThreshOutput
|
||||
threshold: 0.9
|
||||
label_0: nobody
|
||||
label_1: someone
|
||||
|
||||
Metric:
|
||||
Train:
|
||||
- TopkAcc:
|
||||
topk: [1, 2]
|
||||
Eval:
|
||||
- TprAtFpr:
|
||||
- TopkAcc:
|
||||
topk: [1, 2]
|
|
@ -0,0 +1,151 @@
|
|||
# global configs
|
||||
Global:
|
||||
checkpoints: null
|
||||
pretrained_model: null
|
||||
output_dir: ./output/
|
||||
device: gpu
|
||||
save_interval: 1
|
||||
eval_during_train: True
|
||||
eval_interval: 1
|
||||
start_eval_epoch: 10
|
||||
epochs: 20
|
||||
print_batch_step: 10
|
||||
use_visualdl: False
|
||||
# used for static mode and model export
|
||||
image_shape: [3, 224, 224]
|
||||
save_inference_dir: ./inference
|
||||
# training model under @to_static
|
||||
to_static: False
|
||||
use_dali: False
|
||||
|
||||
|
||||
# model architecture
|
||||
Arch:
|
||||
name: PPLCNet_x1_0
|
||||
class_num: 2
|
||||
pretrained: True
|
||||
use_ssld: True
|
||||
use_sync_bn: True
|
||||
|
||||
# loss function config for training/eval process
|
||||
Loss:
|
||||
Train:
|
||||
- CELoss:
|
||||
weight: 1.0
|
||||
Eval:
|
||||
- CELoss:
|
||||
weight: 1.0
|
||||
|
||||
|
||||
Optimizer:
|
||||
name: Momentum
|
||||
momentum: 0.9
|
||||
lr:
|
||||
name: Cosine
|
||||
learning_rate: 0.01
|
||||
warmup_epoch: 5
|
||||
regularizer:
|
||||
name: 'L2'
|
||||
coeff: 0.00004
|
||||
|
||||
|
||||
# data loader for train and eval
|
||||
DataLoader:
|
||||
Train:
|
||||
dataset:
|
||||
name: ImageNetDataset
|
||||
image_root: ./dataset/person/
|
||||
cls_label_path: ./dataset/person/train_list.txt
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- RandCropImage:
|
||||
size: 224
|
||||
- RandFlipImage:
|
||||
flip_code: 1
|
||||
- TimmAutoAugment:
|
||||
prob: 0.0
|
||||
config_str: rand-m9-mstd0.5-inc1
|
||||
interpolation: bicubic
|
||||
img_size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
- RandomErasing:
|
||||
EPSILON: 0.0
|
||||
sl: 0.02
|
||||
sh: 1.0/3.0
|
||||
r1: 0.3
|
||||
attempt: 10
|
||||
use_log_aspect: True
|
||||
mode: pixel
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 64
|
||||
drop_last: False
|
||||
shuffle: True
|
||||
loader:
|
||||
num_workers: 8
|
||||
use_shared_memory: True
|
||||
|
||||
Eval:
|
||||
dataset:
|
||||
name: ImageNetDataset
|
||||
image_root: ./dataset/person/
|
||||
cls_label_path: ./dataset/person/val_list.txt
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 256
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 64
|
||||
drop_last: False
|
||||
shuffle: False
|
||||
loader:
|
||||
num_workers: 4
|
||||
use_shared_memory: True
|
||||
|
||||
Infer:
|
||||
infer_imgs: docs/images/inference_deployment/whl_demo.jpg
|
||||
batch_size: 10
|
||||
transforms:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 256
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
- ToCHWImage:
|
||||
PostProcess:
|
||||
name: ThreshOutput
|
||||
threshold: 0.9
|
||||
label_0: nobody
|
||||
label_1: someone
|
||||
|
||||
Metric:
|
||||
Train:
|
||||
- TopkAcc:
|
||||
topk: [1, 2]
|
||||
Eval:
|
||||
- TprAtFpr:
|
||||
- TopkAcc:
|
||||
topk: [1, 2]
|
|
@ -0,0 +1,40 @@
|
|||
base_config_file: ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0_search.yaml
|
||||
distill_config_file: ppcls/configs/PULC/person/Distillation/PPLCNet_x1_0_distillation.yaml
|
||||
|
||||
gpus: 0,1,2,3
|
||||
output_dir: output/search_person
|
||||
search_times: 1
|
||||
search_dict:
|
||||
- search_key: lrs
|
||||
replace_config:
|
||||
- Optimizer.lr.learning_rate
|
||||
search_values: [0.0075, 0.01, 0.0125]
|
||||
- search_key: resolutions
|
||||
replace_config:
|
||||
- DataLoader.Train.dataset.transform_ops.1.RandCropImage.size
|
||||
- DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size
|
||||
search_values: [176, 192, 224]
|
||||
- search_key: ra_probs
|
||||
replace_config:
|
||||
- DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob
|
||||
search_values: [0.0, 0.1, 0.5]
|
||||
- search_key: re_probs
|
||||
replace_config:
|
||||
- DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON
|
||||
search_values: [0.0, 0.1, 0.5]
|
||||
- search_key: lr_mult_list
|
||||
replace_config:
|
||||
- Arch.lr_mult_list
|
||||
search_values:
|
||||
- [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
|
||||
- [0.0, 0.4, 0.4, 0.8, 0.8, 1.0]
|
||||
- [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
|
||||
teacher:
|
||||
rm_keys:
|
||||
- Arch.lr_mult_list
|
||||
search_values:
|
||||
- ResNet101_vd
|
||||
- ResNet50_vd
|
||||
final_replace:
|
||||
Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list
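Each replace_config entry above is a dotted path into the training YAML; numeric segments index into lists, so DataLoader.Train.dataset.transform_ops.1.RandCropImage.size points at the RandCropImage entry (index 1) of the transform list. A minimal sketch of how such a path can be resolved; set_by_path is a hypothetical helper, not part of this PR:

def set_by_path(cfg, dotted_path, value):
    # Walk the nested dict/list config, treating digit segments as list indices.
    keys = dotted_path.split(".")
    node = cfg
    for k in keys[:-1]:
        node = node[int(k)] if k.isdigit() else node[k]
    node[keys[-1]] = value

# e.g. set_by_path(config, "DataLoader.Train.dataset.transform_ops.1.RandCropImage.size", 192)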
|
||||
|
|
@ -0,0 +1,138 @@
|
|||
# global configs
|
||||
Global:
|
||||
checkpoints: null
|
||||
pretrained_model: null
|
||||
output_dir: ./output/
|
||||
device: gpu
|
||||
save_interval: 1
|
||||
eval_during_train: True
|
||||
eval_interval: 1
|
||||
epochs: 120
|
||||
print_batch_step: 10
|
||||
use_visualdl: False
|
||||
# used for static mode and model export
|
||||
image_shape: [3, 224, 224]
|
||||
save_inference_dir: ./inference
|
||||
# training model under @to_static
|
||||
to_static: False
|
||||
|
||||
# model architecture
|
||||
Arch:
|
||||
name: MobileNetV1
|
||||
class_num: 1000
|
||||
|
||||
# loss function config for training/eval process
|
||||
Loss:
|
||||
Train:
|
||||
- CELoss:
|
||||
weight: 1.0
|
||||
Eval:
|
||||
- CELoss:
|
||||
weight: 1.0
|
||||
|
||||
|
||||
Optimizer:
|
||||
name: Momentum
|
||||
momentum: 0.9
|
||||
lr:
|
||||
name: Piecewise
|
||||
learning_rate: 0.1
|
||||
decay_epochs: [30, 60, 90]
|
||||
values: [0.1, 0.01, 0.001, 0.0001]
|
||||
regularizer:
|
||||
name: 'L2'
|
||||
coeff: 0.00003
|
||||
|
||||
|
||||
# data loader for train and eval
|
||||
DataLoader:
|
||||
Train:
|
||||
dataset:
|
||||
name: MultiScaleDataset
|
||||
image_root: ./dataset/ILSVRC2012/
|
||||
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- RandCropImage:
|
||||
size: 224
|
||||
- RandFlipImage:
|
||||
flip_code: 1
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
|
||||
# support to specify width and height respectively:
|
||||
# scales: [(160,160), (192,192), (224,224), (288,288), (320,320)]
|
||||
sampler:
|
||||
name: MultiScaleSampler
|
||||
scales: [160, 192, 224, 288, 320]
|
||||
# first_bs: batch size for the first image resolution in the scales list
|
||||
# divided_factor: to ensure the width and height dimensions can be divided by the downsampling multiple
|
||||
first_bs: 64
|
||||
divided_factor: 32
|
||||
is_training: True
|
||||
|
||||
loader:
|
||||
num_workers: 4
|
||||
use_shared_memory: True
|
||||
|
||||
Eval:
|
||||
dataset:
|
||||
name: ImageNetDataset
|
||||
image_root: ./dataset/ILSVRC2012/
|
||||
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
|
||||
transform_ops:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 256
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
sampler:
|
||||
name: DistributedBatchSampler
|
||||
batch_size: 64
|
||||
drop_last: False
|
||||
shuffle: False
|
||||
loader:
|
||||
num_workers: 4
|
||||
use_shared_memory: True
|
||||
|
||||
Infer:
|
||||
infer_imgs: docs/images/whl/demo.jpg
|
||||
batch_size: 10
|
||||
transforms:
|
||||
- DecodeImage:
|
||||
to_rgb: True
|
||||
channel_first: False
|
||||
- ResizeImage:
|
||||
resize_short: 256
|
||||
- CropImage:
|
||||
size: 224
|
||||
- NormalizeImage:
|
||||
scale: 1.0/255.0
|
||||
mean: [0.485, 0.456, 0.406]
|
||||
std: [0.229, 0.224, 0.225]
|
||||
order: ''
|
||||
- ToCHWImage:
|
||||
PostProcess:
|
||||
name: Topk
|
||||
topk: 5
|
||||
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
|
||||
|
||||
Metric:
|
||||
Train:
|
||||
- TopkAcc:
|
||||
topk: [1, 5]
|
||||
Eval:
|
||||
- TopkAcc:
|
||||
topk: [1, 5]
|
|
@ -12,6 +12,7 @@ Global:
|
|||
use_visualdl: False
|
||||
eval_mode: "retrieval"
|
||||
retrieval_feature_from: "backbone" # 'backbone' or 'neck'
|
||||
re_ranking: False
|
||||
# used for static mode and model export
|
||||
image_shape: [3, 256, 128]
|
||||
save_inference_dir: "./inference"
|
||||
|
@ -31,6 +32,14 @@ Arch:
|
|||
name: "FC"
|
||||
embedding_size: 2048
|
||||
class_num: 751
|
||||
weight_attr:
|
||||
initializer:
|
||||
name: KaimingUniform
|
||||
fan_in: 12288 # 6*embedding_size
|
||||
bias_attr:
|
||||
initializer:
|
||||
name: KaimingUniform
|
||||
fan_in: 12288 # 6*embedding_size
|
||||
|
||||
# loss function config for traing/eval process
|
||||
Loss:
|
||||
|
@ -52,7 +61,6 @@ Optimizer:
|
|||
name: Piecewise
|
||||
decay_epochs: [40, 70]
|
||||
values: [0.00035, 0.000035, 0.0000035]
|
||||
warmup_epoch: 10
|
||||
by_epoch: True
|
||||
last_epoch: 0
|
||||
regularizer:
|
||||
|
@ -71,6 +79,7 @@ DataLoader:
|
|||
- ResizeImage:
|
||||
size: [128, 256]
|
||||
return_numpy: False
|
||||
interpolation: 'bilinear'
|
||||
backend: "pil"
|
||||
- RandFlipImage:
|
||||
flip_code: 1
|
||||
|
@ -102,6 +111,7 @@ DataLoader:
|
|||
- ResizeImage:
|
||||
size: [128, 256]
|
||||
return_numpy: False
|
||||
interpolation: 'bilinear'
|
||||
backend: "pil"
|
||||
- ToTensor:
|
||||
- Normalize:
|
||||
|
@ -126,6 +136,7 @@ DataLoader:
|
|||
- ResizeImage:
|
||||
size: [128, 256]
|
||||
return_numpy: False
|
||||
interpolation: 'bilinear'
|
||||
backend: "pil"
|
||||
- ToTensor:
|
||||
- Normalize:
|
|
@ -12,6 +12,7 @@ Global:
|
|||
use_visualdl: False
|
||||
eval_mode: "retrieval"
|
||||
retrieval_feature_from: "features" # 'backbone' or 'features'
|
||||
re_ranking: False
|
||||
# used for static mode and model export
|
||||
image_shape: [3, 256, 128]
|
||||
save_inference_dir: "./inference"
|
||||
|
@ -90,6 +91,7 @@ DataLoader:
|
|||
- ResizeImage:
|
||||
size: [128, 256]
|
||||
return_numpy: False
|
||||
interpolation: 'bilinear'
|
||||
backend: "pil"
|
||||
- RandFlipImage:
|
||||
flip_code: 1
|
||||
|
@ -127,6 +129,7 @@ DataLoader:
|
|||
- ResizeImage:
|
||||
size: [128, 256]
|
||||
return_numpy: False
|
||||
interpolation: 'bilinear'
|
||||
backend: "pil"
|
||||
- ToTensor:
|
||||
- Normalize:
|
||||
|
@ -151,6 +154,7 @@ DataLoader:
|
|||
- ResizeImage:
|
||||
size: [128, 256]
|
||||
return_numpy: False
|
||||
interpolation: 'bilinear'
|
||||
backend: "pil"
|
||||
- ToTensor:
|
||||
- Normalize:
|
|
@ -12,6 +12,7 @@ Global:
|
|||
use_visualdl: False
|
||||
eval_mode: "retrieval"
|
||||
retrieval_feature_from: "features" # 'backbone' or 'features'
|
||||
re_ranking: False
|
||||
# used for static mode and model export
|
||||
image_shape: [3, 256, 128]
|
||||
save_inference_dir: "./inference"
|
||||
|
@ -101,6 +102,7 @@ DataLoader:
|
|||
- ResizeImage:
|
||||
size: [128, 256]
|
||||
return_numpy: False
|
||||
interpolation: 'bilinear'
|
||||
backend: "pil"
|
||||
- RandFlipImage:
|
||||
flip_code: 1
|
||||
|
@ -138,6 +140,7 @@ DataLoader:
|
|||
- ResizeImage:
|
||||
size: [128, 256]
|
||||
return_numpy: False
|
||||
interpolation: 'bilinear'
|
||||
backend: "pil"
|
||||
- ToTensor:
|
||||
- Normalize:
|
||||
|
@ -162,6 +165,7 @@ DataLoader:
|
|||
- ResizeImage:
|
||||
size: [128, 256]
|
||||
return_numpy: False
|
||||
interpolation: 'bilinear'
|
||||
backend: "pil"
|
||||
- ToTensor:
|
||||
- Normalize:
|
|
@ -28,13 +28,16 @@ from ppcls.data.dataloader.vehicle_dataset import CompCars, VeriWild
|
|||
from ppcls.data.dataloader.logo_dataset import LogoDataset
|
||||
from ppcls.data.dataloader.icartoon_dataset import ICartoonDataset
|
||||
from ppcls.data.dataloader.mix_dataset import MixDataset
|
||||
from ppcls.data.dataloader.multi_scale_dataset import MultiScaleDataset
|
||||
from ppcls.data.dataloader.person_dataset import Market1501, MSMT17
|
||||
from ppcls.data.dataloader.face_dataset import FiveValidationDataset, AdaFaceDataset
|
||||
|
||||
|
||||
# sampler
|
||||
from ppcls.data.dataloader.DistributedRandomIdentitySampler import DistributedRandomIdentitySampler
|
||||
from ppcls.data.dataloader.pk_sampler import PKSampler
|
||||
from ppcls.data.dataloader.mix_sampler import MixSampler
|
||||
from ppcls.data.dataloader.multi_scale_sampler import MultiScaleSampler
|
||||
from ppcls.data import preprocess
|
||||
from ppcls.data.preprocess import transform
|
||||
|
||||
|
|
|
@ -5,7 +5,9 @@ from ppcls.data.dataloader.vehicle_dataset import CompCars, VeriWild
|
|||
from ppcls.data.dataloader.logo_dataset import LogoDataset
|
||||
from ppcls.data.dataloader.icartoon_dataset import ICartoonDataset
|
||||
from ppcls.data.dataloader.mix_dataset import MixDataset
|
||||
from ppcls.data.dataloader.multi_scale_dataset import MultiScaleDataset
|
||||
from ppcls.data.dataloader.mix_sampler import MixSampler
|
||||
from ppcls.data.dataloader.multi_scale_sampler import MultiScaleSampler
|
||||
from ppcls.data.dataloader.pk_sampler import PKSampler
|
||||
from ppcls.data.dataloader.person_dataset import Market1501, MSMT17
|
||||
from ppcls.data.dataloader.face_dataset import AdaFaceDataset, FiveValidationDataset
|
||||
|
|
|
@ -44,11 +44,11 @@ def create_operators(params):
|
|||
|
||||
|
||||
class CommonDataset(Dataset):
|
||||
def __init__(
|
||||
self,
|
||||
image_root,
|
||||
cls_label_path,
|
||||
transform_ops=None, ):
|
||||
def __init__(self,
|
||||
image_root,
|
||||
cls_label_path,
|
||||
transform_ops=None,
|
||||
label_ratio=False):
|
||||
self._img_root = image_root
|
||||
self._cls_path = cls_label_path
|
||||
if transform_ops:
|
||||
|
@ -56,7 +56,10 @@ class CommonDataset(Dataset):
|
|||
|
||||
self.images = []
|
||||
self.labels = []
|
||||
self._load_anno()
|
||||
if label_ratio:
|
||||
self.label_ratio = self._load_anno(label_ratio=label_ratio)
|
||||
else:
|
||||
self._load_anno()
|
||||
|
||||
def _load_anno(self):
|
||||
pass
|
||||
|
|
|
@ -230,7 +230,7 @@ def dali_dataloader(config, mode, device, seed=None):
|
|||
lower = ratio[0]
|
||||
upper = ratio[1]
|
||||
|
||||
if 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env:
|
||||
if 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env and 'FLAGS_selected_gpus' in env:
|
||||
shard_id = int(env['PADDLE_TRAINER_ID'])
|
||||
num_shards = int(env['PADDLE_TRAINERS_NUM'])
|
||||
device_id = int(env['FLAGS_selected_gpus'])
|
||||
|
@ -282,7 +282,7 @@ def dali_dataloader(config, mode, device, seed=None):
|
|||
else:
|
||||
resize_shorter = transforms["ResizeImage"].get("resize_short", 256)
|
||||
crop = transforms["CropImage"]["size"]
|
||||
if 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env and sampler_name == "DistributedBatchSampler":
|
||||
if 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env and 'FLAGS_selected_gpus' in env and sampler_name == "DistributedBatchSampler":
|
||||
shard_id = int(env['PADDLE_TRAINER_ID'])
|
||||
num_shards = int(env['PADDLE_TRAINERS_NUM'])
|
||||
device_id = int(env['FLAGS_selected_gpus'])
|
||||
|
|
|
@ -0,0 +1,107 @@
|
|||
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import numpy as np
|
||||
import os
|
||||
|
||||
from paddle.io import Dataset
|
||||
from paddle.vision import transforms
|
||||
import cv2
|
||||
import warnings
|
||||
|
||||
from ppcls.data import preprocess
|
||||
from ppcls.data.preprocess import transform
|
||||
from ppcls.data.preprocess.ops.operators import DecodeImage
|
||||
from ppcls.utils import logger
|
||||
from ppcls.data.dataloader.common_dataset import create_operators
|
||||
|
||||
|
||||
class MultiScaleDataset(Dataset):
|
||||
def __init__(
|
||||
self,
|
||||
image_root,
|
||||
cls_label_path,
|
||||
transform_ops=None, ):
|
||||
self._img_root = image_root
|
||||
self._cls_path = cls_label_path
|
||||
self.transform_ops = transform_ops
|
||||
self.images = []
|
||||
self.labels = []
|
||||
self._load_anno()
|
||||
self.has_crop_flag = 1
|
||||
|
||||
def _load_anno(self, seed=None):
|
||||
assert os.path.exists(self._cls_path)
|
||||
assert os.path.exists(self._img_root)
|
||||
self.images = []
|
||||
self.labels = []
|
||||
|
||||
with open(self._cls_path) as fd:
|
||||
lines = fd.readlines()
|
||||
if seed is not None:
|
||||
np.random.RandomState(seed).shuffle(lines)
|
||||
for l in lines:
|
||||
l = l.strip().split(" ")
|
||||
self.images.append(os.path.join(self._img_root, l[0]))
|
||||
self.labels.append(np.int64(l[1]))
|
||||
assert os.path.exists(self.images[-1])
|
||||
|
||||
def __getitem__(self, properties):
|
||||
# properties is a tuple containing (width, height, index)
|
||||
img_width = properties[0]
|
||||
img_height = properties[1]
|
||||
index = properties[2]
|
||||
has_crop = False
|
||||
if self.transform_ops:
|
||||
for i in range(len(self.transform_ops)):
|
||||
op = self.transform_ops[i]
|
||||
resize_op = ['RandCropImage', 'ResizeImage', 'CropImage']
|
||||
for resize in resize_op:
|
||||
if resize in op:
|
||||
if self.has_crop_flag:
|
||||
logger.warning(
|
||||
"Multi scale dataset will crop image according to the multi scale resolution"
|
||||
)
|
||||
self.transform_ops[i][resize] = {
|
||||
'size': (img_width, img_height)
|
||||
}
|
||||
has_crop = True
|
||||
self.has_crop_flag = 0
|
||||
if not has_crop:
|
||||
logger.error("Multi scale dataset requires RandCropImage")
|
||||
raise RuntimeError("Multi scale dataset requires RandCropImage")
|
||||
self._transform_ops = create_operators(self.transform_ops)
|
||||
|
||||
try:
|
||||
with open(self.images[index], 'rb') as f:
|
||||
img = f.read()
|
||||
if self._transform_ops:
|
||||
img = transform(img, self._transform_ops)
|
||||
img = img.transpose((2, 0, 1))
|
||||
return (img, self.labels[index])
|
||||
|
||||
except Exception as ex:
|
||||
logger.error("Exception occured when parse line: {} with msg: {}".
|
||||
format(self.images[index], ex))
|
||||
rnd_idx = np.random.randint(self.__len__())
|
||||
return self.__getitem__(rnd_idx)
|
||||
|
||||
def __len__(self):
|
||||
return len(self.images)
|
||||
|
||||
@property
|
||||
def class_num(self):
|
||||
return len(set(self.labels))
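# A minimal pairing sketch (paths are placeholders, not from this PR):
# MultiScaleSampler yields batches of (width, height, index) tuples that
# __getitem__ above unpacks, so every image in a batch is cropped at the
# resolution the sampler picked for that batch.
if __name__ == "__main__":
    from paddle.io import DataLoader
    from ppcls.data.dataloader.multi_scale_sampler import MultiScaleSampler

    demo_set = MultiScaleDataset(
        image_root="./dataset/ILSVRC2012/",
        cls_label_path="./dataset/ILSVRC2012/train_list.txt",
        transform_ops=[{"DecodeImage": {"to_rgb": True, "channel_first": False}},
                       {"RandCropImage": {"size": 224}}])
    demo_sampler = MultiScaleSampler(demo_set, scales=[160, 192, 224], first_bs=64)
    demo_loader = DataLoader(demo_set, batch_sampler=demo_sampler, num_workers=4)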
|
|
@ -0,0 +1,132 @@
|
|||
from paddle.io import Sampler
|
||||
import paddle.distributed as dist
|
||||
|
||||
import math
|
||||
import random
|
||||
import numpy as np
|
||||
|
||||
from ppcls import data
|
||||
|
||||
|
||||
class MultiScaleSampler(Sampler):
|
||||
def __init__(self,
|
||||
data_source,
|
||||
scales,
|
||||
first_bs,
|
||||
divided_factor=32,
|
||||
is_training=True,
|
||||
seed=None):
|
||||
"""
|
||||
multi scale sampler
|
||||
Args:
|
||||
data_source(dataset): the dataset to sample from
|
||||
scales(list): several scales for image resolution
|
||||
first_bs(int): batch size for the first scale in scales
|
||||
divided_factor(int): ImageNet models down-sample images by a factor; ensure that width and height dimensions are multiples of divided_factor.
|
||||
is_training(bool): whether to generate shuffled multi-scale batches (training mode)
|
||||
"""
|
||||
# min. and max. spatial dimensions
|
||||
self.data_source = data_source
|
||||
self.n_data_samples = len(self.data_source)
|
||||
|
||||
if isinstance(scales[0], tuple):
|
||||
width_dims = [i[0] for i in scales]
|
||||
height_dims = [i[1] for i in scales]
|
||||
elif isinstance(scales[0], int):
|
||||
width_dims = scales
|
||||
height_dims = scales
|
||||
base_im_w = width_dims[0]
|
||||
base_im_h = height_dims[0]
|
||||
base_batch_size = first_bs
|
||||
|
||||
# Get the GPU and node related information
|
||||
num_replicas = dist.get_world_size()
|
||||
rank = dist.get_rank()
|
||||
# adjust the total samples to avoid batch dropping
|
||||
num_samples_per_replica = int(
|
||||
math.ceil(self.n_data_samples * 1.0 / num_replicas))
|
||||
img_indices = [idx for idx in range(self.n_data_samples)]
|
||||
|
||||
self.shuffle = False
|
||||
if is_training:
|
||||
# compute the spatial dimensions and corresponding batch size
|
||||
# ImageNet models down-sample images by a factor of 32.
|
||||
# Ensure that width and height dimensions are multiples of 32.
|
||||
width_dims = [
|
||||
int((w // divided_factor) * divided_factor) for w in width_dims
|
||||
]
|
||||
height_dims = [
|
||||
int((h // divided_factor) * divided_factor)
|
||||
for h in height_dims
|
||||
]
|
||||
|
||||
img_batch_pairs = list()
|
||||
base_elements = base_im_w * base_im_h * base_batch_size
|
||||
for (h, w) in zip(height_dims, width_dims):
|
||||
batch_size = int(max(1, (base_elements / (h * w))))
|
||||
img_batch_pairs.append((w, h, batch_size))
|
||||
self.img_batch_pairs = img_batch_pairs
|
||||
self.shuffle = True
|
||||
else:
|
||||
self.img_batch_pairs = [(base_im_w, base_im_h, base_batch_size)]
|
||||
|
||||
self.img_indices = img_indices
|
||||
self.n_samples_per_replica = num_samples_per_replica
|
||||
self.epoch = 0
|
||||
self.rank = rank
|
||||
self.num_replicas = num_replicas
|
||||
self.seed = seed
|
||||
self.batch_list = []
|
||||
self.current = 0
|
||||
indices_rank_i = self.img_indices[self.rank:len(self.img_indices):
|
||||
self.num_replicas]
|
||||
while self.current < self.n_samples_per_replica:
|
||||
curr_w, curr_h, curr_bsz = random.choice(self.img_batch_pairs)
|
||||
|
||||
end_index = min(self.current + curr_bsz,
|
||||
self.n_samples_per_replica)
|
||||
|
||||
batch_ids = indices_rank_i[self.current:end_index]
|
||||
n_batch_samples = len(batch_ids)
|
||||
if n_batch_samples != curr_bsz:
|
||||
batch_ids += indices_rank_i[:(curr_bsz - n_batch_samples)]
|
||||
self.current += curr_bsz
|
||||
|
||||
if len(batch_ids) > 0:
|
||||
batch = [curr_w, curr_h, len(batch_ids)]
|
||||
self.batch_list.append(batch)
|
||||
self.length = len(self.batch_list)
|
||||
|
||||
def __iter__(self):
|
||||
if self.shuffle:
|
||||
if self.seed is not None:
|
||||
random.seed(self.seed)
|
||||
else:
|
||||
random.seed(self.epoch)
|
||||
random.shuffle(self.img_indices)
|
||||
random.shuffle(self.img_batch_pairs)
|
||||
indices_rank_i = self.img_indices[self.rank:len(self.img_indices):
|
||||
self.num_replicas]
|
||||
else:
|
||||
indices_rank_i = self.img_indices[self.rank:len(self.img_indices):
|
||||
self.num_replicas]
|
||||
|
||||
start_index = 0
|
||||
for batch_tuple in self.batch_list:
|
||||
curr_w, curr_h, curr_bsz = batch_tuple
|
||||
end_index = min(start_index + curr_bsz, self.n_samples_per_replica)
|
||||
batch_ids = indices_rank_i[start_index:end_index]
|
||||
n_batch_samples = len(batch_ids)
|
||||
if n_batch_samples != curr_bsz:
|
||||
batch_ids += indices_rank_i[:(curr_bsz - n_batch_samples)]
|
||||
start_index += curr_bsz
|
||||
|
||||
if len(batch_ids) > 0:
|
||||
batch = [(curr_w, curr_h, b_id) for b_id in batch_ids]
|
||||
yield batch
|
||||
|
||||
def set_epoch(self, epoch: int):
|
||||
self.epoch = epoch
|
||||
|
||||
def __len__(self):
|
||||
return self.length
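# Quick sanity check (illustration only) of the batch-size scaling above: with
# scales=[160, 192, 224, 288, 320] and first_bs=500, as in the PPLCNetV2_base
# config, the per-batch pixel budget stays roughly constant:
#   base_elements = 160 * 160 * 500              # 12,800,000
#   int(max(1, base_elements / (224 * 224)))     # -> 255 images per batch at 224
#   int(max(1, base_elements / (320 * 320)))     # -> 125 images per batch at 320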
|
|
@ -25,7 +25,7 @@ from .common_dataset import CommonDataset
|
|||
|
||||
|
||||
class MultiLabelDataset(CommonDataset):
|
||||
def _load_anno(self):
|
||||
def _load_anno(self, label_ratio=False):
|
||||
assert os.path.exists(self._cls_path)
|
||||
assert os.path.exists(self._img_root)
|
||||
self.images = []
|
||||
|
@ -41,6 +41,8 @@ class MultiLabelDataset(CommonDataset):
|
|||
|
||||
self.labels.append(labels)
|
||||
assert os.path.exists(self.images[-1])
|
||||
if label_ratio:
|
||||
return np.array(self.labels).mean(0).astype("float32")
|
||||
|
||||
def __getitem__(self, idx):
|
||||
try:
|
||||
|
@ -50,7 +52,10 @@ class MultiLabelDataset(CommonDataset):
|
|||
img = transform(img, self._transform_ops)
|
||||
img = img.transpose((2, 0, 1))
|
||||
label = np.array(self.labels[idx]).astype("float32")
|
||||
return (img, label)
|
||||
if self.label_ratio is not None:
|
||||
return (img, np.array([label, self.label_ratio]))
|
||||
else:
|
||||
return (img, label)
|
||||
|
||||
except Exception as ex:
|
||||
logger.error("Exception occured when parse line: {} with msg: {}".
|
||||
|
|
|
@ -14,9 +14,10 @@
|
|||
import copy
|
||||
import importlib
|
||||
|
||||
from . import topk
|
||||
from . import topk, threshoutput
|
||||
|
||||
from .topk import Topk, MultiLabelTopk
|
||||
from .threshoutput import ThreshOutput
|
||||
|
||||
|
||||
def build_postprocess(config):
|
||||
|
|
|
@ -0,0 +1,36 @@
|
|||
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import paddle.nn.functional as F
|
||||
|
||||
|
||||
class ThreshOutput(object):
|
||||
def __init__(self, threshold, label_0="0", label_1="1"):
|
||||
self.threshold = threshold
|
||||
self.label_0 = label_0
|
||||
self.label_1 = label_1
|
||||
|
||||
def __call__(self, x, file_names=None):
|
||||
y = []
|
||||
x = F.softmax(x, axis=-1).numpy()
|
||||
for idx, probs in enumerate(x):
|
||||
score = probs[1]
|
||||
if score < self.threshold:
|
||||
result = {"class_ids": [0], "scores": [1 - score], "label_names": [self.label_0]}
|
||||
else:
|
||||
result = {"class_ids": [1], "scores": [score], "label_names": [self.label_1]}
|
||||
if file_names is not None:
|
||||
result["file_name"] = file_names[idx]
|
||||
y.append(result)
|
||||
return y
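# Usage sketch (illustration only; file names are placeholders): with the PULC
# person config values, the class-1 softmax score alone decides the label.
if __name__ == "__main__":
    import paddle

    post = ThreshOutput(threshold=0.9, label_0="nobody", label_1="someone")
    logits = paddle.to_tensor([[0.2, 4.0], [3.0, 0.1]])
    for res in post(logits, file_names=["a.jpg", "b.jpg"]):
        print(res)
    # Sample 1: p(class 1) ~= 0.98 >= 0.9 -> "someone" with score ~0.98.
    # Sample 2: p(class 1) ~= 0.05 <  0.9 -> "nobody" with score 1 - 0.05.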
|
|
@ -37,11 +37,14 @@ from ppcls.data.preprocess.ops.operators import RandomHorizontalFlip
|
|||
from ppcls.data.preprocess.ops.operators import CropWithPadding
|
||||
from ppcls.data.preprocess.ops.operators import RandomInterpolationAugment
|
||||
from ppcls.data.preprocess.ops.operators import ColorJitter
|
||||
from ppcls.data.preprocess.ops.operators import RandomCropImage
|
||||
from ppcls.data.preprocess.ops.operators import Padv2
|
||||
|
||||
from ppcls.data.preprocess.batch_ops.batch_operators import MixupOperator, CutmixOperator, OpSampler, FmixOperator
|
||||
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
import random
|
||||
|
||||
|
||||
def transform(data, ops=[]):
|
||||
|
@ -92,16 +95,16 @@ class RandAugment(RawRandAugment):
|
|||
class TimmAutoAugment(RawTimmAutoAugment):
|
||||
""" TimmAutoAugment wrapper to auto fit different img tyeps. """
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
def __init__(self, prob=1.0, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.prob = prob
|
||||
|
||||
def __call__(self, img):
|
||||
if not isinstance(img, Image.Image):
|
||||
img = np.ascontiguousarray(img)
|
||||
img = Image.fromarray(img)
|
||||
|
||||
img = super().__call__(img)
|
||||
|
||||
if random.random() < self.prob:
|
||||
img = super().__call__(img)
|
||||
if isinstance(img, Image.Image):
|
||||
img = np.asarray(img)
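# Sketch (illustration only): prob gates whether the wrapped augmentation fires
# at all, which lets the PULC search configs sweep it (search_values
# [0.0, 0.1, 0.5]); prob=0.0 leaves images untouched.
#
#   aug = TimmAutoAugment(prob=0.5, config_str="rand-m9-mstd0.5-inc1",
#                         interpolation="bicubic", img_size=224)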
|
||||
|
||||
|
|
|
@ -272,6 +272,105 @@ class CropImage(object):
|
|||
return img[h_start:h_end, w_start:w_end, :]
|
||||
|
||||
|
||||
class Padv2(object):
|
||||
def __init__(self,
|
||||
size=None,
|
||||
size_divisor=32,
|
||||
pad_mode=0,
|
||||
offsets=None,
|
||||
fill_value=(127.5, 127.5, 127.5)):
|
||||
"""
|
||||
Pad image to a specified size or multiple of size_divisor.
|
||||
Args:
|
||||
size (int, list): image target size, if None, pad to multiple of size_divisor, default None
|
||||
size_divisor (int): size divisor, default 32
|
||||
pad_mode (int): pad mode, currently only supports four modes [-1, 0, 1, 2]. if -1, use specified offsets
|
||||
if 0, only pad to right and bottom. if 1, pad according to center. if 2, only pad left and top
|
||||
offsets (list): [offset_x, offset_y], specify offset while padding, only supported pad_mode=-1
|
||||
fill_value (tuple): rgb value of pad area, default (127.5, 127.5, 127.5)
|
||||
"""
|
||||
|
||||
if not isinstance(size, (int, list)):
|
||||
raise TypeError(
|
||||
"Type of target_size is invalid when random_size is True. \
|
||||
Must be List, now is {}".format(type(size)))
|
||||
|
||||
if isinstance(size, int):
|
||||
size = [size, size]
|
||||
|
||||
assert pad_mode in [
|
||||
-1, 0, 1, 2
|
||||
], 'currently only supports four modes [-1, 0, 1, 2]'
|
||||
if pad_mode == -1:
|
||||
assert offsets, 'if pad_mode is -1, offsets should not be None'
|
||||
|
||||
self.size = size
|
||||
self.size_divisor = size_divisor
|
||||
self.pad_mode = pad_mode
|
||||
self.fill_value = fill_value
|
||||
self.offsets = offsets
|
||||
|
||||
def apply_image(self, image, offsets, im_size, size):
|
||||
x, y = offsets
|
||||
im_h, im_w = im_size
|
||||
h, w = size
|
||||
canvas = np.ones((h, w, 3), dtype=np.float32)
|
||||
canvas *= np.array(self.fill_value, dtype=np.float32)
|
||||
canvas[y:y + im_h, x:x + im_w, :] = image.astype(np.float32)
|
||||
return canvas
|
||||
|
||||
def __call__(self, img):
|
||||
im_h, im_w = img.shape[:2]
|
||||
if self.size:
|
||||
w, h = self.size
|
||||
assert (
|
||||
im_h <= h and im_w <= w
|
||||
), '(h, w) of target size should be greater than (im_h, im_w)'
|
||||
else:
|
||||
h = int(np.ceil(im_h / self.size_divisor) * self.size_divisor)
|
||||
w = int(np.ceil(im_w / self.size_divisor) * self.size_divisor)
|
||||
|
||||
if h == im_h and w == im_w:
|
||||
return img.astype(np.float32)
|
||||
|
||||
if self.pad_mode == -1:
|
||||
offset_x, offset_y = self.offsets
|
||||
elif self.pad_mode == 0:
|
||||
offset_y, offset_x = 0, 0
|
||||
elif self.pad_mode == 1:
|
||||
offset_y, offset_x = (h - im_h) // 2, (w - im_w) // 2
|
||||
else:
|
||||
offset_y, offset_x = h - im_h, w - im_w
|
||||
|
||||
offsets, im_size, size = [offset_x, offset_y], [im_h, im_w], [h, w]
|
||||
|
||||
return self.apply_image(img, offsets, im_size, size)
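# Usage sketch (illustration only). Note that __init__ currently requires an
# int or list size even though the docstring mentions a None default.
#
#   pad = Padv2(size=[256, 256], pad_mode=1)
#   out = pad(np.zeros((200, 150, 3), dtype=np.uint8))  # (h, w, c) input
#   out.shape  # -> (256, 256, 3); image centered on a 127.5-filled canvas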
|
||||
|
||||
|
||||
class RandomCropImage(object):
|
||||
"""Random crop image only
|
||||
"""
|
||||
|
||||
def __init__(self, size):
|
||||
super(RandomCropImage, self).__init__()
|
||||
if isinstance(size, int):
|
||||
size = [size, size]
|
||||
self.size = size
|
||||
|
||||
def __call__(self, img):
|
||||
|
||||
h, w = img.shape[:2]
|
||||
tw, th = self.size
|
||||
i = random.randint(0, h - th)
|
||||
j = random.randint(0, w - tw)
|
||||
|
||||
img = img[i:i + th, j:j + tw, :]
|
||||
# NOTE: hard-coded sanity check assuming size == [192, 256]; looks like leftover debug validation
if img.shape[0] != 256 or img.shape[1] != 192:
|
||||
raise ValueError('sample: ', h, w, i, j, th, tw, img.shape)
|
||||
|
||||
return img
|
||||
|
||||
|
||||
class RandCropImage(object):
|
||||
""" random crop image """
|
||||
|
||||
|
|
|
@ -190,7 +190,7 @@ class Engine(object):
|
|||
self.eval_metric_func = None
|
||||
|
||||
# build model
|
||||
self.model = build_model(self.config)
|
||||
self.model = build_model(self.config, self.mode)
|
||||
# set @to_static for benchmark, skip this by default.
|
||||
apply_to_static(self.config, self.model)
|
||||
|
||||
|
@ -240,7 +240,7 @@ class Engine(object):
|
|||
|
||||
self.amp_eval = self.config["AMP"].get("use_fp16_test", False)
|
||||
# TODO(gaotingquan): Paddle does not yet support FP32 evaluation when training with AMP O2
|
||||
if self.config["Global"].get(
|
||||
if self.mode == "train" and self.config["Global"].get(
|
||||
"eval_during_train",
|
||||
True) and self.amp_level == "O2" and self.amp_eval == False:
|
||||
msg = "PaddlePaddle only support FP16 evaluation when training with AMP O2 now. "
|
||||
|
@ -248,13 +248,10 @@ class Engine(object):
|
|||
self.config["AMP"]["use_fp16_test"] = True
|
||||
self.amp_eval = True
|
||||
|
||||
# TODO(gaotingquan): to compatible with Paddle 2.2, 2.3, develop and so on.
|
||||
paddle_version = sum([
|
||||
int(x) * 10**(2 - i)
|
||||
for i, x in enumerate(paddle.__version__.split(".")[:3])
|
||||
])
|
||||
# TODO(gaotingquan): to be compatible with different versions of Paddle
|
||||
paddle_version = paddle.__version__[:3]
|
||||
# paddle version < 2.3.0 and not develop
|
||||
if paddle_version < 230 and paddle_version != 0:
|
||||
if paddle_version not in ["2.3", "0.0"]:
|
||||
if self.mode == "train":
|
||||
self.model, self.optimizer = paddle.amp.decorate(
|
||||
models=self.model,
|
||||
|
@ -273,10 +270,11 @@ class Engine(object):
|
|||
save_dtype='float32')
|
||||
# paddle version >= 2.3.0 or develop
|
||||
else:
|
||||
self.model = paddle.amp.decorate(
|
||||
models=self.model,
|
||||
level=self.amp_level,
|
||||
save_dtype='float32')
|
||||
if self.mode == "train" or self.amp_eval:
|
||||
self.model = paddle.amp.decorate(
|
||||
models=self.model,
|
||||
level=self.amp_level,
|
||||
save_dtype='float32')
|
||||
|
||||
if self.mode == "train" and len(self.train_loss_func.parameters(
|
||||
)) > 0:
|
||||
|
@ -316,7 +314,7 @@ class Engine(object):
|
|||
print_batch_step = self.config['Global']['print_batch_step']
|
||||
save_interval = self.config["Global"]["save_interval"]
|
||||
best_metric = {
|
||||
"metric": 0.0,
|
||||
"metric": -1.0,
|
||||
"epoch": 0,
|
||||
}
|
||||
# key:
|
||||
|
@ -348,18 +346,18 @@ class Engine(object):
|
|||
|
||||
if self.use_dali:
|
||||
self.train_dataloader.reset()
|
||||
metric_msg = ", ".join([
|
||||
"{}: {:.5f}".format(key, self.output_info[key].avg)
|
||||
for key in self.output_info
|
||||
])
|
||||
metric_msg = ", ".join(
|
||||
[self.output_info[key].avg_info for key in self.output_info])
|
||||
logger.info("[Train][Epoch {}/{}][Avg]{}".format(
|
||||
epoch_id, self.config["Global"]["epochs"], metric_msg))
|
||||
self.output_info.clear()
|
||||
|
||||
# eval model and save model if possible
|
||||
start_eval_epoch = self.config["Global"].get("start_eval_epoch",
|
||||
0) - 1
|
||||
if self.config["Global"][
|
||||
"eval_during_train"] and epoch_id % self.config["Global"][
|
||||
"eval_interval"] == 0:
|
||||
"eval_interval"] == 0 and epoch_id > start_eval_epoch:
|
||||
acc = self.eval(epoch_id)
|
||||
if acc > best_metric["metric"]:
|
||||
best_metric["metric"] = acc
|
||||
|
@ -371,7 +369,8 @@ class Engine(object):
|
|||
self.output_dir,
|
||||
model_name=self.config["Arch"]["name"],
|
||||
prefix="best_model",
|
||||
loss=self.train_loss_func)
|
||||
loss=self.train_loss_func,
|
||||
save_student_model=True)
|
||||
logger.info("[Eval][Epoch {}][best metric: {}]".format(
|
||||
epoch_id, best_metric["metric"]))
|
||||
logger.scaler(
|
||||
|
@ -435,7 +434,17 @@ class Engine(object):
|
|||
image_file_list.append(image_file)
|
||||
if len(batch_data) >= batch_size or idx == len(image_list) - 1:
|
||||
batch_tensor = paddle.to_tensor(batch_data)
|
||||
out = self.model(batch_tensor)
|
||||
|
||||
if self.amp and self.amp_eval:
|
||||
with paddle.amp.auto_cast(
|
||||
custom_black_list={
|
||||
"flatten_contiguous_range", "greater_than"
|
||||
},
|
||||
level=self.amp_level):
|
||||
out = self.model(batch_tensor)
|
||||
else:
|
||||
out = self.model(batch_tensor)
|
||||
|
||||
if isinstance(out, list):
|
||||
out = out[0]
|
||||
if isinstance(out, dict) and "logits" in out:
|
||||
|
@ -456,26 +465,31 @@ class Engine(object):
|
|||
self.config["Global"]["pretrained_model"])
|
||||
|
||||
model.eval()
|
||||
|
||||
# for rep nets
|
||||
for layer in self.model.sublayers():
|
||||
if hasattr(layer, "rep"):
|
||||
layer.rep()
|
||||
|
||||
save_path = os.path.join(self.config["Global"]["save_inference_dir"],
|
||||
"inference")
|
||||
if model.quanter:
|
||||
model.quanter.save_quantized_model(
|
||||
model.base_model,
|
||||
save_path,
|
||||
input_spec=[
|
||||
paddle.static.InputSpec(
|
||||
shape=[None] + self.config["Global"]["image_shape"],
|
||||
dtype='float32')
|
||||
])
|
||||
|
||||
model = paddle.jit.to_static(
|
||||
model,
|
||||
input_spec=[
|
||||
paddle.static.InputSpec(
|
||||
shape=[None] + self.config["Global"]["image_shape"],
|
||||
dtype='float32')
|
||||
])
|
||||
if hasattr(model.base_model,
|
||||
"quanter") and model.base_model.quanter is not None:
|
||||
model.base_model.quanter.save_quantized_model(model,
|
||||
save_path + "_int8")
|
||||
else:
|
||||
model = paddle.jit.to_static(
|
||||
model,
|
||||
input_spec=[
|
||||
paddle.static.InputSpec(
|
||||
shape=[None] + self.config["Global"]["image_shape"],
|
||||
dtype='float32')
|
||||
])
|
||||
paddle.jit.save(model, save_path)
|
||||
logger.info(
|
||||
f"Export succeeded! The inference model exported has been saved in \"{self.config['Global']['save_inference_dir']}\"."
|
||||
)
|
||||
|
||||
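# A minimal, hedged sketch of the non-quantized export path above, assuming a
# plain dygraph nn.Layer and the usual [3, 224, 224] image_shape.
import paddle

def export_inference_model(model, save_path, image_shape=(3, 224, 224)):
    # convert dygraph to static graph with a batch-agnostic input spec
    static_model = paddle.jit.to_static(
        model,
        input_spec=[paddle.static.InputSpec(
            shape=[None] + list(image_shape), dtype="float32")])
    # writes save_path + ".pdmodel" / ".pdiparams" for the inference engine
    paddle.jit.save(static_model, save_path)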
|
||||
class ExportModel(TheseusLayer):
|
||||
|
|
|
@ -23,6 +23,8 @@ from ppcls.utils import logger
|
|||
|
||||
|
||||
def classification_eval(engine, epoch_id=0):
|
||||
if hasattr(engine.eval_metric_func, "reset"):
|
||||
engine.eval_metric_func.reset()
|
||||
output_info = dict()
|
||||
time_info = {
|
||||
"batch_cost": AverageMeter(
|
||||
|
@ -80,6 +82,7 @@ def classification_eval(engine, epoch_id=0):
|
|||
# gather Tensor when distributed
|
||||
if paddle.distributed.get_world_size() > 1:
|
||||
label_list = []
|
||||
|
||||
paddle.distributed.all_gather(label_list, batch[1])
|
||||
labels = paddle.concat(label_list, 0)
|
||||
|
||||
|
@ -121,18 +124,10 @@ def classification_eval(engine, epoch_id=0):
|
|||
output_info[key] = AverageMeter(key, '7.5f')
|
||||
output_info[key].update(loss_dict[key].numpy()[0],
|
||||
current_samples)
|
||||
|
||||
# calc metric
|
||||
if engine.eval_metric_func is not None:
|
||||
metric_dict = engine.eval_metric_func(preds, labels)
|
||||
for key in metric_dict:
|
||||
if metric_key is None:
|
||||
metric_key = key
|
||||
if key not in output_info:
|
||||
output_info[key] = AverageMeter(key, '7.5f')
|
||||
|
||||
output_info[key].update(metric_dict[key].numpy()[0],
|
||||
current_samples)
|
||||
|
||||
engine.eval_metric_func(preds, labels)
|
||||
time_info["batch_cost"].update(time.time() - tic)
|
||||
|
||||
if iter_id % print_batch_step == 0:
|
||||
|
@ -144,10 +139,14 @@ def classification_eval(engine, epoch_id=0):
|
|||
ips_msg = "ips: {:.5f} images/sec".format(
|
||||
batch_size / time_info["batch_cost"].avg)
|
||||
|
||||
metric_msg = ", ".join([
|
||||
"{}: {:.5f}".format(key, output_info[key].val)
|
||||
for key in output_info
|
||||
])
|
||||
if "ATTRMetric" in engine.config["Metric"]["Eval"][0]:
|
||||
metric_msg = ""
|
||||
else:
|
||||
metric_msg = ", ".join([
|
||||
"{}: {:.5f}".format(key, output_info[key].val)
|
||||
for key in output_info
|
||||
])
|
||||
metric_msg += ", {}".format(engine.eval_metric_func.avg_info)
|
||||
logger.info("[Eval][Epoch {}][Iter: {}/{}]{}, {}, {}".format(
|
||||
epoch_id, iter_id,
|
||||
len(engine.eval_dataloader), metric_msg, time_msg, ips_msg))
|
||||
|
@ -155,13 +154,29 @@ def classification_eval(engine, epoch_id=0):
|
|||
tic = time.time()
|
||||
if engine.use_dali:
|
||||
engine.eval_dataloader.reset()
|
||||
metric_msg = ", ".join([
|
||||
"{}: {:.5f}".format(key, output_info[key].avg) for key in output_info
|
||||
])
|
||||
logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg))
|
||||
|
||||
# do not try to save best eval.model
|
||||
if engine.eval_metric_func is None:
|
||||
return -1
|
||||
# return 1st metric in the dict
|
||||
return output_info[metric_key].avg
|
||||
if "ATTRMetric" in engine.config["Metric"]["Eval"][0]:
|
||||
metric_msg = ", ".join([
|
||||
"evalres: ma: {:.5f} label_f1: {:.5f} label_pos_recall: {:.5f} label_neg_recall: {:.5f} instance_f1: {:.5f} instance_acc: {:.5f} instance_prec: {:.5f} instance_recall: {:.5f}".
|
||||
format(*engine.eval_metric_func.attr_res())
|
||||
])
|
||||
logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg))
|
||||
|
||||
# do not try to save best eval.model
|
||||
if engine.eval_metric_func is None:
|
||||
return -1
|
||||
# return 1st metric in the dict
|
||||
return engine.eval_metric_func.attr_res()[0]
|
||||
else:
|
||||
metric_msg = ", ".join([
|
||||
"{}: {:.5f}".format(key, output_info[key].avg)
|
||||
for key in output_info
|
||||
])
|
||||
metric_msg += ", {}".format(engine.eval_metric_func.avg_info)
|
||||
logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg))
|
||||
|
||||
# do not try to save best eval.model
|
||||
if engine.eval_metric_func is None:
|
||||
return -1
|
||||
# return 1st metric in the dict
|
||||
return engine.eval_metric_func.avg
|
||||
|
|
|
@ -16,6 +16,9 @@ from __future__ import division
|
|||
from __future__ import print_function
|
||||
|
||||
import platform
|
||||
from typing import Optional
|
||||
|
||||
import numpy as np
|
||||
import paddle
|
||||
from ppcls.utils import logger
|
||||
|
||||
|
@ -48,34 +51,67 @@ def retrieval_eval(engine, epoch_id=0):
|
|||
if engine.eval_loss_func is None:
|
||||
metric_dict = {metric_key: 0.}
|
||||
else:
|
||||
reranking_flag = engine.config['Global'].get('re_ranking', False)
|
||||
logger.info(f"re_ranking={reranking_flag}")
|
||||
metric_dict = dict()
|
||||
for block_idx, block_fea in enumerate(fea_blocks):
|
||||
similarity_matrix = paddle.matmul(
|
||||
block_fea, gallery_feas, transpose_y=True)
|
||||
if query_query_id is not None:
|
||||
query_id_block = query_id_blocks[block_idx]
|
||||
query_id_mask = (query_id_block != gallery_unique_id.t())
|
||||
if reranking_flag:
|
||||
# switch metric sorting to ascending (smaller distance = more similar)
|
||||
for i in range(len(engine.eval_metric_func.metric_func_list)):
|
||||
if hasattr(engine.eval_metric_func.metric_func_list[i], 'descending') \
|
||||
and engine.eval_metric_func.metric_func_list[i].descending is True:
|
||||
engine.eval_metric_func.metric_func_list[
|
||||
i].descending = False
|
||||
logger.warning(
|
||||
f"re_ranking=True,{engine.eval_metric_func.metric_func_list[i].__class__.__name__}.descending has been set to False"
|
||||
)
|
||||
|
||||
image_id_block = image_id_blocks[block_idx]
|
||||
image_id_mask = (image_id_block != gallery_img_id.t())
|
||||
# compute distance matrix (the smaller the value, the more similar)
|
||||
distmat = re_ranking(
|
||||
query_feas, gallery_feas, k1=20, k2=6, lambda_value=0.3)
|
||||
|
||||
keep_mask = paddle.logical_or(query_id_mask, image_id_mask)
|
||||
similarity_matrix = similarity_matrix * keep_mask.astype(
|
||||
"float32")
|
||||
else:
|
||||
keep_mask = None
|
||||
# compute keep mask
|
||||
query_id_mask = (query_query_id != gallery_unique_id.t())
|
||||
image_id_mask = (query_img_id != gallery_img_id.t())
|
||||
keep_mask = paddle.logical_or(query_id_mask, image_id_mask)
|
||||
|
||||
metric_tmp = engine.eval_metric_func(similarity_matrix,
|
||||
image_id_blocks[block_idx],
|
||||
# set a huge (1e20) distance for pairs filtered out by keep_mask
|
||||
distmat = distmat * keep_mask.astype("float32")
|
||||
inf_mat = (paddle.logical_not(keep_mask).astype("float32")) * 1e20
|
||||
distmat = distmat + inf_mat
|
||||
|
||||
# compute metric
|
||||
metric_tmp = engine.eval_metric_func(distmat, query_img_id,
|
||||
gallery_img_id, keep_mask)
|
||||
|
||||
for key in metric_tmp:
|
||||
if key not in metric_dict:
|
||||
metric_dict[key] = metric_tmp[key] * block_fea.shape[
|
||||
0] / len(query_feas)
|
||||
metric_dict[key] = metric_tmp[key]
|
||||
else:
|
||||
for block_idx, block_fea in enumerate(fea_blocks):
|
||||
similarity_matrix = paddle.matmul(
|
||||
block_fea, gallery_feas, transpose_y=True) # [n,m]
|
||||
if query_query_id is not None:
|
||||
query_id_block = query_id_blocks[block_idx]
|
||||
query_id_mask = (query_id_block != gallery_unique_id.t())
|
||||
|
||||
image_id_block = image_id_blocks[block_idx]
|
||||
image_id_mask = (image_id_block != gallery_img_id.t())
|
||||
|
||||
keep_mask = paddle.logical_or(query_id_mask, image_id_mask)
|
||||
similarity_matrix = similarity_matrix * keep_mask.astype(
|
||||
"float32")
|
||||
else:
|
||||
metric_dict[key] += metric_tmp[key] * block_fea.shape[
|
||||
0] / len(query_feas)
|
||||
keep_mask = None
|
||||
|
||||
metric_tmp = engine.eval_metric_func(
|
||||
similarity_matrix, image_id_blocks[block_idx],
|
||||
gallery_img_id, keep_mask)
|
||||
|
||||
for key in metric_tmp:
|
||||
if key not in metric_dict:
|
||||
metric_dict[key] = metric_tmp[key] * block_fea.shape[
|
||||
0] / len(query_feas)
|
||||
else:
|
||||
metric_dict[key] += metric_tmp[key] * block_fea.shape[
|
||||
0] / len(query_feas)
|
||||
|
||||
metric_info_list = []
|
||||
for key in metric_dict:
|
||||
|
@ -185,3 +221,109 @@ def cal_feature(engine, name='gallery'):
|
|||
logger.info("Build {} done, all feat shape: {}, begin to eval..".format(
|
||||
name, all_feas.shape))
|
||||
return all_feas, all_img_id, all_unique_id
|
||||
|
||||
|
||||
def re_ranking(query_feas: paddle.Tensor,
|
||||
gallery_feas: paddle.Tensor,
|
||||
k1: int=20,
|
||||
k2: int=6,
|
||||
lambda_value: float=0.5,
|
||||
local_distmat: Optional[np.ndarray]=None,
|
||||
only_local: bool=False) -> paddle.Tensor:
|
||||
"""re-ranking, most computed with numpy
|
||||
|
||||
code heavily based on
|
||||
https://github.com/michuanhaohao/reid-strong-baseline/blob/3da7e6f03164a92e696cb6da059b1cd771b0346d/utils/reid_metric.py
|
||||
|
||||
Args:
|
||||
query_feas (paddle.Tensor): query features, [num_query, num_features]
|
||||
gallery_feas (paddle.Tensor): gallery features, [num_gallery, num_features]
|
||||
k1 (int, optional): k1. Defaults to 20.
|
||||
k2 (int, optional): k2. Defaults to 6.
|
||||
lambda_value (float, optional): weight of the original distance in the final blend. Defaults to 0.5.
|
||||
local_distmat (Optional[np.ndarray], optional): local_distmat. Defaults to None.
|
||||
only_local (bool, optional): only_local. Defaults to False.
|
||||
|
||||
Returns:
|
||||
paddle.Tensor: final_dist matrix after re-ranking, [num_query, num_gallery]
|
||||
"""
|
||||
query_num = query_feas.shape[0]
|
||||
all_num = query_num + gallery_feas.shape[0]
|
||||
if only_local:
|
||||
original_dist = local_distmat
|
||||
else:
|
||||
feat = paddle.concat([query_feas, gallery_feas])
|
||||
logger.info('using GPU to compute original distance')
|
||||
|
||||
# L2 distance
|
||||
distmat = paddle.pow(feat, 2).sum(axis=1, keepdim=True).expand([all_num, all_num]) + \
|
||||
paddle.pow(feat, 2).sum(axis=1, keepdim=True).expand([all_num, all_num]).t()
|
||||
distmat = distmat.addmm(x=feat, y=feat.t(), alpha=-2.0, beta=1.0)
|
||||
|
||||
original_dist = distmat.cpu().numpy()
|
||||
del feat
|
||||
if local_distmat is not None:
|
||||
original_dist = original_dist + local_distmat
|
||||
|
||||
gallery_num = original_dist.shape[0]
|
||||
original_dist = np.transpose(original_dist / np.max(original_dist, axis=0))
|
||||
V = np.zeros_like(original_dist).astype(np.float16)
|
||||
initial_rank = np.argsort(original_dist).astype(np.int32)
|
||||
logger.info('starting re_ranking')
|
||||
for i in range(all_num):
|
||||
# k-reciprocal neighbors
|
||||
forward_k_neigh_index = initial_rank[i, :k1 + 1]
|
||||
backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
|
||||
fi = np.where(backward_k_neigh_index == i)[0]
|
||||
k_reciprocal_index = forward_k_neigh_index[fi]
|
||||
k_reciprocal_expansion_index = k_reciprocal_index
|
||||
for j in range(len(k_reciprocal_index)):
|
||||
candidate = k_reciprocal_index[j]
|
||||
candidate_forward_k_neigh_index = initial_rank[candidate, :int(
|
||||
np.around(k1 / 2)) + 1]
|
||||
candidate_backward_k_neigh_index = initial_rank[
|
||||
candidate_forward_k_neigh_index, :int(np.around(k1 / 2)) + 1]
|
||||
fi_candidate = np.where(
|
||||
candidate_backward_k_neigh_index == candidate)[0]
|
||||
candidate_k_reciprocal_index = candidate_forward_k_neigh_index[
|
||||
fi_candidate]
|
||||
if len(
|
||||
np.intersect1d(candidate_k_reciprocal_index,
|
||||
k_reciprocal_index)) > 2 / 3 * len(
|
||||
candidate_k_reciprocal_index):
|
||||
k_reciprocal_expansion_index = np.append(
|
||||
k_reciprocal_expansion_index, candidate_k_reciprocal_index)
|
||||
|
||||
k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
|
||||
weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])
|
||||
V[i, k_reciprocal_expansion_index] = weight / np.sum(weight)
|
||||
original_dist = original_dist[:query_num, ]
|
||||
if k2 != 1:
|
||||
V_qe = np.zeros_like(V, dtype=np.float16)
|
||||
for i in range(all_num):
|
||||
V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)
|
||||
V = V_qe
|
||||
del V_qe
|
||||
del initial_rank
|
||||
invIndex = []
|
||||
for i in range(gallery_num):
|
||||
invIndex.append(np.where(V[:, i] != 0)[0])
|
||||
|
||||
jaccard_dist = np.zeros_like(original_dist, dtype=np.float16)
|
||||
for i in range(query_num):
|
||||
temp_min = np.zeros(shape=[1, gallery_num], dtype=np.float16)
|
||||
indNonZero = np.where(V[i, :] != 0)[0]
|
||||
indImages = [invIndex[ind] for ind in indNonZero]
|
||||
for j in range(len(indNonZero)):
|
||||
temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(
|
||||
V[i, indNonZero[j]], V[indImages[j], indNonZero[j]])
|
||||
jaccard_dist[i] = 1 - temp_min / (2 - temp_min)
|
||||
|
||||
final_dist = jaccard_dist * (1 - lambda_value
|
||||
) + original_dist * lambda_value
|
||||
del original_dist
|
||||
del V
|
||||
del jaccard_dist
|
||||
final_dist = final_dist[:query_num, query_num:]
|
||||
final_dist = paddle.to_tensor(final_dist)
|
||||
return final_dist
|
||||
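# Hedged usage sketch for re_ranking: the output is a distance matrix (smaller
# means more similar), which is why the descending flags are flipped above.
import paddle
q = paddle.randn([16, 256])    # 16 query features, illustrative sizes
g = paddle.randn([100, 256])   # 100 gallery features
dist = re_ranking(q, g, k1=20, k2=6, lambda_value=0.3)
print(dist.shape)              # [16, 100]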
|
|
|
@ -23,6 +23,7 @@ from .distillationloss import DistillationDMLLoss
|
|||
from .distillationloss import DistillationDistanceLoss
|
||||
from .distillationloss import DistillationRKDLoss
|
||||
from .distillationloss import DistillationKLDivLoss
|
||||
from .distillationloss import DistillationDKDLoss
|
||||
from .multilabelloss import MultiLabelLoss
|
||||
from .afdloss import AFDLoss
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@ from .dmlloss import DMLLoss
|
|||
from .distanceloss import DistanceLoss
|
||||
from .rkdloss import RKdAngle, RkdDistance
|
||||
from .kldivloss import KLDivLoss
|
||||
from .dkdloss import DKDLoss
|
||||
|
||||
|
||||
class DistillationCELoss(CELoss):
|
||||
|
@ -204,3 +205,33 @@ class DistillationKLDivLoss(KLDivLoss):
|
|||
for key in loss:
|
||||
loss_dict["{}_{}_{}".format(key, pair[0], pair[1])] = loss[key]
|
||||
return loss_dict
|
||||
|
||||
|
||||
class DistillationDKDLoss(DKDLoss):
|
||||
"""
|
||||
DistillationDKDLoss
|
||||
"""
|
||||
|
||||
def __init__(self,
|
||||
model_name_pairs=[],
|
||||
key=None,
|
||||
temperature=1.0,
|
||||
alpha=1.0,
|
||||
beta=1.0,
|
||||
name="loss_dkd"):
|
||||
super().__init__(temperature=temperature, alpha=alpha, beta=beta)
|
||||
self.key = key
|
||||
self.model_name_pairs = model_name_pairs
|
||||
self.name = name
|
||||
|
||||
def forward(self, predicts, batch):
|
||||
loss_dict = dict()
|
||||
for idx, pair in enumerate(self.model_name_pairs):
|
||||
out1 = predicts[pair[0]]
|
||||
out2 = predicts[pair[1]]
|
||||
if self.key is not None:
|
||||
out1 = out1[self.key]
|
||||
out2 = out2[self.key]
|
||||
loss = super().forward(out1, out2, batch)
|
||||
loss_dict[f"{self.name}_{pair[0]}_{pair[1]}"] = loss
|
||||
return loss_dict
|
||||
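# Hedged illustration of model_name_pairs: predicts maps sub-model names to
# logits; "Student"/"Teacher" are assumed names, not fixed by the class.
import paddle
dkd = DistillationDKDLoss(model_name_pairs=[["Student", "Teacher"]])
predicts = {"Student": paddle.randn([8, 1000]),
            "Teacher": paddle.randn([8, 1000])}
labels = paddle.randint(0, 1000, [8, 1])
loss_dict = dkd(predicts, labels)  # {"loss_dkd_Student_Teacher": Tensor}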
|
|
|
@ -0,0 +1,61 @@
|
|||
import paddle
|
||||
import paddle.nn as nn
|
||||
import paddle.nn.functional as F
|
||||
|
||||
|
||||
class DKDLoss(nn.Layer):
|
||||
"""
|
||||
DKDLoss
|
||||
Reference: https://arxiv.org/abs/2203.08679
|
||||
Code was heavily based on https://github.com/megvii-research/mdistiller
|
||||
"""
|
||||
|
||||
def __init__(self, temperature=1.0, alpha=1.0, beta=1.0):
|
||||
super().__init__()
|
||||
self.temperature = temperature
|
||||
self.alpha = alpha
|
||||
self.beta = beta
|
||||
|
||||
def forward(self, logits_student, logits_teacher, target):
|
||||
gt_mask = _get_gt_mask(logits_student, target)
|
||||
other_mask = 1 - gt_mask
|
||||
pred_student = F.softmax(logits_student / self.temperature, axis=1)
|
||||
pred_teacher = F.softmax(logits_teacher / self.temperature, axis=1)
|
||||
pred_student = cat_mask(pred_student, gt_mask, other_mask)
|
||||
pred_teacher = cat_mask(pred_teacher, gt_mask, other_mask)
|
||||
log_pred_student = paddle.log(pred_student)
|
||||
tckd_loss = (F.kl_div(
|
||||
log_pred_student, pred_teacher,
|
||||
reduction='sum') * (self.temperature**2) / target.shape[0])
|
||||
pred_teacher_part2 = F.softmax(
|
||||
logits_teacher / self.temperature - 1000.0 * gt_mask, axis=1)
|
||||
log_pred_student_part2 = F.log_softmax(
|
||||
logits_student / self.temperature - 1000.0 * gt_mask, axis=1)
|
||||
nckd_loss = (F.kl_div(
|
||||
log_pred_student_part2, pred_teacher_part2,
|
||||
reduction='sum') * (self.temperature**2) / target.shape[0])
|
||||
return self.alpha * tckd_loss + self.beta * nckd_loss
|
||||
|
||||
|
||||
def _get_gt_mask(logits, target):
|
||||
target = target.reshape([-1]).unsqueeze(1)
|
||||
updates = paddle.ones_like(target)
|
||||
mask = scatter(
|
||||
paddle.zeros_like(logits), target, updates.astype('float32'))
|
||||
return mask
|
||||
|
||||
|
||||
def cat_mask(t, mask1, mask2):
|
||||
t1 = (t * mask1).sum(axis=1, keepdim=True)
|
||||
t2 = (t * mask2).sum(axis=1, keepdim=True)
|
||||
rt = paddle.concat([t1, t2], axis=1)
|
||||
return rt
|
||||
|
||||
|
||||
def scatter(x, index, updates):
|
||||
i, j = index.shape
|
||||
grid_x, grid_y = paddle.meshgrid(paddle.arange(i), paddle.arange(j))
|
||||
index = paddle.stack([grid_x.flatten(), index.flatten()], axis=1)
|
||||
updates_index = paddle.stack([grid_x.flatten(), grid_y.flatten()], axis=1)
|
||||
updates = paddle.gather_nd(updates, index=updates_index)
|
||||
return paddle.scatter_nd_add(x, index, updates)
|
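# Hedged sanity check: for integer labels, _get_gt_mask above should coincide
# with a plain one-hot encoding of the target classes.
import paddle
import paddle.nn.functional as F
logits = paddle.randn([4, 10])
target = paddle.randint(0, 10, [4])
mask = _get_gt_mask(logits, target)
ref = F.one_hot(target, num_classes=10).astype("float32")
assert bool(paddle.all(mask == ref))  # both are {0,1} masks of shape [4, 10]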
|
@ -3,16 +3,29 @@ import paddle.nn as nn
|
|||
import paddle.nn.functional as F
|
||||
|
||||
|
||||
def ratio2weight(targets, ratio):
|
||||
pos_weights = targets * (1. - ratio)
|
||||
neg_weights = (1. - targets) * ratio
|
||||
weights = paddle.exp(neg_weights + pos_weights)
|
||||
|
||||
# for the RAP dataloader, target elements may be 2; with or without smoothing, some elements can be greater than 1
|
||||
weights = weights - weights * (targets > 1)
|
||||
|
||||
return weights
|
||||
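# Hedged numeric sketch of ratio2weight: with positive ratio r, positives get
# weight exp(1 - r) and negatives exp(r), up-weighting rare positive labels.
import paddle
targets = paddle.to_tensor([[1., 0.]])
ratio = paddle.to_tensor([0.1, 0.1])  # 10% positives per class
print(ratio2weight(targets, ratio))   # approx [[exp(0.9), exp(0.1)]]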
|
||||
|
||||
class MultiLabelLoss(nn.Layer):
|
||||
"""
|
||||
Multi-label loss
|
||||
"""
|
||||
|
||||
def __init__(self, epsilon=None):
|
||||
def __init__(self, epsilon=None, size_sum=False, weight_ratio=False):
|
||||
super().__init__()
|
||||
if epsilon is not None and (epsilon <= 0 or epsilon >= 1):
|
||||
epsilon = None
|
||||
self.epsilon = epsilon
|
||||
self.weight_ratio = weight_ratio
|
||||
self.size_sum = size_sum
|
||||
|
||||
def _labelsmoothing(self, target, class_num):
|
||||
if target.ndim == 1 or target.shape[-1] != class_num:
|
||||
|
@ -24,13 +37,21 @@ class MultiLabelLoss(nn.Layer):
|
|||
return soft_target
|
||||
|
||||
def _binary_crossentropy(self, input, target, class_num):
|
||||
if self.weight_ratio:
|
||||
target, label_ratio = target[:, 0, :], target[:, 1, :]
|
||||
if self.epsilon is not None:
|
||||
target = self._labelsmoothing(target, class_num)
|
||||
cost = F.binary_cross_entropy_with_logits(
|
||||
logit=input, label=target)
|
||||
else:
|
||||
cost = F.binary_cross_entropy_with_logits(
|
||||
logit=input, label=target)
|
||||
cost = F.binary_cross_entropy_with_logits(
|
||||
logit=input, label=target, reduction='none')
|
||||
|
||||
if self.weight_ratio:
|
||||
targets_mask = paddle.cast(target > 0.5, 'float32')
|
||||
weight = ratio2weight(targets_mask, paddle.to_tensor(label_ratio))
|
||||
weight = weight * (target > -1)
|
||||
cost = cost * weight
|
||||
|
||||
if self.size_sum:
|
||||
cost = cost.sum(1).mean()
|
||||
|
||||
return cost
|
||||
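# Hedged usage sketch (calls _binary_crossentropy directly since forward is
# not shown here): with weight_ratio=True, target packs labels in
# target[:, 0, :] and per-class positive ratios in target[:, 1, :].
import paddle
criterion = MultiLabelLoss(epsilon=None, size_sum=True, weight_ratio=True)
logits = paddle.randn([4, 26])
labels = paddle.randint(0, 2, [4, 1, 26]).astype("float32")
ratios = paddle.full([4, 1, 26], 0.3)
target = paddle.concat([labels, ratios], axis=1)  # shape [4, 2, 26]
loss = criterion._binary_crossentropy(logits, target, class_num=26)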
|
||||
|
|
|
@ -12,17 +12,19 @@
|
|||
#See the License for the specific language governing permissions and
|
||||
#limitations under the License.
|
||||
|
||||
from paddle import nn
|
||||
import copy
|
||||
from collections import OrderedDict
|
||||
|
||||
from .avg_metrics import AvgMetrics
|
||||
from .metrics import TopkAcc, mAP, mINP, Recallk, Precisionk
|
||||
from .metrics import DistillationTopkAcc
|
||||
from .metrics import GoogLeNetTopkAcc
|
||||
from .metrics import HammingDistance, AccuracyScore
|
||||
from .metrics import ATTRMetric
|
||||
from .metrics import TprAtFpr
|
||||
|
||||
|
||||
class CombinedMetrics(nn.Layer):
|
||||
class CombinedMetrics(AvgMetrics):
|
||||
def __init__(self, config_list):
|
||||
super().__init__()
|
||||
self.metric_func_list = []
|
||||
|
@ -38,13 +40,30 @@ class CombinedMetrics(nn.Layer):
|
|||
eval(metric_name)(**metric_params))
|
||||
else:
|
||||
self.metric_func_list.append(eval(metric_name)())
|
||||
self.reset()
|
||||
|
||||
def __call__(self, *args, **kwargs):
|
||||
def forward(self, *args, **kwargs):
|
||||
metric_dict = OrderedDict()
|
||||
for idx, metric_func in enumerate(self.metric_func_list):
|
||||
metric_dict.update(metric_func(*args, **kwargs))
|
||||
return metric_dict
|
||||
|
||||
@property
|
||||
def avg_info(self):
|
||||
return ", ".join([metric.avg_info for metric in self.metric_func_list])
|
||||
|
||||
@property
|
||||
def avg(self):
|
||||
return self.metric_func_list[0].avg
|
||||
|
||||
def attr_res(self):
|
||||
return self.metric_func_list[0].attrmeter.res()
|
||||
|
||||
def reset(self):
|
||||
for metric in self.metric_func_list:
|
||||
if hasattr(metric, "reset"):
|
||||
metric.reset()
|
||||
|
||||
|
||||
def build_metrics(config):
|
||||
metrics_list = CombinedMetrics(copy.deepcopy(config))
|
||||
|
|
|
@ -0,0 +1,20 @@
|
|||
from paddle import nn
|
||||
|
||||
|
||||
class AvgMetrics(nn.Layer):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.avg_meters = {}
|
||||
|
||||
def reset(self):
|
||||
self.avg_meters = {}
|
||||
|
||||
@property
|
||||
def avg(self):
|
||||
if self.avg_meters:
|
||||
for metric_key in self.avg_meters:
|
||||
return self.avg_meters[metric_key].avg
|
||||
|
||||
@property
|
||||
def avg_info(self):
|
||||
return ", ".join([self.avg_meters[key].avg_info for key in self.avg_meters])
|
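# Hedged sketch of the AvgMetrics contract: subclasses fill self.avg_meters in
# reset() and update those meters inside forward(), as the metrics below do.
from ppcls.utils.misc import AverageMeter

class DummyMetric(AvgMetrics):
    def __init__(self):
        super().__init__()
        self.reset()

    def reset(self):
        self.avg_meters = {"dummy": AverageMeter("dummy")}

    def forward(self, preds, labels):
        score = 1.0  # placeholder metric value
        self.avg_meters["dummy"].update(score, labels.shape[0])
        return {"dummy": score}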
|
@ -22,14 +22,26 @@ from sklearn.metrics import accuracy_score as accuracy_metric
|
|||
from sklearn.metrics import multilabel_confusion_matrix
|
||||
from sklearn.preprocessing import binarize
|
||||
|
||||
from easydict import EasyDict
|
||||
|
||||
class TopkAcc(nn.Layer):
|
||||
from ppcls.metric.avg_metrics import AvgMetrics
|
||||
from ppcls.utils.misc import AverageMeter, AttrMeter
|
||||
|
||||
|
||||
class TopkAcc(AvgMetrics):
|
||||
def __init__(self, topk=(1, 5)):
|
||||
super().__init__()
|
||||
assert isinstance(topk, (int, list, tuple))
|
||||
if isinstance(topk, int):
|
||||
topk = [topk]
|
||||
self.topk = topk
|
||||
self.reset()
|
||||
|
||||
def reset(self):
|
||||
self.avg_meters = {
|
||||
"top{}".format(k): AverageMeter("top{}".format(k))
|
||||
for k in self.topk
|
||||
}
|
||||
|
||||
def forward(self, x, label):
|
||||
if isinstance(x, dict):
|
||||
|
@ -39,19 +51,21 @@ class TopkAcc(nn.Layer):
|
|||
for k in self.topk:
|
||||
metric_dict["top{}".format(k)] = paddle.metric.accuracy(
|
||||
x, label, k=k)
|
||||
self.avg_meters["top{}".format(k)].update(metric_dict["top{}".format(k)], x.shape[0])
|
||||
return metric_dict
|
||||
|
||||
|
||||
class mAP(nn.Layer):
|
||||
def __init__(self):
|
||||
def __init__(self, descending=True):
|
||||
super().__init__()
|
||||
self.descending = descending
|
||||
|
||||
def forward(self, similarities_matrix, query_img_id, gallery_img_id,
|
||||
keep_mask):
|
||||
metric_dict = dict()
|
||||
|
||||
choosen_indices = paddle.argsort(
|
||||
similarities_matrix, axis=1, descending=True)
|
||||
similarities_matrix, axis=1, descending=self.descending)
|
||||
gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0])
|
||||
gallery_labels_transpose = paddle.broadcast_to(
|
||||
gallery_labels_transpose,
|
||||
|
@ -87,15 +101,16 @@ class mAP(nn.Layer):
|
|||
|
||||
|
||||
class mINP(nn.Layer):
|
||||
def __init__(self):
|
||||
def __init__(self, descending=True):
|
||||
super().__init__()
|
||||
self.descending = descending
|
||||
|
||||
def forward(self, similarities_matrix, query_img_id, gallery_img_id,
|
||||
keep_mask):
|
||||
metric_dict = dict()
|
||||
|
||||
choosen_indices = paddle.argsort(
|
||||
similarities_matrix, axis=1, descending=True)
|
||||
similarities_matrix, axis=1, descending=self.descending)
|
||||
gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0])
|
||||
gallery_labels_transpose = paddle.broadcast_to(
|
||||
gallery_labels_transpose,
|
||||
|
@ -106,7 +121,7 @@ class mINP(nn.Layer):
|
|||
choosen_indices)
|
||||
equal_flag = paddle.equal(choosen_label, query_img_id)
|
||||
if keep_mask is not None:
|
||||
keep_mask = paddle.index_sample(
|
||||
keep_mask = paddle.index_sample(
|
||||
keep_mask.astype('float32'), choosen_indices)
|
||||
equal_flag = paddle.logical_and(equal_flag,
|
||||
keep_mask.astype('bool'))
|
||||
|
@ -129,13 +144,69 @@ class mINP(nn.Layer):
|
|||
return metric_dict
|
||||
|
||||
|
||||
class TprAtFpr(nn.Layer):
|
||||
def __init__(self, max_fpr=1 / 1000.):
|
||||
super().__init__()
|
||||
self.gt_pos_score_list = []
|
||||
self.gt_neg_score_list = []
|
||||
self.softmax = nn.Softmax(axis=-1)
|
||||
self.max_fpr = max_fpr
|
||||
self.max_tpr = 0.
|
||||
|
||||
def forward(self, x, label):
|
||||
if isinstance(x, dict):
|
||||
x = x["logits"]
|
||||
x = self.softmax(x)
|
||||
for i, label_i in enumerate(label):
|
||||
if label_i[0] == 0:
|
||||
self.gt_neg_score_list.append(x[i][1].numpy())
|
||||
else:
|
||||
self.gt_pos_score_list.append(x[i][1].numpy())
|
||||
return {}
|
||||
|
||||
def reset(self):
|
||||
self.gt_pos_score_list = []
|
||||
self.gt_neg_score_list = []
|
||||
self.max_tpr = 0.
|
||||
|
||||
@property
|
||||
def avg(self):
|
||||
return self.max_tpr
|
||||
|
||||
@property
|
||||
def avg_info(self):
|
||||
max_tpr = 0.
|
||||
result = ""
|
||||
gt_pos_score_list = np.array(self.gt_pos_score_list)
|
||||
gt_neg_score_list = np.array(self.gt_neg_score_list)
|
||||
for i in range(0, 10000):
|
||||
threshold = i / 10000.
|
||||
if len(gt_pos_score_list) == 0:
|
||||
continue
|
||||
tpr = np.sum(
|
||||
gt_pos_score_list > threshold) / len(gt_pos_score_list)
|
||||
if len(gt_neg_score_list) == 0:
|
||||
fpr = 0.
|
||||
else:
|
||||
fpr = np.sum(
|
||||
gt_neg_score_list > threshold) / len(gt_neg_score_list)
|
||||
if fpr <= self.max_fpr and tpr > max_tpr:
|
||||
max_tpr = tpr
|
||||
result = "threshold: {}, fpr: {}, tpr: {:.5f}".format(
|
||||
threshold, fpr, tpr)
|
||||
self.max_tpr = max_tpr
|
||||
return result
|
||||
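# Hedged usage sketch: forward() accumulates per-sample positive-class scores,
# and avg_info sweeps 10000 thresholds for the best TPR with FPR <= max_fpr.
import paddle
metric = TprAtFpr(max_fpr=1 / 1000.)
logits = paddle.randn([8, 2])            # binary-classification logits
labels = paddle.randint(0, 2, [8, 1])
metric(logits, labels)                   # accumulate one batch
print(metric.avg_info)                   # "threshold: ..., fpr: ..., tpr: ..."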
|
||||
|
||||
class Recallk(nn.Layer):
|
||||
def __init__(self, topk=(1, 5)):
|
||||
def __init__(self, topk=(1, 5), descending=True):
|
||||
super().__init__()
|
||||
assert isinstance(topk, (int, list, tuple))
|
||||
if isinstance(topk, int):
|
||||
topk = [topk]
|
||||
self.topk = topk
|
||||
self.descending = descending
|
||||
|
||||
def forward(self, similarities_matrix, query_img_id, gallery_img_id,
|
||||
keep_mask):
|
||||
|
@ -143,7 +214,7 @@ class Recallk(nn.Layer):
|
|||
|
||||
#get cmc
|
||||
choosen_indices = paddle.argsort(
|
||||
similarities_matrix, axis=1, descending=True)
|
||||
similarities_matrix, axis=1, descending=self.descending)
|
||||
gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0])
|
||||
gallery_labels_transpose = paddle.broadcast_to(
|
||||
gallery_labels_transpose,
|
||||
|
@ -175,12 +246,13 @@ class Recallk(nn.Layer):
|
|||
|
||||
|
||||
class Precisionk(nn.Layer):
|
||||
def __init__(self, topk=(1, 5)):
|
||||
def __init__(self, topk=(1, 5), descending=True):
|
||||
super().__init__()
|
||||
assert isinstance(topk, (int, list, tuple))
|
||||
if isinstance(topk, int):
|
||||
topk = [topk]
|
||||
self.topk = topk
|
||||
self.descending = descending
|
||||
|
||||
def forward(self, similarities_matrix, query_img_id, gallery_img_id,
|
||||
keep_mask):
|
||||
|
@ -188,7 +260,7 @@ class Precisionk(nn.Layer):
|
|||
|
||||
#get cmc
|
||||
choosen_indices = paddle.argsort(
|
||||
similarities_matrix, axis=1, descending=True)
|
||||
similarities_matrix, axis=1, descending=self.descending)
|
||||
gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0])
|
||||
gallery_labels_transpose = paddle.broadcast_to(
|
||||
gallery_labels_transpose,
|
||||
|
@ -241,20 +313,17 @@ class GoogLeNetTopkAcc(TopkAcc):
|
|||
return super().forward(x[0], label)
|
||||
|
||||
|
||||
class MutiLabelMetric(object):
|
||||
def __init__(self):
|
||||
pass
|
||||
class MultiLabelMetric(AvgMetrics):
|
||||
def __init__(self, bi_threshold=0.5):
|
||||
super().__init__()
|
||||
self.bi_threshold = bi_threshold
|
||||
|
||||
def _multi_hot_encode(self, logits, threshold=0.5):
|
||||
return binarize(logits, threshold=threshold)
|
||||
|
||||
def __call__(self, output):
|
||||
output = F.sigmoid(output)
|
||||
preds = self._multi_hot_encode(logits=output.numpy(), threshold=0.5)
|
||||
return preds
|
||||
def _multi_hot_encode(self, output):
|
||||
logits = F.sigmoid(output).numpy()
|
||||
return binarize(logits, threshold=self.bi_threshold)
|
||||
|
||||
|
||||
class HammingDistance(MutiLabelMetric):
|
||||
class HammingDistance(MultiLabelMetric):
|
||||
"""
|
||||
Soft metric based label for multilabel classification
|
||||
Returns:
|
||||
|
@ -263,16 +332,22 @@ class HammingDistance(MutiLabelMetric):
|
|||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.reset()
|
||||
|
||||
def __call__(self, output, target):
|
||||
preds = super().__call__(output)
|
||||
def reset(self):
|
||||
self.avg_meters = {"HammingDistance": AverageMeter("HammingDistance")}
|
||||
|
||||
def forward(self, output, target):
|
||||
preds = super()._multi_hot_encode(output)
|
||||
metric_dict = dict()
|
||||
metric_dict["HammingDistance"] = paddle.to_tensor(
|
||||
hamming_loss(target, preds))
|
||||
self.avg_meters["HammingDistance"].update(
|
||||
metric_dict["HammingDistance"].numpy()[0], output.shape[0])
|
||||
return metric_dict
|
||||
|
||||
|
||||
class AccuracyScore(MutiLabelMetric):
|
||||
class AccuracyScore(MultiLabelMetric):
|
||||
"""
|
||||
Hard metric for multilabel classification
|
||||
Args:
|
||||
|
@ -288,9 +363,13 @@ class AccuracyScore(MutiLabelMetric):
|
|||
assert base in ["sample", "label"
|
||||
], 'must be one of ["sample", "label"]'
|
||||
self.base = base
|
||||
self.reset()
|
||||
|
||||
def __call__(self, output, target):
|
||||
preds = super().__call__(output)
|
||||
def reset(self):
|
||||
self.avg_meters = {"AccuracyScore": AverageMeter("AccuracyScore")}
|
||||
|
||||
def forward(self, output, target):
|
||||
preds = super()._multi_hot_encode(output)
|
||||
metric_dict = dict()
|
||||
if self.base == "sample":
|
||||
accuracy = accuracy_metric(target, preds)
|
||||
|
@ -303,4 +382,66 @@ class AccuracyScore(MutiLabelMetric):
|
|||
accuracy = (sum(tps) + sum(tns)) / (
|
||||
sum(tps) + sum(tns) + sum(fns) + sum(fps))
|
||||
metric_dict["AccuracyScore"] = paddle.to_tensor(accuracy)
|
||||
self.avg_meters["AccuracyScore"].update(
|
||||
metric_dict["AccuracyScore"].numpy()[0], output.shape[0])
|
||||
return metric_dict
|
||||
|
||||
|
||||
def get_attr_metrics(gt_label, preds_probs, threshold):
|
||||
"""
|
||||
index: evaluated label index
|
||||
"""
|
||||
pred_label = (preds_probs > threshold).astype(int)
|
||||
|
||||
eps = 1e-20
|
||||
result = EasyDict()
|
||||
|
||||
has_fuyi = gt_label == -1
|
||||
pred_label[has_fuyi] = -1
|
||||
|
||||
###############################
|
||||
# label metrics
|
||||
# TP + FN
|
||||
result.gt_pos = np.sum((gt_label == 1), axis=0).astype(float)
|
||||
# TN + FP
|
||||
result.gt_neg = np.sum((gt_label == 0), axis=0).astype(float)
|
||||
# TP
|
||||
result.true_pos = np.sum((gt_label == 1) * (pred_label == 1),
|
||||
axis=0).astype(float)
|
||||
# TN
|
||||
result.true_neg = np.sum((gt_label == 0) * (pred_label == 0),
|
||||
axis=0).astype(float)
|
||||
# FP
|
||||
result.false_pos = np.sum(((gt_label == 0) * (pred_label == 1)),
|
||||
axis=0).astype(float)
|
||||
# FN
|
||||
result.false_neg = np.sum(((gt_label == 1) * (pred_label == 0)),
|
||||
axis=0).astype(float)
|
||||
|
||||
################
|
||||
# instance metrics
|
||||
result.gt_pos_ins = np.sum((gt_label == 1), axis=1).astype(float)
|
||||
result.true_pos_ins = np.sum((pred_label == 1), axis=1).astype(float)
|
||||
# true positive
|
||||
result.intersect_pos = np.sum((gt_label == 1) * (pred_label == 1),
|
||||
axis=1).astype(float)
|
||||
# IOU
|
||||
result.union_pos = np.sum(((gt_label == 1) + (pred_label == 1)),
|
||||
axis=1).astype(float)
|
||||
|
||||
return result
|
||||
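# Hedged numeric sketch of get_attr_metrics on a 2-sample, 3-attribute batch;
# -1 labels are propagated into pred_label so they drop out of the counts.
import numpy as np
gt = np.array([[1, 0, -1], [0, 1, 1]])
probs = np.array([[0.9, 0.2, 0.8], [0.4, 0.7, 0.6]])
res = get_attr_metrics(gt, probs, threshold=0.5)
print(res.true_pos)  # per-attribute TP counts, here [1., 1., 1.]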
|
||||
|
||||
class ATTRMetric(nn.Layer):
|
||||
def __init__(self, threshold=0.5):
|
||||
super().__init__()
|
||||
self.threshold = threshold
|
||||
|
||||
def reset(self):
|
||||
self.attrmeter = AttrMeter(threshold=0.5)
|
||||
|
||||
def forward(self, output, target):
|
||||
metric_dict = get_attr_metrics(target[:, 0, :].numpy(),
|
||||
output.numpy(), self.threshold)
|
||||
self.attrmeter.update(metric_dict)
|
||||
return metric_dict
|
||||
|
|
|
@ -439,8 +439,7 @@ def run(dataloader,
|
|||
logger.info("END {:s} {:s} {:s}".format(mode, end_str, ips_info))
|
||||
else:
|
||||
end_epoch_str = "END epoch:{:<3d}".format(epoch)
|
||||
logger.info("{:s} {:s} {:s} {:s}".format(end_epoch_str, mode, end_str,
|
||||
ips_info))
|
||||
logger.info("{:s} {:s} {:s}".format(end_epoch_str, mode, end_str))
|
||||
if use_dali:
|
||||
dataloader.reset()
|
||||
|
||||
|
|
|
@ -0,0 +1,2 @@
|
|||
0 nobody
|
||||
1 someone
|
|
@ -12,6 +12,8 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import paddle
|
||||
|
||||
__all__ = ['AverageMeter']
|
||||
|
||||
|
||||
|
@ -42,6 +44,12 @@ class AverageMeter(object):
|
|||
self.count += n
|
||||
self.avg = self.sum / self.count
|
||||
|
||||
@property
|
||||
def avg_info(self):
|
||||
if isinstance(self.avg, paddle.Tensor):
|
||||
self.avg = self.avg.numpy()[0]
|
||||
return "{}: {:.5f}".format(self.name, self.avg)
|
||||
|
||||
@property
|
||||
def total(self):
|
||||
return '{self.name}_sum: {self.sum:{self.fmt}}{self.postfix}'.format(
|
||||
|
@ -61,3 +69,87 @@ class AverageMeter(object):
|
|||
def value(self):
|
||||
return '{self.name}: {self.val:{self.fmt}}{self.postfix}'.format(
|
||||
self=self)
|
||||
|
||||
|
||||
class AttrMeter(object):
|
||||
"""
|
||||
Computes and stores the average and current value
|
||||
Code was based on https://github.com/pytorch/examples/blob/master/imagenet/main.py
|
||||
"""
|
||||
|
||||
def __init__(self, threshold=0.5):
|
||||
self.threshold = threshold
|
||||
self.reset()
|
||||
|
||||
def reset(self):
|
||||
self.gt_pos = 0
|
||||
self.gt_neg = 0
|
||||
self.true_pos = 0
|
||||
self.true_neg = 0
|
||||
self.false_pos = 0
|
||||
self.false_neg = 0
|
||||
|
||||
self.gt_pos_ins = []
|
||||
self.true_pos_ins = []
|
||||
self.intersect_pos = []
|
||||
self.union_pos = []
|
||||
|
||||
def update(self, metric_dict):
|
||||
self.gt_pos += metric_dict['gt_pos']
|
||||
self.gt_neg += metric_dict['gt_neg']
|
||||
self.true_pos += metric_dict['true_pos']
|
||||
self.true_neg += metric_dict['true_neg']
|
||||
self.false_pos += metric_dict['false_pos']
|
||||
self.false_neg += metric_dict['false_neg']
|
||||
|
||||
self.gt_pos_ins += metric_dict['gt_pos_ins'].tolist()
|
||||
self.true_pos_ins += metric_dict['true_pos_ins'].tolist()
|
||||
self.intersect_pos += metric_dict['intersect_pos'].tolist()
|
||||
self.union_pos += metric_dict['union_pos'].tolist()
|
||||
|
||||
def res(self):
|
||||
import numpy as np
|
||||
eps = 1e-20
|
||||
label_pos_recall = 1.0 * self.true_pos / (
|
||||
self.gt_pos + eps)  # recall over positive labels
|
||||
label_neg_recall = 1.0 * self.true_neg / (
|
||||
self.gt_neg + eps)  # recall over negative labels
|
||||
# mean accuracy
|
||||
label_ma = (label_pos_recall + label_neg_recall) / 2
|
||||
|
||||
label_pos_recall = np.mean(label_pos_recall)
|
||||
label_neg_recall = np.mean(label_neg_recall)
|
||||
label_prec = (self.true_pos / (self.true_pos + self.false_pos + eps))
|
||||
label_acc = (self.true_pos /
|
||||
(self.true_pos + self.false_pos + self.false_neg + eps))
|
||||
label_f1 = np.mean(2 * label_prec * label_pos_recall /
|
||||
(label_prec + label_pos_recall + eps))
|
||||
|
||||
ma = (np.mean(label_ma))
|
||||
|
||||
self.gt_pos_ins = np.array(self.gt_pos_ins)
|
||||
self.true_pos_ins = np.array(self.true_pos_ins)
|
||||
self.intersect_pos = np.array(self.intersect_pos)
|
||||
self.union_pos = np.array(self.union_pos)
|
||||
instance_acc = self.intersect_pos / (self.union_pos + eps)
|
||||
instance_prec = self.intersect_pos / (self.true_pos_ins + eps)
|
||||
instance_recall = self.intersect_pos / (self.gt_pos_ins + eps)
|
||||
instance_f1 = 2 * instance_prec * instance_recall / (
|
||||
instance_prec + instance_recall + eps)
|
||||
|
||||
instance_acc = np.mean(instance_acc)
|
||||
instance_prec = np.mean(instance_prec)
|
||||
instance_recall = np.mean(instance_recall)
|
||||
instance_f1 = np.mean(instance_f1)
|
||||
|
||||
res = [
|
||||
ma, label_f1, label_pos_recall, label_neg_recall, instance_f1,
|
||||
instance_acc, instance_prec, instance_recall
|
||||
]
|
||||
return res
|
||||
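# Hedged usage sketch tying AttrMeter to the attribute metrics: update() takes
# the per-batch EasyDict of counts (here assumed to come from get_attr_metrics
# in ppcls.metric.metrics) and res() returns the eight summary values.
import numpy as np
meter = AttrMeter(threshold=0.5)
gt = np.array([[1, 0], [0, 1]])
probs = np.array([[0.9, 0.1], [0.2, 0.8]])
meter.update(get_attr_metrics(gt, probs, threshold=0.5))
ma, label_f1 = meter.res()[:2]  # mean accuracy and label-level F1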
|
|
|
@ -42,6 +42,14 @@ def _mkdir_if_not_exist(path):
|
|||
raise OSError('Failed to mkdir {}'.format(path))
|
||||
|
||||
|
||||
def _extract_student_weights(all_params, student_prefix="Student."):
|
||||
s_params = {
|
||||
key[len(student_prefix):]: all_params[key]
|
||||
for key in all_params if student_prefix in key
|
||||
}
|
||||
return s_params
|
||||
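# Hedged sketch of _extract_student_weights: keys prefixed "Student." are
# re-rooted so the student can be loaded standalone after distillation.
all_params = {
    "Student.backbone.conv.weight": "w0",   # placeholder values
    "Teacher.backbone.conv.weight": "w1",
}
s_params = _extract_student_weights(all_params)
assert list(s_params) == ["backbone.conv.weight"]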
|
||||
|
||||
def load_dygraph_pretrain(model, path=None):
|
||||
if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')):
|
||||
raise ValueError("Model pretrain path {}.pdparams does not "
|
||||
|
@ -105,7 +113,8 @@ def init_model(config, net, optimizer=None, loss: paddle.nn.Layer=None):
|
|||
net.set_state_dict(para_dict)
|
||||
loss.set_state_dict(para_dict)
|
||||
for i in range(len(optimizer)):
|
||||
optimizer[i].set_state_dict(opti_dict)
|
||||
optimizer[i].set_state_dict(opti_dict[i] if isinstance(
|
||||
opti_dict, list) else opti_dict)
|
||||
logger.info("Finish load checkpoints from {}".format(checkpoints))
|
||||
return metric_dict
|
||||
|
||||
|
@ -116,9 +125,8 @@ def init_model(config, net, optimizer=None, loss: paddle.nn.Layer=None):
|
|||
load_distillation_model(net, pretrained_model)
|
||||
else: # common load
|
||||
load_dygraph_pretrain(net, path=pretrained_model)
|
||||
logger.info(
|
||||
logger.coloring("Finish load pretrained model from {}".format(
|
||||
pretrained_model), "HEADER"))
|
||||
logger.info("Finish load pretrained model from {}".format(
|
||||
pretrained_model))
|
||||
|
||||
|
||||
def save_model(net,
|
||||
|
@ -127,7 +135,8 @@ def save_model(net,
|
|||
model_path,
|
||||
model_name="",
|
||||
prefix='ppcls',
|
||||
loss: paddle.nn.Layer=None):
|
||||
loss: paddle.nn.Layer=None,
|
||||
save_student_model=False):
|
||||
"""
|
||||
save model to the target path
|
||||
"""
|
||||
|
@ -138,11 +147,18 @@ def save_model(net,
|
|||
model_path = os.path.join(model_path, prefix)
|
||||
|
||||
params_state_dict = net.state_dict()
|
||||
loss_state_dict = loss.state_dict()
|
||||
keys_inter = set(params_state_dict.keys()) & set(loss_state_dict.keys())
|
||||
assert len(keys_inter) == 0, \
|
||||
f"keys in model and loss state_dict must be unique, but got intersection {keys_inter}"
|
||||
params_state_dict.update(loss_state_dict)
|
||||
if loss is not None:
|
||||
loss_state_dict = loss.state_dict()
|
||||
keys_inter = set(params_state_dict.keys()) & set(loss_state_dict.keys(
|
||||
))
|
||||
assert len(keys_inter) == 0, \
|
||||
f"keys in model and loss state_dict must be unique, but got intersection {keys_inter}"
|
||||
params_state_dict.update(loss_state_dict)
|
||||
|
||||
if save_student_model:
|
||||
s_params = _extract_student_weights(params_state_dict)
|
||||
if len(s_params) > 0:
|
||||
paddle.save(s_params, model_path + "_student.pdparams")
|
||||
|
||||
paddle.save(params_state_dict, model_path + ".pdparams")
|
||||
paddle.save([opt.state_dict() for opt in optimizer], model_path + ".pdopt")
|
||||
|
|
|
@ -9,3 +9,4 @@ scipy
|
|||
scikit-learn>=0.21.0
|
||||
gast==0.3.3
|
||||
faiss-cpu==1.7.1.post2
|
||||
easydict
|
||||
|
|
|
@ -0,0 +1,54 @@
|
|||
===========================train_params===========================
|
||||
model_name:DistillationModel
|
||||
python:python3.7
|
||||
gpu_list:0|0,1
|
||||
-o Global.device:gpu
|
||||
-o Global.auto_cast:null
|
||||
-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=100
|
||||
-o Global.output_dir:./output/
|
||||
-o DataLoader.Train.sampler.batch_size:8
|
||||
-o Global.pretrained_model:null
|
||||
train_model_name:latest
|
||||
train_infer_img_dir:./dataset/ILSVRC2012/val
|
||||
null:null
|
||||
##
|
||||
trainer:amp_train
|
||||
amp_train:tools/train.py -c ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
|
||||
pact_train:null
|
||||
fpgm_train:null
|
||||
distill_train:null
|
||||
null:null
|
||||
null:null
|
||||
##
|
||||
===========================eval_params===========================
|
||||
eval:tools/eval.py -c ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml
|
||||
null:null
|
||||
##
|
||||
===========================infer_params==========================
|
||||
-o Global.save_inference_dir:./inference
|
||||
-o Global.pretrained_model:
|
||||
norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml
|
||||
quant_export:null
|
||||
fpgm_export:null
|
||||
distill_export:null
|
||||
kl_quant:null
|
||||
export2:null
|
||||
pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_pretrained.pdparams
|
||||
infer_model:../inference/
|
||||
infer_export:True
|
||||
infer_quant:False
|
||||
inference:python/predict_cls.py -c configs/inference_cls.yaml
|
||||
-o Global.use_gpu:True|False
|
||||
-o Global.enable_mkldnn:True|False
|
||||
-o Global.cpu_num_threads:1|6
|
||||
-o Global.batch_size:1|16
|
||||
-o Global.use_tensorrt:True|False
|
||||
-o Global.use_fp16:True|False
|
||||
-o Global.inference_model_dir:../inference
|
||||
-o Global.infer_imgs:../dataset/ILSVRC2012/val
|
||||
-o Global.save_log_path:null
|
||||
-o Global.benchmark:True
|
||||
null:null
|
||||
null:null
|
||||
===========================infer_benchmark_params==========================
|
||||
random_infer_input:[{float32,[3,224,224]}]
|
|
@ -0,0 +1,54 @@
|
|||
===========================train_params===========================
|
||||
model_name:DistillationModel
|
||||
python:python3.7
|
||||
gpu_list:0|0,1
|
||||
-o Global.device:gpu
|
||||
-o Global.auto_cast:null
|
||||
-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=100
|
||||
-o Global.output_dir:./output/
|
||||
-o DataLoader.Train.sampler.batch_size:8
|
||||
-o Global.pretrained_model:null
|
||||
train_model_name:latest
|
||||
train_infer_img_dir:./dataset/ILSVRC2012/val
|
||||
null:null
|
||||
##
|
||||
trainer:norm_train
|
||||
norm_train:tools/train.py -c ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
|
||||
pact_train:null
|
||||
fpgm_train:null
|
||||
distill_train:null
|
||||
null:null
|
||||
null:null
|
||||
##
|
||||
===========================eval_params===========================
|
||||
eval:tools/eval.py -c ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml
|
||||
null:null
|
||||
##
|
||||
===========================infer_params==========================
|
||||
-o Global.save_inference_dir:./inference
|
||||
-o Global.pretrained_model:
|
||||
norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml
|
||||
quant_export:null
|
||||
fpgm_export:null
|
||||
distill_export:null
|
||||
kl_quant:null
|
||||
export2:null
|
||||
pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_pretrained.pdparams
|
||||
infer_model:../inference/
|
||||
infer_export:True
|
||||
infer_quant:False
|
||||
inference:python/predict_cls.py -c configs/inference_cls.yaml
|
||||
-o Global.use_gpu:True|False
|
||||
-o Global.enable_mkldnn:True|False
|
||||
-o Global.cpu_num_threads:1|6
|
||||
-o Global.batch_size:1|16
|
||||
-o Global.use_tensorrt:True|False
|
||||
-o Global.use_fp16:True|False
|
||||
-o Global.inference_model_dir:../inference
|
||||
-o Global.infer_imgs:../dataset/ILSVRC2012/val
|
||||
-o Global.save_log_path:null
|
||||
-o Global.benchmark:True
|
||||
null:null
|
||||
null:null
|
||||
===========================infer_benchmark_params==========================
|
||||
random_infer_input:[{float32,[3,224,224]}]
|
|
@ -0,0 +1,53 @@
|
|||
===========================train_params===========================
|
||||
model_name:PPHGNet_small
|
||||
python:python3.7
|
||||
gpu_list:0|0,1
|
||||
-o Global.device:gpu
|
||||
-o Global.auto_cast:null
|
||||
-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
|
||||
-o Global.output_dir:./output/
|
||||
-o DataLoader.Train.sampler.batch_size:8
|
||||
-o Global.pretrained_model:null
|
||||
train_model_name:latest
|
||||
train_infer_img_dir:./dataset/ILSVRC2012/val
|
||||
null:null
|
||||
##
|
||||
trainer:norm_train
|
||||
norm_train:tools/train.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
|
||||
pact_train:null
|
||||
fpgm_train:null
|
||||
distill_train:null
|
||||
null:null
|
||||
null:null
|
||||
##
|
||||
===========================eval_params===========================
|
||||
eval:tools/eval.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml
|
||||
null:null
|
||||
##
|
||||
===========================infer_params==========================
|
||||
-o Global.save_inference_dir:./inference
|
||||
-o Global.pretrained_model:
|
||||
norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml
|
||||
quant_export:null
|
||||
fpgm_export:null
|
||||
distill_export:null
|
||||
kl_quant:null
|
||||
export2:null
|
||||
pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_pretrained.pdparams
|
||||
infer_model:../inference/
|
||||
infer_export:True
|
||||
infer_quant:Fasle
|
||||
inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=236
|
||||
-o Global.use_gpu:True|False
|
||||
-o Global.enable_mkldnn:True|False
|
||||
-o Global.cpu_num_threads:1|6
|
||||
-o Global.batch_size:1|16
|
||||
-o Global.use_tensorrt:True|False
|
||||
-o Global.use_fp16:True|False
|
||||
-o Global.inference_model_dir:../inference
|
||||
-o Global.infer_imgs:../dataset/ILSVRC2012/val
|
||||
-o Global.save_log_path:null
|
||||
-o Global.benchmark:True
|
||||
null:null
|
||||
===========================infer_benchmark_params==========================
|
||||
random_infer_input:[{float32,[3,224,224]}]
|
|
@ -0,0 +1,53 @@
|
|||
===========================train_params===========================
|
||||
model_name:PPHGNet_tiny
|
||||
python:python3.7
|
||||
gpu_list:0|0,1
|
||||
-o Global.device:gpu
|
||||
-o Global.auto_cast:null
|
||||
-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
|
||||
-o Global.output_dir:./output/
|
||||
-o DataLoader.Train.sampler.batch_size:8
|
||||
-o Global.pretrained_model:null
|
||||
train_model_name:latest
|
||||
train_infer_img_dir:./dataset/ILSVRC2012/val
|
||||
null:null
|
||||
##
|
||||
trainer:norm_train
|
||||
norm_train:tools/train.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
|
||||
pact_train:null
|
||||
fpgm_train:null
|
||||
distill_train:null
|
||||
null:null
|
||||
null:null
|
||||
##
|
||||
===========================eval_params===========================
|
||||
eval:tools/eval.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml
|
||||
null:null
|
||||
##
|
||||
===========================infer_params==========================
|
||||
-o Global.save_inference_dir:./inference
|
||||
-o Global.pretrained_model:
|
||||
norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml
|
||||
quant_export:null
|
||||
fpgm_export:null
|
||||
distill_export:null
|
||||
kl_quant:null
|
||||
export2:null
|
||||
pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_pretrained.pdparams
|
||||
infer_model:../inference/
|
||||
infer_export:True
|
||||
infer_quant:Fasle
|
||||
inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=232
|
||||
-o Global.use_gpu:True|False
|
||||
-o Global.enable_mkldnn:True|False
|
||||
-o Global.cpu_num_threads:1|6
|
||||
-o Global.batch_size:1|16
|
||||
-o Global.use_tensorrt:True|False
|
||||
-o Global.use_fp16:True|False
|
||||
-o Global.inference_model_dir:../inference
|
||||
-o Global.infer_imgs:../dataset/ILSVRC2012/val
|
||||
-o Global.save_log_path:null
|
||||
-o Global.benchmark:True
|
||||
null:null
|
||||
===========================infer_benchmark_params==========================
|
||||
random_infer_input:[{float32,[3,224,224]}]
|
|
@ -0,0 +1,53 @@
|
|||
===========================train_params===========================
|
||||
model_name:PPLCNetV2_base
|
||||
python:python3.7
|
||||
gpu_list:0|0,1
|
||||
-o Global.device:gpu
|
||||
-o Global.auto_cast:null
|
||||
-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
|
||||
-o Global.output_dir:./output/
|
||||
-o DataLoader.Train.sampler.first_bs:8
|
||||
-o Global.pretrained_model:null
|
||||
train_model_name:latest
|
||||
train_infer_img_dir:./dataset/ILSVRC2012/val
|
||||
null:null
|
||||
##
|
||||
trainer:norm_train
|
||||
norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml -o Global.seed=1234 -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
|
||||
pact_train:null
|
||||
fpgm_train:null
|
||||
distill_train:null
|
||||
null:null
|
||||
null:null
|
||||
##
|
||||
===========================eval_params===========================
|
||||
eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml
|
||||
null:null
|
||||
##
|
||||
===========================infer_params==========================
|
||||
-o Global.save_inference_dir:./inference
|
||||
-o Global.pretrained_model:
|
||||
norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml
|
||||
quant_export:null
|
||||
fpgm_export:null
|
||||
distill_export:null
|
||||
kl_quant:null
|
||||
export2:null
|
||||
pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNetV2_base_pretrained.pdparams
|
||||
infer_model:../inference/
|
||||
infer_export:True
|
||||
infer_quant:False
|
||||
inference:python/predict_cls.py -c configs/inference_cls.yaml
|
||||
-o Global.use_gpu:True|False
|
||||
-o Global.enable_mkldnn:True|False
|
||||
-o Global.cpu_num_threads:1|6
|
||||
-o Global.batch_size:1|16
|
||||
-o Global.use_tensorrt:True|False
|
||||
-o Global.use_fp16:True|False
|
||||
-o Global.inference_model_dir:../inference
|
||||
-o Global.infer_imgs:../dataset/ILSVRC2012/val
|
||||
-o Global.save_log_path:null
|
||||
-o Global.benchmark:True
|
||||
null:null
|
||||
===========================infer_benchmark_params==========================
|
||||
random_infer_input:[{float32,[3,224,224]}]
|
|
@ -0,0 +1,302 @@
|
|||
#!/usr/bin/env bash
|
||||
GPU_IDS="0,1,2,3"
|
||||
|
||||
# Basic Config
|
||||
CONFIG="ppcls/configs/cls_demo/person/PPLCNet/PPLCNet_x1_0.yaml"
|
||||
EPOCHS=1
|
||||
OUTPUT="output_debug4"
|
||||
STATUS_LOG="${OUTPUT}/status_result.log"
|
||||
RESULT="${OUTPUT}/result.log"
|
||||
|
||||
|
||||
# Search Options
|
||||
LR_LIST=( 0.0075 0.01 0.0125 )
|
||||
RESOLUTION_LIST=( 176 192 224 )
|
||||
RA_PROB_LIST=( 0.0 0.1 0.5 )
|
||||
RE_PROB_LIST=( 0.0 0.1 0.5 )
|
||||
LR_MULT_LIST=( [0.0,0.2,0.4,0.6,0.8,1.0] [0.0,0.4,0.4,0.8,0.8,1.0] )
|
||||
TEACHER_LIST=( "ResNet101_vd" "ResNet50_vd" )
|
||||
|
||||
|
||||
# Train Mode
|
||||
declare -A MODE_MAP
|
||||
MODE_MAP=(["search_lr"]=1 ["search_resolution"]=1 ["search_ra_prob"]=1 ["search_re_prob"]=1 ["search_lr_mult_list"]=1 ["search_teacher"]=1 ["train_distillation_model"]=1)
|
||||
|
||||
export CUDA_VISIBLE_DEVICES=${GPU_IDS}
|
||||
|
||||
|
||||
function status_check(){
|
||||
last_status=$1 # the exit code
|
||||
run_command=$2
|
||||
run_log=$3
|
||||
if [ $last_status -eq 0 ]; then
|
||||
echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a ${run_log}
|
||||
else
|
||||
echo -e "\033[33m Run failed with command - ${run_command}! \033[0m" | tee -a ${run_log}
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
function get_max_value(){
|
||||
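# Print the maximum of the given float arguments and its index, one per line; bc is used because bash cannot compare floats natively.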
array=($*)
|
||||
max=${array[0]}
|
||||
index=0
|
||||
for (( i=0; i<${#array[*]}; i++ )); do
|
||||
if [[ $(echo "${array[$i]} > $max"|bc) -eq 1 ]]; then
|
||||
max=${array[$i]}
|
||||
index=${i}
|
||||
else
|
||||
continue
|
||||
fi
|
||||
done
|
||||
echo ${max}
|
||||
echo ${index}
|
||||
}
|
||||
|
||||
function get_best_info(){
|
||||
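# For one search stage, read every run's train.log, pick the best "best metric" at the final epoch, and print the winning metric and the corresponding parameter value.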
_parameter=$1
|
||||
params_index=2
|
||||
if [[ ${_parameter} == "TEACHER" ]]; then
|
||||
params_index=3
|
||||
fi
|
||||
parameters_list=$(find ${OUTPUT}/${_parameter}* -name train.log | awk -v params_index=${params_index} -F "/" '{print $params_index}')
|
||||
metric_list=$(find ${OUTPUT}/${_parameter}* -name train.log | xargs cat | grep "best" | grep "Epoch ${EPOCHS}" | awk -F " " '{print substr($NF,1,7)}')
|
||||
best_info=$(get_max_value ${metric_list[*]})
|
||||
best_metric=$(echo $best_info | awk -F " " '{print $1}')
|
||||
best_index=$(echo $best_info | awk -F " " '{print $2}')
|
||||
best_parameter=$(echo $parameters_list | awk -v best=$(($best_index+1)) '{print $best}' | sed 's/^[A-Z_]*_//')  # strip the stage prefix (LR_, RA_, LR_MULT_, ...); splitting on "_" broke multi-underscore values
|
||||
echo ${best_metric}
|
||||
echo ${best_parameter}
|
||||
}
|
||||
|
||||
|
||||
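# Each search_* function below trains one run per candidate value via paddle.distributed.launch, records the exit status with status_check, and removes epoch_* checkpoints afterwards to save disk space.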
function search_lr(){
|
||||
for lr in ${LR_LIST[*]}; do
|
||||
cmd_train="python3.7 -m paddle.distributed.launch --gpus=${GPU_IDS} tools/train.py \
|
||||
-c ${CONFIG} \
|
||||
-o Global.output_dir=${OUTPUT}/LR_${lr} \
|
||||
-o Optimizer.lr.learning_rate=${lr} \
|
||||
-o Global.epochs=${EPOCHS}"
|
||||
eval ${cmd_train}
|
||||
status_check $? "${cmd_train}" "${STATUS_LOG}"
|
||||
cmd="find ${OUTPUT} -name epoch* | xargs rm -rf"
|
||||
eval ${cmd}
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
function search_resolution(){
|
||||
_lr=$1
|
||||
for resolution in ${RESOLUTION_LIST[*]}; do
|
||||
cmd_train="python3.7 -m paddle.distributed.launch --gpus=${GPU_IDS} tools/train.py \
|
||||
-c ${CONFIG} \
|
||||
-o Global.output_dir=${OUTPUT}/RESOLUTION_${resolution} \
|
||||
-o Optimizer.lr.learning_rate=${_lr} \
|
||||
-o Global.epochs=${EPOCHS} \
|
||||
-o DataLoader.Train.dataset.transform_ops.1.RandCropImage.size=${resolution}"
|
||||
eval ${cmd_train}
|
||||
status_check $? "${cmd_train}" "${STATUS_LOG}"
|
||||
cmd="find ${OUTPUT} -name epoch* | xargs rm -rf"
|
||||
eval ${cmd}
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
|
||||
function search_ra_prob(){
|
||||
_lr=$1
|
||||
_resolution=$2
|
||||
for ra_prob in ${RA_PROB_LIST[*]}; do
|
||||
cmd_train="python3.7 -m paddle.distributed.launch --gpus=${GPU_IDS} tools/train.py \
|
||||
-c ${CONFIG} \
|
||||
-o Global.output_dir=${OUTPUT}/RA_${ra_prob} \
|
||||
-o Optimizer.lr.learning_rate=${_lr} \
|
||||
-o Global.epochs=${EPOCHS} \
|
||||
-o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob=${ra_prob} \
|
||||
-o DataLoader.Train.dataset.transform_ops.1.RandCropImage.size=${_resolution} \
|
||||
-o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size=${_resolution}"
|
||||
eval ${cmd_train}
|
||||
status_check $? "${cmd_train}" "${STATUS_LOG}"
|
||||
cmd="find ${OUTPUT} -name epoch* | xargs rm -rf"
|
||||
eval ${cmd}
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
|
||||
function search_re_prob(){
|
||||
_lr=$1
|
||||
_resolution=$2
|
||||
_ra_prob=$3
|
||||
for re_prob in ${RE_PROB_LIST[*]}; do
|
||||
cmd_train="python3.7 -m paddle.distributed.launch --gpus=${GPU_IDS} tools/train.py \
|
||||
-c ${CONFIG} \
|
||||
-o Global.output_dir=${OUTPUT}/RE_${re_prob} \
|
||||
-o Optimizer.lr.learning_rate=${_lr} \
|
||||
-o Global.epochs=${EPOCHS} \
|
||||
-o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob=${_ra_prob} \
|
||||
-o DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON=${re_prob} \
|
||||
-o DataLoader.Train.dataset.transform_ops.1.RandCropImage.size=${_resolution} \
|
||||
-o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size=${_resolution}"
|
||||
eval ${cmd_train}
|
||||
status_check $? "${cmd_train}" "${STATUS_LOG}"
|
||||
cmd="find ${OUTPUT} -name epoch* | xargs rm -rf"
|
||||
eval ${cmd}
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
function search_lr_mult_list(){
|
||||
_lr=$1
|
||||
_resolution=$2
|
||||
_ra_prob=$3
|
||||
_re_prob=$4
|
||||
|
||||
for lr_mult in ${LR_MULT_LIST[*]}; do
|
||||
cmd_train="python3.7 -m paddle.distributed.launch --gpus=${GPU_IDS} tools/train.py \
|
||||
-c ${CONFIG} \
|
||||
-o Global.output_dir=${OUTPUT}/LR_MULT_${lr_mult} \
|
||||
-o Optimizer.lr.learning_rate=${_lr} \
|
||||
-o Global.epochs=${EPOCHS} \
|
||||
-o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob=${_ra_prob} \
|
||||
-o DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON=${_re_prob} \
|
||||
-o DataLoader.Train.dataset.transform_ops.1.RandCropImage.size=${_resolution} \
|
||||
-o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size=${_resolution} \
|
||||
-o Arch.lr_mult_list=${lr_mult}"
|
||||
eval ${cmd_train}
|
||||
status_check $? "${cmd_train}" "${STATUS_LOG}"
|
||||
cmd="find ${OUTPUT} -name epoch* | xargs rm -rf"
|
||||
eval ${cmd}
|
||||
done
|
||||
|
||||
}
|
||||
|
||||
|
||||
function search_teacher(){
|
||||
_lr=$1
|
||||
_resolution=$2
|
||||
_ra_prob=$3
|
||||
_re_prob=$4
|
||||
|
||||
for teacher in ${TEACHER_LIST[*]}; do
|
||||
cmd_train="python3.7 -m paddle.distributed.launch --gpus=${GPU_IDS} tools/train.py \
|
||||
-c ${CONFIG} \
|
||||
-o Global.output_dir=${OUTPUT}/TEACHER_${teacher} \
|
||||
-o Optimizer.lr.learning_rate=${_lr} \
|
||||
-o Global.epochs=${EPOCHS} \
|
||||
-o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob=${_ra_prob} \
|
||||
-o DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON=${_re_prob} \
|
||||
-o DataLoader.Train.dataset.transform_ops.1.RandCropImage.size=${_resolution} \
|
||||
-o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size=${_resolution} \
|
||||
-o Arch.name=${teacher}"
|
||||
eval ${cmd_train}
|
||||
status_check $? "${cmd_train}" "${STATUS_LOG}"
|
||||
cmd="find ${OUTPUT}/* -name epoch* | xargs rm -rf"
|
||||
eval ${cmd}
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
# train the model for knowledge distillation
|
||||
function train_distillation_model(){
|
||||
_lr=$1
|
||||
_resolution=$2
|
||||
_ra_prob=$3
|
||||
_re_prob=$4
|
||||
_lr_mult=$5
|
||||
teacher=$6
|
||||
t_pretrained_model="${OUTPUT}/TEACHER_${teacher}/${teacher}/best_model"
|
||||
config="ppcls/configs/cls_demo/person/Distillation/PPLCNet_x1_0_distillation.yaml"
|
||||
combined_label_list="./dataset/person/train_list_for_distill.txt"
|
||||
|
||||
cmd_train="python3.7 -m paddle.distributed.launch \
|
||||
--gpus=${GPU_IDS} \
|
||||
tools/train.py -c ${config} \
|
||||
-o Global.output_dir=${OUTPUT}/kd_teacher \
|
||||
-o Optimizer.lr.learning_rate=${_lr} \
|
||||
-o Global.epochs=${EPOCHS} \
|
||||
-o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob=${_ra_prob} \
|
||||
-o DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON=${_re_prob} \
|
||||
-o DataLoader.Train.dataset.transform_ops.1.RandCropImage.size=${_resolution} \
|
||||
-o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size=${_resolution} \
|
||||
-o DataLoader.Train.dataset.cls_label_path=${combined_label_list} \
|
||||
-o Arch.models.0.Teacher.name="${teacher}" \
|
||||
-o Arch.models.0.Teacher.pretrained="${t_pretrained_model}" \
|
||||
-o Arch.models.1.Student.lr_mult_list=${_lr_mult}"
|
||||
eval ${cmd_train}
|
||||
status_check $? "${cmd_train}" "${STATUS_LOG}"
|
||||
cmd="find ${OUTPUT} -name epoch* | xargs rm -rf"
|
||||
eval ${cmd}
|
||||
}
|
||||
|
||||
######## Train PaddleClas ########
|
||||
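# WARNING: removes all results of previous searches, including best_model checkpoints.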
rm -rf ${OUTPUT}
|
||||
|
||||
# Train and get best lr
|
||||
best_lr=0.01
|
||||
if [[ ${MODE_MAP["search_lr"]} -eq 1 ]]; then
|
||||
search_lr
|
||||
best_info=$(get_best_info "LR_[0-9]")
|
||||
best_metric=$(echo $best_info | awk -F " " '{print $1}')
|
||||
best_lr=$(echo $best_info | awk -F " " '{print $2}')
|
||||
echo "The best lr is ${best_lr}, and the best metric is ${best_metric}" >> ${RESULT}
|
||||
fi
|
||||
|
||||
# Train and get best resolution
|
||||
best_resolution=192
|
||||
if [[ ${MODE_MAP["search_resolution"]} -eq 1 ]]; then
|
||||
search_resolution "${best_lr}"
|
||||
best_info=$(get_best_info "RESOLUTION")
|
||||
best_metric=$(echo $best_info | awk -F " " '{print $1}')
|
||||
best_resolution=$(echo $best_info | awk -F " " '{print $2}')
|
||||
echo "The best resolution is ${best_resolution}, and the best metric is ${best_metric}" >> ${RESULT}
|
||||
fi
|
||||
|
||||
# Train and get best ra_prob
|
||||
best_ra_prob=0.0
|
||||
if [[ ${MODE_MAP["search_ra_prob"]} -eq 1 ]]; then
|
||||
search_ra_prob "${best_lr}" "${best_resolution}"
|
||||
best_info=$(get_best_info "RA")
|
||||
best_metric=$(echo $best_info | awk -F " " '{print $1}')
|
||||
best_ra_prob=$(echo $best_info | awk -F " " '{print $2}')
|
||||
echo "The best ra_prob is ${best_ra_prob}, and the best metric is ${best_metric}" >> ${RESULT}
|
||||
fi
|
||||
|
||||
# Train and get best re_prob
|
||||
best_re_prob=0.1
|
||||
if [[ ${MODE_MAP["search_re_prob"]} -eq 1 ]]; then
|
||||
search_re_prob "${best_lr}" "${best_resolution}" "${best_ra_prob}"
|
||||
best_info=$(get_best_info "RE")
|
||||
best_metric=$(echo $best_info | awk -F " " '{print $1}')
|
||||
best_re_prob=$(echo $best_info | awk -F " " '{print $2}')
|
||||
echo "The best re_prob is ${best_re_prob}, and the best metric is ${best_metric}" >> ${RESULT}
|
||||
fi
|
||||
|
||||
# Train and get best lr_mult_list
|
||||
best_lr_mult_list=[1.0,1.0,1.0,1.0,1.0,1.0]
|
||||
if [[ ${MODE_MAP["search_lr_mult_list"]} -eq 1 ]]; then
|
||||
search_lr_mult_list "${best_lr}" "${best_resolution}" "${best_ra_prob}" "${best_re_prob}"
|
||||
best_info=$(get_best_info "LR_MULT")
|
||||
best_metric=$(echo $best_info | awk -F " " '{print $1}')
|
||||
best_lr_mult_list=$(echo $best_info | awk -F " " '{print $2}')
|
||||
echo "The best lr_mult_list is ${best_lr_mult_list}, and the best metric is ${best_metric}" >> ${RESULT}
|
||||
fi
|
||||
|
||||
# Train and get best teacher
|
||||
best_teacher="ResNet101_vd"
|
||||
if [[ ${MODE_MAP["search_teacher"]} -eq 1 ]]; then
|
||||
search_teacher "${best_lr}" "${best_resolution}" "${best_ra_prob}" "${best_re_prob}"
|
||||
best_info=$(get_best_info "TEACHER")
|
||||
best_metric=$(echo $best_info | awk -F " " '{print $1}')
|
||||
best_teacher=$(echo $best_info | awk -F " " '{print $2}')
|
||||
echo "The best teacher is ${best_teacher}, and the best metric is ${best_metric}" >> ${RESULT}
|
||||
fi
|
||||
|
||||
# Train the distillation model
|
||||
if [[ ${MODE_MAP["train_distillation_model"]} -eq 1 ]]; then
|
||||
train_distillation_model "${best_lr}" "${best_resolution}" "${best_ra_prob}" "${best_re_prob}" "${best_lr_mult_list}" ${best_teacher}
|
||||
best_info=$(get_best_info "kd_teacher/DistillationModel")
|
||||
best_metric=$(echo $best_info | awk -F " " '{print $1}')
|
||||
echo "the distillation best metric is ${best_metric}, it is global best metric!" >> ${RESULT}
|
||||
fi
|
||||
|
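A minimal way to drive the script above, assuming it is saved as search_strategy.sh (the filename is an assumption) and run from the PaddleClas root so the relative config paths resolve:

    bash search_strategy.sh
    cat output_debug4/result.log    # one line per stage with the best value and metric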
|
@ -0,0 +1,112 @@
|
|||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
import os
|
||||
import sys
|
||||
__dir__ = os.path.dirname(os.path.abspath(__file__))
|
||||
sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
|
||||
|
||||
import subprocess
|
||||
import numpy as np
|
||||
|
||||
from ppcls.utils import config
|
||||
|
||||
|
||||
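# Parse the last "best metric: <value>" entry (up to the closing "]") that the PaddleClas trainer writes to train.log.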
def get_result(log_dir):
|
||||
log_file = "{}/train.log".format(log_dir)
|
||||
with open(log_file, "r") as f:
|
||||
raw = f.read()
|
||||
res = float(raw.split("best metric: ")[-1].split("]")[0])
|
||||
return res
|
||||
|
||||
|
||||
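# Train once per candidate value (repeated search_times times), average the metric over the repeats, and store the winning candidate under the "best" key of the returned dict.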
def search_train(search_list, base_program, base_output_dir, search_key,
|
||||
config_replace_value, model_name, search_times=1):
|
||||
best_res = 0.
|
||||
best = search_list[0]
|
||||
all_result = {}
|
||||
for search_i in search_list:
|
||||
program = base_program.copy()
|
||||
for v in config_replace_value:
|
||||
program += ["-o", "{}={}".format(v, search_i)]
|
||||
if v == "Arch.name":
|
||||
model_name = search_i
|
||||
res_list = []
|
||||
for j in range(search_times):
|
||||
output_dir = "{}/{}_{}_{}".format(base_output_dir, search_key, search_i, j).replace(".", "_")
|
||||
program += ["-o", "Global.output_dir={}".format(output_dir)]
|
||||
process = subprocess.Popen(program)
|
||||
process.communicate()
|
||||
res = get_result("{}/{}".format(output_dir, model_name))
|
||||
res_list.append(res)
|
||||
all_result[str(search_i)] = res_list
|
||||
|
||||
if np.mean(res_list) > best_res:
|
||||
best = search_i
|
||||
best_res = np.mean(res_list)
|
||||
all_result["best"] = best
|
||||
return all_result
|
||||
|
||||
|
||||
def search_strategy():
|
||||
args = config.parse_args()
|
||||
configs = config.get_config(args.config, overrides=args.override, show=False)
|
||||
base_config_file = configs["base_config_file"]
|
||||
distill_config_file = configs["distill_config_file"]
|
||||
model_name = config.get_config(base_config_file)["Arch"]["name"]
|
||||
gpus = configs["gpus"]
|
||||
gpus = ",".join([str(i) for i in gpus])
|
||||
base_program = ["python3.7", "-m", "paddle.distributed.launch", "--gpus={}".format(gpus),
|
||||
"tools/train.py", "-c", base_config_file]
|
||||
base_output_dir = configs["output_dir"]
|
||||
search_times = configs["search_times"]
|
||||
search_dict = configs.get("search_dict")
|
||||
all_results = {}
|
||||
for search_i in search_dict:
|
||||
search_key = search_i["search_key"]
|
||||
search_values = search_i["search_values"]
|
||||
replace_config = search_i["replace_config"]
|
||||
res = search_train(search_values, base_program, base_output_dir,
|
||||
search_key, replace_config, model_name, search_times)
|
||||
all_results[search_key] = res
|
||||
best = res.get("best")
|
||||
for v in replace_config:
|
||||
base_program += ["-o", "{}={}".format(v, best)]
|
||||
|
||||
teacher_configs = configs.get("teacher", None)
|
||||
if teacher_configs is not None:
|
||||
teacher_program = base_program.copy()
|
||||
# remove incompatible keys
|
||||
teacher_rm_keys = teacher_configs["rm_keys"]
|
||||
rm_indices = []
|
||||
for rm_k in teacher_rm_keys:
|
||||
for ind, ki in enumerate(base_program):
|
||||
if rm_k in ki:
|
||||
rm_indices.append(ind)
|
||||
for rm_index in rm_indices[::-1]:
|
||||
teacher_program.pop(rm_index)
|
||||
teacher_program.pop(rm_index-1)
|
||||
replace_config = ["Arch.name"]
|
||||
teacher_list = teacher_configs["search_values"]
|
||||
res = search_train(teacher_list, teacher_program, base_output_dir, "teacher", replace_config, model_name)
|
||||
all_results["teacher"] = res
|
||||
best = res.get("best")
|
||||
t_pretrained = "{}/{}_{}_0/{}/best_model".format(base_output_dir, "teacher", best, best)
|
||||
base_program += ["-o", "Arch.models.0.Teacher.name={}".format(best),
|
||||
"-o", "Arch.models.0.Teacher.pretrained={}".format(t_pretrained)]
|
||||
output_dir = "{}/search_res".format(base_output_dir)
|
||||
base_program += ["-o", "Global.output_dir={}".format(output_dir)]
|
||||
final_replace = configs.get('final_replace', {})  # tolerate configs without a final_replace section
|
||||
for i in range(len(base_program)):
|
||||
base_program[i] = base_program[i].replace(base_config_file, distill_config_file)
|
||||
for k in final_replace:
|
||||
v = final_replace[k]
|
||||
base_program[i] = base_program[i].replace(k, v)
|
||||
|
||||
process = subprocess.Popen(base_program)
|
||||
process.communicate()
|
||||
print(all_results, base_program)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
search_strategy()
|
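A hedged usage sketch for this script: it reads a YAML file providing base_config_file, distill_config_file, gpus, output_dir, search_times, search_dict, teacher and final_replace, then launches each training via paddle.distributed.launch. The config path below follows the PULC documentation and is an assumption for other checkouts:

    python3.7 tools/search_strategy.py -c ppcls/configs/StrategySearch/person.yaml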