Merge pull request #4683 from cuicheng01/dygraph

Add tipc lite multi-predictor & arm_gpu_opencl chains

commit 1bbf6e6a92
@@ -172,7 +172,10 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,
  cv::Mat resize_img;
  int index = 0;
  std::vector<double> time_info = {0, 0, 0};
  for (int i = boxes.size() - 1; i >= 0; i--) {
    auto preprocess_start = std::chrono::steady_clock::now();
    crop_img = GetRotateCropImage(srcimg, boxes[i]);
    if (use_direction_classify >= 1) {
      crop_img = RunClsModel(crop_img, predictor_cls);
@@ -191,7 +194,9 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,
    auto *data0 = input_tensor0->mutable_data<float>();

    NeonMeanScale(dimg, data0, resize_img.rows * resize_img.cols, mean, scale);
    auto preprocess_end = std::chrono::steady_clock::now();
    //// Run CRNN predictor
    auto inference_start = std::chrono::steady_clock::now();
    predictor_crnn->Run();

    // Get output and run postprocess
@@ -199,8 +204,10 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,
        std::move(predictor_crnn->GetOutput(0)));
    auto *predict_batch = output_tensor0->data<float>();
    auto predict_shape = output_tensor0->shape();
    auto inference_end = std::chrono::steady_clock::now();

    // ctc decode
    auto postprocess_start = std::chrono::steady_clock::now();
    std::string str_res;
    int argmax_idx;
    int last_index = 0;
@@ -224,7 +231,20 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,
    score /= count;
    rec_text.push_back(str_res);
    rec_text_score.push_back(score);
    auto postprocess_end = std::chrono::steady_clock::now();

    std::chrono::duration<float> preprocess_diff = preprocess_end - preprocess_start;
    time_info[0] += double(preprocess_diff.count() * 1000);
    std::chrono::duration<float> inference_diff = inference_end - inference_start;
    time_info[1] += double(inference_diff.count() * 1000);
    std::chrono::duration<float> postprocess_diff = postprocess_end - postprocess_start;
    time_info[2] += double(postprocess_diff.count() * 1000);
  }

  times->push_back(time_info[0]);
  times->push_back(time_info[1]);
  times->push_back(time_info[2]);
}

std::vector<std::vector<std::vector<int>>>
@@ -312,7 +332,6 @@ std::shared_ptr<PaddlePredictor> loadModel(std::string model_file, int num_threads)
  config.set_model_from_file(model_file);
  config.set_threads(num_threads);

  std::shared_ptr<PaddlePredictor> predictor =
      CreatePaddlePredictor<MobileConfig>(config);
  return predictor;
@@ -434,6 +453,9 @@ void system(char **argv){
  auto rec_predictor = loadModel(rec_model_file, std::stoi(num_threads));
  auto cls_predictor = loadModel(cls_model_file, std::stoi(num_threads));

  std::vector<double> det_time_info = {0, 0, 0};
  std::vector<double> rec_time_info = {0, 0, 0};

  for (int i = 0; i < cv_all_img_names.size(); ++i) {
    std::cout << "The predict img: " << cv_all_img_names[i] << std::endl;
    cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
@@ -459,8 +481,38 @@ void system(char **argv){
    //// print recognized text
    for (int i = 0; i < rec_text.size(); i++) {
      std::cout << i << "\t" << rec_text[i] << "\t" << rec_text_score[i]
                << std::endl;
    }

    det_time_info[0] += det_times[0];
    det_time_info[1] += det_times[1];
    det_time_info[2] += det_times[2];
    rec_time_info[0] += rec_times[0];
    rec_time_info[1] += rec_times[1];
    rec_time_info[2] += rec_times[2];
  }
  if (strcmp(argv[12], "True") == 0) {
    AutoLogger autolog_det(det_model_file,
                           runtime_device,
                           std::stoi(num_threads),
                           std::stoi(batchsize),
                           "dynamic",
                           precision,
                           det_time_info,
                           cv_all_img_names.size());
    AutoLogger autolog_rec(rec_model_file,
                           runtime_device,
                           std::stoi(num_threads),
                           std::stoi(batchsize),
                           "dynamic",
                           precision,
                           rec_time_info,
                           cv_all_img_names.size());

    autolog_det.report();
    std::cout << std::endl;
    autolog_rec.report();
  }
}
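For context, the reporting branch above is gated by a benchmark flag passed on the command line. Following the argument template used by `func_test_system` in the new `test_lite_arm_cpp.sh` added later in this PR, a full system invocation would look roughly like the sketch below; the model names and paths are taken from the lite_params configs in this PR, and the exact positional mapping onto argv is an assumption, not the authoritative command.

```shell
# Hedged sketch (not the authoritative command line): argument order follows
# func_test_system in test_lite_arm_cpp.sh — det model, rec model, cls model,
# device, precision, threads, det batch size, image dir, config, rec dict, benchmark flag.
./ocr_db_crnn system \
    ch_PP-OCRv2_det_infer_opt.nb \
    ch_PP-OCRv2_rec_infer_opt.nb \
    ch_ppocr_mobile_v2.0_cls_infer_opt.nb \
    ARM_CPU FP32 1 1 \
    ./test_data/icdar2015_lite/text_localization/ch4_test_images/ \
    ./config.txt ./ppocr_keys_v1.txt True
```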
@@ -503,15 +555,15 @@ void det(int argc, char **argv) {
  auto img_vis = Visualization(srcimg, boxes);
  std::cout << boxes.size() << " bboxes have detected:" << std::endl;

  // for (int i=0; i<boxes.size(); i++){
  //   std::cout << "The " << i << " box:" << std::endl;
  //   for (int j=0; j<4; j++){
  //     for (int k=0; k<2; k++){
  //       std::cout << boxes[i][j][k] << "\t";
  //     }
  //   }
  //   std::cout << std::endl;
  // }
  for (int i=0; i<boxes.size(); i++){
    std::cout << "The " << i << " box:" << std::endl;
    for (int j=0; j<4; j++){
      for (int k=0; k<2; k++){
        std::cout << boxes[i][j][k] << "\t";
      }
    }
    std::cout << std::endl;
  }
  time_info[0] += times[0];
  time_info[1] += times[1];
  time_info[2] += times[2];
@@ -585,6 +637,9 @@ void rec(int argc, char **argv) {
      std::cout << i << "\t" << rec_text[i] << "\t" << rec_text_score[i]
                << std::endl;
    }
    time_info[0] += times[0];
    time_info[1] += times[1];
    time_info[2] += times[2];
  }
  // TODO: support autolog
  if (strcmp(argv[9], "True") == 0) {
@@ -1,12 +1,13 @@
===========================lite_params===========================
inference:./ocr_db_crnn det
infer_model:ch_PP-OCRv2_det_infer|ch_PP-OCRv2_det_slim_quant_infer
runtime_device:ARM_CPU
det_infer_model:ch_PP-OCRv2_det_infer|ch_PP-OCRv2_det_slim_quant_infer
null:null
null:null
--cpu_threads:1|4
--det_batch_size:1
--rec_batch_size:1
--system_batch_size:1
null:null
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/
--config_dir:./config.txt
--rec_dict_dir:./ppocr_keys_v1.txt
null:null
--benchmark:True
@@ -0,0 +1,13 @@
===========================lite_params===========================
inference:./ocr_db_crnn det
runtime_device:ARM_GPU_OPENCL
det_infer_model:ch_PP-OCRv2_det_infer|ch_PP-OCRv2_det_slim_quant_infer
null:null
null:null
--cpu_threads:1|4
--det_batch_size:1
null:null
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/
--config_dir:./config.txt
null:null
--benchmark:True
@@ -0,0 +1,13 @@
===========================lite_params===========================
inference:./ocr_db_crnn system
runtime_device:ARM_CPU
det_infer_model:ch_PP-OCRv2_det_infer|ch_PP-OCRv2_det_slim_quant_infer
rec_infer_model:ch_PP-OCRv2_rec_infer|ch_PP-OCRv2_rec_slim_quant_infer
cls_infer_model:ch_ppocr_mobile_v2.0_cls_infer|ch_ppocr_mobile_v2.0_cls_slim_infer
--cpu_threads:1|4
--det_batch_size:1
--rec_batch_size:1
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/
--config_dir:./config.txt
--rec_dict_dir:./ppocr_keys_v1.txt
--benchmark:True
@@ -0,0 +1,13 @@
===========================lite_params===========================
inference:./ocr_db_crnn system
runtime_device:ARM_GPU_OPENCL
det_infer_model:ch_PP-OCRv2_det_infer|ch_PP-OCRv2_det_slim_quant_infer
rec_infer_model:ch_PP-OCRv2_rec_infer|ch_PP-OCRv2_rec_slim_quant_infer
cls_infer_model:ch_ppocr_mobile_v2.0_cls_infer|ch_ppocr_mobile_v2.0_cls_slim_infer
--cpu_threads:1|4
--det_batch_size:1
--rec_batch_size:1
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/
--config_dir:./config.txt
--rec_dict_dir:./ppocr_keys_v1.txt
--benchmark:True
@@ -0,0 +1,91 @@
# Lite\_arm\_cpp prediction functional test

The main program of the Lite\_arm\_cpp prediction functional test is `test_lite_arm_cpp.sh`, which tests the C++ inference of models on ARM devices based on the Paddle-Lite prediction library.

## 1. Test summary

The Lite-side test samples currently support combinations of the following options:

**Field descriptions:**
- Model type: regular model (FP32) and quantized model (INT8)
- batch-size: 1 and 4
- threads: 1 and 4
- Number of predictors: single-predictor and multi-predictor prediction
- Prediction library source: downloaded or built from source
- Test hardware: ARM\_CPU / ARM\_GPU\_OPENCL

| Model type | batch-size | threads | Number of predictors | Prediction library source | Test hardware |
| :----: | :----: | :----: | :----: | :----: | :----: |
| regular / quantized model | 1 | 1/4 | single / multiple | downloaded | ARM\_CPU / ARM\_GPU\_OPENCL |


## 2. Test procedure

To set up the TIPC runtime environment, please refer to the [installation document](./install.md).

### 2.1 Functional test

First run `prepare_lite_cpp.sh`; it generates `test_lite.tar` in the current directory, containing the test data, the test models, and the executable used for prediction. Upload `test_lite.tar` to the phone under test, extract it in a terminal on the phone, enter the `test_lite` directory, and run `test_lite_arm_cpp.sh` to start the test. The log files with the `lite_*.log` suffix are finally generated in the `test_lite/output` directory.
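Spelled out as commands, the on-phone part of this flow would look roughly like the following sketch (assuming `test_lite.tar` has already been copied to the phone, for example with the termux setup referenced in the notes below; the config file name follows 2.1.1):

```shell
# Sketch of the on-phone steps described above
tar -xf test_lite.tar
cd test_lite
bash test_lite_arm_cpp.sh model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
# logs end up in ./output/ (i.e. test_lite/output) with the lite_*.log suffix
```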
#### 2.1.1 Test on ARM\_CPU

```shell
# Prepare data and models
bash test_tipc/prepare_lite_cpp.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt

# Test on the phone:
bash test_lite_arm_cpp.sh model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
```

#### 2.1.2 Test on ARM\_GPU\_OPENCL

```shell
# Prepare data and models
bash test_tipc/prepare_lite_cpp.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_gpu_opencl.txt

# Test on the phone:
bash test_lite_arm_cpp.sh model_linux_gpu_normal_normal_lite_cpp_arm_gpu_opencl.txt
```
**Note**:

1. Because running this project requires `bash` and other commands, it is hard to set up through the traditional adb approach. We therefore recommend connecting the phone to the computer by opening a virtual terminal on the phone; for the connection procedure, see [connecting an Android phone to a computer with termux](./termux_for_android.md).

2. To test the complete text detection and recognition pipeline, replace the config file with `test_tipc/configs/ppocr_system_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt` when running `prepare_lite_cpp.sh`, and use the same config file in the on-phone test step, as sketched below.
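Concretely, the full detection + recognition pipeline run described in note 2 would look like this (a sketch assembled from note 2 and the commands in 2.1.1; only the config path changes):

```shell
# Prepare data and models for the full detection + recognition pipeline
bash test_tipc/prepare_lite_cpp.sh ./test_tipc/configs/ppocr_system_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt

# Test on the phone, using the same config file
bash test_lite_arm_cpp.sh model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
```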
### 2.2 Run results

The status of each test run is printed under `./output/`.
On success, the output looks like:

```
Run successfully with command - ./ocr_db_crnn det ch_PP-OCRv2_det_infer_opt.nb ARM_CPU FP32 1 1 ./test_data/icdar2015_lite/text_localization/ch4_test_images/ ./config.txt True > ./output/lite_ch_PP-OCRv2_det_infer_opt.nb_runtime_device_ARM_CPU_precision_FP32_batchsize_1_threads_1.log 2>&1!
Run successfully with command xxx
...
```

On failure, the output looks like:

```
Run failed with command - ./ocr_db_crnn det ch_PP-OCRv2_det_infer_opt.nb ARM_CPU FP32 1 1 ./test_data/icdar2015_lite/text_localization/ch4_test_images/ ./config.txt True > ./output/lite_ch_PP-OCRv2_det_infer_opt.nb_runtime_device_ARM_CPU_precision_FP32_batchsize_1_threads_1.log 2>&1!
Run failed with command xxx
...
```

The `./output/` directory contains logs like the following, one per test configuration:

<img src="lite_log.png" width="1000">

In each log, autolog is called to print the following information:

<img src="lite_auto_log.png" width="1000">


## 3. More tutorials

This document only covers the functional test. For a more detailed tutorial on Lite-side prediction, please refer to: [Lite deployment](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/deploy/lite/readme.md).
@@ -1,71 +0,0 @@
# Lite\_arm\_cpu\_cpp prediction functional test

The main program of the Lite\_arm\_cpu\_cpp prediction functional test is `test_lite_arm_cpu_cpp.sh`, which tests the C++ inference of models on ARM CPU based on the Paddle-Lite prediction library.

## 1. Test summary

The Lite-side test samples currently support combinations of the following options:

**Field descriptions:**
- Model type: regular model (FP32) and quantized model (INT8)
- batch-size: 1 and 4
- threads: 1 and 4
- Number of predictors: multi-predictor and single-predictor prediction
- Prediction library source: downloaded or built from source

| Model type | batch-size | threads | Number of predictors | Prediction library source |
| :----: | :----: | :----: | :----: | :----: |
| regular / quantized model | 1 | 1/4 | 1 | downloaded |


## 2. Test procedure

To set up the TIPC runtime environment, please refer to the [installation document](./install.md).

### 2.1 Functional test

First run `prepare_lite.sh`; it generates `test_lite.tar` in the current directory, containing the test data, the test models, and the executable used for prediction. Upload `test_lite.tar` to the phone under test, extract it in a terminal on the phone, enter the `test_lite` directory, and run `test_lite_arm_cpu_cpp.sh` to start the test. The log files with the `lite_*.log` suffix are finally generated in the `test_lite/output` directory.

```shell
# Prepare data and models
bash test_tipc/prepare_lite.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt

# Test on the phone:
bash test_lite_arm_cpu_cpp.sh model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
```

**Note**: Because running this project requires `bash` and other commands, it is hard to set up through the traditional adb approach. We therefore recommend connecting the phone to the computer by opening a virtual terminal on the phone; for the connection procedure, see [connecting an Android phone to a computer with termux](./termux_for_android.md).

#### Run results

The status of each test run is printed under `./output/`.
On success, the output looks like:

```
Run successfully with command - ./ocr_db_crnn det ch_PP-OCRv2_det_infer_opt.nb ARM_CPU FP32 1 1 ./test_data/icdar2015_lite/text_localization/ch4_test_images/ ./config.txt True > ./output/lite_ch_PP-OCRv2_det_infer_opt.nb_runtime_device_ARM_CPU_precision_FP32_batchsize_1_threads_1.log 2>&1!
Run successfully with command xxx
...
```

On failure, the output looks like:

```
Run failed with command - ./ocr_db_crnn det ch_PP-OCRv2_det_infer_opt.nb ARM_CPU FP32 1 1 ./test_data/icdar2015_lite/text_localization/ch4_test_images/ ./config.txt True > ./output/lite_ch_PP-OCRv2_det_infer_opt.nb_runtime_device_ARM_CPU_precision_FP32_batchsize_1_threads_1.log 2>&1!
Run failed with command xxx
...
```

The `./output/` directory contains logs like the following, one per test configuration:

<img src="lite_log.png" width="1000">

In each log, autolog is called to print the following information:

<img src="lite_auto_log.png" width="1000">


## 3. More tutorials

This document only covers the functional test. For a more detailed tutorial on Lite-side prediction, please refer to: [Lite deployment](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/deploy/lite/readme.md).
@@ -6,22 +6,59 @@ dataline=$(cat ${FILENAME})
IFS=$'\n'
lines=(${dataline})
IFS=$'\n'
lite_model_list=$(func_parser_value "${lines[2]}")

inference_cmd=$(func_parser_value "${lines[1]}")
DEVICE=$(func_parser_value "${lines[2]}")
det_lite_model_list=$(func_parser_value "${lines[3]}")
rec_lite_model_list=$(func_parser_value "${lines[4]}")
cls_lite_model_list=$(func_parser_value "${lines[5]}")

if [[ $inference_cmd =~ "det" ]];then
    lite_model_list=${det_lite_model_list}
elif [[ $inference_cmd =~ "rec" ]];then
    lite_model_list=(${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
elif [[ $inference_cmd =~ "system" ]];then
    lite_model_list=(${det_lite_model_list[*]} ${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
else
    echo "inference_cmd is wrong, please check."
    exit 1
fi

if [ ${DEVICE} = "ARM_CPU" ];then
    valid_targets="arm"
    paddlelite_url="https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10-rc/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz"
    end_index="66"
elif [ ${DEVICE} = "ARM_GPU_OPENCL" ];then
    valid_targets="opencl"
    paddlelite_url="https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10-rc/inference_lite_lib.armv8.clang.with_exception.with_extra.with_cv.opencl.tar.gz"
    end_index="71"
else
    echo "DEVICE only support ARM_CPU, ARM_GPU_OPENCL."
    exit 2
fi

# prepare lite .nb model
pip install paddlelite==2.9
pip install paddlelite==2.10-rc
current_dir=${PWD}
IFS="|"
model_path=./inference_models

for model in ${lite_model_list[*]}; do
    inference_model_url=https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/${model}.tar
    if [[ $model =~ "PP-OCRv2" ]];then
        inference_model_url=https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/${model}.tar
    elif [[ $model =~ "v2.0" ]];then
        inference_model_url=https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/${model}.tar
    else
        echo "Model is wrong, please check."
        exit 3
    fi
    inference_model=${inference_model_url##*/}
    wget -nc -P ${model_path} ${inference_model_url}
    cd ${model_path} && tar -xf ${inference_model} && cd ../
    model_dir=${model_path}/${inference_model%.*}
    model_file=${model_dir}/inference.pdmodel
    param_file=${model_dir}/inference.pdiparams
    paddle_lite_opt --model_dir=${model_dir} --model_file=${model_file} --param_file=${param_file} --valid_targets=arm --optimize_out=${model_dir}_opt
    paddle_lite_opt --model_dir=${model_dir} --model_file=${model_file} --param_file=${param_file} --valid_targets=${valid_targets} --optimize_out=${model_dir}_opt
done
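As a concrete illustration of the conversion loop above, a single iteration for the OpenCL chain would expand roughly to the following (a sketch with hypothetical values substituted from the DEVICE branch and the model list in this PR's configs):

```shell
# Assumed expansion for one detection model under DEVICE=ARM_GPU_OPENCL
paddle_lite_opt --model_dir=./inference_models/ch_PP-OCRv2_det_infer \
                --model_file=./inference_models/ch_PP-OCRv2_det_infer/inference.pdmodel \
                --param_file=./inference_models/ch_PP-OCRv2_det_infer/inference.pdiparams \
                --valid_targets=opencl \
                --optimize_out=./inference_models/ch_PP-OCRv2_det_infer_opt
```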

# prepare test data
@@ -35,18 +72,19 @@ cd ./inference_models && tar -xf ${inference_model} && cd ../
cd ./test_data && tar -xf ${data_file} && rm ${data_file} && cd ../

# prepare lite env
paddlelite_url=https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.9/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz
paddlelite_zipfile=$(echo $paddlelite_url | awk -F "/" '{print $NF}')
paddlelite_file=${paddlelite_zipfile:0:66}
paddlelite_file=${paddlelite_zipfile:0:${end_index}}
wget ${paddlelite_url} && tar -xf ${paddlelite_zipfile}
mkdir -p ${paddlelite_file}/demo/cxx/ocr/test_lite
cp -r ${model_path}/*_opt.nb test_data ${paddlelite_file}/demo/cxx/ocr/test_lite
cp ppocr/utils/ppocr_keys_v1.txt deploy/lite/config.txt ${paddlelite_file}/demo/cxx/ocr/test_lite
cp -r ./deploy/lite/* ${paddlelite_file}/demo/cxx/ocr/
cp ${paddlelite_file}/cxx/lib/libpaddle_light_api_shared.so ${paddlelite_file}/demo/cxx/ocr/test_lite
cp ${FILENAME} test_tipc/test_lite_arm_cpu_cpp.sh test_tipc/common_func.sh ${paddlelite_file}/demo/cxx/ocr/test_lite
cp ${FILENAME} test_tipc/test_lite_arm_cpp.sh test_tipc/common_func.sh ${paddlelite_file}/demo/cxx/ocr/test_lite
cd ${paddlelite_file}/demo/cxx/ocr/
git clone https://github.com/cuicheng01/AutoLog.git

# make
make -j
sleep 1
make -j
@@ -81,10 +81,11 @@ test_tipc/
    ├── cpp_ppocr_det_mobile_results_fp16.txt      # pre-stored FP16 results of the C++ prediction of the mobile ppocr detection model
    ├── ...
    ├── prepare.sh                                 # downloads the data and models required to run test_*.sh
    ├── prepare_lite_cpp.sh                        # prepares the data, models and executable required to run test_*.sh on the phone
    ├── test_train_inference_python.sh             # main program for testing Python training and prediction
    ├── test_inference_cpp.sh                      # main program for testing C++ prediction
    ├── test_serving.sh                            # main program for testing serving-based deployment and prediction
    ├── test_lite_arm_cpu_cpp.sh                   # main program for testing Lite C++ prediction deployed on arm_cpu
    ├── test_lite_arm_cpp.sh                       # main program for testing Lite C++ prediction deployed on ARM
    ├── compare_results.py                         # checks whether the accuracy error between predictions in the logs and the pre-stored results stays within the allowed range
    └── readme.md                                  # usage documentation
```
@@ -123,5 +124,5 @@ test_tipc/
[test_train_inference_python usage](docs/test_train_inference_python.md)
[test_inference_cpp usage](docs/test_inference_cpp.md)
[test_serving usage](docs/test_serving.md)
[test_lite_arm_cpu_cpp usage](docs/test_lite_arm_cpu_cpp.md)
[test_lite_arm_cpp usage](docs/test_lite_arm_cpp.md)
[test_paddle2onnx usage](docs/test_paddle2onnx.md)
@@ -0,0 +1,159 @@
#!/bin/bash
source ./common_func.sh
export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH

FILENAME=$1
dataline=$(cat $FILENAME)
# parser params
IFS=$'\n'
lines=(${dataline})

# parser lite inference
inference_cmd=$(func_parser_value "${lines[1]}")
runtime_device=$(func_parser_value "${lines[2]}")
det_model_list=$(func_parser_value "${lines[3]}")
rec_model_list=$(func_parser_value "${lines[4]}")
cls_model_list=$(func_parser_value "${lines[5]}")
cpu_threads_list=$(func_parser_value "${lines[6]}")
det_batch_size_list=$(func_parser_value "${lines[7]}")
rec_batch_size_list=$(func_parser_value "${lines[8]}")
infer_img_dir_list=$(func_parser_value "${lines[9]}")
config_dir=$(func_parser_value "${lines[10]}")
rec_dict_dir=$(func_parser_value "${lines[11]}")
benchmark_value=$(func_parser_value "${lines[12]}")

# validate inference_cmd and collect the models it involves
if [[ $inference_cmd =~ "det" ]]; then
    lite_model_list=${det_model_list}
elif [[ $inference_cmd =~ "rec" ]]; then
    lite_model_list=(${rec_model_list[*]} ${cls_model_list[*]})
elif [[ $inference_cmd =~ "system" ]]; then
    lite_model_list=(${det_model_list[*]} ${rec_model_list[*]} ${cls_model_list[*]})
else
    echo "inference_cmd is wrong, please check."
    exit 1
fi

LOG_PATH="./output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results.log"


function func_test_det(){
    IFS='|'
    _script=$1
    _det_model=$2
    _log_path=$3
    _img_dir=$4
    _config=$5
    if [[ $_det_model =~ "slim" ]]; then
        precision="INT8"
    else
        precision="FP32"
    fi

    # lite inference
    for num_threads in ${cpu_threads_list[*]}; do
        for det_batchsize in ${det_batch_size_list[*]}; do
            _save_log_path="${_log_path}/lite_${_det_model}_runtime_device_${runtime_device}_precision_${precision}_det_batchsize_${det_batchsize}_threads_${num_threads}.log"
            command="${_script} ${_det_model} ${runtime_device} ${precision} ${num_threads} ${det_batchsize} ${_img_dir} ${_config} ${benchmark_value} > ${_save_log_path} 2>&1"
            eval ${command}
            status_check $? "${command}" "${status_log}"
        done
    done
}

function func_test_rec(){
    IFS='|'
    _script=$1
    _rec_model=$2
    _cls_model=$3
    _log_path=$4
    _img_dir=$5
    _config=$6
    _rec_dict_dir=$7

    if [[ $_rec_model =~ "slim" ]]; then
        _precision="INT8"
    else
        _precision="FP32"
    fi

    # lite inference
    for num_threads in ${cpu_threads_list[*]}; do
        for rec_batchsize in ${rec_batch_size_list[*]}; do
            _save_log_path="${_log_path}/lite_${_rec_model}_${_cls_model}_runtime_device_${runtime_device}_precision_${_precision}_rec_batchsize_${rec_batchsize}_threads_${num_threads}.log"
            command="${_script} ${_rec_model} ${_cls_model} ${runtime_device} ${_precision} ${num_threads} ${rec_batchsize} ${_img_dir} ${_config} ${_rec_dict_dir} ${benchmark_value} > ${_save_log_path} 2>&1"
            eval ${command}
            status_check $? "${command}" "${status_log}"
        done
    done
}

function func_test_system(){
    IFS='|'
    _script=$1
    _det_model=$2
    _rec_model=$3
    _cls_model=$4
    _log_path=$5
    _img_dir=$6
    _config=$7
    _rec_dict_dir=$8
    if [[ $_det_model =~ "slim" ]]; then
        _precision="INT8"
    else
        _precision="FP32"
    fi

    # lite inference
    for num_threads in ${cpu_threads_list[*]}; do
        for det_batchsize in ${det_batch_size_list[*]}; do
            for rec_batchsize in ${rec_batch_size_list[*]}; do
                _save_log_path="${_log_path}/lite_${_det_model}_${_rec_model}_${_cls_model}_runtime_device_${runtime_device}_precision_${_precision}_det_batchsize_${det_batchsize}_rec_batchsize_${rec_batchsize}_threads_${num_threads}.log"
                command="${_script} ${_det_model} ${_rec_model} ${_cls_model} ${runtime_device} ${_precision} ${num_threads} ${det_batchsize} ${_img_dir} ${_config} ${_rec_dict_dir} ${benchmark_value} > ${_save_log_path} 2>&1"
                eval ${command}
                status_check $? "${command}" "${status_log}"
            done
        done
    done
}


echo "################### run test ###################"

if [[ $inference_cmd =~ "det" ]]; then
    IFS="|"
    det_model_list=(${det_model_list[*]})

    for i in {0..1}; do
        #run lite inference
        for img_dir in ${infer_img_dir_list[*]}; do
            func_test_det "${inference_cmd}" "${det_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${config_dir}"
        done
    done

elif [[ $inference_cmd =~ "rec" ]]; then
    IFS="|"
    rec_model_list=(${rec_model_list[*]})
    cls_model_list=(${cls_model_list[*]})

    for i in {0..1}; do
        #run lite inference
        for img_dir in ${infer_img_dir_list[*]}; do
            func_test_rec "${inference_cmd}" "${rec_model_list[i]}_opt.nb" "${cls_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${config_dir}" "${rec_dict_dir}"
        done
    done

elif [[ $inference_cmd =~ "system" ]]; then
    IFS="|"
    det_model_list=(${det_model_list[*]})
    rec_model_list=(${rec_model_list[*]})
    cls_model_list=(${cls_model_list[*]})

    for i in {0..1}; do
        #run lite inference
        for img_dir in ${infer_img_dir_list[*]}; do
            func_test_system "${inference_cmd}" "${det_model_list[i]}_opt.nb" "${rec_model_list[i]}_opt.nb" "${cls_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${config_dir}" "${rec_dict_dir}"
        done
    done
fi
@@ -1,60 +0,0 @@
#!/bin/bash
source ./common_func.sh
export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH

FILENAME=$1
dataline=$(cat $FILENAME)
# parser params
IFS=$'\n'
lines=(${dataline})

# parser lite inference
lite_inference_cmd=$(func_parser_value "${lines[1]}")
lite_model_dir_list=$(func_parser_value "${lines[2]}")
runtime_device=$(func_parser_value "${lines[3]}")
lite_cpu_threads_list=$(func_parser_value "${lines[4]}")
lite_batch_size_list=$(func_parser_value "${lines[5]}")
lite_infer_img_dir_list=$(func_parser_value "${lines[8]}")
lite_config_dir=$(func_parser_value "${lines[9]}")
lite_rec_dict_dir=$(func_parser_value "${lines[10]}")
lite_benchmark_value=$(func_parser_value "${lines[11]}")

LOG_PATH="./output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results.log"


function func_lite(){
    IFS='|'
    _script=$1
    _lite_model=$2
    _log_path=$3
    _img_dir=$4
    _config=$5
    if [[ $lite_model =~ "slim" ]]; then
        precision="INT8"
    else
        precision="FP32"
    fi

    # lite inference
    for num_threads in ${lite_cpu_threads_list[*]}; do
        for batchsize in ${lite_batch_size_list[*]}; do
            _save_log_path="${_log_path}/lite_${_lite_model}_runtime_device_${runtime_device}_precision_${precision}_batchsize_${batchsize}_threads_${num_threads}.log"
            command="${_script} ${_lite_model} ${runtime_device} ${precision} ${num_threads} ${batchsize} ${_img_dir} ${_config} ${lite_benchmark_value} > ${_save_log_path} 2>&1"
            eval ${command}
            status_check $? "${command}" "${status_log}"
        done
    done
}


echo "################### run test ###################"
IFS="|"
for lite_model in ${lite_model_dir_list[*]}; do
    #run lite inference
    for img_dir in ${lite_infer_img_dir_list[*]}; do
        func_lite "${lite_inference_cmd}" "${lite_model}_opt.nb" "${LOG_PATH}" "${img_dir}" "${lite_config_dir}"
    done
done