Merge branch 'dygraph' of https://github.com/PaddlePaddle/PaddleOCR into PTDN_ppocrv2
Commit: ce92ff2e86
@@ -12,12 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include <chrono>
#include "paddle_api.h" // NOLINT
#include "paddle_place.h"

#include "cls_process.h"
#include "crnn_process.h"
#include "db_post_process.h"
#include "AutoLog/auto_log/lite_autolog.h"

using namespace paddle::lite_api; // NOLINT
using namespace std;
@@ -27,7 +29,7 @@ void NeonMeanScale(const float *din, float *dout, int size,
                   const std::vector<float> mean,
                   const std::vector<float> scale) {
  if (mean.size() != 3 || scale.size() != 3) {
    std::cerr << "[ERROR] mean or scale size must equal to 3\n";
    std::cerr << "[ERROR] mean or scale size must equal to 3" << std::endl;
    exit(1);
  }
  float32x4_t vmean0 = vdupq_n_f32(mean[0]);
@@ -159,7 +161,8 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,
                 std::vector<float> &rec_text_score,
                 std::vector<std::string> charactor_dict,
                 std::shared_ptr<PaddlePredictor> predictor_cls,
                 int use_direction_classify) {
                 int use_direction_classify,
                 std::vector<double> *times) {
  std::vector<float> mean = {0.5f, 0.5f, 0.5f};
  std::vector<float> scale = {1 / 0.5f, 1 / 0.5f, 1 / 0.5f};

@@ -226,14 +229,15 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,

std::vector<std::vector<std::vector<int>>>
RunDetModel(std::shared_ptr<PaddlePredictor> predictor, cv::Mat img,
            std::map<std::string, double> Config) {
            std::map<std::string, double> Config, std::vector<double> *times) {
  // Read img
  int max_side_len = int(Config["max_side_len"]);
  int det_db_use_dilate = int(Config["det_db_use_dilate"]);

  cv::Mat srcimg;
  img.copyTo(srcimg);

  auto preprocess_start = std::chrono::steady_clock::now();
  std::vector<float> ratio_hw;
  img = DetResizeImg(img, max_side_len, ratio_hw);
  cv::Mat img_fp;
@@ -248,8 +252,10 @@ RunDetModel(std::shared_ptr<PaddlePredictor> predictor, cv::Mat img,
  std::vector<float> scale = {1 / 0.229f, 1 / 0.224f, 1 / 0.225f};
  const float *dimg = reinterpret_cast<const float *>(img_fp.data);
  NeonMeanScale(dimg, data0, img_fp.rows * img_fp.cols, mean, scale);
  auto preprocess_end = std::chrono::steady_clock::now();

  // Run predictor
  auto inference_start = std::chrono::steady_clock::now();
  predictor->Run();

  // Get output and post process
@@ -257,8 +263,10 @@ RunDetModel(std::shared_ptr<PaddlePredictor> predictor, cv::Mat img,
      std::move(predictor->GetOutput(0)));
  auto *outptr = output_tensor->data<float>();
  auto shape_out = output_tensor->shape();
  auto inference_end = std::chrono::steady_clock::now();

  // Save output
  auto postprocess_start = std::chrono::steady_clock::now();
  float pred[shape_out[2] * shape_out[3]];
  unsigned char cbuf[shape_out[2] * shape_out[3]];

@@ -287,14 +295,35 @@ RunDetModel(std::shared_ptr<PaddlePredictor> predictor, cv::Mat img,

  std::vector<std::vector<std::vector<int>>> filter_boxes =
      FilterTagDetRes(boxes, ratio_hw[0], ratio_hw[1], srcimg);
  auto postprocess_end = std::chrono::steady_clock::now();

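  // record preprocess / inference / postprocess latency in milliseconds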
  std::chrono::duration<float> preprocess_diff = preprocess_end - preprocess_start;
  times->push_back(double(preprocess_diff.count() * 1000));
  std::chrono::duration<float> inference_diff = inference_end - inference_start;
  times->push_back(double(inference_diff.count() * 1000));
  std::chrono::duration<float> postprocess_diff = postprocess_end - postprocess_start;
  times->push_back(double(postprocess_diff.count() * 1000));

  return filter_boxes;
}

std::shared_ptr<PaddlePredictor> loadModel(std::string model_file) {
std::shared_ptr<PaddlePredictor> loadModel(std::string model_file, std::string power_mode, int num_threads) {
  MobileConfig config;
  config.set_model_from_file(model_file);

  if (power_mode == "LITE_POWER_HIGH") {
    config.set_power_mode(LITE_POWER_HIGH);
  } else {
    if (power_mode == "LITE_POWER_LOW") {
      config.set_power_mode(LITE_POWER_LOW);
    } else {
      std::cerr << "Only support LITE_POWER_HIGH or LITE_POWER_LOW." << std::endl;
      exit(1);
    }
  }

  config.set_threads(num_threads);

  std::shared_ptr<PaddlePredictor> predictor =
      CreatePaddlePredictor<MobileConfig>(config);
  return predictor;
@@ -354,60 +383,255 @@ std::map<std::string, double> LoadConfigTxt(std::string config_path) {
  return dict;
}

int main(int argc, char **argv) {
  if (argc < 5) {
    std::cerr << "[ERROR] usage: " << argv[0]
              << " det_model_file cls_model_file rec_model_file image_path "
                 "charactor_dict\n";
void check_params(int argc, char **argv) {
  if (argc <= 1 || (strcmp(argv[1], "det") != 0 && strcmp(argv[1], "rec") != 0 && strcmp(argv[1], "system") != 0)) {
    std::cerr << "Please choose one mode of [det, rec, system] !" << std::endl;
    exit(1);
  }
  std::string det_model_file = argv[1];
  std::string rec_model_file = argv[2];
  std::string cls_model_file = argv[3];
  std::string img_path = argv[4];
  std::string dict_path = argv[5];
if (strcmp(argv[1], "det") == 0) {
|
||||
if (argc < 9){
|
||||
std::cerr << "[ERROR] usage:" << argv[0]
|
||||
<< " det det_model num_threads batchsize power_mode img_dir det_config lite_benchmark_value" << std::endl;
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
if (strcmp(argv[1], "rec") == 0) {
|
||||
if (argc < 9){
|
||||
std::cerr << "[ERROR] usage:" << argv[0]
|
||||
<< " rec rec_model num_threads batchsize power_mode img_dir key_txt lite_benchmark_value" << std::endl;
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
if (strcmp(argv[1], "system") == 0) {
|
||||
if (argc < 12){
|
||||
std::cerr << "[ERROR] usage:" << argv[0]
|
||||
<< " system det_model rec_model clas_model num_threads batchsize power_mode img_dir det_config key_txt lite_benchmark_value" << std::endl;
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
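// note: det()/rec() below also read argv[3] as precision and argv[9] as the
// benchmark flag, which the usage strings above do not list or verify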
void system(char **argv) {
  std::string det_model_file = argv[2];
  std::string rec_model_file = argv[3];
  std::string cls_model_file = argv[4];
  std::string precision = argv[5];
  std::string num_threads = argv[6];
  std::string batchsize = argv[7];
  std::string power_mode = argv[8];
  std::string img_dir = argv[9];
  std::string det_config_path = argv[10];
  std::string dict_path = argv[11];

  if (strcmp(argv[5], "FP32") != 0 && strcmp(argv[5], "INT8") != 0) {
    std::cerr << "Only support FP32 or INT8." << std::endl;
    exit(1);
  }

  std::vector<cv::String> cv_all_img_names;
  cv::glob(img_dir, cv_all_img_names);

  //// load config from txt file
  auto Config = LoadConfigTxt("./config.txt");
  auto Config = LoadConfigTxt(det_config_path);
  int use_direction_classify = int(Config["use_direction_classify"]);

  auto start = std::chrono::system_clock::now();

  auto det_predictor = loadModel(det_model_file);
  auto rec_predictor = loadModel(rec_model_file);
  auto cls_predictor = loadModel(cls_model_file);

  auto charactor_dict = ReadDict(dict_path);
  charactor_dict.insert(charactor_dict.begin(), "#"); // blank char for ctc
  charactor_dict.push_back(" ");

  cv::Mat srcimg = cv::imread(img_path, cv::IMREAD_COLOR);
  auto boxes = RunDetModel(det_predictor, srcimg, Config);
  auto det_predictor = loadModel(det_model_file, power_mode, std::stoi(num_threads));
  auto rec_predictor = loadModel(rec_model_file, power_mode, std::stoi(num_threads));
  auto cls_predictor = loadModel(cls_model_file, power_mode, std::stoi(num_threads));

  std::vector<std::string> rec_text;
  std::vector<float> rec_text_score;
  for (int i = 0; i < cv_all_img_names.size(); ++i) {
    std::cout << "The predict img: " << cv_all_img_names[i] << std::endl;
    cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);

    RunRecModel(boxes, srcimg, rec_predictor, rec_text, rec_text_score,
                charactor_dict, cls_predictor, use_direction_classify);
    if (!srcimg.data) {
      std::cerr << "[ERROR] image read failed! image path: " << cv_all_img_names[i] << std::endl;
      exit(1);
    }

    auto end = std::chrono::system_clock::now();
    auto duration =
        std::chrono::duration_cast<std::chrono::microseconds>(end - start);
    std::vector<double> det_times;
    auto boxes = RunDetModel(det_predictor, srcimg, Config, &det_times);

    std::vector<std::string> rec_text;
    std::vector<float> rec_text_score;

    std::vector<double> rec_times;
    RunRecModel(boxes, srcimg, rec_predictor, rec_text, rec_text_score,
                charactor_dict, cls_predictor, use_direction_classify, &rec_times);

    //// visualization
    auto img_vis = Visualization(srcimg, boxes);

    //// print recognized text
    for (int i = 0; i < rec_text.size(); i++) {
      std::cout << i << "\t" << rec_text[i] << "\t" << rec_text_score[i]
                << std::endl;
    }
  }
}

  //// visualization
  auto img_vis = Visualization(srcimg, boxes);

void det(int argc, char **argv) {
  std::string det_model_file = argv[2];
  std::string precision = argv[3];
  std::string num_threads = argv[4];
  std::string batchsize = argv[5];
  std::string power_mode = argv[6];
  std::string img_dir = argv[7];
  std::string det_config_path = argv[8];

  //// print recognized text
  for (int i = 0; i < rec_text.size(); i++) {
    std::cout << i << "\t" << rec_text[i] << "\t" << rec_text_score[i]
              << std::endl;
  if (strcmp(argv[3], "FP32") != 0 && strcmp(argv[3], "INT8") != 0) {
    std::cerr << "Only support FP32 or INT8." << std::endl;
    exit(1);
  }

std::cout << "花费了"
|
||||
<< double(duration.count()) *
|
||||
std::chrono::microseconds::period::num /
|
||||
std::chrono::microseconds::period::den
|
||||
<< "秒" << std::endl;
|
||||
  std::vector<cv::String> cv_all_img_names;
  cv::glob(img_dir, cv_all_img_names);

  //// load config from txt file
  auto Config = LoadConfigTxt(det_config_path);

  auto det_predictor = loadModel(det_model_file, power_mode, std::stoi(num_threads));

  std::vector<double> time_info = {0, 0, 0};
  for (int i = 0; i < cv_all_img_names.size(); ++i) {
    std::cout << "The predict img: " << cv_all_img_names[i] << std::endl;
    cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);

    if (!srcimg.data) {
      std::cerr << "[ERROR] image read failed! image path: " << cv_all_img_names[i] << std::endl;
      exit(1);
    }

    std::vector<double> times;
    auto boxes = RunDetModel(det_predictor, srcimg, Config, &times);

    //// visualization
    auto img_vis = Visualization(srcimg, boxes);
    std::cout << boxes.size() << " bboxes have been detected:" << std::endl;

    // for (int i=0; i<boxes.size(); i++){
    //   std::cout << "The " << i << " box:" << std::endl;
    //   for (int j=0; j<4; j++){
    //     for (int k=0; k<2; k++){
    //       std::cout << boxes[i][j][k] << "\t";
    //     }
    //   }
    //   std::cout << std::endl;
    // }
    time_info[0] += times[0];
    time_info[1] += times[1];
    time_info[2] += times[2];
  }

  if (strcmp(argv[9], "True") == 0) {
    AutoLogger autolog(det_model_file,
                       0,
                       0,
                       0,
                       std::stoi(num_threads),
                       std::stoi(batchsize),
                       "dynamic",
                       precision,
                       power_mode,
                       time_info,
                       cv_all_img_names.size());
    autolog.report();
  }
}

void rec(int argc, char **argv) {
  std::string rec_model_file = argv[2];
  std::string precision = argv[3];
  std::string num_threads = argv[4];
  std::string batchsize = argv[5];
  std::string power_mode = argv[6];
  std::string img_dir = argv[7];
  std::string dict_path = argv[8];

  if (strcmp(argv[3], "FP32") != 0 && strcmp(argv[3], "INT8") != 0) {
    std::cerr << "Only support FP32 or INT8." << std::endl;
    exit(1);
  }

  std::vector<cv::String> cv_all_img_names;
  cv::glob(img_dir, cv_all_img_names);

  auto charactor_dict = ReadDict(dict_path);
  charactor_dict.insert(charactor_dict.begin(), "#"); // blank char for ctc
  charactor_dict.push_back(" ");

  auto rec_predictor = loadModel(rec_model_file, power_mode, std::stoi(num_threads));

  std::shared_ptr<PaddlePredictor> cls_predictor;

  std::vector<double> time_info = {0, 0, 0};
  for (int i = 0; i < cv_all_img_names.size(); ++i) {
    std::cout << "The predict img: " << cv_all_img_names[i] << std::endl;
    cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);

    if (!srcimg.data) {
      std::cerr << "[ERROR] image read failed! image path: " << cv_all_img_names[i] << std::endl;
      exit(1);
    }

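    // rec-only mode: use the four image corners as a single text box covering the whole image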
    int width = srcimg.cols;
    int height = srcimg.rows;
    std::vector<int> upper_left = {0, 0};
    std::vector<int> upper_right = {width, 0};
    std::vector<int> lower_right = {width, height};
    std::vector<int> lower_left = {0, height};
    std::vector<std::vector<int>> box = {upper_left, upper_right, lower_right, lower_left};
    std::vector<std::vector<std::vector<int>>> boxes = {box};

    std::vector<std::string> rec_text;
    std::vector<float> rec_text_score;
    std::vector<double> times;
    RunRecModel(boxes, srcimg, rec_predictor, rec_text, rec_text_score,
                charactor_dict, cls_predictor, 0, &times);

    //// print recognized text
    for (int i = 0; i < rec_text.size(); i++) {
      std::cout << i << "\t" << rec_text[i] << "\t" << rec_text_score[i]
                << std::endl;
    }
  }
  // TODO: support autolog
  if (strcmp(argv[9], "True") == 0) {
    AutoLogger autolog(rec_model_file,
                       0,
                       0,
                       0,
                       std::stoi(num_threads),
                       std::stoi(batchsize),
                       "dynamic",
                       precision,
                       power_mode,
                       time_info,
                       cv_all_img_names.size());
    autolog.report();
  }
}

int main(int argc, char **argv) {
  check_params(argc, argv);
  std::cout << "mode: " << argv[1] << endl;

  if (strcmp(argv[1], "system") == 0) {
    system(argv);
  }

  if (strcmp(argv[1], "det") == 0) {
    det(argc, argv);
  }

  if (strcmp(argv[1], "rec") == 0) {
    rec(argc, argv);
  }

  return 0;
}
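// Example invocation, as assembled by test_tipc/test_lite.sh from the lite_params block below:
//   ./ocr_db_crnn det ./models/ch_ppocr_mobile_v2.0_det_opt.nb FP32 1 1 LITE_POWER_HIGH \
//     ./test_data/icdar2015_lite/text_localization/ch4_test_images/ ./config.txt True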
@@ -98,3 +98,13 @@ null:null
--benchmark:True
null:null
null:null
===========================lite_params===========================
inference:./ocr_db_crnn det
infer_model:./models/ch_ppocr_mobile_v2.0_det_opt.nb|./models/ch_ppocr_mobile_v2.0_det_slim_opt.nb
--cpu_threads:1|4
--batch_size:1
--power_mode:LITE_POWER_HIGH|LITE_POWER_LOW
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/|./test_data/icdar2015_lite/text_localization/ch4_test_images/img_233.jpg
--config_dir:./config.txt
--rec_dict_dir:./ppocr_keys_v1.txt
--benchmark:True
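Values separated by `|` (for example `--cpu_threads:1|4`) are sweep lists; `test_lite.sh` below iterates over every combination.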
@@ -15,15 +15,15 @@ The main program for C++ inference function testing is `test_inference_cpp.sh`, which can test based on

## 2. Test workflow
### 2.1 Function tests
First run `prepare.sh` to prepare the data and models, then run `test_inference_cpp.sh` to run the tests; log files with the `cpp_infer_*.log` suffix are generated in the ```PTDN/output``` directory.
First run `prepare.sh` to prepare the data and models, then run `test_inference_cpp.sh` to run the tests; log files with the `cpp_infer_*.log` suffix are generated in the ```test_tipc/output``` directory.

```shell
bash PTDN/prepare.sh ./PTDN/configs/ppocr_det_mobile_params.txt "cpp_infer"
bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt "cpp_infer"

# Usage 1:
bash PTDN/test_inference_cpp.sh ./PTDN/configs/ppocr_det_mobile_params.txt
bash test_tipc/test_inference_cpp.sh ./test_tipc/configs/ppocr_det_mobile_params.txt
# Usage 2: run prediction on a specified GPU card; the third argument is the GPU card id
bash PTDN/test_inference_cpp.sh ./PTDN/configs/ppocr_det_mobile_params.txt '1'
bash test_tipc/test_inference_cpp.sh ./test_tipc/configs/ppocr_det_mobile_params.txt '1'
```

@@ -37,12 +37,12 @@ bash PTDN/test_inference_cpp.sh ./PTDN/configs/ppocr_det_mobile_params.txt '1'
#### Usage
Run the command:
```shell
python3.7 PTDN/compare_results.py --gt_file=./PTDN/results/cpp_*.txt --log_file=./PTDN/output/cpp_*.log --atol=1e-3 --rtol=1e-3
python3.7 test_tipc/compare_results.py --gt_file=./test_tipc/results/cpp_*.txt --log_file=./test_tipc/output/cpp_*.log --atol=1e-3 --rtol=1e-3
```

Parameter description:
- gt_file: path to the previously saved reference prediction results; *.txt files are supported and indexed automatically; by default the files are stored under PTDN/result/
- log_file: path to the prediction log saved by running PTDN/test_inference_cpp.sh in infer mode; the log contains prediction results such as text boxes, predicted text, and categories; cpp_infer_*.log files are also accepted
- gt_file: path to the previously saved reference prediction results; *.txt files are supported and indexed automatically; by default the files are stored under test_tipc/result/
- log_file: path to the prediction log saved by running test_tipc/test_inference_cpp.sh in infer mode; the log contains prediction results such as text boxes, predicted text, and categories; cpp_infer_*.log files are also accepted
- atol: absolute tolerance
- rtol: relative tolerance
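The exact comparison logic lives in `compare_results.py`; presumably it follows numpy-style tolerance semantics. A minimal sketch of such a check, assuming the logs are already parsed into arrays (the helper name here is hypothetical):

```python
import numpy as np

def results_match(gt, pred, atol=1e-3, rtol=1e-3):
    # element-wise check: |pred - gt| <= atol + rtol * |gt|
    return np.allclose(np.asarray(pred), np.asarray(gt), atol=atol, rtol=rtol)
```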
@@ -15,18 +15,18 @@ The main program for PaddleServing inference function testing is `test_serving.sh`, which can test

## 2. Test workflow
### 2.1 Function tests
First run `prepare.sh` to prepare the data and models, then run `test_serving.sh` to run the tests; log files with the `serving_infer_*.log` suffix are generated in the ```PTDN/output``` directory.
First run `prepare.sh` to prepare the data and models, then run `test_serving.sh` to run the tests; log files with the `serving_infer_*.log` suffix are generated in the ```test_tipc/output``` directory.

```shell
bash PTDN/prepare.sh ./PTDN/configs/ppocr_det_mobile_params.txt "serving_infer"
bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt "serving_infer"

# Usage:
bash PTDN/test_serving.sh ./PTDN/configs/ppocr_det_mobile_params.txt
bash test_tipc/test_serving.sh ./test_tipc/configs/ppocr_det_mobile_params.txt
```

#### Results

The status of each test run is printed in `PTDN/output/results_serving.log`:
The status of each test run is printed in `test_tipc/output/results_serving.log`:
On success it prints:

```
@@ -44,7 +44,7 @@ Run failed with command - xxxxx
...
```

Detailed prediction results are stored under the PTDN/output/ folder; for example, `server_infer_gpu_usetrt_True_precision_fp16_batchsize_1.log` returns the coordinates of the detected boxes:
Detailed prediction results are stored under the test_tipc/output/ folder; for example, `server_infer_gpu_usetrt_True_precision_fp16_batchsize_1.log` returns the coordinates of the detected boxes:

```
{'err_no': 0, 'err_msg': '', 'key': ['dt_boxes'], 'value': ['[[[ 78. 642.]\n [409. 640.]\n [409. 657.]\n
@@ -46,42 +46,42 @@


### 2.2 Function tests
First run `prepare.sh` to prepare the data and models, then run `test_train_inference_python.sh` to run the tests; log files in the `python_infer_*.log` format are generated in the ```PTDN/output``` directory.
First run `prepare.sh` to prepare the data and models, then run `test_train_inference_python.sh` to run the tests; log files in the `python_infer_*.log` format are generated in the ```test_tipc/output``` directory.


`test_train_inference_python.sh` supports 5 run modes; each mode runs on different data and tests speed and accuracy respectively:

- Mode 1 (lite_train_infer): train with a small amount of data, to quickly verify that the training-to-inference pipeline runs end to end; accuracy and speed are not checked;
```shell
bash PTDN/prepare.sh ./PTDN/configs/ppocr_det_mobile_params.txt 'lite_train_infer'
bash PTDN/test_train_inference_python.sh ./PTDN/configs/ppocr_det_mobile_params.txt 'lite_train_infer'
bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'lite_train_infer'
bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'lite_train_infer'
```

- Mode 2 (whole_infer): train with a small amount of data and predict on a moderate amount, to verify that the trained model runs inference and that inference speed is reasonable;
```shell
bash PTDN/prepare.sh ./PTDN/configs/ppocr_det_mobile_params.txt 'whole_infer'
bash PTDN/test_train_inference_python.sh ./PTDN/configs/ppocr_det_mobile_params.txt 'whole_infer'
bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'whole_infer'
bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'whole_infer'
```

- Mode 3 (infer): no training; predict on the full dataset, exercising open-source model evaluation and dynamic-to-static conversion, and checking the inference model's prediction time and accuracy;
```shell
bash PTDN/prepare.sh ./PTDN/configs/ppocr_det_mobile_params.txt 'infer'
bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'infer'
# Usage 1:
bash PTDN/test_train_inference_python.sh ./PTDN/configs/ppocr_det_mobile_params.txt 'infer'
bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'infer'
# Usage 2: run prediction on a specified GPU card; the third argument is the GPU card id
bash PTDN/test_train_inference_python.sh ./PTDN/configs/ppocr_det_mobile_params.txt 'infer' '1'
bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'infer' '1'
```

- Mode 4 (whole_train_infer): CE: train and predict on the full dataset, verifying training accuracy, inference accuracy, and inference speed;
```shell
bash PTDN/prepare.sh ./PTDN/configs/ppocr_det_mobile_params.txt 'whole_train_infer'
bash PTDN/test_train_inference_python.sh ./PTDN/configs/ppocr_det_mobile_params.txt 'whole_train_infer'
bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'whole_train_infer'
bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'whole_train_infer'
```

- Mode 5 (klquant_infer): test offline (post-training) quantization;
```shell
bash PTDN/prepare.sh ./PTDN/configs/ppocr_det_mobile_params.txt 'klquant_infer'
bash PTDN/test_train_inference_python.sh PTDN/configs/ppocr_det_mobile_params.txt 'klquant_infer'
bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'klquant_infer'
bash test_tipc/test_train_inference_python.sh test_tipc/configs/ppocr_det_mobile_params.txt 'klquant_infer'
```

@@ -95,12 +95,12 @@ bash PTDN/test_train_inference_python.sh PTDN/configs/ppocr_det_mobile_params.tx
#### Usage
Run the command:
```shell
python3.7 PTDN/compare_results.py --gt_file=./PTDN/results/python_*.txt --log_file=./PTDN/output/python_*.log --atol=1e-3 --rtol=1e-3
python3.7 test_tipc/compare_results.py --gt_file=./test_tipc/results/python_*.txt --log_file=./test_tipc/output/python_*.log --atol=1e-3 --rtol=1e-3
```

Parameter description:
- gt_file: path to the previously saved reference prediction results; *.txt files are supported and indexed automatically; by default the files are stored under PTDN/result/
- log_file: path to the prediction log saved by running PTDN/test_train_inference_python.sh in infer mode; the log contains prediction results such as text boxes, predicted text, and categories; python_infer_*.log files are also accepted
- gt_file: path to the previously saved reference prediction results; *.txt files are supported and indexed automatically; by default the files are stored under test_tipc/result/
- log_file: path to the prediction log saved by running test_tipc/test_train_inference_python.sh in infer mode; the log contains prediction results such as text boxes, predicted text, and categories; python_infer_*.log files are also accepted
- atol: absolute tolerance
- rtol: relative tolerance

@@ -2,7 +2,7 @@
FILENAME=$1

# MODE must be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer',
# 'cpp_infer', 'serving_infer', 'klquant_infer']
# 'cpp_infer', 'serving_infer', 'klquant_infer', 'lite_infer']

MODE=$2

@@ -160,3 +160,37 @@ if [ ${MODE} = "serving_infer" ];then
    wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar
    cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_det_infer.tar && cd ../
fi


if [ ${MODE} = "lite_infer" ];then
    # prepare lite nb model and test data
    current_dir=${PWD}
    wget -nc -P ./models https://paddleocr.bj.bcebos.com/dygraph_v2.0/lite/ch_ppocr_mobile_v2.0_det_opt.nb
    wget -nc -P ./models https://paddleocr.bj.bcebos.com/dygraph_v2.0/lite/ch_ppocr_mobile_v2.0_det_slim_opt.nb
    wget -nc -P ./test_data https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar
    cd ./test_data && tar -xf icdar2015_lite.tar && rm icdar2015_lite.tar && cd ../
    # prepare lite env
    export http_proxy=http://172.19.57.45:3128
    export https_proxy=http://172.19.57.45:3128
    paddlelite_url=https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.9/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz
    paddlelite_zipfile=$(echo $paddlelite_url | awk -F "/" '{print $NF}')
    paddlelite_file=inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv
    wget ${paddlelite_url}
    tar -xf ${paddlelite_zipfile}
    mkdir -p ${paddlelite_file}/demo/cxx/ocr/test_lite
    mv models test_data ${paddlelite_file}/demo/cxx/ocr/test_lite
    cp ppocr/utils/ppocr_keys_v1.txt deploy/lite/config.txt ${paddlelite_file}/demo/cxx/ocr/test_lite
    cp ./deploy/lite/* ${paddlelite_file}/demo/cxx/ocr/
    cp ${paddlelite_file}/cxx/lib/libpaddle_light_api_shared.so ${paddlelite_file}/demo/cxx/ocr/test_lite
    cp PTDN/configs/ppocr_det_mobile_params.txt PTDN/test_lite.sh PTDN/common_func.sh ${paddlelite_file}/demo/cxx/ocr/test_lite
    cd ${paddlelite_file}/demo/cxx/ocr/
    git clone https://github.com/LDOUBLEV/AutoLog.git
    unset http_proxy
    unset https_proxy
    make -j
    sleep 1
    make -j
    cp ocr_db_crnn test_lite && cp test_lite/libpaddle_light_api_shared.so test_lite/libc++_shared.so
    tar -cf test_lite.tar ./test_lite && cp test_lite.tar ${current_dir} && cd ${current_dir}
fi

@@ -1,9 +1,9 @@

# Inference deployment navigation
# Training and Inference Pipeline Certification (TIPC)

## 1. Introduction

Beyond basic model training and prediction, PaddlePaddle provides high-performance inference deployment tools for many devices and platforms. This document provides PTDN (Paddle Train Deploy Navigation) for all models in PaddleOCR, so users can look up the inference deployment status of each model and run one-click tests.
Beyond basic model training and prediction, PaddlePaddle provides high-performance inference deployment tools for many devices and platforms. This document provides Training and Inference Pipeline Certification (TIPC) information and test tools for all models in PaddleOCR, so users can look up the training and inference deployment status of each model and run one-click tests.

<div align="center">
<img src="docs/guide.png" width="1000">
@@ -58,7 +58,7 @@
### Directory overview

```shell
PTDN/
test_tipc/
├── configs/ # configuration files directory
├── det_mv3_db.yml # yml file for testing training of the mobile ppocr detection model
├── det_r50_vd_db.yml # yml file for testing training of the server ppocr detection model
test_tipc/test_lite.sh (new file, 69 lines)
@@ -0,0 +1,69 @@
#!/bin/bash
source ./common_func.sh
export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH

FILENAME=$1
dataline=$(awk 'NR==101, NR==110{print}' $FILENAME)
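# NR==101..110 selects the lite_params block appended near the end of the params file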
echo $dataline
# parser params
IFS=$'\n'
lines=(${dataline})

# parser lite inference
lite_inference_cmd=$(func_parser_value "${lines[1]}")
lite_model_dir_list=$(func_parser_value "${lines[2]}")
lite_cpu_threads_list=$(func_parser_value "${lines[3]}")
lite_batch_size_list=$(func_parser_value "${lines[4]}")
lite_power_mode_list=$(func_parser_value "${lines[5]}")
lite_infer_img_dir_list=$(func_parser_value "${lines[6]}")
lite_config_dir=$(func_parser_value "${lines[7]}")
lite_rec_dict_dir=$(func_parser_value "${lines[8]}")
lite_benchmark_value=$(func_parser_value "${lines[9]}")

LOG_PATH="./output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results.log"

function func_lite(){
    IFS='|'
    _script=$1
    _lite_model=$2
    _log_path=$3
    _img_dir=$4
    _config=$5
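    # note: the checks below read the global $lite_model set by the caller's loop,
    # not the _lite_model parameter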
    if [[ $lite_model =~ "slim" ]]; then
        precision="INT8"
    else
        precision="FP32"
    fi
    is_single_img=$(echo $_img_dir | grep -E ".jpg|.jpeg|.png|.JPEG|.JPG")
    if [[ "$is_single_img" != "" ]]; then
        single_img="True"
    else
        single_img="False"
    fi

    # lite inference
    for num_threads in ${lite_cpu_threads_list[*]}; do
        for power_mode in ${lite_power_mode_list[*]}; do
            for batchsize in ${lite_batch_size_list[*]}; do
                model_name=$(echo $lite_model | awk -F "/" '{print $NF}')
                _save_log_path="${_log_path}/lite_${model_name}_precision_${precision}_batchsize_${batchsize}_threads_${num_threads}_powermode_${power_mode}_singleimg_${single_img}.log"
                command="${_script} ${lite_model} ${precision} ${num_threads} ${batchsize} ${power_mode} ${_img_dir} ${_config} ${lite_benchmark_value} > ${_save_log_path} 2>&1"
                eval ${command}
                status_check $? "${command}" "${status_log}"
            done
        done
    done
}

echo "################### run test ###################"
|
||||
IFS="|"
|
||||
for lite_model in ${lite_model_dir_list[*]}; do
|
||||
#run lite inference
|
||||
for img_dir in ${lite_infer_img_dir_list[*]}; do
|
||||
func_lite "${lite_inference_cmd}" "${lite_model}" "${LOG_PATH}" "${img_dir}" "${lite_config_dir}"
|
||||
done
|
||||
done
|