add c++(cls, shitu) chain for tipc

pull/1511/head
dongshuilong 2021-12-01 07:34:06 +00:00
parent ded24addaf
commit 87873447f5
8 changed files with 718 additions and 483 deletions

View File

@@ -18,102 +18,102 @@
namespace Feature {

void FeatureExtracter::LoadModel(const std::string &model_path,
                                 const std::string &params_path) {
  paddle_infer::Config config;
  config.SetModel(model_path, params_path);

  if (this->use_gpu_) {
    config.EnableUseGpu(this->gpu_mem_, this->gpu_id_);
    if (this->use_tensorrt_) {
      config.EnableTensorRtEngine(
          1 << 20, 1, 3,
          this->use_fp16_ ? paddle_infer::Config::Precision::kHalf
                          : paddle_infer::Config::Precision::kFloat32,
          false, false);
    }
  } else {
    config.DisableGpu();
    if (this->use_mkldnn_) {
      config.EnableMKLDNN();
      // cache 10 different shapes for mkldnn to avoid memory leak
      config.SetMkldnnCacheCapacity(10);
    }
    config.SetCpuMathLibraryNumThreads(this->cpu_math_library_num_threads_);
  }

  config.SwitchUseFeedFetchOps(false);
  // true for multiple input
  config.SwitchSpecifyInputNames(true);
  config.SwitchIrOptim(true);
  config.EnableMemoryOptim();
  config.DisableGlogInfo();
  this->predictor_ = CreatePredictor(config);
}

void FeatureExtracter::Run(cv::Mat &img, std::vector<float> &out_data,
                           std::vector<double> &times) {
  cv::Mat resize_img;
  std::vector<double> time;

  auto preprocess_start = std::chrono::steady_clock::now();
  this->resize_op_.Run(img, resize_img, this->resize_short_,
                       this->resize_size_);
  this->normalize_op_.Run(&resize_img, this->mean_, this->std_, this->scale_);
  std::vector<float> input(1 * 3 * resize_img.rows * resize_img.cols, 0.0f);
  this->permute_op_.Run(&resize_img, input.data());

  auto input_names = this->predictor_->GetInputNames();
  auto input_t = this->predictor_->GetInputHandle(input_names[0]);
  input_t->Reshape({1, 3, resize_img.rows, resize_img.cols});
  auto preprocess_end = std::chrono::steady_clock::now();

  auto infer_start = std::chrono::steady_clock::now();
  input_t->CopyFromCpu(input.data());
  this->predictor_->Run();

  auto output_names = this->predictor_->GetOutputNames();
  auto output_t = this->predictor_->GetOutputHandle(output_names[0]);
  std::vector<int> output_shape = output_t->shape();
  int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
                                std::multiplies<int>());
  out_data.resize(out_num);
  output_t->CopyToCpu(out_data.data());
  auto infer_end = std::chrono::steady_clock::now();

  auto postprocess_start = std::chrono::steady_clock::now();
  if (this->feature_norm)
    FeatureNorm(out_data);
  auto postprocess_end = std::chrono::steady_clock::now();

  std::chrono::duration<float> preprocess_diff =
      preprocess_end - preprocess_start;
  time.push_back(double(preprocess_diff.count()) * 1000);
  std::chrono::duration<float> inference_diff = infer_end - infer_start;
  double inference_cost_time = double(inference_diff.count() * 1000);
  time.push_back(inference_cost_time);
  // std::chrono::duration<float> postprocess_diff =
  //     postprocess_end - postprocess_start;
  time.push_back(0);

  times[0] += time[0];
  times[1] += time[1];
  times[2] += time[2];
}

void FeatureExtracter::FeatureNorm(std::vector<float> &feature) {
  float feature_sqrt = std::sqrt(std::inner_product(
      feature.begin(), feature.end(), feature.begin(), 0.0f));
  for (int i = 0; i < feature.size(); ++i)
    feature[i] /= feature_sqrt;
}
} // namespace Feature

View File

@@ -37,262 +37,304 @@
using namespace std;
using namespace cv;
DEFINE_string(config, "", "Path of yaml file");
DEFINE_string(c, "", "Path of yaml file");

void DetPredictImage(const std::vector<cv::Mat> &batch_imgs,
                     const std::vector<std::string> &all_img_paths,
                     const int batch_size, Detection::ObjectDetector *det,
                     std::vector<Detection::ObjectResult> &im_result,
                     std::vector<int> &im_bbox_num, std::vector<double> &det_t,
                     const bool visual_det = false,
                     const bool run_benchmark = false,
                     const std::string &output_dir = "output") {
  int steps = ceil(float(all_img_paths.size()) / batch_size);
  // printf("total images = %d, batch_size = %d, total steps = %d\n",
  //        all_img_paths.size(), batch_size, steps);
  for (int idx = 0; idx < steps; idx++) {
    int left_image_cnt = all_img_paths.size() - idx * batch_size;
    if (left_image_cnt > batch_size) {
      left_image_cnt = batch_size;
    }
    // Store all detected result
    std::vector<Detection::ObjectResult> result;
    std::vector<int> bbox_num;
    std::vector<double> det_times;
    bool is_rbox = false;
    if (run_benchmark) {
      det->Predict(batch_imgs, 10, 10, &result, &bbox_num, &det_times);
    } else {
      det->Predict(batch_imgs, 0, 1, &result, &bbox_num, &det_times);
      // get labels and colormap
      auto labels = det->GetLabelList();
      auto colormap = Detection::GenerateColorMap(labels.size());
      int item_start_idx = 0;
      for (int i = 0; i < left_image_cnt; i++) {
        cv::Mat im = batch_imgs[i];
        int detect_num = 0;
        for (int j = 0; j < bbox_num[i]; j++) {
          Detection::ObjectResult item = result[item_start_idx + j];
          if (item.confidence < det->GetThreshold() || item.class_id == -1) {
            continue;
          }
          detect_num += 1;
          im_result.push_back(item);
          if (visual_det) {
            if (item.rect.size() > 6) {
              is_rbox = true;
              printf(
                  "class=%d confidence=%.4f rect=[%d %d %d %d %d %d %d %d]\n",
                  item.class_id, item.confidence, item.rect[0], item.rect[1],
                  item.rect[2], item.rect[3], item.rect[4], item.rect[5],
                  item.rect[6], item.rect[7]);
            } else {
              printf("class=%d confidence=%.4f rect=[%d %d %d %d]\n",
                     item.class_id, item.confidence, item.rect[0],
                     item.rect[1], item.rect[2], item.rect[3]);
            }
          }
        }
        im_bbox_num.push_back(detect_num);
        item_start_idx = item_start_idx + bbox_num[i];
        // Visualization result
        if (visual_det) {
          std::cout << all_img_paths.at(idx * batch_size + i)
                    << " The number of detected box: " << detect_num
                    << std::endl;
          cv::Mat vis_img = Detection::VisualizeResult(im, im_result, labels,
                                                       colormap, is_rbox);
          std::vector<int> compression_params;
          compression_params.push_back(CV_IMWRITE_JPEG_QUALITY);
          compression_params.push_back(95);
          std::string output_path(output_dir);
          if (output_dir.rfind(OS_PATH_SEP) != output_dir.size() - 1) {
            output_path += OS_PATH_SEP;
          }
          std::string image_file_path = all_img_paths.at(idx * batch_size + i);
          output_path +=
              image_file_path.substr(image_file_path.find_last_of('/') + 1);
          cv::imwrite(output_path, vis_img, compression_params);
          printf("Visualized output saved as %s\n", output_path.c_str());
        }
      }
    }
    det_t[0] += det_times[0];
    det_t[1] += det_times[1];
    det_t[2] += det_times[2];
  }
}

void PrintResult(std::string &img_path,
                 std::vector<Detection::ObjectResult> &det_result,
                 std::vector<int> &indices, VectorSearch &vector_search,
                 SearchResult &search_result) {
  printf("%s:\n", img_path.c_str());
  for (int i = 0; i < indices.size(); ++i) {
    int t = indices[i];
    printf("\tresult%d: bbox[%d, %d, %d, %d], score: %f, label: %s\n", i,
           det_result[t].rect[0], det_result[t].rect[1], det_result[t].rect[2],
           det_result[t].rect[3], det_result[t].confidence,
           vector_search.GetLabel(search_result.I[search_result.return_k * t])
               .c_str());
  }
}

int main(int argc, char **argv) {
  google::ParseCommandLineFlags(&argc, &argv, true);
  std::string yaml_path = "";
  if (FLAGS_config == "" && FLAGS_c == "") {
    std::cerr << "[ERROR] usage: " << std::endl
              << argv[0] << " -c $yaml_path" << std::endl
              << "or:" << std::endl
              << argv[0] << " -config $yaml_path" << std::endl;
    exit(1);
  } else if (FLAGS_config != "") {
    yaml_path = FLAGS_config;
  } else {
    yaml_path = FLAGS_c;
  }

  YamlConfig config(yaml_path);
  config.PrintConfigInfo();

  // initialize detector, rec_Model, vector_search
  Feature::FeatureExtracter feature_extracter(config.config_file);
  Detection::ObjectDetector detector(config.config_file);
  VectorSearch searcher(config.config_file);

  // config
  const int batch_size = config.config_file["Global"]["batch_size"].as<int>();
  bool visual_det = false;
  if (config.config_file["Global"]["visual_det"].IsDefined()) {
    visual_det = config.config_file["Global"]["visual_det"].as<bool>();
  }
  bool benchmark = false;
  if (config.config_file["Global"]["benchmark"].IsDefined()) {
    benchmark = config.config_file["Global"]["benchmark"].as<bool>();
  }
  int max_det_results = 5;
  if (config.config_file["Global"]["max_det_results"].IsDefined()) {
    max_det_results =
        config.config_file["Global"]["max_det_results"].as<int>();
  }
  float rec_nms_thresold = 0.05;
  if (config.config_file["Global"]["rec_nms_thresold"].IsDefined()) {
    rec_nms_thresold =
        config.config_file["Global"]["rec_nms_thresold"].as<float>();
  }

  // load image_file_path
  std::string path =
      config.config_file["Global"]["infer_imgs"].as<std::string>();
  std::vector<std::string> img_files_list;
  if (cv::utils::fs::isDirectory(path)) {
    std::vector<cv::String> filenames;
    cv::glob(path, filenames);
    for (auto f : filenames) {
      img_files_list.push_back(f);
    }
  } else {
    img_files_list.push_back(path);
  }
  std::cout << "img_file_list length: " << img_files_list.size() << std::endl;

  // for time log
  std::vector<double> cls_times = {0, 0, 0};
  std::vector<double> det_times = {0, 0, 0};
  std::vector<double> search_times = {0, 0, 0};
  int instance_num = 0;
  // for read images
  std::vector<cv::Mat> batch_imgs;
  std::vector<std::string> img_paths;
  // for detection
  std::vector<Detection::ObjectResult> det_result;
  std::vector<int> det_bbox_num;
  // for vector search
  std::vector<float> features;
  std::vector<float> feature;
  // for nms
  std::vector<int> indices;

  int warmup_iter = img_files_list.size() > 5 ? 5 : img_files_list.size();
  if (benchmark) {
    img_files_list.insert(img_files_list.begin(), img_files_list.begin(),
                          img_files_list.begin() + warmup_iter);
  }

  for (int idx = 0; idx < img_files_list.size(); ++idx) {
    std::string img_path = img_files_list[idx];
    cv::Mat srcimg = cv::imread(img_path, cv::IMREAD_COLOR);
    if (!srcimg.data) {
      std::cerr << "[ERROR] image read failed! image path: " << img_path
                << "\n";
      exit(-1);
    }
    cv::cvtColor(srcimg, srcimg, cv::COLOR_BGR2RGB);
    batch_imgs.push_back(srcimg);
    img_paths.push_back(img_path);

    // step1: get all detection results
    DetPredictImage(batch_imgs, img_paths, batch_size, &detector, det_result,
                    det_bbox_num, det_times, visual_det, false);

    // select max_det_results bbox
    if (det_result.size() > max_det_results) {
      det_result.resize(max_det_results);
    }
    instance_num += det_result.size();

    // step2: add the whole image for recognition to improve recall
    Detection::ObjectResult result_whole_img = {
        {0, 0, srcimg.cols - 1, srcimg.rows - 1}, 0, 1.0};
    det_result.push_back(result_whole_img);
    det_bbox_num[0] = det_result.size() + 1;

    // step3: extract feature for all boxes in an image
    SearchResult search_result;
    for (int j = 0; j < det_result.size(); ++j) {
      int w = det_result[j].rect[2] - det_result[j].rect[0];
      int h = det_result[j].rect[3] - det_result[j].rect[1];
      cv::Rect rect(det_result[j].rect[0], det_result[j].rect[1], w, h);
      cv::Mat crop_img = srcimg(rect);
      feature_extracter.Run(crop_img, feature, cls_times);
      features.insert(features.end(), feature.begin(), feature.end());
    }

    // step4: get search result
    auto search_start = std::chrono::steady_clock::now();
    search_result = searcher.Search(features.data(), det_result.size());
    auto search_end = std::chrono::steady_clock::now();

    // nms for search result
    for (int i = 0; i < det_result.size(); ++i) {
      det_result[i].confidence = search_result.D[search_result.return_k * i];
    }
    NMSBoxes(det_result, searcher.GetThreshold(), rec_nms_thresold, indices);
    auto nms_end = std::chrono::steady_clock::now();
    std::chrono::duration<float> search_diff = search_end - search_start;
    search_times[1] += double(search_diff.count() * 1000);
    std::chrono::duration<float> nms_diff = nms_end - search_end;
    search_times[2] += double(nms_diff.count() * 1000);

    // print result
    if (not benchmark or (benchmark and idx >= warmup_iter))
      PrintResult(img_path, det_result, indices, searcher, search_result);

    // for postprocess
    batch_imgs.clear();
    img_paths.clear();
    det_bbox_num.clear();
    det_result.clear();
    feature.clear();
    features.clear();
    indices.clear();

    // drop warmup statistics before the benchmark report
    if (benchmark and warmup_iter == idx + 1) {
      det_times = {0, 0, 0};
      cls_times = {0, 0, 0};
      search_times = {0, 0, 0};
      instance_num = 0;
    }
  }

  if (benchmark) {
    std::string precision = "fp32";
    if (config.config_file["Global"]["use_fp16"].IsDefined() and
        config.config_file["Global"]["use_fp16"].as<bool>())
      precision = "fp16";
    bool use_gpu = config.config_file["Global"]["use_gpu"].as<bool>();
    bool use_tensorrt =
        config.config_file["Global"]["use_tensorrt"].as<bool>();
    bool enable_mkldnn =
        config.config_file["Global"]["enable_mkldnn"].as<bool>();
    int cpu_num_threads =
        config.config_file["Global"]["cpu_num_threads"].as<int>();
    int batch_size = config.config_file["Global"]["batch_size"].as<int>();
    std::vector<int> shape =
        config.config_file["Global"]["image_shape"].as<std::vector<int>>();
    std::string det_shape = std::to_string(shape[0]);
    for (int i = 1; i < shape.size(); ++i)
      det_shape = det_shape + ", " + std::to_string(shape[i]);

    AutoLogger autolog_det("Det", use_gpu, use_tensorrt, enable_mkldnn,
                           cpu_num_threads, batch_size, det_shape, precision,
                           det_times, img_files_list.size() - warmup_iter);
    autolog_det.report();
    AutoLogger autolog_rec("Rec", use_gpu, use_tensorrt, enable_mkldnn,
                           cpu_num_threads, batch_size, "3, 224, 224",
                           precision, cls_times, instance_num);
    autolog_rec.report();
    AutoLogger autolog_search("Search", false, use_tensorrt, enable_mkldnn,
                              cpu_num_threads, batch_size, "dynamic",
                              precision, search_times, instance_num);
    autolog_search.report();
  }
return 0;
}
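
For reference, a minimal sketch of how the built binary is invoked; the command string comes from the inference_cmd line of the PPShiTu TIPC config below, and main() accepts either flag spelling:

./deploy/cpp_shitu/build/pp_shitu -c inference_drink.yaml
# equivalent long form
./deploy/cpp_shitu/build/pp_shitu -config inference_drink.yaml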

View File

@@ -14,20 +14,21 @@ def parse_args():
def main():
    args = parse_args()
    with open(args.config) as fd:
        config = yaml.load(fd.read(), yaml.FullLoader)
    index_dir = ""
    try:
        index_dir = config["IndexProcess"]["index_dir"]
    except Exception as e:
        print("The IndexProcess.index_dir in config_file does not exist")
        exit(1)
    id_map_path = os.path.join(index_dir, "id_map.pkl")
    assert os.path.exists(
        id_map_path), "The id_map file does not exist: {}".format(id_map_path)
    with open(id_map_path, "rb") as fd:
        ids = pickle.load(fd)
    with open(os.path.join(index_dir, "id_map.txt"), "w") as fd:
        for k, v in ids.items():
            v = v.split("\t")[1]
            fd.write(str(k) + " " + v + "\n")

View File

@@ -0,0 +1,17 @@
===========================cpp_infer_params===========================
model_name:PPShiTu
cpp_infer_type:shitu
feature_inference_model_dir:./feature_inference/
det_inference_model_dir:./det_inference
cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar
det_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
infer_quant:False
inference_cmd:./deploy/cpp_shitu/build/pp_shitu -c inference_drink.yaml
use_gpu:True|False
enable_mkldnn:True|False
cpu_threads:1|6
batch_size:1
use_tensorrt:False|True
precision:fp32|fp16
data_dir:./dataset/drink_dataset_v1.0
benchmark:True
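
A rough sketch of how the TIPC chain consumes this file; the on-disk path of the txt is assumed here, since the diff does not show it. prepare.sh fetches the two models and drink_dataset_v1.0, then test_inference_cpp.sh builds the dependencies and runs pp_shitu:

# assumed config path, for illustration only
CONFIG=test_tipc/config/PPShiTu/PPShiTu_infer_cpp_linux_gpu_cpu.txt
bash test_tipc/prepare.sh ${CONFIG} cpp_infer
bash test_tipc/test_inference_cpp.sh ${CONFIG}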

View File

@@ -0,0 +1,17 @@
===========================cpp_infer_params===========================
model_name:ResNet50_vd
cpp_infer_type:cls
cls_inference_model_dir:./cls_inference/
det_inference_model_dir:
cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/whole_chain/ResNet50_vd_inference.tar
det_inference_url:
infer_quant:False
inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
use_gpu:True|False
enable_mkldnn:True|False
cpu_threads:1|6
batch_size:1
use_tensorrt:False|True
precision:fp32|fp16
image_dir:./dataset/ILSVRC2012/val
benchmark:True
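
The cls chain runs the same two steps; again the config path is an assumption:

# assumed config path, for illustration only
CONFIG=test_tipc/config/ResNet50_vd/ResNet50_vd_infer_cpp_linux_gpu_cpu.txt
bash test_tipc/prepare.sh ${CONFIG} cpp_infer      # fetches ResNet50_vd and the ILSVRC2012 subset
bash test_tipc/test_inference_cpp.sh ${CONFIG}     # builds deploy/cpp and runs clas_system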

View File

@@ -0,0 +1,78 @@
import os
import yaml
import argparse


def str2bool(v):
    if v.lower() == 'true':
        return True
    else:
        return False


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--type', required=True, choices=["cls", "shitu"])
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--mkldnn', type=str2bool, default=True)
    parser.add_argument('--gpu', type=str2bool, default=False)
    parser.add_argument('--cpu_thread', type=int, default=1)
    parser.add_argument('--tensorrt', type=str2bool, default=False)
    parser.add_argument('--precision', type=str, choices=["fp32", "fp16"])
    parser.add_argument('--benchmark', type=str2bool, default=True)
    parser.add_argument(
        '--cls_yaml_path',
        type=str,
        default="deploy/configs/inference_cls.yaml")
    parser.add_argument(
        '--shitu_yaml_path',
        type=str,
        default="deploy/configs/inference_drink.yaml")
    parser.add_argument('--data_dir', type=str, required=True)
    parser.add_argument('--save_path', type=str, default='./')
    parser.add_argument('--cls_model_dir', type=str)
    parser.add_argument('--det_model_dir', type=str)
    args = parser.parse_args()
    return args


def main():
    args = parse_args()
    if args.type == "cls":
        save_path = os.path.join(args.save_path,
                                 os.path.basename(args.cls_yaml_path))
        fd = open(args.cls_yaml_path)
    else:
        save_path = os.path.join(args.save_path,
                                 os.path.basename(args.shitu_yaml_path))
        fd = open(args.shitu_yaml_path)
    config = yaml.load(fd, yaml.FullLoader)
    fd.close()
    config["Global"]["batch_size"] = args.batch_size
    config["Global"]["use_gpu"] = args.gpu
    config["Global"]["enable_mkldnn"] = args.mkldnn
    config["Global"]["benchmark"] = args.benchmark
    config["Global"]["use_tensorrt"] = args.tensorrt
    config["Global"]["use_fp16"] = True if args.precision == "fp16" else False
    if args.type == "cls":
        config["Global"]["infer_imgs"] = args.data_dir
        assert args.cls_model_dir
        config["Global"]["inference_model_dir"] = args.cls_model_dir
    else:
        config["Global"]["infer_imgs"] = os.path.join(args.data_dir,
                                                      "test_images")
        config["IndexProcess"]["index_dir"] = os.path.join(args.data_dir,
                                                           "index")
        assert args.cls_model_dir
        assert args.det_model_dir
        config["Global"]["det_inference_model_dir"] = args.det_model_dir
        config["Global"]["rec_inference_model_dir"] = args.cls_model_dir
    with open(save_path, 'w') as fd:
        yaml.dump(config, fd)
    print("Generate new yaml done")


if __name__ == "__main__":
    main()
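
For illustration, one CPU invocation of this generator, mirroring what func_shitu_cpp_inference in the test script below passes; the model directories are the ones named in the PPShiTu config above:

python3 test_tipc/generate_cpp_yaml.py \
    --type shitu \
    --batch_size 1 \
    --mkldnn True \
    --gpu False \
    --cpu_thread 6 \
    --tensorrt False \
    --precision fp32 \
    --data_dir ./dataset/drink_dataset_v1.0 \
    --benchmark True \
    --cls_model_dir ./feature_inference/ \
    --det_model_dir ./det_inference
# with the default --save_path of ./ this writes ./inference_drink.yaml,
# the file named by inference_cmd in the PPShiTu config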

View File

@@ -33,6 +33,59 @@ function func_parser_value(){
fi
}
function func_get_url_file_name(){
    strs=$1
    IFS="/"
    array=(${strs})
    tmp=${array[${#array[@]}-1]}
    echo ${tmp}
}
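
A quick sanity check of the helper (illustrative only; note the function leaves IFS set to "/" afterwards):

func_get_url_file_name "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/whole_chain/ResNet50_vd_inference.tar"
# prints: ResNet50_vd_inference.tar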
model_name=$(func_parser_value "${lines[1]}")
if [ ${MODE} = "cpp_infer" ];then
    if [[ $FILENAME == *infer_cpp_linux_gpu_cpu.txt ]];then
        cpp_type=$(func_parser_value "${lines[2]}")
        cls_inference_model_dir=$(func_parser_value "${lines[3]}")
        det_inference_model_dir=$(func_parser_value "${lines[4]}")
        cls_inference_url=$(func_parser_value "${lines[5]}")
        det_inference_url=$(func_parser_value "${lines[6]}")

        if [[ $cpp_type == "cls" ]];then
            eval "wget -nc $cls_inference_url"
            tar xf "${model_name}_inference.tar"
            eval "mv inference $cls_inference_model_dir"
            cd dataset
            rm -rf ILSVRC2012
            wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/whole_chain_infer.tar
            tar xf whole_chain_infer.tar
            ln -s whole_chain_infer ILSVRC2012
            cd ..
        elif [[ $cpp_type == "shitu" ]];then
            eval "wget -nc $cls_inference_url"
            tar_name=$(func_get_url_file_name "$cls_inference_url")
            model_dir=${tar_name%.*}
            eval "tar xf ${tar_name}"
            eval "mv ${model_dir} ${cls_inference_model_dir}"

            eval "wget -nc $det_inference_url"
            tar_name=$(func_get_url_file_name "$det_inference_url")
            model_dir=${tar_name%.*}
            eval "tar xf ${tar_name}"
            eval "mv ${model_dir} ${det_inference_model_dir}"

            cd dataset
            wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/drink_dataset_v1.0.tar
            tar -xf drink_dataset_v1.0.tar
        else
            echo "Wrong cpp type in config file in line 3. only support cls, shitu"
        fi
        exit 0
    else
        echo "use wrong config file"
        exit 1
    fi
fi
model_name=$(func_parser_value "${lines[1]}")
model_url_value=$(func_parser_value "${lines[35]}")
model_url_key=$(func_parser_key "${lines[35]}")
@@ -114,63 +167,3 @@ if [ ${MODE} = "serving_infer" ];then
cd ./deploy/paddleserving
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar && tar xf ResNet50_vd_infer.tar
fi

View File

@@ -10,37 +10,27 @@ lines=(${dataline})
# parser cpp inference model
model_name=$(func_parser_value "${lines[1]}")
cpp_infer_type=$(func_parser_value "${lines[2]}")
cpp_infer_model_dir=$(func_parser_value "${lines[3]}")
cpp_det_infer_model_dir=$(func_parser_value "${lines[4]}")
cpp_infer_is_quant=$(func_parser_value "${lines[7]}")
# parser cpp inference
inference_cmd=$(func_parser_value "${lines[8]}")
cpp_use_gpu_list=$(func_parser_value "${lines[9]}")
cpp_use_mkldnn_list=$(func_parser_value "${lines[10]}")
cpp_cpu_threads_list=$(func_parser_value "${lines[11]}")
cpp_batch_size_list=$(func_parser_value "${lines[12]}")
cpp_use_trt_list=$(func_parser_value "${lines[13]}")
cpp_precision_list=$(func_parser_value "${lines[14]}")
cpp_image_dir_value=$(func_parser_value "${lines[15]}")
cpp_benchmark_value=$(func_parser_value "${lines[16]}")

LOG_PATH="./test_tipc/output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_cpp.log"
generate_yaml_cmd="python3 test_tipc/generate_cpp_yaml.py"

function func_shitu_cpp_inference(){
    IFS='|'
    _script=$1
    _model_dir=$2
@@ -48,6 +38,7 @@ function func_shitu_cpp_inference(){
    _img_dir=$4
    _flag_quant=$5
    # inference

    for use_gpu in ${cpp_use_gpu_list[*]}; do
        if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
            for use_mkldnn in ${cpp_use_mkldnn_list[*]}; do
@@ -60,17 +51,13 @@ function func_shitu_cpp_inference(){
                        if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                            precision="int8"
                        fi
                        _save_log_path="${_log_path}/shitu_cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
                        command="${generate_yaml_cmd} --type shitu --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --det_model_dir ${cpp_det_infer_model_dir}"
                        eval $command
                        command="${_script} 2>&1|tee ${_save_log_path}"
                        eval $command
                        last_status=${PIPESTATUS[0]}
                        status_check $last_status "${command}" "${status_log}"
                    done
                done
@@ -88,20 +75,13 @@ function func_shitu_cpp_inference(){
                        continue
                    fi
                    for batch_size in ${cpp_batch_size_list[*]}; do
                        _save_log_path="${_log_path}/shitu_cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
                        command="${generate_yaml_cmd} --type shitu --batch_size ${batch_size} --mkldnn False --gpu ${use_gpu} --cpu_thread 1 --tensorrt ${use_trt} --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --det_model_dir ${cpp_det_infer_model_dir}"
                        eval $command
                        command="${_script} 2>&1|tee ${_save_log_path}"
                        eval $command
                        last_status=${PIPESTATUS[0]}
                        status_check $last_status "${_script}" "${status_log}"
                    done
                done
            done
@@ -111,25 +91,116 @@ function func_shitu_cpp_inference(){
    done
}
function func_cls_cpp_inference(){
    IFS='|'
    _script=$1
    _model_dir=$2
    _log_path=$3
    _img_dir=$4
    _flag_quant=$5
    # inference

    for use_gpu in ${cpp_use_gpu_list[*]}; do
        if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
            for use_mkldnn in ${cpp_use_mkldnn_list[*]}; do
                if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                    continue
                fi
                for threads in ${cpp_cpu_threads_list[*]}; do
                    for batch_size in ${cpp_batch_size_list[*]}; do
                        precision="fp32"
                        if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                            precision="int8"
                        fi
                        _save_log_path="${_log_path}/cls_cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
                        command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir}"
                        eval $command
                        command1="${_script} 2>&1|tee ${_save_log_path}"
                        eval ${command1}
                        last_status=${PIPESTATUS[0]}
                        status_check $last_status "${command1}" "${status_log}"
                    done
                done
            done
        elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
            for use_trt in ${cpp_use_trt_list[*]}; do
                for precision in ${cpp_precision_list[*]}; do
                    if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
                        continue
                    fi
                    if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
                        continue
                    fi
                    if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
                        continue
                    fi
                    for batch_size in ${cpp_batch_size_list[*]}; do
                        _save_log_path="${_log_path}/cls_cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
                        command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn False --gpu ${use_gpu} --cpu_thread 1 --tensorrt ${use_trt} --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir}"
                        eval $command
                        command="${_script} 2>&1|tee ${_save_log_path}"
                        eval $command
                        last_status=${PIPESTATUS[0]}
                        status_check $last_status "${command}" "${status_log}"
                    done
                done
            done
        else
            echo "Does not support hardware other than CPU and GPU Currently!"
        fi
    done
}
if [[ $cpp_infer_type == "cls" ]]; then
    cd deploy/cpp
elif [[ $cpp_infer_type == "shitu" ]]; then
    cd deploy/cpp_shitu
else
    echo "Only support cls and shitu"
    exit 0
fi

if [[ $cpp_infer_type == "shitu" ]]; then
    echo "################### update cmake ###################"
    wget -nc https://github.com/Kitware/CMake/releases/download/v3.22.0/cmake-3.22.0.tar.gz
    tar xf cmake-3.22.0.tar.gz
    cd ./cmake-3.22.0
    export root_path=$PWD
    export install_path=${root_path}/cmake
    eval "./bootstrap --prefix=${install_path}"
    make -j
    make install
    export PATH=${install_path}/bin:$PATH
    cd ..
    echo "################### update cmake done ###################"

    echo "################### build faiss ###################"
    apt-get install -y libopenblas-dev
    git clone https://github.com/facebookresearch/faiss.git
    cd faiss
    export faiss_install_path=$PWD/faiss_install
    eval "cmake -B build . -DFAISS_ENABLE_PYTHON=OFF -DCMAKE_INSTALL_PREFIX=${faiss_install_path}"
    make -C build -j faiss
    make -C build install
fi

if [ -d "opencv-3.4.7/opencv3/" ] && [ $(md5sum opencv-3.4.7.tar.gz | awk -F ' ' '{print $1}') = "faa2b5950f8bee3f03118e600c74746a" ];then
    echo "################### build opencv skipped ###################"
else
    echo "################### build opencv ###################"
    rm -rf opencv-3.4.7.tar.gz opencv-3.4.7/
    wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/opencv-3.4.7.tar.gz
    tar -xf opencv-3.4.7.tar.gz

    cd opencv-3.4.7/
    install_path=$(pwd)/opencv3
    rm -rf build
    mkdir build
    cd build

    cmake .. \
        -DCMAKE_INSTALL_PREFIX=${install_path} \
        -DCMAKE_BUILD_TYPE=Release \
        -DBUILD_SHARED_LIBS=OFF \
@@ -147,21 +218,17 @@
        -DWITH_TIFF=ON \
        -DBUILD_TIFF=ON
    make -j
    make install
    cd ../
    echo "################### build opencv finished ###################"
fi
echo "################### build PaddleClas demo ####################"
OPENCV_DIR=$(pwd)/opencv-3.4.7/opencv3/
LIB_DIR=/work/project/project/test/paddle_inference/
# LIB_DIR=$(pwd)/Paddle/build/paddle_inference_install_dir/
CUDA_LIB_DIR=$(dirname `find /usr -name libcudart.so`)
CUDNN_LIB_DIR=$(dirname `find /usr -name libcudnn.so`)
@@ -169,20 +236,36 @@ BUILD_DIR=build
rm -rf ${BUILD_DIR}
mkdir ${BUILD_DIR}
cd ${BUILD_DIR}
if [[ $cpp_infer_type == "cls" ]]; then
    cmake .. \
        -DPADDLE_LIB=${LIB_DIR} \
        -DWITH_MKL=ON \
        -DWITH_GPU=ON \
        -DWITH_STATIC_LIB=OFF \
        -DWITH_TENSORRT=OFF \
        -DOPENCV_DIR=${OPENCV_DIR} \
        -DCUDNN_LIB=${CUDNN_LIB_DIR} \
        -DCUDA_LIB=${CUDA_LIB_DIR} \
        -DTENSORRT_DIR=${TENSORRT_DIR}
    echo "---------------------------"
else
    cmake .. \
        -DPADDLE_LIB=${LIB_DIR} \
        -DWITH_MKL=ON \
        -DWITH_GPU=ON \
        -DWITH_STATIC_LIB=OFF \
        -DWITH_TENSORRT=OFF \
        -DOPENCV_DIR=${OPENCV_DIR} \
        -DCUDNN_LIB=${CUDNN_LIB_DIR} \
        -DCUDA_LIB=${CUDA_LIB_DIR} \
        -DTENSORRT_DIR=${TENSORRT_DIR} \
        -DFAISS_DIR=${FAISS_DIR} \
        -DFAISS_WITH_MKL=OFF
fi
make -j
cd ../../../
# cd ../../
echo "################### build PaddleClas demo finished ###################"
# set cuda device
@@ -200,9 +283,13 @@ echo "################### run test ###################"
export Count=0
IFS="|"
infer_quant_flag=(${cpp_infer_is_quant})
for infer_model in ${cpp_infer_model_dir[*]}; do
    #run inference
    is_quant=${infer_quant_flag[Count]}
    if [[ $cpp_infer_type == "cls" ]]; then
        func_cls_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_image_dir_value}" ${is_quant}
    else
        func_shitu_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_image_dir_value}" ${is_quant}
    fi
    Count=$(($Count + 1))
done