slim fleet_train config and fix paddle2onnx for GeneralRecognition model

pull/2129/head
HydrogenSulfate 2022-07-04 15:03:36 +08:00
parent 2e56bec7d6
commit ad707bde4a
2 changed files with 12 additions and 6 deletions

View File

@@ -1,5 +1,5 @@
===========================paddle2onnx_params===========================
model_name:PP-ShiTu_general_rec
model_name:GeneralRecognition_PPLCNet_x2_5
python:python3.7
2onnx: paddle2onnx
--model_dir:./deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/
@@ -9,8 +9,8 @@ python:python3.7
--opset_version:10
--enable_onnx_checker:True
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar
inference:./python/predict_cls.py
inference:./python/predict_rec.py
Global.use_onnx:True
Global.inference_model_dir:./models/general_PPLCNet_x2_5_lite_v1.0_infer
Global.rec_inference_model_dir:./models/general_PPLCNet_x2_5_lite_v1.0_infer
Global.use_gpu:False
-c:configs/inference_cls.yaml
-c:configs/inference_rec.yaml

View File

@@ -85,12 +85,18 @@ if [[ ${MODE} = "cpp_infer" ]]; then
if [[ ! -d "./deploy/cpp/paddle_inference/" ]]; then
pushd ./deploy/cpp/
PADDLEInfer=$3
if [ "" = "$PADDLEInfer" ];then
if [ "" = "$PADDLEInfer" ]; then
wget -nc https://paddle-inference-lib.bj.bcebos.com/2.2.2/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz --no-check-certificate
tar xf paddle_inference.tgz
else
wget -nc ${PADDLEInfer} --no-check-certificate
tar_name=$(func_get_url_file_name "$PADDLEInfer")
tar xf ${tar_name}
paddle_inference_install_dir=${tar_name%.*}
if [ ! -d "paddle_inference" ]; then
ln -s ${paddle_inference_install_dir} paddle_inference
fi
fi
tar xf paddle_inference.tgz
popd
fi
if [[ $FILENAME == *infer_cpp_linux_gpu_cpu.txt ]]; then