update config and script
parent
6ecaaba9fb
commit
e7d9ba5874
|
@ -34,7 +34,7 @@ distill_export:null
|
|||
kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml -o Global.save_inference_dir=./general_PPLCNet_x2_5_lite_v1.0_infer
|
||||
export2:null
|
||||
pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar
|
||||
infer_model:../inference/
|
||||
infer_model:./general_PPLCNet_x2_5_lite_v1.0_infer/
|
||||
infer_export:True
|
||||
infer_quant:False
|
||||
inference:python/predict_rec.py -c configs/inference_rec.yaml
|
||||
|
@ -47,7 +47,7 @@ inference:python/predict_rec.py -c configs/inference_rec.yaml
|
|||
-o Global.rec_inference_model_dir:../inference
|
||||
-o Global.infer_imgs:../dataset/Aliproduct/demo_test/
|
||||
-o Global.save_log_path:null
|
||||
-o Global.benchmark:True
|
||||
-o Global.benchmark:False
|
||||
null:null
|
||||
null:null
|
||||
===========================infer_benchmark_params==========================
|
||||
|
|
|
@ -34,7 +34,7 @@ distill_export:null
|
|||
kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.save_inference_dir=./MobileNetV3_large_x1_0_infer
|
||||
export2:null
|
||||
pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileNetV3_large_x1_0_infer.tar
|
||||
infer_model:../inference/
|
||||
infer_model:./MobileNetV3_large_x1_0_infer/
|
||||
infer_export:True
|
||||
infer_quant:False
|
||||
inference:python/predict_cls.py -c configs/inference_cls.yaml
|
||||
|
@ -45,9 +45,9 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
|
|||
-o Global.use_tensorrt:False
|
||||
-o Global.use_fp16:False
|
||||
-o Global.inference_model_dir:../inference
|
||||
-o Global.infer_imgs:../dataset/ILSVRC2012/val
|
||||
-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
|
||||
-o Global.save_log_path:null
|
||||
-o Global.benchmark:True
|
||||
-o Global.benchmark:False
|
||||
null:null
|
||||
null:null
|
||||
===========================train_benchmark_params==========================
|
||||
|
|
|
@ -34,20 +34,20 @@ distill_export:null
|
|||
kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml -o Global.save_inference_dir=./PPHGNet_small_infer
|
||||
export2:null
|
||||
pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_infer.tar
|
||||
infer_model:../inference/
|
||||
infer_model:./PPHGNet_small_infer/
|
||||
infer_export:True
|
||||
infer_quant:False
|
||||
inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=236
|
||||
-o Global.use_gpu:True|False
|
||||
-o Global.enable_mkldnn:True|False
|
||||
-o Global.cpu_num_threads:1|6
|
||||
-o Global.batch_size:1|16
|
||||
-o Global.use_tensorrt:True|False
|
||||
-o Global.use_fp16:True|False
|
||||
-o Global.enable_mkldnn:False
|
||||
-o Global.cpu_num_threads:1
|
||||
-o Global.batch_size:1
|
||||
-o Global.use_tensorrt:False
|
||||
-o Global.use_fp16:False
|
||||
-o Global.inference_model_dir:../inference
|
||||
-o Global.infer_imgs:../dataset/ILSVRC2012/val
|
||||
-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
|
||||
-o Global.save_log_path:null
|
||||
-o Global.benchmark:True
|
||||
-o Global.benchmark:False
|
||||
null:null
|
||||
===========================infer_benchmark_params==========================
|
||||
random_infer_input:[{float32,[3,224,224]}]
|
||||
|
|
|
@ -34,7 +34,7 @@ distill_export:null
|
|||
kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Global.save_inference_dir=./PPLCNet_x1_0_infer
|
||||
export2:null
|
||||
pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_0_infer.tar
|
||||
infer_model:../inference/
|
||||
infer_model:./PPLCNet_x1_0_infer/
|
||||
infer_export:True
|
||||
infer_quant:False
|
||||
inference:python/predict_cls.py -c configs/inference_cls.yaml
|
||||
|
@ -45,9 +45,9 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
|
|||
-o Global.use_tensorrt:False
|
||||
-o Global.use_fp16:False
|
||||
-o Global.inference_model_dir:../inference
|
||||
-o Global.infer_imgs:../dataset/ILSVRC2012/val
|
||||
-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
|
||||
-o Global.save_log_path:null
|
||||
-o Global.benchmark:True
|
||||
-o Global.benchmark:False
|
||||
null:null
|
||||
===========================infer_benchmark_params==========================
|
||||
random_infer_input:[{float32,[3,224,224]}]
|
|
@ -34,20 +34,20 @@ distill_export:null
|
|||
kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml -o Global.save_inference_dir=./PPLCNetV2_base_infer
|
||||
export2:null
|
||||
pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNetV2_base_infer.tar
|
||||
infer_model:../inference/
|
||||
infer_model:./PPLCNetV2_base_infer/
|
||||
infer_export:True
|
||||
infer_quant:False
|
||||
inference:python/predict_cls.py -c configs/inference_cls.yaml
|
||||
-o Global.use_gpu:True|False
|
||||
-o Global.enable_mkldnn:True|False
|
||||
-o Global.cpu_num_threads:1|6
|
||||
-o Global.batch_size:1|16
|
||||
-o Global.use_tensorrt:True|False
|
||||
-o Global.use_fp16:True|False
|
||||
-o Global.enable_mkldnn:False
|
||||
-o Global.cpu_num_threads:1
|
||||
-o Global.batch_size:1
|
||||
-o Global.use_tensorrt:False
|
||||
-o Global.use_fp16:False
|
||||
-o Global.inference_model_dir:../inference
|
||||
-o Global.infer_imgs:../dataset/ILSVRC2012/val
|
||||
-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
|
||||
-o Global.save_log_path:null
|
||||
-o Global.benchmark:True
|
||||
-o Global.benchmark:False
|
||||
null:null
|
||||
===========================infer_benchmark_params==========================
|
||||
random_infer_input:[{float32,[3,224,224]}]
|
||||
|
|
|
@ -34,7 +34,7 @@ distill_export:null
|
|||
kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.save_inference_dir=./ResNet50_vd_infer
|
||||
export2:null
|
||||
pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar
|
||||
infer_model:../inference/
|
||||
infer_model:./ResNet50_vd_infer/
|
||||
infer_export:True
|
||||
infer_quant:False
|
||||
inference:python/predict_cls.py -c configs/inference_cls.yaml
|
||||
|
@ -45,9 +45,9 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
|
|||
-o Global.use_tensorrt:False
|
||||
-o Global.use_fp16:False
|
||||
-o Global.inference_model_dir:../inference
|
||||
-o Global.infer_imgs:../dataset/ILSVRC2012/val
|
||||
-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
|
||||
-o Global.save_log_path:null
|
||||
-o Global.benchmark:True
|
||||
-o Global.benchmark:False
|
||||
null:null
|
||||
null:null
|
||||
===========================train_benchmark_params==========================
|
||||
|
|
|
@ -34,7 +34,7 @@ distill_export:null
|
|||
kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml -o Global.save_inference_dir=./SwinTransformer_tiny_patch4_window7_224_infer
|
||||
export2:null
|
||||
pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SwinTransformer_tiny_patch4_window7_224_infer.tar
|
||||
infer_model:../inference/
|
||||
infer_model:./SwinTransformer_tiny_patch4_window7_224_infer/
|
||||
infer_export:True
|
||||
infer_quant:False
|
||||
inference:python/predict_cls.py -c configs/inference_cls.yaml
|
||||
|
@ -45,9 +45,9 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
|
|||
-o Global.use_tensorrt:False
|
||||
-o Global.use_fp16:False
|
||||
-o Global.inference_model_dir:../inference
|
||||
-o Global.infer_imgs:../dataset/ILSVRC2012/val
|
||||
-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
|
||||
-o Global.save_log_path:null
|
||||
-o Global.benchmark:True
|
||||
-o Global.benchmark:False
|
||||
null:null
|
||||
null:null
|
||||
===========================train_benchmark_params==========================
|
||||
|
|
|
@ -171,17 +171,32 @@ if [[ ${MODE} = "lite_train_lite_infer" ]] || [[ ${MODE} = "lite_train_whole_inf
|
|||
mv val.txt val_list.txt
|
||||
cp -r train/* val/
|
||||
cd ../../
|
||||
elif [[ ${MODE} = "whole_infer" ]] || [[ ${MODE} = "klquant_whole_infer" ]]; then
|
||||
elif [[ ${MODE} = "whole_infer" ]]; then
|
||||
# download data
|
||||
cd dataset
|
||||
rm -rf ILSVRC2012
|
||||
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/whole_chain_infer.tar
|
||||
tar xf whole_chain_infer.tar
|
||||
ln -s whole_chain_infer ILSVRC2012
|
||||
cd ILSVRC2012
|
||||
mv val.txt val_list.txt
|
||||
ln -s val_list.txt train_list.txt
|
||||
cd ../../
|
||||
if [[ ${model_name} =~ "GeneralRecognition" ]]; then
|
||||
cd dataset
|
||||
rm -rf Aliproduct
|
||||
rm -rf train_reg_all_data.txt
|
||||
rm -rf demo_train
|
||||
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/tipc_shitu_demo_data.tar --no-check-certificate
|
||||
tar -xf tipc_shitu_demo_data.tar
|
||||
ln -s tipc_shitu_demo_data Aliproduct
|
||||
ln -s tipc_shitu_demo_data/demo_train.txt train_reg_all_data.txt
|
||||
ln -s tipc_shitu_demo_data/demo_train demo_train
|
||||
cd tipc_shitu_demo_data
|
||||
ln -s demo_test.txt val_list.txt
|
||||
cd ../../
|
||||
else
|
||||
cd dataset
|
||||
rm -rf ILSVRC2012
|
||||
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/whole_chain_infer.tar
|
||||
tar xf whole_chain_infer.tar
|
||||
ln -s whole_chain_infer ILSVRC2012
|
||||
cd ILSVRC2012
|
||||
mv val.txt val_list.txt
|
||||
ln -s val_list.txt train_list.txt
|
||||
cd ../../
|
||||
fi
|
||||
# download inference or pretrained model
|
||||
eval "wget -nc $model_url_value"
|
||||
if [[ ${model_url_value} =~ ".tar" ]]; then
|
||||
|
|
|
@ -110,9 +110,6 @@ function func_inference() {
|
|||
for use_gpu in ${use_gpu_list[*]}; do
|
||||
if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
|
||||
for use_mkldnn in ${use_mkldnn_list[*]}; do
|
||||
if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
|
||||
continue
|
||||
fi
|
||||
for threads in ${cpu_threads_list[*]}; do
|
||||
for batch_size in ${batch_size_list[*]}; do
|
||||
_save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log"
|
||||
|
@ -136,9 +133,6 @@ function func_inference() {
|
|||
if [ ${precision} = "True" ] && [ ${use_trt} = "False" ]; then
|
||||
continue
|
||||
fi
|
||||
if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
|
||||
continue
|
||||
fi
|
||||
for batch_size in ${batch_size_list[*]}; do
|
||||
_save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
|
||||
set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
|
||||
|
@ -161,35 +155,6 @@ function func_inference() {
|
|||
done
|
||||
}
|
||||
|
||||
# if [[ ${MODE} = "whole_infer" ]] || [[ ${MODE} = "klquant_whole_infer" ]]; then
|
||||
# IFS="|"
|
||||
# infer_export_flag=(${infer_export_flag})
|
||||
# if [ ${infer_export_flag} != "null" ] && [ ${infer_export_flag} != "False" ]; then
|
||||
# rm -rf ${infer_model_dir_list/..\//}
|
||||
# export_cmd="${python} ${norm_export} -o Global.pretrained_model=${model_name}_pretrained -o Global.save_inference_dir=${infer_model_dir_list/..\//}"
|
||||
# eval $export_cmd
|
||||
# fi
|
||||
# fi
|
||||
|
||||
# if [[ ${MODE} = "whole_infer" ]]; then
|
||||
# GPUID=$3
|
||||
# if [ ${#GPUID} -le 0 ]; then
|
||||
# env=" "
|
||||
# else
|
||||
# env="export CUDA_VISIBLE_DEVICES=${GPUID}"
|
||||
# fi
|
||||
# # set CUDA_VISIBLE_DEVICES
|
||||
# eval $env
|
||||
# export Count=0
|
||||
# cd deploy
|
||||
# for infer_model in ${infer_model_dir_list[*]}; do
|
||||
# #run inference
|
||||
# is_quant=${infer_quant_flag[Count]}
|
||||
# echo "is_quant: ${is_quant}"
|
||||
# func_inference "${python}" "${inference_py}" "${infer_model}" "../${LOG_PATH}" "${infer_img_dir}" ${is_quant}
|
||||
# Count=$(($Count + 1))
|
||||
# done
|
||||
# cd ..
|
||||
|
||||
if [[ ${MODE} = "whole_infer" ]]; then
|
||||
# for kl_quant
|
||||
|
@ -200,13 +165,13 @@ if [[ ${MODE} = "whole_infer" ]]; then
|
|||
eval $command
|
||||
last_status=${PIPESTATUS[0]}
|
||||
status_check $last_status "${command}" "${status_log}" "${model_name}"
|
||||
# cd inference/quant_post_static_model
|
||||
# ln -s __model__ inference.pdmodel
|
||||
# ln -s __params__ inference.pdiparams
|
||||
# cd ../../deploy
|
||||
# is_quant=True
|
||||
# func_inference "${python}" "${inference_py}" "${infer_model_dir_list}/quant_post_static_model" "../${LOG_PATH}" "${infer_img_dir}" ${is_quant}
|
||||
# cd ..
|
||||
cd ${infer_model_dir_list}/quant_post_static_model
|
||||
ln -s __model__ inference.pdmodel
|
||||
ln -s __params__ inference.pdiparams
|
||||
cd ../../deploy
|
||||
is_quant=True
|
||||
func_inference "${python}" "${inference_py}" "../${infer_model_dir_list}/quant_post_static_model" "../${LOG_PATH}" "${infer_img_dir}" ${is_quant}
|
||||
cd ..
|
||||
fi
|
||||
else
|
||||
IFS="|"
|
||||
|
|
Loading…
Reference in New Issue