Repository: https://github.com/PaddlePaddle/PaddleOCR.git
fix tipc infer log
commit f1d5d5396e (parent c1f9e807fb)
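In short: the TIPC python inference runner derives its log file names from the run configuration, and this change adds the GPU id to those names. func_inference gains a seventh positional argument, _gpu, both the CPU and GPU log-name templates gain a gpus_${_gpu} field, and the two call sites pass "${gpu}" through.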
@@ -101,6 +101,7 @@ function func_inference(){
     _log_path=$4
     _img_dir=$5
     _flag_quant=$6
+    _gpu=$7
     # inference
     for use_gpu in ${use_gpu_list[*]}; do
         if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
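The hunk above threads the GPU id into func_inference as a seventh positional argument. A minimal, self-contained sketch of just the argument plumbing (the real function's loop bodies are elided; the invocation values below are made up for illustration):

    #!/usr/bin/env bash
    # Sketch only: how the new seventh argument reaches func_inference.
    function func_inference(){
        _log_path=$4     # directory collecting the *.log files
        _img_dir=$5      # input images for inference
        _flag_quant=$6   # whether this is a quantized model
        _gpu=$7          # new in this commit: GPU id for the log name
        echo "logs: ${_log_path}  imgs: ${_img_dir}  quant: ${_flag_quant}  gpu: ${_gpu}"
    }
    # Hypothetical invocation mirroring the updated call sites further down:
    func_inference python3 predict.py ./inference ./test_tipc/output ./imgs False 0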
@@ -119,7 +120,7 @@ function func_inference(){
             fi # skip when quant model inference but precision is not int8
             set_precision=$(func_set_params "${precision_key}" "${precision}")

-            _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
+            _save_log_path="${_log_path}/python_infer_cpu_gpus_${_gpu}_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
             set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
             set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
             set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
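With ${_gpu} interpolated, the CPU-side log name gains a gpus_<id> field, so runs pinned to different devices write to different files. The resulting pattern, with illustrative values:

    _log_path=./test_tipc/output; _gpu=0
    use_mkldnn=True; threads=1; precision=fp32; batch_size=1
    _save_log_path="${_log_path}/python_infer_cpu_gpus_${_gpu}_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
    echo "${_save_log_path}"
    # ./test_tipc/output/python_infer_cpu_gpus_0_usemkldnn_True_threads_1_precision_fp32_batchsize_1.log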
@@ -150,7 +151,7 @@ function func_inference(){
             continue
         fi
         for batch_size in ${batch_size_list[*]}; do
-            _save_log_path="${_log_path}/python_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
+            _save_log_path="${_log_path}/python_infer_gpu_gpus_${_gpu}_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
             set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
             set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
             set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
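The GPU branch gets the same field: with the illustrative values above plus use_trt=False, the name expands to python_infer_gpu_gpus_0_usetrt_False_precision_fp32_batchsize_1.log, distinguishing both the TensorRT setting and the device id.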
@@ -184,6 +185,7 @@ if [ ${MODE} = "whole_infer" ]; then
     # set CUDA_VISIBLE_DEVICES
     eval $env
     export Count=0
+    gpu=0
     IFS="|"
     infer_run_exports=(${infer_export_list})
     infer_quant_flag=(${infer_is_quant})
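whole_infer pins gpu=0 up front so the new argument is always defined when func_inference is called. An alternative, shown here only as a sketch and not what the commit does, would be to default the argument inside the function:

    # Sketch: defaulting $7 inside the function instead of setting gpu=0
    # at the call site. The commit takes the call-site route.
    demo(){ local _gpu=${7:-0}; echo "gpu=${_gpu}"; }
    demo a b c d e f      # prints: gpu=0 (seventh argument omitted)
    demo a b c d e f 3    # prints: gpu=3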
@@ -205,7 +207,7 @@ if [ ${MODE} = "whole_infer" ]; then
        fi
        #run inference
        is_quant=${infer_quant_flag[Count]}
-       func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
+       func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} "${gpu}"
        Count=$(($Count + 1))
    done
else
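Updating every call site matters: a call that still passes six arguments leaves $7 empty inside the function, and the log name silently degrades instead of failing. A quick demonstration:

    # Sketch: an un-updated call site collapses the template to "gpus__".
    name(){ echo "python_infer_cpu_gpus_${7}_usemkldnn_True.log"; }
    name a b c d e f      # python_infer_cpu_gpus__usemkldnn_True.log
    name a b c d e f 0    # python_infer_cpu_gpus_0_usemkldnn_True.log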
@@ -328,7 +330,7 @@ else
            else
                infer_model_dir=${save_infer_path}
            fi
-           func_inference "${python}" "${inference_py}" "${infer_model_dir}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}"
+           func_inference "${python}" "${inference_py}" "${infer_model_dir}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}" "${gpu}"

            eval "unset CUDA_VISIBLE_DEVICES"
        fi
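With this second call site updated as well, both TIPC paths, whole_infer above and the train-then-infer branch here, pass "${gpu}" through, so every python_infer_*.log name carries the device id.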