commit b1d26deda7
@@ -1,16 +1,16 @@
-===========================ch_ppocr_mobile_v2.0===========================
+===========================ch_PP-OCRv2===========================
 model_name:ch_PP-OCRv2
 python:python3.7
 infer_model:./inference/ch_PP-OCRv2_det_infer/
 infer_export:null
-infer_quant:True
+infer_quant:False
 inference:tools/infer/predict_system.py
 --use_gpu:False|True
 --enable_mkldnn:False|True
 --cpu_threads:1|6
 --rec_batch_num:1
 --use_tensorrt:False|True
---precision:fp32|fp16
+--precision:fp32
 --det_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
 --rec_model_dir:./inference/ch_PP-OCRv2_rec_infer/
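In these TIPC configs, a value such as `False|True` on a `--use_gpu` line is not a literal argument: the runner splits it on `|` and executes the inference command once per value. A minimal sketch of that expansion (illustrative only — the real parsing lives in `test_tipc/common_func.sh` and the runner script further down; `value_list` and the echoed command are hypothetical):

```bash
#!/bin/bash
# Sketch: expand one "key:v1|v2" config line into a sweep of runs.
line="--use_gpu:False|True"
key=${line%%:*}        # "--use_gpu"
vals=${line#*:}        # "False|True"
IFS='|' read -ra value_list <<< "${vals}"
for v in "${value_list[@]}"; do
    echo "would run: python3.7 tools/infer/predict_system.py ${key}=${v} ..."
done
```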
@@ -3,14 +3,14 @@ model_name:ch_ppocr_mobile_v2.0
 python:python3.7
 infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
 infer_export:null
-infer_quant:True
+infer_quant:False
 inference:tools/infer/predict_system.py
 --use_gpu:False|True
 --enable_mkldnn:False|True
 --cpu_threads:1|6
 --rec_batch_num:1
 --use_tensorrt:False|True
---precision:fp32|fp16
+--precision:fp32
 --det_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
 --rec_model_dir:./inference/ch_ppocr_mobile_v2.0_rec_infer/
@@ -1,16 +1,16 @@
-===========================ch_ppocr_mobile_v2.0===========================
+===========================ch_ppocr_server_v2.0===========================
 model_name:ch_ppocr_server_v2.0
 python:python3.7
 infer_model:./inference/ch_ppocr_server_v2.0_det_infer/
 infer_export:null
 infer_quant:True
 inference:tools/infer/predict_system.py
--use_gpu:False
--enable_mkldnn:False
+--use_gpu:False|True
+--enable_mkldnn:False|True
 --cpu_threads:1|6
 --rec_batch_num:1
 --use_tensorrt:False
--precision:int8
+--precision:fp32
 --det_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
 --rec_model_dir:./inference/ch_ppocr_server_v2.0_rec_infer/
@@ -44,7 +44,7 @@ inference:tools/infer/predict_e2e.py
 --rec_batch_num:1
 --use_tensorrt:False|True
 --precision:fp32|fp16|int8
---det_model_dir:
+--e2e_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
 null:null
 --benchmark:True
@@ -1,6 +1,6 @@
 # Basic training and inference functional test on Jetson
 
-The main entry point of the basic training and inference functional test on Jetson is `test_train_inference_python.sh`. Since the CPU on Jetson is weak, only the GPU and TensorRT inference parts of TIPC need to be tested on Jetson.
+The main entry point of the basic training and inference functional test on Jetson is `test_inference_inference.sh`. Since the CPU on Jetson is weak, only the GPU and TensorRT inference parts of TIPC need to be tested on Jetson.
 
 ## 1. Summary of test results
 
@@ -40,21 +40,21 @@ The main entry point of the basic training and inference functional test on Jetson is `test_train_inference_pyth
 
 ### 2.2 Functional testing
 
-First run `prepare.sh` to prepare the data and models, then run `test_train_inference_python.sh` for the test; log files in the `python_infer_*.log` format are finally generated under the ```test_tipc/output``` directory.
+First run `prepare.sh` to prepare the data and models, then run `test_inference_inference.sh` for the test; log files in the `python_infer_*.log` format are finally generated under the ```test_tipc/output``` directory.
 
-`test_train_inference_python.sh` includes 5 [run modes](./test_train_inference_python.md); on Jetson, only the inference mode needs to be tested:
+`test_inference_inference.sh` has only one mode, `whole_infer`; on Jetson, only the inference mode needs to be tested:
 
-```
-- Mode 3: whole_infer — no training, full-scale data prediction; walks through open-source model evaluation and dynamic-to-static export, and checks the inference model's prediction time and accuracy;
+```shell
+bash test_tipc/prepare.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer'
 # Usage 1:
-bash test_tipc/test_inference_jeston.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer'
+bash test_tipc/test_inference_inference.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer'
 # Usage 2: run prediction on a specified GPU card; the third argument is the GPU card id
 bash test_tipc/test_inference_jeston.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer' '1'
 ```
 
-After running the corresponding command, the run logs are saved automatically under the `test_tipc/output` folder. For example, in `lite_train_lite_infer` mode, the training + inference chain is run, so the `test_tipc/output` folder contains the following files:
+After running the corresponding command, the run logs are saved automatically under the `test_tipc/output` folder. For example, in `whole_infer` mode, the inference chain is run, so the `test_tipc/output` folder contains the following files:
 ```
 test_tipc/output/
 |- results_python.log # log of the status of each executed command
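Once a run completes, `results_python.log` records one status line per executed command. A quick way to scan it for failures — a sketch assuming the `Run successfully` / `Run failed` wording written by `status_check` in `test_tipc/common_func.sh`:

```bash
#!/bin/bash
# Count successes and flag failures in the TIPC status log.
log=test_tipc/output/results_python.log
echo "succeeded: $(grep -c 'Run successfully' "${log}")"
if grep -q 'Run failed' "${log}"; then
    echo "some commands failed; inspect the matching python_infer_*.log files"
fi
```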
@@ -45,7 +45,7 @@ if [ ${MODE} = "lite_train_lite_infer" ];then
 wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/total_text_lite.tar --no-check-certificate
 wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/en_server_pgnetA.tar --no-check-certificate
 cd ./pretrain_models/ && tar xf en_server_pgnetA.tar && cd ../
-cd ./train_data && tar xf total_text_lite.tar && ln -s total_text && cd ../
+cd ./train_data && tar xf total_text_lite.tar && ln -s total_text_lite total_text && cd ../
 fi
 if [ ${model_name} == "det_r50_vd_sast_icdar15_v2.0" ] || [ ${model_name} == "det_r50_vd_sast_totaltext_v2.0" ]; then
 wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet50_vd_ssld_pretrained.pdparams --no-check-certificate
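The `ln -s` fix above matters because the one-argument form creates a symlink named after its target in the current directory, i.e. a dangling `total_text -> total_text` self-reference, while the archive actually extracts to `total_text_lite`. A small demonstration of the difference (hypothetical working directory):

```bash
#!/bin/bash
cd ./train_data
tar xf total_text_lite.tar          # extracts to ./total_text_lite
# old (broken): "ln -s total_text" creates total_text -> total_text (dangling self-link)
ln -s total_text_lite total_text    # new: total_text -> total_text_lite
ls -l total_text                    # total_text -> total_text_lite
```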
@@ -1,87 +0,0 @@
-#!/bin/bash
-source test_tipc/common_func.sh
-source test_tipc/test_train_inference_python.sh
-
-FILENAME=$1
-# MODE be one of ['whole_infer']
-MODE=$2
-
-dataline=$(awk 'NR==1, NR==17{print}' $FILENAME)
-
-# parser params
-IFS=$'\n'
-lines=(${dataline})
-
-model_name=$(func_parser_value "${lines[1]}")
-python=$(func_parser_value "${lines[2]}")
-
-infer_model_dir_list=$(func_parser_value "${lines[3]}")
-infer_export_list=$(func_parser_value "${lines[4]}")
-infer_is_quant=$(func_parser_value "${lines[5]}")
-# parser inference
-inference_py=$(func_parser_value "${lines[6]}")
-use_gpu_key=$(func_parser_key "${lines[7]}")
-use_gpu_list=$(func_parser_value "${lines[7]}")
-use_mkldnn_key=$(func_parser_key "${lines[8]}")
-use_mkldnn_list=$(func_parser_value "${lines[8]}")
-cpu_threads_key=$(func_parser_key "${lines[9]}")
-cpu_threads_list=$(func_parser_value "${lines[9]}")
-batch_size_key=$(func_parser_key "${lines[10]}")
-batch_size_list=$(func_parser_value "${lines[10]}")
-use_trt_key=$(func_parser_key "${lines[11]}")
-use_trt_list=$(func_parser_value "${lines[11]}")
-precision_key=$(func_parser_key "${lines[12]}")
-precision_list=$(func_parser_value "${lines[12]}")
-infer_model_key=$(func_parser_key "${lines[13]}")
-image_dir_key=$(func_parser_key "${lines[14]}")
-infer_img_dir=$(func_parser_value "${lines[14]}")
-save_log_key=$(func_parser_key "${lines[15]}")
-benchmark_key=$(func_parser_key "${lines[16]}")
-benchmark_value=$(func_parser_value "${lines[16]}")
-infer_key1=$(func_parser_key "${lines[17]}")
-infer_value1=$(func_parser_value "${lines[17]}")
-
-
-LOG_PATH="./test_tipc/output"
-mkdir -p ${LOG_PATH}
-status_log="${LOG_PATH}/results_python.log"
-
-
-if [ ${MODE} = "whole_infer" ]; then
-    GPUID=$3
-    if [ ${#GPUID} -le 0 ];then
-        env=" "
-    else
-        env="export CUDA_VISIBLE_DEVICES=${GPUID}"
-    fi
-    # set CUDA_VISIBLE_DEVICES
-    eval $env
-    export Count=0
-    IFS="|"
-    infer_run_exports=(${infer_export_list})
-    infer_quant_flag=(${infer_is_quant})
-    for infer_model in ${infer_model_dir_list[*]}; do
-        # run export
-        if [ ${infer_run_exports[Count]} != "null" ];then
-            save_infer_dir=$(dirname $infer_model)
-            set_export_weight=$(func_set_params "${export_weight}" "${infer_model}")
-            set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}")
-            export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}"
-            echo ${infer_run_exports[Count]}
-            echo $export_cmd
-            eval $export_cmd
-            status_export=$?
-            status_check $status_export "${export_cmd}" "${status_log}"
-        else
-            save_infer_dir=${infer_model}
-        fi
-        #run inference
-        is_quant=${infer_quant_flag[Count]}
-        if [ ${MODE} = "klquant_infer" ]; then
-            is_quant="True"
-        fi
-        func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
-        Count=$(($Count + 1))
-    done
-fi
@@ -0,0 +1,169 @@
+#!/bin/bash
+source test_tipc/common_func.sh
+#source test_tipc/test_train_inference_python.sh
+
+FILENAME=$1
+# MODE be one of ['whole_infer']
+MODE=$2
+
+dataline=$(awk 'NR==1, NR==20{print}' $FILENAME)
+
+# parser params
+IFS=$'\n'
+lines=(${dataline})
+
+model_name=$(func_parser_value "${lines[1]}")
+python=$(func_parser_value "${lines[2]}")
+
+infer_model_dir_list=$(func_parser_value "${lines[3]}")
+infer_export_list=$(func_parser_value "${lines[4]}")
+infer_is_quant=$(func_parser_value "${lines[5]}")
+# parser inference
+inference_py=$(func_parser_value "${lines[6]}")
+use_gpu_key=$(func_parser_key "${lines[7]}")
+use_gpu_list=$(func_parser_value "${lines[7]}")
+use_mkldnn_key=$(func_parser_key "${lines[8]}")
+use_mkldnn_list=$(func_parser_value "${lines[8]}")
+cpu_threads_key=$(func_parser_key "${lines[9]}")
+cpu_threads_list=$(func_parser_value "${lines[9]}")
+batch_size_key=$(func_parser_key "${lines[10]}")
+batch_size_list=$(func_parser_value "${lines[10]}")
+use_trt_key=$(func_parser_key "${lines[11]}")
+use_trt_list=$(func_parser_value "${lines[11]}")
+precision_key=$(func_parser_key "${lines[12]}")
+precision_list=$(func_parser_value "${lines[12]}")
+infer_model_key=$(func_parser_key "${lines[13]}")
+image_dir_key=$(func_parser_key "${lines[14]}")
+infer_img_dir=$(func_parser_value "${lines[14]}")
+rec_model_key=$(func_parser_key "${lines[15]}")
+rec_model_value=$(func_parser_value "${lines[15]}")
+benchmark_key=$(func_parser_key "${lines[16]}")
+benchmark_value=$(func_parser_value "${lines[16]}")
+infer_key1=$(func_parser_key "${lines[17]}")
+infer_value1=$(func_parser_value "${lines[17]}")
+
+
+LOG_PATH="./test_tipc/output"
+mkdir -p ${LOG_PATH}
+status_log="${LOG_PATH}/results_python.log"
+
+
+function func_inference(){
+    IFS='|'
+    _python=$1
+    _script=$2
+    _model_dir=$3
+    _log_path=$4
+    _img_dir=$5
+    _flag_quant=$6
+    # inference
+    for use_gpu in ${use_gpu_list[*]}; do
+        if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
+            for use_mkldnn in ${use_mkldnn_list[*]}; do
+                if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
+                    continue
+                fi
+                for threads in ${cpu_threads_list[*]}; do
+                    for batch_size in ${batch_size_list[*]}; do
+                        for precision in ${precision_list[*]}; do
+                            if [ ${use_mkldnn} = "False" ] && [ ${precision} = "fp16" ]; then
+                                continue
+                            fi # skip when fp16 is enabled but mkldnn is disabled
+                            if [ ${_flag_quant} = "True" ] && [ ${precision} != "int8" ]; then
+                                continue
+                            fi # skip when running a quant model but precision is not int8
+                            set_precision=$(func_set_params "${precision_key}" "${precision}")
+
+                            _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
+                            set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
+                            set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
+                            set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
+                            set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
+                            set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
+                            set_infer_params0=$(func_set_params "${rec_model_key}" "${rec_model_value}")
+                            set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
+                            command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_params0} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 "
+                            eval $command
+                            last_status=${PIPESTATUS[0]}
+                            eval "cat ${_save_log_path}"
+                            status_check $last_status "${command}" "${status_log}"
+                        done
+                    done
+                done
+            done
+        elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
+            for use_trt in ${use_trt_list[*]}; do
+                for precision in ${precision_list[*]}; do
+                    if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
+                        continue
+                    fi
+                    if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
+                        continue
+                    fi
+                    if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
+                        continue
+                    fi
+                    for batch_size in ${batch_size_list[*]}; do
+                        _save_log_path="${_log_path}/python_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
+                        set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
+                        set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
+                        set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
+                        set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}")
+                        set_precision=$(func_set_params "${precision_key}" "${precision}")
+                        set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
+                        # this file parses rec_model_key/rec_model_value from lines[15]
+                        # (save_log_* is never defined here), so pass the rec model dir
+                        set_infer_params0=$(func_set_params "${rec_model_key}" "${rec_model_value}")
+                        set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
+                        command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 "
+                        eval $command
+                        last_status=${PIPESTATUS[0]}
+                        eval "cat ${_save_log_path}"
+                        status_check $last_status "${command}" "${status_log}"
+                    done
+                done
+            done
+        else
+            echo "Does not support hardware other than CPU and GPU currently!"
+        fi
+    done
+}
+
+if [ ${MODE} = "whole_infer" ]; then
+    GPUID=$3
+    if [ ${#GPUID} -le 0 ];then
+        env=" "
+    else
+        env="export CUDA_VISIBLE_DEVICES=${GPUID}"
+    fi
+    # set CUDA_VISIBLE_DEVICES
+    eval $env
+    export Count=0
+    IFS="|"
+    infer_run_exports=(${infer_export_list})
+    infer_quant_flag=(${infer_is_quant})
+    for infer_model in ${infer_model_dir_list[*]}; do
+        # run export
+        if [ ${infer_run_exports[Count]} != "null" ];then
+            save_infer_dir=$(dirname $infer_model)
+            set_export_weight=$(func_set_params "${export_weight}" "${infer_model}")
+            set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}")
+            export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}"
+            echo ${infer_run_exports[Count]}
+            eval $export_cmd
+            status_export=$?
+            status_check $status_export "${export_cmd}" "${status_log}"
+        else
+            save_infer_dir=${infer_model}
+        fi
+        #run inference
+        is_quant=${infer_quant_flag[Count]}
+        if [ ${MODE} = "klquant_infer" ]; then
+            is_quant="True"
+        fi
+        func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
+        Count=$(($Count + 1))
+    done
+fi
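For reference, a typical invocation of this new runner on Jetson, mirroring the usage shown in the doc hunk above (the optional third argument selects the GPU via CUDA_VISIBLE_DEVICES; card '0' here is just an example):

```bash
bash test_tipc/prepare.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer'
bash test_tipc/test_inference_inference.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer' '0'
```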
@@ -118,6 +118,7 @@ if [ ${MODE} = "klquant_whole_infer" ]; then
 image_dir_key=$(func_parser_key "${lines[16]}")
 infer_img_dir=$(func_parser_value "${lines[16]}")
 save_log_key=$(func_parser_key "${lines[17]}")
+save_log_value=$(func_parser_value "${lines[17]}")
 benchmark_key=$(func_parser_key "${lines[18]}")
 benchmark_value=$(func_parser_value "${lines[18]}")
 infer_key1=$(func_parser_key "${lines[19]}")
@@ -161,8 +162,9 @@ function func_inference(){
 set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
 set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
 set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
+set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}")
 set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
-command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 "
+command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_params0} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 "
 eval $command
 last_status=${PIPESTATUS[0]}
 eval "cat ${_save_log_path}"
@@ -191,8 +193,9 @@ function func_inference(){
 set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}")
 set_precision=$(func_set_params "${precision_key}" "${precision}")
 set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
+set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}")
 set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
-command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
+command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 "
 eval $command
 last_status=${PIPESTATUS[0]}
 eval "cat ${_save_log_path}"
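The `${set_infer_params0}` fragments added above come from `func_set_params`, which, as used throughout these scripts, renders a `key=value` pair or nothing when the value is null, so optional flags drop out of the command cleanly. A minimal sketch of that behavior (semantics assumed from its usage, not copied from `test_tipc/common_func.sh`):

```bash
#!/bin/bash
# Sketch of func_set_params semantics: emit "key=value",
# or a blank when either side is null/empty.
func_set_params_sketch() {
    local key=$1 value=$2
    if [ -z "${key}" ] || [ -z "${value}" ] || [ "${value}" = "null" ]; then
        echo " "
    else
        echo "${key}=${value}"
    fi
}
func_set_params_sketch "--save_log_path" "./log/"   # -> --save_log_path=./log/
func_set_params_sketch "--save_log_path" "null"     # -> (blank; flag omitted)
```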
@@ -211,7 +211,7 @@ def create_predictor(args, mode, logger):
                 "nearest_interp_v2_0.tmp_0": [1, 256, 2, 2]
             }
             max_input_shape = {
-                "x": [1, 3, 1280, 1280],
+                "x": [1, 3, 1536, 1536],
                 "conv2d_92.tmp_0": [1, 120, 400, 400],
                 "conv2d_91.tmp_0": [1, 24, 200, 200],
                 "conv2d_59.tmp_0": [1, 96, 400, 400],
@@ -261,7 +261,7 @@ def create_predictor(args, mode, logger):
             opt_input_shape.update(opt_pact_shape)
         elif mode == "rec":
             min_input_shape = {"x": [1, 3, 32, 10]}
-            max_input_shape = {"x": [args.rec_batch_num, 3, 32, 1024]}
+            max_input_shape = {"x": [args.rec_batch_num, 3, 32, 1536]}
             opt_input_shape = {"x": [args.rec_batch_num, 3, 32, 320]}
         elif mode == "cls":
             min_input_shape = {"x": [1, 3, 48, 10]}