add whole_chain test

pull/1097/head
dongshuilong 2021-07-20 04:03:27 +00:00
parent 4af9f51088
commit 7d42f2dfe6
5 changed files with 486 additions and 0 deletions


@@ -41,6 +41,26 @@ class ClsPredictor(Predictor):
        if "PostProcess" in config:
            self.postprocess = build_postprocess(config["PostProcess"])
        # for the whole_chain project, to benchmark each Paddle repo
        self.benchmark = config.get("benchmark", False)
        if self.benchmark:
            import auto_log
            import os
            pid = os.getpid()
            self.auto_log = auto_log.AutoLogger(
                model_name='cls',
                model_precision='fp16'
                if config["Global"]["use_fp16"] else 'fp32',
                batch_size=1,
                data_shape=[3, 224, 224],
                save_path="../output/auto_log.log",
                inference_config=None,
                pids=pid,
                process_name=None,
                gpu_ids=None,
                time_keys=['preprocess_time', 'inference_time'],
                warmup=10)

    def predict(self, images):
        input_names = self.paddle_predictor.get_input_names()
        input_tensor = self.paddle_predictor.get_input_handle(input_names[0])
@@ -49,16 +69,22 @@ class ClsPredictor(Predictor):
        output_tensor = self.paddle_predictor.get_output_handle(output_names[0])
        if self.benchmark:
            self.auto_log.times.start()
        if not isinstance(images, (list, )):
            images = [images]
        for idx in range(len(images)):
            for ops in self.preprocess_ops:
                images[idx] = ops(images[idx])
        image = np.array(images)
        if self.benchmark:
            self.auto_log.times.stamp()

        input_tensor.copy_from_cpu(image)
        self.paddle_predictor.run()
        batch_output = output_tensor.copy_to_cpu()
        if self.benchmark:
            self.auto_log.times.stamp()
        return batch_output
@@ -71,6 +97,9 @@ def main(config):
        img = cv2.imread(image_file)[:, :, ::-1]
        output = cls_predictor.predict(img)
        output = cls_predictor.postprocess(output, [image_file])
        if cls_predictor.benchmark:
            cls_predictor.auto_log.times.end(stamp=True)
            cls_predictor.auto_log.report()
        print(output)
    return
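The benchmark hooks above are switched on from the command line rather than by editing the config. A minimal sketch, assuming the `-o` override syntax that test/test.sh below uses when it appends `-o benchmark=True` to the inference command (script and config paths taken from test/parames.txt):

# enable the auto_log timing hooks for a single inference run
cd deploy
python3.7 python/predict_cls.py -c configs/inference_cls.yaml -o benchmark=True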

test/benchmark.yaml 100644 (+132 lines)

@@ -0,0 +1,132 @@
# global configs
Global:
  checkpoints: null
  pretrained_model: null
  output_dir: ./output/
  device: gpu
  save_interval: 1
  eval_during_train: True
  eval_interval: 1
  epochs: 10
  print_batch_step: 10
  use_visualdl: False
  # used for static mode and model export
  image_shape: [3, 224, 224]
  save_inference_dir: ./inference
  # training model under @to_static
  to_static: False

# model architecture
Arch:
  name: ResNet50
  class_num: 1000

# loss function config for training/eval process
Loss:
  Train:
    - CELoss:
        weight: 1.0
  Eval:
    - CELoss:
        weight: 1.0

Optimizer:
  name: Momentum
  momentum: 0.9
  lr:
    name: Piecewise
    learning_rate: 0.1
    decay_epochs: [30, 60, 90]
    values: [0.1, 0.01, 0.001, 0.0001]
  regularizer:
    name: 'L2'
    coeff: 0.0001

# data loader for train and eval
DataLoader:
  Train:
    dataset:
      name: ImageNetDataset
      image_root: ./dataset/chain_dataset/
      cls_label_path: ./dataset/chain_dataset/train.txt
      transform_ops:
        - DecodeImage:
            to_rgb: True
            channel_first: False
        - RandCropImage:
            size: 224
        - RandFlipImage:
            flip_code: 1
        - NormalizeImage:
            scale: 1.0/255.0
            mean: [0.485, 0.456, 0.406]
            std: [0.229, 0.224, 0.225]
            order: ''
    sampler:
      name: DistributedBatchSampler
      batch_size: 64
      drop_last: False
      shuffle: True
    loader:
      num_workers: 4
      use_shared_memory: True

  Eval:
    dataset:
      name: ImageNetDataset
      image_root: ./dataset/chain_dataset/
      cls_label_path: ./dataset/chain_dataset/val.txt
      transform_ops:
        - DecodeImage:
            to_rgb: True
            channel_first: False
        - ResizeImage:
            resize_short: 256
        - CropImage:
            size: 224
        - NormalizeImage:
            scale: 1.0/255.0
            mean: [0.485, 0.456, 0.406]
            std: [0.229, 0.224, 0.225]
            order: ''
    sampler:
      name: DistributedBatchSampler
      batch_size: 64
      drop_last: False
      shuffle: False
    loader:
      num_workers: 4
      use_shared_memory: True

Infer:
  infer_imgs: docs/images/whl/demo.jpg
  batch_size: 10
  transforms:
    - DecodeImage:
        to_rgb: True
        channel_first: False
    - ResizeImage:
        resize_short: 256
    - CropImage:
        size: 224
    - NormalizeImage:
        scale: 1.0/255.0
        mean: [0.485, 0.456, 0.406]
        std: [0.229, 0.224, 0.225]
        order: ''
    - ToCHWImage:
  PostProcess:
    name: Topk
    topk: 5
    class_id_map_file: ppcls/utils/imagenet1k_label_list.txt

Metric:
  Train:
    - TopkAcc:
        topk: [1, 5]
  Eval:
    - TopkAcc:
        topk: [1, 5]

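For context, a brief sketch of how this config is driven by the chain test, using the tools/ entry points listed in the params file below (the params file refers to it as test_ci/benchmark.yaml; adjust the path to match your checkout, and the best_model path is assumed from Global.output_dir):

# train, then evaluate, one of the chain-test models against dataset/chain_dataset
python3.7 tools/train.py -c test/benchmark.yaml -o Arch.name=ResNet50_vd
python3.7 tools/eval.py -c test/benchmark.yaml -o Arch.name=ResNet50_vd -o Global.pretrained_model=./output/ResNet50_vd/best_model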
test/parames.txt 100644 (+57 lines)

@@ -0,0 +1,57 @@
===========================train_params===========================
model_name:ResNet50_vd|ResNeXt101_vd_64x4d|HRNet_W18_C|MobileNetV3_large_x1_0|DarkNet53|MobileNetV1|MobileNetV2|ShuffleNetV2_x1_0
model_name_pact:ResNet50_vd|MobileNetV3_large_x1_0
model_name_fpgm:ResNet50_vd|MobileNetV3_large_x1_0
model_name_kl:ResNet50_vd|MobileNetV3_large_x1_0
python:python3.7
gpu_list:0|0,1|-1
Global.epoch_num:10
Global.save_model_dir:./output/
Global.pretrained_model:null
Global.save_inference_dir:null
===========================scripts===========================
train:tools/train.py -c test_ci/benchmark.yaml
eval:tools/eval.py -c test_ci/benchmark.yaml
norm_export:tools/export_model.py -c test_ci/benchmark.yaml
inference:python/predict_cls.py -c configs/inference_cls.yaml
===========================infer_params===========================
Global.save_log_path:./test/output/
Global.use_gpu:True|False
Global.enable_mkldnn:True|False
Global.cpu_num_threads:1|6
Global.batch_size:1
Global.use_tensorrt:True|False
Global.use_fp16:True|False
Global.inference_model_dir:./inference
Global.infer_imgs:./dataset/chain_dataset/val
===========================pretrained_model===========================
ResNet50_vd:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_pretrained.pdparams
ResNeXt101_vd_64x4d:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_vd_64x4d_pretrained.pdparams
HRNet_W18_C:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W18_C_pretrained.pdparams
MobileNetV3_large_x1_0:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_0_pretrained.pdparams
DarkNet53:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DarkNet53_pretrained.pdparams
MobileNetV1:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_pretrained.pdparams
MobileNetV2:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_pretrained.pdparams
ShuffleNetV2_x1_0:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x1_0_pretrained.pdparams

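Each non-header line in this file is a key:value pair, and '|' separates the candidate values that test/test.sh below sweeps over. A simplified sketch of that parsing, not the script's exact code:

# split one params line into a config key and its candidate values
line="Global.use_gpu:True|False"
IFS=":" read -r key value <<< "${line}"   # key=Global.use_gpu, value=True|False
IFS="|" read -ra options <<< "${value}"   # options=(True False)
for opt in "${options[@]}"; do
    echo "-o ${key}=${opt}"               # expands into a command-line override such as -o Global.use_gpu=True
done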
test/prepare.sh 100644 (+59 lines)

@@ -0,0 +1,59 @@
#!/bin/bash
FILENAME=$1
# MODE must be one of ['lite_train_infer', 'whole_infer', 'whole_train_infer', 'infer']
MODE=$2
dataline=$(cat ${FILENAME})
# parser params
IFS=$'\n'
lines=(${dataline})
function func_parser_value(){
    strs=$1
    IFS=":"
    array=(${strs})
    tmp=${array[1]}
    echo ${tmp}
}
ResNet50_vd=$(func_parser_value "${lines[49]}")
ResNeXt101_vd_64x4d=$(func_parser_value "${lines[50]}")
HRNet_W18_C=$(func_parser_value "${lines[51]}")
MobileNetV3_large_x1_0=$(func_parser_value "${lines[52]}")
DarkNet53=$(func_parser_value "${lines[53]}")
MobileNetV1=$(func_parser_value "${lines[54]}")
MobileNetV2=$(func_parser_value "${lines[55]}")
ShuffleNetV2_x1_0=$(func_parser_value "${lines[56]}")
model_name_list=$(func_parser_value "${lines[1]}")
if [ ${MODE} = "lite_train_infer" ] || [ ${MODE} = "whole_infer" ];then
    # download lite train data
    cd dataset
    wget -nc http://10.21.226.181:8011/dataset/quanliantiao/little_train.tar
    tar xf little_train.tar
    ln -s little_train chain_dataset
    cd ../
elif [ ${MODE} = "infer" ];then
    # download infer data
    cd dataset
    wget -nc http://10.21.226.181:8011/dataset/quanliantiao/infer.tar
    tar xf infer.tar
    ln -s infer chain_dataset
    cd ../
    # download pretrained models
    mkdir -p pretrained_models
    cd pretrained_models
    eval "wget -nc $ResNet50_vd"
    eval "wget -nc $ResNeXt101_vd_64x4d"
    eval "wget -nc $HRNet_W18_C"
    eval "wget -nc $MobileNetV3_large_x1_0"
    eval "wget -nc $DarkNet53"
    eval "wget -nc $MobileNetV1"
    eval "wget -nc $MobileNetV2"
    eval "wget -nc $ShuffleNetV2_x1_0"
elif [ ${MODE} = "whole_train_infer" ];then
    cd dataset
    wget -nc http://10.21.226.181:8011/dataset/quanliantiao/CIFAR100.tar
    tar xf CIFAR100.tar
    ln -s CIFAR100 chain_dataset
fi
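Example invocation, assuming the script is run from the repository root with the params file added in this commit:

# download the lite training subset and symlink it as dataset/chain_dataset
bash test/prepare.sh test/parames.txt lite_train_infer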

test/test.sh 100644 (+209 lines)

@@ -0,0 +1,209 @@
#!/bin/bash
FILENAME=$1
# MODE must be one of ['lite_train_infer', 'whole_infer', 'whole_train_infer', 'infer']
MODE=$2
dataline=$(cat ${FILENAME})
# parser params
IFS=$'\n'
lines=(${dataline})
function func_parser_key(){
    strs=$1
    IFS=":"
    array=(${strs})
    tmp=${array[0]}
    echo ${tmp}
}
function func_parser_value(){
    strs=$1
    IFS=":"
    array=(${strs})
    tmp=${array[1]}
    echo ${tmp}
}
function status_check(){
    last_status=$1   # the exit code
    run_command=$2
    run_log=$3
    if [ $last_status -eq 0 ]; then
        echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a ${run_log}
    else
        echo -e "\033[33m Run failed with command - ${run_command}! \033[0m" | tee -a ${run_log}
    fi
}
IFS=$'\n'
# The training params
model_name_list=$(func_parser_value "${lines[1]}")
model_name_pact_list=$(func_parser_value "${lines[2]}")
model_name_fpgm_list=$(func_parser_value "${lines[3]}")
model_name_kl_list=$(func_parser_value "${lines[4]}")
python=$(func_parser_value "${lines[5]}")
gpu_list=$(func_parser_value "${lines[6]}")
epoch_key=$(func_parser_key "${lines[7]}")
epoch_value=$(func_parser_value "${lines[7]}")
save_model_key=$(func_parser_key "${lines[8]}")
save_model_value=$(func_parser_value "${lines[8]}")
pretrain_model_key=$(func_parser_key "${lines[9]}")
save_infer_key=$(func_parser_key "${lines[10]}")
#scripts
train_py=$(func_parser_value "${lines[20]}")
eval_py=$(func_parser_value "${lines[21]}")
norm_export=$(func_parser_value "${lines[22]}")
inference_py=$(func_parser_value "${lines[23]}")
#The inference params
use_gpu_key=$(func_parser_key "${lines[33]}")
use_gpu_list=$(func_parser_value "${lines[33]}")
use_mkldnn_key=$(func_parser_key "${lines[34]}")
use_mkldnn_list=$(func_parser_value "${lines[34]}")
cpu_threads_key=$(func_parser_key "${lines[35]}")
cpu_threads_list=$(func_parser_value "${lines[35]}")
batch_size_key=$(func_parser_key "${lines[36]}")
batch_size_list=$(func_parser_value "${lines[36]}")
use_trt_key=$(func_parser_key "${lines[37]}")
use_trt_list=$(func_parser_value "${lines[37]}")
precision_key=$(func_parser_key "${lines[38]}")
precision_list=$(func_parser_value "${lines[38]}")
infer_model_key=$(func_parser_key "${lines[39]}")
infer_model=$(func_parser_value "${lines[39]}")
image_dir_key=$(func_parser_key "${lines[40]}")
infer_img_dir=$(func_parser_value "${lines[40]}")
save_log_key=$(func_parser_key "${lines[32]}")
LOG_PATH="./test/output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results.log"
function func_inference(){
    IFS='|'
    _python=$1
    _script=$2
    _model_dir=$3
    _log_path=$4
    _img_dir=$5
    _model_name=$6

    # inference
    for use_gpu in ${use_gpu_list[*]}; do
        if [ ${use_gpu} = "False" ]; then
            for use_mkldnn in ${use_mkldnn_list[*]}; do
                for threads in ${cpu_threads_list[*]}; do
                    for batch_size in ${batch_size_list[*]}; do
                        _save_log_path="${_log_path}/${_model_name}_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log"
                        command="${_python} ${_script} -o ${use_gpu_key}=${use_gpu} -o ${use_mkldnn_key}=${use_mkldnn} -o ${cpu_threads_key}=${threads} -o ${infer_model_key}=${_model_dir} -o ${batch_size_key}=${batch_size} -o ${image_dir_key}=${_img_dir} -o ${save_log_key}=${_save_log_path} -o benchmark=True"
                        eval $command
                        status_check $? "${command}" "${status_log}"
                    done
                done
            done
        else
            for use_trt in ${use_trt_list[*]}; do
                for precision in ${precision_list[*]}; do
                    if [ ${use_trt} = "False" ] && [ ${precision} != "fp32" ]; then
                        continue
                    fi
                    for batch_size in ${batch_size_list[*]}; do
                        _save_log_path="${_log_path}/${_model_name}_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
                        command="${_python} ${_script} -o ${use_gpu_key}=${use_gpu} -o ${use_trt_key}=${use_trt} -o ${precision_key}=${precision} -o ${infer_model_key}=${_model_dir} -o ${batch_size_key}=${batch_size} -o ${image_dir_key}=${_img_dir} -o ${save_log_key}=${_save_log_path} -o benchmark=True"
                        eval $command
                        status_check $? "${command}" "${status_log}"
                    done
                done
            done
        fi
    done
}
if [ ${MODE} != "infer" ]; then
    IFS="|"
    for gpu in ${gpu_list[*]}; do
        use_gpu=True
        if [ ${gpu} = "-1" ];then
            use_gpu=False
            env=""
        elif [ ${#gpu} -le 1 ];then
            env="export CUDA_VISIBLE_DEVICES=${gpu}"
            eval ${env}
        elif [ ${#gpu} -le 15 ];then
            IFS=","
            array=(${gpu})
            env="export CUDA_VISIBLE_DEVICES=${array[0]}"
            IFS="|"
        else
            IFS=";"
            array=(${gpu})
            ips=${array[0]}
            gpu=${array[1]}
            IFS="|"
            env=" "
        fi
        for model_name in ${model_name_list[*]}; do
            # do not override the epoch count for whole_train_infer
            if [ ${MODE} != "whole_train_infer" ]; then
                set_epoch="-o ${epoch_key}=${epoch_value}"
            else
                set_epoch=" "
            fi
            save_log="${LOG_PATH}/${model_name}_gpus_${gpu}"
            if [ ${gpu} = "-1" ];then        # train with cpu
                cmd="${python} ${train_py} -o Arch.name=${model_name} -o Global.device=cpu -o ${save_model_key}=${save_log} ${set_epoch}"
            elif [ ${#gpu} -le 2 ];then      # train with single gpu
                cmd="${python} ${train_py} -o Arch.name=${model_name} -o ${save_model_key}=${save_log} ${set_epoch}"
            elif [ ${#gpu} -le 15 ];then     # train with multi-gpu
                cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${train_py} -o Arch.name=${model_name} -o ${save_model_key}=${save_log} ${set_epoch}"
            else                             # train with multi-machine
                cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${train_py} -o Arch.name=${model_name} -o ${save_model_key}=${save_log} ${set_epoch}"
            fi
            # run train
            eval $cmd
            status_check $? "${cmd}" "${status_log}"

            # run eval
            eval_cmd="${python} ${eval_py} -o Arch.name=${model_name} -o ${pretrain_model_key}=${save_log}/${model_name}/best_model"
            eval $eval_cmd
            status_check $? "${eval_cmd}" "${status_log}"

            # run export model
            save_infer_path="${save_log}"
            export_cmd="${python} ${norm_export} -o Arch.name=${model_name} -o ${pretrain_model_key}=${save_log}/${model_name}/best_model -o ${save_infer_key}=${save_infer_path}"
            eval $export_cmd
            status_check $? "${export_cmd}" "${status_log}"

            # run inference
            eval $env
            save_infer_path="${save_log}"
            cd deploy
            func_inference "${python}" "${inference_py}" "../${save_log}" "../${LOG_PATH}" "../${infer_img_dir}" "${model_name}"
            eval "unset CUDA_VISIBLE_DEVICES"
            cd ..
        done
    done
else
    GPUID=$3
    if [ ${#GPUID} -le 0 ];then
        env=" "
    else
        env="export CUDA_VISIBLE_DEVICES=${GPUID}"
    fi
    echo $env
    # split the '|'-separated model list before iterating (IFS may still be ':' from the parser functions above)
    IFS="|"
    # export inference model
    mkdir -p inference_models
    for model_name in ${model_name_list[*]}; do
        export_cmd="${python} ${norm_export} -o Arch.name=${model_name} -o ${pretrain_model_key}=pretrained_models/${model_name}_pretrained -o ${save_infer_key}=./inference_models/${model_name}"
        eval $export_cmd
    done
    # run inference
    cd deploy
    for model_name in ${model_name_list[*]}; do
        func_inference "${python}" "${inference_py}" "../inference_models/${model_name}" "../${LOG_PATH}" "../${infer_img_dir}" "${model_name}"
    done
    cd ..
fi
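Putting the pieces together, a typical whole-chain run looks like the following (paths as added in this commit, run from the repository root):

# lite chain: train, eval, export and run inference for every model listed in the params file
bash test/prepare.sh test/parames.txt lite_train_infer
bash test/test.sh test/parames.txt lite_train_infer

# inference-only chain: the optional third argument is the GPU id exported to CUDA_VISIBLE_DEVICES
bash test/prepare.sh test/parames.txt infer
bash test/test.sh test/parames.txt infer 0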