From ed523bebab8030fe68f570a75186a276f9899eac Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Tue, 14 Jun 2022 10:51:17 +0800
Subject: [PATCH] split test_serving_infer.sh into test_serving_infer_cpp.sh
 and test_serving_infer_python.sh

---
 test_tipc/docs/test_serving_infer_cpp.md    |   6 +-
 test_tipc/docs/test_serving_infer_python.md |   6 +-
 test_tipc/test_serving_infer.sh             | 407 --------------------
 test_tipc/test_serving_infer_cpp.sh         | 262 +++++++++++++
 test_tipc/test_serving_infer_python.sh      | 309 +++++++++++++++
 5 files changed, 577 insertions(+), 413 deletions(-)
 delete mode 100644 test_tipc/test_serving_infer.sh
 create mode 100644 test_tipc/test_serving_infer_cpp.sh
 create mode 100644 test_tipc/test_serving_infer_python.sh

diff --git a/test_tipc/docs/test_serving_infer_cpp.md b/test_tipc/docs/test_serving_infer_cpp.md
index 188d36e37..acbc51f03 100644
--- a/test_tipc/docs/test_serving_infer_cpp.md
+++ b/test_tipc/docs/test_serving_infer_cpp.md
@@ -1,6 +1,6 @@
 # Linux GPU/CPU C++ Serving Deployment Test
 
-The main program for the Linux GPU/CPU PYTHON serving deployment test is `test_serving_infer.sh`, which tests Python-based model serving deployment.
+The main program for the Linux GPU/CPU C++ serving deployment test is `test_serving_infer_cpp.sh`, which tests C++-based model serving deployment.
 
 
 ## 1. Summary of Test Results
@@ -60,14 +60,14 @@ The main program for the Linux GPU/CPU serving deployment test is `test_serving_infer.sh`
 The test method is shown below; to test a different model, simply replace the parameter configuration file with your own to run the test for that model.
 
 ```bash
-bash test_tipc/test_serving_infer.sh ${your_params_file}
+bash test_tipc/test_serving_infer_cpp.sh ${your_params_file}
 ```
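+
+The script also accepts an optional second argument selecting the GPU id to use; it is exported as `CUDA_VISIBLE_DEVICES` before the test runs, for example:
+
+```bash
+bash test_tipc/test_serving_infer_cpp.sh ${your_params_file} 0
+```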
 
 Taking the `Linux GPU/CPU C++ serving deployment test` of `PPLCNet_x1_0` as an example, the command is as follows.
 
 
 ```bash
-bash test_tipc/test_serving_infer.sh test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
+bash test_tipc/test_serving_infer_cpp.sh test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
 ```
 
 The output below indicates that the command ran successfully.
diff --git a/test_tipc/docs/test_serving_infer_python.md b/test_tipc/docs/test_serving_infer_python.md
index ddc4ebfb1..4a3e3c2a7 100644
--- a/test_tipc/docs/test_serving_infer_python.md
+++ b/test_tipc/docs/test_serving_infer_python.md
@@ -1,6 +1,6 @@
 # Linux GPU/CPU PYTHON Serving Deployment Test
 
-The main program for the Linux GPU/CPU PYTHON serving deployment test is `test_serving_infer.sh`, which tests Python-based model serving deployment.
+The main program for the Linux GPU/CPU PYTHON serving deployment test is `test_serving_infer_python.sh`, which tests Python-based model serving deployment.
 
 
 ## 1. Summary of Test Results
@@ -60,14 +60,14 @@ The main program for the Linux GPU/CPU PYTHON serving deployment test is `test_serving_infer.sh`
 The test method is shown below; to test a different model, simply replace the parameter configuration file with your own to run the test for that model.
 
 ```bash
-bash test_tipc/test_serving_infer.sh ${your_params_file}
+bash test_tipc/test_serving_infer_python.sh ${your_params_file}
 ```
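+
+The script also accepts an optional second argument selecting the GPU id to use; it is exported as `CUDA_VISIBLE_DEVICES` before the test runs, for example:
+
+```bash
+bash test_tipc/test_serving_infer_python.sh ${your_params_file} 0
+```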
 
 Taking the `Linux GPU/CPU PYTHON serving deployment test` of `ResNet50` as an example, the command is as follows.
 
 
 ```bash
-bash test_tipc/test_serving_infer.sh test_tipc/configs/ResNet50/ResNet50_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
+bash test_tipc/test_serving_infer_python.sh test_tipc/configs/ResNet50/ResNet50_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
 ```
 
 The output below indicates that the command ran successfully.
diff --git a/test_tipc/test_serving_infer.sh b/test_tipc/test_serving_infer.sh
deleted file mode 100644
index 8e9e44a7a..000000000
--- a/test_tipc/test_serving_infer.sh
+++ /dev/null
@@ -1,407 +0,0 @@
-#!/bin/bash
-source test_tipc/common_func.sh
-
-FILENAME=$1
-dataline=$(awk 'NR==1, NR==19{print}'  $FILENAME)
-
-# parser params
-IFS=$'\n'
-lines=(${dataline})
-
-function func_get_url_file_name(){
-    strs=$1
-    IFS="/"
-    array=(${strs})
-    tmp=${array[${#array[@]}-1]}
-    echo ${tmp}
-}
-
-# parser serving
-model_name=$(func_parser_value "${lines[1]}")
-python=$(func_parser_value "${lines[2]}")
-trans_model_py=$(func_parser_value "${lines[4]}")
-infer_model_dir_key=$(func_parser_key "${lines[5]}")
-infer_model_dir_value=$(func_parser_value "${lines[5]}")
-model_filename_key=$(func_parser_key "${lines[6]}")
-model_filename_value=$(func_parser_value "${lines[6]}")
-params_filename_key=$(func_parser_key "${lines[7]}")
-params_filename_value=$(func_parser_value "${lines[7]}")
-serving_server_key=$(func_parser_key "${lines[8]}")
-serving_server_value=$(func_parser_value "${lines[8]}")
-serving_client_key=$(func_parser_key "${lines[9]}")
-serving_client_value=$(func_parser_value "${lines[9]}")
-serving_dir_value=$(func_parser_value "${lines[10]}")
-web_service_py=$(func_parser_value "${lines[11]}")
-web_use_gpu_key=$(func_parser_key "${lines[12]}")
-web_use_gpu_list=$(func_parser_value "${lines[12]}")
-pipeline_py=$(func_parser_value "${lines[13]}")
-
-
-function func_serving_cls(){
-    LOG_PATH="test_tipc/output/${model_name}"
-    mkdir -p ${LOG_PATH}
-    LOG_PATH="../../${LOG_PATH}"
-    status_log="${LOG_PATH}/results_serving.log"
-    IFS='|'
-
-    # pdserving
-    set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}")
-    set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
-    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
-    set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}")
-    set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}")
-
-    for python_ in ${python[*]}; do
-        if [[ ${python_} =~ "python" ]]; then
-            trans_model_cmd="${python_} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
-            eval ${trans_model_cmd}
-            break
-        fi
-    done
-
-    # modify the alias_name of fetch_var to "outputs"
-    server_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"prediction\"/' ${serving_server_value}/serving_server_conf.prototxt"
-    eval ${server_fetch_var_line_cmd}
-
-    client_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"prediction\"/' ${serving_client_value}/serving_client_conf.prototxt"
-    eval ${client_fetch_var_line_cmd}
-
-    prototxt_dataline=$(awk 'NR==1, NR==3{print}'  ${serving_server_value}/serving_server_conf.prototxt)
-    IFS=$'\n'
-    prototxt_lines=(${prototxt_dataline})
-    feed_var_name=$(func_parser_value "${prototxt_lines[2]}")
-    IFS='|'
-
-    cd ${serving_dir_value}
-    unset https_proxy
-    unset http_proxy
-
-    if [[ ${FILENAME} =~ "cpp" ]]; then
-        for item in ${python[*]}; do
-            if [[ ${item} =~ "python" ]]; then
-                python_=${item}
-                break
-            fi
-        done
-        serving_client_dir_name=$(func_get_url_file_name "$serving_client_value")
-        set_client_feed_type_cmd="sed -i '/feed_type/,/: .*/s/feed_type: .*/feed_type: 20/' ${serving_client_dir_name}/serving_client_conf.prototxt"
-        eval ${set_client_feed_type_cmd}
-        set_client_shape_cmd="sed -i '/shape: 3/,/shape: 3/s/shape: 3/shape: 1/' ${serving_client_dir_name}/serving_client_conf.prototxt"
-        eval ${set_client_shape_cmd}
-        set_client_shape224_cmd="sed -i '/shape: 224/,/shape: 224/s/shape: 224//' ${serving_client_dir_name}/serving_client_conf.prototxt"
-        eval ${set_client_shape224_cmd}
-        set_client_shape224_cmd="sed -i '/shape: 224/,/shape: 224/s/shape: 224//' ${serving_client_dir_name}/serving_client_conf.prototxt"
-        eval ${set_client_shape224_cmd}
-
-        set_pipeline_load_config_cmd="sed -i '/load_client_config/,/.prototxt/s/.\/.*\/serving_client_conf.prototxt/.\/${serving_client_dir_name}\/serving_client_conf.prototxt/' ${pipeline_py}"
-        eval ${set_pipeline_load_config_cmd}
-
-        set_pipeline_feed_var_cmd="sed -i '/feed=/,/: image}/s/feed={.*: image}/feed={${feed_var_name}: image}/' ${pipeline_py}"
-        eval ${set_pipeline_feed_var_cmd}
-
-        serving_server_dir_name=$(func_get_url_file_name "$serving_server_value")
-
-        for use_gpu in ${web_use_gpu_list[*]}; do
-            if [[ ${use_gpu} = "null" ]]; then
-                web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 &"
-                eval ${web_service_cpp_cmd}
-                sleep 5s
-                _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_batchsize_1.log"
-                pipeline_cmd="${python_} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
-                eval ${pipeline_cmd}
-                last_status=${PIPESTATUS[0]}
-                eval "cat ${_save_log_path}"
-                status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
-                eval "${python_} -m paddle_serving_server.serve stop"
-                sleep 5s
-            else
-                web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 --gpu_id=${use_gpu} &"
-                eval ${web_service_cpp_cmd}
-                sleep 8s
-
-                _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_pipeline_batchsize_1.log"
-                pipeline_cmd="${python_} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
-                eval ${pipeline_cmd}
-                last_status=${PIPESTATUS[0]}
-                eval "cat ${_save_log_path}"
-                status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
-                sleep 5s
-                eval "${python_} -m paddle_serving_server.serve stop"
-            fi
-        done
-    else
-        # python serving
-        # modify the input_name in "classification_web_service.py" to be consistent with feed_var.name in prototxt
-        set_web_service_feed_var_cmd="sed -i '/preprocess/,/input_imgs}/s/{.*: input_imgs}/{${feed_var_name}: input_imgs}/' ${web_service_py}"
-        eval ${set_web_service_feed_var_cmd}
-
-        model_config=21
-        serving_server_dir_name=$(func_get_url_file_name "$serving_server_value")
-        set_model_config_cmd="sed -i '${model_config}s/model_config: .*/model_config: ${serving_server_dir_name}/' config.yml"
-        eval ${set_model_config_cmd}
-
-        for use_gpu in ${web_use_gpu_list[*]}; do
-            if [[ ${use_gpu} = "null" ]]; then
-                device_type_line=24
-                set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 0/' config.yml"
-                eval ${set_device_type_cmd}
-
-                devices_line=27
-                set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"\"/' config.yml"
-                eval ${set_devices_cmd}
-
-                web_service_cmd="${python_} ${web_service_py} &"
-                eval ${web_service_cmd}
-                sleep 5s
-                for pipeline in ${pipeline_py[*]}; do
-                    _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
-                    pipeline_cmd="${python_} ${pipeline} > ${_save_log_path} 2>&1 "
-                    eval ${pipeline_cmd}
-                    last_status=${PIPESTATUS[0]}
-                    eval "cat ${_save_log_path}"
-                    status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
-                    sleep 5s
-                done
-                eval "${python_} -m paddle_serving_server.serve stop"
-            elif [ ${use_gpu} -eq 0 ]; then
-                if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
-                    continue
-                fi
-                if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
-                    continue
-                fi
-                if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then
-                    continue
-                fi
-
-                device_type_line=24
-                set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 1/' config.yml"
-                eval ${set_device_type_cmd}
-
-                devices_line=27
-                set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"${use_gpu}\"/' config.yml"
-                eval ${set_devices_cmd}
-
-                web_service_cmd="${python_} ${web_service_py} & "
-                eval ${web_service_cmd}
-                sleep 5s
-                for pipeline in ${pipeline_py[*]}; do
-                    _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
-                    pipeline_cmd="${python_} ${pipeline} > ${_save_log_path} 2>&1"
-                    eval ${pipeline_cmd}
-                    last_status=${PIPESTATUS[0]}
-                    eval "cat ${_save_log_path}"
-                    status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
-                    sleep 5s
-                done
-                eval "${python_} -m paddle_serving_server.serve stop"
-            else
-                echo "Does not support hardware [${use_gpu}] other than CPU and GPU Currently!"
-            fi
-        done
-    fi
-}
-
-
-function func_serving_rec(){
-    LOG_PATH="test_tipc/output/${model_name}"
-    mkdir -p ${LOG_PATH}
-    LOG_PATH="../../../${LOG_PATH}"
-    status_log="${LOG_PATH}/results_serving.log"
-    trans_model_py=$(func_parser_value "${lines[5]}")
-    cls_infer_model_dir_key=$(func_parser_key "${lines[6]}")
-    cls_infer_model_dir_value=$(func_parser_value "${lines[6]}")
-    det_infer_model_dir_key=$(func_parser_key "${lines[7]}")
-    det_infer_model_dir_value=$(func_parser_value "${lines[7]}")
-    model_filename_key=$(func_parser_key "${lines[8]}")
-    model_filename_value=$(func_parser_value "${lines[8]}")
-    params_filename_key=$(func_parser_key "${lines[9]}")
-    params_filename_value=$(func_parser_value "${lines[9]}")
-
-    cls_serving_server_key=$(func_parser_key "${lines[10]}")
-    cls_serving_server_value=$(func_parser_value "${lines[10]}")
-    cls_serving_client_key=$(func_parser_key "${lines[11]}")
-    cls_serving_client_value=$(func_parser_value "${lines[11]}")
-
-    det_serving_server_key=$(func_parser_key "${lines[12]}")
-    det_serving_server_value=$(func_parser_value "${lines[12]}")
-    det_serving_client_key=$(func_parser_key "${lines[13]}")
-    det_serving_client_value=$(func_parser_value "${lines[13]}")
-
-    serving_dir_value=$(func_parser_value "${lines[14]}")
-    web_service_py=$(func_parser_value "${lines[15]}")
-    web_use_gpu_key=$(func_parser_key "${lines[16]}")
-    web_use_gpu_list=$(func_parser_value "${lines[16]}")
-    pipeline_py=$(func_parser_value "${lines[17]}")
-
-    IFS='|'
-    for python_ in ${python[*]}; do
-        if [[ ${python_} =~ "python" ]]; then
-            python_interp=${python_}
-            break
-        fi
-    done
-
-    # pdserving
-    cd ./deploy
-    set_dirname=$(func_set_params "${cls_infer_model_dir_key}" "${cls_infer_model_dir_value}")
-    set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
-    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
-    set_serving_server=$(func_set_params "${cls_serving_server_key}" "${cls_serving_server_value}")
-    set_serving_client=$(func_set_params "${cls_serving_client_key}" "${cls_serving_client_value}")
-    cls_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
-    eval ${cls_trans_model_cmd}
-
-    set_dirname=$(func_set_params "${det_infer_model_dir_key}" "${det_infer_model_dir_value}")
-    set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
-    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
-    set_serving_server=$(func_set_params "${det_serving_server_key}" "${det_serving_server_value}")
-    set_serving_client=$(func_set_params "${det_serving_client_key}" "${det_serving_client_value}")
-    det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
-    eval ${det_trans_model_cmd}
-
-    if [[ ${FILENAME} =~ "cpp" ]]; then
-        cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_serving/*.prototxt ${cls_serving_server_value}"
-        eval ${cp_prototxt_cmd}
-        cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_client/*.prototxt ${cls_serving_client_value}"
-        eval ${cp_prototxt_cmd}
-        cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/*.prototxt ${det_serving_client_value}"
-        eval ${cp_prototxt_cmd}
-        cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/*.prototxt ${det_serving_server_value}"
-        eval ${cp_prototxt_cmd}
-    else
-        # modify the alias_name of fetch_var to "outputs"
-        server_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"features\"/' $cls_serving_server_value/serving_server_conf.prototxt"
-        eval ${server_fetch_var_line_cmd}
-        client_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"features\"/' $cls_serving_client_value/serving_client_conf.prototxt"
-        eval ${client_fetch_var_line_cmd}
-    fi
-    prototxt_dataline=$(awk 'NR==1, NR==3{print}'  ${cls_serving_server_value}/serving_server_conf.prototxt)
-    IFS=$'\n'
-    prototxt_lines=(${prototxt_dataline})
-    feed_var_name=$(func_parser_value "${prototxt_lines[2]}")
-    IFS='|'
-
-    cd ${serving_dir_value}
-    unset https_proxy
-    unset http_proxy
-
-    if [[ ${FILENAME} =~ "cpp" ]]; then
-        export SERVING_BIN=$PWD/../Serving/server-build-gpu-opencv/core/general-server/serving
-        for use_gpu in ${web_use_gpu_list[*]}; do
-            if [ ${use_gpu} = "null" ]; then
-                det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
-                web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 &"
-                eval ${web_service_cpp_cmd}
-                sleep 5s
-                _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_batchsize_1.log"
-                pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
-                eval ${pipeline_cmd}
-                last_status=${PIPESTATUS[0]}
-                eval "cat ${_save_log_path}"
-                status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
-                eval "${python_} -m paddle_serving_server.serve stop"
-                sleep 5s
-            else
-                det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
-                web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 --gpu_id=${use_gpu} &"
-                eval ${web_service_cpp_cmd}
-                sleep 5s
-                _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_batchsize_1.log"
-                pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
-                eval ${pipeline_cmd}
-                last_status=${PIPESTATUS[0]}
-                eval "cat ${_save_log_path}"
-                status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
-                eval "${python_} -m paddle_serving_server.serve stop"
-                sleep 5s
-            fi
-        done
-    else
-        # modify the input_name in "recognition_web_service.py" to be consistent with feed_var.name in prototxt
-        set_web_service_feed_var_cmd="sed -i '/preprocess/,/input_imgs}/s/{.*: input_imgs}/{${feed_var_name}: input_imgs}/' ${web_service_py}"
-        eval ${set_web_service_feed_var_cmd}
-        # python serving
-        for use_gpu in ${web_use_gpu_list[*]}; do
-            if [[ ${use_gpu} = "null" ]]; then
-                device_type_line=24
-                set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 0/' config.yml"
-                eval ${set_device_type_cmd}
-
-                devices_line=27
-                set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"\"/' config.yml"
-                eval ${set_devices_cmd}
-
-                web_service_cmd="${python} ${web_service_py} &"
-                eval ${web_service_cmd}
-                sleep 5s
-                for pipeline in ${pipeline_py[*]}; do
-                    _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
-                    pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1 "
-                    eval ${pipeline_cmd}
-                    last_status=${PIPESTATUS[0]}
-                    eval "cat ${_save_log_path}"
-                    status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
-                    sleep 5s
-                done
-                eval "${python_} -m paddle_serving_server.serve stop"
-            elif [ ${use_gpu} -eq 0 ]; then
-                if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
-                    continue
-                fi
-                if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
-                    continue
-                fi
-                if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then
-                    continue
-                fi
-
-                device_type_line=24
-                set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 1/' config.yml"
-                eval ${set_device_type_cmd}
-
-                devices_line=27
-                set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"${use_gpu}\"/' config.yml"
-                eval ${set_devices_cmd}
-
-                web_service_cmd="${python} ${web_service_py} & "
-                eval ${web_service_cmd}
-                sleep 10s
-                for pipeline in ${pipeline_py[*]}; do
-                    _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
-                    pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1"
-                    eval ${pipeline_cmd}
-                    last_status=${PIPESTATUS[0]}
-                    eval "cat ${_save_log_path}"
-                    status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
-                    sleep 10s
-                done
-                eval "${python_} -m paddle_serving_server.serve stop"
-            else
-                echo "Does not support hardware [${use_gpu}] other than CPU and GPU Currently!"
-            fi
-        done
-    fi
-}
-
-
-# set cuda device
-GPUID=$2
-if [ ${#GPUID} -le 0 ];then
-    env=" "
-else
-    env="export CUDA_VISIBLE_DEVICES=${GPUID}"
-fi
-set CUDA_VISIBLE_DEVICES
-eval ${env}
-
-
-echo "################### run test ###################"
-
-export Count=0
-IFS="|"
-if [[ ${model_name} =~ "ShiTu" ]]; then
-    func_serving_rec
-else
-    func_serving_cls
-fi
diff --git a/test_tipc/test_serving_infer_cpp.sh b/test_tipc/test_serving_infer_cpp.sh
new file mode 100644
index 000000000..ff0a214cc
--- /dev/null
+++ b/test_tipc/test_serving_infer_cpp.sh
@@ -0,0 +1,262 @@
+#!/bin/bash
+source test_tipc/common_func.sh
+
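+# Usage: bash test_tipc/test_serving_infer_cpp.sh <params_file> [gpu_id]
+# <params_file> is a "|"-separated TIPC config file; only its first 19 lines are read.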
+FILENAME=$1
+dataline=$(awk 'NR==1, NR==19{print}'  $FILENAME)
+
+# parser params
+IFS=$'\n'
+lines=(${dataline})
+
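+# return the last "/"-separated component of a string, e.g.
+# "a/b/model_dir" -> "model_dir"; used to derive local directory names from
+# the serving server/client paths given in the config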
+function func_get_url_file_name(){
+    strs=$1
+    IFS="/"
+    array=(${strs})
+    tmp=${array[${#array[@]}-1]}
+    echo ${tmp}
+}
+
+# parser serving
+model_name=$(func_parser_value "${lines[1]}")
+python=$(func_parser_value "${lines[2]}")
+trans_model_py=$(func_parser_value "${lines[4]}")
+infer_model_dir_key=$(func_parser_key "${lines[5]}")
+infer_model_dir_value=$(func_parser_value "${lines[5]}")
+model_filename_key=$(func_parser_key "${lines[6]}")
+model_filename_value=$(func_parser_value "${lines[6]}")
+params_filename_key=$(func_parser_key "${lines[7]}")
+params_filename_value=$(func_parser_value "${lines[7]}")
+serving_server_key=$(func_parser_key "${lines[8]}")
+serving_server_value=$(func_parser_value "${lines[8]}")
+serving_client_key=$(func_parser_key "${lines[9]}")
+serving_client_value=$(func_parser_value "${lines[9]}")
+serving_dir_value=$(func_parser_value "${lines[10]}")
+web_service_py=$(func_parser_value "${lines[11]}")
+web_use_gpu_key=$(func_parser_key "${lines[12]}")
+web_use_gpu_list=$(func_parser_value "${lines[12]}")
+pipeline_py=$(func_parser_value "${lines[13]}")
+
+
+function func_serving_cls(){
+    LOG_PATH="test_tipc/output/${model_name}"
+    mkdir -p ${LOG_PATH}
+    LOG_PATH="../../${LOG_PATH}"
+    status_log="${LOG_PATH}/results_serving.log"
+    IFS='|'
+
+    # pdserving
+    set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}")
+    set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
+    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
+    set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}")
+    set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}")
+
+    for python_ in ${python[*]}; do
+        if [[ ${python_} =~ "python" ]]; then
+            trans_model_cmd="${python_} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+            eval ${trans_model_cmd}
+            break
+        fi
+    done
+
+    # modify the alias_name of fetch_var to "prediction"
+    server_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"prediction\"/' ${serving_server_value}/serving_server_conf.prototxt"
+    eval ${server_fetch_var_line_cmd}
+
+    client_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"prediction\"/' ${serving_client_value}/serving_client_conf.prototxt"
+    eval ${client_fetch_var_line_cmd}
+
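+    # read the first three lines of the generated server prototxt and extract
+    # the feed_var name, so the client-side feed dict key matches the model input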
+    prototxt_dataline=$(awk 'NR==1, NR==3{print}'  ${serving_server_value}/serving_server_conf.prototxt)
+    IFS=$'\n'
+    prototxt_lines=(${prototxt_dataline})
+    feed_var_name=$(func_parser_value "${prototxt_lines[2]}")
+    IFS='|'
+
+    cd ${serving_dir_value}
+    unset https_proxy
+    unset http_proxy
+
+    for item in ${python[*]}; do
+        if [[ ${item} =~ "python" ]]; then
+            python_=${item}
+            break
+        fi
+    done
+    serving_client_dir_name=$(func_get_url_file_name "$serving_client_value")
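+    # rewrite the client config for the C++ client: feed_type becomes 20 and
+    # the 3x224x224 shape entries collapse to a single element, since the image
+    # is passed as one encoded buffer (an assumption based on the sed edits below)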
+    set_client_feed_type_cmd="sed -i '/feed_type/,/: .*/s/feed_type: .*/feed_type: 20/' ${serving_client_dir_name}/serving_client_conf.prototxt"
+    eval ${set_client_feed_type_cmd}
+    set_client_shape_cmd="sed -i '/shape: 3/,/shape: 3/s/shape: 3/shape: 1/' ${serving_client_dir_name}/serving_client_conf.prototxt"
+    eval ${set_client_shape_cmd}
+    set_client_shape224_cmd="sed -i '/shape: 224/,/shape: 224/s/shape: 224//' ${serving_client_dir_name}/serving_client_conf.prototxt"
+    eval ${set_client_shape224_cmd}
+
+    set_pipeline_load_config_cmd="sed -i '/load_client_config/,/.prototxt/s/.\/.*\/serving_client_conf.prototxt/.\/${serving_client_dir_name}\/serving_client_conf.prototxt/' ${pipeline_py}"
+    eval ${set_pipeline_load_config_cmd}
+
+    set_pipeline_feed_var_cmd="sed -i '/feed=/,/: image}/s/feed={.*: image}/feed={${feed_var_name}: image}/' ${pipeline_py}"
+    eval ${set_pipeline_feed_var_cmd}
+
+    serving_server_dir_name=$(func_get_url_file_name "$serving_server_value")
+
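+    # each entry of web_use_gpu_list starts one server: "null" runs on CPU,
+    # anything else is forwarded to the server as --gpu_id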
+    for use_gpu in ${web_use_gpu_list[*]}; do
+        if [[ ${use_gpu} = "null" ]]; then
+            web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 &"
+            eval ${web_service_cpp_cmd}
+            sleep 5s
+            _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_batchsize_1.log"
+            pipeline_cmd="${python_} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
+            eval ${pipeline_cmd}
+            last_status=${PIPESTATUS[0]}
+            eval "cat ${_save_log_path}"
+            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
+            eval "${python_} -m paddle_serving_server.serve stop"
+            sleep 5s
+        else
+            web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 --gpu_id=${use_gpu} &"
+            eval ${web_service_cpp_cmd}
+            sleep 8s
+
+            _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_pipeline_batchsize_1.log"
+            pipeline_cmd="${python_} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
+            eval ${pipeline_cmd}
+            last_status=${PIPESTATUS[0]}
+            eval "cat ${_save_log_path}"
+            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
+            sleep 5s
+            eval "${python_} -m paddle_serving_server.serve stop"
+        fi
+    done
+}
+
+
+function func_serving_rec(){
+    LOG_PATH="test_tipc/output/${model_name}"
+    mkdir -p ${LOG_PATH}
+    LOG_PATH="../../../${LOG_PATH}"
+    status_log="${LOG_PATH}/results_serving.log"
+    trans_model_py=$(func_parser_value "${lines[5]}")
+    cls_infer_model_dir_key=$(func_parser_key "${lines[6]}")
+    cls_infer_model_dir_value=$(func_parser_value "${lines[6]}")
+    det_infer_model_dir_key=$(func_parser_key "${lines[7]}")
+    det_infer_model_dir_value=$(func_parser_value "${lines[7]}")
+    model_filename_key=$(func_parser_key "${lines[8]}")
+    model_filename_value=$(func_parser_value "${lines[8]}")
+    params_filename_key=$(func_parser_key "${lines[9]}")
+    params_filename_value=$(func_parser_value "${lines[9]}")
+
+    cls_serving_server_key=$(func_parser_key "${lines[10]}")
+    cls_serving_server_value=$(func_parser_value "${lines[10]}")
+    cls_serving_client_key=$(func_parser_key "${lines[11]}")
+    cls_serving_client_value=$(func_parser_value "${lines[11]}")
+
+    det_serving_server_key=$(func_parser_key "${lines[12]}")
+    det_serving_server_value=$(func_parser_value "${lines[12]}")
+    det_serving_client_key=$(func_parser_key "${lines[13]}")
+    det_serving_client_value=$(func_parser_value "${lines[13]}")
+
+    serving_dir_value=$(func_parser_value "${lines[14]}")
+    web_service_py=$(func_parser_value "${lines[15]}")
+    web_use_gpu_key=$(func_parser_key "${lines[16]}")
+    web_use_gpu_list=$(func_parser_value "${lines[16]}")
+    pipeline_py=$(func_parser_value "${lines[17]}")
+
+    IFS='|'
+    for python_ in ${python[*]}; do
+        if [[ ${python_} =~ "python" ]]; then
+            python_interp=${python_}
+            break
+        fi
+    done
+
+    # pdserving
+    cd ./deploy
+    set_dirname=$(func_set_params "${cls_infer_model_dir_key}" "${cls_infer_model_dir_value}")
+    set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
+    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
+    set_serving_server=$(func_set_params "${cls_serving_server_key}" "${cls_serving_server_value}")
+    set_serving_client=$(func_set_params "${cls_serving_client_key}" "${cls_serving_client_value}")
+    cls_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+    eval ${cls_trans_model_cmd}
+
+    set_dirname=$(func_set_params "${det_infer_model_dir_key}" "${det_infer_model_dir_value}")
+    set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
+    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
+    set_serving_server=$(func_set_params "${det_serving_server_key}" "${det_serving_server_value}")
+    set_serving_client=$(func_set_params "${det_serving_client_key}" "${det_serving_client_value}")
+    det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+    eval ${det_trans_model_cmd}
+
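+    # replace the exported prototxt configs with the pre-tuned ones shipped
+    # under deploy/paddleserving/recognition/preprocess, which the C++ ops
+    # expect (an assumption based on the copies below)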
+    cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_serving/*.prototxt ${cls_serving_server_value}"
+    eval ${cp_prototxt_cmd}
+    cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_client/*.prototxt ${cls_serving_client_value}"
+    eval ${cp_prototxt_cmd}
+    cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/*.prototxt ${det_serving_client_value}"
+    eval ${cp_prototxt_cmd}
+    cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/*.prototxt ${det_serving_server_value}"
+    eval ${cp_prototxt_cmd}
+
+    prototxt_dataline=$(awk 'NR==1, NR==3{print}'  ${cls_serving_server_value}/serving_server_conf.prototxt)
+    IFS=$'\n'
+    prototxt_lines=(${prototxt_dataline})
+    feed_var_name=$(func_parser_value "${prototxt_lines[2]}")
+    IFS='|'
+
+    cd ${serving_dir_value}
+    unset https_proxy
+    unset http_proxy
+
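+    # SERVING_BIN points at the locally built C++ serving binary; the path
+    # assumes the Serving repo was built beside this one with the
+    # server-build-gpu-opencv preset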
+    export SERVING_BIN=${PWD}/../Serving/server-build-gpu-opencv/core/general-server/serving
+    for use_gpu in ${web_use_gpu_list[*]}; do
+        if [ ${use_gpu} = "null" ]; then
+            det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
+            web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 &"
+            eval ${web_service_cpp_cmd}
+            sleep 5s
+            _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_batchsize_1.log"
+            pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
+            eval ${pipeline_cmd}
+            last_status=${PIPESTATUS[0]}
+            eval "cat ${_save_log_path}"
+            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
+            eval "${python_} -m paddle_serving_server.serve stop"
+            sleep 5s
+        else
+            det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
+            web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 --gpu_id=${use_gpu} &"
+            eval ${web_service_cpp_cmd}
+            sleep 5s
+            _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_batchsize_1.log"
+            pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
+            eval ${pipeline_cmd}
+            last_status=${PIPESTATUS[0]}
+            eval "cat ${_save_log_path}"
+            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
+            eval "${python_} -m paddle_serving_server.serve stop"
+            sleep 5s
+        fi
+    done
+}
+
+
+# set cuda device
+GPUID=$2
+if [ ${#GPUID} -le 0 ]; then
+    env=" "
+else
+    env="export CUDA_VISIBLE_DEVICES=${GPUID}"
+fi
+eval ${env}
+
+
+echo "################### run test ###################"
+
+export Count=0
+IFS="|"
+if [[ ${model_name} =~ "ShiTu" ]]; then
+    func_serving_rec
+else
+    func_serving_cls
+fi
diff --git a/test_tipc/test_serving_infer_python.sh b/test_tipc/test_serving_infer_python.sh
new file mode 100644
index 000000000..4dc0eb36c
--- /dev/null
+++ b/test_tipc/test_serving_infer_python.sh
@@ -0,0 +1,309 @@
+#!/bin/bash
+source test_tipc/common_func.sh
+
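+# Usage: bash test_tipc/test_serving_infer_python.sh <params_file> [gpu_id]
+# <params_file> is a "|"-separated TIPC config file; only its first 19 lines are read.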
+FILENAME=$1
+dataline=$(awk 'NR==1, NR==19{print}'  $FILENAME)
+
+# parser params
+IFS=$'\n'
+lines=(${dataline})
+
+function func_get_url_file_name(){
+    strs=$1
+    IFS="/"
+    array=(${strs})
+    tmp=${array[${#array[@]}-1]}
+    echo ${tmp}
+}
+
+# parser serving
+model_name=$(func_parser_value "${lines[1]}")
+python=$(func_parser_value "${lines[2]}")
+trans_model_py=$(func_parser_value "${lines[4]}")
+infer_model_dir_key=$(func_parser_key "${lines[5]}")
+infer_model_dir_value=$(func_parser_value "${lines[5]}")
+model_filename_key=$(func_parser_key "${lines[6]}")
+model_filename_value=$(func_parser_value "${lines[6]}")
+params_filename_key=$(func_parser_key "${lines[7]}")
+params_filename_value=$(func_parser_value "${lines[7]}")
+serving_server_key=$(func_parser_key "${lines[8]}")
+serving_server_value=$(func_parser_value "${lines[8]}")
+serving_client_key=$(func_parser_key "${lines[9]}")
+serving_client_value=$(func_parser_value "${lines[9]}")
+serving_dir_value=$(func_parser_value "${lines[10]}")
+web_service_py=$(func_parser_value "${lines[11]}")
+web_use_gpu_key=$(func_parser_key "${lines[12]}")
+web_use_gpu_list=$(func_parser_value "${lines[12]}")
+pipeline_py=$(func_parser_value "${lines[13]}")
+
+
+function func_serving_cls(){
+    LOG_PATH="test_tipc/output/${model_name}"
+    mkdir -p ${LOG_PATH}
+    LOG_PATH="../../${LOG_PATH}"
+    status_log="${LOG_PATH}/results_serving.log"
+    IFS='|'
+
+    # pdserving
+    set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}")
+    set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
+    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
+    set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}")
+    set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}")
+
+    for python_ in ${python[*]}; do
+        if [[ ${python_} =~ "python" ]]; then
+            trans_model_cmd="${python_} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+            eval ${trans_model_cmd}
+            break
+        fi
+    done
+
+    # modify the alias_name of fetch_var to "prediction"
+    server_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"prediction\"/' ${serving_server_value}/serving_server_conf.prototxt"
+    eval ${server_fetch_var_line_cmd}
+
+    client_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"prediction\"/' ${serving_client_value}/serving_client_conf.prototxt"
+    eval ${client_fetch_var_line_cmd}
+
+    prototxt_dataline=$(awk 'NR==1, NR==3{print}'  ${serving_server_value}/serving_server_conf.prototxt)
+    IFS=$'\n'
+    prototxt_lines=(${prototxt_dataline})
+    feed_var_name=$(func_parser_value "${prototxt_lines[2]}")
+    IFS='|'
+
+    cd ${serving_dir_value}
+    unset https_proxy
+    unset http_proxy
+
+    # python serving
+    # modify the input_name in "classification_web_service.py" to be consistent with feed_var.name in prototxt
+    set_web_service_feed_var_cmd="sed -i '/preprocess/,/input_imgs}/s/{.*: input_imgs}/{${feed_var_name}: input_imgs}/' ${web_service_py}"
+    eval ${set_web_service_feed_var_cmd}
+
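+    # config.yml is patched in place by fixed line numbers: line 21 holds
+    # model_config, line 24 device_type, line 27 devices; these offsets assume
+    # the stock config.yml shipped with the serving example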
+    model_config=21
+    serving_server_dir_name=$(func_get_url_file_name "$serving_server_value")
+    set_model_config_cmd="sed -i '${model_config}s/model_config: .*/model_config: ${serving_server_dir_name}/' config.yml"
+    eval ${set_model_config_cmd}
+
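+    # "null" in web_use_gpu_list runs the server on CPU (device_type 0, empty
+    # devices); a numeric id runs it on GPU (device_type 1, devices "<id>")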
+    for use_gpu in ${web_use_gpu_list[*]}; do
+        if [[ ${use_gpu} = "null" ]]; then
+            device_type_line=24
+            set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 0/' config.yml"
+            eval ${set_device_type_cmd}
+
+            devices_line=27
+            set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"\"/' config.yml"
+            eval ${set_devices_cmd}
+
+            web_service_cmd="${python_} ${web_service_py} &"
+            eval ${web_service_cmd}
+            sleep 5s
+            for pipeline in ${pipeline_py[*]}; do
+                _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
+                pipeline_cmd="${python_} ${pipeline} > ${_save_log_path} 2>&1 "
+                eval ${pipeline_cmd}
+                last_status=${PIPESTATUS[0]}
+                eval "cat ${_save_log_path}"
+                status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
+                sleep 5s
+            done
+            eval "${python_} -m paddle_serving_server.serve stop"
+        elif [ ${use_gpu} -eq 0 ]; then
+            # NOTE: ${_flag_quant}, ${precision} and ${use_trt} are never set in
+            # this script; quoted so the comparisons do not error out when empty
+            if [[ "${_flag_quant}" = "False" ]] && [[ "${precision}" =~ "int8" ]]; then
+                continue
+            fi
+            if [[ "${precision}" =~ "fp16" || "${precision}" =~ "int8" ]] && [ "${use_trt}" = "False" ]; then
+                continue
+            fi
+            if [[ "${use_trt}" = "False" || "${precision}" =~ "int8" ]] && [[ "${_flag_quant}" = "True" ]]; then
+                continue
+            fi
+
+            device_type_line=24
+            set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 1/' config.yml"
+            eval ${set_device_type_cmd}
+
+            devices_line=27
+            set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"${use_gpu}\"/' config.yml"
+            eval ${set_devices_cmd}
+
+            web_service_cmd="${python_} ${web_service_py} & "
+            eval ${web_service_cmd}
+            sleep 5s
+            for pipeline in ${pipeline_py[*]}; do
+                _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
+                pipeline_cmd="${python_} ${pipeline} > ${_save_log_path} 2>&1"
+                eval ${pipeline_cmd}
+                last_status=${PIPESTATUS[0]}
+                eval "cat ${_save_log_path}"
+                status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
+                sleep 5s
+            done
+            eval "${python_} -m paddle_serving_server.serve stop"
+        else
+            echo "Does not support hardware [${use_gpu}] other than CPU and GPU Currently!"
+        fi
+    done
+}
+
+
+function func_serving_rec(){
+    LOG_PATH="test_tipc/output/${model_name}"
+    mkdir -p ${LOG_PATH}
+    LOG_PATH="../../../${LOG_PATH}"
+    status_log="${LOG_PATH}/results_serving.log"
+    trans_model_py=$(func_parser_value "${lines[5]}")
+    cls_infer_model_dir_key=$(func_parser_key "${lines[6]}")
+    cls_infer_model_dir_value=$(func_parser_value "${lines[6]}")
+    det_infer_model_dir_key=$(func_parser_key "${lines[7]}")
+    det_infer_model_dir_value=$(func_parser_value "${lines[7]}")
+    model_filename_key=$(func_parser_key "${lines[8]}")
+    model_filename_value=$(func_parser_value "${lines[8]}")
+    params_filename_key=$(func_parser_key "${lines[9]}")
+    params_filename_value=$(func_parser_value "${lines[9]}")
+
+    cls_serving_server_key=$(func_parser_key "${lines[10]}")
+    cls_serving_server_value=$(func_parser_value "${lines[10]}")
+    cls_serving_client_key=$(func_parser_key "${lines[11]}")
+    cls_serving_client_value=$(func_parser_value "${lines[11]}")
+
+    det_serving_server_key=$(func_parser_key "${lines[12]}")
+    det_serving_server_value=$(func_parser_value "${lines[12]}")
+    det_serving_client_key=$(func_parser_key "${lines[13]}")
+    det_serving_client_value=$(func_parser_value "${lines[13]}")
+
+    serving_dir_value=$(func_parser_value "${lines[14]}")
+    web_service_py=$(func_parser_value "${lines[15]}")
+    web_use_gpu_key=$(func_parser_key "${lines[16]}")
+    web_use_gpu_list=$(func_parser_value "${lines[16]}")
+    pipeline_py=$(func_parser_value "${lines[17]}")
+
+    IFS='|'
+    for python_ in ${python[*]}; do
+        if [[ ${python_} =~ "python" ]]; then
+            python_interp=${python_}
+            break
+        fi
+    done
+
+    # pdserving
+    cd ./deploy
+    set_dirname=$(func_set_params "${cls_infer_model_dir_key}" "${cls_infer_model_dir_value}")
+    set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
+    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
+    set_serving_server=$(func_set_params "${cls_serving_server_key}" "${cls_serving_server_value}")
+    set_serving_client=$(func_set_params "${cls_serving_client_key}" "${cls_serving_client_value}")
+    cls_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+    eval ${cls_trans_model_cmd}
+
+    set_dirname=$(func_set_params "${det_infer_model_dir_key}" "${det_infer_model_dir_value}")
+    set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
+    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
+    set_serving_server=$(func_set_params "${det_serving_server_key}" "${det_serving_server_value}")
+    set_serving_client=$(func_set_params "${det_serving_client_key}" "${det_serving_client_value}")
+    det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+    eval ${det_trans_model_cmd}
+
+    # modify the alias_name of fetch_var to "features"
+    server_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"features\"/' $cls_serving_server_value/serving_server_conf.prototxt"
+    eval ${server_fetch_var_line_cmd}
+    client_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"features\"/' $cls_serving_client_value/serving_client_conf.prototxt"
+    eval ${client_fetch_var_line_cmd}
+
+    prototxt_dataline=$(awk 'NR==1, NR==3{print}'  ${cls_serving_server_value}/serving_server_conf.prototxt)
+    IFS=$'\n'
+    prototxt_lines=(${prototxt_dataline})
+    feed_var_name=$(func_parser_value "${prototxt_lines[2]}")
+    IFS='|'
+
+    cd ${serving_dir_value}
+    unset https_proxy
+    unset http_proxy
+
+    # modify the input_name in "recognition_web_service.py" to be consistent with feed_var.name in prototxt
+    set_web_service_feed_var_cmd="sed -i '/preprocess/,/input_imgs}/s/{.*: input_imgs}/{${feed_var_name}: input_imgs}/' ${web_service_py}"
+    eval ${set_web_service_feed_var_cmd}
+    # python serving
+    for use_gpu in ${web_use_gpu_list[*]}; do
+        if [[ ${use_gpu} = "null" ]]; then
+            device_type_line=24
+            set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 0/' config.yml"
+            eval ${set_device_type_cmd}
+
+            devices_line=27
+            set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"\"/' config.yml"
+            eval ${set_devices_cmd}
+
+            web_service_cmd="${python} ${web_service_py} &"
+            eval ${web_service_cmd}
+            sleep 5s
+            for pipeline in ${pipeline_py[*]}; do
+                _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
+                pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1 "
+                eval ${pipeline_cmd}
+                last_status=${PIPESTATUS[0]}
+                eval "cat ${_save_log_path}"
+                status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
+                sleep 5s
+            done
+            eval "${python_} -m paddle_serving_server.serve stop"
+        elif [ ${use_gpu} -eq 0 ]; then
+            # same unset-variable guards as in func_serving_cls; quoted for safety
+            if [[ "${_flag_quant}" = "False" ]] && [[ "${precision}" =~ "int8" ]]; then
+                continue
+            fi
+            if [[ "${precision}" =~ "fp16" || "${precision}" =~ "int8" ]] && [ "${use_trt}" = "False" ]; then
+                continue
+            fi
+            if [[ "${use_trt}" = "False" || "${precision}" =~ "int8" ]] && [[ "${_flag_quant}" = "True" ]]; then
+                continue
+            fi
+
+            device_type_line=24
+            set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 1/' config.yml"
+            eval ${set_device_type_cmd}
+
+            devices_line=27
+            set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"${use_gpu}\"/' config.yml"
+            eval ${set_devices_cmd}
+
+            web_service_cmd="${python} ${web_service_py} & "
+            eval ${web_service_cmd}
+            sleep 10s
+            for pipeline in ${pipeline_py[*]}; do
+                _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
+                pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1"
+                eval ${pipeline_cmd}
+                last_status=${PIPESTATUS[0]}
+                eval "cat ${_save_log_path}"
+                status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
+                sleep 10s
+            done
+            eval "${python_} -m paddle_serving_server.serve stop"
+        else
+            echo "Does not support hardware [${use_gpu}] other than CPU and GPU Currently!"
+        fi
+    done
+}
+
+
+# set cuda device
+GPUID=$2
+if [ ${#GPUID} -le 0 ]; then
+    env=" "
+else
+    env="export CUDA_VISIBLE_DEVICES=${GPUID}"
+fi
+eval ${env}
+
+
+echo "################### run test ###################"
+
+export Count=0
+IFS="|"
+if [[ ${model_name} =~ "ShiTu" ]]; then
+    func_serving_rec
+else
+    func_serving_cls
+fi