From 8641aed6705ea2407883c70a4fbdf3849e21b0b3 Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Wed, 15 Jun 2022 16:05:33 +0800
Subject: [PATCH 1/2] polish prepare.sh and docs

---
 test_tipc/docs/test_serving_infer_cpp.md    | 13 +++++++++++++
 test_tipc/docs/test_serving_infer_python.md |  9 +++++++++
 test_tipc/prepare.sh                        | 11 -----------
 test_tipc/test_serving_infer_cpp.sh         | 10 +++++++++-
 test_tipc/test_serving_infer_python.sh      | 10 +++++++++-
 5 files changed, 40 insertions(+), 13 deletions(-)

diff --git a/test_tipc/docs/test_serving_infer_cpp.md b/test_tipc/docs/test_serving_infer_cpp.md
index 736e18ace..2c269a104 100644
--- a/test_tipc/docs/test_serving_infer_cpp.md
+++ b/test_tipc/docs/test_serving_infer_cpp.md
@@ -52,6 +52,19 @@ The main program of the Linux GPU/CPU C++ serving deployment test is `test_serving_infer_cpp.sh
     ```
 - Install the PaddleServing components (serving_client and serving-app), automatically build and install the serving_server package with custom OPs, and automatically download and extract the inference model
     ```bash
+    # Install the required dependency packages
+    python3.7 -m pip install paddle_serving_client==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
+    python3.7 -m pip install paddle-serving-app==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
+
+    # Build and install the serving-server package with custom OPs
+    pushd ./deploy/paddleserving
+    source build_server.sh python3.7
+    popd
+
+    # The faiss package is required when testing the PP-ShiTu recognition model
+    python3.7 -m pip install faiss-cpu==1.7.1post2 -i https://pypi.tuna.tsinghua.edu.cn/simple
+
+    # Download the model and data
     bash test_tipc/prepare.sh test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt serving_infer
     ```
diff --git a/test_tipc/docs/test_serving_infer_python.md b/test_tipc/docs/test_serving_infer_python.md
index c336e3ec1..fb19c1764 100644
--- a/test_tipc/docs/test_serving_infer_python.md
+++ b/test_tipc/docs/test_serving_infer_python.md
@@ -52,6 +52,15 @@ The main program of the Linux GPU/CPU PYTHON serving deployment test is `test_serving_infer_pyt
     ```
 - Install the PaddleServing components (serving-server, serving_client, and serving-app) and automatically download and extract the inference model
     ```bash
+    # Install the required dependency packages
+    python3.7 -m pip install paddle_serving_client==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
+    python3.7 -m pip install paddle-serving-app==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
+    python3.7 -m pip install paddle-serving-server-gpu==0.9.0.post101 -i https://pypi.tuna.tsinghua.edu.cn/simple
+
+    # The faiss package is required when testing the PP-ShiTu recognition model
+    python3.7 -m pip install faiss-cpu==1.7.1post2 -i https://pypi.tuna.tsinghua.edu.cn/simple
+
+    # Download the model and data
     bash test_tipc/prepare.sh test_tipc/configs/ResNet50/ResNet50_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt serving_infer
     ```
diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh
index 19b9e43f8..e0908d76c 100644
--- a/test_tipc/prepare.sh
+++ b/test_tipc/prepare.sh
@@ -200,18 +200,7 @@ fi
 if [[ ${MODE} = "serving_infer" ]]; then
     # prepare serving env
     python_name=$(func_parser_value "${lines[2]}")
-    ${python_name} -m pip install paddle_serving_client==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
-    ${python_name} -m pip install paddle-serving-app==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
-    python_name=$(func_parser_value "${lines[2]}")
-    if [[ ${FILENAME} =~ "cpp" ]]; then
-        pushd ./deploy/paddleserving
-        source build_server.sh ${python_name}
-        popd
-    else
-        ${python_name} -m pip install install paddle-serving-server-gpu==0.9.0.post101 -i https://pypi.tuna.tsinghua.edu.cn/simple
-    fi
     if [[ ${model_name} =~ "ShiTu" ]]; then
-        ${python_name} -m pip install faiss-cpu==1.7.1post2 -i https://pypi.tuna.tsinghua.edu.cn/simple
         cls_inference_model_url=$(func_parser_value "${lines[3]}")
         cls_tar_name=$(func_get_url_file_name "${cls_inference_model_url}")
         det_inference_model_url=$(func_parser_value "${lines[4]}")
diff --git a/test_tipc/test_serving_infer_cpp.sh b/test_tipc/test_serving_infer_cpp.sh
index ff0a214cc..7b834d5b0 100644
--- a/test_tipc/test_serving_infer_cpp.sh
+++ b/test_tipc/test_serving_infer_cpp.sh
@@ -104,6 +104,8 @@ function func_serving_cls(){
         if [[ ${use_gpu} = "null" ]]; then
             web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 &"
             eval ${web_service_cpp_cmd}
+            last_status=${PIPESTATUS[0]}
+            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
             sleep 5s
             _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_batchsize_1.log"
             pipeline_cmd="${python_} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
@@ -116,6 +118,8 @@ function func_serving_cls(){
         else
             web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 --gpu_id=${use_gpu} &"
             eval ${web_service_cpp_cmd}
+            last_status=${PIPESTATUS[0]}
+            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
             sleep 8s

             _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_pipeline_batchsize_1.log"
@@ -213,6 +217,8 @@ function func_serving_rec(){
         det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
         web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 &"
         eval ${web_service_cpp_cmd}
+        last_status=${PIPESTATUS[0]}
+        status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
         sleep 5s
         _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_batchsize_1.log"
         pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
@@ -226,6 +232,8 @@ function func_serving_rec(){
         det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
         web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 --gpu_id=${use_gpu} &"
         eval ${web_service_cpp_cmd}
+        last_status=${PIPESTATUS[0]}
+        status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
         sleep 5s
         _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_batchsize_1.log"
         pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
@@ -243,7 +251,7 @@
 # set cuda device
 GPUID=$2
 if [ ${#GPUID} -le 0 ];then
-    env=" "
+    env="export CUDA_VISIBLE_DEVICES=0"
 else
     env="export CUDA_VISIBLE_DEVICES=${GPUID}"
 fi
diff --git a/test_tipc/test_serving_infer_python.sh b/test_tipc/test_serving_infer_python.sh
index 4dc0eb36c..784a62ebd 100644
--- a/test_tipc/test_serving_infer_python.sh
+++ b/test_tipc/test_serving_infer_python.sh
@@ -98,6 +98,8 @@ function func_serving_cls(){

         web_service_cmd="${python_} ${web_service_py} &"
         eval ${web_service_cmd}
+        last_status=${PIPESTATUS[0]}
+        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
         sleep 5s
         for pipeline in ${pipeline_py[*]}; do
             _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
@@ -130,6 +132,8 @@ function func_serving_cls(){

             web_service_cmd="${python_} ${web_service_py} & "
             eval ${web_service_cmd}
+            last_status=${PIPESTATUS[0]}
+            status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
             sleep 5s
             for pipeline in ${pipeline_py[*]}; do
                 _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
@@ -237,6 +241,8 @@ function func_serving_rec(){

         web_service_cmd="${python} ${web_service_py} &"
         eval ${web_service_cmd}
+        last_status=${PIPESTATUS[0]}
+        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
         sleep 5s
         for pipeline in ${pipeline_py[*]}; do
             _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
@@ -269,6 +275,8 @@ function func_serving_rec(){

             web_service_cmd="${python} ${web_service_py} & "
             eval ${web_service_cmd}
+            last_status=${PIPESTATUS[0]}
+            status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
             sleep 10s
             for pipeline in ${pipeline_py[*]}; do
                 _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
@@ -290,7 +298,7 @@ function func_serving_rec(){
 # set cuda device
 GPUID=$2
 if [ ${#GPUID} -le 0 ];then
-    env=" "
+    env="export CUDA_VISIBLE_DEVICES=0"
 else
     env="export CUDA_VISIBLE_DEVICES=${GPUID}"
 fi

From eea897d2c3ad801edb54bd9969102639df4a15d0 Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Wed, 15 Jun 2022 20:26:25 +0800
Subject: [PATCH 2/2] polish test_serving* docs

---
 test_tipc/docs/test_serving_infer_cpp.md    | 31 +++++++++++++--------
 test_tipc/docs/test_serving_infer_python.md | 25 +++++++++--------
 test_tipc/test_serving_infer_cpp.sh         |  4 +--
 test_tipc/test_serving_infer_python.sh      |  2 +-
 4 files changed, 35 insertions(+), 27 deletions(-)

diff --git a/test_tipc/docs/test_serving_infer_cpp.md b/test_tipc/docs/test_serving_infer_cpp.md
index 2c269a104..df13c2b26 100644
--- a/test_tipc/docs/test_serving_infer_cpp.md
+++ b/test_tipc/docs/test_serving_infer_cpp.md
@@ -38,18 +38,25 @@ The main program of the Linux GPU/CPU C++ serving deployment test is `test_serving_infer_cpp.sh

 - Install PaddlePaddle: if paddlepaddle 2.2 or later is already installed, there is no need to run the commands below.

-  ```shell
-  # Paddle 2.2 or later is required
-  # Install the GPU version of Paddle
-  python3.7 -m pip install paddlepaddle-gpu==2.2.0
-  # Install the CPU version of Paddle
-  python3.7 -m pip install paddlepaddle==2.2.0
-  ```
+    ```shell
+    # Paddle 2.2 or later is required
+    # Install the GPU version of Paddle
+    python3.7 -m pip install paddlepaddle-gpu==2.2.0
+    # Install the CPU version of Paddle
+    python3.7 -m pip install paddlepaddle==2.2.0
+    ```

 - Install dependencies
-  ```shell
-  python3.7 -m pip install -r requirements.txt
-  ```
+    ```shell
+    python3.7 -m pip install -r requirements.txt
+    ```
+
+- Install TensorRT
+  The serving-server build script sets the `TENSORRT_LIBRARY_PATH` environment variable, so TensorRT must be installed before building.
+
+  If the test runs inside the `registry.baidubce.com/paddlepaddle/paddle:latest-dev-cuda10.1-cudnn7-gcc82` image, TensorRT is already bundled and nothing needs to be installed;
+  otherwise, install it by following [3.2 Install TensorRT](install.md#32-安装tensorrt), and change `TENSORRT_LIBRARY_PATH` in [build_server.sh](../../deploy/paddleserving/build_server.sh#L62) to the installed path.
+
 - Install the PaddleServing components (serving_client and serving-app), automatically build and install the serving_server package with custom OPs, and automatically download and extract the inference model
     ```bash
     # Install the required dependency packages
     python3.7 -m pip install paddle_serving_client==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
@@ -73,14 +80,14 @@
 The test method is shown below; to test a different model, simply switch to your own parameter configuration file.

 ```bash
-bash test_tipc/test_serving_infer_cpp.sh ${your_params_file}
+bash test_tipc/test_serving_infer_cpp.sh ${your_params_file} ${mode}
 ```

 Taking the `Linux GPU/CPU C++ serving deployment test` of `PPLCNet_x1_0` as an example, the command is as follows.

 ```bash
-bash test_tipc/test_serving_infer_cpp.sh test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
+bash test_tipc/test_serving_infer_cpp.sh test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt serving_infer
 ```

 Output like the following indicates that the command ran successfully.
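The TensorRT note added above tells the reader to point `TENSORRT_LIBRARY_PATH` in `build_server.sh` at a local installation when the dev image is not used. Below is a minimal sketch of that edit; the install prefix is an assumed example path, not a value taken from the repository.

```bash
# Assumed example path -- substitute wherever your TensorRT package was unpacked.
# This mirrors the export that deploy/paddleserving/build_server.sh performs
# (around L62) before compiling the serving-server package with custom OPs.
export TENSORRT_LIBRARY_PATH=/path/to/TensorRT-6.x/targets/x86_64-linux-gnu
```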
diff --git a/test_tipc/docs/test_serving_infer_python.md b/test_tipc/docs/test_serving_infer_python.md
index fb19c1764..1e802d3ff 100644
--- a/test_tipc/docs/test_serving_infer_python.md
+++ b/test_tipc/docs/test_serving_infer_python.md
@@ -38,18 +38,19 @@ The main program of the Linux GPU/CPU PYTHON serving deployment test is `test_serving_infer_pyt

 - Install PaddlePaddle: if paddlepaddle 2.2 or later is already installed, there is no need to run the commands below.

-  ```shell
-  # Paddle 2.2 or later is required
-  # Install the GPU version of Paddle
-  python3.7 -m pip install paddlepaddle-gpu==2.2.0
-  # Install the CPU version of Paddle
-  python3.7 -m pip install paddlepaddle==2.2.0
-  ```
+    ```shell
+    # Paddle 2.2 or later is required
+    # Install the GPU version of Paddle
+    python3.7 -m pip install paddlepaddle-gpu==2.2.0
+    # Install the CPU version of Paddle
+    python3.7 -m pip install paddlepaddle==2.2.0
+    ```

 - Install dependencies
-  ```shell
-  python3.7 -m pip install -r requirements.txt
-  ```
+    ```shell
+    python3.7 -m pip install -r requirements.txt
+    ```
+
 - Install the PaddleServing components (serving-server, serving_client, and serving-app) and automatically download and extract the inference model
     ```bash
     # Install the required dependency packages
@@ -69,14 +70,14 @@
 The test method is shown below; to test a different model, simply switch to your own parameter configuration file.

 ```bash
-bash test_tipc/test_serving_infer_python.sh ${your_params_file}
+bash test_tipc/test_serving_infer_python.sh ${your_params_file} ${mode}
 ```

 Taking the `Linux GPU/CPU PYTHON serving deployment test` of `ResNet50` as an example, the command is as follows.

 ```bash
-bash test_tipc/test_serving_infer_python.sh test_tipc/configs/ResNet50/ResNet50_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
+bash test_tipc/test_serving_infer_python.sh test_tipc/configs/ResNet50/ResNet50_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt serving_infer
 ```

 Output like the following indicates that the command ran successfully.
diff --git a/test_tipc/test_serving_infer_cpp.sh b/test_tipc/test_serving_infer_cpp.sh
index 7b834d5b0..fdb7ef186 100644
--- a/test_tipc/test_serving_infer_cpp.sh
+++ b/test_tipc/test_serving_infer_cpp.sh
@@ -211,7 +211,7 @@ function func_serving_rec(){
     unset https_proxy
     unset http_proxy

-    export SERVING_BIN=${PWD}/../Serving/server-build-gpu-opencv/core/general-server/serving
+    # export SERVING_BIN=${PWD}/../Serving/server-build-gpu-opencv/core/general-server/serving
     for use_gpu in ${web_use_gpu_list[*]}; do
         if [ ${use_gpu} = "null" ]; then
             det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
@@ -249,7 +249,7 @@
 # set cuda device
-GPUID=$2
+GPUID=$3
 if [ ${#GPUID} -le 0 ];then
     env="export CUDA_VISIBLE_DEVICES=0"
 else
     env="export CUDA_VISIBLE_DEVICES=${GPUID}"
 fi
diff --git a/test_tipc/test_serving_infer_python.sh b/test_tipc/test_serving_infer_python.sh
index 784a62ebd..6141e1108 100644
--- a/test_tipc/test_serving_infer_python.sh
+++ b/test_tipc/test_serving_infer_python.sh
@@ -296,7 +296,7 @@ function func_serving_rec(){

 # set cuda device
-GPUID=$2
+GPUID=$3
 if [ ${#GPUID} -le 0 ];then
     env="export CUDA_VISIBLE_DEVICES=0"
 else
     env="export CUDA_VISIBLE_DEVICES=${GPUID}"
 fi
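With `GPUID` now read from `$3`, both test scripts accept an optional GPU id after the mode argument and fall back to `CUDA_VISIBLE_DEVICES=0` when it is omitted. A usage sketch consistent with the two-argument form documented above:

```bash
# Default: the third argument is omitted, so the test pins itself to GPU 0.
bash test_tipc/test_serving_infer_cpp.sh \
    test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt \
    serving_infer

# Run the same test on GPU 1 via the new optional third argument.
bash test_tipc/test_serving_infer_cpp.sh \
    test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt \
    serving_infer 1
```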