Merge pull request #2067 from HydrogenSulfate/polish_prepare

polish prepare.sh and docs

commit dcd17d9437
@ -38,20 +38,40 @@ The main program of the Linux GPU/CPU C++ serving deployment test is `test_serving_infer_cpp.sh`

- Install PaddlePaddle: if paddlepaddle 2.2 or later is already installed, the following commands can be skipped.

  ```shell
  # Paddle 2.2 or later is required
  # install the GPU version of Paddle
  python3.7 -m pip install paddlepaddle-gpu==2.2.0
  # install the CPU version of Paddle
  python3.7 -m pip install paddlepaddle==2.2.0
  ```

- Install dependencies

  ```shell
  python3.7 -m pip install -r requirements.txt
  ```

- Install TensorRT

  The serving-server build script sets the `TENSORRT_LIBRARY_PATH` environment variable, so TensorRT must be installed before compiling. If the `registry.baidubce.com/paddlepaddle/paddle:latest-dev-cuda10.1-cudnn7-gcc82` image is used for testing, TensorRT is already bundled and no installation is needed; otherwise, install it by following [3.2 Install TensorRT](install.md#32-安装tensorrt) and change `TENSORRT_LIBRARY_PATH` in [build_server.sh](../../deploy/paddleserving/build_server.sh#L62) to the installed path (a short sketch of this step follows after this section).

- Install the PaddleServing components serving_client and serving-app, automatically compile and install the serving_server package with the custom OPs, and automatically download and extract the inference models

  ```bash
  # install the required dependency packages
  python3.7 -m pip install paddle_serving_client==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
  python3.7 -m pip install paddle-serving-app==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple

  # build and install the serving-server package with the custom OPs
  pushd ./deploy/paddleserving
  source build_server.sh python3.7
  popd

  # faiss is required when testing the PP-ShiTu recognition model
  python3.7 -m pip install faiss-cpu==1.7.1post2 -i https://pypi.tuna.tsinghua.edu.cn/simple

  # download the models and data
  bash test_tipc/prepare.sh test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt serving_infer
  ```
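The TensorRT note above asks for `TENSORRT_LIBRARY_PATH` in `build_server.sh` to be pointed at a locally installed TensorRT. A minimal sketch, assuming TensorRT was unpacked to `/usr/local/TensorRT-7.1.3.4` (a hypothetical path; substitute the real install location):

```bash
# Hypothetical TensorRT location; adjust to wherever the tarball was unpacked.
TRT_DIR=/usr/local/TensorRT-7.1.3.4

# Sanity-check that the runtime library is actually there before building.
ls "${TRT_DIR}/lib/libnvinfer.so"* \
    || echo "TensorRT not found under ${TRT_DIR}; fix the path first"

# Then edit deploy/paddleserving/build_server.sh (around line 62) so that the
# TENSORRT_LIBRARY_PATH it sets matches ${TRT_DIR}/lib.
```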
@ -60,14 +80,14 @@ The main program of the Linux GPU/CPU C++ serving deployment test is `test_serving_infer_cpp.sh`

The test method is shown below; to test a different model, simply swap in your own parameter configuration file.

```bash
bash test_tipc/test_serving_infer_cpp.sh ${your_params_file} ${mode}
```

Taking the `Linux GPU/CPU C++ serving deployment test` of `PPLCNet_x1_0` as an example, the command is as follows.

```bash
bash test_tipc/test_serving_infer_cpp.sh test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt serving_infer
```

The output below indicates that the command ran successfully.
@ -38,20 +38,30 @@ The main program of the Linux GPU/CPU PYTHON serving deployment test is `test_serving_infer_python.sh`

- Install PaddlePaddle: if paddlepaddle 2.2 or later is already installed, the following commands can be skipped.

  ```shell
  # Paddle 2.2 or later is required
  # install the GPU version of Paddle
  python3.7 -m pip install paddlepaddle-gpu==2.2.0
  # install the CPU version of Paddle
  python3.7 -m pip install paddlepaddle==2.2.0
  ```

- Install dependencies

  ```shell
  python3.7 -m pip install -r requirements.txt
  ```

- Install the PaddleServing components serving-server, serving_client and serving-app, and automatically download and extract the inference models

  ```bash
  # install the required dependency packages
  python3.7 -m pip install paddle_serving_client==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
  python3.7 -m pip install paddle-serving-app==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
  python3.7 -m pip install paddle-serving-server-gpu==0.9.0.post101 -i https://pypi.tuna.tsinghua.edu.cn/simple

  # faiss is required when testing the PP-ShiTu recognition model
  python3.7 -m pip install faiss-cpu==1.7.1post2 -i https://pypi.tuna.tsinghua.edu.cn/simple

  # download the models and data
  bash test_tipc/prepare.sh test_tipc/configs/ResNet50/ResNet50_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt serving_infer
  ```
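As an optional check (a sketch, not part of the original doc), the serving packages installed above can be verified to import cleanly before running the test; the module names below follow the `paddle_serving_server.serve` invocation used later in these scripts:

```bash
# Optional sanity check: the pip packages above provide these importable modules.
python3.7 -c "import paddle_serving_client, paddle_serving_app, paddle_serving_server; print('PaddleServing components OK')"
```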
@ -60,14 +70,14 @@ The main program of the Linux GPU/CPU PYTHON serving deployment test is `test_serving_infer_python.sh`

The test method is shown below; to test a different model, simply swap in your own parameter configuration file.

```bash
bash test_tipc/test_serving_infer_python.sh ${your_params_file} ${mode}
```

Taking the `Linux GPU/CPU PYTHON serving deployment test` of `ResNet50` as an example, the command is as follows.

```bash
bash test_tipc/test_serving_infer_python.sh test_tipc/configs/ResNet50/ResNet50_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt serving_infer
```

The output below indicates that the command ran successfully.
@ -200,18 +200,7 @@ fi
 if [[ ${MODE} = "serving_infer" ]]; then
     # prepare serving env
     python_name=$(func_parser_value "${lines[2]}")
-    ${python_name} -m pip install paddle_serving_client==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
-    ${python_name} -m pip install paddle-serving-app==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
-    python_name=$(func_parser_value "${lines[2]}")
-    if [[ ${FILENAME} =~ "cpp" ]]; then
-        pushd ./deploy/paddleserving
-        source build_server.sh ${python_name}
-        popd
-    else
-        ${python_name} -m pip install install paddle-serving-server-gpu==0.9.0.post101 -i https://pypi.tuna.tsinghua.edu.cn/simple
-    fi
     if [[ ${model_name} =~ "ShiTu" ]]; then
-        ${python_name} -m pip install faiss-cpu==1.7.1post2 -i https://pypi.tuna.tsinghua.edu.cn/simple
         cls_inference_model_url=$(func_parser_value "${lines[3]}")
         cls_tar_name=$(func_get_url_file_name "${cls_inference_model_url}")
         det_inference_model_url=$(func_parser_value "${lines[4]}")
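For readers unfamiliar with the helper kept above (a hedged aside): `func_parser_value` extracts the value part of a `key:value` line from the TIPC config file, so `lines[2]` typically yields the Python interpreter to use. A rough sketch of that behaviour, with a made-up config line:

```bash
# Hypothetical config line in the "key:value" form used by the TIPC config files.
line="python:python3.7"

# Roughly what func_parser_value does: keep everything after the first colon.
value="${line#*:}"
echo "${value}"    # -> python3.7
```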
@ -104,6 +104,8 @@ function func_serving_cls(){
     if [[ ${use_gpu} = "null" ]]; then
         web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 &"
         eval ${web_service_cpp_cmd}
+        last_status=${PIPESTATUS[0]}
+        status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
         sleep 5s
         _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_batchsize_1.log"
         pipeline_cmd="${python_} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
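A hedged aside on the two lines added above: `${PIPESTATUS[0]}` captures the exit status of the command line just run by `eval` (here, launching the serving server in the background), which `status_check` — the helper these TIPC scripts already use — then records against the command in the status log. A minimal standalone illustration of the same pattern:

```bash
# Stand-in command; in the script this is the paddle_serving_server launch line.
dummy_cmd="sleep 1 &"
eval ${dummy_cmd}
last_status=${PIPESTATUS[0]}   # exit status of the eval'ed command line
echo "[${dummy_cmd}] exited with status ${last_status}"
```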
@ -116,6 +118,8 @@ function func_serving_cls(){
     else
         web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 --gpu_id=${use_gpu} &"
         eval ${web_service_cpp_cmd}
+        last_status=${PIPESTATUS[0]}
+        status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
         sleep 8s

         _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_pipeline_batchsize_1.log"
@ -207,12 +211,14 @@ function func_serving_rec(){
     unset https_proxy
     unset http_proxy

-    export SERVING_BIN=${PWD}/../Serving/server-build-gpu-opencv/core/general-server/serving
+    # export SERVING_BIN=${PWD}/../Serving/server-build-gpu-opencv/core/general-server/serving
     for use_gpu in ${web_use_gpu_list[*]}; do
         if [ ${use_gpu} = "null" ]; then
             det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
             web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 &"
             eval ${web_service_cpp_cmd}
+            last_status=${PIPESTATUS[0]}
+            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
             sleep 5s
             _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_batchsize_1.log"
             pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
@ -226,6 +232,8 @@ function func_serving_rec(){
             det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
             web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 --gpu_id=${use_gpu} &"
             eval ${web_service_cpp_cmd}
+            last_status=${PIPESTATUS[0]}
+            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
             sleep 5s
             _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_batchsize_1.log"
             pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
@ -241,9 +249,9 @@ function func_serving_rec(){

 # set cuda device
-GPUID=$2
+GPUID=$3
 if [ ${#GPUID} -le 0 ];then
-    env=" "
+    env="export CUDA_VISIBLE_DEVICES=0"
 else
     env="export CUDA_VISIBLE_DEVICES=${GPUID}"
 fi
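With the change above, the GPU card id comes from the script's third positional argument (the first two being the config file and the mode), and defaults to card 0 when omitted. A hedged usage sketch:

```bash
# Run the C++ serving test on GPU card 1; dropping the trailing "1" falls back
# to CUDA_VISIBLE_DEVICES=0 via the default branch shown above.
bash test_tipc/test_serving_infer_cpp.sh \
    test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt \
    serving_infer 1
```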
@ -98,6 +98,8 @@ function func_serving_cls(){

         web_service_cmd="${python_} ${web_service_py} &"
         eval ${web_service_cmd}
+        last_status=${PIPESTATUS[0]}
+        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
         sleep 5s
         for pipeline in ${pipeline_py[*]}; do
             _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
@ -130,6 +132,8 @@ function func_serving_cls(){

         web_service_cmd="${python_} ${web_service_py} & "
         eval ${web_service_cmd}
+        last_status=${PIPESTATUS[0]}
+        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
         sleep 5s
         for pipeline in ${pipeline_py[*]}; do
             _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
@ -237,6 +241,8 @@ function func_serving_rec(){

         web_service_cmd="${python} ${web_service_py} &"
         eval ${web_service_cmd}
+        last_status=${PIPESTATUS[0]}
+        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
         sleep 5s
         for pipeline in ${pipeline_py[*]}; do
             _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
@ -269,6 +275,8 @@ function func_serving_rec(){

         web_service_cmd="${python} ${web_service_py} & "
         eval ${web_service_cmd}
+        last_status=${PIPESTATUS[0]}
+        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
         sleep 10s
         for pipeline in ${pipeline_py[*]}; do
             _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
@ -288,9 +296,9 @@ function func_serving_rec(){

 # set cuda device
-GPUID=$2
+GPUID=$3
 if [ ${#GPUID} -le 0 ];then
-    env=" "
+    env="export CUDA_VISIBLE_DEVICES=0"
 else
     env="export CUDA_VISIBLE_DEVICES=${GPUID}"
 fi