Merge pull request #1984 from HydrogenSulfate/tipc_pyserv

Add python serving infer chain (pull/1993/head)

commit 11dba15154
@@ -30,4 +30,4 @@ op:
        client_type: local_predictor

        #Fetch list: uses the alias_name of fetch_var in client_config
        fetch_list: ["prediction"]
@@ -31,7 +31,7 @@ op:

        #Fetch list: uses the alias_name of fetch_var in client_config
        fetch_list: ["features"]

    det:
        concurrency: 1
        local_service_conf:
@@ -103,10 +103,11 @@ bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/MobileNetV3/Mo

## 4 Start the Tests

The feature tests cover training-related options such as mixed precision, pruning, and quantization, as well as inference-related options such as MKL-DNN and TensorRT. Follow the links below for details and usage tutorials:

- [test_train_inference_python usage](docs/test_train_inference_python.md): tests basic Python-based training, evaluation, and inference, including pruning, quantization, and distillation.
- [test_inference_cpp usage](docs/test_inference_cpp.md): tests C++-based model inference.
- [test_serving usage](docs/test_serving.md): tests service deployment based on Paddle Serving.
- [test_lite_arm_cpu_cpp usage](docs/test_lite_arm_cpu_cpp.md): tests C++ inference deployment on ARM CPU based on Paddle-Lite.
- [test_paddle2onnx usage](docs/test_paddle2onnx.md): tests Paddle2ONNX model conversion and verifies correctness.
+- [test_serving_infer_python usage](docs/test_serving_infer_python.md): tests Python serving deployment.
@@ -0,0 +1,14 @@
===========================serving_params===========================
model_name:MobileNetV3_large_x1_0
python:python3.7
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileNetV3_large_x1_0_infer.tar
trans_model:-m paddle_serving_client.convert
--dirname:./deploy/paddleserving/MobileNetV3_large_x1_0_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/paddleserving/MobileNetV3_large_x1_0_serving/
--serving_client:./deploy/paddleserving/MobileNetV3_large_x1_0_client/
serving_dir:./deploy/paddleserving
web_service:classification_web_service.py
--use_gpu:0|null
pipline:pipeline_http_client.py
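Read column-wise, the `trans_model` line plus the `--` lines above parameterize a single model-conversion call; `test_serving_infer_python.sh` (added below) assembles them into a command equivalent to this sketch (paths relative to the repository root):

```bash
# Equivalent of the trans_model_cmd the test driver builds from the
# serving_params above.
python3.7 -m paddle_serving_client.convert \
    --dirname ./deploy/paddleserving/MobileNetV3_large_x1_0_infer/ \
    --model_filename inference.pdmodel \
    --params_filename inference.pdiparams \
    --serving_server ./deploy/paddleserving/MobileNetV3_large_x1_0_serving/ \
    --serving_client ./deploy/paddleserving/MobileNetV3_large_x1_0_client/
```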
@@ -0,0 +1,18 @@
===========================serving_params===========================
model_name:PPShiTu
python:python3.7
cls_inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar
det_inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
trans_model:-m paddle_serving_client.convert
--dirname:./models/general_PPLCNet_x2_5_lite_v1.0_infer/
--dirname:./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./models/general_PPLCNet_x2_5_lite_v1.0_serving/
--serving_client:./models/general_PPLCNet_x2_5_lite_v1.0_client/
--serving_server:./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/
--serving_client:./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/
serving_dir:./paddleserving/recognition
web_service:recognition_web_service.py
--use_gpu:0|null
pipline:pipeline_http_client.py
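The PP-ShiTu config doubles the `--dirname`/`--serving_server`/`--serving_client` entries because two models are served; `func_serving_rec` in the test driver below therefore invokes the converter twice, roughly as follows (run from `./deploy`):

```bash
# Sketch of the two convert calls implied by the doubled entries above.
python3.7 -m paddle_serving_client.convert \
    --dirname ./models/general_PPLCNet_x2_5_lite_v1.0_infer/ \
    --model_filename inference.pdmodel --params_filename inference.pdiparams \
    --serving_server ./models/general_PPLCNet_x2_5_lite_v1.0_serving/ \
    --serving_client ./models/general_PPLCNet_x2_5_lite_v1.0_client/
python3.7 -m paddle_serving_client.convert \
    --dirname ./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/ \
    --model_filename inference.pdmodel --params_filename inference.pdiparams \
    --serving_server ./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/ \
    --serving_client ./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/
```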
@@ -0,0 +1,14 @@
===========================serving_params===========================
model_name:PPHGNet_small
python:python3.7
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_infer.tar
trans_model:-m paddle_serving_client.convert
--dirname:./deploy/paddleserving/PPHGNet_small_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/paddleserving/PPHGNet_small_serving/
--serving_client:./deploy/paddleserving/PPHGNet_small_client/
serving_dir:./deploy/paddleserving
web_service:classification_web_service.py
--use_gpu:0|null
pipline:pipeline_http_client.py

@@ -0,0 +1,14 @@
===========================serving_params===========================
model_name:PPHGNet_tiny
python:python3.7
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_tiny_infer.tar
trans_model:-m paddle_serving_client.convert
--dirname:./deploy/paddleserving/PPHGNet_tiny_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/paddleserving/PPHGNet_tiny_serving/
--serving_client:./deploy/paddleserving/PPHGNet_tiny_client/
serving_dir:./deploy/paddleserving
web_service:classification_web_service.py
--use_gpu:0|null
pipline:pipeline_http_client.py

@@ -0,0 +1,14 @@
===========================serving_params===========================
model_name:PPLCNet_x0_25
python:python3.7
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_25_infer.tar
trans_model:-m paddle_serving_client.convert
--dirname:./deploy/paddleserving/PPLCNet_x0_25_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/paddleserving/PPLCNet_x0_25_serving/
--serving_client:./deploy/paddleserving/PPLCNet_x0_25_client/
serving_dir:./deploy/paddleserving
web_service:classification_web_service.py
--use_gpu:0|null
pipline:pipeline_http_client.py

@@ -0,0 +1,14 @@
===========================serving_params===========================
model_name:PPLCNet_x0_35
python:python3.7
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_35_infer.tar
trans_model:-m paddle_serving_client.convert
--dirname:./deploy/paddleserving/PPLCNet_x0_35_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/paddleserving/PPLCNet_x0_35_serving/
--serving_client:./deploy/paddleserving/PPLCNet_x0_35_client/
serving_dir:./deploy/paddleserving
web_service:classification_web_service.py
--use_gpu:0|null
pipline:pipeline_http_client.py

@@ -0,0 +1,14 @@
===========================serving_params===========================
model_name:PPLCNet_x0_5
python:python3.7
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_5_infer.tar
trans_model:-m paddle_serving_client.convert
--dirname:./deploy/paddleserving/PPLCNet_x0_5_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/paddleserving/PPLCNet_x0_5_serving/
--serving_client:./deploy/paddleserving/PPLCNet_x0_5_client/
serving_dir:./deploy/paddleserving
web_service:classification_web_service.py
--use_gpu:0|null
pipline:pipeline_http_client.py

@@ -0,0 +1,14 @@
===========================serving_params===========================
model_name:PPLCNet_x0_75
python:python3.7
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_75_infer.tar
trans_model:-m paddle_serving_client.convert
--dirname:./deploy/paddleserving/PPLCNet_x0_75_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/paddleserving/PPLCNet_x0_75_serving/
--serving_client:./deploy/paddleserving/PPLCNet_x0_75_client/
serving_dir:./deploy/paddleserving
web_service:classification_web_service.py
--use_gpu:0|null
pipline:pipeline_http_client.py

@@ -0,0 +1,14 @@
===========================serving_params===========================
model_name:PPLCNet_x1_0
python:python3.7
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_0_infer.tar
trans_model:-m paddle_serving_client.convert
--dirname:./deploy/paddleserving/PPLCNet_x1_0_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/paddleserving/PPLCNet_x1_0_serving/
--serving_client:./deploy/paddleserving/PPLCNet_x1_0_client/
serving_dir:./deploy/paddleserving
web_service:classification_web_service.py
--use_gpu:0|null
pipline:pipeline_http_client.py

@@ -0,0 +1,14 @@
===========================serving_params===========================
model_name:PPLCNet_x1_5
python:python3.7
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_5_infer.tar
trans_model:-m paddle_serving_client.convert
--dirname:./deploy/paddleserving/PPLCNet_x1_5_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/paddleserving/PPLCNet_x1_5_serving/
--serving_client:./deploy/paddleserving/PPLCNet_x1_5_client/
serving_dir:./deploy/paddleserving
web_service:classification_web_service.py
--use_gpu:0|null
pipline:pipeline_http_client.py

@@ -0,0 +1,14 @@
===========================serving_params===========================
model_name:PPLCNet_x2_0
python:python3.7
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_0_infer.tar
trans_model:-m paddle_serving_client.convert
--dirname:./deploy/paddleserving/PPLCNet_x2_0_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/paddleserving/PPLCNet_x2_0_serving/
--serving_client:./deploy/paddleserving/PPLCNet_x2_0_client/
serving_dir:./deploy/paddleserving
web_service:classification_web_service.py
--use_gpu:0|null
pipline:pipeline_http_client.py

@@ -0,0 +1,14 @@
===========================serving_params===========================
model_name:PPLCNet_x2_5
python:python3.7
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_5_infer.tar
trans_model:-m paddle_serving_client.convert
--dirname:./deploy/paddleserving/PPLCNet_x2_5_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/paddleserving/PPLCNet_x2_5_serving/
--serving_client:./deploy/paddleserving/PPLCNet_x2_5_client/
serving_dir:./deploy/paddleserving
web_service:classification_web_service.py
--use_gpu:0|null
pipline:pipeline_http_client.py

@@ -0,0 +1,14 @@
===========================serving_params===========================
model_name:PPLCNetV2_base
python:python3.7
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNetV2_base_infer.tar
trans_model:-m paddle_serving_client.convert
--dirname:./deploy/paddleserving/PPLCNetV2_base_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/paddleserving/PPLCNetV2_base_serving/
--serving_client:./deploy/paddleserving/PPLCNetV2_base_client/
serving_dir:./deploy/paddleserving
web_service:classification_web_service.py
--use_gpu:0|null
pipline:pipeline_http_client.py

@@ -0,0 +1,14 @@
===========================serving_params===========================
model_name:ResNet50
python:python3.7
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_infer.tar
trans_model:-m paddle_serving_client.convert
--dirname:./deploy/paddleserving/ResNet50_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/paddleserving/ResNet50_serving/
--serving_client:./deploy/paddleserving/ResNet50_client/
serving_dir:./deploy/paddleserving
web_service:classification_web_service.py
--use_gpu:0|null
pipline:pipeline_http_client.py

@@ -0,0 +1,14 @@
===========================serving_params===========================
model_name:ResNet50_vd
python:python3.7
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar
trans_model:-m paddle_serving_client.convert
--dirname:./deploy/paddleserving/ResNet50_vd_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/paddleserving/ResNet50_vd_serving/
--serving_client:./deploy/paddleserving/ResNet50_vd_client/
serving_dir:./deploy/paddleserving
web_service:classification_web_service.py
--use_gpu:0|null
pipline:pipeline_http_client.py

@@ -0,0 +1,14 @@
===========================serving_params===========================
model_name:SwinTransformer_tiny_patch4_window7_224
python:python3.7
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SwinTransformer_tiny_patch4_window7_224_infer.tar
trans_model:-m paddle_serving_client.convert
--dirname:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_serving/
--serving_client:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_client/
serving_dir:./deploy/paddleserving
web_service:classification_web_service.py
--use_gpu:0|null
pipline:pipeline_http_client.py
@@ -0,0 +1,87 @@
# Linux GPU/CPU Python Serving Deployment Test

The main driver for the Linux GPU/CPU Python serving deployment test is `test_serving_infer_python.sh`, which exercises Python-based model serving deployment.

## 1. Test Summary

- Inference:

| Algorithm | Model | device_CPU | device_GPU |
| :----: | :----: | :----: | :----: |
| MobileNetV3 | MobileNetV3_large_x1_0 | supported | supported |
| PP-ShiTu | PPShiTu_general_rec, PPShiTu_mainbody_det | supported | supported |
| PPHGNet | PPHGNet_small | supported | supported |
| PPHGNet | PPHGNet_tiny | supported | supported |
| PPLCNet | PPLCNet_x0_25 | supported | supported |
| PPLCNet | PPLCNet_x0_35 | supported | supported |
| PPLCNet | PPLCNet_x0_5 | supported | supported |
| PPLCNet | PPLCNet_x0_75 | supported | supported |
| PPLCNet | PPLCNet_x1_0 | supported | supported |
| PPLCNet | PPLCNet_x1_5 | supported | supported |
| PPLCNet | PPLCNet_x2_0 | supported | supported |
| PPLCNet | PPLCNet_x2_5 | supported | supported |
| PPLCNetV2 | PPLCNetV2_base | supported | supported |
| ResNet | ResNet50 | supported | supported |
| ResNet | ResNet50_vd | supported | supported |
| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | supported | supported |

## 2. Test Procedure

### 2.1 Prepare Data

Classification models use `./deploy/paddleserving/daisy.jpg` as the default test image; no download is required.
Recognition models use `drink_dataset_v1.0/test_images/001.jpeg` as the default test image; it is downloaded in **2.2 Prepare the Environment**.

### 2.2 Prepare the Environment

- Install PaddlePaddle: if paddlepaddle 2.2 or later is already installed, skip the commands below.

```shell
# Paddle 2.2 or later is required.
# GPU build
python3.7 -m pip install paddlepaddle-gpu==2.2.0
# CPU build
python3.7 -m pip install paddlepaddle==2.2.0
```

- Install dependencies

```shell
python3.7 -m pip install -r requirements.txt
```

- Install the PaddleServing components (serving-server, serving_client, serving-app) and automatically download and unpack the inference model:

```bash
bash test_tipc/prepare.sh test_tipc/configs/ResNet50/ResNet50_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt serving_infer
```

### 2.3 Run the Tests

Run the test as shown below; to test a different model, swap in the corresponding parameter config file.

```bash
bash test_tipc/test_serving_infer_python.sh ${your_params_file} serving_infer
```

Taking the `Linux GPU/CPU Python serving deployment test` of `ResNet50` as an example:

```bash
bash test_tipc/test_serving_infer_python.sh test_tipc/configs/ResNet50/ResNet50_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt serving_infer
```

Output like the following indicates the commands ran successfully:

```
Run successfully with command - python3.7 pipeline_http_client.py > ../../test_tipc/output/ResNet50/server_infer_gpu_pipeline_http_batchsize_1.log 2>&1!
Run successfully with command - python3.7 pipeline_http_client.py > ../../test_tipc/output/ResNet50/server_infer_cpu_pipeline_http_batchsize_1.log 2>&1 !
```

The prediction results are saved automatically to `./test_tipc/output/ResNet50/server_infer_gpu_pipeline_http_batchsize_1.log`, where the PaddleServing output can be inspected:

```
{'err_no': 0, 'err_msg': '', 'key': ['label', 'prob'], 'value': ["['daisy']", '[0.998314619064331]']}
```

If a run fails, the failure log and the command that produced it are printed to the terminal; use that command to reproduce and diagnose the failure.
@@ -162,13 +162,29 @@ fi
if [ ${MODE} = "serving_infer" ];then
    # prepare serving env
    python_name=$(func_parser_value "${lines[2]}")
-    ${python_name} -m pip install install paddle-serving-server-gpu==0.6.1.post101
-    ${python_name} -m pip install paddle_serving_client==0.6.1
-    ${python_name} -m pip install paddle-serving-app==0.6.1
+    ${python_name} -m pip install paddle-serving-server-gpu==0.7.0.post102
+    ${python_name} -m pip install paddle_serving_client==0.7.0
+    ${python_name} -m pip install paddle-serving-app==0.7.0
+    if [[ ${model_name} =~ "ShiTu" ]]; then
+        cls_inference_model_url=$(func_parser_value "${lines[3]}")
+        cls_tar_name=$(func_get_url_file_name "${cls_inference_model_url}")
+        det_inference_model_url=$(func_parser_value "${lines[4]}")
+        det_tar_name=$(func_get_url_file_name "${det_inference_model_url}")
+        cd ./deploy
+        mkdir models
+        cd models
+        wget -nc ${cls_inference_model_url} && tar xf ${cls_tar_name}
+        wget -nc ${det_inference_model_url} && tar xf ${det_tar_name}
+        cd ..
+    else
+        cls_inference_model_url=$(func_parser_value "${lines[3]}")
+        cls_tar_name=$(func_get_url_file_name "${cls_inference_model_url}")
+        cd ./deploy/paddleserving
+        wget -nc ${cls_inference_model_url} && tar xf ${cls_tar_name}
+        cd ../../
+    fi
    unset http_proxy
    unset https_proxy
-    cd ./deploy/paddleserving
-    wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar && tar xf ResNet50_vd_infer.tar
fi

if [ ${MODE} = "paddle2onnx_infer" ];then
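The tar filename passed to `tar xf` above is derived from the download URL by `func_get_url_file_name`, which this PR defines in the new `test_serving_infer_python.sh` further down; it is reproduced here for reference:

```bash
# Splits on "/" and echoes the last path component of a URL.
function func_get_url_file_name(){
    strs=$1
    IFS="/"
    array=(${strs})
    tmp=${array[${#array[@]}-1]}
    echo ${tmp}
}

func_get_url_file_name "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_infer.tar"
# -> ResNet50_infer.tar
```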
@@ -1,168 +0,0 @@
#!/bin/bash
source test_tipc/common_func.sh

FILENAME=$1
dataline=$(awk 'NR==1, NR==18{print}' $FILENAME)

# parser params
IFS=$'\n'
lines=(${dataline})

# parser serving
model_name=$(func_parser_value "${lines[1]}")
python=$(func_parser_value "${lines[2]}")
trans_model_py=$(func_parser_value "${lines[3]}")
infer_model_dir_key=$(func_parser_key "${lines[4]}")
infer_model_dir_value=$(func_parser_value "${lines[4]}")
model_filename_key=$(func_parser_key "${lines[5]}")
model_filename_value=$(func_parser_value "${lines[5]}")
params_filename_key=$(func_parser_key "${lines[6]}")
params_filename_value=$(func_parser_value "${lines[6]}")
serving_server_key=$(func_parser_key "${lines[7]}")
serving_server_value=$(func_parser_value "${lines[7]}")
serving_client_key=$(func_parser_key "${lines[8]}")
serving_client_value=$(func_parser_value "${lines[8]}")
serving_dir_value=$(func_parser_value "${lines[9]}")
web_service_py=$(func_parser_value "${lines[10]}")
web_use_gpu_key=$(func_parser_key "${lines[11]}")
web_use_gpu_list=$(func_parser_value "${lines[11]}")
web_use_mkldnn_key=$(func_parser_key "${lines[12]}")
web_use_mkldnn_list=$(func_parser_value "${lines[12]}")
web_cpu_threads_key=$(func_parser_key "${lines[13]}")
web_cpu_threads_list=$(func_parser_value "${lines[13]}")
web_use_trt_key=$(func_parser_key "${lines[14]}")
web_use_trt_list=$(func_parser_value "${lines[14]}")
web_precision_key=$(func_parser_key "${lines[15]}")
web_precision_list=$(func_parser_value "${lines[15]}")
pipeline_py=$(func_parser_value "${lines[16]}")
image_dir_key=$(func_parser_key "${lines[17]}")
image_dir_value=$(func_parser_value "${lines[17]}")

LOG_PATH="../../test_tipc/output"
mkdir -p ./test_tipc/output
status_log="${LOG_PATH}/results_serving.log"

function func_serving(){
    IFS='|'
    _python=$1
    _script=$2
    _model_dir=$3
    # pdserving
    set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}")
    set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
    set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}")
    set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}")
    set_image_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
    trans_model_cmd="${python} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
    eval $trans_model_cmd
    cd ${serving_dir_value}
    echo $PWD
    unset https_proxy
    unset http_proxy
    for python in ${python[*]}; do
        if [ ${python} = "cpp"]; then
            for use_gpu in ${web_use_gpu_list[*]}; do
                if [ ${use_gpu} = "null" ]; then
                    web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293"
                    eval $web_service_cmd
                    sleep 2s
                    _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
                    pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
                    eval $pipeline_cmd
                    status_check $last_status "${pipeline_cmd}" "${status_log}"
                    sleep 2s
                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
                else
                    web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293 --gpu_id=0"
                    eval $web_service_cmd
                    sleep 2s
                    _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
                    pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
                    eval $pipeline_cmd
                    status_check $last_status "${pipeline_cmd}" "${status_log}"
                    sleep 2s
                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
                fi
            done
        else
            # python serving
            for use_gpu in ${web_use_gpu_list[*]}; do
                echo ${ues_gpu}
                if [ ${use_gpu} = "null" ]; then
                    for use_mkldnn in ${web_use_mkldnn_list[*]}; do
                        if [ ${use_mkldnn} = "False" ]; then
                            continue
                        fi
                        for threads in ${web_cpu_threads_list[*]}; do
                            set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}")
                            web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &"
                            eval $web_service_cmd
                            sleep 2s
                            for pipeline in ${pipeline_py[*]}; do
                                _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
                                pipeline_cmd="${python} ${pipeline} ${set_image_dir} > ${_save_log_path} 2>&1 "
                                eval $pipeline_cmd
                                last_status=${PIPESTATUS[0]}
                                eval "cat ${_save_log_path}"
                                status_check $last_status "${pipeline_cmd}" "${status_log}"
                                sleep 2s
                            done
                            ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
                        done
                    done
                elif [ ${use_gpu} = "0" ]; then
                    for use_trt in ${web_use_trt_list[*]}; do
                        for precision in ${web_precision_list[*]}; do
                            if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
                                continue
                            fi
                            if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
                                continue
                            fi
                            if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then
                                continue
                            fi
                            set_tensorrt=$(func_set_params "${web_use_trt_key}" "${use_trt}")
                            set_precision=$(func_set_params "${web_precision_key}" "${precision}")
                            web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} & "
                            eval $web_service_cmd

                            sleep 2s
                            for pipeline in ${pipeline_py[*]}; do
                                _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
                                pipeline_cmd="${python} ${pipeline} ${set_image_dir}> ${_save_log_path} 2>&1"
                                eval $pipeline_cmd
                                last_status=${PIPESTATUS[0]}
                                eval "cat ${_save_log_path}"
                                status_check $last_status "${pipeline_cmd}" "${status_log}"
                                sleep 2s
                            done
                            ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
                        done
                    done
                else
                    echo "Does not support hardware other than CPU and GPU Currently!"
                fi
            done
        fi
    done
}


# set cuda device
GPUID=$2
if [ ${#GPUID} -le 0 ];then
    env=" "
else
    env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi
set CUDA_VISIBLE_DEVICES
eval $env


echo "################### run test ###################"

export Count=0
IFS="|"
func_serving "${web_service_cmd}"
@@ -0,0 +1,353 @@
#!/bin/bash
source test_tipc/common_func.sh

FILENAME=$1
dataline=$(awk 'NR==1, NR==19{print}' $FILENAME)

# parser params
IFS=$'\n'
lines=(${dataline})

function func_get_url_file_name(){
    strs=$1
    IFS="/"
    array=(${strs})
    tmp=${array[${#array[@]}-1]}
    echo ${tmp}
}

# parser serving
model_name=$(func_parser_value "${lines[1]}")
python=$(func_parser_value "${lines[2]}")
trans_model_py=$(func_parser_value "${lines[4]}")
infer_model_dir_key=$(func_parser_key "${lines[5]}")
infer_model_dir_value=$(func_parser_value "${lines[5]}")
model_filename_key=$(func_parser_key "${lines[6]}")
model_filename_value=$(func_parser_value "${lines[6]}")
params_filename_key=$(func_parser_key "${lines[7]}")
params_filename_value=$(func_parser_value "${lines[7]}")
serving_server_key=$(func_parser_key "${lines[8]}")
serving_server_value=$(func_parser_value "${lines[8]}")
serving_client_key=$(func_parser_key "${lines[9]}")
serving_client_value=$(func_parser_value "${lines[9]}")
serving_dir_value=$(func_parser_value "${lines[10]}")
web_service_py=$(func_parser_value "${lines[11]}")
web_use_gpu_key=$(func_parser_key "${lines[12]}")
web_use_gpu_list=$(func_parser_value "${lines[12]}")
pipeline_py=$(func_parser_value "${lines[13]}")
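# NOTE: lines[3] of the params file (inference_model_url) is consumed by
# test_tipc/prepare.sh for the model download, which is why parsing resumes
# at lines[4] here.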

function func_serving_cls(){
    LOG_PATH="../../test_tipc/output/${model_name}"
    mkdir -p ${LOG_PATH}
    status_log="${LOG_PATH}/results_serving.log"
    IFS='|'

    # pdserving
    set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}")
    set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
    set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}")
    set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}")

    trans_model_cmd="${python} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
    eval $trans_model_cmd

    # modify the alias_name of fetch_var to "prediction"
    server_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"prediction\"/' ${serving_server_value}/serving_server_conf.prototxt"
    eval ${server_fetch_var_line_cmd}
    client_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"prediction\"/' ${serving_client_value}/serving_client_conf.prototxt"
    eval ${client_fetch_var_line_cmd}
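    # After the two seds above, fetch_var in both generated prototxt files
    # carries alias_name "prediction", matching the fetch_list entry in the
    # pipeline config.yml.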

    prototxt_dataline=$(awk 'NR==1, NR==3{print}' ${serving_server_value}/serving_server_conf.prototxt)
    IFS=$'\n'
    prototxt_lines=(${prototxt_dataline})
    feed_var_name=$(func_parser_value "${prototxt_lines[2]}")
    IFS='|'

    cd ${serving_dir_value}
    unset https_proxy
    unset http_proxy

    # modify the input_name in "classification_web_service.py" to be consistent with feed_var.name in prototxt
    set_web_service_feed_var_cmd="sed -i '/preprocess/,/input_imgs}/s/{.*: input_imgs}/{${feed_var_name}: input_imgs}/' ${web_service_py}"
    eval ${set_web_service_feed_var_cmd}

    model_config=21
    serving_server_dir_name=$(func_get_url_file_name "$serving_server_value")
    set_model_config_cmd="sed -i '${model_config}s/model_config: .*/model_config: ${serving_server_dir_name}/' config.yml"
    eval ${set_model_config_cmd}
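    # The script assumes line 21 of config.yml holds the "model_config:" entry
    # and repoints it at the directory paddle_serving_client.convert just wrote.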

    for python in ${python[*]}; do
        if [[ ${python} = "cpp" ]]; then
            for use_gpu in ${web_use_gpu_list[*]}; do
                if [ ${use_gpu} = "null" ]; then
                    web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293"
                    eval $web_service_cpp_cmd
                    sleep 5s
                    _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
                    pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
                    eval $pipeline_cmd
                    status_check $last_status "${pipeline_cmd}" "${status_log}"
                    sleep 5s
                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
                else
                    web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293 --gpu_id=0"
                    eval $web_service_cpp_cmd
                    sleep 5s
                    _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
                    pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
                    eval $pipeline_cmd
                    status_check $last_status "${pipeline_cmd}" "${status_log}"
                    sleep 5s
                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
                fi
            done
        else
            # python serving
            for use_gpu in ${web_use_gpu_list[*]}; do
                if [[ ${use_gpu} = "null" ]]; then
                    device_type_line=24
                    set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 0/' config.yml"
                    eval $set_device_type_cmd

                    devices_line=27
                    set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"\"/' config.yml"
                    eval $set_devices_cmd
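                    # config.yml is assumed to keep "device_type:" on line 24 and
                    # "devices:" on line 27; device_type 0 with empty devices runs
                    # the pipeline on CPU, device_type 1 with devices "0" on GPU 0.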

                    web_service_cmd="${python} ${web_service_py} &"
                    eval $web_service_cmd
                    sleep 5s
                    for pipeline in ${pipeline_py[*]}; do
                        _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
                        pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1 "
                        eval $pipeline_cmd
                        last_status=${PIPESTATUS[0]}
                        eval "cat ${_save_log_path}"
                        status_check $last_status "${pipeline_cmd}" "${status_log}"
                        sleep 5s
                    done
                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
                elif [ ${use_gpu} -eq 0 ]; then
                    if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
                        continue
                    fi
                    if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
                        continue
                    fi
                    if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then
                        continue
                    fi

                    device_type_line=24
                    set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 1/' config.yml"
                    eval $set_device_type_cmd

                    devices_line=27
                    set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"${use_gpu}\"/' config.yml"
                    eval $set_devices_cmd

                    web_service_cmd="${python} ${web_service_py} & "
                    eval $web_service_cmd
                    sleep 5s
                    for pipeline in ${pipeline_py[*]}; do
                        _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
                        pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1"
                        eval $pipeline_cmd
                        last_status=${PIPESTATUS[0]}
                        eval "cat ${_save_log_path}"
                        status_check $last_status "${pipeline_cmd}" "${status_log}"
                        sleep 5s
                    done
                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
                else
                    echo "Does not support hardware [${use_gpu}] other than CPU and GPU Currently!"
                fi
            done
        fi
    done
}


function func_serving_rec(){
    LOG_PATH="../../../test_tipc/output/${model_name}"
    mkdir -p ${LOG_PATH}
    status_log="${LOG_PATH}/results_serving.log"
    trans_model_py=$(func_parser_value "${lines[5]}")
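    # For ShiTu configs, lines[3] and lines[4] carry the cls/det model URLs
    # (downloaded by prepare.sh), so trans_model sits at lines[5] instead of
    # lines[4].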
    cls_infer_model_dir_key=$(func_parser_key "${lines[6]}")
    cls_infer_model_dir_value=$(func_parser_value "${lines[6]}")
    det_infer_model_dir_key=$(func_parser_key "${lines[7]}")
    det_infer_model_dir_value=$(func_parser_value "${lines[7]}")
    model_filename_key=$(func_parser_key "${lines[8]}")
    model_filename_value=$(func_parser_value "${lines[8]}")
    params_filename_key=$(func_parser_key "${lines[9]}")
    params_filename_value=$(func_parser_value "${lines[9]}")

    cls_serving_server_key=$(func_parser_key "${lines[10]}")
    cls_serving_server_value=$(func_parser_value "${lines[10]}")
    cls_serving_client_key=$(func_parser_key "${lines[11]}")
    cls_serving_client_value=$(func_parser_value "${lines[11]}")

    det_serving_server_key=$(func_parser_key "${lines[12]}")
    det_serving_server_value=$(func_parser_value "${lines[12]}")
    det_serving_client_key=$(func_parser_key "${lines[13]}")
    det_serving_client_value=$(func_parser_value "${lines[13]}")

    serving_dir_value=$(func_parser_value "${lines[14]}")
    web_service_py=$(func_parser_value "${lines[15]}")
    web_use_gpu_key=$(func_parser_key "${lines[16]}")
    web_use_gpu_list=$(func_parser_value "${lines[16]}")
    pipeline_py=$(func_parser_value "${lines[17]}")

    IFS='|'

    # pdserving
    cd ./deploy
    set_dirname=$(func_set_params "${cls_infer_model_dir_key}" "${cls_infer_model_dir_value}")
    set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
    set_serving_server=$(func_set_params "${cls_serving_server_key}" "${cls_serving_server_value}")
    set_serving_client=$(func_set_params "${cls_serving_client_key}" "${cls_serving_client_value}")
    cls_trans_model_cmd="${python} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
    eval $cls_trans_model_cmd

    set_dirname=$(func_set_params "${det_infer_model_dir_key}" "${det_infer_model_dir_value}")
    set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
    set_serving_server=$(func_set_params "${det_serving_server_key}" "${det_serving_server_value}")
    set_serving_client=$(func_set_params "${det_serving_client_key}" "${det_serving_client_value}")
    det_trans_model_cmd="${python} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
    eval $det_trans_model_cmd

    # modify the alias_name of fetch_var to "features"
    server_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"features\"/' $cls_serving_server_value/serving_server_conf.prototxt"
    eval ${server_fetch_var_line_cmd}
    client_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"features\"/' $cls_serving_client_value/serving_client_conf.prototxt"
    eval ${client_fetch_var_line_cmd}

    prototxt_dataline=$(awk 'NR==1, NR==3{print}' ${cls_serving_server_value}/serving_server_conf.prototxt)
    IFS=$'\n'
    prototxt_lines=(${prototxt_dataline})
    feed_var_name=$(func_parser_value "${prototxt_lines[2]}")
    IFS='|'

    cd ${serving_dir_value}
    unset https_proxy
    unset http_proxy

    # modify the input_name in "recognition_web_service.py" to be consistent with feed_var.name in prototxt
    set_web_service_feed_var_cmd="sed -i '/preprocess/,/input_imgs}/s/{.*: input_imgs}/{${feed_var_name}: input_imgs}/' ${web_service_py}"
    eval ${set_web_service_feed_var_cmd}

    for python in ${python[*]}; do
        if [[ ${python} = "cpp" ]]; then
            for use_gpu in ${web_use_gpu_list[*]}; do
                if [ ${use_gpu} = "null" ]; then
                    web_service_cpp_cmd="${python} ${web_service_py}"
                    eval $web_service_cpp_cmd
                    sleep 5s
                    _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
                    pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
                    eval $pipeline_cmd
                    status_check $last_status "${pipeline_cmd}" "${status_log}"
                    sleep 5s
                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
                else
                    web_service_cpp_cmd="${python} ${web_service_py}"
                    eval $web_service_cpp_cmd
                    sleep 5s
                    _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
                    pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
                    eval $pipeline_cmd
                    status_check $last_status "${pipeline_cmd}" "${status_log}"
                    sleep 5s
                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
                fi
            done
        else
            # python serving
            for use_gpu in ${web_use_gpu_list[*]}; do
                if [[ ${use_gpu} = "null" ]]; then
                    device_type_line=24
                    set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 0/' config.yml"
                    eval $set_device_type_cmd

                    devices_line=27
                    set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"\"/' config.yml"
                    eval $set_devices_cmd

                    web_service_cmd="${python} ${web_service_py} &"
                    eval $web_service_cmd
                    sleep 5s
                    for pipeline in ${pipeline_py[*]}; do
                        _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
                        pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1 "
                        eval $pipeline_cmd
                        last_status=${PIPESTATUS[0]}
                        eval "cat ${_save_log_path}"
                        status_check $last_status "${pipeline_cmd}" "${status_log}"
                        sleep 5s
                    done
                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
                elif [ ${use_gpu} -eq 0 ]; then
                    if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
                        continue
                    fi
                    if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
                        continue
                    fi
                    if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then
                        continue
                    fi

                    device_type_line=24
                    set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 1/' config.yml"
                    eval $set_device_type_cmd

                    devices_line=27
                    set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"${use_gpu}\"/' config.yml"
                    eval $set_devices_cmd

                    web_service_cmd="${python} ${web_service_py} & "
                    eval $web_service_cmd
                    sleep 10s
                    for pipeline in ${pipeline_py[*]}; do
                        _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
                        pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1"
                        eval $pipeline_cmd
                        last_status=${PIPESTATUS[0]}
                        eval "cat ${_save_log_path}"
                        status_check $last_status "${pipeline_cmd}" "${status_log}"
                        sleep 10s
                    done
                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
                else
                    echo "Does not support hardware [${use_gpu}] other than CPU and GPU Currently!"
                fi
            done
        fi
    done
}


# set cuda device
GPUID=$2
if [ ${#GPUID} -le 0 ];then
    env=" "
else
    env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi
set CUDA_VISIBLE_DEVICES
eval $env


echo "################### run test ###################"

export Count=0
IFS="|"
if [[ ${model_name} =~ "ShiTu" ]]; then
    func_serving_rec
else
    func_serving_cls
fi
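The dispatch at the bottom routes ShiTu configs to `func_serving_rec` and everything else to `func_serving_cls`; a typical invocation, taken from the accompanying doc, is:

```bash
bash test_tipc/test_serving_infer_python.sh \
    test_tipc/configs/ResNet50/ResNet50_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt serving_infer
```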