Merge pull request #1964 from HydrogenSulfate/add_paddle2onnx_tipc
add paddle2onnx tipc chainpull/1970/head^2
commit
ff81fa0179
test_tipc
config
PPLCNet
|
@ -35,11 +35,14 @@
|
|||
│ ├── MobileNetV3 # MobileNetV3系列模型测试配置文件目录
|
||||
│ │ ├── MobileNetV3_large_x1_0_train_infer_python.txt #基础训练预测配置文件
|
||||
│ │ ├── MobileNetV3_large_x1_0_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt #多机多卡训练预测配置文件
|
||||
│ │ └── MobileNetV3_large_x1_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt #混合精度训练预测配置文件
|
||||
│ │ ├── MobileNetV3_large_x1_0_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt #多机多卡训练预测配置文件
|
||||
│ │ └── MobileNetV3_large_x1_0_paddle2onnx_infer_python.txt #paddle2onnx推理测试配置文件
|
||||
│ └── ResNet # ResNet系列模型测试配置文件目录
|
||||
│ ├── ResNet50_vd_train_infer_python.txt #基础训练预测配置文件
|
||||
│ ├── ResNet50_vd_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt #多机多卡训练预测配置文件
|
||||
│ └── ResNet50_vd_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt #混合精度训练预测配置文件
|
||||
│ ├── ResNet50_vd_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt #多机多卡训练预测配置文件
|
||||
│ ├── ResNet50_vd_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt #混合精度训练预测配置文件
|
||||
│ └── ResNet50_vd_paddle2onnx_infer_python.txt #paddle2onnx推理测试配置文件
|
||||
| ......
|
||||
├── docs
|
||||
│ ├── guide.png
|
||||
|
@ -47,6 +50,7 @@
|
|||
├── prepare.sh # 完成test_*.sh运行所需要的数据和模型下载
|
||||
├── README.md # 使用文档
|
||||
├── results # 预先保存的预测结果,用于和实际预测结果进行精度比对
|
||||
├── test_paddle2onnx.sh # 测试paddle2onnx推理预测的主程序
|
||||
└── test_train_inference_python.sh # 测试python训练预测的主程序
|
||||
```
|
||||
|
||||
|
|
|
@ -0,0 +1,16 @@
|
|||
===========================paddle2onnx_params===========================
|
||||
model_name:MobileNetV3_large_x1_0
|
||||
python:python3.7
|
||||
2onnx: paddle2onnx
|
||||
--model_dir:./deploy/models/MobileNetV3_large_x1_0_infer/
|
||||
--model_filename:inference.pdmodel
|
||||
--params_filename:inference.pdiparams
|
||||
--save_file:./deploy/models/MobileNetV3_large_x1_0_infer/inference.onnx
|
||||
--opset_version:10
|
||||
--enable_onnx_checker:True
|
||||
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileNetV3_large_x1_0_infer.tar
|
||||
inference:./python/predict_cls.py
|
||||
Global.use_onnx:True
|
||||
Global.inference_model_dir:./models/MobileNetV3_large_x1_0_infer
|
||||
Global.use_gpu:False
|
||||
-c:configs/inference_cls.yaml
|
|
@ -0,0 +1,16 @@
|
|||
===========================paddle2onnx_params===========================
|
||||
model_name:PP-ShiTu_general_rec
|
||||
python:python3.7
|
||||
2onnx: paddle2onnx
|
||||
--model_dir:./deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/
|
||||
--model_filename:inference.pdmodel
|
||||
--params_filename:inference.pdiparams
|
||||
--save_file:./deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.onnx
|
||||
--opset_version:10
|
||||
--enable_onnx_checker:True
|
||||
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar
|
||||
inference:./python/predict_cls.py
|
||||
Global.use_onnx:True
|
||||
Global.inference_model_dir:./models/general_PPLCNet_x2_5_lite_v1.0_infer
|
||||
Global.use_gpu:False
|
||||
-c:configs/inference_cls.yaml
|
|
@ -0,0 +1,16 @@
|
|||
===========================paddle2onnx_params===========================
|
||||
model_name:PP-ShiTu_mainbody_det
|
||||
python:python3.7
|
||||
2onnx: paddle2onnx
|
||||
--model_dir:./deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/
|
||||
--model_filename:inference.pdmodel
|
||||
--params_filename:inference.pdiparams
|
||||
--save_file:./deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.onnx
|
||||
--opset_version:10
|
||||
--enable_onnx_checker:True
|
||||
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
|
||||
inference:./python/predict_cls.py
|
||||
Global.use_onnx:True
|
||||
Global.inference_model_dir:./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer
|
||||
Global.use_gpu:False
|
||||
-c:configs/inference_cls.yaml
|
|
@ -0,0 +1,15 @@
|
|||
===========================paddle2onnx_params===========================
|
||||
model_name:PPHGNet_small
|
||||
python:python3.7
|
||||
2onnx: paddle2onnx
|
||||
--model_dir:./deploy/models/PPHGNet_small_infer/
|
||||
--model_filename:inference.pdmodel
|
||||
--params_filename:inference.pdiparams
|
||||
--save_file:./deploy/models/PPHGNet_small_infer/inference.onnx
|
||||
--opset_version:10
|
||||
--enable_onnx_checker:True
|
||||
inference:./python/predict_cls.py
|
||||
Global.use_onnx:True
|
||||
Global.inference_model_dir:./models/PPHGNet_small_infer
|
||||
Global.use_gpu:False
|
||||
-c:configs/inference_cls.yaml
|
|
@ -0,0 +1,15 @@
|
|||
===========================paddle2onnx_params===========================
|
||||
model_name:PPHGNet_tiny
|
||||
python:python3.7
|
||||
2onnx: paddle2onnx
|
||||
--model_dir:./deploy/models/PPHGNet_tiny_infer/
|
||||
--model_filename:inference.pdmodel
|
||||
--params_filename:inference.pdiparams
|
||||
--save_file:./deploy/models/PPHGNet_tiny_infer/inference.onnx
|
||||
--opset_version:10
|
||||
--enable_onnx_checker:True
|
||||
inference:./python/predict_cls.py
|
||||
Global.use_onnx:True
|
||||
Global.inference_model_dir:./models/PPHGNet_tiny_infer
|
||||
Global.use_gpu:False
|
||||
-c:configs/inference_cls.yaml
|
|
@ -0,0 +1,15 @@
|
|||
===========================paddle2onnx_params===========================
|
||||
model_name:PPLCNet_x0_25
|
||||
python:python3.7
|
||||
2onnx: paddle2onnx
|
||||
--model_dir:./deploy/models/PPLCNet_x0_25_infer/
|
||||
--model_filename:inference.pdmodel
|
||||
--params_filename:inference.pdiparams
|
||||
--save_file:./deploy/models/PPLCNet_x0_25_infer/inference.onnx
|
||||
--opset_version:10
|
||||
--enable_onnx_checker:True
|
||||
inference:./python/predict_cls.py
|
||||
Global.use_onnx:True
|
||||
Global.inference_model_dir:./models/PPLCNet_x0_25_infer
|
||||
Global.use_gpu:False
|
||||
-c:configs/inference_cls.yaml
|
|
@ -0,0 +1,16 @@
|
|||
===========================paddle2onnx_params===========================
|
||||
model_name:PPLCNet_x0_25
|
||||
python:python3.7
|
||||
2onnx: paddle2onnx
|
||||
--model_dir:./deploy/models/PPLCNet_x0_25_infer/
|
||||
--model_filename:inference.pdmodel
|
||||
--params_filename:inference.pdiparams
|
||||
--save_file:./deploy/models/PPLCNet_x0_25_infer/inference.onnx
|
||||
--opset_version:10
|
||||
--enable_onnx_checker:True
|
||||
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_25_infer.tar
|
||||
inference:./python/predict_cls.py
|
||||
Global.use_onnx:True
|
||||
Global.inference_model_dir:./models/PPLCNet_x0_25_infer
|
||||
Global.use_gpu:False
|
||||
-c:configs/inference_cls.yaml
|
|
@ -0,0 +1,16 @@
|
|||
===========================paddle2onnx_params===========================
|
||||
model_name:PP-ShiTu_mainbody_det
|
||||
python:python3.7
|
||||
2onnx: paddle2onnx
|
||||
--model_dir:./deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/
|
||||
--model_filename:inference.pdmodel
|
||||
--params_filename:inference.pdiparams
|
||||
--save_file:./deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.onnx
|
||||
--opset_version:10
|
||||
--enable_onnx_checker:True
|
||||
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
|
||||
inference:./python/predict_cls.py
|
||||
Global.use_onnx:True
|
||||
Global.inference_model_dir:./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer
|
||||
Global.use_gpu:False
|
||||
-c:configs/inference_cls.yaml
|
|
@ -0,0 +1,16 @@
|
|||
===========================paddle2onnx_params===========================
|
||||
model_name:PPLCNet_x0_75
|
||||
python:python3.7
|
||||
2onnx: paddle2onnx
|
||||
--model_dir:./deploy/models/PPLCNet_x0_75_infer/
|
||||
--model_filename:inference.pdmodel
|
||||
--params_filename:inference.pdiparams
|
||||
--save_file:./deploy/models/PPLCNet_x0_75_infer/inference.onnx
|
||||
--opset_version:10
|
||||
--enable_onnx_checker:True
|
||||
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_75_infer.tar
|
||||
inference:./python/predict_cls.py
|
||||
Global.use_onnx:True
|
||||
Global.inference_model_dir:./models/PPLCNet_x0_75_infer
|
||||
Global.use_gpu:False
|
||||
-c:configs/inference_cls.yaml
|
|
@ -0,0 +1,16 @@
|
|||
===========================paddle2onnx_params===========================
|
||||
model_name:PPLCNet_x1_0
|
||||
python:python3.7
|
||||
2onnx: paddle2onnx
|
||||
--model_dir:./deploy/models/PPLCNet_x1_0_infer/
|
||||
--model_filename:inference.pdmodel
|
||||
--params_filename:inference.pdiparams
|
||||
--save_file:./deploy/models/PPLCNet_x1_0_infer/inference.onnx
|
||||
--opset_version:10
|
||||
--enable_onnx_checker:True
|
||||
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_0_infer.tar
|
||||
inference:./python/predict_cls.py
|
||||
Global.use_onnx:True
|
||||
Global.inference_model_dir:./models/PPLCNet_x1_0_infer
|
||||
Global.use_gpu:False
|
||||
-c:configs/inference_cls.yaml
|
|
@ -0,0 +1,16 @@
|
|||
===========================paddle2onnx_params===========================
|
||||
model_name:PPLCNet_x1_5
|
||||
python:python3.7
|
||||
2onnx: paddle2onnx
|
||||
--model_dir:./deploy/models/PPLCNet_x1_5_infer/
|
||||
--model_filename:inference.pdmodel
|
||||
--params_filename:inference.pdiparams
|
||||
--save_file:./deploy/models/PPLCNet_x1_5_infer/inference.onnx
|
||||
--opset_version:10
|
||||
--enable_onnx_checker:True
|
||||
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_5_infer.tar
|
||||
inference:./python/predict_cls.py
|
||||
Global.use_onnx:True
|
||||
Global.inference_model_dir:./models/PPLCNet_x1_5_infer
|
||||
Global.use_gpu:False
|
||||
-c:configs/inference_cls.yaml
|
|
@ -0,0 +1,16 @@
|
|||
===========================paddle2onnx_params===========================
|
||||
model_name:PPLCNet_x2_0
|
||||
python:python3.7
|
||||
2onnx: paddle2onnx
|
||||
--model_dir:./deploy/models/PPLCNet_x2_0_infer/
|
||||
--model_filename:inference.pdmodel
|
||||
--params_filename:inference.pdiparams
|
||||
--save_file:./deploy/models/PPLCNet_x2_0_infer/inference.onnx
|
||||
--opset_version:10
|
||||
--enable_onnx_checker:True
|
||||
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_0_infer.tar
|
||||
inference:./python/predict_cls.py
|
||||
Global.use_onnx:True
|
||||
Global.inference_model_dir:./models/PPLCNet_x2_0_infer
|
||||
Global.use_gpu:False
|
||||
-c:configs/inference_cls.yaml
|
|
@ -0,0 +1,16 @@
|
|||
===========================paddle2onnx_params===========================
|
||||
model_name:PPLCNet_x2_5
|
||||
python:python3.7
|
||||
2onnx: paddle2onnx
|
||||
--model_dir:./deploy/models/PPLCNet_x2_5_infer/
|
||||
--model_filename:inference.pdmodel
|
||||
--params_filename:inference.pdiparams
|
||||
--save_file:./deploy/models/PPLCNet_x2_5_infer/inference.onnx
|
||||
--opset_version:10
|
||||
--enable_onnx_checker:True
|
||||
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_5_infer.tar
|
||||
inference:./python/predict_cls.py
|
||||
Global.use_onnx:True
|
||||
Global.inference_model_dir:./models/PPLCNet_x2_5_infer
|
||||
Global.use_gpu:False
|
||||
-c:configs/inference_cls.yaml
|
|
@ -0,0 +1,16 @@
|
|||
===========================paddle2onnx_params===========================
|
||||
model_name:PPLCNetV2_base
|
||||
python:python3.7
|
||||
2onnx: paddle2onnx
|
||||
--model_dir:./deploy/models/PPLCNetV2_base_infer/
|
||||
--model_filename:inference.pdmodel
|
||||
--params_filename:inference.pdiparams
|
||||
--save_file:./deploy/models/PPLCNetV2_base_infer/inference.onnx
|
||||
--opset_version:10
|
||||
--enable_onnx_checker:True
|
||||
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNetV2_base_infer.tar
|
||||
inference:./python/predict_cls.py
|
||||
Global.use_onnx:True
|
||||
Global.inference_model_dir:./models/PPLCNetV2_base_infer
|
||||
Global.use_gpu:False
|
||||
-c:configs/inference_cls.yaml
|
|
@ -0,0 +1,16 @@
|
|||
===========================paddle2onnx_params===========================
|
||||
model_name:ResNet50
|
||||
python:python3.7
|
||||
2onnx: paddle2onnx
|
||||
--model_dir:./deploy/models/ResNet50_infer/
|
||||
--model_filename:inference.pdmodel
|
||||
--params_filename:inference.pdiparams
|
||||
--save_file:./deploy/models/ResNet50_infer/inference.onnx
|
||||
--opset_version:10
|
||||
--enable_onnx_checker:True
|
||||
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_infer.tar
|
||||
inference:./python/predict_cls.py
|
||||
Global.use_onnx:True
|
||||
Global.inference_model_dir:./models/ResNet50_infer
|
||||
Global.use_gpu:False
|
||||
-c:configs/inference_cls.yaml
|
|
@ -8,7 +8,9 @@ python:python3.7
|
|||
--save_file:./deploy/models/ResNet50_vd_infer/inference.onnx
|
||||
--opset_version:10
|
||||
--enable_onnx_checker:True
|
||||
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar
|
||||
inference: python/predict_cls.py -c configs/inference_cls.yaml
|
||||
Global.use_onnx:True
|
||||
Global.inference_model_dir:models/ResNet50_vd_infer/
|
||||
Global.use_gpu:False
|
||||
-c:configs/inference_cls.yaml
|
||||
|
|
|
@ -0,0 +1,16 @@
|
|||
===========================paddle2onnx_params===========================
|
||||
model_name:SwinTransformer_tiny_patch4_window7_224
|
||||
python:python3.7
|
||||
2onnx: paddle2onnx
|
||||
--model_dir:./deploy/models/SwinTransformer_tiny_patch4_window7_224_infer/
|
||||
--model_filename:inference.pdmodel
|
||||
--params_filename:inference.pdiparams
|
||||
--save_file:./deploy/models/SwinTransformer_tiny_patch4_window7_224_infer/inference.onnx
|
||||
--opset_version:10
|
||||
--enable_onnx_checker:True
|
||||
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SwinTransformer_tiny_patch4_window7_224_infer.tar
|
||||
inference:./python/predict_cls.py
|
||||
Global.use_onnx:True
|
||||
Global.inference_model_dir:./models/SwinTransformer_tiny_patch4_window7_224_infer
|
||||
Global.use_gpu:False
|
||||
-c:configs/inference_cls.yaml
|
|
@ -0,0 +1,52 @@
|
|||
# Paddle2onnx预测功能测试
|
||||
|
||||
Paddle2ONNX预测功能测试的主程序为`test_paddle2onnx.sh`,可以测试Paddle2ONNX的模型转化功能,并验证正确性。
|
||||
|
||||
## 1. 测试结论汇总
|
||||
|
||||
基于训练是否使用量化,进行本测试的模型可以分为`正常模型`和`量化模型`,这两类模型对应的Paddle2ONNX预测功能汇总如下:
|
||||
|
||||
| 模型类型 |device |
|
||||
| ---- | ---- |
|
||||
| 正常模型 | GPU |
|
||||
| 正常模型 | CPU |
|
||||
|
||||
|
||||
## 2. 测试流程
|
||||
|
||||
以下内容以`ResNet50`模型的paddle2onnx测试为例
|
||||
|
||||
### 2.1 功能测试
|
||||
先运行`prepare.sh`准备数据和模型,然后运行`test_paddle2onnx.sh`进行测试,最终在`test_tipc/output/ResNet50`目录下生成`paddle2onnx_infer_*.log`后缀的日志文件
|
||||
下方展示以ResNet50为例的测试命令与结果。
|
||||
|
||||
```shell
|
||||
bash test_tipc/prepare.sh ./test_tipc/config/ResNet/ResNet50_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt paddle2onnx_infer
|
||||
|
||||
# 用法:
|
||||
bash test_tipc/test_paddle2onnx.sh ./test_tipc/config/ResNet/ResNet50_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
|
||||
```
|
||||
|
||||
#### 运行结果
|
||||
|
||||
各测试的运行情况会打印在 `./test_tipc/output/ResNet50/results_paddle2onnx.log` 中:
|
||||
运行成功时会输出:
|
||||
|
||||
```
|
||||
Run successfully with command - paddle2onnx --model_dir=./deploy/models/ResNet50_infer/ --model_filename=inference.pdmodel --params_filename=inference.pdiparams --save_file=./deploy/models/ResNet50_infer/inference.onnx --opset_version=10 --enable_onnx_checker=True!
|
||||
Run successfully with command - cd deploy && python3.7 ./python/predict_cls.py -o Global.inference_model_dir=./models/ResNet50_infer -o Global.use_onnx=True -o Global.use_gpu=False -c=configs/inference_cls.yaml > ../test_tipc/output/ResNet50/paddle2onnx_infer_cpu.log 2>&1 && cd ../!
|
||||
|
||||
```
|
||||
|
||||
运行失败时会输出:
|
||||
|
||||
```
|
||||
Run failed with command - paddle2onnx --model_dir=./deploy/models/ResNet50_infer/ --model_filename=inference.pdmodel --params_filename=inference.pdiparams --save_file=./deploy/models/ResNet50_infer/inference.onnx --opset_version=10 --enable_onnx_checker=True!
|
||||
Run failed with command - cd deploy && python3.7 ./python/predict_cls.py -o Global.inference_model_dir=./models/ResNet50_infer -o Global.use_onnx=True -o Global.use_gpu=False -c=configs/inference_cls.yaml > ../test_tipc/output/ResNet50/paddle2onnx_infer_cpu.log 2>&1 && cd ../!
|
||||
...
|
||||
```
|
||||
|
||||
|
||||
## 3. 更多教程
|
||||
|
||||
本文档为功能测试用,更详细的Paddle2onnx预测使用教程请参考:[Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX)
|
|
@ -174,13 +174,19 @@ fi
|
|||
if [ ${MODE} = "paddle2onnx_infer" ];then
    # Prepare the paddle2onnx environment: read the python interpreter and the
    # inference-model download URL from the parsed TIPC config file.
    python_name=$(func_parser_value "${lines[2]}")
    inference_model_url=$(func_parser_value "${lines[10]}")
    # Archive file name = last path component of the URL.
    tar_name=${inference_model_url##*/}

    # Fix: original command was "pip install install paddle2onnx", which
    # installs the unrelated "install" package in addition to paddle2onnx.
    ${python_name} -m pip install paddle2onnx
    ${python_name} -m pip install onnxruntime

    # Download and unpack the inference model declared in the config file
    # (replaces the previously hard-coded ResNet50_vd_infer.tar download).
    cd deploy
    mkdir models
    cd models
    wget -nc ${inference_model_url}
    tar xf ${tar_name}
    cd ../../
fi
|
||||
|
||||
if [ ${MODE} = "benchmark_train" ];then
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
#!/bin/bash
|
||||
source test_tipc/common_func.sh
|
||||
source test_tipc/common_func.sh
|
||||
|
||||
FILENAME=$1
|
||||
|
||||
|
@ -11,7 +11,7 @@ python=$(func_parser_value "${lines[2]}")
|
|||
|
||||
|
||||
# parser params
|
||||
dataline=$(awk 'NR==1, NR==14{print}' $FILENAME)
|
||||
dataline=$(awk 'NR==1, NR==16{print}' $FILENAME)
|
||||
IFS=$'\n'
|
||||
lines=(${dataline})
|
||||
|
||||
|
@ -31,16 +31,18 @@ opset_version_key=$(func_parser_key "${lines[8]}")
|
|||
opset_version_value=$(func_parser_value "${lines[8]}")
|
||||
enable_onnx_checker_key=$(func_parser_key "${lines[9]}")
|
||||
enable_onnx_checker_value=$(func_parser_value "${lines[9]}")
|
||||
# parser onnx inference
|
||||
inference_py=$(func_parser_value "${lines[10]}")
|
||||
use_onnx_key=$(func_parser_key "${lines[11]}")
|
||||
use_onnx_value=$(func_parser_value "${lines[11]}")
|
||||
inference_model_dir_key=$(func_parser_key "${lines[12]}")
|
||||
inference_model_dir_value=$(func_parser_value "${lines[12]}")
|
||||
inference_hardware_key=$(func_parser_key "${lines[13]}")
|
||||
inference_hardware_value=$(func_parser_value "${lines[13]}")
|
||||
# parser onnx inference
|
||||
inference_py=$(func_parser_value "${lines[11]}")
|
||||
use_onnx_key=$(func_parser_key "${lines[12]}")
|
||||
use_onnx_value=$(func_parser_value "${lines[12]}")
|
||||
inference_model_dir_key=$(func_parser_key "${lines[13]}")
|
||||
inference_model_dir_value=$(func_parser_value "${lines[13]}")
|
||||
inference_hardware_key=$(func_parser_key "${lines[14]}")
|
||||
inference_hardware_value=$(func_parser_value "${lines[14]}")
|
||||
inference_config_key=$(func_parser_key "${lines[15]}")
|
||||
inference_config_value=$(func_parser_value "${lines[15]}")
|
||||
|
||||
LOG_PATH="./test_tipc/output"
|
||||
LOG_PATH="./test_tipc/output/${model_name}"
|
||||
mkdir -p ./test_tipc/output
|
||||
status_log="${LOG_PATH}/results_paddle2onnx.log"
|
||||
|
||||
|
@ -65,7 +67,8 @@ function func_paddle2onnx(){
|
|||
set_model_dir=$(func_set_params "${inference_model_dir_key}" "${inference_model_dir_value}")
|
||||
set_use_onnx=$(func_set_params "${use_onnx_key}" "${use_onnx_value}")
|
||||
set_hardware=$(func_set_params "${inference_hardware_key}" "${inference_hardware_value}")
|
||||
infer_model_cmd="cd deploy && ${python} ${inference_py} -o ${set_model_dir} -o ${set_use_onnx} -o ${set_hardware} >${_save_log_path} 2>&1 && cd ../"
|
||||
set_inference_config=$(func_set_params "${inference_config_key}" "${inference_config_value}")
|
||||
infer_model_cmd="cd deploy && ${python} ${inference_py} -o ${set_model_dir} -o ${set_use_onnx} -o ${set_hardware} ${set_inference_config} > ${_save_log_path} 2>&1 && cd ../"
|
||||
eval $infer_model_cmd
|
||||
status_check $last_status "${infer_model_cmd}" "${status_log}"
|
||||
}
|
||||
|
@ -75,4 +78,4 @@ echo "################### run test ###################"
|
|||
|
||||
export Count=0
|
||||
IFS="|"
|
||||
func_paddle2onnx
|
||||
func_paddle2onnx
|
Loading…
Reference in New Issue