Merge pull request #1826 from cuicheng01/develop

Add cls G1-G2 model lite tipc
pull/1834/head
cuicheng01 2022-04-12 17:54:50 +08:00 committed by GitHub
commit a81542a038
24 changed files with 542 additions and 498 deletions

View File

@ -1,9 +1,9 @@
ARM_ABI = arm8
export ARM_ABI
include ../Makefile.def
LITE_ROOT=./inference_lite_lib.android.armv8
LITE_ROOT=../../../
include ${LITE_ROOT}/demo/cxx/Makefile.def
THIRD_PARTY_DIR=${LITE_ROOT}/third_party
@ -29,7 +29,7 @@ OPENCV_LIBS = ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PATH}/libs/libopencv_im
${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PATH}/3rdparty/libs/libtbb.a \
${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PATH}/3rdparty/libs/libcpufeatures.a
OPENCV_INCLUDE = -I../../../third_party/${OPENCV_VERSION}/${ARM_PATH}/include
OPENCV_INCLUDE = -I${LITE_ROOT}/third_party/${OPENCV_VERSION}/${ARM_PATH}/include
CXX_INCLUDES = $(INCLUDES) ${OPENCV_INCLUDE} -I$(LITE_ROOT)/cxx/include

View File

@ -1,6 +1,11 @@
clas_model_file ./MobileNetV3_large_x1_0.nb
label_path ./imagenet1k_label_list.txt
clas_model_file /data/local/tmp/arm_cpu/MobileNetV3_large_x1_0.nb
label_path /data/local/tmp/arm_cpu/imagenet1k_label_list.txt
resize_short_size 256
crop_size 224
visualize 0
num_threads 1
batch_size 1
precision FP32
runtime_device arm_cpu
enable_benchmark 0
tipc_benchmark 0
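
These keys are read as space-separated `key value` pairs. As a quick illustration (host-side `awk`, not part of the demo itself), a single value can be pulled out like this:

```shell
# print the value of one key from config.txt (field 2 after the key name)
awk '$1 == "num_threads" {print $2}' config.txt
```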

View File

@ -21,6 +21,7 @@
#include <opencv2/opencv.hpp>
#include <sys/time.h>
#include <vector>
#include "AutoLog/auto_log/lite_autolog.h"
using namespace paddle::lite_api; // NOLINT
using namespace std;
@ -149,8 +150,10 @@ cv::Mat CenterCropImg(const cv::Mat &img, const int &crop_size) {
std::vector<RESULT>
RunClasModel(std::shared_ptr<PaddlePredictor> predictor, const cv::Mat &img,
const std::map<std::string, std::string> &config,
const std::vector<std::string> &word_labels, double &cost_time) {
const std::vector<std::string> &word_labels, double &cost_time,
std::vector<double> *time_info) {
// Read img
auto preprocess_start = std::chrono::steady_clock::now();
int resize_short_size = stoi(config.at("resize_short_size"));
int crop_size = stoi(config.at("crop_size"));
int visualize = stoi(config.at("visualize"));
@ -172,8 +175,8 @@ RunClasModel(std::shared_ptr<PaddlePredictor> predictor, const cv::Mat &img,
std::vector<float> scale = {1 / 0.229f, 1 / 0.224f, 1 / 0.225f};
const float *dimg = reinterpret_cast<const float *>(img_fp.data);
NeonMeanScale(dimg, data0, img_fp.rows * img_fp.cols, mean, scale);
auto start = std::chrono::system_clock::now();
auto preprocess_end = std::chrono::steady_clock::now();
auto inference_start = std::chrono::system_clock::now();
// Run predictor
predictor->Run();
@ -181,9 +184,10 @@ RunClasModel(std::shared_ptr<PaddlePredictor> predictor, const cv::Mat &img,
std::unique_ptr<const Tensor> output_tensor(
std::move(predictor->GetOutput(0)));
auto *output_data = output_tensor->data<float>();
auto end = std::chrono::system_clock::now();
auto inference_end = std::chrono::system_clock::now();
auto postprocess_start = std::chrono::system_clock::now();
auto duration =
std::chrono::duration_cast<std::chrono::microseconds>(end - start);
std::chrono::duration_cast<std::chrono::microseconds>(inference_end - inference_start);
cost_time = double(duration.count()) *
std::chrono::microseconds::period::num /
std::chrono::microseconds::period::den;
@ -196,6 +200,13 @@ RunClasModel(std::shared_ptr<PaddlePredictor> predictor, const cv::Mat &img,
cv::Mat output_image;
auto results =
PostProcess(output_data, output_size, word_labels, output_image);
auto postprocess_end = std::chrono::system_clock::now();
std::chrono::duration<float> preprocess_diff = preprocess_end - preprocess_start;
time_info->push_back(double(preprocess_diff.count() * 1000));
std::chrono::duration<float> inference_diff = inference_end - inference_start;
time_info->push_back(double(inference_diff.count() * 1000));
std::chrono::duration<float> postprocess_diff = postprocess_end - postprocess_start;
time_info->push_back(double(postprocess_diff.count() * 1000));
if (visualize) {
std::string output_image_path = "./clas_result.png";
@ -309,6 +320,12 @@ int main(int argc, char **argv) {
std::string clas_model_file = config.at("clas_model_file");
std::string label_path = config.at("label_path");
std::string crop_size = config.at("crop_size");
int num_threads = stoi(config.at("num_threads"));
int batch_size = stoi(config.at("batch_size"));
std::string precision = config.at("precision");
std::string runtime_device = config.at("runtime_device");
bool tipc_benchmark = bool(stoi(config.at("tipc_benchmark")));
// Load Labels
std::vector<std::string> word_labels = LoadLabels(label_path);
@ -319,8 +336,9 @@ int main(int argc, char **argv) {
cv::cvtColor(srcimg, srcimg, cv::COLOR_BGR2RGB);
double run_time = 0;
std::vector<double> time_info;
std::vector<RESULT> results =
RunClasModel(clas_predictor, srcimg, config, word_labels, run_time);
RunClasModel(clas_predictor, srcimg, config, word_labels, run_time, &time_info);
std::cout << "===clas result for image: " << img_path << "===" << std::endl;
for (int i = 0; i < results.size(); i++) {
@ -338,6 +356,19 @@ int main(int argc, char **argv) {
} else {
std::cout << "Current time cost: " << run_time << " s." << std::endl;
}
if (tipc_benchmark) {
AutoLogger autolog(clas_model_file,
runtime_device,
num_threads,
batch_size,
crop_size,
precision,
time_info,
1);
std::cout << "=======================TIPC Lite Information=======================" << std::endl;
autolog.report();
}
}
return 0;
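
When `tipc_benchmark` is set to 1 in the pushed `config.txt`, the report printed by `autolog.report()` goes to stdout; the TIPC harness added later in this PR captures it by redirecting the adb invocation into a log file, roughly like this (paths and log name follow the conventions used in the test scripts):

```shell
# run on the device and keep the "TIPC Lite Information" block in a log file
mkdir -p ./output
adb shell 'export LD_LIBRARY_PATH=/data/local/tmp/arm_cpu/; /data/local/tmp/arm_cpu/clas_system /data/local/tmp/arm_cpu/config.txt /data/local/tmp/arm_cpu/tabby_cat.jpg' > ./output/lite_MobileNetV3_large_x1_0_runtime_device_arm_cpu_precision_FP32_batchsize_1_threads_1.log 2>&1
```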

View File

@ -25,8 +25,8 @@ Paddle Lite is PaddlePaddle's lightweight inference engine, providing efficient inference for mobile and IoT devices
1. [Recommended] Download directly. The inference library download links are as follows:
|Platform|Inference Library Download Link|
|-|-|
|Android|[arm7](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/Android/gcc/inference_lite_lib.android.armv7.gcc.c++_static.with_extra.with_cv.tar.gz) / [arm8](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/Android/gcc/inference_lite_lib.android.armv8.gcc.c++_static.with_extra.with_cv.tar.gz)|
|iOS|[arm7](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/iOS/inference_lite_lib.ios.armv7.with_cv.with_extra.tiny_publish.tar.gz) / [arm8](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/iOS/inference_lite_lib.ios.armv8.with_cv.with_extra.tiny_publish.tar.gz)|
|Android|[arm7](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.android.armv7.clang.c++_static.with_extra.with_cv.tar.gz) / [arm8](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv.tar.gz)|
|iOS|[arm7](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.ios.armv7.with_cv.with_extra.tiny_publish.tar.gz) / [arm8](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.ios.armv8.with_cv.with_extra.tiny_publish.tar.gz)|
**Note**:
1. If the inference library was downloaded from the Paddle-Lite [official documentation](https://paddle-lite.readthedocs.io/zh/latest/quick_start/release_lib.html#android-toolchain-gcc),
@ -44,11 +44,11 @@ git checkout develop
**Note**: When compiling Paddle-Lite to obtain the inference library, the two options `--with_cv=ON --with_extra=ON` must be enabled, and `--arch` indicates the `arm` version, here set to armv8. For more compilation commands, see this [link](https://paddle-lite.readthedocs.io/zh/latest/user_guides/Compile/Android.html#id2).
After downloading and extracting the inference library, you get the `inference_lite_lib.android.armv8/` folder; the inference library obtained by compiling Paddle-Lite is located in the `Paddle-Lite/build.lite.android.armv8.gcc/inference_lite_lib.android.armv8/` folder.
After downloading and extracting the inference library, you get the `inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv/` folder; the inference library obtained by compiling Paddle-Lite is located in the `Paddle-Lite/build.lite.android.armv8.gcc/inference_lite_lib.android.armv8/` folder.
The directory structure of the inference library is as follows:
```
inference_lite_lib.android.armv8/
inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv/
|-- cxx C++ inference library and header files
| |-- include C++ header files
| | |-- paddle_api.h
@ -86,7 +86,7 @@ Install `paddlelite` via Python; currently Python3.7 is the highest supported version.
**Note**: The version of the `paddlelite` wheel package must match the version of the inference library.
```shell
pip install paddlelite==2.8
pip install paddlelite==2.10
```
After that, the `paddle_lite_opt` tool can be used to convert the inference model. Some of the parameters of `paddle_lite_opt` are as follows:
@ -146,6 +146,24 @@ paddle_lite_opt --model_file=./MobileNetV3_large_x1_0_infer/inference.pdmodel --
**Note**: The `--optimize_out` parameter is the save path of the optimized model (no need to add the `.nb` suffix); the `--model_file` parameter is the path of the model structure file and the `--param_file` parameter is the path of the model weight file. Please pay attention to the file names.
<a name="2.1.4"></a>
#### 2.1.4 Compile to get the executable file clas_system
```shell
# Clone the AutoLog repository for automated logging
cd PaddleClas_root_path
cd deploy/lite/
git clone https://github.com/LDOUBLEV/AutoLog.git
```
```shell
# Compile
make -j
```
After executing the `make` command, the `clas_system` executable file is generated in the current directory; this file is used for Lite prediction.
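
Optionally, you can sanity-check that the binary was cross-compiled for the phone rather than the host; `file` is a standard host tool and not part of this demo:

```shell
# for an armv8 build the output should mention "ELF 64-bit" and "ARM aarch64"
file clas_system
```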
<a name="2.2与手机联调"></a>
### 2.2 Run on the phone
@ -167,7 +185,7 @@ paddle_lite_opt --model_file=./MobileNetV3_large_x1_0_infer/inference.pdmodel --
To install ADB on Windows, download the ADB package from Google's Android platform and install it: [link](https://developer.android.com/studio)
4. After connecting the phone to the computer, enable the phone's `USB debugging` option, select `file transfer` mode, and enter in the computer terminal:
3. After connecting the phone to the computer, enable the phone's `USB debugging` option, select `file transfer` mode, and enter in the computer terminal:
```shell
adb devices
@ -178,40 +196,18 @@ List of devices attached
744be294 device
```
5. Prepare the optimized model, the inference library files, the test image, and the category mapping file.
4. Push the optimized model, the inference library file, the test image, and the category mapping file to the phone.
```shell
cd PaddleClas_root_path
cd deploy/lite/
# Run prepare.sh
# prepare.sh puts the inference library files, the test image, and the dictionary file into the demo/cxx/clas folder of the inference library
sh prepare.sh /{lite prediction library path}/inference_lite_lib.android.armv8
# Enter the working directory of the lite demo
cd /{lite prediction library path}/inference_lite_lib.android.armv8/
cd demo/cxx/clas/
# Copy the C++ inference dynamic library (.so) into the debug folder
cp ../../../cxx/lib/libpaddle_light_api_shared.so ./debug/
```
`prepare.sh` takes `PaddleClas/deploy/lite/imgs/tabby_cat.jpg` as the test image and copies it into the `demo/cxx/clas/debug/` folder.
Place the model file optimized by the `paddle_lite_opt` tool into the `/{lite prediction library path}/inference_lite_lib.android.armv8/demo/cxx/clas/debug/` folder. In this example, use the `MobileNetV3_large_x1_0.nb` model file generated in [2.1.3](#2.1.3).
After completion, the clas folder will contain the following files:
```
demo/cxx/clas/
|-- debug/
| |--MobileNetV3_large_x1_0.nb The optimized classification model file
| |--tabby_cat.jpg The test image
| |--imagenet1k_label_list.txt The category mapping file
| |--libpaddle_light_api_shared.so The C++ inference library file
| |--config.txt The hyperparameter config for classification prediction
|-- config.txt The hyperparameter config for classification prediction
|-- image_classfication.cpp The image classification source file
|-- Makefile The Makefile
adb shell mkdir -p /data/local/tmp/arm_cpu/
adb push clas_system /data/local/tmp/arm_cpu/
adb shell chmod +x /data/local/tmp/arm_cpu//clas_system
adb push inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv/cxx/lib/libpaddle_light_api_shared.so /data/local/tmp/arm_cpu/
adb push MobileNetV3_large_x1_0.nb /data/local/tmp/arm_cpu/
adb push config.txt /data/local/tmp/arm_cpu/
adb push ../../ppcls/utils/imagenet1k_label_list.txt /data/local/tmp/arm_cpu/
adb push imgs/tabby_cat.jpg /data/local/tmp/arm_cpu/
```
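
Before running, you can optionally confirm that all files arrived on the device (plain `adb shell ls`, nothing specific to this demo):

```shell
# expect clas_system, libpaddle_light_api_shared.so, the .nb model, config.txt, the label file and the test image
adb shell ls -l /data/local/tmp/arm_cpu/
```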
#### Note:
@ -224,32 +220,22 @@ clas_model_file ./MobileNetV3_large_x1_0.nb # path of the model file
label_path ./imagenet1k_label_list.txt # category mapping text file
resize_short_size 256 # length of the short side after resize
crop_size 224 # side length used for prediction after cropping
visualize 0 # whether to visualize; if enabled, an image named clas_result.png is generated in the current folder
num_threads 1 # number of threads, default is 1
precision FP32 # precision type, FP32 or INT8, default is FP32
runtime_device arm_cpu # device type, default is arm_cpu
enable_benchmark 0 # whether to enable benchmark, default is 0
tipc_benchmark 0 # whether to enable tipc_benchmark, default is 0
```
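
Because the config is just a text file pushed to the device, any of the values above can be changed on the host and pushed again before the next run; for example, a small sketch of switching to 4 threads (GNU `sed` on the host assumed):

```shell
# update the local copy, then overwrite the one on the device
sed -i 's/^num_threads .*/num_threads 4/' config.txt
adb push config.txt /data/local/tmp/arm_cpu/
```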
5. Start debugging. After the above steps are completed, use ADB to push the `debug/` folder to the phone and run it; the steps are as follows:
5. Run the prediction command
Execute the following command to complete the prediction on the phone.
```shell
# Compile to get the executable clas_system
make -j
# Move the compiled executable into the debug folder
mv clas_system ./debug/
# Push the debug folder to the phone
adb push debug /data/local/tmp/
adb shell
cd /data/local/tmp/debug
export LD_LIBRARY_PATH=/data/local/tmp/debug:$LD_LIBRARY_PATH
# The usage of the clas_system executable is:
# ./clas_system <config file path> <test image path>
./clas_system ./config.txt ./tabby_cat.jpg
adb shell 'export LD_LIBRARY_PATH=/data/local/tmp/arm_cpu/; /data/local/tmp/arm_cpu/clas_system /data/local/tmp/arm_cpu/config.txt /data/local/tmp/arm_cpu/tabby_cat.jpg'
```
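
If `visualize` is set to 1, the demo writes `clas_result.png` into its current working directory; one hedged way to retrieve it is to run from the work directory and pull the file back afterwards:

```shell
# run with the work directory as cwd so ./clas_result.png lands there, then copy it to the host
adb shell 'cd /data/local/tmp/arm_cpu/; export LD_LIBRARY_PATH=/data/local/tmp/arm_cpu/; ./clas_system ./config.txt ./tabby_cat.jpg'
adb pull /data/local/tmp/arm_cpu/clas_result.png ./
```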
If you modify the code, you need to recompile and push it to the phone again.
The running result is as follows:
<div align="center">
@ -263,3 +249,4 @@ A1: If you have already gone through the above steps, switching models only requires replacing the `.nb` model
Q2: How do I test with a different image?
A2: Replace the test image under debug with the image you want to test and push it to the phone again with ADB.

View File

@ -18,6 +18,7 @@ If you only want to test speed, please refer to [The tutorial of Paddle-Lite mob
- [2.1.1 [RECOMMEND] Use pip to install Paddle-Lite and optimize model](#2.1.1)
- [2.1.2 Compile Paddle-Lite to generate opt tool](#2.1.2)
- [2.1.3 Demo of get the optimized model](#2.1.3)
- [2.1.4 Compile to get the executable file clas_system](#2.1.4)
- [2.2 Run optimized model on Phone](#2.2)
- [3. FAQ](#3)
@ -40,8 +41,8 @@ For the detailed compilation directions of different development environments, p
|Platform|Inference Library Download Link|
|-|-|
|Android|[arm7](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/Android/gcc/inference_lite_lib.android.armv7.gcc.c++_static.with_extra.with_cv.tar.gz) / [arm8](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/Android/gcc/inference_lite_lib.android.armv8.gcc.c++_static.with_extra.with_cv.tar.gz)|
|iOS|[arm7](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/iOS/inference_lite_lib.ios.armv7.with_cv.with_extra.tiny_publish.tar.gz) / [arm8](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/iOS/inference_lite_lib.ios.armv8.with_cv.with_extra.tiny_publish.tar.gz)|
|Android|[arm7](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.android.armv7.clang.c++_static.with_extra.with_cv.tar.gz) / [arm8](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv.tar.gz) |
|iOS|[arm7](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.ios.armv7.with_cv.with_extra.tiny_publish.tar.gz) / [arm8](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.ios.armv8.with_cv.with_extra.tiny_publish.tar.gz)|
**NOTE**:
@ -53,7 +54,7 @@ For the detailed compilation directions of different development environments, p
The structure of the inference library is as follows:
```
inference_lite_lib.android.armv8/
inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv/
|-- cxx C++ inference library and header files
| |-- include C++ header files
| | |-- paddle_api.h
@ -148,6 +149,23 @@ paddle_lite_opt --model_file=./MobileNetV3_large_x1_0_infer/inference.pdmodel --
```
When the above command completes, a `MobileNetV3_large_x1_0.nb` file will be generated in the current directory, which is the converted model file.
<a name="2.1.4"></a>
#### 2.1.4 Compile to get the executable file clas_system
```shell
# Clone the AutoLog repository for automated logging
cd PaddleClas_root_path
cd deploy/lite/
git clone https://github.com/LDOUBLEV/AutoLog.git
```
```shell
# Compile
make -j
```
After executing the `make` command, the `clas_system` executable file is generated in the current directory, which is used for Lite prediction.
<a name="2.2"></a>
## 2.2 Run optimized model on Phone
@ -172,7 +190,7 @@ When the above code command is completed, there will be ``MobileNetV3_large_x1_0
* Install ADB for windows
To install ADB on Windows, you need to download the ADB package from Google's Android platform: [Download Link](https://developer.android.com/studio).
First, make sure the phone is connected to the computer, turn on the `USB debugging` option of the phone, and select the `file transfer` mode. Verify whether ADB is installed successfully as follows:
3. First, make sure the phone is connected to the computer, turn on the `USB debugging` option of the phone, and select the `file transfer` mode. Verify whether ADB is installed successfully as follows:
```shell
$ adb devices
@ -183,42 +201,22 @@ When the above code command is completed, there will be ``MobileNetV3_large_x1_0
If there is `device` output like the above, it means the installation was successful.
4. Prepare the optimized model, the inference library files, the test image, and the dictionary file to be used.
4. Push the optimized model, prediction library file, test image and class map file to the phone.
```shell
cd PaddleClas_root_path
cd deploy/lite/
# prepare.sh will put the inference library files, the test image and the dictionary files in demo/cxx/clas
sh prepare.sh /{lite inference library path}/inference_lite_lib.android.armv8
# enter the working directory of lite demo
cd /{lite inference library path}/inference_lite_lib.android.armv8/
cd demo/cxx/clas/
# copy the C++ inference dynamic library file (i.e., the .so file) to the debug folder
cp ../../../cxx/lib/libpaddle_light_api_shared.so ./debug/
```

```shell
adb shell mkdir -p /data/local/tmp/arm_cpu/
adb push clas_system /data/local/tmp/arm_cpu/
adb shell chmod +x /data/local/tmp/arm_cpu//clas_system
adb push inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv/cxx/lib/libpaddle_light_api_shared.so /data/local/tmp/arm_cpu/
adb push MobileNetV3_large_x1_0.nb /data/local/tmp/arm_cpu/
adb push config.txt /data/local/tmp/arm_cpu/
adb push ../../ppcls/utils/imagenet1k_label_list.txt /data/local/tmp/arm_cpu/
adb push imgs/tabby_cat.jpg /data/local/tmp/arm_cpu/
```
`prepare.sh` takes `PaddleClas/deploy/lite/imgs/tabby_cat.jpg` as the test image and copies it to the `demo/cxx/clas/debug/` directory.
You should put the model optimized by `paddle_lite_opt` under the `demo/cxx/clas/debug/` directory. In this example, use the `MobileNetV3_large_x1_0.nb` model file generated in [2.1.3](#2.1.3).
The structure of the clas demo is as follows after the above commands are completed:
```
demo/cxx/clas/
|-- debug/
| |--MobileNetV3_large_x1_0.nb class model
| |--tabby_cat.jpg test image
| |--imagenet1k_label_list.txt dictionary file
| |--libpaddle_light_api_shared.so C++ .so file
| |--config.txt config file
|-- config.txt config file
|-- image_classfication.cpp source code
|-- Makefile compile file
```
**NOTE**:
* `imagenet1k_label_list.txt` is the category mapping file of the ImageNet1k dataset. If you use custom categories, you need to replace this category mapping file.
@ -229,33 +227,22 @@ clas_model_file ./MobileNetV3_large_x1_0.nb # path of model file
label_path ./imagenet1k_label_list.txt # path of category mapping file
resize_short_size 256 # the short side length after resize
crop_size 224 # side length used for inference after cropping
visualize 0 # whether to visualize. If you set it to 1, an image file named 'clas_result.png' will be generated in the current directory.
num_threads 1 # The number of threads, the default is 1
precision FP32 # Precision type, you can choose FP32 or INT8, the default is FP32
runtime_device arm_cpu # Device type, the default is arm_cpu
enable_benchmark 0 # Whether to enable benchmark, the default is 0
tipc_benchmark 0 # Whether to enable tipc_benchmark, the default is 0
```
5. Run Model on Phone
Execute the following command to complete the prediction on the mobile phone.
```shell
# run compile to get the executable file 'clas_system'
make -j
# move the compiled executable file to the debug folder
mv clas_system ./debug/
# push the debug folder to Phone
adb push debug /data/local/tmp/
adb shell
cd /data/local/tmp/debug
export LD_LIBRARY_PATH=/data/local/tmp/debug:$LD_LIBRARY_PATH
# the usage of clas_system is as follows:
# ./clas_system "path of config file" "path of test image"
./clas_system ./config.txt ./tabby_cat.jpg
adb shell 'export LD_LIBRARY_PATH=/data/local/tmp/arm_cpu/; /data/local/tmp/arm_cpu/clas_system /data/local/tmp/arm_cpu/config.txt /data/local/tmp/arm_cpu/tabby_cat.jpg'
```
**NOTE**: If you make changes to the code, you need to recompile and push the `debug` folder to the phone again.
The result is as follows:
![](../../images/inference_deployment/lite_demo_result.png)

View File

@ -1,11 +1,8 @@
# PaddleLite Inference Deployment
---
# On-Device Deployment
This tutorial introduces the detailed steps for deploying PaddleClas classification models on mobile devices based on [Paddle Lite](https://github.com/PaddlePaddle/Paddle-Lite). Deployment of recognition models will be supported in the near future.
This tutorial introduces the detailed steps for deploying PaddleClas classification models on mobile devices based on [Paddle Lite](https://github.com/PaddlePaddle/Paddle-Lite).
Paddle Lite is PaddlePaddle's lightweight inference engine. It provides efficient inference capabilities for mobile and IoT devices and broadly integrates cross-platform hardware, offering a lightweight deployment solution for on-device applications.
If you only want to test speed, please refer to the [Paddle-Lite mobile benchmark test tutorial](../others/paddle_mobile_inference.md).
Paddle Lite is PaddlePaddle's lightweight inference engine. It provides efficient inference capabilities for mobile and IoT devices and broadly integrates cross-platform hardware, offering a lightweight deployment solution for on-device applications. If you only want to test speed, please refer to the [Paddle-Lite mobile benchmark test tutorial](../../docs/zh_CN/extension/paddle_mobile_inference.md).
---
@ -18,53 +15,54 @@ Paddle Lite is PaddlePaddle's lightweight inference engine, providing efficient inference for mobile and IoT
- [2.1.1 Install paddlelite via pip and convert the model](#2.1.1)
- [2.1.2 Compile Paddle-Lite from source to generate the opt tool](#2.1.2)
- [2.1.3 Conversion example](#2.1.3)
- [2.1.4 Compile to get the executable file clas_system](#2.1.4)
- [2.2 Run on the phone](#2.2)
- [3. FAQ](#3)
<a name="1"></a>
## 1. Prepare the Environment
Paddle Lite currently supports deployment on the following platforms:
* Computer (for compiling Paddle Lite)
* Android phone (armv7 or armv8)
### Prerequisites
- Computer (for compiling Paddle Lite)
- Android phone (armv7 or armv8)
<a name="1.1"></a>
### 1.1 Prepare the cross-compilation environment
The cross-compilation environment is used to compile the C++ demos of Paddle Lite and PaddleClas.
Multiple development environments are supported; please refer to the corresponding documentation for the compilation process of each environment.
Multiple development environments are supported. For the compilation process under Docker, Linux, macOS, Windows, and other development environments, please refer to the [documentation](https://paddle-lite.readthedocs.io/zh/latest/source_compile/compile_env.html).
1. [Docker](https://paddle-lite.readthedocs.io/zh/latest/source_compile/compile_env.html#docker)
2. [Linux](https://paddle-lite.readthedocs.io/zh/latest/source_compile/compile_env.html#linux)
3. [MAC OS](https://paddle-lite.readthedocs.io/zh/latest/source_compile/compile_env.html#mac-os)
<a name="1.2"></a>
### 1.2 Prepare the inference library
There are two ways to obtain the inference library:
1. [Recommended] Download directly. The inference library download links are as follows:
|Platform|Inference Library Download Link|
|-|-|
|Android|[arm7](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/Android/gcc/inference_lite_lib.android.armv7.gcc.c++_static.with_extra.with_cv.tar.gz) / [arm8](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/Android/gcc/inference_lite_lib.android.armv8.gcc.c++_static.with_extra.with_cv.tar.gz)|
|iOS|[arm7](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/iOS/inference_lite_lib.ios.armv7.with_cv.with_extra.tiny_publish.tar.gz) / [arm8](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/iOS/inference_lite_lib.ios.armv8.with_cv.with_extra.tiny_publish.tar.gz)|
|Android|[arm7](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.android.armv7.clang.c++_static.with_extra.with_cv.tar.gz) / [arm8](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv.tar.gz)|
|iOS|[arm7](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.ios.armv7.with_cv.with_extra.tiny_publish.tar.gz) / [arm8](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.ios.armv8.with_cv.with_extra.tiny_publish.tar.gz)|
**Note**:
1. If the inference library was downloaded from the Paddle-Lite [official documentation](https://paddle-lite.readthedocs.io/zh/latest/quick_start/release_lib.html#android-toolchain-gcc),
make sure to choose the download link with `with_extra=ON, with_cv=ON`.
2. If a quantized model is deployed on the device side, it is recommended to compile the inference library from the Paddle-Lite develop branch.
2. Compile Paddle-Lite to obtain the inference library. Paddle-Lite is compiled as follows:
```shell
git clone https://github.com/PaddlePaddle/Paddle-Lite.git
cd Paddle-Lite
# If compiling from source, it is recommended to use the develop branch to build the inference library
git checkout develop
./lite/tools/build_android.sh --arch=armv8 --with_cv=ON --with_extra=ON
```
**Note**: When compiling Paddle-Lite to obtain the inference library, the two options `--with_cv=ON --with_extra=ON` must be enabled, and `--arch` indicates the `arm` version, here set to armv8. For more compilation commands, see [Compiling libraries for Android in a Linux x86 environment](https://paddle-lite.readthedocs.io/zh/latest/source_compile/linux_x86_compile_android.html); for other platforms, refer to the source compilation section of [PaddleLite](https://paddle-lite.readthedocs.io/zh/latest/).
After downloading and extracting the inference library, you get the `inference_lite_lib.android.armv8/` folder; the inference library obtained by compiling Paddle-Lite is located in the `Paddle-Lite/build.lite.android.armv8.gcc/inference_lite_lib.android.armv8/` folder.
**Note**: When compiling Paddle-Lite to obtain the inference library, the two options `--with_cv=ON --with_extra=ON` must be enabled, and `--arch` indicates the `arm` version, here set to armv8. For more compilation commands, see this [link](https://paddle-lite.readthedocs.io/zh/latest/user_guides/Compile/Android.html#id2).
After downloading and extracting the inference library, you get the `inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv/` folder; the inference library obtained by compiling Paddle-Lite is located in the `Paddle-Lite/build.lite.android.armv8.gcc/inference_lite_lib.android.armv8/` folder.
The directory structure of the inference library is as follows:
```
inference_lite_lib.android.armv8/
inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv/
|-- cxx C++ inference library and header files
| |-- include C++ header files
| | |-- paddle_api.h
@ -77,7 +75,7 @@ inference_lite_lib.android.armv8/
| `-- lib C++ inference library
| |-- libpaddle_api_light_bundled.a C++ static library
| `-- libpaddle_light_api_shared.so C++ dynamic library
|-- java Java inference library
| |-- jar
| | `-- PaddlePredictor.jar
| |-- so
@ -88,47 +86,43 @@ inference_lite_lib.android.armv8/
| `-- java Java inference library demo
```
<a name="2"></a>
## 2. Getting Started
## 2 Getting Started
<a name="2.1"></a>
### 2.1 Model Optimization
Paddle-Lite provides multiple strategies to automatically optimize the original model, including quantization, subgraph fusion, mixed precision, kernel selection, and other methods. The `opt` tool of Paddle-Lite can automatically optimize inference models. Two optimization approaches are currently supported; the optimized model is lighter and runs faster. Before optimizing the model, the `opt` tool needs to be prepared, in one of the following two ways.
Paddle-Lite provides multiple strategies to automatically optimize the original model, including quantization, subgraph fusion, hybrid scheduling, kernel selection, and other methods. The `opt` tool of Paddle-Lite can automatically optimize inference models. Two optimization approaches are currently supported; the optimized model is lighter and runs faster.
**Note**: If you already have a model file ending in `.nb`, you can skip this step.
<a name="2.1.1"></a>
#### 2.1.1 [Recommended] Install paddlelite via pip and convert the model
Install `paddlelite` via Python; currently `Python3.7` is the highest supported version.
**Note**: The version of the `paddlelite` wheel package must match the version of the inference library.
```shell
pip install paddlelite==2.8
pip install paddlelite==2.10
```
After that, the `paddle_lite_opt` tool can be used to convert the inference model. Some of the parameters of `paddle_lite_opt` are as follows:
|Option|Description|
|-|-|
|--model_dir|Path of the PaddlePaddle model to be optimized (non-combined form)|
|--model_file|Path of the network structure file of the PaddlePaddle model to be optimized (combined form)|
|--param_file|Path of the weight file of the PaddlePaddle model to be optimized (combined form)|
|--optimize_out_type|Output model type. Two types are currently supported: protobuf and naive_buffer, where naive_buffer is a more lightweight serialization/deserialization implementation. If you need to run inference on mobile, set this option to naive_buffer. Default: protobuf|
|--optimize_out|Output path of the optimized model|
|--valid_targets|Backends on which the model can run; the default is arm. Currently x86, arm, opencl, npu, and xpu are supported, and multiple backends can be specified at the same time (separated by spaces); the Model Optimize Tool will automatically choose the best one. To support Huawei NPU (the DaVinci-architecture NPU in Kirin 810/990 SoCs), set this to npu, arm|
|--record_tailoring_info|When using the "tailor the library by model" feature, set this option to true to record the kernel and OP information contained in the optimized model. Default: false|
`--model_file` is the path of the inference model's model file, `--param_file` is the path of the inference model's param file, and `optimize_out` specifies the name of the output file (no need to add the `.nb` suffix). Run `paddle_lite_opt` directly in the command line to see all parameters and their descriptions.
<a name="2.1.2"></a>
#### 2.1.2 Compile Paddle-Lite from source to generate the opt tool
Model optimization requires Paddle-Lite's `opt` executable, which can be obtained by compiling the Paddle-Lite source code. The compilation steps are as follows:
```shell
# If Paddle-Lite was already cloned when preparing the environment, there is no need to clone it again
git clone https://github.com/PaddlePaddle/Paddle-Lite.git
cd Paddle-Lite
git checkout develop
@ -136,146 +130,137 @@ git checkout develop
./lite/tools/build.sh build_optimize_tool
```
After compilation, the `opt` file is located under `build.opt/lite/api/`. The options and usage of `opt` can be viewed as follows:
```shell
cd build.opt/lite/api/
./opt
```
The usage and parameters of `opt` are exactly the same as those of `paddle_lite_opt` above.
<a name="2.1.3"></a>
#### 2.1.3 Conversion example
The following takes the PaddleClas `MobileNetV3_large_x1_0` model as an example to introduce how to use `paddle_lite_opt` to convert the pretrained model to an inference model and then to a Paddle-Lite optimized model.
```shell
# Enter the PaddleClas root directory
cd PaddleClas_root_path
# Download and extract the inference model
wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileNetV3_large_x1_0_infer.tar
tar -xf MobileNetV3_large_x1_0_infer.tar
# Convert the inference model into a Paddle-Lite optimized model
paddle_lite_opt --model_file=./MobileNetV3_large_x1_0_infer/inference.pdmodel --param_file=./MobileNetV3_large_x1_0_infer/inference.pdiparams --optimize_out=./MobileNetV3_large_x1_0
```
Finally, a file named `MobileNetV3_large_x1_0.nb` is generated in the current folder.
**Note**: The `--optimize_out` parameter is the save path of the optimized model (no need to add the `.nb` suffix); the `--model_file` parameter is the path of the model structure file and the `--param_file` parameter is the path of the model weight file. Please pay attention to the file names.
<a name="2.2"></a>
<a name="2.1.4"></a>
#### 2.1.4 Compile to get the executable file clas_system
```shell
# Clone the AutoLog repository for automated logging
cd PaddleClas_root_path
cd deploy/lite/
git clone https://github.com/LDOUBLEV/AutoLog.git
```
```shell
# Compile
make -j
```
After executing the `make` command, the `clas_system` executable file is generated in the current directory; this file is used for Lite prediction.
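
The `make` step assumes the header cloned above is present; a quick check (the include path comes from `image_classfication.cpp` in this PR):

```shell
# image_classfication.cpp includes "AutoLog/auto_log/lite_autolog.h", so this file must exist under deploy/lite
ls AutoLog/auto_log/lite_autolog.h
```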
<a name="2.2与手机联调"></a>
### 2.2 Run on the phone
First, some preparation is required.
1. Prepare an arm8 Android phone. If the compiled inference library and opt file target armv7, an arm7 phone is needed and `ARM_ABI = arm7` must be set in the Makefile.
2. Install the ADB tool on the computer for debugging. ADB can be installed as follows:
3.1. Install ADB on macOS:
* Install ADB on macOS:
```shell
brew cask install android-platform-tools
```
* Install ADB on Linux
3.2. Install ADB on Linux
```shell
sudo apt update
sudo apt install -y wget adb
```
* Install ADB on Windows
To install ADB on Windows, download the ADB package from Google's Android platform and install it: [link](https://developer.android.com/studio)
3.3. Install ADB on Windows
3. After connecting the phone to the computer, enable the phone's `USB debugging` option, select `file transfer` mode, and enter in the computer terminal:
```shell
adb devices
```
If there is device output as shown below, the installation was successful:
```
List of devices attached
744be294 device
```
4. Prepare the optimized model, the inference library files, the test image, and the category mapping file.
4. Push the optimized model, the inference library file, the test image, and the category mapping file to the phone.
```shell
cd PaddleClas_root_path
cd deploy/lite/
# Run prepare.sh
# prepare.sh puts the inference library files, the test image, and the dictionary file into the demo/cxx/clas folder of the inference library
sh prepare.sh /{lite prediction library path}/inference_lite_lib.android.armv8
# Enter the working directory of the lite demo
cd /{lite prediction library path}/inference_lite_lib.android.armv8/
cd demo/cxx/clas/
# Copy the C++ inference dynamic library (.so) into the debug folder
cp ../../../cxx/lib/libpaddle_light_api_shared.so ./debug/
```
`prepare.sh` takes `PaddleClas/deploy/lite/imgs/tabby_cat.jpg` as the test image and copies it into the `demo/cxx/clas/debug/` folder.
Place the model file optimized by the `paddle_lite_opt` tool into the `/{lite prediction library path}/inference_lite_lib.android.armv8/demo/cxx/clas/debug/` folder. In this example, use the `MobileNetV3_large_x1_0.nb` model file generated in [2.1.3 Conversion example](#2.1.3).
After completion, the clas folder will contain the following files:
```
demo/cxx/clas/
|-- debug/
| |--MobileNetV3_large_x1_0.nb The optimized classification model file
| |--tabby_cat.jpg The test image
| |--imagenet1k_label_list.txt The category mapping file
| |--libpaddle_light_api_shared.so The C++ inference library file
| |--config.txt The hyperparameter config for classification prediction
|-- config.txt The hyperparameter config for classification prediction
|-- image_classfication.cpp The image classification source file
|-- Makefile The Makefile
adb shell mkdir -p /data/local/tmp/arm_cpu/
adb push clas_system /data/local/tmp/arm_cpu/
adb shell chmod +x /data/local/tmp/arm_cpu//clas_system
adb push inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv/cxx/lib/libpaddle_light_api_shared.so /data/local/tmp/arm_cpu/
adb push MobileNetV3_large_x1_0.nb /data/local/tmp/arm_cpu/
adb push config.txt /data/local/tmp/arm_cpu/
adb push ../../ppcls/utils/imagenet1k_label_list.txt /data/local/tmp/arm_cpu/
adb push imgs/tabby_cat.jpg /data/local/tmp/arm_cpu/
```
#### Note:
* Among the above files, `imagenet1k_label_list.txt` is the category mapping file of the ImageNet1k dataset; if you use custom categories, you need to replace this category mapping file.
* `config.txt` contains the classifier's hyperparameters, as follows:
```shell
clas_model_file ./MobileNetV3_large_x1_0.nb # path of the model file
label_path ./imagenet1k_label_list.txt # category mapping text file
resize_short_size 256 # length of the short side after resize
crop_size 224 # side length used for prediction after cropping
visualize 0 # whether to visualize; if enabled, an image named clas_result.png is generated in the current folder
num_threads 1 # number of threads, default is 1
precision FP32 # precision type, FP32 or INT8, default is FP32
runtime_device arm_cpu # device type, default is arm_cpu
enable_benchmark 0 # whether to enable benchmark, default is 0
tipc_benchmark 0 # whether to enable tipc_benchmark, default is 0
```
5. Start debugging. After the above steps are completed, use ADB to push the `debug/` folder to the phone and run it; the steps are as follows:
5. Run the prediction command
Execute the following command to complete the prediction on the phone.
```shell
# Compile to get the executable clas_system
make -j
# Move the compiled executable into the debug folder
mv clas_system ./debug/
# Push the debug folder to the phone
adb push debug /data/local/tmp/
adb shell
cd /data/local/tmp/debug
export LD_LIBRARY_PATH=/data/local/tmp/debug:$LD_LIBRARY_PATH
# The usage of the clas_system executable is:
# ./clas_system <config file path> <test image path>
./clas_system ./config.txt ./tabby_cat.jpg
adb shell 'export LD_LIBRARY_PATH=/data/local/tmp/arm_cpu/; /data/local/tmp/arm_cpu/clas_system /data/local/tmp/arm_cpu/config.txt /data/local/tmp/arm_cpu/tabby_cat.jpg'
```
If you modify the code, you need to recompile and push it to the phone again.
The running result is as follows:
![](../../images/inference_deployment/lite_demo_result.png)
<div align="center">
<img src="./imgs/lite_demo_result.png" width="600">
</div>
<a name="3"></a>
## 3. FAQ
## FAQ
Q1: What if I want to change the model? Do I need to go through the whole process again?
A1: If you have already gone through the above steps, changing the model only requires replacing the `.nb` model file; also remember to update the `.nb` file path in the config file and the category mapping file (if necessary).
Q2: How do I test with a different image?
A2: Replace the test image under debug with the image you want to test and push it to the phone again with ADB.

View File

@ -16,6 +16,14 @@ function func_parser_value(){
echo ${tmp}
}
function func_parser_value_lite(){
strs=$1
IFS=$2
array=(${strs})
tmp=${array[1]}
echo ${tmp}
}
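# Usage sketch (illustrative, not part of the original helpers): given a TIPC config line
#   clas_model_file:MobileNetV3_large_x1_0
# calling `func_parser_value_lite "$line" ":"` prints "MobileNetV3_large_x1_0".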
function func_set_params(){
key=$1
value=$2

View File

@ -0,0 +1,8 @@
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:MobileNetV3_large_x1_0
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32

View File

@ -0,0 +1,8 @@
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:MobileNetV3_large_x1_0
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32

View File

@ -0,0 +1,8 @@
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:PPLCNet_x0_25
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32

View File

@ -0,0 +1,8 @@
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:PPLCNet_x0_5
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32

View File

@ -0,0 +1,8 @@
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:PPLCNet_x0_75
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32

View File

@ -0,0 +1,8 @@
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:PPLCNet_x1_0
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32

View File

@ -0,0 +1,8 @@
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:PPLCNet_x1_5
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32

View File

@ -0,0 +1,8 @@
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:PPLCNet_x2_0
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32

View File

@ -0,0 +1,8 @@
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:PPLCNet_x2_5
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32

View File

@ -0,0 +1,8 @@
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:ResNet50
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32

View File

@ -0,0 +1,8 @@
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:ResNet50_vd
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32

View File

@ -0,0 +1,8 @@
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:SwinTransformer_tiny_patch4_window7_224
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32

View File

@ -0,0 +1,44 @@
# Lite_arm_cpp_cpu Prediction Functional Test
The main program of the Lite_arm_cpp_cpu prediction functional test is `test_lite_arm_cpu_cpp.sh`, which tests model inference based on the Paddle-Lite inference library.
## 1. Test Summary
| Model Type | device | batchsize | Precision | Threads |
| :----: | :----: | :----: | :----: | :----: |
| Normal model | arm_cpu | 1 | FP32 | 1 |
## 2. Test Procedure
For the runtime environment, please refer to [this document](https://github.com/PaddlePaddle/models/blob/release/2.2/tutorials/mobilenetv3_prod/Step6/deploy/lite_infer_cpp_arm_cpu/README.md) to configure the TIPC Lite running environment.
### 2.1 Functional test
First run `prepare_lite_arm_cpu_cpp.sh` to prepare the data and model, then run `test_lite_arm_cpu_cpp.sh` for testing; log files with the `lite_*.log` suffix are finally generated under the `./output` directory.
```shell
bash test_tipc/prepare_lite_arm_cpu_cpp.sh test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_lite_arm_cpu_cpp.txt
```
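
After the prepare step has compiled and pushed everything, the test script is presumably run with the same config file (it takes the TIPC config as its first argument; the exact invocation below is assumed):

```shell
bash test_tipc/test_lite_arm_cpu_cpp.sh test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_lite_arm_cpu_cpp.txt
```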
After running the prediction command, the run logs are automatically saved in the `./output` folder, including the following files:
```shell
test_tipc/output/
|- results.log # log of the execution status of each command
|- lite_MobileNetV3_large_x1_0_runtime_device_arm_cpu_precision_FP32_batchsize_1_threads_1.log # prediction log on ARM_CPU with FP32, 1 thread, batch_size=1
......
```
Among them, results.log contains the execution status of each command. If a command runs successfully, the following is printed:
```
Run successfully with command - adb shell 'export LD_LIBRARY_PATH=/data/local/tmp/arm_cpu/; /data/local/tmp/arm_cpu/mobilenet_v3 /data/local/tmp/arm_cpu/config.txt /data/local/tmp/arm_cpu/demo.jpg' > ./output/lite_MobileNetV3_large_x1_0_runtime_device_arm_cpu_precision_FP32_batchsize_1_threads_1.log 2>&1!
......
```
If it fails, the following is printed:
```
Run failed with command - adb shell 'export LD_LIBRARY_PATH=/data/local/tmp/arm_cpu/; /data/local/tmp/arm_cpu/mobilenet_v3 /data/local/tmp/arm_cpu/config.txt /data/local/tmp/arm_cpu/demo.jpg' > ./output/lite_MobileNetV3_large_x1_0_runtime_device_arm_cpu_precision_FP32_batchsize_1_threads_1.log 2>&1!
......
```
Based on the contents of results.log, it is easy to determine which command failed.
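
A quick way to surface only the failed commands (plain `grep`):

```shell
# list the commands that status_check recorded as failed
grep "Run failed" ./output/results.log
```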
## 3. More Tutorials
This document is for functional testing. For a more detailed tutorial on Lite prediction, please refer to: [PaddleLite Inference Deployment](../../docs/zh_CN/inference_deployment/paddle_lite_deploy.md).

View File

@ -0,0 +1,58 @@
#!/bin/bash
source test_tipc/common_func.sh
BASIC_CONFIG="./config.txt"
CONFIG=$1
# parser tipc config
IFS=$'\n'
TIPC_CONFIG=$1
tipc_dataline=$(cat $TIPC_CONFIG)
tipc_lines=(${tipc_dataline})
runtime_device=$(func_parser_value_lite "${tipc_lines[0]}" ":")
lite_arm_work_path=$(func_parser_value_lite "${tipc_lines[1]}" ":")
lite_arm_so_path=$(func_parser_value_lite "${tipc_lines[2]}" ":")
clas_model_name=$(func_parser_value_lite "${tipc_lines[3]}" ":")
inference_cmd=$(func_parser_value_lite "${tipc_lines[4]}" ":")
num_threads_list=$(func_parser_value_lite "${tipc_lines[5]}" ":")
batch_size_list=$(func_parser_value_lite "${tipc_lines[6]}" ":")
precision_list=$(func_parser_value_lite "${tipc_lines[7]}" ":")
# Prepare config and test.sh
work_path="./deploy/lite"
cp ${CONFIG} ${work_path}
cp test_tipc/test_lite_arm_cpu_cpp.sh ${work_path}
# Prepare model
cd ${work_path}
pip3 install paddlelite==2.10
model_url="https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/${clas_model_name}_infer.tar"
wget --no-proxy ${model_url}
model_tar=$(echo ${model_url} | awk -F "/" '{print $NF}')
tar -xf ${model_tar}
paddle_lite_opt --model_dir=${clas_model_name}_infer --model_file=${clas_model_name}_infer/inference.pdmodel --param_file=${clas_model_name}_infer/inference.pdiparams --valid_targets=arm --optimize_out=${clas_model_name}
rm -rf ${clas_model_name}_infer*
# Prepare paddlelite library
paddlelite_lib_url="https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv.tar.gz"
wget ${paddlelite_lib_url}
paddlelite_lib_file=$(echo ${paddlelite_lib_url} | awk -F "/" '{print $NF}')
tar -xzf ${paddlelite_lib_file}
mv ${paddlelite_lib_file%*.tar.gz} inference_lite_lib.android.armv8
rm -rf ${paddlelite_lib_file%*.tar.gz}*
# Compile and obtain executable binary file
git clone https://github.com/LDOUBLEV/AutoLog.git
make
# push executable binary, library, lite model, data, etc. to arm device
adb shell mkdir -p ${lite_arm_work_path}
adb push $(echo ${inference_cmd} | awk '{print $1}') ${lite_arm_work_path}
adb shell chmod +x ${lite_arm_work_path}/$(echo ${inference_cmd} | awk '{print $1}')
adb push ${lite_arm_so_path} ${lite_arm_work_path}
adb push ${clas_model_name}.nb ${lite_arm_work_path}
adb push ${BASIC_CONFIG} ${lite_arm_work_path}
adb push ../../ppcls/utils/imagenet1k_label_list.txt ${lite_arm_work_path}
adb push imgs/$(echo ${inference_cmd} | awk '{print $3}') ${lite_arm_work_path}

View File

@ -1,93 +0,0 @@
#!/bin/bash
source ./test_tipc/common_func.sh
FILENAME=$1
dataline=$(cat ${FILENAME})
# parser params
IFS=$'\n'
lines=(${dataline})
IFS=$'\n'
inference_cmd=$(func_parser_value "${lines[1]}")
DEVICE=$(func_parser_value "${lines[2]}")
det_lite_model_list=$(func_parser_value "${lines[3]}")
rec_lite_model_list=$(func_parser_value "${lines[4]}")
cls_lite_model_list=$(func_parser_value "${lines[5]}")
if [[ $inference_cmd =~ "det" ]];then
lite_model_list=${det_lite_model_list}
elif [[ $inference_cmd =~ "rec" ]];then
lite_model_list=(${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
elif [[ $inference_cmd =~ "system" ]];then
lite_model_list=(${det_lite_model_list[*]} ${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
else
echo "inference_cmd is wrong, please check."
exit 1
fi
if [ ${DEVICE} = "ARM_CPU" ];then
valid_targets="arm"
paddlelite_url="https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10-rc/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz"
end_index="66"
elif [ ${DEVICE} = "ARM_GPU_OPENCL" ];then
valid_targets="opencl"
paddlelite_url="https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10-rc/inference_lite_lib.armv8.clang.with_exception.with_extra.with_cv.opencl.tar.gz"
end_index="71"
else
echo "DEVICE only suport ARM_CPU, ARM_GPU_OPENCL."
exit 2
fi
# prepare lite .nb model
pip install paddlelite==2.10-rc
current_dir=${PWD}
IFS="|"
model_path=./inference_models
for model in ${lite_model_list[*]}; do
if [[ $model =~ "PP-OCRv2" ]];then
inference_model_url=https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/${model}.tar
elif [[ $model =~ "v2.0" ]];then
inference_model_url=https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/${model}.tar
else
echo "Model is wrong, please check."
exit 3
fi
inference_model=${inference_model_url##*/}
wget -nc -P ${model_path} ${inference_model_url}
cd ${model_path} && tar -xf ${inference_model} && cd ../
model_dir=${model_path}/${inference_model%.*}
model_file=${model_dir}/inference.pdmodel
param_file=${model_dir}/inference.pdiparams
paddle_lite_opt --model_dir=${model_dir} --model_file=${model_file} --param_file=${param_file} --valid_targets=${valid_targets} --optimize_out=${model_dir}_opt
done
# prepare test data
data_url=https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar
model_path=./inference_models
inference_model=${inference_model_url##*/}
data_file=${data_url##*/}
wget -nc -P ./inference_models ${inference_model_url}
wget -nc -P ./test_data ${data_url}
cd ./inference_models && tar -xf ${inference_model} && cd ../
cd ./test_data && tar -xf ${data_file} && rm ${data_file} && cd ../
# prepare lite env
paddlelite_zipfile=$(echo $paddlelite_url | awk -F "/" '{print $NF}')
paddlelite_file=${paddlelite_zipfile:0:${end_index}}
wget ${paddlelite_url} && tar -xf ${paddlelite_zipfile}
mkdir -p ${paddlelite_file}/demo/cxx/ocr/test_lite
cp -r ${model_path}/*_opt.nb test_data ${paddlelite_file}/demo/cxx/ocr/test_lite
cp ppocr/utils/ppocr_keys_v1.txt deploy/lite/config.txt ${paddlelite_file}/demo/cxx/ocr/test_lite
cp -r ./deploy/lite/* ${paddlelite_file}/demo/cxx/ocr/
cp ${paddlelite_file}/cxx/lib/libpaddle_light_api_shared.so ${paddlelite_file}/demo/cxx/ocr/test_lite
cp ${FILENAME} test_tipc/test_lite_arm_cpp.sh test_tipc/common_func.sh ${paddlelite_file}/demo/cxx/ocr/test_lite
cd ${paddlelite_file}/demo/cxx/ocr/
git clone https://github.com/cuicheng01/AutoLog.git
# make
make -j
sleep 1
make -j
cp ocr_db_crnn test_lite && cp test_lite/libpaddle_light_api_shared.so test_lite/libc++_shared.so
tar -cf test_lite.tar ./test_lite && cp test_lite.tar ${current_dir} && cd ${current_dir}
rm -rf ${paddlelite_file}* && rm -rf ${model_path}

View File

@ -1,159 +0,0 @@
#!/bin/bash
source ./common_func.sh
export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH
FILENAME=$1
dataline=$(cat $FILENAME)
# parser params
IFS=$'\n'
lines=(${dataline})
# parser lite inference
inference_cmd=$(func_parser_value "${lines[1]}")
runtime_device=$(func_parser_value "${lines[2]}")
det_model_list=$(func_parser_value "${lines[3]}")
rec_model_list=$(func_parser_value "${lines[4]}")
cls_model_list=$(func_parser_value "${lines[5]}")
cpu_threads_list=$(func_parser_value "${lines[6]}")
det_batch_size_list=$(func_parser_value "${lines[7]}")
rec_batch_size_list=$(func_parser_value "${lines[8]}")
infer_img_dir_list=$(func_parser_value "${lines[9]}")
config_dir=$(func_parser_value "${lines[10]}")
rec_dict_dir=$(func_parser_value "${lines[11]}")
benchmark_value=$(func_parser_value "${lines[12]}")
if [[ $inference_cmd =~ "det" ]]; then
lite_model_list=${det_lite_model_list}
elif [[ $inference_cmd =~ "rec" ]]; then
lite_model_list=(${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
elif [[ $inference_cmd =~ "system" ]]; then
lite_model_list=(${det_lite_model_list[*]} ${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
else
echo "inference_cmd is wrong, please check."
exit 1
fi
LOG_PATH="./output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results.log"
function func_test_det(){
IFS='|'
_script=$1
_det_model=$2
_log_path=$3
_img_dir=$4
_config=$5
if [[ $_det_model =~ "slim" ]]; then
precision="INT8"
else
precision="FP32"
fi
# lite inference
for num_threads in ${cpu_threads_list[*]}; do
for det_batchsize in ${det_batch_size_list[*]}; do
_save_log_path="${_log_path}/lite_${_det_model}_runtime_device_${runtime_device}_precision_${precision}_det_batchsize_${det_batchsize}_threads_${num_threads}.log"
command="${_script} ${_det_model} ${runtime_device} ${precision} ${num_threads} ${det_batchsize} ${_img_dir} ${_config} ${benchmark_value} > ${_save_log_path} 2>&1"
eval ${command}
status_check $? "${command}" "${status_log}"
done
done
}
function func_test_rec(){
IFS='|'
_script=$1
_rec_model=$2
_cls_model=$3
_log_path=$4
_img_dir=$5
_config=$6
_rec_dict_dir=$7
if [[ $_det_model =~ "slim" ]]; then
_precision="INT8"
else
_precision="FP32"
fi
# lite inference
for num_threads in ${cpu_threads_list[*]}; do
for rec_batchsize in ${rec_batch_size_list[*]}; do
_save_log_path="${_log_path}/lite_${_rec_model}_${cls_model}_runtime_device_${runtime_device}_precision_${_precision}_rec_batchsize_${rec_batchsize}_threads_${num_threads}.log"
command="${_script} ${_rec_model} ${_cls_model} ${runtime_device} ${_precision} ${num_threads} ${rec_batchsize} ${_img_dir} ${_config} ${_rec_dict_dir} ${benchmark_value} > ${_save_log_path} 2>&1"
eval ${command}
status_check $? "${command}" "${status_log}"
done
done
}
function func_test_system(){
IFS='|'
_script=$1
_det_model=$2
_rec_model=$3
_cls_model=$4
_log_path=$5
_img_dir=$6
_config=$7
_rec_dict_dir=$8
if [[ $_det_model =~ "slim" ]]; then
_precision="INT8"
else
_precision="FP32"
fi
# lite inference
for num_threads in ${cpu_threads_list[*]}; do
for det_batchsize in ${det_batch_size_list[*]}; do
for rec_batchsize in ${rec_batch_size_list[*]}; do
_save_log_path="${_log_path}/lite_${_det_model}_${_rec_model}_${_cls_model}_runtime_device_${runtime_device}_precision_${_precision}_det_batchsize_${det_batchsize}_rec_batchsize_${rec_batchsize}_threads_${num_threads}.log"
command="${_script} ${_det_model} ${_rec_model} ${_cls_model} ${runtime_device} ${_precision} ${num_threads} ${det_batchsize} ${_img_dir} ${_config} ${_rec_dict_dir} ${benchmark_value} > ${_save_log_path} 2>&1"
eval ${command}
status_check $? "${command}" "${status_log}"
done
done
done
}
echo "################### run test ###################"
if [[ $inference_cmd =~ "det" ]]; then
IFS="|"
det_model_list=(${det_model_list[*]})
for i in {0..1}; do
#run lite inference
for img_dir in ${infer_img_dir_list[*]}; do
func_test_det "${inference_cmd}" "${det_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${config_dir}"
done
done
elif [[ $inference_cmd =~ "rec" ]]; then
IFS="|"
rec_model_list=(${rec_model_list[*]})
cls_model_list=(${cls_model_list[*]})
for i in {0..1}; do
#run lite inference
for img_dir in ${infer_img_dir_list[*]}; do
func_test_rec "${inference_cmd}" "${rec_model}_opt.nb" "${cls_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${rec_dict_dir}" "${config_dir}"
done
done
elif [[ $inference_cmd =~ "system" ]]; then
IFS="|"
det_model_list=(${det_model_list[*]})
rec_model_list=(${rec_model_list[*]})
cls_model_list=(${cls_model_list[*]})
for i in {0..1}; do
#run lite inference
for img_dir in ${infer_img_dir_list[*]}; do
func_test_system "${inference_cmd}" "${det_model_list[i]}_opt.nb" "${rec_model_list[i]}_opt.nb" "${cls_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${config_dir}" "${rec_dict_dir}"
done
done
fi

View File

@ -0,0 +1,95 @@
#!/bin/bash
source test_tipc/common_func.sh
current_path=$PWD
IFS=$'\n'
TIPC_CONFIG=$1
tipc_dataline=$(cat $TIPC_CONFIG)
tipc_lines=(${tipc_dataline})
work_path="./deploy/lite"
cd ${work_path}
BASIC_CONFIG="config.txt"
basic_dataline=$(cat $BASIC_CONFIG)
basic_lines=(${basic_dataline})
# parser basic config
label_path=$(func_parser_value_lite "${basic_lines[1]}" " ")
resize_short_size=$(func_parser_value_lite "${basic_lines[2]}" " ")
crop_size=$(func_parser_value_lite "${basic_lines[3]}" " ")
visualize=$(func_parser_value_lite "${basic_lines[4]}" " ")
enable_benchmark=$(func_parser_value_lite "${basic_lines[9]}" " ")
tipc_benchmark=$(func_parser_value_lite "${basic_lines[10]}" " ")
# parser tipc config
runtime_device=$(func_parser_value_lite "${tipc_lines[0]}" ":")
lite_arm_work_path=$(func_parser_value_lite "${tipc_lines[1]}" ":")
lite_arm_so_path=$(func_parser_value_lite "${tipc_lines[2]}" ":")
clas_model_name=$(func_parser_value_lite "${tipc_lines[3]}" ":")
inference_cmd=$(func_parser_value_lite "${tipc_lines[4]}" ":")
num_threads_list=$(func_parser_value_lite "${tipc_lines[5]}" ":")
batch_size_list=$(func_parser_value_lite "${tipc_lines[6]}" ":")
precision_list=$(func_parser_value_lite "${tipc_lines[7]}" ":")
LOG_PATH=${current_path}"/output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results.log"
#run Lite TIPC
function func_test_tipc(){
IFS="|"
_basic_config=$1
_model_name=$2
_log_path=$3
for num_threads in ${num_threads_list[*]}; do
if [ $(uname) = "Darwin" ]; then
sed -i " " "s/num_threads.*/num_threads ${num_threads}/" ${_basic_config}
elif [ $(expr substr $(uname -s) 1 5) = "Linux" ]; then
sed -i "s/num_threads.*/num_threads ${num_threads}/" ${_basic_config}
fi
for batch_size in ${batch_size_list[*]}; do
if [ $(uname) = "Darwin" ]; then
sed -i " " "s/batch_size.*/batch_size ${batch_size}/" ${_basic_config}
elif [ $(expr substr $(uname -s) 1 5) = "Linux" ]; then
sed -i "s/batch_size.*/batch_size ${batch_size}/" ${_basic_config}
fi
for precision in ${precision_list[*]}; do
if [ $(uname) = "Darwin" ]; then
sed -i " " "s/precision.*/precision ${precision}/" ${_basic_config}
elif [ $(expr substr $(uname -s) 1 5) = "Linux" ]; then
sed -i "s/precision.*/precision ${precision}/" ${_basic_config}
fi
_save_log_path="${_log_path}/lite_${_model_name}_runtime_device_${runtime_device}_precision_${precision}_batchsize_${batch_size}_threads_${num_threads}.log"
real_inference_cmd=$(echo ${inference_cmd} | awk -F " " '{print path $1" "path $2" "path $3}' path="$lite_arm_work_path")
command1="adb push ${_basic_config} ${lite_arm_work_path}"
eval ${command1}
command2="adb shell 'export LD_LIBRARY_PATH=${lite_arm_work_path}; ${real_inference_cmd}' > ${_save_log_path} 2>&1"
eval ${command2}
status_check $? "${command2}" "${status_log}"
done
done
done
}
echo "################### run test tipc ###################"
label_map=$(echo ${label_path} | awk -F "/" '{print $NF}')
if [ $(uname) = "Darwin" ]; then
# for Mac
sed -i " " "s/runtime_device.*/runtime_device arm_cpu/" ${BASIC_CONFIG}
escape_lite_arm_work_path=$(echo ${lite_arm_work_path//\//\\\/})
sed -i " " "s/clas_model_file.*/clas_model_file ${escape_lite_arm_work_path}${clas_model_name}.nb/" ${BASIC_CONFIG}
sed -i " " "s/label_path.*/label_path ${escape_lite_arm_work_path}${label_map}/" ${BASIC_CONFIG}
sed -i " " "s/tipc_benchmark.*/tipc_benchmark 1/" ${BASIC_CONFIG}
elif [ $(expr substr $(uname -s) 1 5) = "Linux" ]; then
# for Linux
sed -i "s/runtime_device.*/runtime_device arm_cpu/" ${BASIC_CONFIG}
escape_lite_arm_work_path=$(echo ${lite_arm_work_path//\//\\\/})
sed -i "s/clas_model_file.*/clas_model_file ${escape_lite_arm_work_path}${clas_model_name}/" ${BASIC_CONFIG}
sed -i "s/label_path.*/label_path ${escape_lite_arm_work_path}${label_path}/" ${BASIC_CONFIG}
sed -i "s/tipc_benchmark.*/tipc_benchmark 1/" ${BASIC_CONFIG}
fi
func_test_tipc ${BASIC_CONFIG} ${clas_model_name} ${LOG_PATH}