Mirror of https://github.com/PaddlePaddle/PaddleOCR
Commit b9964c83f8: Merge branch 'test_v11' of https://github.com/LDOUBLEV/PaddleOCR into test_v11
@ -20,6 +20,7 @@ import numpy as np
from .locality_aware_nms import nms_locality
import cv2
import paddle
import lanms

import os
import sys
@ -29,6 +30,7 @@ class EASTPostProcess(object):
    """
    The post process for EAST.
    """

    def __init__(self,
                 score_thresh=0.8,
                 cover_thresh=0.1,
@ -38,11 +40,6 @@ class EASTPostProcess(object):
        self.score_thresh = score_thresh
        self.cover_thresh = cover_thresh
        self.nms_thresh = nms_thresh

        # c++ la-nms is faster, but only support python 3.5
        self.is_python35 = False
        if sys.version_info.major == 3 and sys.version_info.minor == 5:
            self.is_python35 = True

    def restore_rectangle_quad(self, origin, geometry):
        """
@ -79,11 +76,8 @@ class EASTPostProcess(object):
        boxes = np.zeros((text_box_restored.shape[0], 9), dtype=np.float32)
        boxes[:, :8] = text_box_restored.reshape((-1, 8))
        boxes[:, 8] = score_map[xy_text[:, 0], xy_text[:, 1]]
        if self.is_python35:
            import lanms
            boxes = lanms.merge_quadrangle_n9(boxes, nms_thresh)
        else:
            boxes = nms_locality(boxes.astype(np.float64), nms_thresh)
        boxes = lanms.merge_quadrangle_n9(boxes, nms_thresh)
        # boxes = nms_locality(boxes.astype(np.float64), nms_thresh)
        if boxes.shape[0] == 0:
            return []
        # Here we filter some low score boxes by the average score map,
@ -139,4 +133,4 @@ class EASTPostProcess(object):
                    continue
                boxes_norm.append(box)
            dt_boxes_list.append({'points': np.array(boxes_norm)})
            return dt_boxes_list
        return dt_boxes_list
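
Note: this hunk removes the Python 3.5-only gate around the C++ locality-aware NMS, so `lanms.merge_quadrangle_n9` is now always called on the `(N, 9)` box array (8 quad coordinates plus a score), with the pure-Python `nms_locality` kept only as a comment; the `lanms-nova` entry added to the requirements further down supplies the `lanms` module. A minimal sketch of that call, assuming `lanms-nova` is installed and exposes the same `merge_quadrangle_n9` signature used above:

```python
# Minimal sketch (not part of the diff): feed an (N, 9) array of quads + scores
# to locality-aware NMS the way the new code path does.
import numpy as np
import lanms  # provided by the lanms-nova package added to the requirements


def square(x, y, s):
    # one quad as x1,y1,x2,y2,x3,y3,x4,y4 (clockwise in image coordinates)
    return [x, y, x + s, y, x + s, y + s, x, y + s]


boxes = np.array(
    [square(10, 10, 40) + [0.9],     # two heavily overlapping quads ...
     square(12, 12, 40) + [0.8],
     square(200, 200, 30) + [0.7]],  # ... and one isolated quad
    dtype=np.float32)                # shape (3, 9): 8 corner coords + score

merged = lanms.merge_quadrangle_n9(boxes, 0.2)  # 0.2 is the nms_thresh used in the configs below
print(merged.shape)                             # expect the overlapping pair to be merged
```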
@ -54,11 +54,28 @@ def load_model(config, model, optimizer=None):
    pretrained_model = global_config.get('pretrained_model')
    best_model_dict = {}
    if checkpoints:
        if checkpoints.endswith('pdparams'):
        if checkpoints.endswith('.pdparams'):
            checkpoints = checkpoints.replace('.pdparams', '')
        assert os.path.exists(checkpoints + ".pdopt"), \
            f"The {checkpoints}.pdopt does not exists!"
        load_pretrained_params(model, checkpoints)
        assert os.path.exists(checkpoints + ".pdparams"), \
            "The {}.pdparams does not exists!".format(checkpoints)

        # load params from trained model
        params = paddle.load(checkpoints + '.pdparams')
        state_dict = model.state_dict()
        new_state_dict = {}
        for key, value in state_dict.items():
            if key not in params:
                logger.warning("{} not in loaded params {} !".format(
                    key, params.keys()))
            pre_value = params[key]
            if list(value.shape) == list(pre_value.shape):
                new_state_dict[key] = pre_value
            else:
                logger.warning(
                    "The shape of model params {} {} not matched with loaded params shape {} !".
                    format(key, value.shape, pre_value.shape))
        model.set_state_dict(new_state_dict)

        optim_dict = paddle.load(checkpoints + '.pdopt')
        if optimizer is not None:
            optimizer.set_state_dict(optim_dict)
@ -80,10 +97,10 @@ def load_model(config, model, optimizer=None):

def load_pretrained_params(model, path):
    logger = get_logger()
    if path.endswith('pdparams'):
    if path.endswith('.pdparams'):
        path = path.replace('.pdparams', '')
    assert os.path.exists(path + ".pdparams"), \
        f"The {path}.pdparams does not exists!"
        "The {}.pdparams does not exists!".format(path)

    params = paddle.load(path + '.pdparams')
    state_dict = model.state_dict()
@ -92,11 +109,11 @@ def load_pretrained_params(model, path):
        if list(state_dict[k1].shape) == list(params[k2].shape):
            new_state_dict[k1] = params[k2]
        else:
            logger.info(
                f"The shape of model params {k1} {state_dict[k1].shape} not matched with loaded params {k2} {params[k2].shape} !"
            )
            logger.warning(
                "The shape of model params {} {} not matched with loaded params {} {} !".
                format(k1, state_dict[k1].shape, k2, params[k2].shape))
    model.set_state_dict(new_state_dict)
    logger.info(f"load pretrain successful from {path}")
    logger.info("load pretrain successful from {}".format(path))
    return model
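
Note: with this change both `load_model` and `load_pretrained_params` tolerate a checkpoint path given with or without the `.pdparams` suffix and copy only tensors whose shapes match the model, warning about the rest. A condensed sketch of that pattern on a plain `paddle.nn.Layer` (the `DemoNet` model and file name are illustrative, not from the diff):

```python
import os

import paddle


class DemoNet(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.fc = paddle.nn.Linear(8, 2)

    def forward(self, x):
        return self.fc(x)


def load_params_if_shapes_match(model, path):
    # Accept both "xxx" and "xxx.pdparams", as the updated helpers do.
    if path.endswith('.pdparams'):
        path = path[:-len('.pdparams')]
    assert os.path.exists(path + '.pdparams'), "missing {}.pdparams".format(path)

    params = paddle.load(path + '.pdparams')
    new_state_dict = {}
    for key, value in model.state_dict().items():
        if key in params and list(value.shape) == list(params[key].shape):
            new_state_dict[key] = params[key]  # keep only shape-compatible tensors
    model.set_state_dict(new_state_dict)
    return model


model = DemoNet()
paddle.save(model.state_dict(), 'demo.pdparams')
load_params_if_shapes_match(DemoNet(), 'demo.pdparams')  # suffix form is also accepted
```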

@ -12,4 +12,5 @@ cython
lxml
premailer
openpyxl
fasttext==0.9.1
fasttext==0.9.1
lanms-nova

@ -0,0 +1,13 @@
===========================lite_params===========================
inference:./ocr_db_crnn det
runtime_device:ARM_GPU_OPENCL
det_infer_model:ch_PP-OCRv2_det_infer|ch_PP-OCRv2_det_slim_quant_infer
null:null
null:null
--cpu_threads:1|4
--det_batch_size:1
null:null
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/
--config_dir:./config.txt
null:null
--benchmark:True

@ -0,0 +1,13 @@
===========================lite_params===========================
inference:./ocr_db_crnn system
runtime_device:ARM_CPU
det_infer_model:ch_ppocr_mobile_v2.0_det_infer|ch_ppocr_db_mobile_v2.0_det_quant_infer
rec_infer_model:ch_ppocr_mobile_v2.0_rec_infer|ch_ppocr_mobile_v2.0_rec_slim_infer
cls_infer_model:ch_ppocr_mobile_v2.0_cls_infer|ch_ppocr_mobile_v2.0_cls_slim_infer
--cpu_threads:1|4
--det_batch_size:1
--rec_batch_size:1
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/
--config_dir:./config.txt
--rec_dict_dir:./ppocr_keys_v1.txt
--benchmark:True

@ -0,0 +1,13 @@
===========================lite_params===========================
inference:./ocr_db_crnn system
runtime_device:ARM_GPU_OPENCL
det_infer_model:ch_ppocr_mobile_v2.0_det_infer|ch_ppocr_db_mobile_v2.0_det_quant_infer
rec_infer_model:ch_ppocr_mobile_v2.0_rec_infer|ch_ppocr_mobile_v2.0_rec_slim_infer
cls_infer_model:ch_ppocr_mobile_v2.0_cls_infer|ch_ppocr_mobile_v2.0_cls_slim_infer
--cpu_threads:1|4
--det_batch_size:1
--rec_batch_size:1
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/
--config_dir:./config.txt
--rec_dict_dir:./ppocr_keys_v1.txt
--benchmark:True

@ -1,12 +1,13 @@
===========================lite_params===========================
inference:./ocr_db_crnn det
infer_model:ch_PP-OCRv2_det_infer|ch_PP-OCRv2_det_slim_quant_infer
runtime_device:ARM_CPU
det_infer_model:ch_ppocr_mobile_v2.0_det_infer|ch_ppocr_db_mobile_v2.0_det_quant_infer
null:null
null:null
--cpu_threads:1|4
--det_batch_size:1
--rec_batch_size:1
--system_batch_size:1
null:null
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/
--config_dir:./config.txt
--rec_dict_dir:./ppocr_keys_v1.txt
null:null
--benchmark:True

@ -1,7 +1,7 @@
===========================lite_params===========================
inference:./ocr_db_crnn det
runtime_device:ARM_GPU_OPENCL
det_infer_model:ch_PP-OCRv2_det_infer|ch_PP-OCRv2_det_slim_quant_infer
det_infer_model:ch_ppocr_mobile_v2.0_det_infer|ch_ppocr_db_mobile_v2.0_det_quant_infer
null:null
null:null
--cpu_threads:1|4
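
Note: these `lite_params` files are consumed by `test_tipc/prepare_lite_cpp.sh` and `test_lite_arm_cpp.sh`; each line is a `key:value` pair, `|` appears to separate alternative values to sweep, and `null:null` marks an unused slot. A hypothetical parser (not part of the repo) that expands one file into the run combinations it describes, under that reading of the convention:

```python
import itertools


def parse_lite_params(path):
    options = {}
    for line in open(path):
        line = line.strip()
        if not line or line.startswith('='):  # skip the ===...lite_params...=== banner
            continue
        key, _, value = line.partition(':')
        if key == 'null':                     # 'null:null' rows carry no option
            continue
        options[key] = value.split('|')       # '--cpu_threads:1|4' -> ['1', '4']
    keys = list(options)
    return [dict(zip(keys, combo))
            for combo in itertools.product(*options.values())]


# Example (file name is one of the configs above):
# for run in parse_lite_params('model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt'):
#     print(run['inference'], run.get('--cpu_threads'))
```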

test_tipc/configs/det_mv3_east_v2.0/det_mv3_east.yml (new file, 109 lines)
@ -0,0 +1,109 @@
Global:
  use_gpu: true
  epoch_num: 10000
  log_smooth_window: 20
  print_batch_step: 2
  save_model_dir: ./output/east_mv3/
  save_epoch_step: 1000
  # evaluation is run every 5000 iterations after the 4000th iteration
  eval_batch_step: [4000, 5000]
  cal_metric_during_train: False
  pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained
  checkpoints:
  save_inference_dir:
  use_visualdl: False
  infer_img:
  save_res_path: ./output/det_east/predicts_east.txt

Architecture:
  model_type: det
  algorithm: EAST
  Transform:
  Backbone:
    name: MobileNetV3
    scale: 0.5
    model_name: large
  Neck:
    name: EASTFPN
    model_name: small
  Head:
    name: EASTHead
    model_name: small

Loss:
  name: EASTLoss

Optimizer:
  name: Adam
  beta1: 0.9
  beta2: 0.999
  lr:
    # name: Cosine
    learning_rate: 0.001
    # warmup_epoch: 0
  regularizer:
    name: 'L2'
    factor: 0

PostProcess:
  name: EASTPostProcess
  score_thresh: 0.8
  cover_thresh: 0.1
  nms_thresh: 0.2

Metric:
  name: DetMetric
  main_indicator: hmean

Train:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/icdar2015/text_localization/
    label_file_list:
      - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
    ratio_list: [1.0]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - DetLabelEncode: # Class handling label
      - EASTProcessTrain:
          image_shape: [512, 512]
          background_ratio: 0.125
          min_crop_side_ratio: 0.1
          min_text_size: 10
      - KeepKeys:
          keep_keys: ['image', 'score_map', 'geo_map', 'training_mask'] # dataloader will return list in this order
  loader:
    shuffle: True
    drop_last: False
    batch_size_per_card: 16
    num_workers: 8

Eval:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/icdar2015/text_localization/
    label_file_list:
      - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - DetLabelEncode: # Class handling label
      - DetResizeForTest:
          limit_side_len: 2400
          limit_type: max
      - NormalizeImage:
          scale: 1./255.
          mean: [0.485, 0.456, 0.406]
          std: [0.229, 0.224, 0.225]
          order: 'hwc'
      - ToCHWImage:
      - KeepKeys:
          keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 1 # must be 1
    num_workers: 2
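
Note: the `PostProcess` block above carries the same `score_thresh` / `cover_thresh` / `nms_thresh` values that the `EASTPostProcess` constructor in the first hunk of this commit accepts. A sketch of how those values reach the class when read straight from the yml (building the object by hand like this bypasses ppocr's `build_post_process` factory; paths assume the repository root):

```python
import yaml

from ppocr.postprocess.east_postprocess import EASTPostProcess

with open('test_tipc/configs/det_mv3_east_v2.0/det_mv3_east.yml') as f:
    cfg = yaml.safe_load(f)

pp_cfg = dict(cfg['PostProcess'])  # {'name': 'EASTPostProcess', 'score_thresh': 0.8, ...}
pp_cfg.pop('name')
post_process = EASTPostProcess(**pp_cfg)
print(post_process.score_thresh, post_process.cover_thresh, post_process.nms_thresh)
```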

test_tipc/configs/det_mv3_east_v2.0/train_infer_python.txt (new file, 51 lines)
@ -0,0 +1,51 @@
===========================train_params===========================
model_name:det_mv3_east_v2.0
python:python3.7
gpu_list:0
Global.use_gpu:True|True
Global.auto_cast:fp32
Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=500
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4
Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/
null:null
##
trainer:norm_train
norm_train:tools/train.py -c test_tipc/configs/det_mv3_east_v2.0/det_mv3_east.yml -o
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:null
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.pretrained_model:
norm_export:tools/export_model.py -c test_tipc/configs/det_mv3_east_v2.0/det_mv3_east.yml -o
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
##
train_model:./inference/det_mv3_east/best_accuracy
infer_export:tools/export_model.py -c test_tipc/configs/det_mv3_east_v2.0/det_mv3_east.yml -o
infer_quant:False
inference:tools/infer/predict_det.py
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:fp32|fp16|int8
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
--save_log_path:null
--benchmark:True
--det_algorithm:EAST
test_tipc/configs/det_r50_vd_east_v2.0/det_r50_vd_east.yml (new file, 108 lines)
@ -0,0 +1,108 @@
Global:
  use_gpu: true
  epoch_num: 10000
  log_smooth_window: 20
  print_batch_step: 2
  save_model_dir: ./output/east_r50_vd/
  save_epoch_step: 1000
  # evaluation is run every 5000 iterations after the 4000th iteration
  eval_batch_step: [4000, 5000]
  cal_metric_during_train: False
  pretrained_model:
  checkpoints:
  save_inference_dir:
  use_visualdl: False
  infer_img:
  save_res_path: ./output/det_east/predicts_east.txt

Architecture:
  model_type: det
  algorithm: EAST
  Transform:
  Backbone:
    name: ResNet
    layers: 50
  Neck:
    name: EASTFPN
    model_name: large
  Head:
    name: EASTHead
    model_name: large

Loss:
  name: EASTLoss

Optimizer:
  name: Adam
  beta1: 0.9
  beta2: 0.999
  lr:
    # name: Cosine
    learning_rate: 0.001
    # warmup_epoch: 0
  regularizer:
    name: 'L2'
    factor: 0

PostProcess:
  name: EASTPostProcess
  score_thresh: 0.8
  cover_thresh: 0.1
  nms_thresh: 0.2

Metric:
  name: DetMetric
  main_indicator: hmean

Train:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/icdar2015/text_localization/
    label_file_list:
      - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
    ratio_list: [1.0]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - DetLabelEncode: # Class handling label
      - EASTProcessTrain:
          image_shape: [512, 512]
          background_ratio: 0.125
          min_crop_side_ratio: 0.1
          min_text_size: 10
      - KeepKeys:
          keep_keys: ['image', 'score_map', 'geo_map', 'training_mask'] # dataloader will return list in this order
  loader:
    shuffle: True
    drop_last: False
    batch_size_per_card: 8
    num_workers: 8

Eval:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/icdar2015/text_localization/
    label_file_list:
      - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - DetLabelEncode: # Class handling label
      - DetResizeForTest:
          limit_side_len: 2400
          limit_type: max
      - NormalizeImage:
          scale: 1./255.
          mean: [0.485, 0.456, 0.406]
          std: [0.229, 0.224, 0.225]
          order: 'hwc'
      - ToCHWImage:
      - KeepKeys:
          keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 1 # must be 1
    num_workers: 2
@ -0,0 +1,51 @@
===========================train_params===========================
model_name:det_r50_vd_east_v2.0
python:python3.7
gpu_list:0
Global.use_gpu:True|True
Global.auto_cast:fp32
Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=500
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4
Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/
null:null
##
trainer:norm_train
norm_train:tools/train.py -c test_tipc/configs/det_r50_vd_east_v2.0/det_r50_vd_east.yml -o
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:null
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.pretrained_model:
norm_export:tools/export_model.py -c test_tipc/configs/det_r50_vd_east_v2.0/det_r50_vd_east.yml -o
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
##
train_model:./inference/det_mv3_east/best_accuracy
infer_export:tools/export_model.py -c test_tipc/configs/det_r50_vd_east_v2.0/det_r50_vd_east.yml -o
infer_quant:False
inference:tools/infer/predict_det.py
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:fp32|fp16|int8
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
--save_log_path:null
--benchmark:True
--det_algorithm:EAST
@ -16,7 +16,7 @@ The main program for the Lite\_arm\_cpp prediction functional test is `test_lite_arm_cpp.sh`, which can

| Model type | batch-size | threads | Number of predictors | Prediction library source | Test hardware |
| :----: | :----: | :----: | :----: | :----: | :----: |
| Normal model / quantized model | 1 | 1/4 | single / multiple | download | ARM\_CPU/ARM\_GPU_OPENCL |
| Normal model / quantized model | 1 | 1/4 | single / multiple | download / compile | ARM\_CPU/ARM\_GPU_OPENCL |


## 2. Test procedure
@ -30,8 +30,11 @@ The main program for the Lite\_arm\_cpp prediction functional test is `test_lite_arm_cpp.sh`, which can

```shell

# Prepare the data and models
bash test_tipc/prepare_lite_cpp.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
# Prepare the data, models, and Paddle-Lite prediction library
# Prediction library obtained by downloading
bash test_tipc/prepare_lite_cpp.sh ./test_tipc/configs/ch_PP-OCRv2_det/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt download
# Prediction library obtained by compiling
bash test_tipc/prepare_lite_cpp.sh ./test_tipc/configs/ch_PP-OCRv2_det/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt compile

# Test on the phone:
bash test_lite_arm_cpp.sh model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
@ -42,8 +45,11 @@ bash test_lite_arm_cpp.sh model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt

```shell

# Prepare the data and models
bash test_tipc/prepare_lite_cpp.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_gpu_opencl.txt
# Prepare the data, models, and Paddle-Lite prediction library
# Prediction library obtained by downloading
bash test_tipc/prepare_lite_cpp.sh ./test_tipc/configs/ch_PP-OCRv2_det/model_linux_gpu_normal_normal_lite_cpp_arm_gpu_opencl.txt download
# Prediction library obtained by compiling
bash test_tipc/prepare_lite_cpp.sh ./test_tipc/configs/ch_PP-OCRv2_det/model_linux_gpu_normal_normal_lite_cpp_arm_gpu_opencl.txt compile

# Test on the phone:
bash test_lite_arm_cpp.sh model_linux_gpu_normal_normal_lite_cpp_arm_gpu_opencl.txt
@ -53,9 +59,7 @@ bash test_lite_arm_cpp.sh model_linux_gpu_normal_normal_lite_cpp_arm_gpu_opencl.

**Note**:

1. Running this project requires commands such as bash, which the traditional adb approach cannot install well, so we recommend connecting to the computer through a virtual terminal opened on the phone; see [Connecting an Android phone to a computer with termux](./termux_for_android.md).

2. To test the complete text detection and recognition pipeline, replace the configuration file with `test_tipc/configs/ppocr_system_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt` when running `prepare_lite_cpp.sh`, and use the same file during the on-device test stage.
Running this project requires commands such as bash, which the traditional adb approach cannot install well, so we recommend connecting to the computer through a virtual terminal opened on the phone; see [Connecting an Android phone to a computer with termux](./termux_for_android.md).

### 2.2 Run results

@ -6,6 +6,7 @@ dataline=$(cat ${FILENAME})
IFS=$'\n'
lines=(${dataline})
IFS=$'\n'
paddlelite_library_source=$2

inference_cmd=$(func_parser_value "${lines[1]}")
DEVICE=$(func_parser_value "${lines[2]}")
@ -13,40 +14,42 @@ det_lite_model_list=$(func_parser_value "${lines[3]}")
rec_lite_model_list=$(func_parser_value "${lines[4]}")
cls_lite_model_list=$(func_parser_value "${lines[5]}")

if [[ $inference_cmd =~ "det" ]];then
if [[ $inference_cmd =~ "det" ]]; then
    lite_model_list=${det_lite_model_list}
elif [[ $inference_cmd =~ "rec" ]];then
elif [[ $inference_cmd =~ "rec" ]]; then
    lite_model_list=(${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
elif [[ $inference_cmd =~ "system" ]];then
elif [[ $inference_cmd =~ "system" ]]; then
    lite_model_list=(${det_lite_model_list[*]} ${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
else
    echo "inference_cmd is wrong, please check."
    exit 1
fi

if [ ${DEVICE} = "ARM_CPU" ];then
if [ ${DEVICE} = "ARM_CPU" ]; then
    valid_targets="arm"
    paddlelite_url="https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10-rc/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz"
    paddlelite_library_url="https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10-rc/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz"
    end_index="66"
elif [ ${DEVICE} = "ARM_GPU_OPENCL" ];then
    compile_with_opencl="OFF"
elif [ ${DEVICE} = "ARM_GPU_OPENCL" ]; then
    valid_targets="opencl"
    paddlelite_url="https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10-rc/inference_lite_lib.armv8.clang.with_exception.with_extra.with_cv.opencl.tar.gz"
    paddlelite_library_url="https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10-rc/inference_lite_lib.armv8.clang.with_exception.with_extra.with_cv.opencl.tar.gz"
    end_index="71"
    compile_with_opencl="ON"
else
    echo "DEVICE only suport ARM_CPU, ARM_GPU_OPENCL."
    echo "DEVICE only support ARM_CPU, ARM_GPU_OPENCL."
    exit 2
fi

# prepare lite .nb model
# prepare paddlelite model
pip install paddlelite==2.10-rc
current_dir=${PWD}
IFS="|"
model_path=./inference_models

for model in ${lite_model_list[*]}; do
    if [[ $model =~ "PP-OCRv2" ]];then
    if [[ $model =~ "PP-OCRv2" ]]; then
        inference_model_url=https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/${model}.tar
    elif [[ $model =~ "v2.0" ]];then
    elif [[ $model =~ "v2.0" ]]; then
        inference_model_url=https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/${model}.tar
    else
        echo "Model is wrong, please check."
@ -63,31 +66,42 @@ done

# prepare test data
data_url=https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar
model_path=./inference_models
inference_model=${inference_model_url##*/}
data_file=${data_url##*/}
wget -nc -P ./inference_models ${inference_model_url}
wget -nc -P ./test_data ${data_url}
cd ./inference_models && tar -xf ${inference_model} && cd ../
cd ./test_data && tar -xf ${data_file} && rm ${data_file} && cd ../

# prepare lite env
paddlelite_zipfile=$(echo $paddlelite_url | awk -F "/" '{print $NF}')
paddlelite_file=${paddlelite_zipfile:0:${end_index}}
wget ${paddlelite_url} && tar -xf ${paddlelite_zipfile}
mkdir -p ${paddlelite_file}/demo/cxx/ocr/test_lite
cp -r ${model_path}/*_opt.nb test_data ${paddlelite_file}/demo/cxx/ocr/test_lite
cp ppocr/utils/ppocr_keys_v1.txt deploy/lite/config.txt ${paddlelite_file}/demo/cxx/ocr/test_lite
cp -r ./deploy/lite/* ${paddlelite_file}/demo/cxx/ocr/
cp ${paddlelite_file}/cxx/lib/libpaddle_light_api_shared.so ${paddlelite_file}/demo/cxx/ocr/test_lite
cp ${FILENAME} test_tipc/test_lite_arm_cpp.sh test_tipc/common_func.sh ${paddlelite_file}/demo/cxx/ocr/test_lite
cd ${paddlelite_file}/demo/cxx/ocr/
# prepare paddlelite predict library
if [[ ${paddlelite_library_source} = "download" ]]; then
    paddlelite_library_zipfile=$(echo $paddlelite_library_url | awk -F "/" '{print $NF}')
    paddlelite_library_file=${paddlelite_library_zipfile:0:${end_index}}
    wget ${paddlelite_library_url} && tar -xf ${paddlelite_library_zipfile}
    cd ${paddlelite_library_zipfile}
elif [[ ${paddlelite_library_source} = "compile" ]]; then
    git clone -b release/v2.10 https://github.com/PaddlePaddle/Paddle-Lite.git
    cd Paddle-Lite
    ./lite/tools/build_android.sh --arch=armv8 --with_cv=ON --with_extra=ON --toolchain=clang --with_opencl=${compile_with_opencl}
    cd ../
    cp -r Paddle-Lite/build.lite.android.armv8.clang/inference_lite_lib.android.armv8/ .
    paddlelite_library_file=inference_lite_lib.android.armv8
else
    echo "paddlelite_library_source only support 'download' and 'compile'"
    exit 3
fi

# organize the required files
mkdir -p ${paddlelite_library_file}/demo/cxx/ocr/test_lite
cp -r ${model_path}/*_opt.nb test_data ${paddlelite_library_file}/demo/cxx/ocr/test_lite
cp ppocr/utils/ppocr_keys_v1.txt deploy/lite/config.txt ${paddlelite_library_file}/demo/cxx/ocr/test_lite
cp -r ./deploy/lite/* ${paddlelite_library_file}/demo/cxx/ocr/
cp ${paddlelite_library_file}/cxx/lib/libpaddle_light_api_shared.so ${paddlelite_library_file}/demo/cxx/ocr/test_lite
cp ${FILENAME} test_tipc/test_lite_arm_cpp.sh test_tipc/common_func.sh ${paddlelite_library_file}/demo/cxx/ocr/test_lite
cd ${paddlelite_library_file}/demo/cxx/ocr/
git clone https://github.com/cuicheng01/AutoLog.git

# make
# compile and do some postprocess
make -j
sleep 1
make -j
cp ocr_db_crnn test_lite && cp test_lite/libpaddle_light_api_shared.so test_lite/libc++_shared.so
tar -cf test_lite.tar ./test_lite && cp test_lite.tar ${current_dir} && cd ${current_dir}
rm -rf ${paddlelite_file}* && rm -rf ${model_path}
rm -rf ${paddlelite_library_file}* && rm -rf ${model_path}
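
Note: the shell above derives the extracted Paddle-Lite directory name with `${paddlelite_library_zipfile:0:${end_index}}`, i.e. by cutting the archive filename at a per-device character count (66 for the ARM_CPU archive, 71 for the OpenCL one). Stripping the `.tar.gz` suffix is an equivalent, length-independent way to get the same name, sketched here in Python purely for illustration:

```python
url = ("https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10-rc/"
       "inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz")

zipfile_name = url.rsplit("/", 1)[-1]         # what awk -F "/" '{print $NF}' extracts
library_dir = zipfile_name[:-len(".tar.gz")]  # what ${zipfile:0:66} yields for this URL
print(library_dir)  # inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv
```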

tools/infer/utility.py (Executable file → Normal file)
@ -190,6 +190,7 @@ def create_predictor(args, mode, logger):
        config.enable_use_gpu(args.gpu_mem, 0)
        if args.use_tensorrt:
            config.enable_tensorrt_engine(
                workspace_size=1 << 30,
                precision_mode=precision,
                max_batch_size=args.max_batch_size,
                min_subgraph_size=args.min_subgraph_size)
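
Note: the added `workspace_size=1 << 30` gives the TensorRT engine a 1 GiB workspace. A standalone sketch of the Paddle Inference setup this part of `create_predictor()` performs (the model paths are placeholders, and `enable_tensorrt_engine` requires a TensorRT-enabled Paddle build):

```python
from paddle import inference

config = inference.Config("inference/det_east/inference.pdmodel",
                          "inference/det_east/inference.pdiparams")
config.enable_use_gpu(500, 0)  # 500 MB initial GPU memory on GPU 0
config.enable_tensorrt_engine(
    workspace_size=1 << 30,    # the line added by this commit
    precision_mode=inference.PrecisionType.Float32,
    max_batch_size=10,
    min_subgraph_size=15)
predictor = inference.create_predictor(config)
```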
@ -310,10 +311,6 @@ def create_predictor(args, mode, logger):


def get_infer_gpuid():
    cmd = "nvidia-smi"
    res = os.popen(cmd).readlines()
    if len(res) == 0:
        return None
    cmd = "env | grep CUDA_VISIBLE_DEVICES"
    env_cuda = os.popen(cmd).readlines()
    if len(env_cuda) == 0: