Compare commits
120 Commits: v2.9.0 ... release/2.

Commit SHA1 hashes:

c833dfc9ff 65d35f92e2 7d573cffd7 a916057501 b1f6c210b3 b938161236 9884073e71 1f47bca525 df54a7b422 0105dfc90d
f04871ed3e 1e11f25409 2b7b9dc2cf d14ad02541 b13f996076 d65842fe5a 43753204b6 8967e63392 1dad0a980f fac03876f3
5128ceaf74 61b3fee291 f8cbbce65c d1f3ca0691 1213cb2171 f022613e29 795c81f183 b06840d493 890230e1f3 a2ae303805
ebfbb80dcf b8888627a5 1fa3575030 4b275f1857 8c9f43f44f d983e555fd 3a0542e7d6 03ceab9744 d37209e552 df064f4538
991fb7c46a 44316ac7fd 013870d9bc 4b8e333f10 7bae3db2ec 3fcf12be85 2be9fe763e 9c22490f95 15d7ef5765 d8d76d59c9
366ad29d6c 04457a8043 e904b54c52 b61ee47066 a923f35de5 0850586667 d78b945608 7ce0a5fc2e 14adbd09f2 0ed9d8889f
b20014e89a 133d67f27d 49fef42292 a9109b9921 8de2ebf258 29cdda4eda 4aff082c5b 1514659979 303e81b0b4 c811e62689
ecd29bd28b 5e911afa37 418d80c948 0e1148312b f9d0948b0c 36bf83239d b4c25a3663 90004fe6af 3560ff3a78 1c40b84e4d
9a6ebb8682 7e7630eb67 23e034c40e b28af5d865 fa385979ee 42e9130eb7 ae9cd5bba1 34b9569800 6eb5d3effd 0c3c48147e
be800b27c4 5b27fa5e41 7a9cfaad9d b9c17d6990 24a362d253 6e4ba8dd0f 119b5e9bed 61fabdc09b 4dd3d01d75 9873d47514
0dc1556699 d1a3c1bc1f 7b61b8f3d0 44e60e81cc 6660e3b495 0c69f1f3f1 f36aa02670 82ae953c68 0704639187 5f06a8068e
4604e76880 37f2205d4a 84ec4503ef 00366ade49 9d9591533a f8b3b68293 b8cde49ee3 b1e37fa42c 6228d1ee13 eae28dfcc6
@@ -0,0 +1,17 @@
---
name: New Feature Issue template
about: Issue template for new features.
title: ''
labels: 'Code PR is needed'
assignees: 'shiyutang'

---

## Background

Through the requirements solicitation in https://github.com/PaddlePaddle/PaddleOCR/issues/10334 and the weekly technical seminar in https://github.com/PaddlePaddle/PaddleOCR/issues/10223, we have confirmed the XXXX task.

## Steps to resolve
1. Convert the network structure and evaluation metrics based on the open-source code. Code link: XXXX
2. Following the [paper reproduction guide](https://github.com/PaddlePaddle/models/blob/release%2F2.2/tutorials/article-implementation/ArticleReproduction_CV.md), align the forward and backward passes and reach the metrics in Table 1 of the paper.
3. Submit a code PR to ppocr according to the [PR submission guidelines](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6/doc/doc_ch/code_and_doc.md).
@@ -0,0 +1,34 @@
# This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time.
#
# You can adjust the behavior by modifying this file.
# For more information, see:
# https://github.com/actions/stale
name: Mark stale issues and pull requests

on:
  schedule:
    - cron: '30 1 * * *'
  push:
    branches:
      - release/2.6

jobs:
  stale:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: none

    steps:
      - uses: actions/stale@v8
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-issue-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed in 7 days if no further activity occurs. Thank you for your contributions.'
          stale-issue-label: 'stale'
          close-issue-reason: 'completed'
          exempt-issue-labels: 'bug, feature request, good first issue'
          operations-per-run: 900
          days-before-pr-stale: -1
          days-before-pr-close: -1
          ascending: true
LICENSE (4 changed lines)
@@ -1,3 +1,5 @@
+Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+
                                  Apache License
                            Version 2.0, January 2004
                         http://www.apache.org/licenses/

@@ -186,7 +188,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.

-   Copyright [yyyy] [name of copyright owner]
+   Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
@@ -1617,8 +1617,9 @@ class MainWindow(QMainWindow):
             key_cls = 'None' if not self.kie_mode else box.get('key_cls', 'None')
             shapes.append((box['transcription'], box['points'], None, key_cls, box.get('difficult', False)))

-        self.loadLabels(shapes)
-        self.canvas.verified = False
+        if shapes != []:
+            self.loadLabels(shapes)
+            self.canvas.verified = False

     def validFilestate(self, filePath):
         if filePath not in self.fileStatedict.keys():
@@ -2203,7 +2204,7 @@ class MainWindow(QMainWindow):
            msg = 'Can not recognise the detection box in ' + self.filePath + '. Please change manually'
            QMessageBox.information(self, "Information", msg)
            return
-        result = self.ocr.ocr(img_crop, cls=True, det=False)
+        result = self.ocr.ocr(img_crop, cls=True, det=False)[0]
        if result[0][0] != '':
            if shape.line_color == DEFAULT_LOCK_COLOR:
                shape.label = result[0][0]
@@ -2264,7 +2265,7 @@ class MainWindow(QMainWindow):
            msg = 'Can not recognise the detection box in ' + self.filePath + '. Please change manually'
            QMessageBox.information(self, "Information", msg)
            return
-        result = self.ocr.ocr(img_crop, cls=True, det=False)
+        result = self.ocr.ocr(img_crop, cls=True, det=False)[0]
        if result[0][0] != '':
            result.insert(0, box)
            print('result in reRec is ', result)
@@ -2415,12 +2416,12 @@ class MainWindow(QMainWindow):
            # merge the text result in the cell
            texts = ''
            probs = 0.  # the probability of the cell is the average prob of every text box in the cell
-            bboxes = self.ocr.ocr(img_crop, det=True, rec=False, cls=False)
+            bboxes = self.ocr.ocr(img_crop, det=True, rec=False, cls=False)[0]
            if len(bboxes) > 0:
                bboxes.reverse()  # top-row text first
                for _bbox in bboxes:
                    patch = get_rotate_crop_image(img_crop, np.array(_bbox, np.float32))
-                    rec_res = self.ocr.ocr(patch, det=False, rec=True, cls=False)
+                    rec_res = self.ocr.ocr(patch, det=False, rec=True, cls=False)[0]
                    text = rec_res[0][0]
                    if text != '':
                        texts += text + ('' if text[0].isalpha() else ' ')  # add a space between English words
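The repeated `[0]` additions in the hunks above all track the same upstream API change: in recent paddleocr releases (assumed here to be 2.6 and later), `PaddleOCR.ocr()` returns one result list per input image, so single-image callers must unwrap the outer list. A minimal sketch of the assumed shapes; the file name is a placeholder:

```python
# Sketch of the assumed return shape behind the [0] edits above.
from paddleocr import PaddleOCR

ocr = PaddleOCR(use_angle_cls=True)

# rec-only call: one entry per input image, each a list of (text, score)
result = ocr.ocr('example.jpg', cls=True, det=False)
rec_for_image = result[0]            # results for the first (only) image
text, confidence = rec_for_image[0]  # best transcription and its score
```

Without the unwrap, `result[0][0]` would index into the per-image list rather than a `(text, score)` pair, which is exactly the mismatch these hunks fix.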
@@ -103,11 +103,11 @@ python PPOCRLabel.py --kie True # [KIE mode] for [detection + recognition + keyword extraction] labeling
 ```

 #### 1.2.3 Build and Install the Whl Package Locally
-Compile and install a new whl package, where 1.0.2 is the version number; you can specify a new version in `setup.py`.
+Compile and install a new whl package, where 0.0.0 is the version number; you can specify a new version in `setup.py`.
 ```bash
 cd ./PPOCRLabel
 python3 setup.py bdist_wheel
-pip3 install dist/PPOCRLabel-2.1.2-py2.py3-none-any.whl
+pip3 install dist/PPOCRLabel-0.0.0-py2.py3-none-any.whl
 ```
@@ -157,7 +157,7 @@ labeling in the Excel file, the recommended steps are:

 4. ***Adjust cell order:*** Click the menu `View` - `Show Box Number` to display the box ordinal numbers, then drag the results under the 'Recognition Results' column on the right side of the interface so that the box numbers are arranged from left to right, top to bottom.

-5. Export JSON format annotation: close all Excel files corresponding to table images, click `File` - `Export table JSON annotation` to obtain the JSON annotation results.
+5. Export JSON format annotation: close all Excel files corresponding to table images, click `File - Export Table Label` to obtain the `gt.txt` annotation results.

 ### 2.3 Note
@@ -101,12 +101,12 @@ python PPOCRLabel.py --lang ch

 #### 1.2.3 Build and install the whl package locally

-Compile and install a new whl package, where 1.0.2 is the version number; a new version can be specified in `setup.py`.
+Compile and install a new whl package, where 0.0.0 is the version number; a new version can be specified in `setup.py`.

 ```bash
 cd ./PPOCRLabel
 python3 setup.py bdist_wheel
-pip3 install dist/PPOCRLabel-2.1.2-py2.py3-none-any.whl -i https://mirror.baidu.com/pypi/simple
+pip3 install dist/PPOCRLabel-0.0.0-py2.py3-none-any.whl -i https://mirror.baidu.com/pypi/simple
 ```
@@ -126,7 +126,7 @@ pip3 install dist/PPOCRLabel-2.1.2-py2.py3-none-any.whl -i https://mirror.baidu.
 9. Delete: click "Delete Image" and the image will be moved to the recycle bin.
 10. Export results: users can export manually via "File - Export Label" in the menu, or click "File - Auto Export Label Mode" to enable automatic export. Manually confirmed labels are stored in *Label.txt* under the opened image folder. Clicking "File" - "Export Recognition Result" in the menu bar saves the recognition training data of such images in the *crop_img* folder and the recognition labels in *rec_gt.txt*<sup>[4]</sup>.

-### 2.2 Table Annotation
+### 2.2 Table Annotation ([video demo](https://www.bilibili.com/video/BV1wR4y1v7JE/?share_source=copy_web&vd_source=cf1f9d24648d49636e3d109c9f9a377d&t=1998))
 Table annotation targets the structured extraction of tables, converting tables in images to Excel format, so annotation is done together with an external Excel application. In PPOCRLabel, annotate the text information of the table (text and position); in the Excel file, annotate the table structure information. The recommended steps are:
 1. Table recognition: after opening a table image, click the `Table Recognition` button in the upper-right corner of the software. The software calls the table recognition model in PP-Structure to automatically label the table and pops up an Excel window.
@@ -139,7 +139,7 @@ pip3 install dist/PPOCRLabel-2.1.2-py2.py3-none-any.whl -i https://mirror.baidu.

 4. Annotate table structure: **in the external Excel application, mark cells that contain text with any identifier (such as `1`)**, making sure the cell merging in Excel matches the original image (the cell text in Excel does not need to be identical to the text in the image).

-5. Export JSON format: close all Excel files corresponding to table images, click `File` - `Export table JSON annotation` to obtain the JSON annotation results.
+5. Export JSON format: close all Excel files corresponding to table images, click `File` - `Export Table Label` to generate the gt.txt annotation file.

 ### 2.3 Note
|
@ -40,7 +40,7 @@ class Worker(QThread):
|
|||
if self.model == 'paddle':
|
||||
h, w, _ = cv2.imdecode(np.fromfile(Imgpath, dtype=np.uint8), 1).shape
|
||||
if h > 32 and w > 32:
|
||||
self.result_dic = self.ocr.ocr(Imgpath, cls=True, det=True)
|
||||
self.result_dic = self.ocr.ocr(Imgpath, cls=True, det=True)[0]
|
||||
else:
|
||||
print('The size of', Imgpath, 'is too small to be recognised')
|
||||
self.result_dic = None
|
||||
|
|
|
@ -33,10 +33,10 @@ setup(
|
|||
package_dir={'PPOCRLabel': ''},
|
||||
include_package_data=True,
|
||||
entry_points={"console_scripts": ["PPOCRLabel= PPOCRLabel.PPOCRLabel:main"]},
|
||||
version='2.1.2',
|
||||
version='2.1.3',
|
||||
install_requires=requirements,
|
||||
license='Apache License 2.0',
|
||||
description='PPOCRLabel is a semi-automatic graphic annotation tool suitable for OCR field, with built-in PPOCR model to automatically detect and re-recognize data. It is written in python3 and pyqt5, supporting rectangular box annotation and four-point annotation modes. Annotations can be directly used for the training of PPOCR detection and recognition models',
|
||||
description='PPOCRLabelv2 is a semi-automatic graphic annotation tool suitable for OCR field, with built-in PP-OCR model to automatically detect and re-recognize data. It is written in Python3 and PyQT5, supporting rectangular box, table, irregular text and key information annotation modes. Annotations can be directly used for the training of PP-OCR detection and recognition models.',
|
||||
long_description=readme(),
|
||||
long_description_content_type='text/markdown',
|
||||
url='https://github.com/PaddlePaddle/PaddleOCR',
|
||||
|
|
README.md (10 changed lines)
@@ -26,12 +26,11 @@ PaddleOCR aims to create multilingual, awesome, leading, and practical OCR tools
 </div>

 ## 📣 Recent updates
-- 💥 **Live Preview: Oct 24 - Oct 26, China Standard Time, 20:30**, Engineers@PaddleOCR will show PP-StructureV2 optimization strategy for 3 days.
-  - Scan the QR code below using WeChat, follow the PaddlePaddle official account and fill out the questionnaire to join the WeChat group, get the live link and 20G OCR learning materials (including PDF2Word application, 10 models in vertical scenarios, etc.)
+- 🔨**2022.11 Add implementation of [4 cutting-edge algorithms](doc/doc_en/algorithm_overview_en.md)**: Text Detection [DRRG](doc/doc_en/algorithm_det_drrg_en.md), Text Recognition [RFL](./doc/doc_en/algorithm_rec_rfl_en.md), Image Super-Resolution [Text Telescope](doc/doc_en/algorithm_sr_telescope_en.md), Handwritten Mathematical Expression Recognition [CAN](doc/doc_en/algorithm_rec_can_en.md)
+- **2022.10 Release [optimized JS version PP-OCRv3 model](./deploy/paddlejs/README.md)** with 4.3M model size, 8x faster inference time, and a ready-to-use web demo
+- 💥 **Live Playback: Introduction to PP-StructureV2 optimization strategy**. Scan [the QR code below](#Community) using WeChat, follow the PaddlePaddle official account and fill out the questionnaire to join the WeChat group, get the live link and 20G OCR learning materials (including PDF2Word application, 10 models in vertical scenarios, etc.)

 <div align="center">
 <img src="https://user-images.githubusercontent.com/50011306/196944258-0eb82df1-d730-4b96-a350-c1d370fdc2b1.jpg" width = "150" height = "150" />
 </div>

 - **🔥2022.8.24 Release PaddleOCR [release/2.6](https://github.com/PaddlePaddle/PaddleOCR/tree/release/2.6)**
   - Release [PP-StructureV2](./ppstructure/), with functions and performance fully upgraded, adapted to Chinese scenes, and new support for [Layout Recovery](./ppstructure/recovery) and **one line command to convert PDF to Word**;

@@ -74,6 +73,7 @@ PaddleOCR support a variety of cutting-edge algorithms related to OCR, and developed
 - [Dive Into OCR ](./doc/doc_en/ocr_book_en.md)

+<a name="Community"></a>

 ## 👫 Community

 - For international developers, we regard [PaddleOCR Discussions](https://github.com/PaddlePaddle/PaddleOCR/discussions) as our international community platform. All ideas and questions can be discussed here in English.
README_ch.md (37 changed lines)
@@ -27,28 +27,22 @@ PaddleOCR aims to build a rich, leading, and practical OCR toolkit that helps

 ## 📣 Recent updates

-- **💥 Live preview: Oct 24-26, 8:30 pm nightly**, the PaddleOCR R&D team explains the PP-StructureV2 optimization strategy in detail. Scan the QR code below with WeChat, follow the official account and fill out the questionnaire to join the official user group, and get the live-stream link and a 20G OCR learning package (including a PDF-to-Word application, 10 vertical-scenario models, the *Dive into OCR* e-book, and more)
-
-<div align="center">
-<img src="https://user-images.githubusercontent.com/50011306/196944258-0eb82df1-d730-4b96-a350-c1d370fdc2b1.jpg" width = "150" height = "150" />
-</div>
+- **🔥2023.3.10 PaddleOCR integrates FastDeploy, a high-performance all-scenario model deployment solution. See the [guide](https://github.com/PaddlePaddle/PaddleOCR/tree/dygraph/deploy/fastdeploy) to try it (note: use the dygraph branch).**
+- 📚**2022.12 Release of the [*20 Lectures on OCR Industry Cases* e-book](./applications/README.md)**, adding **7 scenario application cases** such as Mongolian, ID cards, and LCD-screen defects
+- 🔨**2022.11 Add implementations of [4 cutting-edge algorithms](doc/doc_ch/algorithm_overview.md)**: text detection [DRRG](doc/doc_ch/algorithm_det_drrg.md), text recognition [RFL](doc/doc_ch/algorithm_rec_rfl.md), text super-resolution [Text Telescope](doc/doc_ch/algorithm_sr_telescope.md), formula recognition [CAN](doc/doc_ch/algorithm_rec_can.md)
+- **2022.10 Optimize the [JS version of the PP-OCRv3 model](./deploy/paddlejs/README_ch.md)**: only 4.3M in size, 8x faster prediction, with a ready-to-use web demo
+- **💥 Live playback: the PaddleOCR R&D team explains the PP-StructureV2 optimization strategy**. Scan [the QR code below](#开源社区) with WeChat, follow the official account and fill out the questionnaire to join the official user group, and get the playback link and the 20G OCR learning package (including a PDF-to-Word application, 10 vertical-scenario models, the *Dive into OCR* e-book, and more)

 - **🔥2022.8.24 Release PaddleOCR [release/2.6](https://github.com/PaddlePaddle/PaddleOCR/tree/release/2.6)**
   - Release [PP-StructureV2](./ppstructure/README_ch.md) with fully upgraded functionality and performance, adapted to Chinese scenes, plus new support for [layout recovery](./ppstructure/recovery/README_ch.md) and **one-line-command PDF-to-Word conversion**;
   - [Layout analysis](./ppstructure/layout/README_ch.md) model optimization: 95% smaller model storage, 11x faster, average CPU latency of only 41 ms;
   - [Table recognition](./ppstructure/table/README_ch.md) model optimization: 3 optimization strategies, 6% higher accuracy at unchanged prediction cost;
   - [Key information extraction](./ppstructure/kie/README_ch.md) model optimization: vision-independent model structure, +2.8% semantic entity recognition accuracy, +9.1% relation extraction accuracy.

-- **🔥2022.8 Release of the [OCR scenario application collection](./applications)**
-
-  - Contains **9 vertical models** for digital tubes, LCD screens, license plates, a high-accuracy SVTR model, handwriting recognition, and more, covering the main OCR vertical applications in the general, manufacturing, finance, and transportation industries.
-
-- **2022.8 Add implementations of [8 cutting-edge algorithms](doc/doc_ch/algorithm_overview.md)**
-  - Text detection: [FCENet](doc/doc_ch/algorithm_det_fcenet.md), [DB++](doc/doc_ch/algorithm_det_db.md)
-  - Text recognition: [ViTSTR](doc/doc_ch/algorithm_rec_vitstr.md), [ABINet](doc/doc_ch/algorithm_rec_abinet.md), [VisionLAN](doc/doc_ch/algorithm_rec_visionlan.md), [SPIN](doc/doc_ch/algorithm_rec_spin.md), [RobustScanner](doc/doc_ch/algorithm_rec_robustscanner.md)
-  - Table recognition: [TableMaster](doc/doc_ch/algorithm_table_master.md)
-
+- **2022.8 Release of the [OCR scenario application collection](./applications)**: contains **9 vertical models** for digital tubes, LCD screens, license plates, a high-accuracy SVTR model, handwriting recognition, and more, covering the main OCR vertical applications in the general, manufacturing, finance, and transportation industries.
+- **2022.8 Add implementations of [8 cutting-edge algorithms](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6rc/doc/doc_ch/algorithm_overview.md)**
+  - Text detection: [FCENet](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6rc/doc/doc_ch/algorithm_det_fcenet.md), [DB++](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6rc/doc/doc_ch/algorithm_det_db.md)
+  - Text recognition: [ViTSTR](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6rc/doc/doc_ch/algorithm_rec_vitstr.md), [ABINet](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6rc/doc/doc_ch/algorithm_rec_abinet.md), [VisionLAN](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6rc/doc/doc_ch/algorithm_rec_visionlan.md), [SPIN](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6rc/doc/doc_ch/algorithm_rec_spin.md), [RobustScanner](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6rc/doc/doc_ch/algorithm_rec_robustscanner.md)
+  - Table recognition: [TableMaster](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6rc/doc/doc_ch/algorithm_table_master.md)

 - **2022.5.9 Release PaddleOCR [release/2.5](https://github.com/PaddlePaddle/PaddleOCR/tree/release/2.5)**
   - Release [PP-OCRv3](./doc/doc_ch/ppocr_introduction.md#pp-ocrv3): with comparable speed, Chinese-scene accuracy improves another 5% over PP-OCRv2, English-scene accuracy improves 11%, and the average recognition accuracy of the 80-language multilingual models improves by more than 5%;

@@ -79,23 +73,22 @@ PaddleOCR aims to build a rich, leading, and practical OCR toolkit that helps
 ## 📚 *Dive into OCR* e-book
 - [*Dive into OCR* e-book](./doc/doc_ch/ocr_book.md)

+<a name="开源社区"></a>

 ## 👫 Open-source community
 - **📑 Project cooperation:** If you are an enterprise developer with a clear OCR vertical application need, fill out the [questionnaire](https://paddle.wjx.cn/vj/QwF7GKw.aspx) to start cooperation with the official team at different levels, free of charge.
-- **👫 Join the community:** Scan the QR code with WeChat and fill out the questionnaire to join the user group and claim the 20G OCR learning package
-  - **Including the *Dive into OCR* e-book**, with companion explainer videos and notebook projects; videos of past PaddleOCR release live classes;
+- **👫 Join the community:** **Scan the QR code with WeChat and fill out the questionnaire to join the user group and claim the 20G OCR learning package**
+  - **Including the *Dive into OCR* e-book**, with companion explainer videos and notebook projects; **playback links for past PaddleOCR release live classes**;
   - **OCR scenario application model collection:** vertical models for digital tubes, LCD screens, license plates, a high-accuracy SVTR model, handwriting recognition, and more, covering the main OCR vertical applications in the general, manufacturing, finance, and transportation industries.
   - The PDF2Word application; videos of outstanding community developer projects.
 - **🏅️ Community projects**: the [community projects](./doc/doc_ch/thirdparty.md) document collects the **tools and applications built with PaddleOCR by community users**, as well as **features contributed to PaddleOCR and improved docs and code**. It is an honor wall the team maintains for community developers, and a broadcast station that helps promote quality projects.
 - **🎁 Community regular competition**: a points-based contest for OCR developers covering four categories (documentation, code, models, and applications), judged and rewarded quarterly. See the [link](https://github.com/PaddlePaddle/PaddleOCR/issues/4982) for topics and registration.

 <div align="center">
-<img src="https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/dygraph/doc/joinus.PNG" width = "150" height = "150" />
+<img src="https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/dygraph/doc/joinus.PNG" width = "150" height = "150",caption='' />
 <p>QR code of the official PaddleOCR user group</p>
 </div>

 <a name="模型下载"></a>
 ## 🛠️ PP-OCR series model list (updating)
|
@ -1,29 +1,30 @@
|
|||
[English](README_en.md) | 简体中文
|
||||
|
||||
# 场景应用
|
||||
# OCR产业范例20讲
|
||||
|
||||
PaddleOCR场景应用覆盖通用,制造、金融、交通行业的主要OCR垂类应用,在PP-OCR、PP-Structure的通用能力基础之上,以notebook的形式展示利用场景数据微调、模型优化方法、数据增广等内容,为开发者快速落地OCR应用提供示范与启发。
|
||||
PaddleOCR场景应用覆盖通用,制造、金融、交通等行业的主要OCR垂类应用,基于PP-OCR、PP-Structure的通用能力和各类垂类场景中落地的经验,PaddleOCR联合**北京师范大学副教授柯永红、云南省能源投资集团财务有限公司智能化项目经理钟榆星、信雅达科技股份有限公司高级研发工程师张少华、郑州三晖电气股份有限公司工程师郭媛媛、福建中烟工业有限责任公司工程师顾茜、内蒙古阿尔泰电子信息技术有限公司CTO欧日乐克、安科私(北京)科技有限公司创始人柯双喜等产学研同仁共同开源《OCR产业范例20讲》电子书**,通过Notebook的形式系统展示OCR在产业界应用的具体场景的调优过程与落地经验,为开发者快速落地OCR应用提供示范与启发。该书包含以下特点:
|
||||
|
||||
- [教程文档](#1)
|
||||
- [通用](#11)
|
||||
- [制造](#12)
|
||||
- [金融](#13)
|
||||
- [交通](#14)
|
||||
|
||||
- [模型下载](#2)
|
||||
- 20例OCR在工业、金融、教育、交通等行业的关键场景应用范例;
|
||||
- 覆盖从问题抽象、数据处理、训练调优、部署应用的全流程AI落地环节,为开发者提供常见的OCR优化思路;
|
||||
- 每个范例配有交互式Notebook教程,通过代码展示获得实际结果,便于学习修改与二次开发;
|
||||
- GitHub和AI Studio上开源本书中涉及的范例内容和代码,方便开发者学习和使用。
|
||||
|
||||
<a name="1"></a>
|
||||
|
||||
## 教程文档
|
||||
|
||||
《OCR产业范例20讲》中包含如下教程。如需获取整合后的电子版,请参考[资料下载](#2)
|
||||
|
||||
<a name="11"></a>
|
||||
|
||||
### 通用
|
||||
|
||||
| 类别 | 亮点 | 模型下载 | 教程 | 示例图 |
|
||||
| ---------------------- | ------------------------------------------------------------ | -------------- | --------------------------------------- | ------------------------------------------------------------ |
|
||||
| 高精度中文识别模型SVTR | 比PP-OCRv3识别模型精度高3%,<br />可用于数据挖掘或对预测效率要求不高的场景。 | [模型下载](#2) | [中文](./高精度中文识别模型.md)/English | <img src="../doc/ppocr_v3/svtr_tiny.png" width=200> |
|
||||
| 手写体识别 | 新增字形支持 | [模型下载](#2) | [中文](./手写文字识别.md)/English | <img src="https://ai-studio-static-online.cdn.bcebos.com/7a8865b2836f42d382e7c3fdaedc4d307d797fa2bcd0466e9f8b7705efff5a7b" width = "200" height = "100" /> |
|
||||
| 类别 | 亮点 | 模型下载 | 教程 | 示例图 |
|
||||
| ---------------------- | ------------------------------------------------------------ | -------------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
|
||||
| 高精度中文识别模型SVTR | 比PP-OCRv3识别模型精度高3%,<br />可用于数据挖掘或对预测效率要求不高的场景。 | [模型下载](#2) | [中文](./高精度中文识别模型.md)/English | <img src="../doc/ppocr_v3/svtr_tiny.png" width=200> |
|
||||
| 手写体识别 | 新增字形支持 | [模型下载](#2) | [中文](./手写文字识别.md)/English | <img src="https://ai-studio-static-online.cdn.bcebos.com/7a8865b2836f42d382e7c3fdaedc4d307d797fa2bcd0466e9f8b7705efff5a7b" width = "200" height = "100" /> |
|
||||
| 蒙文识别 | 新语种识别支持 | 即将开源 | [中文](./蒙古文书籍文字识别.md)/English | <img src="https://user-images.githubusercontent.com/50011306/206182391-431c2441-1d1d-4f25-931c-b0f663bf3285.png" width = "200" height = "100" /> |
|
||||
| 甲骨文识别 | 新语种识别支持 | [模型下载](#2) | [中文](https://aistudio.baidu.com/aistudio/projectdetail/5216041?contributionType=1)/English | <img src="https://ai-studio-static-online.cdn.bcebos.com/b973566a4897458cb4ed76ecbc8e4a838d68ac471a504c0daa57c17bc203c4e0" width = "200" height = "100" /> |
|
||||
|
||||
<a name="12"></a>
|
||||
|
||||
|
@@ -35,21 +36,24 @@ PaddleOCR's scenario applications cover the main OCR
 | LCD screen reading recognition | Detection model distillation, Serving deployment | [Model download](#2) | [Chinese](./液晶屏读数识别.md)/English | <img src="https://ai-studio-static-online.cdn.bcebos.com/901ab741cb46441ebec510b37e63b9d8d1b7c95f63cc4e5e8757f35179ae6373" width = "200" height = "100" /> |
 | Packaging production date | Dot-matrix character synthesis, recognition of over- and under-exposed text | [Model download](#2) | [Chinese](./包装生产日期识别.md)/English | <img src="https://ai-studio-static-online.cdn.bcebos.com/d9e0533cc1df47ffa3bbe99de9e42639a3ebfa5bce834bafb1ca4574bf9db684" width = "200" height = "100" /> |
 | PCB text recognition | Small-size text detection and recognition | [Model download](#2) | [Chinese](./PCB字符识别/PCB字符识别.md)/English | <img src="https://ai-studio-static-online.cdn.bcebos.com/95d8e95bf1ab476987f2519c0f8f0c60a0cdc2c444804ed6ab08f2f7ab054880" width = "200" height = "100" /> |
-| Electricity meter recognition | Detection tuning for high-resolution images | [Model download](#2) | | |
-| LCD screen defect detection | Non-text character recognition | | | |
+| Electricity meter recognition | Detection tuning for high-resolution images | [Model download](#2) | [Chinese](https://aistudio.baidu.com/aistudio/projectdetail/5297312?forkThirdPart=1)/English | <img src="https://ai-studio-static-online.cdn.bcebos.com/9d4ebb5bf8544bbeabfacbfa539518c8e1ae68cbc3d74f67a3eb576ca94754a2" width = "200" height = "100" /> |
+| LCD screen defect detection | Non-text character recognition | [Model download](#2) | [Chinese](https://aistudio.baidu.com/aistudio/projectdetail/4268015)/English | <img src="https://ai-studio-static-online.cdn.bcebos.com/c06b363d7ddb4b22b80701258c0a18003c40bca1d64a472698ee1bf746198e3a" width = "200" height = "100" /> |

 <a name="13"></a>

 ### Finance

-| Category | Highlights | Model download | Tutorial | Example image |
-| --- | --- | --- | --- | --- |
-| Form VQA | Multimodal general form structured extraction | [Model download](#2) | [Chinese](./多模态表单识别.md)/English | <img src="https://ai-studio-static-online.cdn.bcebos.com/a3b25766f3074d2facdf88d4a60fc76612f51992fd124cf5bd846b213130665b" width = "200" height = "200" /> |
-| VAT invoice | Key information extraction; SER and RE task training | [Model download](#2) | [Chinese](./发票关键信息抽取.md)/English | <img src="https://user-images.githubusercontent.com/14270174/185393805-c67ff571-cf7e-4217-a4b0-8b396c4f22bb.jpg" width = "200" /> |
-| Seal detection and recognition | End-to-end curved text recognition | [Model download](#2) | [Chinese](./印章弯曲文字识别.md)/English | <img src="https://ai-studio-static-online.cdn.bcebos.com/498119182f0a414ab86ae2de752fa31c9ddc3a74a76847049cc57884602cb269" width = "150" /> |
-| General card recognition | General structured extraction | [Model download](#2) | [Chinese](./快速构建卡证类OCR.md)/English | <img src="https://ai-studio-static-online.cdn.bcebos.com/981640e17d05487e961162f8576c9e11634ca157f79048d4bd9d3bc21722afe8" width = "300" /> |
-| ID card recognition | Structured extraction, image shadows | | | |
-| Contract comparison | Dense text detection; NLP key information extraction | [Model download](#2) | [Chinese](./扫描合同关键信息提取.md)/English | <img src="https://ai-studio-static-online.cdn.bcebos.com/54f3053e6e1b47a39b26e757006fe2c44910d60a3809422ab76c25396b92e69b" width = "300" /> |
+| Category | Highlights | Model download | Tutorial | Example image |
+| --- | --- | --- | --- | --- |
+| Form VQA | Multimodal general form structured extraction | [Model download](#2) | [Chinese](./多模态表单识别.md)/English | <img src="https://ai-studio-static-online.cdn.bcebos.com/a3b25766f3074d2facdf88d4a60fc76612f51992fd124cf5bd846b213130665b" width = "200" height = "200" /> |
+| VAT invoice | Key information extraction; SER and RE task training | [Model download](#2) | [Chinese](./发票关键信息抽取.md)/English | <img src="https://user-images.githubusercontent.com/14270174/185393805-c67ff571-cf7e-4217-a4b0-8b396c4f22bb.jpg" width = "200" /> |
+| Seal detection and recognition | End-to-end curved text recognition | [Model download](#2) | [Chinese](./印章弯曲文字识别.md)/English | <img src="https://ai-studio-static-online.cdn.bcebos.com/498119182f0a414ab86ae2de752fa31c9ddc3a74a76847049cc57884602cb269" width = "150" /> |
+| General card recognition | General structured extraction | [Model download](#2) | [Chinese](./快速构建卡证类OCR.md)/English | <img src="https://ai-studio-static-online.cdn.bcebos.com/981640e17d05487e961162f8576c9e11634ca157f79048d4bd9d3bc21722afe8" width = "300" /> |
+| Bank electronic receipt | Receipt key information extraction | --- | [Chinese](https://aistudio.baidu.com/aistudio/projectdetail/5267489?contributionType=1)/English | <img src="https://ai-studio-static-online.cdn.bcebos.com/1c935a1e468e4911aadd1e8e9c30ca15420dc85fe95d49ce85c3c38ffff75adb" width = "200" /> |
+| ID card recognition | Structured extraction, image shadows | [Model download](#2) | [Chinese](https://aistudio.baidu.com/aistudio/projectdetail/4255861?contributionType=1)/English | <img src='https://ai-studio-static-online.cdn.bcebos.com/4e2054032a9244a7a713e07e0dca00167685ecbc98ce484987e8c3c51208d08d' width='300'> |
+| Contract comparison | Text detection parameter tuning; NLP key information extraction | --- | [Chinese](./扫描合同关键信息提取.md)/English | <img src="https://ai-studio-static-online.cdn.bcebos.com/54f3053e6e1b47a39b26e757006fe2c44910d60a3809422ab76c25396b92e69b" width = "300" /> |
+| Research report recognition and entity statistics | Dense text detection; NLP entity recognition | [Model download](#2) | [Chinese](https://aistudio.baidu.com/aistudio/projectdetail/2574084)/English | <img src="https://ai-studio-static-online.cdn.bcebos.com/0bec003acb6444a69d8e3368962ca07452e9db6520ff44ceb5480011bc736609" width = "300" /> |
+| General table recognition | Table data generation | --- | [Chinese](https://aistudio.baidu.com/aistudio/projectdetail/5099668?contributionType=1)/English | <img src="https://ai-studio-static-online.cdn.bcebos.com/da82ae8ef8fd479aaa38e1049eb3a681cf020dc108fa458eb3ec79da53b45fd1" width = "300" /> |

 <a name="14"></a>
@@ -63,9 +67,9 @@ PaddleOCR's scenario applications cover the main OCR

 <a name="2"></a>

-## Model download
+## Resource download

-To download the trained vertical models from the scenarios above, scan the QR code below, follow the official account and fill out the questionnaire, then join the official PaddleOCR user group to get the 20G OCR learning package (including the *Dive into OCR* e-book, course playback videos, cutting-edge papers, and other materials)
+To download *20 Lectures on OCR Industry Cases* and the trained vertical models from the scenarios above, scan the QR code below, follow the official account and fill out the questionnaire, then join the official PaddleOCR user group to get the 20G OCR learning package (including the *Dive into OCR* e-book, course playback videos, cutting-edge papers, and other materials)

 <div align="center">
 <img src="https://ai-studio-static-online.cdn.bcebos.com/dd721099bd50478f9d5fb13d8dd00fad69c22d6848244fd3a1d3980d7fefc63e" width = "150" height = "150" />
@@ -73,6 +77,4 @@ PaddleOCR's scenario applications cover the main OCR

 If you are an enterprise developer and have not found a suitable solution among the scenarios above, fill out the [OCR application cooperation survey](https://paddle.wjx.cn/vj/QwF7GKw.aspx) to start cooperation with the official team at different levels, free of charge, including but not limited to problem abstraction, technical solution design, project Q&A, and joint R&D. If you have already deployed a project with PaddleOCR, you can also fill out this survey to promote it jointly with the PaddlePaddle platform and boost your company's technical branding. We look forward to your submission!

-<a href="https://trackgit.com">
-<img src="https://us-central1-trackgit-analytics.cloudfunctions.net/token/ping/l63cvzo0w09yxypc7ygl" alt="traffic" />
-</a>
+<a href="https://trackgit.com"><img src="https://us-central1-trackgit-analytics.cloudfunctions.net/token/ping/l63cvzo0w09yxypc7ygl" alt="traffic" /></a>
@@ -0,0 +1,268 @@
# Mongolian Book Text Recognition

This case uses OCR to implement text detection and recognition for Mongolian books. In this chapter you will learn:

- Basic knowledge of the Mongolian script
- How to build a Mongolian dictionary and synthesize data
- How to fine-tune the recognition model

This case was jointly created by 欧日乐克 (CTO of 内蒙古阿尔泰电子信息技术有限公司), 苏日图, 达布希腊图, 歆琪乐, and the PaddlePaddle team.

## Background

Mongolian text recognition has become a pressing problem in Mongolian information processing, but for many subjective and objective reasons there is still some distance to a productized solution. Book digitization is one of the important sub-tasks; because it involves many technologies and is rather difficult, Mongolian book text recognition as a whole is not yet mature.



* Basic concepts

```txt
Character: unlike a Chinese character (one character, one form, carrying meaning), a Mongolian
character is similar to an English letter: the script is phonetic, but each character has several
glyph forms, and several characters make up a word.

Nominal character: one of Unicode's ten principles states that "Character is not Glyph".
For scripts with positional shaping such as Mongolian, one glyph therefore stands in for the
other positional forms of a character. That glyph is recorded in the Unicode base table and
assigned a code point. For example, Mongolian ᠠ ([a]) is the isolated form of a, assigned code
point \u1820 and written uni1820.isol; it represents all initial (uni1820.init), medial
(uni1820.medi), and final (uni1820.fina) forms.

Presentation (shaped) character: any glyph form other than the nominal character, with no code
point of its own. The characters seen in Mongolian text are those rendered by a Unicode
Scripts Processor (USP).

Word: the smallest unit of language that can be used independently, equivalent to an English
word; a string delimited by spaces.
```
### Project difficulties

- 1. Complexity of Mongolian characters

  One form, multiple characters:

  

  Multiple characters, one form:

  

- 2. Mongolian layout direction (written vertically, lines wrap left to right)

  The writing and line-wrapping directions affect the ordering and text direction after recognition.

  

- 3. Inconsistent character widths and line widths

  

- 4. Components within characters are hard to distinguish (handling of the MVS and NNBSP dots, and composite glyphs)

  Mongolian has several control characters related to positional shaping; the most frequent are the Mongolian Vowel Separator (MVS, \u180E) and the Narrow No-Break Space (NNBSP, \u202F). Both are normally invisible in text, narrower than a space, and non-breaking. MVS joins a word stem to the a/e vowel endings; NNBSP joins a word to its suffixed components (roughly, separately written suffixes). MVS triggers shaping on both sides, while NNBSP triggers shaping of what follows.

  In addition, some Mongolian glyphs are components of other glyphs, which makes it hard to match recognition results to characters.

To address these problems, this case develops a Mongolian text recognition system based on PP-OCRv3, an open-source ultra-lightweight OCR system. We first synthesized 2.5 million images with a data synthesis tool and trained on them; by precisely cropping white margins, randomly inserting punctuation, and adjusting the evaluation set, recognition accuracy was raised to 75.78%. Results:

| Strategy | Accuracy (%) |
| :--------------- | :-------- |
| Training on synthetic data | 42.38 |
| Optimized synthetic training | 75.78 |

The overall process:
- Step 1: select and proofread a real corpus, and convert a copy of the corpus to a non-Unicode encoding
- Step 2: generate images line by line with multiple fonts; non-Unicode fonts are recommended for generation
- Step 3: cut real scanned text images line by line, save them, and label every image with its Unicode encoding (the evaluation set uses real image samples only)
- Step 4: train
- Step 5: recognize text

The quality of the evaluation data also has a large effect on training efficiency.
## Quick start
### Environment setup

This task was completed on AI Studio. Environment:

- OS: Linux
- PaddlePaddle: 2.3
- PaddleOCR: release/2.5
- text_renderer: master

Download the PaddleOCR code and install the dependencies:
```bash
git clone -b dygraph https://gitee.com/paddlepaddle/PaddleOCR

# install dependencies
cd PaddleOCR
pip install -r PaddleOCR/requirements.txt
```

### Model inference
Place the downloaded or trained model under `PaddleOCR/output`, then run the following command for quick model inference:
```bash
python tools/infer_rec.py -c configs/rec/PP-OCRv3/multi_language/Mongolian_PP-OCRv3_rec.yml \
                          -o Global.pretrained_model=output/v3_Mongolian_mobile/best_accuracy \
                          Global.infer_img=doc/imgs_words/
```
<!-- #region -->
## Data preparation

This project generated 2.5 million images from a real corpus as the training set, plus 10,000 images as the validation set.

### Corpus preparation
Because of encoding issues, Mongolian images cannot be generated directly from Unicode strings. Mongolian Unicode is essentially phonetic and involves complex shaping; without a complex-text-processing engine, only the nominal characters are displayed rather than the shaped presentation forms.
To generate Mongolian images you therefore need to either 1. call the Windows system's USP10.dll, or 2. generate with a glyph-encoded font.
This project used the second option: images are generated with a glyph-encoded font and paired with Unicode labels.

Generating directly from Unicode (characters come apart and display as nominal characters):



$$\mbox{Left column: image generated from Unicode; right column: Unicode text}$$



$$\mbox{Left column: Unicode text; right column: image generated with a glyph-encoded font}$$

When generating images, it is recommended to keep each string to about 5 words (about 30 characters on average); otherwise training becomes difficult.
### Image processing

Some training image examples:



To verify the model on real-world input, the validation images use real scans: complete pages were scanned and then cut into line images.

The label file format is as follows:

<img src='https://ai-studio-static-online.cdn.bcebos.com/c7d98953fba24ed28a8f4e189b9d7cf81babdacc3fc3465b9cb65d09691dd4c8' width='800'>

| Dataset | Count |
|---|---|
| Training set | 2.5 million |
| Validation set | 11,000 |
<!-- #endregion -->

<!-- #region -->
The data files are organized as follows:

```txt
PaddleOCRv3
├── train_data            # training data folder
│   ├── texts
│   │   ├── train1.txt    # generated training labels, one file per image folder
│   │   ├── train2.txt
│   │   ├── train3.txt
│   │   ├── train4.txt
│   │   ├── train11.txt
│   │   ├── train20.txt
│   │   ├── train21.txt
│   │   └── train22.txt
│   ├── image1            # generated training images
│   ├── image2
│   ├── image3
│   ├── image4
│   ├── image11
│   ├── image20
│   ├── image21
│   └── image22
├── test_data             # validation data folder
│   ├── test_data.txt     # validation labels
│   ├── 0                 # each folder holds 34 images
│   ├── 1
:   :
:   :
│   └── 409
```
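The label files above (`train1.txt`, `test_data.txt`) are assumed to follow PaddleOCR's usual recognition label convention of one `image_path<TAB>transcription` entry per line. A minimal sketch with placeholder values:

```python
# Hypothetical writer for a recognition label file in the assumed
# "image_path<TAB>transcription" format used for PaddleOCR rec training.
entries = [
    ('image1/0001.jpg', '\u1820\u1821'),  # placeholder path and Mongolian text
]
with open('train_data/texts/train1.txt', 'w', encoding='utf-8') as f:
    for path, text in entries:
        f.write(f'{path}\t{text}\n')
```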
### Building the dictionary

Build a text dictionary containing all Mongolian characters, ordered by Unicode code point. It is recommended to save it under the ./ppocr/utils/dict directory and point the yml config at its path.

<img src='https://ai-studio-static-online.cdn.bcebos.com/825976d0134c4b94a07ca2c8249d8d53f6f5834453cd4fb093d9fa8bc644cd4f' width='200'>
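A minimal sketch of such a dictionary builder, assuming the Mongolian Unicode block U+1800-U+18AF and the one-character-per-line format that ppocr dictionaries use; the output file name is illustrative:

```python
# Write every code point of the Mongolian Unicode block (U+1800-U+18AF)
# in code-point order, one character per line, as ppocr dictionaries expect.
with open('./ppocr/utils/dict/mongolian_dict.txt', 'w', encoding='utf-8') as f:
    for code_point in range(0x1800, 0x18B0):
        f.write(chr(code_point) + '\n')
```

The `character_dict_path` entry in the yml config then points at this file.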
## Training on synthetic data
### Model training and evaluation

With the synthetic data ready, start training with:
<!-- #endregion -->
```bash
cd ${PaddleOCR_root}
python tools/train.py -c configs/rec/PP-OCRv3/multi_language/Mongolian_PP-OCRv3_rec.yml
```
To resume training from a checkpoint:
```bash
cd ${PaddleOCR_root}
python tools/train.py -c configs/rec/PP-OCRv3/multi_language/Mongolian_PP-OCRv3_rec.yml \
      -o Global.checkpoints=./output/v3_Mongolian_mobile/best_accuracy
```
The configurable parameters are:

```txt
-c: the config file to use; Mongolian_PP-OCRv3_rec.yml corresponds to the OCRv3 recognition model.
-o: overrides parameters in the config file
    Global.pretrained_model: the pretrained model to use
    Global.checkpoints: checkpoint location
    Global.epoch_num: number of training epochs
    Global.eval_batch_step: evaluate every this many steps
    Train.dataset.data_dir: training dataset path
    Train.dataset.label_file_list: training label file list
    Train.loader.batch_size_per_card: per-card training batch size
    Eval.dataset.data_dir: evaluation dataset path
    Eval.dataset.label_file_list: evaluation label file list
    Eval.loader.batch_size_per_card: per-card evaluation batch size
```

### Model inference
Run inference with the trained model:
```bash
python tools/infer_rec.py -c configs/rec/PP-OCRv3/multi_language/Mongolian_PP-OCRv3_rec.yml \
                          -o Global.pretrained_model=output/v3_Mongolian_mobile/best_accuracy \
                          Global.infer_img=doc/imgs_words/
```
## Testing the model on real data

After training, the model can be tested. Point the test images to a folder:
```shell
PaddleOCRv3
├── doc
    ├── imgs_words
    │   ├── arabic
    │   ├── belarusian
    │   ├── bulgarian
    :   :
    :   :
    │   ├── mongolian     # put real Mongolian images here, one line of text per image
    │   └── uyghur
```
Quick evaluation:

```bash
python tools/eval.py -c configs/rec/PP-OCRv3/multi_language/Mongolian_PP-OCRv3_rec.yml \
                     -o Global.checkpoints=./output/v3_Mongolian_mobile/best_accuracy
```
The prediction results are written to the predicts_ppocrv3_Mongolian.txt file.

```shell
PaddleOCRv3
├── output
│   ├── rec
│   │   └── predicts_ppocrv3_Mongolian.txt
│   └── v3_Mongolian_mobile
```

Some results (the three columns are prediction, ground-truth label, and image):



## Summary

This case developed a Mongolian text recognition system based on PP-OCRv3, an open-source ultra-lightweight OCR system. With 2.5 million synthetic images added and fine-tuning on top of the existing model, by correcting the training set and defining evaluation criteria, Mongolian recognition accuracy was raised from 42% to 75%.
@@ -22,7 +22,6 @@ Architecture:
   Backbone:
     name: ResNet_vd
     layers: 18
-    disable_se: True
   Neck:
     name: DBFPN
     out_channels: 256
@@ -27,7 +27,7 @@ Optimizer:
   beta2: 0.999
   lr:
     name: Piecewise
-    decay_epochs : [700, 800]
+    decay_epochs : [700]
     values : [0.0005, 0.00005]
     warmup_epoch: 5
   regularizer:
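For reference, a Piecewise schedule with `decay_epochs: [700]` and `values: [0.0005, 0.00005]` keeps the first value until epoch 700 and the second one thereafter, so the removed `800` boundary had no matching third value. A plain-Python sketch of that behavior (not the ppocr implementation):

```python
# Plain-Python model of a Piecewise learning-rate schedule:
# boundaries has one fewer entry than values.
def piecewise_lr(epoch, boundaries=(700,), values=(0.0005, 0.00005)):
    for boundary, value in zip(boundaries, values):
        if epoch < boundary:
            return value
    return values[-1]

assert piecewise_lr(0) == 0.0005      # before epoch 700
assert piecewise_lr(700) == 0.00005   # from epoch 700 onward
```

The same reading applies to the three identical hunks below, which change `decay_epochs` from `[700, 800]` to `[700]` with two-element `values` lists.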
@@ -27,7 +27,7 @@ Optimizer:
   beta2: 0.999
   lr:
     name: Piecewise
-    decay_epochs : [700, 800]
+    decay_epochs : [700]
     values : [0.001, 0.0001]
     warmup_epoch: 5
   regularizer:
@@ -27,7 +27,7 @@ Optimizer:
   beta2: 0.999
   lr:
     name: Piecewise
-    decay_epochs : [700, 800]
+    decay_epochs : [700]
     values : [0.001, 0.0001]
     warmup_epoch: 5
   regularizer:
@@ -27,7 +27,7 @@ Optimizer:
   beta2: 0.999
   lr:
     name: Piecewise
-    decay_epochs : [700, 800]
+    decay_epochs : [700]
     values : [0.001, 0.0001]
     warmup_epoch: 5
   regularizer:
@@ -1,6 +1,6 @@
 Global:
   use_gpu: True
-  epoch_num: 400
+  epoch_num: 6
   log_smooth_window: 20
   print_batch_step: 10
   save_model_dir: ./output/rec/seed
@@ -27,7 +27,7 @@ Optimizer:
   momentum: 0.9
   lr:
     name: Piecewise
-    decay_epochs: [4,5,8]
+    decay_epochs: [4, 5]
     values: [1.0, 0.1, 0.01]
   regularizer:
     name: 'L2'
@@ -26,10 +26,10 @@ Optimizer:
   name: AdamW
   beta1: 0.9
   beta2: 0.99
-  epsilon: 8.e-8
+  epsilon: 1.e-8
   weight_decay: 0.05
   no_weight_decay_name: norm pos_embed
-  one_dim_param_no_weight_decay: true
+  one_dim_param_no_weight_decay: True
   lr:
     name: Cosine
     learning_rate: 0.0005
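A sketch of how the corrected Optimizer block plausibly maps onto Paddle's API; the real wiring lives in ppocr's optimizer builder, the Cosine schedule is omitted, and the toy model exists only so the snippet runs:

```python
import paddle

model = paddle.nn.Linear(10, 10)  # toy stand-in for the real network
optimizer = paddle.optimizer.AdamW(
    learning_rate=0.0005,          # lr.learning_rate
    beta1=0.9,
    beta2=0.99,
    epsilon=1e-8,                  # the value this diff corrects from 8e-8
    weight_decay=0.05,
    parameters=model.parameters(),
)
```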
@@ -48,7 +48,7 @@ Architecture:
   Backbone:
     name: SVTRNet
     img_size: [32, 100]
-    out_char_num: 25
+    out_char_num: 25 # W//4 or W//8 or W/12
     out_channels: 192
     patch_merging: 'Conv'
     embed_dim: [64, 128, 256]
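The new comment can be sanity-checked against the configs themselves; a quick check of the `W//4 or W//8 or W/12` rule for both SVTRNet configs in this diff:

```python
# out_char_num is derived from the input width W:
assert 100 // 4 == 25   # img_size [32, 100] -> out_char_num: 25 (this config)
assert 320 // 8 == 40   # img_size [32, 320] -> out_char_num: 40 (the config further below)
```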
@@ -57,7 +57,7 @@ Architecture:
     mixer: ['Local','Local','Local','Local','Local','Local','Global','Global','Global','Global','Global','Global']
     local_mixer: [[7, 11], [7, 11], [7, 11]]
     last_stage: True
-    prenorm: false
+    prenorm: False
   Neck:
     name: SequenceEncoder
     encoder_type: reshape
@@ -82,6 +82,8 @@ Train:
       - DecodeImage: # load image
           img_mode: BGR
           channel_first: False
+      - SVTRRecAug:
+          aug_type: 0 # or 1
       - CTCLabelEncode: # Class handling label
       - SVTRRecResizeImg:
           image_shape: [3, 64, 256]
@@ -92,7 +94,7 @@ Train:
     shuffle: True
     batch_size_per_card: 512
     drop_last: True
-    num_workers: 4
+    num_workers: 8

 Eval:
   dataset:
@@ -23,7 +23,7 @@ Optimizer:
   name: AdamW
   beta1: 0.9
   beta2: 0.99
-  epsilon: 8.0e-08
+  epsilon: 1.0e-08
   weight_decay: 0.05
   no_weight_decay_name: norm pos_embed
   one_dim_param_no_weight_decay: true
@@ -40,7 +40,7 @@ Architecture:
     img_size:
     - 32
    - 320
-    out_char_num: 40
+    out_char_num: 40 # W//4 or W//8 or W/12
     out_channels: 96
     patch_merging: Conv
     embed_dim:
@@ -1,3 +1,4 @@
 paddlepaddle
 numpy
 opencv-python
+typing-extensions
@@ -121,7 +121,7 @@ CUDA_LIB, CUDNN_LIB, TENSORRT_DIR, WITH_GPU, WITH_TENSORRT
 ```
 cd /d D:\projects\cpp\PaddleOCR\deploy\cpp_infer
 ```
-The executable `ppocr.exe` is the sample prediction program. Its main usage is shown below; for more usage, see the "Run demo" section of the [documentation](../readme.md).
+The executable `ppocr.exe` is the sample prediction program. Its main usage is shown below; for more usage, see the "Run demo" section of the [documentation](../readme_ch.md).

 ```shell
 # switch the terminal encoding to utf8
@@ -17,7 +17,7 @@
 #include "paddle_api.h"
 #include "paddle_inference_api.h"

 #include <include/ocr_cls.h>
 #include <include/preprocess_op.h>
 #include <include/utility.h>

 namespace PaddleOCR {
@@ -23,7 +23,7 @@ namespace PaddleOCR {
 class PPOCR {
 public:
   explicit PPOCR();
-  ~PPOCR();
+  ~PPOCR() = default;

   std::vector<std::vector<OCRPredictResult>> ocr(std::vector<cv::Mat> img_list,
                                                  bool det = true,

@@ -47,9 +47,9 @@ protected:
                    std::vector<OCRPredictResult> &ocr_results);

 private:
-  DBDetector *detector_ = nullptr;
-  Classifier *classifier_ = nullptr;
-  CRNNRecognizer *recognizer_ = nullptr;
+  std::unique_ptr<DBDetector> detector_;
+  std::unique_ptr<Classifier> classifier_;
+  std::unique_ptr<CRNNRecognizer> recognizer_;
 };

 } // namespace PaddleOCR
@@ -23,7 +23,7 @@ namespace PaddleOCR {
 class PaddleStructure : public PPOCR {
 public:
   explicit PaddleStructure();
-  ~PaddleStructure();
+  ~PaddleStructure() = default;

   std::vector<StructurePredictResult> structure(cv::Mat img,
                                                 bool layout = false,

@@ -37,8 +37,8 @@ private:
   std::vector<double> time_info_table = {0, 0, 0};
   std::vector<double> time_info_layout = {0, 0, 0};

-  StructureTableRecognizer *table_model_ = nullptr;
-  StructureLayoutRecognizer *layout_model_ = nullptr;
+  std::unique_ptr<StructureTableRecognizer> table_model_;
+  std::unique_ptr<StructureLayoutRecognizer> layout_model_;

   void layout(cv::Mat img,
               std::vector<StructurePredictResult> &structure_result);
@@ -158,7 +158,7 @@ build/paddle_inference_install_dir/
 <a name="21"></a>
 ### 2.1 Export the inference model

-* You can refer to [Model inference](../../doc/doc_ch/inference.md) and export the inference model. After the model is exported, assuming it is placed in the `inference` directory, the directory structure is as follows.
+* You can refer to [Model inference](../../doc/doc_en/inference_en.md) and export the inference model. After the model is exported, assuming it is placed in the `inference` directory, the directory structure is as follows.

 ```
 inference/
@@ -82,7 +82,7 @@ void check_params() {
 }

 void ocr(std::vector<cv::String> &cv_all_img_names) {
-  PPOCR ocr = PPOCR();
+  PPOCR ocr;

   if (FLAGS_benchmark) {
     ocr.reset_timer();

@@ -120,7 +120,7 @@ void ocr(std::vector<cv::String> &cv_all_img_names) {
 }

 void structure(std::vector<cv::String> &cv_all_img_names) {
-  PaddleOCR::PaddleStructure engine = PaddleOCR::PaddleStructure();
+  PaddleOCR::PaddleStructure engine;

   if (FLAGS_benchmark) {
     engine.reset_timer();
@@ -20,12 +20,9 @@ void Classifier::Run(std::vector<cv::Mat> img_list,
                      std::vector<int> &cls_labels,
                      std::vector<float> &cls_scores,
                      std::vector<double> &times) {
-  std::chrono::duration<float> preprocess_diff =
-      std::chrono::steady_clock::now() - std::chrono::steady_clock::now();
-  std::chrono::duration<float> inference_diff =
-      std::chrono::steady_clock::now() - std::chrono::steady_clock::now();
-  std::chrono::duration<float> postprocess_diff =
-      std::chrono::steady_clock::now() - std::chrono::steady_clock::now();
+  std::chrono::duration<float> preprocess_diff = std::chrono::duration<float>::zero();
+  std::chrono::duration<float> inference_diff = std::chrono::duration<float>::zero();
+  std::chrono::duration<float> postprocess_diff = std::chrono::duration<float>::zero();

   int img_num = img_list.size();
   std::vector<int> cls_image_shape = {3, 48, 192};
@@ -20,12 +20,9 @@ void CRNNRecognizer::Run(std::vector<cv::Mat> img_list,
                          std::vector<std::string> &rec_texts,
                          std::vector<float> &rec_text_scores,
                          std::vector<double> &times) {
-  std::chrono::duration<float> preprocess_diff =
-      std::chrono::steady_clock::now() - std::chrono::steady_clock::now();
-  std::chrono::duration<float> inference_diff =
-      std::chrono::steady_clock::now() - std::chrono::steady_clock::now();
-  std::chrono::duration<float> postprocess_diff =
-      std::chrono::steady_clock::now() - std::chrono::steady_clock::now();
+  std::chrono::duration<float> preprocess_diff = std::chrono::duration<float>::zero();
+  std::chrono::duration<float> inference_diff = std::chrono::duration<float>::zero();
+  std::chrono::duration<float> postprocess_diff = std::chrono::duration<float>::zero();

   int img_num = img_list.size();
   std::vector<float> width_list;
@@ -21,28 +21,28 @@ namespace PaddleOCR {

 PPOCR::PPOCR() {
   if (FLAGS_det) {
-    this->detector_ = new DBDetector(
+    this->detector_.reset(new DBDetector(
         FLAGS_det_model_dir, FLAGS_use_gpu, FLAGS_gpu_id, FLAGS_gpu_mem,
         FLAGS_cpu_threads, FLAGS_enable_mkldnn, FLAGS_limit_type,
         FLAGS_limit_side_len, FLAGS_det_db_thresh, FLAGS_det_db_box_thresh,
         FLAGS_det_db_unclip_ratio, FLAGS_det_db_score_mode, FLAGS_use_dilation,
-        FLAGS_use_tensorrt, FLAGS_precision);
+        FLAGS_use_tensorrt, FLAGS_precision));
   }

   if (FLAGS_cls && FLAGS_use_angle_cls) {
-    this->classifier_ = new Classifier(
+    this->classifier_.reset(new Classifier(
         FLAGS_cls_model_dir, FLAGS_use_gpu, FLAGS_gpu_id, FLAGS_gpu_mem,
         FLAGS_cpu_threads, FLAGS_enable_mkldnn, FLAGS_cls_thresh,
-        FLAGS_use_tensorrt, FLAGS_precision, FLAGS_cls_batch_num);
+        FLAGS_use_tensorrt, FLAGS_precision, FLAGS_cls_batch_num));
   }
   if (FLAGS_rec) {
-    this->recognizer_ = new CRNNRecognizer(
+    this->recognizer_.reset(new CRNNRecognizer(
         FLAGS_rec_model_dir, FLAGS_use_gpu, FLAGS_gpu_id, FLAGS_gpu_mem,
         FLAGS_cpu_threads, FLAGS_enable_mkldnn, FLAGS_rec_char_dict_path,
         FLAGS_use_tensorrt, FLAGS_precision, FLAGS_rec_batch_num,
-        FLAGS_rec_img_h, FLAGS_rec_img_w);
+        FLAGS_rec_img_h, FLAGS_rec_img_w));
   }
-};
+}

 std::vector<std::vector<OCRPredictResult>>
 PPOCR::ocr(std::vector<cv::Mat> img_list, bool det, bool rec, bool cls) {

@@ -51,7 +51,7 @@ PPOCR::ocr(std::vector<cv::Mat> img_list, bool det, bool rec, bool cls) {
   if (!det) {
     std::vector<OCRPredictResult> ocr_result;
     ocr_result.resize(img_list.size());
-    if (cls && this->classifier_ != nullptr) {
+    if (cls && this->classifier_) {
       this->cls(img_list, ocr_result);
       for (int i = 0; i < img_list.size(); i++) {
         if (ocr_result[i].cls_label % 2 == 1 &&

@@ -92,7 +92,7 @@ std::vector<OCRPredictResult> PPOCR::ocr(cv::Mat img, bool det, bool rec,
     img_list.push_back(crop_img);
   }
   // cls
-  if (cls && this->classifier_ != nullptr) {
+  if (cls && this->classifier_) {
     this->cls(img_list, ocr_result);
     for (int i = 0; i < img_list.size(); i++) {
       if (ocr_result[i].cls_label % 2 == 1 &&

@@ -190,16 +190,4 @@ void PPOCR::benchmark_log(int img_num) {
   }
 }

-PPOCR::~PPOCR() {
-  if (this->detector_ != nullptr) {
-    delete this->detector_;
-  }
-  if (this->classifier_ != nullptr) {
-    delete this->classifier_;
-  }
-  if (this->recognizer_ != nullptr) {
-    delete this->recognizer_;
-  }
-};
-
 } // namespace PaddleOCR
@@ -21,20 +21,20 @@ namespace PaddleOCR {

 PaddleStructure::PaddleStructure() {
   if (FLAGS_layout) {
-    this->layout_model_ = new StructureLayoutRecognizer(
+    this->layout_model_.reset(new StructureLayoutRecognizer(
         FLAGS_layout_model_dir, FLAGS_use_gpu, FLAGS_gpu_id, FLAGS_gpu_mem,
         FLAGS_cpu_threads, FLAGS_enable_mkldnn, FLAGS_layout_dict_path,
         FLAGS_use_tensorrt, FLAGS_precision, FLAGS_layout_score_threshold,
-        FLAGS_layout_nms_threshold);
+        FLAGS_layout_nms_threshold));
   }
   if (FLAGS_table) {
-    this->table_model_ = new StructureTableRecognizer(
+    this->table_model_.reset(new StructureTableRecognizer(
         FLAGS_table_model_dir, FLAGS_use_gpu, FLAGS_gpu_id, FLAGS_gpu_mem,
         FLAGS_cpu_threads, FLAGS_enable_mkldnn, FLAGS_table_char_dict_path,
         FLAGS_use_tensorrt, FLAGS_precision, FLAGS_table_batch_num,
-        FLAGS_table_max_len, FLAGS_merge_no_span_structure);
+        FLAGS_table_max_len, FLAGS_merge_no_span_structure));
   }
-};
+}

 std::vector<StructurePredictResult>
 PaddleStructure::structure(cv::Mat srcimg, bool layout, bool table, bool ocr) {

@@ -65,7 +65,7 @@ PaddleStructure::structure(cv::Mat srcimg, bool layout, bool table, bool ocr) {
   }

   return structure_results;
-};
+}

 void PaddleStructure::layout(
     cv::Mat img, std::vector<StructurePredictResult> &structure_result) {

@@ -123,7 +123,7 @@ void PaddleStructure::table(cv::Mat img,
     structure_result.cell_box = structure_boxes[i];
     structure_result.html_score = structure_scores[i];
   }
-};
+}

 std::string
 PaddleStructure::rebuild_table(std::vector<std::string> structure_html_tags,

@@ -286,10 +286,4 @@ void PaddleStructure::benchmark_log(int img_num) {
   }
 }

-PaddleStructure::~PaddleStructure() {
-  if (this->table_model_ != nullptr) {
-    delete this->table_model_;
-  }
-};
-
 } // namespace PaddleOCR
@@ -42,7 +42,7 @@ docker logs -f paddle_ocr
 ```

 ## 4. Test the service
-a. Compute the Base64 encoding of the image to recognize (to just try it out, you can use a free online tool such as http://tool.chinaz.com/tools/imgtobase/)
+a. Compute the Base64 encoding of the image to recognize (to just try it out, you can use a free online tool such as http://tool.chinaz.com/tools/imgtobase/
 b. Send a service request (see the values in sample_request.txt)
 ```
 curl -H "Content-Type:application/json" -X POST --data "{\"images\": [\"<paste the image's Base64 here (remove 'data:image/jpg;base64,')>\"]}" http://localhost:8868/predict/ocr_system
@@ -3,7 +3,7 @@
 - [Service deployment based on PaddleHub Serving](#基于paddlehub-serving的服务部署)
   - [1. Recent updates](#1-近期更新)
   - [2. Quick service start](#2-快速启动服务)
-    - [2.1 Prepare the environment](#21-准备环境)
+    - [2.1 Install PaddleHub](#21-安装PaddleHub)
     - [2.2 Download the inference models](#22-下载推理模型)
     - [2.3 Install the service modules](#23-安装服务模块)
     - [2.4 Start the service](#24-启动服务)
@@ -15,8 +15,8 @@

 PaddleOCR provides 2 service deployment methods:
-- Deployment based on PaddleHub Serving: code path "`./deploy/hubserving`"; follow this tutorial;
-- Deployment based on PaddleServing: code path "`./deploy/pdserving`"; see the [documentation](../../deploy/pdserving/README_CN.md).
+- Deployment based on PaddleHub Serving: code path `./deploy/hubserving`; follow this tutorial;
+- Deployment based on PaddleServing: code path `./deploy/pdserving`; see the [documentation](../../deploy/pdserving/README_CN.md).

 # Service deployment based on PaddleHub Serving
@@ -51,120 +51,77 @@ deploy/hubserving/ocr_system/

 ## 2. Quick service start
 The following steps use the two-stage detection+recognition pipeline service as an example; for a detection-only or recognition-only service, substitute the corresponding file paths.
-### 2.1 Prepare the environment
-```shell
-# install paddlehub
-# paddlehub requires python>3.6.2
+### 2.1 Install PaddleHub
+paddlehub requires python>3.6.2
+```bash
 pip3 install paddlehub==2.1.0 --upgrade -i https://mirror.baidu.com/pypi/simple
 ```

 ### 2.2 Download the inference models
 Before installing a service module, prepare the inference models and place them at the correct paths. The PP-OCRv3 models are used by default, at these default paths:
+| Model | Path |
+| ------- | - |
+| Detection model | `./inference/ch_PP-OCRv3_det_infer/` |
+| Recognition model | `./inference/ch_PP-OCRv3_rec_infer/` |
+| Angle classifier | `./inference/ch_ppocr_mobile_v2.0_cls_infer/` |
+| Layout analysis model | `./inference/picodet_lcnet_x1_0_fgd_layout_infer/` |
+| Table structure recognition model | `./inference/ch_ppstructure_mobile_v2.0_SLANet_infer/` |
+| KIE SER model | `./inference/ser_vi_layoutxlm_xfund_infer/` |
+| KIE RE model | `./inference/re_vi_layoutxlm_xfund_infer/` |

-```
-Detection model: ./inference/ch_PP-OCRv3_det_infer/
-Recognition model: ./inference/ch_PP-OCRv3_rec_infer/
-Angle classifier: ./inference/ch_ppocr_mobile_v2.0_cls_infer/
-Layout analysis model: ./inference/picodet_lcnet_x1_0_fgd_layout_infer/
-Table structure recognition model: ./inference/ch_ppstructure_mobile_v2.0_SLANet_infer/
-KIE SER model: ./inference/ser_vi_layoutxlm_xfund_infer/
-KIE RE model: ./inference/re_vi_layoutxlm_xfund_infer/
-```
+**The model paths can be viewed and modified in `params.py`.**

-**The model paths can be viewed and modified in `params.py`.** More models are available from the PaddleOCR model zoos [PP-OCR](../../doc/doc_ch/models_list.md) and [PP-Structure](../../ppstructure/docs/models_list.md); you can also substitute your own trained and converted models.
+More models are available from the PaddleOCR model zoos [PP-OCR](../../doc/doc_ch/models_list.md) and [PP-Structure](../../ppstructure/docs/models_list.md); you can also substitute your own trained and converted models.

 ### 2.3 Install the service modules
 PaddleOCR provides the following service modules; install the ones you need.

-* On Linux, installation examples:
-```shell
-# install the detection service module:
-hub install deploy/hubserving/ocr_det/
-
-# or, install the classification service module:
-hub install deploy/hubserving/ocr_cls/
-
-# or, install the recognition service module:
-hub install deploy/hubserving/ocr_rec/
-
-# or, install the detection+recognition pipeline service module:
-hub install deploy/hubserving/ocr_system/
-
-# or, install the table recognition service module:
-hub install deploy/hubserving/structure_table/
-
-# or, install the PP-Structure service module:
-hub install deploy/hubserving/structure_system/
-
-# or, install the layout analysis service module:
-hub install deploy/hubserving/structure_layout/
-
-# or, install the KIE SER service module:
-hub install deploy/hubserving/kie_ser/
-
-# or, install the KIE SER+RE service module:
-hub install deploy/hubserving/kie_ser_re/
-```
-
-* On Windows (folder separator `\`), installation examples:
-```shell
-# install the detection service module:
-hub install deploy\hubserving\ocr_det\
-
-# or, install the classification service module:
-hub install deploy\hubserving\ocr_cls\
-
-# or, install the recognition service module:
-hub install deploy\hubserving\ocr_rec\
-
-# or, install the detection+recognition pipeline service module:
-hub install deploy\hubserving\ocr_system\
-
-# or, install the table recognition service module:
-hub install deploy\hubserving\structure_table\
-
-# or, install the PP-Structure service module:
-hub install deploy\hubserving\structure_system\
-
-# or, install the layout analysis service module:
-hub install deploy\hubserving\structure_layout\
-
-# or, install the KIE SER service module:
-hub install deploy\hubserving\kie_ser\
-
-# or, install the KIE SER+RE service module:
-hub install deploy\hubserving\kie_ser_re\
-```
+On Linux (on Windows, replace `/` with `\`), the install commands are:
+| Service module | Command |
+| ------- | - |
+| Detection | `hub install deploy/hubserving/ocr_det` |
+| Classification | `hub install deploy/hubserving/ocr_cls` |
+| Recognition | `hub install deploy/hubserving/ocr_rec` |
+| Detection+recognition pipeline | `hub install deploy/hubserving/ocr_system` |
+| Table recognition | `hub install deploy/hubserving/structure_table` |
+| PP-Structure | `hub install deploy/hubserving/structure_system` |
+| Layout analysis | `hub install deploy/hubserving/structure_layout` |
+| KIE SER | `hub install deploy/hubserving/kie_ser` |
+| KIE SER+RE | `hub install deploy/hubserving/kie_ser_re` |

 ### 2.4 Start the service
 #### 2.4.1 Start from the command line (CPU only)
 **Start command:**
-```shell
-$ hub serving start --modules [Module1==Version1, Module2==Version2, ...] \
-                    --port XXXX \
-                    --use_multiprocess \
-                    --workers \
-```
+```bash
+hub serving start --modules Module1==Version1, Module2==Version2, ... \
+                  --port 8866 \
+                  --use_multiprocess \
+                  --workers \
+```

 **Parameters:**

 | Parameter | Purpose |
 |---|---|
-|--modules/-m|PaddleHub Serving pre-installed models, listed as multiple Module==Version key-value pairs<br>*`when Version is omitted, the latest version is selected by default`*|
-|--port/-p|service port, 8866 by default|
-|--use_multiprocess|whether to enable concurrent (multi-process) mode; single-process by default; recommended for multi-core CPU machines<br>*`Windows supports single-process mode only`*|
-|--workers|number of concurrent tasks in concurrent mode, `2*cpu_count-1` by default, where `cpu_count` is the number of CPU cores|
+|`--modules`/`-m`|PaddleHub Serving pre-installed models, listed as multiple Module==Version key-value pairs<br>**when Version is omitted, the latest version is selected by default**|
+|`--port`/`-p`|service port, 8866 by default|
+|`--use_multiprocess`|whether to enable concurrent (multi-process) mode; single-process by default; recommended for multi-core CPU machines<br>**Windows supports single-process mode only**|
+|`--workers`|number of concurrent tasks in concurrent mode, `2*cpu_count-1` by default, where `cpu_count` is the number of CPU cores|

-For example, start the pipeline service: ```hub serving start -m ocr_system```
+For example, start the pipeline service:
+```bash
+hub serving start -m ocr_system
+```

 This deploys the service API, listening on the default port 8866.

 #### 2.4.2 Start from a configuration file (CPU and GPU)
 **Start command:**
-```hub serving start -c config.json```
+```bash
+hub serving start -c config.json
+```

 where the format of `config.json` is:
-```python
+```json
 {
   "modules_info": {
     "ocr_system": {
@ -182,48 +139,59 @@ $ hub serving start --modules [Module1==Version1, Module2==Version2, ...] \
|
|||
}
|
||||
```
|
||||
|
||||
- `init_args`中的可配参数与`module.py`中的`_initialize`函数接口一致。
|
||||
|
||||
**当`use_gpu`为`true`时,表示使用GPU启动服务。**
|
||||
- `predict_args`中的可配参数与`module.py`中的`predict`函数接口一致。
|
||||
|
||||
**注意:**
|
||||
- 使用配置文件启动服务时,其他参数会被忽略。
|
||||
- 如果使用GPU预测(即,`use_gpu`置为`true`),则需要在启动服务之前,设置CUDA_VISIBLE_DEVICES环境变量,如:
|
||||
```bash
|
||||
export CUDA_VISIBLE_DEVICES=0
|
||||
```
|
||||
- **`use_gpu`不可与`use_multiprocess`同时为`true`**。
|
||||
|
||||
如,使用GPU 3号卡启动串联服务:
|
||||
```bash
|
||||
export CUDA_VISIBLE_DEVICES=3
|
||||
hub serving start -c deploy/hubserving/ocr_system/config.json
|
||||
```
|
||||
|
||||
## 3. 发送预测请求
|
||||
配置好服务端,可使用以下命令发送预测请求,获取预测结果:
|
||||
```bash
|
||||
python tools/test_hubserving.py --server_url=server_url --image_dir=image_path
|
||||
```
|
||||
|
||||
需要给脚本传递以下参数:
|
||||
- `server_url`:服务地址,格式为`http://[ip_address]:[port]/predict/[module_name]`
|
||||
|
||||
例如,如果使用配置文件启动分类,检测、识别,检测+分类+识别3阶段,表格识别和PP-Structure服务
|
||||
|
||||
并为每个服务修改了port,那么发送请求的url将分别是:
|
||||
```
|
||||
http://127.0.0.1:8865/predict/ocr_det
|
||||
http://127.0.0.1:8866/predict/ocr_cls
|
||||
http://127.0.0.1:8867/predict/ocr_rec
|
||||
http://127.0.0.1:8868/predict/ocr_system
|
||||
http://127.0.0.1:8869/predict/structure_table
|
||||
http://127.0.0.1:8870/predict/structure_system
|
||||
http://127.0.0.1:8870/predict/structure_layout
|
||||
http://127.0.0.1:8871/predict/kie_ser
|
||||
http://127.0.0.1:8872/predict/kie_ser_re
|
||||
```
|
||||
- `image_dir`:测试图像路径,可以是单张图片路径,也可以是图像集合目录路径
|
||||
- `visualize`:是否可视化结果,默认为False
|
||||
- `output`:可视化结果保存路径,默认为`./hubserving_result`
|
||||
|
||||
访问示例:
|
||||
```bash
|
||||
python tools/test_hubserving.py --server_url=http://127.0.0.1:8868/predict/ocr_system --image_dir=./doc/imgs/ --visualize=false
|
||||
```
|
||||
|
||||
## 4. 返回结果格式说明
|
||||
返回结果为列表(list),列表中的每一项为词典(dict),词典一共可能包含3种字段,信息如下:
|
||||
|
||||
|字段名称|数据类型|意义|
|
||||
|---|---|---|
|
||||
|angle|str|文本角度|
|
||||
|
@ -231,41 +199,52 @@ hub serving start -c deploy/hubserving/ocr_system/config.json
|
|||
|confidence|float| 文本识别置信度或文本角度分类置信度|
|
||||
|text_region|list|文本位置坐标|
|
||||
|html|str|表格的html字符串|
|
||||
|regions|list|版面分析+表格识别+OCR的结果,每一项为一个list<br>包含表示区域坐标的`bbox`,区域类型的`type`和区域结果的`res`三个字段|
|
||||
|layout|list|版面分析的结果,每一项一个dict,包含版面区域坐标的`bbox`,区域类型的`label`|
|
||||
|
||||
不同模块返回的字段不同,如,文本识别服务模块返回结果不含`text_region`字段,具体信息如下:
|
||||
|
||||
|
||||
|字段名/模块名 |ocr_det |ocr_cls |ocr_rec |ocr_system |structure_table |structure_system |structure_layout |kie_ser |kie_re |
|
||||
|--- |--- |--- |--- |--- |--- |--- |--- |--- |--- |
|
||||
|angle | |✔ | |✔ | | | | | |
|
||||
|text | | |✔ |✔ | |✔ | |✔ |✔ |
|
||||
|confidence | |✔ |✔ |✔ | |✔ | |✔ |✔ |
|
||||
|text_region |✔ | | |✔ | |✔ | |✔ |✔ |
|
||||
|html | | | | |✔ |✔ | | | |
|
||||
|regions | | | | |✔ |✔ | | | |
|
||||
|layout | | | | | | |✔ | | |
|
||||
|ser_res | | | | | | | |✔ | |
|
||||
|re_res | | | | | | | | |✔ |
|
||||
|
||||
**说明:** 如果需要增加、删除、修改返回字段,可在相应模块的`module.py`文件中进行修改,完整流程参考下一节自定义修改服务模块。
|
||||
|
||||
## 5. 自定义修改服务模块
|
||||
如果需要修改服务逻辑,一般需要操作以下步骤(以修改`deploy/hubserving/ocr_system`为例):
|
||||
|
||||
1. 停止服务:
|
||||
```bash
|
||||
hub serving stop --port/-p XXXX
|
||||
```
|
||||
2. 到`deploy/hubserving/ocr_system`下的`module.py`和`params.py`等文件中根据实际需求修改代码。
|
||||
|
||||
例如,如果需要替换部署服务所用模型,则需要到`params.py`中修改模型路径参数`det_model_dir`和`rec_model_dir`,如果需要关闭文本方向分类器,则将参数`use_angle_cls`置为`False`
|
||||
|
||||
当然,同时可能还需要修改其他相关参数,请根据实际情况修改调试。
|
||||
|
||||
**强烈建议修改后先直接运行`module.py`调试,能正确运行预测后再启动服务测试。**
|
||||
|
||||
**注意:** PPOCR-v3识别模型使用的图片输入shape为`3,48,320`,因此需要修改`params.py`中的`cfg.rec_image_shape = "3, 48, 320"`,如果不使用PPOCR-v3识别模型,则无需修改该参数。
|
||||
3. (可选)如果想要重命名模块需要更改`module.py`文件中的以下行:
|
||||
- [`from deploy.hubserving.ocr_system.params import read_params`中的`ocr_system`](https://github.com/PaddlePaddle/PaddleOCR/blob/a923f35de57b5e378f8dd16e54d0a3e4f51267fd/deploy/hubserving/ocr_system/module.py#L35)
|
||||
- [`name="ocr_system",`中的`ocr_system`](https://github.com/PaddlePaddle/PaddleOCR/blob/a923f35de57b5e378f8dd16e54d0a3e4f51267fd/deploy/hubserving/ocr_system/module.py#L39)
|
||||
4. (可选)可能需要删除`__pycache__`目录以强制刷新CPython缓存:
|
||||
```bash
|
||||
find deploy/hubserving/ocr_system -name '__pycache__' -exec rm -r {} \;
|
||||
```
|
||||
5. 安装修改后的新服务包:
|
||||
```bash
|
||||
hub install deploy/hubserving/ocr_system
|
||||
```
|
||||
6. 重新启动服务:
|
||||
```bash
|
||||
hub serving start -m ocr_system
|
||||
```
|
||||
|
|
|
@ -3,24 +3,23 @@ English | [简体中文](readme.md)
|
|||
- [Service deployment based on PaddleHub Serving](#service-deployment-based-on-paddlehub-serving)
|
||||
- [1. Update](#1-update)
|
||||
- [2. Quick start service](#2-quick-start-service)
|
||||
- [2.1 Install PaddleHub](#21-install-paddlehub)
|
||||
- [2.2 Download inference model](#22-download-inference-model)
|
||||
- [2.3 Install Service Module](#23-install-service-module)
|
||||
- [2.4 Start service](#24-start-service)
|
||||
- [2.4.1 Start with command line parameters (CPU only)](#241-start-with-command-line-parameters-cpu-only)
|
||||
- [2.4.2 Start with configuration file(CPU and GPU)](#242-start-with-configuration-filecpugpu)
|
||||
- [3. Send prediction requests](#3-send-prediction-requests)
|
||||
- [4. Returned result format](#4-returned-result-format)
|
||||
- [5. User-defined service module modification](#5-user-defined-service-module-modification)
|
||||
|
||||
PaddleOCR provides 2 service deployment methods:
|
||||
- Based on **PaddleHub Serving**: Code path is `./deploy/hubserving`. Please follow this tutorial.
|
||||
- Based on **PaddleServing**: Code path is `./deploy/pdserving`. Please refer to the [tutorial](../../deploy/pdserving/README.md) for usage.
|
||||
|
||||
# Service deployment based on PaddleHub Serving
|
||||
|
||||
The hubserving service deployment directory includes seven service packages: text detection, text angle class, text recognition, text detection+text angle class+text recognition three-stage series connection, layout analysis, table recognition, and PP-Structure. Please select the corresponding service package to install and start the service according to your needs. The directory is as follows:
|
||||
```
|
||||
deploy/hubserving/
|
||||
└─ ocr_det text detection module service package
|
||||
|
@ -40,7 +39,7 @@ deploy/hubserving/ocr_system/
|
|||
└─ __init__.py Empty file, required
|
||||
└─ config.json Configuration file, optional, passed in as a parameter when using configuration to start the service
|
||||
└─ module.py Main module file, required, contains the complete logic of the service
|
||||
└─ params.py Parameter file, required, including parameters such as model path, pre- and post-processing parameters
|
||||
```
|
||||
## 1. Update
|
||||
|
||||
|
@ -49,124 +48,76 @@ deploy/hubserving/ocr_system/
|
|||
* 2022.03.30 add PP-Structure and table recognition services.
|
||||
* 2022.05.05 add PP-OCRv3 text detection and recognition services.
|
||||
|
||||
|
||||
## 2. Quick start service
|
||||
The following steps take the 2-stage series service as an example. If only the detection service or recognition service is needed, replace the corresponding file path.
|
||||
|
||||
### 2.1 Install PaddleHub
|
||||
```bash
|
||||
pip3 install paddlehub==2.1.0 --upgrade
|
||||
```
|
||||
|
||||
### 2.2 Download inference model
|
||||
Before installing the service module, you need to prepare the inference model and put it in the correct path. By default, the PP-OCRv3 models are used, and the default model path is:
|
||||
| Model | Path |
|
||||
| ------- | - |
|
||||
| text detection model | ./inference/ch_PP-OCRv3_det_infer/ |
|
||||
| text recognition model | ./inference/ch_PP-OCRv3_rec_infer/ |
|
||||
| text angle classifier | ./inference/ch_ppocr_mobile_v2.0_cls_infer/ |
|
||||
| layout parse model | ./inference/picodet_lcnet_x1_0_fgd_layout_infer/ |
|
||||
| table recognition | ./inference/ch_ppstructure_mobile_v2.0_SLANet_infer/ |
|
||||
| KIE(SER) | ./inference/ser_vi_layoutxlm_xfund_infer/ |
|
||||
| KIE(SER+RE) | ./inference/re_vi_layoutxlm_xfund_infer/ |
|
||||
|
||||
**The model path can be found and modified in `params.py`.**
|
||||
More models provided by PaddleOCR can be obtained from the [model library](../../doc/doc_en/models_list_en.md). You can also use models trained by yourself.
|
||||
|
||||
### 2.3 Install Service Module
|
||||
PaddleOCR provides multiple service modules; install the required modules according to your needs.
|
||||
|
||||
* On Linux platform, the examples are as follows.
|
||||
```shell
|
||||
# Install the text detection service module:
|
||||
hub install deploy/hubserving/ocr_det/
|
||||
|
||||
# Or, install the text angle class service module:
|
||||
hub install deploy/hubserving/ocr_cls/
|
||||
|
||||
# Or, install the text recognition service module:
|
||||
hub install deploy/hubserving/ocr_rec/
|
||||
|
||||
# Or, install the 2-stage series service module:
|
||||
hub install deploy/hubserving/ocr_system/
|
||||
|
||||
# Or install table recognition service module
|
||||
hub install deploy/hubserving/structure_table/
|
||||
|
||||
# Or install PP-Structure service module
|
||||
hub install deploy/hubserving/structure_system/
|
||||
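
# Or install layout analysis service module
hub install deploy/hubserving/structure_layout/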
|
||||
# Or install KIE(SER) service module
|
||||
hub install deploy/hubserving/kie_ser/
|
||||
|
||||
# Or install KIE(SER+RE) service module
|
||||
hub install deploy/hubserving/kie_ser_re/
|
||||
```
|
||||
|
||||
* On Windows platform, the examples are as follows.
|
||||
```shell
|
||||
# Install the detection service module:
|
||||
hub install deploy\hubserving\ocr_det\
|
||||
|
||||
# Or, install the angle class service module:
|
||||
hub install deploy\hubserving\ocr_cls\
|
||||
|
||||
# Or, install the recognition service module:
|
||||
hub install deploy\hubserving\ocr_rec\
|
||||
|
||||
# Or, install the 2-stage series service module:
|
||||
hub install deploy\hubserving\ocr_system\
|
||||
|
||||
# Or install table recognition service module
|
||||
hub install deploy\hubserving\structure_table\
|
||||
|
||||
# Or install PP-Structure service module
|
||||
hub install deploy\hubserving\structure_system\
|
||||
|
||||
# Or install layout analysis service module
|
||||
hub install deploy\hubserving\structure_layout\
|
||||
|
||||
# Or install KIE(SER) service module
|
||||
hub install deploy\hubserving\kie_ser\
|
||||
|
||||
# Or install KIE(SER+RE) service module
|
||||
hub install deploy\hubserving\kie_ser_re\
|
||||
```
|
||||
* On the Linux platform (replace `/` with `\` if using Windows), the examples are as the following table:

| Service module | Command |
| ------- | - |
| text detection | `hub install deploy/hubserving/ocr_det` |
| text angle class | `hub install deploy/hubserving/ocr_cls` |
| text recognition | `hub install deploy/hubserving/ocr_rec` |
| 2-stage series | `hub install deploy/hubserving/ocr_system` |
| table recognition | `hub install deploy/hubserving/structure_table` |
| PP-Structure | `hub install deploy/hubserving/structure_system` |
| layout analysis | `hub install deploy/hubserving/structure_layout` |
| KIE(SER) | `hub install deploy/hubserving/kie_ser` |
| KIE(SER+RE) | `hub install deploy/hubserving/kie_ser_re` |
|
||||
|
||||
### 2.4 Start service
|
||||
#### 2.4.1 Start with command line parameters (CPU only)
|
||||
|
||||
**start command:**
|
||||
```bash
|
||||
hub serving start --modules Module1==Version1, Module2==Version2, ... \
|
||||
--port 8866 \
|
||||
--use_multiprocess \
|
||||
--workers \
|
||||
```
|
||||
**Parameters:**
|
||||
|parameters|usage|
|
||||
|---|---|
|
||||
|`--modules`/`-m`|PaddleHub Serving pre-installed model, listed in the form of multiple Module==Version key-value pairs<br>**When Version is not specified, the latest version is selected by default**|
|
||||
|`--port`/`-p`|Service port, default is 8866|
|
||||
|`--use_multiprocess`|Enable concurrent mode, by default using the single-process mode, this mode is recommended for multi-core CPU machines<br>**Windows operating system only supports single-process mode**|
|
||||
|`--workers`|The number of concurrent tasks specified in concurrent mode, the default is `2*cpu_count-1`, where `cpu_count` is the number of CPU cores|
|
||||
|
||||
For example, start the 2-stage series service:
|
||||
```bash
|
||||
hub serving start -m ocr_system
|
||||
```
|
||||
|
||||
This completes the deployment of a service API, using the default port number 8866.
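As a quick smoke test of the freshly deployed API, you can also post an image directly from Python. The sketch below assumes the default port 8866 and the usual PaddleHub Serving request body of `{"images": [<base64>]}`; check the module's `module.py` for the exact contract:

```python
import base64
import json

import requests

# Read a test image and base64-encode it (the image path is only an example).
with open("./doc/imgs/11.jpg", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

# The service address format is http://[ip_address]:[port]/predict/[module_name].
url = "http://127.0.0.1:8866/predict/ocr_system"
headers = {"Content-Type": "application/json"}
payload = {"images": [image_b64]}

response = requests.post(url, headers=headers, data=json.dumps(payload))
print(response.json())
```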
|
||||
|
||||
#### 2.4.2 Start with configuration file(CPU and GPU)
|
||||
**start command:**
|
||||
```bash
|
||||
hub serving start --config/-c config.json
|
||||
```
|
||||
In which the format of `config.json` is as follows:
|
||||
```json
|
||||
{
|
||||
"modules_info": {
|
||||
"ocr_system": {
|
||||
|
@ -183,51 +134,61 @@ Wherein, the format of `config.json` is as follows:
|
|||
"workers": 2
|
||||
}
|
||||
```
|
||||
- The configurable parameters in `init_args` are consistent with the `_initialize` function interface in `module.py`.
|
||||
|
||||
**When `use_gpu` is `true`, it means that the GPU is used to start the service**.
|
||||
- The configurable parameters in `predict_args` are consistent with the `predict` function interface in `module.py`.
|
||||
|
||||
**Note:**
|
||||
- When using the configuration file to start the service, other parameters will be ignored.
|
||||
- If you use GPU prediction (that is, `use_gpu` is set to `true`), you need to set the environment variable CUDA_VISIBLE_DEVICES before starting the service, such as:
|
||||
```bash
|
||||
export CUDA_VISIBLE_DEVICES=0
|
||||
```
|
||||
- **`use_gpu` and `use_multiprocess` cannot be `true` at the same time.**
|
||||
|
||||
For example, use GPU card No. 3 to start the 2-stage series service:
|
||||
```bash
|
||||
export CUDA_VISIBLE_DEVICES=3
|
||||
hub serving start -c deploy/hubserving/ocr_system/config.json
|
||||
```
|
||||
|
||||
## 3. Send prediction requests
|
||||
After the service starts, you can use the following command to send a prediction request to obtain the prediction result:
|
||||
```bash
|
||||
python tools/test_hubserving.py --server_url=server_url --image_dir=image_path
|
||||
```
|
||||
|
||||
The following parameters need to be passed to the script:
|
||||
- **server_url**:service address, the format of which is
|
||||
`http://[ip_address]:[port]/predict/[module_name]`
|
||||
For example, if using the configuration file to start the text angle classification, text detection, text recognition, detection+classification+recognition 3 stages, table recognition and PP-Structure service,
|
||||
|
||||
also modified the port for each service, then the `server_url` to send the request will be:
|
||||
|
||||
```
|
||||
http://127.0.0.1:8865/predict/ocr_det
|
||||
http://127.0.0.1:8866/predict/ocr_cls
|
||||
http://127.0.0.1:8867/predict/ocr_rec
|
||||
http://127.0.0.1:8868/predict/ocr_system
|
||||
http://127.0.0.1:8869/predict/structure_table
|
||||
http://127.0.0.1:8870/predict/structure_system
|
||||
http://127.0.0.1:8870/predict/structure_layout
|
||||
http://127.0.0.1:8871/predict/kie_ser
|
||||
http://127.0.0.1:8872/predict/kie_ser_re
|
||||
```
|
||||
- **image_dir**:Test image path, which can be a single image path or an image directory path
|
||||
- **visualize**:Whether to visualize the results, the default value is False
|
||||
- **output**:The folder to save the visualization result, the default value is `./hubserving_result`
|
||||
|
||||
Example:
|
||||
```bash
|
||||
python tools/test_hubserving.py --server_url=http://127.0.0.1:8868/predict/ocr_system --image_dir=./doc/imgs/ --visualize=false
|
||||
```
|
||||
|
||||
## 4. Returned result format
|
||||
The returned result is a list. Each item in the list is a dictionary which may contain three fields. The information is as follows:
|
||||
|
||||
|field name|data type|description|
|
||||
|----|----|----|
|
||||
|
@ -235,45 +196,54 @@ The returned result is a list. Each item in the list is a dict. The dict may con
|
|||
|text|str|text content|
|
||||
|confidence|float|text recognition confidence|
|
||||
|text_region|list|text location coordinates|
|
||||
|html|str|table HTML string|
|
||||
|regions|list|The result of layout analysis + table recognition + OCR, each item is a list<br>including `bbox` indicating area coordinates, `type` of area type and `res` of area results|
|
||||
|layout|list|The result of layout analysis, each item is a dict, including `bbox` indicating area coordinates, `label` of area type|
|
||||
|
||||
The fields returned by different modules are different. For example, the results returned by the text recognition service module do not contain `text_region`; the detailed table is as follows:
|
||||
|
||||
|field name/module name |ocr_det |ocr_cls |ocr_rec |ocr_system |structure_table |structure_system |structure_layout |kie_ser |kie_re |
|
||||
|--- |--- |--- |--- |--- |--- |--- |--- |--- |--- |
|
||||
|angle | |✔ | |✔ | | | | | |
|
||||
|text | | |✔ |✔ | |✔ | |✔ |✔ |
|
||||
|confidence | |✔ |✔ |✔ | |✔ | |✔ |✔ |
|
||||
|text_region |✔ | | |✔ | |✔ | |✔ |✔ |
|
||||
|html | | | | |✔ |✔ | | | |
|
||||
|regions | | | | |✔ |✔ | | | |
|
||||
|layout | | | | | | |✔ | | |
|
||||
|ser_res | | | | | | | |✔ | |
|
||||
|re_res | | | | | | | | |✔ |
|
||||
|
||||
**Note:** If you need to add, delete or modify the returned fields, you can modify the file `module.py` of the corresponding module. For the complete process, refer to the user-defined modification service module in the next section.
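To illustrate how these fields appear on the client side, the following sketch queries the `ocr_system` service and prints the fields from the table above; the nesting of `results` (one list per input image) is an assumption based on the usual PaddleHub Serving response shape:

```python
import requests

# Assumes the request contract shown in section 2.4.1 and the default port 8866.
resp = requests.post(
    "http://127.0.0.1:8866/predict/ocr_system",
    json={"images": ["<base64-encoded image>"]},
)
for page in resp.json()["results"]:   # one entry per input image (assumed nesting)
    for line in page:                 # one entry per detected text line
        print(line["text"], line["confidence"], line["text_region"])
```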
|
||||
|
||||
## 5. User-defined service module modification
|
||||
If you need to modify the service logic, the following steps are generally required (take the modification of `deploy/hubserving/ocr_system` for example):
|
||||
|
||||
1. Stop service:
|
||||
```bash
|
||||
hub serving stop --port/-p XXXX
|
||||
```
|
||||
|
||||
2. Modify the code in the corresponding files under `deploy/hubserving/ocr_system`, such as `module.py` and `params.py`, to your actual needs.
|
||||
|
||||
For example, if you need to replace the model used by the deployed service, you need to modify model path parameters `det_model_dir` and `rec_model_dir` in `params.py`. If you want to turn off the text direction classifier, set the parameter `use_angle_cls` to `False`.
|
||||
|
||||
Of course, other related parameters may need to be modified at the same time. Please modify and debug according to the actual situation.
|
||||
|
||||
**It is suggested to run `module.py` directly for debugging after modification before starting the service test.**
|
||||
|
||||
**Note:** The image input shape used by the PP-OCRv3 recognition model is `3, 48, 320`, so you need to modify `cfg.rec_image_shape = "3, 48, 320"` in `params.py`. If you do not use the PP-OCRv3 recognition model, there is no need to modify this parameter.
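Putting these notes together, the relevant edits in `params.py` might look like the following sketch; the parameter names are those documented above, while the model paths and the surrounding config object are illustrative:

```python
# Sketch of deploy/hubserving/ocr_system/params.py edits (illustrative values).
def read_params():
    cfg = Config()  # placeholder for the module's own config object

    # Point the service at your own exported inference models.
    cfg.det_model_dir = "./inference/ch_PP-OCRv3_det_infer/"
    cfg.rec_model_dir = "./inference/ch_PP-OCRv3_rec_infer/"

    # Required when serving the PP-OCRv3 recognition model.
    cfg.rec_image_shape = "3, 48, 320"

    # Turn off the text direction classifier if it is not needed.
    cfg.use_angle_cls = False
    return cfg
```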
|
||||
3. (Optional) If you want to rename the module, the following lines should be modified:
|
||||
- [`ocr_system` within `from deploy.hubserving.ocr_system.params import read_params`](https://github.com/PaddlePaddle/PaddleOCR/blob/a923f35de57b5e378f8dd16e54d0a3e4f51267fd/deploy/hubserving/ocr_system/module.py#L35)
|
||||
- [`ocr_system` within `name="ocr_system",`](https://github.com/PaddlePaddle/PaddleOCR/blob/a923f35de57b5e378f8dd16e54d0a3e4f51267fd/deploy/hubserving/ocr_system/module.py#L39)
|
||||
4. (Optional) You may need to delete the `__pycache__` directory to force a refresh of the CPython build cache:
|
||||
```bash
|
||||
find deploy/hubserving/ocr_system -name '__pycache__' -exec rm -r {} \;
|
||||
```
|
||||
5. Install modified service module:
|
||||
```bash
|
||||
hub install deploy/hubserving/ocr_system/
|
||||
```
|
||||
6. Restart service:
|
||||
```bash
|
||||
hub serving start -m ocr_system
|
||||
```
|
||||
|
|
|
@ -75,9 +75,9 @@ paddle2onnx --model_dir ./inference/en_PP-OCRv3_rec_infer \
|
|||
--enable_onnx_checker True
|
||||
|
||||
paddle2onnx --model_dir ./inference/ch_ppocr_mobile_v2.0_cls_infer \
|
||||
--model_filename inference.pdmodel \
|
||||
--params_filename inference.pdiparams \
|
||||
--save_file ./inference/cls_onnx/model.onnx \
|
||||
--opset_version 10 \
|
||||
--input_shape_dict="{'x':[-1,3,-1,-1]}" \
|
||||
--enable_onnx_checker True
|
||||
|
|
|
@ -1,14 +1,30 @@
|
|||
English| [简体中文](README_ch.md)
|
||||
|
||||
# Paddle.js Introduction
|
||||
|
||||
[Paddle.js](https://github.com/PaddlePaddle/Paddle.js) is the web sub-project of Baidu PaddlePaddle, an open source deep learning framework that runs in the browser. Paddle.js can either load a pre-trained model or transform a model from paddle-hub with the model transforming tools provided by Paddle.js. It can run in any browser that supports WebGL/WebGPU/WebAssembly, and it can also run in Baidu Smartprogram and WeChat miniprogram.
|
||||
|
||||
## Web Demo
|
||||
To run the OCR demo in a browser, refer to the [tutorial](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/application/js/web_demo).
|
||||
|
||||
|demo|web demo directory|visualization|
|
||||
|-|-|-|
|
||||
|PP-OCRv3|[TextDetection、TextRecognition](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/application/js/web_demo/src/pages/cv/ocr/)|<img src="https://user-images.githubusercontent.com/26592129/196874354-1b5eecb0-f273-403c-aa6c-4463bf6d78db.png" height="200px">|
|
||||
|
||||
|
||||
## Mini Program Demo

To run the OCR demo in a WeChat miniprogram, refer to the [tutorial](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/application/js/mini_program).
|
||||
|
||||
|demo|directory|
|
||||
|-|-|
|
||||
|Text Detection| [ocrdetecXcx](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/application/js/mini_program/ocrdetectXcx/) |
|
||||
|Text Recognition| [ocrXcx](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/application/js/mini_program/ocrXcx/) |
|
||||
|
||||
<div align="center">
|
||||
<img src="./paddlejs_demo.gif" width="800">
|
||||
</div>
|
||||
|
||||
<a href="https://trackgit.com">
|
||||
<img src="https://us-central1-trackgit-analytics.cloudfunctions.net/token/ping/lb0jygcawaxcrq8cb8rl" alt="trackgit-views" />
|
||||
</a>
|
||||
|
|
|
@ -5,10 +5,31 @@
|
|||
[Paddle.js](https://github.com/PaddlePaddle/Paddle.js) 是百度 PaddlePaddle 的 web 方向子项目,是一个运行在浏览器中的开源深度学习框架。Paddle.js 可以加载提前训练好的 paddle 模型,通过 Paddle.js 的模型转换工具 paddlejs-converter 变成浏览器友好的模型进行在线推理预测使用。目前,Paddle.js 可以在支持 WebGL/WebGPU/WebAssembly 的浏览器中运行,也可以在百度小程序和微信小程序环境下运行。
|
||||
|
||||
|
||||
## Web Demo使用
|
||||
|
||||
在浏览器中直接运行官方OCR demo参考[教程](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/application/js/web_demo)
|
||||
|
||||
|demo名称|web demo目录|可视化|
|
||||
|-|-|-|
|
||||
|PP-OCRv3|[TextDetection、TextRecognition](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/application/js/web_demo/src/pages/cv/ocr/)|<img src="https://user-images.githubusercontent.com/26592129/196874354-1b5eecb0-f273-403c-aa6c-4463bf6d78db.png" height="200px">|
|
||||
|
||||
|
||||
## 微信小程序Demo使用
|
||||
|
||||
在微信小程序运行官方demo参考[教程](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/application/js/mini_program)
|
||||
|
||||
|名称|目录|
|
||||
|-|-|
|
||||
|OCR文本检测| [ocrdetecXcx](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/application/js/mini_program/ocrdetectXcx/) |
|
||||
|OCR文本识别| [ocrXcx](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/application/js/mini_program/ocrXcx/) |
|
||||
|
||||
|
||||
- 效果:
|
||||
|
||||
<div align="center">
|
||||
<img src="./paddlejs_demo.gif" width="800">
|
||||
<img src="https://user-images.githubusercontent.com/26592129/197918203-c7d46f8a-75d4-47f9-9687-405ee0d6727e.gif" width="800">
|
||||
</div>
|
||||
|
||||
<a href="https://trackgit.com">
|
||||
<img src="https://us-central1-trackgit-analytics.cloudfunctions.net/token/ping/lb0jzfbyyttdxne1imal" alt="trackgit-views" />
|
||||
</a>
|
||||
|
|
|
@ -232,6 +232,7 @@ cp -rf general_detection_op.cpp Serving/core/general-server/op
|
|||
# 启动服务,运行日志保存在log.txt
|
||||
python3 -m paddle_serving_server.serve --model ppocr_det_v3_serving ppocr_rec_v3_serving --op GeneralDetectionOp GeneralInferOp --port 8181 &>log.txt &
|
||||
```
|
||||
|
||||
成功启动服务后,log.txt中会打印类似如下日志
|
||||

|
||||
|
||||
|
|
|
@ -54,4 +54,6 @@ python deploy/slim/quantization/export_model.py -c configs/det/ch_PP-OCRv3/ch_PP
|
|||
### 5. 量化模型部署
|
||||
|
||||
上述步骤导出的量化模型,参数精度仍然是FP32,但是参数的数值范围是int8,导出的模型可以通过PaddleLite的opt模型转换工具完成模型转换。
|
||||
量化模型移动端部署可参考 [移动端模型部署](../../lite/readme.md)
|
||||
|
||||
备注:量化训练后的模型参数是float32类型,转inference model预测时相对不量化无加速效果,原因是量化后模型结构之间存在量化和反量化算子,如果要使用量化模型部署,建议使用TensorRT并设置precision为INT8加速量化模型的预测时间。
|
||||
|
|
|
@ -188,7 +188,7 @@ A:可以看下训练的尺度和预测的尺度是否相同,如果训练的
|
|||
|
||||
#### Q: 如何识别招牌或者广告图中的艺术字?
|
||||
|
||||
**A**: 招牌或者广告图中的艺术字是文本识别一个非常有挑战性的难题,因为艺术字中的单字和印刷体相比,变化非常大。如果需要识别的艺术字是在一个词典列表内,可以将词典中的每个词认为是一个待识别图像模板,通过通用图像检索识别系统解决识别问题。可以尝试使用PaddleClas的图像识别系统PP-ShiTuV2。
|
||||
|
||||
#### Q: 图像正常识别出来的文字是OK的,旋转90度后识别出来的结果就比较差,有什么方法可以优化?
|
||||
|
||||
|
@ -400,7 +400,7 @@ StyleText的用途主要是:提取style_image中的字体、背景等style信
|
|||
|
||||
A:无论是文字检测,还是文字识别,骨干网络的选择是预测效果和预测效率的权衡。一般,选择更大规模的骨干网络,例如ResNet101_vd,则检测或识别更准确,但预测耗时相应也会增加。而选择更小规模的骨干网络,例如MobileNetV3_small_x0_35,则预测更快,但检测或识别的准确率会大打折扣。幸运的是不同骨干网络的检测或识别效果与在ImageNet数据集图像1000分类任务效果正相关。飞桨图像分类套件PaddleClas汇总了ResNet_vd、Res2Net、HRNet、MobileNetV3、GhostNet等23种系列的分类网络结构,在上述图像分类任务的top1识别准确率,GPU(V100和T4)和CPU(骁龙855)的预测耗时以及相应的117个预训练模型下载地址。
|
||||
|
||||
(1)文字检测骨干网络的替换,主要是确定类似于ResNet的4个stages,以方便集成后续的类似FPN的检测头。此外,对于文字检测问题,使用ImageNet训练的分类预训练模型,可以加速收敛和效果提升。
|
||||
|
||||
(2)文字识别的骨干网络的替换,需要注意网络宽高stride的下降位置。由于文本识别一般宽高比例很大,因此高度下降频率少一些,宽度下降频率多一些。可以参考PaddleOCR中MobileNetV3骨干网络的改动。
|
||||
|
||||
|
|
|
@ -1,14 +1,16 @@
|
|||
[English](../doc_en/PP-OCRv3_det_train_en.md) | 简体中文
|
||||
|
||||
|
||||
# PP-OCRv3 文本检测模型训练
|
||||
|
||||
- [1. 简介](#1)
|
||||
- [2. PP-OCRv3检测训练](#2)
|
||||
- [3. 基于PP-OCRv3检测的finetune训练](#3)
|
||||
|
||||
<a name="1"></a>
|
||||
## 1. 简介
|
||||
|
||||
PP-OCRv3在PP-OCRv2的基础上进一步升级。本节介绍PP-OCRv3检测模型的训练步骤。有关PP-OCRv3策略介绍参考[文档](./PP-OCRv3_introduction.md)。
|
||||
|
||||
|
||||
<a name="2"></a>
|
||||
|
@ -30,7 +32,7 @@ PP-OCRv3检测训练包括两个步骤:
|
|||
|
||||
### 2.2 训练教师模型
|
||||
|
||||
教师模型训练的配置文件是[ch_PP-OCRv3_det_dml.yml](https://github.com/PaddlePaddle/PaddleOCR/blob/release%2F2.5/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_dml.yml)。教师模型结构的Backbone、Neck、Head分别为Resnet50, LKPAN, DBHead,采用DML的蒸馏方法训练。有关配置文件的详细介绍参考[文档](./knowledge_distillation.md)。
|
||||
|
||||
|
||||
下载ImageNet预训练模型:
|
||||
|
@ -145,19 +147,19 @@ paddle.save(s_params, "./pretrain_models/cml_student.pdparams")
|
|||
|
||||
|
||||
<a name="3"></a>
|
||||
## 3. 基于PP-OCRv3检测finetune训练
|
||||
|
||||
本节介绍如何使用PP-OCRv3检测模型在其他场景上的finetune训练。
|
||||
|
||||
finetune训练适用于三种场景:
|
||||
- 基于CML蒸馏方法的finetune训练,适用于教师模型在使用场景上精度高于PP-OCRv3检测模型,且希望得到一个轻量检测模型。
|
||||
- 基于PP-OCRv3轻量检测模型的finetune训练,无需训练教师模型,希望在PP-OCRv3检测模型基础上提升使用场景上的精度。
|
||||
- 基于DML蒸馏方法的finetune训练,适用于采用DML方法进一步提升精度的场景。
|
||||
|
||||
|
||||
**基于CML蒸馏方法的finetune训练**
|
||||
|
||||
下载PP-OCRv3训练模型:
|
||||
```
|
||||
wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_distill_train.tar
|
||||
tar xf ch_PP-OCRv3_det_distill_train.tar
|
||||
|
@ -177,10 +179,10 @@ python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs
|
|||
Global.save_model_dir=./output/
|
||||
```
|
||||
|
||||
**基于PP-OCRv3轻量检测模型的finetune训练**
|
||||
|
||||
|
||||
下载PP-OCRv3训练模型,并提取Student结构的模型参数:
|
||||
```
|
||||
wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_distill_train.tar
|
||||
tar xf ch_PP-OCRv3_det_distill_train.tar
|
||||
|
@ -248,5 +250,3 @@ python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/
|
|||
Architecture.Models.Student2.pretrained=./teacher \
|
||||
Global.save_model_dir=./output/
|
||||
```
|
||||
|
||||
|
|
@ -63,6 +63,8 @@ PP-OCRv3检测模型是对PP-OCRv2中的[CML](https://arxiv.org/pdf/2109.03144.p
|
|||
|
||||
测试环境: Intel Gold 6148 CPU,预测时开启MKLDNN加速。
|
||||
|
||||
PP-OCRv3检测模型训练步骤参考[文档](./PP-OCRv3_det_train.md)
|
||||
|
||||
**(1)LK-PAN:大感受野的PAN结构**
|
||||
|
||||
LK-PAN (Large Kernel PAN) 是一个具有更大感受野的轻量级[PAN](https://arxiv.org/pdf/1803.01534.pdf)结构,核心是将PAN结构的path augmentation中卷积核从`3*3`改为`9*9`。通过增大卷积核,提升特征图每个位置覆盖的感受野,更容易检测大字体的文字以及极端长宽比的文字。使用LK-PAN结构,可以将教师模型的hmean从83.2%提升到85.0%。
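下面用一小段示意代码说明该改动的核心(通道数96仅为假设值,实际以具体网络配置为准):

```python
import paddle.nn as nn

# path augmentation 中原有的 3x3 卷积(示意)
conv_3x3 = nn.Conv2D(96, 96, kernel_size=3, padding=1)

# LK-PAN 将其替换为 9x9 卷积,以增大特征图每个位置覆盖的感受野
conv_9x9 = nn.Conv2D(96, 96, kernel_size=9, padding=4)
```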
|
||||
|
|
|
@ -94,7 +94,7 @@ DB模型还支持以下推理部署方式:
|
|||
|
||||
<a name="5"></a>
|
||||
## 5. FAQ
|
||||
|
||||
无
|
||||
|
||||
## 引用
|
||||
|
||||
|
|
|
@ -26,8 +26,10 @@
|
|||
|
||||
|模型|骨干网络|配置文件|precision|recall|Hmean|下载链接|
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
|EAST|ResNet50_vd| [det_r50_vd_east.yml](../../configs/det/det_r50_vd_east.yml)|88.71%| 81.36%| 84.88%| [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_east_v2.0_train.tar)|
|
||||
|EAST|MobileNetV3|[det_mv3_east.yml](../../configs/det/det_mv3_east.yml) | 78.20%| 79.10%| 78.65%| [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_east_v2.0_train.tar)|
|
||||
|
||||
|
||||
|
||||
|
||||
<a name="2"></a>
|
||||
|
|
|
@ -3,6 +3,8 @@
|
|||
- [1. 两阶段OCR算法](#1)
|
||||
- [1.1 文本检测算法](#11)
|
||||
- [1.2 文本识别算法](#12)
|
||||
- [1.3 文本超分辨率算法](#13)
|
||||
- [1.4 公式识别算法](#14)
|
||||
- [2. 端到端OCR算法](#2)
|
||||
- [3. 表格识别算法](#3)
|
||||
- [4. 关键信息抽取算法](#4)
|
||||
|
@ -107,6 +109,34 @@ PaddleOCR将**持续新增**支持OCR领域前沿算法与模型,**欢迎广
|
|||
|RobustScanner|ResNet31| 87.77% | rec_r31_robustscanner | [训练模型](https://paddleocr.bj.bcebos.com/contribution/rec_r31_robustscanner.tar)|
|
||||
|RFL|ResNetRFL| 88.63% | rec_resnet_rfl_att | [训练模型](https://paddleocr.bj.bcebos.com/contribution/rec_resnet_rfl_att_train.tar) |
|
||||
|
||||
|
||||
<a name="13"></a>
|
||||
|
||||
### 1.3 文本超分辨率算法
|
||||
已支持的文本超分辨率算法列表(戳链接获取使用教程):
|
||||
- [x] [Text Gestalt](./algorithm_sr_gestalt.md)
|
||||
- [x] [Text Telescope](./algorithm_sr_telescope.md)
|
||||
|
||||
在TextZoom公开数据集上,算法效果如下:
|
||||
|
||||
|模型|骨干网络|PSNR_Avg|SSIM_Avg|配置文件|下载链接|
|
||||
|---|---|---|---|---|---|
|
||||
|Text Gestalt|tsrn|19.28|0.6560| [configs/sr/sr_tsrn_transformer_strock.yml](../../configs/sr/sr_tsrn_transformer_strock.yml)|[训练模型](https://paddleocr.bj.bcebos.com/sr_tsrn_transformer_strock_train.tar)|
|
||||
|Text Telescope|tbsrn|21.56|0.7411| [configs/sr/sr_telescope.yml](../../configs/sr/sr_telescope.yml)|[训练模型](https://paddleocr.bj.bcebos.com/contribution/sr_telescope_train.tar)|
|
||||
|
||||
<a name="14"></a>
|
||||
|
||||
### 1.4 公式识别算法
|
||||
|
||||
已支持的公式识别算法列表(戳链接获取使用教程):
|
||||
- [x] [CAN](./algorithm_rec_can.md)
|
||||
|
||||
在CROHME手写公式数据集上,算法效果如下:
|
||||
|
||||
|模型 |骨干网络|配置文件|ExpRate|下载链接|
|
||||
| ----- | ----- | ----- | ----- | ----- |
|
||||
|CAN|DenseNet|[rec_d28_can.yml](../../configs/rec/rec_d28_can.yml)|51.72%|[训练模型](https://paddleocr.bj.bcebos.com/contribution/rec_d28_can_train.tar)|
|
||||
|
||||
<a name="2"></a>
|
||||
|
||||
## 2. 端到端算法
|
||||
|
|
|
@ -159,7 +159,23 @@ Predicts of ./doc/imgs_words_en/word_10.png:('pain', 0.9999998807907104)
|
|||
<a name="5"></a>
|
||||
## 5. FAQ
|
||||
|
||||
- 1. GPU和CPU速度对比
|
||||
- 由于`SVTR`使用的算子大多为矩阵相乘,在GPU环境下,速度具有优势,但在CPU开启mkldnn加速环境下,`SVTR`相比于被优化的卷积网络没有优势。
|
||||
- 2. SVTR模型转ONNX失败
|
||||
- 保证`paddle2onnx`和`onnxruntime`版本最新,转onnx命令参考[SVTR模型转onnx步骤实例](https://github.com/PaddlePaddle/PaddleOCR/issues/7821#issuecomment-1271214273)。
|
||||
- 3. SVTR转ONNX成功但是推理结果不正确
|
||||
- 可能的原因模型参数`out_char_num`设置不正确,应设置为W//4、W//8或者W//12,可以参考[高精度中文场景文本识别模型SVTR的3.3.3章节](https://aistudio.baidu.com/aistudio/projectdetail/5073182?contributionType=1)。
|
||||
- 4. 长文本识别优化
|
||||
- 参考[高精度中文场景文本识别模型SVTR的3.3章节](https://aistudio.baidu.com/aistudio/projectdetail/5073182?contributionType=1)。
|
||||
- 5. 论文结果复现注意事项
|
||||
- 数据集使用[ABINet](https://github.com/FangShancheng/ABINet)提供的数据集;
|
||||
- 默认使用4卡GPU训练,单卡Batchsize默认为512,总Batchsize为2048,对应的学习率为0.0005,当修改Batchsize或者改变GPU卡数,学习率应等比例修改。
|
||||
- 6. 进一步优化的探索点
|
||||
- 学习率调整:可以调整为默认的两倍保持Batchsize不变;或者将Batchsize减小为默认的1/2,保持学习率不变;
|
||||
- 数据增强策略:可选`RecConAug`和`RecAug`;
|
||||
- 如果不使用STN时,可以将`mixer`的`Local`替换为`Conv`、`local_mixer`全部修改为`[5, 5]`;
|
||||
- 网格搜索最优的`embed_dim`、`depth`、`num_heads`配置;
|
||||
- 使用`后Normalization策略`,即是将模型配置`prenorm`修改为`True`。
|
||||
|
||||
|
||||
## 引用
|
||||
|
|
|
@ -128,12 +128,12 @@ Predicts of ./doc/imgs_words_en/word_10.png:('pain', 0.9998350143432617)
|
|||
<a name="4-3"></a>
|
||||
### 4.3 Serving服务化部署
|
||||
|
||||
暂不支持。
|
||||
|
||||
<a name="4-4"></a>
|
||||
### 4.4 更多推理部署
|
||||
|
||||
暂不支持。
|
||||
|
||||
<a name="5"></a>
|
||||
## 5. FAQ
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
# 表格识别算法 - TableMASTER
|
||||
|
||||
- [1. 算法简介](#1-算法简介)
|
||||
- [2. 环境配置](#2-环境配置)
|
||||
|
|
|
@ -30,12 +30,12 @@ PaddleOCR场景应用覆盖通用,制造、金融、交通行业的主要OCR
|
|||
| 类别 | 亮点 | 类别 | 亮点 |
|
||||
| -------------- | ------------------------ | ------------ | --------------------- |
|
||||
| 表单VQA | 多模态通用表单结构化提取 | 通用卡证识别 | 通用结构化提取 |
|
||||
| 增值税发票 | 敬请期待 | 身份证识别 | 结构化提取、图像阴影 |
|
||||
| 印章检测与识别 | 端到端弯曲文本识别 | 合同比对 | 密集文本检测、NLP串联 |
|
||||
|
||||
## 交通
|
||||
|
||||
| 类别 | 亮点 | 类别 | 亮点 |
|
||||
| ----------------- | ------------------------------ | ---------- | -------- |
|
||||
| 车牌识别 | 多角度图像、轻量模型、端侧部署 | 快递单识别 | 敬请期待 |
|
||||
| 驾驶证/行驶证识别 | 敬请期待 | | |
|
||||
|
|
|
@ -223,4 +223,4 @@ PaddleOCR目前已支持80种(除中文外)语种识别,`configs/rec/multi
|
|||
| rec_cyrillic_lite_train.yml | CRNN | Mobilenet_v3 small 0.5 | None | BiLSTM | ctc | 斯拉夫字母 |
|
||||
| rec_devanagari_lite_train.yml | CRNN | Mobilenet_v3 small 0.5 | None | BiLSTM | ctc | 梵文字母 |
|
||||
|
||||
更多支持语种请参考: [多语言模型](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.1/doc/doc_ch/multi_languages.md)
|
||||
|
|
|
@ -27,4 +27,4 @@ PaddleOCR提供了检测和识别模型的串联工具,可以将训练好的
|
|||
```
|
||||
python3 tools/infer/predict_system.py --image_dir="./doc/imgs/11.jpg" --det_model_dir="./inference/det/" --rec_model_dir="./inference/rec/"
|
||||
```
|
||||
更多的文本检测、识别串联推理使用方式请参考文档教程中的[基于预测引擎推理](./algorithm_inference.md)。
|
||||
|
|
|
@ -26,21 +26,11 @@ PaddleOCR提供的PP-OCR系列模型在通用场景中性能优异,能够解
|
|||
|
||||
### 2.2 模型选择
|
||||
|
||||
建议选择PP-OCRv3模型(配置文件:[ch_PP-OCRv3_det_student.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml),预训练模型:[ch_PP-OCRv3_det_distill_train.tar](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_distill_train.tar))进行微调,其精度与泛化性能是目前提供的最优预训练模型。
|
||||
|
||||
更多PP-OCR系列模型,请参考[PP-OCR 系列模型库](./models_list.md)。
|
||||
|
||||
|
||||
注意:在使用上述预训练模型的时候,需要使用文件夹中的`student.pdparams`文件作为预训练模型,即,仅使用学生模型。
|
||||
|
||||
|
||||
### 2.3 训练超参选择
|
||||
|
@ -49,7 +39,7 @@ paddle.save(b, "ch_PP-OCRv2_det_student.pdparams")
|
|||
|
||||
```yaml
|
||||
Global:
|
||||
pretrained_model: ./ch_PP-OCRv3_det_distill_train/student.pdparams # 预训练模型路径
|
||||
Optimizer:
|
||||
lr:
|
||||
name: Cosine
|
||||
|
@ -67,7 +57,7 @@ Train:
|
|||
num_workers: 4
|
||||
```
|
||||
|
||||
上述配置文件中,首先需要将`pretrained_model`字段指定为`student.pdparams`文件路径。
|
||||
|
||||
PaddleOCR提供的配置文件是在8卡训练(相当于总的batch size是`8*8=64`)、且没有加载预训练模型情况下的配置文件,因此您的场景中,学习率与总的batch size需要对应线性调整,例如
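线性调整规则可用如下示意代码表示(其中基准学习率数值仅为示例,请以实际配置文件为准):

```python
# 示意:学习率随总batch size线性缩放
base_lr = 0.001        # 示例:8卡、总batch size=64 时配置文件中的学习率
base_total_bs = 8 * 8
my_total_bs = 2 * 8    # 例如:2卡训练,每卡batch size仍为8
my_lr = base_lr * my_total_bs / base_total_bs
print(my_lr)           # 0.00025
```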
|
||||
|
||||
|
@ -88,7 +78,7 @@ PaddleOCR提供的配置文件是在8卡训练(相当于总的batch size是`8*
|
|||
| det_db_score_mode | str | "fast" | DB的检测结果得分计算方法,支持`fast`和`slow`,`fast`是根据polygon的外接矩形边框内的所有像素计算平均得分,`slow`是根据原始polygon内的所有像素计算平均得分,计算速度相对较慢一些,但是更加准确一些。 |
|
||||
|
||||
|
||||
更多关于推理方法的介绍可以参考[Paddle Inference推理教程](./inference_ppocr.md)。
|
||||
|
||||
|
||||
## 3. 文本识别模型微调
|
||||
|
@ -109,9 +99,9 @@ PaddleOCR提供的配置文件是在8卡训练(相当于总的batch size是`8*
|
|||
|
||||
### 3.2 模型选择
|
||||
|
||||
建议选择PP-OCRv3模型(配置文件:[ch_PP-OCRv3_rec_distillation.yml](../../configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml),预训练模型:[ch_PP-OCRv3_rec_train.tar](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_train.tar))进行微调,其精度与泛化性能是目前提供的最优预训练模型。
|
||||
|
||||
更多PP-OCR系列模型,请参考[PP-OCR 系列模型库](./models_list.md)。
|
||||
|
||||
|
||||
### 3.3 训练超参选择
|
||||
|
@ -146,8 +136,7 @@ Train:
|
|||
|
||||
```
|
||||
|
||||
|
||||
上述配置文件中,首先需要将`pretrained_model`字段指定为3.2章节中解压得到的`ch_PP-OCRv3_rec_train/best_accuracy.pdparams`文件路径。
|
||||
|
||||
PaddleOCR提供的配置文件是在8卡训练(相当于总的batch size是`8*128=1024`)、且没有加载预训练模型情况下的配置文件,因此您的场景中,学习率与总的batch size需要对应线性调整,例如:
|
||||
|
||||
|
@ -175,5 +164,4 @@ Train:
|
|||
|
||||
### 3.4 训练调优
|
||||
|
||||
训练过程并非一蹴而就的,完成一个阶段的训练评估后,建议收集分析当前模型在真实场景中的 badcase,有针对性的调整训练数据比例,或者进一步新增合成数据。通过多次迭代训练,不断优化模型效果。
|
||||
|
|
|
@ -88,7 +88,7 @@ PSE算法相关参数如下
|
|||
| :--: | :--: | :--: | :--: |
|
||||
| rec_algorithm | str | "CRNN" | 文本识别算法名称,目前支持`CRNN`, `SRN`, `RARE`, `NETR`, `SAR`, `ViTSTR`, `ABINet`, `VisionLAN`, `SPIN`, `RobustScanner`, `SVTR`, `SVTR_LCNet` |
|
||||
| rec_model_dir | str | 无,如果使用识别模型,该项是必填项 | 识别inference模型路径 |
|
||||
| rec_image_shape | str | "3,48,320" | 识别时的图像尺寸 |
|
||||
| rec_batch_num | int | 6 | 识别的batch size |
|
||||
| max_text_length | int | 25 | 识别结果最大长度,在`SRN`中有效 |
|
||||
| rec_char_dict_path | str | "./ppocr/utils/ppocr_keys_v1.txt" | 识别的字符字典文件 |
|
||||
|
@ -115,7 +115,7 @@ PSE算法相关参数如下
|
|||
| :--: | :--: | :--: | :--: |
|
||||
| use_angle_cls | bool | False | 是否使用方向分类器 |
|
||||
| cls_model_dir | str | 无,如果需要使用,则必须显式指定路径 | 方向分类器inference模型路径 |
|
||||
| cls_image_shape | str | "3,48,192" | 预测尺度 |
|
||||
| label_list | list | ['0', '180'] | class id对应的角度值 |
|
||||
| cls_batch_num | int | 6 | 方向分类器预测的batch size |
|
||||
| cls_thresh | float | 0.9 | 预测阈值,模型预测结果为180度,且得分大于该阈值时,认为最终预测结果为180度,需要翻转 |
|
||||
|
|
|
@ -205,7 +205,7 @@ Architecture:
|
|||
name: LayoutXLMForSer
|
||||
pretrained: True
|
||||
mode: vi
|
||||
# 由于采用BIO标注,假设字典中包含n个字段(包含other)时,则类别数为2n-1; 假设字典中包含n个字段(不含other)时,则类别数为2n+1。否则在train过程会报:IndexError: (OutOfRange) label value should less than the shape of axis dimension 。
|
||||
num_classes: &num_classes 7
|
||||
|
||||
PostProcess:
|
||||
|
@ -438,7 +438,25 @@ inference/ser_vi_layoutxlm/
|
|||
└── inference.pdmodel # inference模型的模型结构文件
|
||||
```
|
||||
|
||||
信息抽取模型中的RE任务转inference模型步骤如下:
|
||||
|
||||
``` bash
|
||||
# -c 后面设置训练算法的yml配置文件
|
||||
# -o 配置可选参数
|
||||
# Architecture.Backbone.checkpoints 参数设置待转换的训练模型地址
|
||||
# Global.save_inference_dir 参数设置转换的模型将保存的地址
|
||||
|
||||
python3 tools/export_model.py -c configs/kie/vi_layoutxlm/re_vi_layoutxlm_xfund_zh.yml -o Architecture.Backbone.checkpoints=./output/re_vi_layoutxlm_xfund_zh/best_accuracy Global.save_inference_dir=./inference/re_vi_layoutxlm
|
||||
```
|
||||
|
||||
转换成功后,在目录下有三个文件:
|
||||
|
||||
```
|
||||
inference/re_vi_layoutxlm/
|
||||
├── inference.pdiparams # inference模型的参数文件
|
||||
├── inference.pdiparams.info # inference模型的参数信息,可忽略
|
||||
└── inference.pdmodel # inference模型的模型结构文件
|
||||
```
|
||||
|
||||
## 4.2 模型推理
|
||||
|
||||
|
@ -461,9 +479,39 @@ python3 kie/predict_kie_token_ser.py \
|
|||
<img src="../../ppstructure/docs/kie/result_ser/zh_val_42_ser.jpg" width="800">
|
||||
</div>
|
||||
|
||||
VI-LayoutXLM模型基于RE任务进行推理,可以执行如下命令:
|
||||
|
||||
```bash
|
||||
cd ppstructure
|
||||
python3 kie/predict_kie_token_ser_re.py \
|
||||
--kie_algorithm=LayoutXLM \
|
||||
--re_model_dir=../inference/re_vi_layoutxlm \
|
||||
--ser_model_dir=../inference/ser_vi_layoutxlm \
|
||||
--use_visual_backbone=False \
|
||||
--image_dir=./docs/kie/input/zh_val_42.jpg \
|
||||
--ser_dict_path=../train_data/XFUND/class_list_xfun.txt \
|
||||
--vis_font_path=../doc/fonts/simfang.ttf \
|
||||
--ocr_order_method="tb-yx"
|
||||
```
|
||||
|
||||
RE可视化结果默认保存到`./output`文件夹里面,结果示例如下:
|
||||
|
||||
<div align="center">
|
||||
<img src="../../ppstructure/docs/kie/result_re/zh_val_42_re.jpg" width="800">
|
||||
</div>
|
||||
|
||||
# 5. FAQ
|
||||
|
||||
Q1: 训练模型转inference 模型之后预测效果不一致?
|
||||
|
||||
**A**:该问题多是trained model预测时候的预处理、后处理参数和inference model预测的时候的预处理、后处理参数不一致导致的。可以对比训练使用的配置文件中的预处理、后处理和预测时是否存在差异。
|
||||
|
||||
Q2: 训练过程中报如下错误:
|
||||
ValueError: (InvalidArgument) The 'shape' attribute in ReshapeOp is invalid. The input tensor X'size must be divisible by known capacity of 'shape'. But received X's shape = [4, 512, 23], X's size = 47104, 'shape' is [-1, 7], known capacity of 'shape' is -7.
|
||||
**A**:是由于训练使用的配置文件ser_vi_layoutxlm_xfund_zh.yml中Architecture.Backbone.num_classes的值与Loss.num_classes的值不一致导致。
|
||||
|
||||
Q3: 训练过程中报如下错误:
|
||||
IndexError: (OutOfRange) label value should less than the shape of axis dimension when label value(23) not equal to ignore_index(-100), But received label value as 23 and shape of axis dimension is 23.
|
||||
**A**:是由于训练使用的配置文件ser_vi_layoutxlm_xfund_zh.yml中Architecture.Backbone.num_classes的值与Loss.num_classes的值不正确。
|
||||
由于采用BIO标注,所以默认会有一个"O"标签,同时会忽略"OTHER", "OTHERS", "IGNORE"三个标签。PostProcess.class_path设置的字典文件中的每种类型会自动扩展成"B-"和"I-"为前缀的标签。
|
||||
所以假设字典文件中包含n个类型(包含OTHER)时,num_classes应该为2n-1;假设字典文件中包含n个类型(不含OTHER)时,num_classes应该为2n+1。
|
||||
|
|
|
@ -69,7 +69,7 @@ PaddleOCR中集成了知识蒸馏的算法,具体地,有以下几个主要
|
|||
|
||||
```yaml
|
||||
Architecture:
|
||||
model_type: &model_type "rec" # 模型类别,rec、det等,每个子网络的模型类别
|
||||
name: DistillationModel # 结构名称,蒸馏任务中,为DistillationModel,用于构建对应的结构
|
||||
algorithm: Distillation # 算法名称
|
||||
Models: # 模型,包含子网络的配置信息
|
||||
|
|
|
@@ -101,6 +101,7 @@ The downloadable models provided by PaddleOCR include the `inference model`, `trained model`, `pre-trained

|en_number_mobile_slim_v2.0_rec|Slim pruned and quantized ultra-lightweight model, supporting English and digit recognition|[rec_en_number_lite_train.yml](../../configs/rec/multi_language/rec_en_number_lite_train.yml)| 2.7M | [inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/en_number_mobile_v2.0_rec_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/en_number_mobile_v2.0_rec_slim_train.tar) |
|en_number_mobile_v2.0_rec|Original ultra-lightweight model, supporting English and digit recognition|[rec_en_number_lite_train.yml](../../configs/rec/multi_language/rec_en_number_lite_train.yml)|2.6M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/en_number_mobile_v2.0_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/en_number_mobile_v2.0_rec_train.tar) |

**Note:** The dictionary file for all English recognition models is `ppocr/utils/en_dict.txt`.

<a name="多语言识别模型"></a>
### 2.3 Multilingual Recognition Models (more languages are continuously being added...)

@@ -146,3 +147,4 @@ Paddle-Lite is a high-performance, lightweight, flexible and easily extensible deep

|PP-OCRv2(slim)|Distilled ultra-lightweight Chinese OCR mobile model|4.9M|[Download](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_slim_opt.nb)|[Download](https://paddleocr.bj.bcebos.com/dygraph_v2.0/lite/ch_ppocr_mobile_v2.0_cls_slim_opt.nb)|[Download](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_slim_opt.nb)|v2.9|
|V2.0|ppocr_v2.0 ultra-lightweight Chinese OCR mobile model|7.8M|[Download](https://paddleocr.bj.bcebos.com/dygraph_v2.0/lite/ch_ppocr_mobile_v2.0_det_opt.nb)|[Download](https://paddleocr.bj.bcebos.com/dygraph_v2.0/lite/ch_ppocr_mobile_v2.0_cls_opt.nb)|[Download](https://paddleocr.bj.bcebos.com/dygraph_v2.0/lite/ch_ppocr_mobile_v2.0_rec_opt.nb)|v2.9|
|V2.0(slim)|ppocr_v2.0 ultra-lightweight Chinese OCR mobile model (slim)|3.3M|[Download](https://paddleocr.bj.bcebos.com/dygraph_v2.0/lite/ch_ppocr_mobile_v2.0_det_slim_opt.nb)|[Download](https://paddleocr.bj.bcebos.com/dygraph_v2.0/lite/ch_ppocr_mobile_v2.0_cls_slim_opt.nb)|[Download](https://paddleocr.bj.bcebos.com/dygraph_v2.0/lite/ch_ppocr_mobile_v2.0_rec_slim_opt.nb)|v2.9|
@@ -1,6 +1,6 @@
# *Dive into OCR* E-book

*Dive into OCR* is a textbook that combines cutting-edge OCR theory with code practice. It was jointly created by the PaddleOCR team together with Xiang Bai (professor and doctoral supervisor at Huazhong University of Science and Technology, IAPR Fellow), Zhineng Chen (young researcher at Fudan University), Wenhui Huang (senior expert in computer vision at China Mobile Research Institute), researchers from the Big Data and Artificial Intelligence Lab of ICBC, other colleagues from industry, academia and research, and OCR developers. Its main features are:

- Covers the full OCR technology stack, from text detection and recognition to document analysis
- Closely integrates theory and practice, bridging the gap to code implementation, with accompanying teaching videos

@@ -21,5 +21,5 @@

## Resources
- To download the Chinese e-book, scan the QR code on the home page and join the group to obtain it
- [notebook tutorials](https://github.com/PaddleOCR-Community/Dive-into-OCR)
- [teaching videos](https://aistudio.baidu.com/aistudio/education/group/info/25207)
@@ -176,13 +176,14 @@ for idx in range(len(result)):
        print(line)

# Visualize the results
# If simfang.ttf is not available locally, it can be downloaded from the doc/fonts directory
from PIL import Image
result = result[0]
image = Image.open(img_path).convert('RGB')
boxes = [line[0] for line in result]
txts = [line[1][0] for line in result]
scores = [line[1][1] for line in result]
im_show = draw_ocr(image, boxes, txts, scores, font_path='doc/fonts/simfang.ttf')
im_show = Image.fromarray(im_show)
im_show.save('result.jpg')
```
@@ -210,7 +211,7 @@ from paddleocr import PaddleOCR, draw_ocr

# The languages currently supported by PaddleOCR can be switched via the lang parameter,
# e.g. `ch`, `en`, `fr`, `german`, `korean`, `japan`
ocr = PaddleOCR(use_angle_cls=True, lang="ch", page_num=2)  # need to run only once to download and load model into memory
img_path = './xxx.pdf'
result = ocr.ocr(img_path, cls=True)
for idx in range(len(result)):
@@ -14,6 +14,9 @@
- [2.5 Distributed Training](#25-分布式训练)
- [2.6 Other Training Environments](#26-其他训练环境)
- [2.7 Model Fine-tuning](#27-模型微调)
  - [2.7.1 Data Selection](#271-数据选择)
  - [2.7.2 Model Selection](#272-模型选择)
  - [2.7.3 Training Hyperparameter Selection](#273-训练超参选择)
- [3. Model Evaluation and Prediction](#3-模型评估与预测)
  - [3.1 Metric Evaluation](#31-指标评估)
  - [3.2 Testing Table Structure Recognition](#32-测试表格结构识别效果)
@@ -219,7 +222,39 @@ Running on DCU devices requires setting the environment variable `export HIP_VISIBLE_DEVICES=0,1,2,3`

## 2.7 Model Fine-tuning

In practice, it is recommended to load the official pre-trained model and fine-tune it on your own dataset. For the fine-tuning method, please refer to the [Model Fine-tuning Tutorial](./finetune.md).

### 2.7.1 Data Selection

Data volume: it is recommended to prepare at least 2,000 table recognition images for model fine-tuning.

### 2.7.2 Model Selection

It is recommended to fine-tune the SLANet model (configuration file: [SLANet_ch.yml](../../configs/table/SLANet_ch.yml), pre-trained model: [ch_ppstructure_mobile_v2.0_SLANet_train.tar](https://paddleocr.bj.bcebos.com/ppstructure/models/slanet/ch_ppstructure_mobile_v2.0_SLANet_train.tar)); its accuracy and generalization are the best among the Chinese table pre-trained models currently provided.

For more table recognition models, please refer to the [PP-Structure model list](../../ppstructure/docs/models_list.md).

### 2.7.3 Training Hyperparameter Selection

When fine-tuning, the most important hyperparameters are the pre-trained model path `pretrained_model` and the learning rate `learning_rate`. Part of the configuration file is shown below.

```yaml
Global:
  pretrained_model: ./ch_ppstructure_mobile_v2.0_SLANet_train/best_accuracy.pdparams # path to the pre-trained model
Optimizer:
  lr:
    name: Cosine
    learning_rate: 0.001 # learning rate
    warmup_epoch: 0
  regularizer:
    name: 'L2'
    factor: 0
```

In the above configuration file, you first need to set the `pretrained_model` field to the path of the `best_accuracy.pdparams` file.

The configuration files provided by PaddleOCR assume 4-GPU training (i.e. a total batch size of `4*48=192`) without loading a pre-trained model, so in your scenario the learning rate should be adjusted linearly with the total batch size, for example:

* If you train on a single GPU with batch_size=48, the total batch_size is 48, and it is recommended to adjust the learning rate to about `0.00025`.
* If, due to GPU memory limits, you can only set batch_size=32 on a single GPU, the total batch_size is 32, and it is recommended to adjust the learning rate to about `0.00017`.
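A quick way to sanity-check this linear scaling rule is to compute it directly. The sketch below is ours (not part of PaddleOCR) and mirrors the 4-GPU reference configuration described above:

```python
# Linear learning-rate scaling relative to the reference configuration
# (4 GPUs x batch 48 = total batch 192 at learning_rate 0.001).
def scaled_lr(total_batch_size, ref_batch_size=192, ref_lr=0.001):
    return ref_lr * total_batch_size / ref_batch_size

print(scaled_lr(48))  # 0.00025, the single-GPU batch_size=48 case above
print(scaled_lr(32))  # ~0.00017, the single-GPU batch_size=32 case above
```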
# 3. Model Evaluation and Prediction
@@ -294,7 +294,7 @@ paddleocr --image_dir PaddleOCR/doc/imgs_words/ch/word_1.jpg --use_angle_cls tru

## 3 Custom Models

When the built-in models cannot meet your needs, you need to use your own trained models. First, refer to [Model Export](./detection.md#4-模型导出与预测) to convert the detection, classification and recognition models into inference models, and then use them as follows.

### 3.1 Using the code
@@ -0,0 +1,253 @@
English | [简体中文](../doc_ch/PP-OCRv3_det_train.md)

# Training steps for the PP-OCRv3 text detection model

- [1. Introduction](#1)
- [2. PP-OCRv3 detection training](#2)
- [3. Fine-tuning based on PP-OCRv3 detection](#3)

<a name="1"></a>
## 1. Introduction

PP-OCRv3 is a further upgrade of PP-OCRv2. This section describes the training steps of the PP-OCRv3 detection model. Refer to the [documentation](./ppocr_introduction_en.md) for an introduction to PP-OCRv3.

<a name="2"></a>
## 2. Detection training

The PP-OCRv3 detection model upgrades the [CML](https://arxiv.org/pdf/2109.03144.pdf) (Collaborative Mutual Learning) text detection distillation strategy of PP-OCRv2, optimizing the detection teacher model and student model separately. For the teacher model, it proposes LK-PAN, a PAN structure with a large receptive field, together with the DML (Deep Mutual Learning) distillation strategy; for the student model, it proposes RSE-FPN, an FPN structure with a residual attention mechanism.

PP-OCRv3 detection training consists of two steps:
- Step 1: Train a detection teacher model using the DML distillation method
- Step 2: Use the teacher model obtained in Step 1 to train a lightweight student model using the CML method

### 2.1 Prepare data and environment

The training data uses the icdar2015 dataset; for the steps to prepare the training set, refer to [ocr_dataset](./dataset/ocr_datasets.md).

For preparing the runtime environment, refer to the [documentation](./installation_en.md).

### 2.2 Train the teacher model

The configuration file for teacher model training is [ch_PP-OCRv3_det_dml.yml](https://github.com/PaddlePaddle/PaddleOCR/blob/release%2F2.5/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_dml.yml). The Backbone, Neck, and Head of the teacher model are ResNet50, LKPAN, and DBHead respectively, and the model is trained with the DML distillation method. Refer to the [documentation](./knowledge_distillation) for a detailed introduction to the configuration files.
Download the ImageNet pretrained model:
```bash
# Download the pretrained model of ResNet50_vd
wget -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/pretrained/ResNet50_vd_ssld_pretrained.pdparams
```

**Start training**
```bash
# Single GPU training
python3 tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_dml.yml \
    -o Architecture.Models.Student.pretrained=./pretrain_models/ResNet50_vd_ssld_pretrained \
       Architecture.Models.Student2.pretrained=./pretrain_models/ResNet50_vd_ssld_pretrained \
       Global.save_model_dir=./output/

# For multi-GPU distributed training, use the following command:
python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_dml.yml \
    -o Architecture.Models.Student.pretrained=./pretrain_models/ResNet50_vd_ssld_pretrained \
       Architecture.Models.Student2.pretrained=./pretrain_models/ResNet50_vd_ssld_pretrained \
       Global.save_model_dir=./output/
```

The models saved during training are in the output directory and include the following files:
```
best_accuracy.states
best_accuracy.pdparams  # the model parameters with the best accuracy, saved by default
best_accuracy.pdopt     # the optimizer state of the best-accuracy model, saved by default
latest.states
latest.pdparams         # the latest model parameters, saved by default
latest.pdopt            # the optimizer state of the latest model, saved by default
```
Among them, best_accuracy holds the model parameters with the highest accuracy, and it can be evaluated directly.

The model evaluation command is as follows:
```bash
python3 tools/eval.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_dml.yml -o Global.checkpoints=./output/best_accuracy
```
The trained teacher model has a larger structure and higher accuracy, and is used to improve the accuracy of the student model.

**Extract teacher model parameters**

best_accuracy contains the parameters of two models, corresponding to Student and Student2 in the configuration file. The parameters of Student are extracted as follows:

```python
import paddle
# load the trained model
all_params = paddle.load("output/best_accuracy.pdparams")
# view the keys of the weight parameters
print(all_params.keys())
# extract the weights of the Student model
s_params = {key[len("Student."):]: all_params[key] for key in all_params if "Student." in key}
# view the keys of the extracted parameters
print(s_params.keys())
# save the extracted parameters
paddle.save(s_params, "./pretrain_models/dml_teacher.pdparams")
```

The extracted model parameters can be used for further fine-tuning or distillation training.
### 2.3 Train the student model

The configuration file for training the student model is [ch_PP-OCRv3_det_cml.yml](https://github.com/PaddlePaddle/PaddleOCR/blob/release%2F2.5/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml).
The teacher model trained in the previous section is used as supervision, and CML training yields the lightweight student model.

Download the ImageNet pretrained model for the student model:
```bash
# Download the pretrained model of MobileNetV3
wget -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/pretrained/MobileNetV3_large_x0_5_pretrained.pdparams
```

**Start training**

```bash
# Single GPU training
python3 tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml \
    -o Architecture.Models.Student.pretrained=./pretrain_models/MobileNetV3_large_x0_5_pretrained \
       Architecture.Models.Student2.pretrained=./pretrain_models/MobileNetV3_large_x0_5_pretrained \
       Architecture.Models.Teacher.pretrained=./pretrain_models/dml_teacher \
       Global.save_model_dir=./output/

# For multi-GPU distributed training, use the following command:
python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml \
    -o Architecture.Models.Student.pretrained=./pretrain_models/MobileNetV3_large_x0_5_pretrained \
       Architecture.Models.Student2.pretrained=./pretrain_models/MobileNetV3_large_x0_5_pretrained \
       Architecture.Models.Teacher.pretrained=./pretrain_models/dml_teacher \
       Global.save_model_dir=./output/
```

The models saved during training are in the output directory.
The model evaluation command is as follows:
```bash
python3 tools/eval.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml -o Global.checkpoints=./output/best_accuracy
```
best_accuracy contains the parameters of three models, corresponding to Student, Student2, and Teacher in the configuration file. The Student parameters are extracted as follows:

```python
import paddle
# load the trained model
all_params = paddle.load("output/best_accuracy.pdparams")
# view the keys of the weight parameters
print(all_params.keys())
# extract the weights of the Student model
s_params = {key[len("Student."):]: all_params[key] for key in all_params if "Student." in key}
# view the keys of the extracted parameters
print(s_params.keys())
# save the extracted parameters
paddle.save(s_params, "./pretrain_models/cml_student.pdparams")
```

The extracted Student parameters can be used for model deployment or further fine-tuning.
<a name="3"></a>
## 3. Fine-tuning based on PP-OCRv3 detection

This section describes how to fine-tune the PP-OCRv3 detection model for other scenarios.

Fine-tuning applies to three scenarios:
- Fine-tuning based on the CML distillation method: suitable when the accuracy of your teacher model in the target scenario is higher than that of the PP-OCRv3 detection model, and a lightweight detection model is desired.
- Fine-tuning based on the PP-OCRv3 lightweight detection model: no teacher model needs to be trained; this aims at improving the accuracy in the target scenario starting from the PP-OCRv3 detection model.
- Fine-tuning based on the DML distillation method: suitable when you want to use the DML method to further improve accuracy.

**Fine-tuning based on the CML distillation method**

Download the PP-OCRv3 training model:
```bash
wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_distill_train.tar
tar xf ch_PP-OCRv3_det_distill_train.tar
```
ch_PP-OCRv3_det_distill_train/best_accuracy.pdparams contains the parameters of the Student, Student2, and Teacher models in the CML configuration file.

Start training:

```bash
# Single GPU training
python3 tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml \
    -o Global.pretrained_model=./ch_PP-OCRv3_det_distill_train/best_accuracy \
       Global.save_model_dir=./output/

# For multi-GPU distributed training, use the following command:
python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml \
    -o Global.pretrained_model=./ch_PP-OCRv3_det_distill_train/best_accuracy \
       Global.save_model_dir=./output/
```
**Fine-tuning based on the PP-OCRv3 lightweight detection model**

Download the PP-OCRv3 training model and extract the model parameters of the Student structure:
```bash
wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_distill_train.tar
tar xf ch_PP-OCRv3_det_distill_train.tar
```

The Student parameters are extracted as follows (note that the weights are loaded from the downloaded ch_PP-OCRv3_det_distill_train directory):

```python
import paddle
# load the downloaded model
all_params = paddle.load("ch_PP-OCRv3_det_distill_train/best_accuracy.pdparams")
# view the keys of the weight parameters
print(all_params.keys())
# extract the weights of the Student model
s_params = {key[len("Student."):]: all_params[key] for key in all_params if "Student." in key}
# view the keys of the extracted parameters
print(s_params.keys())
# save the extracted parameters
paddle.save(s_params, "./student.pdparams")
```

Training uses the configuration file [ch_PP-OCRv3_det_student.yml](https://github.com/PaddlePaddle/PaddleOCR/blob/release%2F2.5/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml).

**Start training**

```bash
# Single GPU training
python3 tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml \
    -o Global.pretrained_model=./student \
       Global.save_model_dir=./output/

# For multi-GPU distributed training, use the following command:
python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml \
    -o Global.pretrained_model=./student \
       Global.save_model_dir=./output/
```
**Fine-tuning based on the DML distillation method**

Taking the Teacher model in ch_PP-OCRv3_det_distill_train as an example, first extract the parameters of the Teacher structure as follows:
```python
import paddle
# load the downloaded model
all_params = paddle.load("ch_PP-OCRv3_det_distill_train/best_accuracy.pdparams")
# view the keys of the weight parameters
print(all_params.keys())
# extract the weights of the Teacher model
t_params = {key[len("Teacher."):]: all_params[key] for key in all_params if "Teacher." in key}
# view the keys of the extracted parameters
print(t_params.keys())
# save the extracted parameters
paddle.save(t_params, "./teacher.pdparams")
```

**Start training**
```bash
# Single GPU training
python3 tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_dml.yml \
    -o Architecture.Models.Student.pretrained=./teacher \
       Architecture.Models.Student2.pretrained=./teacher \
       Global.save_model_dir=./output/

# For multi-GPU distributed training, use the following command:
python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_dml.yml \
    -o Architecture.Models.Student.pretrained=./teacher \
       Architecture.Models.Student2.pretrained=./teacher \
       Global.save_model_dir=./output/
```
@@ -65,6 +65,7 @@ The ablation experiments are as follows:

Testing environment: Intel Gold 6148 CPU, with MKLDNN acceleration enabled during inference.

For the training steps of the PP-OCRv3 detection model, refer to the [tutorial](./PP-OCRv3_det_train_en.md).

**(1) LK-PAN: A PAN structure with a large receptive field**
@@ -31,7 +31,7 @@ On the ICDAR2015 dataset, the text detection result is as follows:
| --- | --- | --- | --- | --- | --- | --- |
|DB|ResNet50_vd|[configs/det/det_r50_vd_db.yml](../../configs/det/det_r50_vd_db.yml)|86.41%|78.72%|82.38%|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_db_v2.0_train.tar)|
|DB|MobileNetV3|[configs/det/det_mv3_db.yml](../../configs/det/det_mv3_db.yml)|77.29%|73.08%|75.12%|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar)|
|DB++|ResNet50|[configs/det/det_r50_db++_icdar15.yml](../../configs/det/det_r50_db++_icdar15.yml)|90.89%|82.66%|86.58%|[pretrained model](https://paddleocr.bj.bcebos.com/dygraph_v2.1/en_det/ResNet50_dcn_asf_synthtext_pretrained.pdparams)/[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.1/en_det/det_r50_db%2B%2B_icdar15_train.tar)|

On the TD_TR dataset, the text detection result is as follows:
@@ -26,8 +26,9 @@ On the ICDAR2015 dataset, the text detection result is as follows:

|Model|Backbone|Configuration|Precision|Recall|Hmean|Download|
| --- | --- | --- | --- | --- | --- | --- |
|EAST|ResNet50_vd| [det_r50_vd_east.yml](../../configs/det/det_r50_vd_east.yml)|88.71%| 81.36%| 84.88%| [model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_east_v2.0_train.tar)|
|EAST|MobileNetV3|[det_mv3_east.yml](../../configs/det/det_mv3_east.yml) | 78.20%| 79.10%| 78.65%| [model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_east_v2.0_train.tar)|

<a name="2"></a>
@@ -3,6 +3,8 @@
- [1. Two-stage OCR Algorithms](#1)
  - [1.1 Text Detection Algorithms](#11)
  - [1.2 Text Recognition Algorithms](#12)
  - [1.3 Text Super-Resolution Algorithms](#13)
  - [1.4 Formula Recognition Algorithm](#14)
- [2. End-to-end OCR Algorithms](#2)
- [3. Table Recognition Algorithms](#3)
- [4. Key Information Extraction Algorithms](#4)
|
@ -97,13 +99,43 @@ Refer to [DTRB](https://arxiv.org/abs/1904.01906), the training and evaluation r
|
|||
|SAR|Resnet31| 87.20% | rec_r31_sar | [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.1/rec/rec_r31_sar_train.tar) |
|
||||
|SEED|Aster_Resnet| 85.35% | rec_resnet_stn_bilstm_att | [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.1/rec/rec_resnet_stn_bilstm_att.tar) |
|
||||
|SVTR|SVTR-Tiny| 89.25% | rec_svtr_tiny_none_ctc_en | [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/rec_svtr_tiny_none_ctc_en_train.tar) |
|
||||
|ViTSTR|ViTSTR| 79.82% | rec_vitstr_none_ce | [trained model](https://paddleocr.bj.bcebos.com/rec_vitstr_none_none_train.tar) |
|
||||
|ViTSTR|ViTSTR| 79.82% | rec_vitstr_none_ce | [trained model](https://paddleocr.bj.bcebos.com/rec_vitstr_none_ce_train.tar) |
|
||||
|ABINet|Resnet45| 90.75% | rec_r45_abinet | [trained model](https://paddleocr.bj.bcebos.com/rec_r45_abinet_train.tar) |
|
||||
|VisionLAN|Resnet45| 90.30% | rec_r45_visionlan | [trained model](https://paddleocr.bj.bcebos.com/VisionLAN/rec_r45_visionlan_train.tar) |
|
||||
|SPIN|ResNet32| 90.00% | rec_r32_gaspin_bilstm_att | [trained model](https://paddleocr.bj.bcebos.com/contribution/rec_r32_gaspin_bilstm_att.tar) |
|
||||
|RobustScanner|ResNet31| 87.77% | rec_r31_robustscanner | [trained model](https://paddleocr.bj.bcebos.com/contribution/rec_r31_robustscanner.tar)|
|
||||
|RFL|ResNetRFL| 88.63% | rec_resnet_rfl_att | [trained model](https://paddleocr.bj.bcebos.com/contribution/rec_resnet_rfl_att_train.tar) |
|
||||
|
||||
<a name="13"></a>
|
||||
|
||||
### 1.3 Text Super-Resolution Algorithms
|
||||
|
||||
Supported text super-resolution algorithms (Click the link to get the tutorial):
|
||||
- [x] [Text Gestalt](./algorithm_sr_gestalt_en.md)
|
||||
- [x] [Text Telescope](./algorithm_sr_telescope_en.md)
|
||||
|
||||
On the TextZoom public dataset, the effect of the algorithm is as follows:
|
||||
|
||||
|Model|Backbone|PSNR_Avg|SSIM_Avg|Config|Download link|
|
||||
|---|---|---|---|---|---|
|
||||
|Text Gestalt|tsrn|19.28|0.6560| [configs/sr/sr_tsrn_transformer_strock.yml](../../configs/sr/sr_tsrn_transformer_strock.yml)|[trained model](https://paddleocr.bj.bcebos.com/sr_tsrn_transformer_strock_train.tar)|
|
||||
|Text Telescope|tbsrn|21.56|0.7411| [configs/sr/sr_telescope.yml](../../configs/sr/sr_telescope.yml)|[trained model](https://paddleocr.bj.bcebos.com/contribution/sr_telescope_train.tar)|
|
||||
|
||||
<a name="14"></a>
|
||||
|
||||
### 1.4 Formula Recognition Algorithm
|
||||
|
||||
Supported formula recognition algorithms (Click the link to get the tutorial):
|
||||
|
||||
- [x] [CAN](./algorithm_rec_can_en.md)
|
||||
|
||||
On the CROHME handwritten formula dataset, the effect of the algorithm is as follows:
|
||||
|
||||
|Model |Backbone|Config|ExpRate|Download link|
|
||||
| ----- | ----- | ----- | ----- | ----- |
|
||||
|CAN|DenseNet|[rec_d28_can.yml](../../configs/rec/rec_d28_can.yml)|51.72%|[trained model](https://paddleocr.bj.bcebos.com/contribution/rec_d28_can_train.tar)|
|
||||
|
||||
|
||||
<a name="2"></a>
|
||||
|
||||
## 2. End-to-end OCR Algorithms
|
||||
|
@@ -122,7 +154,7 @@ On the PubTabNet dataset, the algorithm result is as follows:

|Model|Backbone|Config|Acc|Download link|
|---|---|---|---|---|
|TableMaster|TableResNetExtra|[configs/table/table_master.yml](../../configs/table/table_master.yml)|77.47%|[trained model](https://paddleocr.bj.bcebos.com/ppstructure/models/tablemaster/table_structure_tablemaster_train.tar) / [inference model](https://paddleocr.bj.bcebos.com/ppstructure/models/tablemaster/table_structure_tablemaster_infer.tar)|

<a name="4"></a>
@@ -130,7 +130,23 @@ Not supported
<a name="5"></a>
## 5. FAQ

- 1. Speed on CPU and GPU
    - Since most of the operators used by `SVTR` are matrix multiplications, SVTR has a speed advantage in GPU environments; on CPU with mkldnn enabled, however, `SVTR` has no advantage over an optimized convolutional network.
- 2. The SVTR model fails to convert to ONNX
    - Make sure the `paddle2onnx` and `onnxruntime` versions are up to date, and refer to the [SVTR-to-ONNX step-by-step example](https://github.com/PaddlePaddle/PaddleOCR/issues/7821#issuecomment-1271214273) for the conversion command.
- 3. The SVTR model converts to ONNX successfully, but the inference result is incorrect
    - The likely reason is that the model parameter `out_char_num` is not set correctly; it should be set to W//4, W//8 or W//12 (see the sketch after this list). Please refer to [Section 3.3.3 of SVTR, a high-precision Chinese scene text recognition model](https://aistudio.baidu.com/aistudio/projectdetail/5073182?contributionType=1).
- 4. Optimization of long-text recognition
    - Refer to [Section 3.3 of SVTR, a high-precision Chinese scene text recognition model](https://aistudio.baidu.com/aistudio/projectdetail/5073182?contributionType=1).
- 5. Notes on reproducing the paper results
    - The dataset used is the one provided by [ABINet](https://github.com/FangShancheng/ABINet).
    - By default, 4 GPUs are used for training; the default batch size per GPU is 512, so the total batch size is 2048, corresponding to a learning rate of 0.0005. When changing the batch size or the number of GPUs, the learning rate should be scaled proportionally.
- 6. Exploration directions for further optimization
    - Learning rate adjustment: set it to twice the default while keeping the batch size unchanged, or reduce the batch size to half the default while keeping the learning rate unchanged.
    - Data augmentation strategies: optionally `RecConAug` and `RecAug`.
    - If STN is not used, `Local` in `mixer` can be replaced by `Conv`, and `local_mixer` can be set to `[5, 5]` throughout.
    - Grid search for the optimal `embed_dim`, `depth`, and `num_heads` configuration.
    - Use the Post-Normalization strategy, i.e. set the model configuration `prenorm` to `True`.
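For item 3 above, here is a minimal illustration of deriving `out_char_num` from the input width; the values are our own example, not ppocr code:

```python
# Illustrative only: out_char_num is derived from the input width W.
W = 320                            # e.g. a recognition input of shape [3, 48, 320]
for stride in (4, 8, 12):
    print(stride, W // stride)     # candidate out_char_num values: 80, 40, 26
```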
## Citation
@@ -25,7 +25,7 @@ Using MJSynth and SynthText two text recognition datasets for training, and eval

|Model|Backbone|config|Acc|Download link|
| --- | --- | --- | --- | --- |
|ViTSTR|ViTSTR|[rec_vitstr_none_ce.yml](../../configs/rec/rec_vitstr_none_ce.yml)|79.82%|[trained model](https://paddleocr.bj.bcebos.com/rec_vitstr_none_ce_train.tar)|

<a name="2"></a>
## 2. Environment
@@ -27,7 +27,7 @@ Paper:
Referring to the [FudanOCR](https://github.com/FudanVI/FudanOCR/tree/main/scene-text-telescope) data download instructions, the results of the super-resolution algorithm on the TextZoom test set are as follows:

|Model|Backbone|PSNR_Avg|SSIM_Avg|Config|Download link|
|---|---|---|---|---|---|
|Text Telescope|tbsrn|21.56|0.7411| [configs/sr/sr_telescope.yml](../../configs/sr/sr_telescope.yml)|[trained model](https://paddleocr.bj.bcebos.com/contribution/sr_telescope_train.tar)|

The [TextZoom dataset](https://paddleocr.bj.bcebos.com/dataset/TextZoom.tar) comes from two super-resolution datasets, RealSR and SR-RAW, both of which contain LR-HR pairs. TextZoom has 17,367 pairs of training data and 4,373 pairs of test data.
@@ -13,6 +13,7 @@ This section uses the icdar2015 dataset as an example to introduce the training,
* [2.5 Distributed Training](#25-distributed-training)
* [2.6 Training with knowledge distillation](#26)
* [2.7 Training on other platforms (Windows/macOS/Linux DCU)](#27)
* [2.8 Fine-tuning](#28)
- [3. Evaluation and Test](#3-evaluation-and-test)
  - [3.1 Evaluation](#31-evaluation)
  - [3.2 Test](#32-test)

@@ -178,6 +179,10 @@ GPU mode is not supported, you need to set `use_gpu` to False in the configurati
- Linux DCU
Running on a DCU device requires setting the environment variable `export HIP_VISIBLE_DEVICES=0,1,2,3`; the rest of the training, evaluation and prediction commands are exactly the same as on a Linux GPU.

### 2.8 Fine-tuning

In actual use, it is recommended to load the official pre-trained model and fine-tune it on your own dataset. For the fine-tuning method of the detection model, please refer to the [Model Fine-tuning Tutorial](./finetune_en.md).

## 3. Evaluation and Test

### 3.1 Evaluation
@@ -0,0 +1,167 @@
# Fine-tune

## 1. Background and significance

The PP-OCR series models provided by PaddleOCR perform excellently in general scenarios and can solve detection and recognition problems in most cases. In vertical scenarios, if you want an even better model, you can further improve the accuracy of the PP-OCR series detection and recognition models through fine-tuning.

This article mainly introduces the precautions to take when fine-tuning the text detection and recognition models, so that you can obtain higher-accuracy models through fine-tuning in your own scenario.

The core points of this article are as follows:

1. The pre-trained models provided by PP-OCR have strong generalization ability
2. Adding a small amount of real data (detection: >=500, recognition: >=5000) greatly improves detection and recognition in vertical scenarios
3. When fine-tuning, adding real general-scene data can further improve the accuracy and generalization of the model
4. In the text detection task, increasing the prediction shape of the image can further improve the detection of smaller text areas
5. When fine-tuning, the hyperparameters (most importantly the learning rate and batch size) need to be adjusted properly to obtain a better result

For more details, please refer to Chapter 2 and Chapter 3.
## 2. Text detection model fine-tuning

### 2.1 Dataset

* Dataset: it is recommended to prepare at least 500 text detection images for model fine-tuning.

* Annotation: use single-line text annotations, and keep each labeled detection box consistent with the actual semantic content. For example, in the train ticket scenario, the surname and given name may be far apart, but they semantically belong to the same detection field; here, the entire name should be annotated as one detection box.

### 2.2 Model

It is recommended to choose the PP-OCRv3 model (configuration file: [ch_PP-OCRv3_det_student.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml), pre-trained model: [ch_PP-OCRv3_det_distill_train.tar](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_distill_train.tar)); its accuracy and generalization are the best among the pre-trained models currently available.

For more PP-OCR series models, please refer to the [PP-OCR Series Model Library](./models_list_en.md).

Note: when using the above pre-trained model, use the `student.pdparams` file in the folder as the pre-trained model, i.e. only use the student model.
### 2.3 Training hyperparameters

When fine-tuning the model, the most important hyperparameters are the pre-trained model path `pretrained_model`, the `learning_rate` and the `batch_size`. Some of them are shown below:

```yaml
Global:
  pretrained_model: ./ch_PP-OCRv3_det_distill_train/student.pdparams # path to the pre-trained model
Optimizer:
  lr:
    name: Cosine
    learning_rate: 0.001 # learning rate
    warmup_epoch: 2
  regularizer:
    name: 'L2'
    factor: 0

Train:
  loader:
    shuffle: True
    drop_last: False
    batch_size_per_card: 8 # batch size per GPU
    num_workers: 4
```

In the above configuration file, you first need to set the `pretrained_model` field to the `student.pdparams` file path.

The configuration file provided by PaddleOCR assumes 8-GPU training (equivalent to a total batch size of `8*8=64`) with no pre-trained model loaded. Therefore, in your scenario, the learning rate needs to be adjusted linearly with the total batch size, for example:

* If you train on a single GPU with batch_size=8, the total batch_size is 8, and it is recommended to adjust the learning rate to about `1e-4`.
* If, due to memory limitations, you can only set batch_size=4 on a single GPU, the total batch_size is 4, and it is recommended to adjust the learning rate to about `5e-5`.
### 2.4 Prediction hyperparameters

When exporting the trained model and running inference, you can further adjust the predicted image scale to improve the detection of small-area text. The following DBNet inference hyperparameters can be adjusted appropriately to improve the results.

| hyperparameter | type | default | meaning |
| :--: | :--: | :--: | :--: |
| det_db_thresh | float | 0.3 | In the probability map output by DB, pixels with a score greater than this threshold are considered text pixels |
| det_db_box_thresh | float | 0.6 | When the average score of all pixels inside a detected box is greater than this threshold, the result is considered a text area |
| det_db_unclip_ratio | float | 1.5 | The expansion coefficient of `Vatti clipping`, which is used to expand the text area |
| max_batch_size | int | 10 | batch size |
| use_dilation | bool | False | Whether to dilate the segmentation results to obtain better detection results |
| det_db_score_mode | str | "fast" | The score calculation method for DB detection results; supports `fast` and `slow`. `fast` computes the average score over all pixels in the polygon's circumscribed rectangle, while `slow` computes the average score over all pixels in the original polygon; `slow` is slower but more accurate. |
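As a rough sketch, these hyperparameters can be passed when constructing the predictor. This assumes the `paddleocr` Python package and an example image path of our choosing; the parameter names follow the table above:

```python
from paddleocr import PaddleOCR

# Sketch: DBNet inference hyperparameters from the table above, passed at
# construction time; tune them when detecting small-area text.
ocr = PaddleOCR(
    det_db_thresh=0.3,         # pixel-score threshold on the probability map
    det_db_box_thresh=0.6,     # average-score threshold for a detected box
    det_db_unclip_ratio=1.5,   # Vatti clipping expansion coefficient
    use_dilation=False,        # whether to dilate the segmentation result
    det_db_score_mode="slow",  # "slow" is slower but more accurate
)
result = ocr.ocr("doc/imgs/11.jpg")  # example image path (assumption)
```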
For more information on inference methods, please refer to the [Paddle Inference doc](./inference_ppocr_en.md).
## 3. Text recognition model fine-tuning

### 3.1 Dataset

* Dataset: if the dictionary is not changed, it is recommended to prepare at least 5,000 text recognition images for model fine-tuning; if the dictionary is changed (not recommended), more data is required.

* Data distribution: it is recommended that the distribution be as consistent as possible with the actual scenario. If the actual scenario contains a lot of short text, the training data should include more short text; if the scenario places high demands on recognizing spaces, the training data should include more text with spaces.

* Data synthesis: if some characters are recognized incorrectly, it is recommended to obtain a batch of data for those specific characters, add it to the original dataset, and fine-tune with a small learning rate. The ratio of the original dataset to the new dataset can be between 10:1 and 5:1, to avoid overfitting caused by too much data from a single scenario, while balancing the word frequency of the corpus so that the frequency of common words does not become too low.

  Specific characters can be generated with the TextRenderer tool; for a synthesis example, please refer to [data synthesis](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6/applications/%E5%85%89%E5%8A%9F%E7%8E%87%E8%AE%A1%E6%95%B0%E7%A0%81%E7%AE%A1%E5%AD%97%E7%AC%A6%E8%AF%86%E5%88%AB/%E5%85%89%E5%8A%9F%E7%8E%87%E8%AE%A1%E6%95%B0%E7%A0%81%E7%AE%A1%E5%AD%97%E7%AC%A6%E8%AF%86%E5%88%AB.md#31-%E6%95%B0%E6%8D%AE%E5%87%86%E5%A4%87). The corpus of the synthetic data should come from real usage scenarios as much as possible, and keep the richness of fonts and backgrounds while staying close to the real scene, which helps improve the model.

* Common Chinese and English data: during training, common real data can be added to the training set (for example, in fine-tuning scenarios where the dictionary is not changed, it is recommended to add real data such as LSVT, RCTW and MTWI) to further improve the generalization of the model.
### 3.2 Model

It is recommended to choose the PP-OCRv3 model (configuration file: [ch_PP-OCRv3_rec_distillation.yml](../../configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml), pre-trained model: [ch_PP-OCRv3_rec_train.tar](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_train.tar)); its accuracy and generalization are the best among the pre-trained models currently available.

For more PP-OCR series models, please refer to the [PP-OCR Series Model Library](./models_list_en.md).
### 3.3 Training hyperparameters

As with text detection fine-tuning, the most important hyperparameters when fine-tuning the recognition model are the pre-trained model path `pretrained_model`, the `learning_rate` and the `batch_size`. Part of a default configuration file is shown below.

```yaml
Global:
  pretrained_model:  # path to the pre-trained model
Optimizer:
  lr:
    name: Piecewise
    decay_epochs : [700, 800]
    values : [0.001, 0.0001]  # learning rate
    warmup_epoch: 5
  regularizer:
    name: 'L2'
    factor: 0

Train:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/
    label_file_list:
    - ./train_data/train_list.txt
    ratio_list: [1.0] # sampling ratio; the default value is [1.0]
  loader:
    shuffle: True
    drop_last: False
    batch_size_per_card: 128 # batch size per GPU
    num_workers: 8
```

In the above configuration file, you first need to set the `pretrained_model` field to the path of the `ch_PP-OCRv3_rec_train/best_accuracy.pdparams` file extracted in Section 3.2.

The configuration file provided by PaddleOCR assumes 8-GPU training (equivalent to a total batch size of `8*128=1024`) with no pre-trained model loaded. Therefore, in your scenario, the learning rate needs to be adjusted linearly with the total batch size, for example:

* If you train on a single GPU with batch_size=128, the total batch_size is 128; when loading the pre-trained model, it is recommended to adjust the learning rate to about `[1e-4, 2e-5]` (for the piecewise learning-rate strategy, two values need to be set; the same applies below).
* If, due to memory limitations, you can only set batch_size=64 on a single GPU, the total batch_size is 64; when loading the pre-trained model, it is recommended to adjust the learning rate to about `[5e-5, 1e-5]`.

If general real-scene data is added, it is recommended to keep the amount of vertical-scene data and real-scene data at about 1:1 in each epoch.

For example: suppose your own vertical-scene recognition data volume is 10,000 (label file `vertical.txt`) and the collected general-scene recognition data volume is 100,000 (label file `general.txt`).

Then the `label_file_list` and `ratio_list` parameters can be set as shown below. In each epoch, `vertical.txt` is fully sampled (sampling ratio 1.0), contributing 10,000 samples; `general.txt` is sampled with a ratio of 0.1, contributing `100,000*0.1=10,000` samples; the final ratio of the two is `1:1`.
```yaml
Train:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/
    label_file_list:
    - vertical.txt
    - general.txt
    ratio_list: [1.0, 0.1]
```
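As a sanity check on the sampling arithmetic, the per-epoch sample counts implied by `ratio_list` can be computed directly; the values below are the ones from the example above:

```python
# Expected samples drawn per epoch: dataset size * sampling ratio.
datasets = {"vertical.txt": (10000, 1.0), "general.txt": (100000, 0.1)}
for name, (size, ratio) in datasets.items():
    print(name, int(size * ratio))  # vertical.txt 10000, general.txt 10000 -> 1:1
```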
### 3.4 Training optimization

Training is not a one-shot process. After completing one stage of training and evaluation, it is recommended to collect and analyze the bad cases of the current model in the real scenario, adjust the proportion of the training data accordingly, or further add synthetic data. The model is continuously improved through multiple training iterations.
@@ -88,7 +88,7 @@ The relevant parameters of the PSE algorithm are as follows
| :--: | :--: | :--: | :--: |
| rec_algorithm | str | "CRNN" | Text recognition algorithm name; currently supports `CRNN`, `SRN`, `RARE`, `NRTR`, `SAR`, `ViTSTR`, `ABINet`, `VisionLAN`, `SPIN`, `RobustScanner`, `SVTR`, `SVTR_LCNet` |
| rec_model_dir | str | None, it is required if using the recognition model | recognition inference model path |
| rec_image_shape | str | "3,48,320" | Image size at the time of recognition |
| rec_batch_num | int | 6 | batch size |
| max_text_length | int | 25 | The maximum length of the recognition result, valid for `SRN` |
| rec_char_dict_path | str | "./ppocr/utils/ppocr_keys_v1.txt" | character dictionary file |

@@ -115,7 +115,7 @@ The relevant parameters of the PSE algorithm are as follows
| :--: | :--: | :--: | :--: |
| use_angle_cls | bool | False | whether to use an angle classifier |
| cls_model_dir | str | None, if you need to use it, you must specify the path explicitly | angle classifier inference model path |
| cls_image_shape | str | "3,48,192" | prediction shape |
| label_list | list | ['0', '180'] | The angle value corresponding to each class id |
| cls_batch_num | int | 6 | batch size |
| cls_thresh | float | 0.9 | Prediction threshold; when the model predicts 180 degrees with a score greater than this threshold, the final prediction is considered 180 degrees and the image is flipped |
@@ -457,14 +457,31 @@ inference/ser_vi_layoutxlm/
    └── inference.pdmodel           # The program file of the inference model
```

The RE model can be converted to an inference model using the following command.

```bash
# -c Set the training algorithm yml configuration file.
# -o Set optional parameters.
# Architecture.Backbone.checkpoints Set the trained model path.
# Global.save_inference_dir Set the directory where the converted model will be saved.
python3 tools/export_model.py -c configs/kie/vi_layoutxlm/re_vi_layoutxlm_xfund_zh.yml -o Architecture.Backbone.checkpoints=./output/re_vi_layoutxlm_xfund_zh/best_accuracy Global.save_inference_dir=./inference/re_vi_layoutxlm
```

After the conversion succeeds, there are three files in the model save directory:

```
inference/re_vi_layoutxlm/
    ├── inference.pdiparams         # The parameter file of the inference model
    ├── inference.pdiparams.info    # The parameter information of the inference model, which can be ignored
    └── inference.pdmodel           # The program file of the inference model
```
## 4.2 Model inference

Use the following command to run inference with the VI-LayoutXLM SER model.

```bash
cd ppstructure
@@ -483,6 +500,26 @@ The visualized result will be saved in `./output`, which is shown as follows.
<img src="../../ppstructure/docs/kie/result_ser/zh_val_42_ser.jpg" width="800">
</div>

Use the following command to run inference with the VI-LayoutXLM RE model.

```bash
cd ppstructure
python3 kie/predict_kie_token_ser_re.py \
  --kie_algorithm=LayoutXLM \
  --re_model_dir=../inference/re_vi_layoutxlm \
  --ser_model_dir=../inference/ser_vi_layoutxlm \
  --use_visual_backbone=False \
  --image_dir=./docs/kie/input/zh_val_42.jpg \
  --ser_dict_path=../train_data/XFUND/class_list_xfun.txt \
  --vis_font_path=../doc/fonts/simfang.ttf \
  --ocr_order_method="tb-yx"
```

The visualized result will be saved in `./output`, which is shown as follows.

<div align="center">
<img src="../../ppstructure/docs/kie/result_re/zh_val_42_re.jpg" width="800">
</div>

# 5. FAQ
@@ -228,7 +228,6 @@ Architecture:
        enc_dim: 512
        max_text_length: *max_text_length
```

When the model is finally trained, it contains 3 sub-networks: `Teacher`, `Student`, `Student2`.
@@ -1,8 +1,8 @@
# OCR Model List(V3, updated on 2022.4.28)
> **Note**
> 1. Compared with model v2, the 3rd version of the detection model has an improvement in accuracy, and the 2.1 version of the recognition model has optimizations in accuracy and speed with CPU.
> 2. Compared with [models 1.1](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_en/models_list_en.md), which are trained with the static graph programming paradigm, models 2.0 or higher are the dynamic-graph trained versions and achieve close performance.
> 3. All models in this tutorial are from the PaddleOCR series; for more introduction to algorithms and models based on public datasets, you can refer to the [algorithm overview tutorial](./algorithm_overview_en.md).

- [OCR Model List(V3, updated on 2022.4.28)]()
  - [1. Text Detection Model](#1-text-detection-model)
@@ -16,15 +16,15 @@
- [3. Text Angle Classification Model](#3-text-angle-classification-model)
- [4. Paddle-Lite Model](#4-paddle-lite-model)

The downloadable models provided by PaddleOCR include the `inference model`, `trained model`, `pre-trained model` and `nb model`. The differences between them are as follows:

|model type|model format|description|
|--- | --- | --- |
|inference model|inference.pdmodel, inference.pdiparams|Used for inference based on the Paddle inference engine, [detail](./inference_ppocr_en.md)|
|trained model, pre-trained model|\*.pdparams, \*.pdopt, \*.states |The checkpoint model saved during training, which stores the parameters of the model and is mostly used for model evaluation and continued training.|
|nb model|\*.nb| Model optimized by Paddle-Lite, suitable for mobile-side deployment scenarios (Paddle-Lite is needed for nb model deployment). |

The relationship of the above models is as follows.
@@ -51,10 +51,10 @@ Relationship of the above models is as follows.

|model name|description|config|model size|download|
| --- | --- | --- | --- | --- |
|en_PP-OCRv3_det_slim | [New] Slim quantization with distillation lightweight detection model, supporting English | [ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml) | 1.1M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_slim_distill_train.tar) / [nb model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_slim_infer.nb) |
|en_PP-OCRv3_det | [New] Original lightweight detection model, supporting English |[ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml)| 3.8M | [inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_distill_train.tar) |

* Note: The English configuration file is the same as the Chinese one except for the training data; here we only provide one configuration file.

<a name="1.3"></a>
@@ -62,10 +62,10 @@ Relationship of the above models is as follows.

|model name|description|config|model size|download|
| --- | --- | --- | --- | --- |
| ml_PP-OCRv3_det_slim | [New] Slim quantization with distillation lightweight detection model, supporting English | [ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml) | 1.1M | [inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_slim_distill_train.tar) / [nb model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_slim_infer.nb) |
| ml_PP-OCRv3_det |[New] Original lightweight detection model, supporting English | [ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml)| 3.8M | [inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_distill_train.tar) |

* Note: The English configuration file is the same as the Chinese one except for the training data; here we only provide one configuration file.

<a name="Recognition"></a>
## 2. Text Recognition Model
@ -75,27 +75,29 @@ Relationship of the above models is as follows.
|
|||
|
||||
|model name|description|config|model size|download|
|
||||
| --- | --- | --- | --- | --- |
|
||||
|ch_PP-OCRv3_rec_slim | [New] Slim qunatization with distillation lightweight model, supporting Chinese, English text recognition |[ch_PP-OCRv3_rec_distillation.yml](../../configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml)| 4.9M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_slim_train.tar) / [nb model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_slim_infer.nb) |
|
||||
|ch_PP-OCRv3_rec_slim | [New] Slim quantization with distillation lightweight model, supporting Chinese, English text recognition |[ch_PP-OCRv3_rec_distillation.yml](../../configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml)| 4.9M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_slim_train.tar) / [nb model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_slim_infer.nb) |
|
||||
|ch_PP-OCRv3_rec| [New] Original lightweight model, supporting Chinese, English, multilingual text recognition |[ch_PP-OCRv3_rec_distillation.yml](../../configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml)| 12.4M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_train.tar) |
|
||||
|ch_PP-OCRv2_rec_slim| Slim qunatization with distillation lightweight model, supporting Chinese, English text recognition|[ch_PP-OCRv2_rec.yml](../../configs/rec/ch_PP-OCRv2/ch_PP-OCRv2_rec.yml)| 9.0M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_slim_quant_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_slim_quant_train.tar) |
|
||||
|ch_PP-OCRv2_rec| Original lightweight model, supporting Chinese, English, multilingual text recognition |[ch_PP-OCRv2_rec_distillation.yml](../../configs/rec/ch_PP-OCRv2/ch_PP-OCRv2_rec_distillation.yml)|8.5M|[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_train.tar) |
|
||||
|ch_PP-OCRv2_rec_slim| Slim quantization with distillation lightweight model, supporting Chinese, English text recognition|[ch_PP-OCRv2_rec.yml](../../configs/rec/ch_PP-OCRv2/ch_PP-OCRv2_rec.yml)| 9.0M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_slim_quant_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_slim_quant_train.tar) |
|
||||
|ch_PP-OCRv2_rec| Original lightweight model, supporting Chinese, English, and multilingual text recognition |[ch_PP-OCRv2_rec_distillation.yml](../../configs/rec/ch_PP-OCRv2/ch_PP-OCRv2_rec_distillation.yml)|8.5M|[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_train.tar) |
|
||||
|ch_ppocr_mobile_slim_v2.0_rec|Slim pruned and quantized lightweight model, supporting Chinese, English and number recognition|[rec_chinese_lite_train_v2.0.yml](../../configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml)| 6.0M | [inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_train.tar) |
|
||||
|ch_ppocr_mobile_v2.0_rec|Original lightweight model, supporting Chinese, English and number recognition|[rec_chinese_lite_train_v2.0.yml](../../configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml)|5.2M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_train.tar) / [pre-trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_pre.tar) |
|
||||
|ch_ppocr_server_v2.0_rec|General model, supporting Chinese, English and number recognition|[rec_chinese_common_train_v2.0.yml](../../configs/rec/ch_ppocr_v2.0/rec_chinese_common_train_v2.0.yml)|94.8M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_train.tar) / [pre-trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_pre.tar) |
|
||||
|
||||
|
||||
**Note:** The `trained model` is fine-tuned on the `pre-trained model` with real data and synthesized vertical text data, which achieved better performance in real scene. The `pre-trained model` is directly trained on the full amount of real data and synthesized data, which is more suitable for fine-tune on your own dataset.
|
||||
**Note:** The `trained model` is fine-tuned on the `pre-trained model` with real data and synthesized vertical text data, which achieves better performance in real scenes. The `pre-trained model` is trained directly on the full set of real and synthesized data, which makes it more suitable for fine-tuning on your own dataset.
|
||||
|
||||
<a name="English"></a>
|
||||
### 2.2 English Recognition Model
|
||||
|
||||
|model name|description|config|model size|download|
|
||||
| --- | --- | --- | --- | --- |
|
||||
|en_PP-OCRv3_rec_slim | [New] Slim qunatization with distillation lightweight model, supporting english, English text recognition |[en_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/en_PP-OCRv3_rec.yml)| 3.2M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_slim_train.tar) / [nb model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_slim_infer.nb) |
|
||||
|en_PP-OCRv3_rec| [New] Original lightweight model, supporting english, English, multilingual text recognition |[en_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/en_PP-OCRv3_rec.yml)| 9.6M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_train.tar) |
|
||||
|en_PP-OCRv3_rec_slim | [New] Slim quantization with distillation lightweight model, supporting English text recognition |[en_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/en_PP-OCRv3_rec.yml)| 3.2M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_slim_train.tar) / [nb model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_slim_infer.nb) |
|
||||
|en_PP-OCRv3_rec| [New] Original lightweight model, supporting English and multilingual text recognition |[en_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/en_PP-OCRv3_rec.yml)| 9.6M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_train.tar) |
|
||||
|en_number_mobile_slim_v2.0_rec|Slim pruned and quantized lightweight model, supporting English and number recognition|[rec_en_number_lite_train.yml](../../configs/rec/multi_language/rec_en_number_lite_train.yml)| 2.7M | [inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/en_number_mobile_v2.0_rec_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/en_number_mobile_v2.0_rec_slim_train.tar) |
|
||||
|en_number_mobile_v2.0_rec|Original lightweight model, supporting English and number recognition|[rec_en_number_lite_train.yml](../../configs/rec/multi_language/rec_en_number_lite_train.yml)|2.6M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/en_number_mobile_v2.0_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/en_number_mobile_v2.0_rec_train.tar) |
|
||||
|
||||
**Note:** The dictionary file for all English recognition models is `ppocr/utils/en_dict.txt`.
|
||||
|
||||
<a name="Multilingual"></a>
|
||||
### 2.3 Multilingual Recognition Model (Updating...)
|
||||
|
||||
|
@ -112,7 +114,7 @@ Relationship of the above models is as follows.
|
|||
| cyrillic_PP-OCRv3_rec | ppocr/utils/dict/cyrillic_dict.txt | Lightweight model for cyrillic recognition | [cyrillic_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/cyrillic_PP-OCRv3_rec.yml) |9.6M|[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/cyrillic_PP-OCRv3_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/cyrillic_PP-OCRv3_rec_train.tar) |
|
||||
| devanagari_PP-OCRv3_rec | ppocr/utils/dict/devanagari_dict.txt | Lightweight model for devanagari recognition | [devanagari_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/devanagari_PP-OCRv3_rec.yml) |9.9M|[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/devanagari_PP-OCRv3_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/devanagari_PP-OCRv3_rec_train.tar) |
|
||||
|
||||
For a complete list of languages and tutorials, please refer to : [Multi-language model](./multi_languages_en.md)
|
||||
For a complete list of languages and tutorials, please refer to [Multi-language model](./multi_languages_en.md)
|
||||
|
||||
<a name="Angle"></a>
|
||||
## 3. Text Angle Classification Model
|
||||
|
@ -125,9 +127,9 @@ For a complete list of languages and tutorials, please refer to : [Multi-l
|
|||
<a name="Paddle-Lite"></a>
|
||||
## 4. Paddle-Lite Model
|
||||
|
||||
Paddle Lite is an updated version of Paddle-Mobile, an open-open source deep learning framework designed to make it easy to perform inference on mobile, embeded, and IoT devices. It can further optimize the inference model and generate `nb model` used for edge devices. It's suggested to optimize the quantization model using Paddle-Lite because `INT8` format is used for the model storage and inference.
|
||||
Paddle Lite is an updated version of Paddle-Mobile, an open-source deep learning framework designed to make it easy to perform inference on mobile, embedded, and IoT devices. It can further optimize the inference model and generate the `nb model` used for edge devices. It is suggested to optimize the quantization model with Paddle-Lite because the `INT8` format is used for model storage and inference.
|
||||
|
||||
This chapter lists OCR nb models with PP-OCRv2 or earlier versions. You can access to the latest nb models from the above tables.
|
||||
This chapter lists OCR nb models with PP-OCRv2 or earlier versions. You can access the latest nb models from the above tables.
|
||||
|
||||
|Version|Introduction|Model size|Detection model|Text Direction model|Recognition model|Paddle-Lite branch|
|
||||
|---|---|---|---|---|---|---|
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# E-book: *Dive Into OCR*
|
||||
|
||||
"Dive Into OCR" is a textbook that combines OCR theory and practice, written by the PaddleOCR team, Chen Zhineng, a Pre-tenure Professor at Fudan University, Huang Wenhui, a senior expert in the field of vision at China Mobile Research Institute, and other industry-university-research colleagues, as well as OCR developers. The main features are as follows:
|
||||
"Dive Into OCR" is a textbook that combines OCR theory and practice, written by the PaddleOCR community. The main features are as follows:
|
||||
|
||||
- OCR full-stack technology covering text detection, recognition and document analysis
|
||||
- Closely integrates theory and practice, bridging the gap to code implementation, with supporting instructional videos
|
||||
|
@ -8,6 +8,10 @@
|
|||
|
||||
## Structure
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/50011306/187578511-9f3c351e-b68c-4359-a6e5-475810993c61.png" width = "500" />
|
||||
</div>
|
||||
|
||||
- The first part is the preliminary knowledge of the book, including the knowledge index and resource links needed for locating and using the book's content
|
||||
|
||||
- The second part is chapters 4-8 of the book, which introduce the concepts, applications, and industry practices related to the detection and recognition capabilities of the OCR engine. The "Introduction to OCR Technology" chapter comprehensively explains the application scenarios and challenges of OCR, the basic technical concepts, and the pain points in industrial applications. The "Text Detection" and "Text Recognition" chapters then introduce the two basic tasks of OCR; in each chapter, an algorithm is accompanied by a detailed code explanation and practical exercises. Chapters 6 and 7 give a detailed introduction to the PP-OCR series models: PP-OCR is a set of OCR systems for industrial applications that applies a series of optimization strategies on top of the basic detection and recognition models to reach industrial SOTA in the general domain, while also providing a variety of inference deployment solutions so that enterprises can quickly put OCR applications into production.
|
||||
|
@ -16,6 +20,11 @@
|
|||
|
||||
|
||||
## Address
|
||||
- [E-book: *Dive Into OCR* (link generating)]()
|
||||
- [Jupyter notebook](../../notebook/notebook_en/)
|
||||
- [videos (Chinese only)](https://aistudio.baidu.com/aistudio/education/group/info/25207)
|
||||
- [E-book: *Dive Into OCR* (PDF)](https://paddleocr.bj.bcebos.com/ebook/Dive_into_OCR.pdf)
|
||||
- [Notebook (.ipynb)](https://github.com/PaddleOCR-Community/Dive-into-OCR)
|
||||
- [Videos (Chinese only)](https://aistudio.baidu.com/aistudio/education/group/info/25207)
|
||||
|
||||
<a href="https://trackgit.com">
|
||||
<img src="https://us-central1-trackgit-analytics.cloudfunctions.net/token/ping/l7h1cua5ayvgmb8r958l" alt="trackgit-views" />
|
||||
</a>
|
||||
|
||||
|
|
|
@ -28,13 +28,13 @@
|
|||
- If you have CUDA 9 or CUDA 10 installed on your machine, please run the following command to install the GPU version of PaddlePaddle
|
||||
|
||||
```bash
|
||||
python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple
|
||||
python -m pip install paddlepaddle-gpu -i https://pypi.tuna.tsinghua.edu.cn/simple
|
||||
```
|
||||
|
||||
- If you have no available GPU on your machine, please run the following command to install the CPU version
|
||||
|
||||
```bash
|
||||
python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
|
||||
python -m pip install paddlepaddle -i https://pypi.tuna.tsinghua.edu.cn/simple
|
||||
```
|
||||
|
||||
For more software version requirements, please refer to the instructions in the [Installation Document](https://www.paddlepaddle.org.cn/install/quick).
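After installation, a quick sanity check confirms that the framework and, if available, the GPU are usable (a minimal sketch; `run_check` is part of the public `paddle.utils` API):

```python
import paddle

paddle.utils.run_check()   # runs a small program and reports whether PaddlePaddle works
print(paddle.__version__)  # verify the installed version meets the requirements above
```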
|
||||
|
@ -223,7 +223,7 @@ from paddleocr import PaddleOCR, draw_ocr
|
|||
# Paddleocr supports Chinese, English, French, German, Korean and Japanese.
|
||||
# You can set the parameter `lang` as `ch`, `en`, `fr`, `german`, `korean`, `japan`
|
||||
# to switch the language model as needed.
|
||||
ocr = PaddleOCR(use_angle_cls=True, lang="ch", page_num=2) # need to run only once to download and load model into memory
|
||||
img_path = './xxx.pdf'
|
||||
result = ocr.ocr(img_path, cls=True)
|
||||
for idx in range(len(result)):
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
* [2.6 Training with knowledge distillation](#kd)
|
||||
* [2.7 Multi-language Training](#Multi_language)
|
||||
* [2.8 Training on other platforms (Windows/macOS/Linux DCU)](#28)
|
||||
* [2.9 Fine-tuning](#29)
|
||||
- [3. Evaluation and Test](#3-evaluation-and-test)
|
||||
* [3.1 Evaluation](#31-evaluation)
|
||||
* [3.2 Test](#32-test)
|
||||
|
@ -79,7 +80,7 @@ PaddleOCR has built-in dictionaries, which can be used on demand.
|
|||
|
||||
`ppocr/utils/ppocr_keys_v1.txt` is a Chinese dictionary with 6623 characters.
|
||||
|
||||
`ppocr/utils/ic15_dict.txt` is an English dictionary with 63 characters
|
||||
`ppocr/utils/ic15_dict.txt` is an English dictionary with 36 characters
|
||||
|
||||
`ppocr/utils/dict/french_dict.txt` is a French dictionary with 118 characters
|
||||
|
||||
|
@ -384,6 +385,11 @@ GPU mode is not supported, you need to set `use_gpu` to False in the configurati
|
|||
- Linux DCU
|
||||
Running on a DCU device requires setting the environment variable `export HIP_VISIBLE_DEVICES=0,1,2,3`; the rest of the training, evaluation, and prediction commands are exactly the same as on a Linux GPU.
|
||||
|
||||
<a name="29"></a>
|
||||
## 2.9 Fine-tuning
|
||||
|
||||
In actual use, it is recommended to load the official pre-trained model and fine-tune it on your own dataset. For the fine-tuning method of the recognition model, please refer to the [Model Fine-tuning Tutorial](./finetune_en.md).
|
||||
|
||||
<a name="3-evaluation-and-test"></a>
|
||||
## 3. Evaluation and Test
|
||||
|
||||
|
|
|
@ -14,6 +14,9 @@ This article provides a full-process guide for the PaddleOCR table recognition m
|
|||
- [2.5. Distributed Training](#25-distributed-training)
|
||||
- [2.6. Training on other platform(Windows/macOS/Linux DCU)](#26-training-on-other-platformwindowsmacoslinux-dcu)
|
||||
- [2.7. Fine-tuning](#27-fine-tuning)
|
||||
- [2.7.1 Dataset](#271-dataset)
|
||||
- [2.7.2 Model selection](#272-model-selection)
|
||||
- [2.7.3 Training hyperparameter selection](#273-training-hyperparameter-selection)
|
||||
- [3. Evaluation and Test](#3-evaluation-and-test)
|
||||
- [3.1. Evaluation](#31-evaluation)
|
||||
- [3.2. Test table structure recognition effect](#32-test-table-structure-recognition-effect)
|
||||
|
@ -226,8 +229,40 @@ Running on a DCU device requires setting the environment variable `export HIP_VI
|
|||
|
||||
## 2.7. Fine-tuning
|
||||
|
||||
In actual use, it is recommended to load the officially provided pre-trained model and fine-tune it on your own dataset. For the fine-tuning method of the table recognition model, please refer to the [Model fine-tuning tutorial](./finetune.md).
|
||||
|
||||
### 2.7.1 Dataset
|
||||
|
||||
Data volume: it is recommended to prepare at least 2000 annotated table images for model fine-tuning.
|
||||
|
||||
### 2.7.2 Model selection
|
||||
|
||||
It is recommended to choose the SLANet model for fine-tuning (configuration file: [SLANet_ch.yml](../../configs/table/SLANet_ch.yml), pre-trained model: [ch_ppstructure_mobile_v2.0_SLANet_train.tar](https://paddleocr.bj.bcebos.com/ppstructure/models/slanet/ch_ppstructure_mobile_v2.0_SLANet_train.tar)); in accuracy and generalization it is the best Chinese table pre-trained model currently available.
|
||||
|
||||
For more table recognition models, please refer to [PP-Structure Series Model Library](../../ppstructure/docs/models_list.md).
|
||||
|
||||
### 2.7.3 Training hyperparameter selection
|
||||
|
||||
When fine-tuning the model, the most important hyperparameters are the pre-trained model path `pretrained_model` and the learning rate `learning_rate`; the relevant part of the configuration file is shown below.
|
||||
|
||||
```yaml
|
||||
Global:
|
||||
pretrained_model: ./ch_ppstructure_mobile_v2.0_SLANet_train/best_accuracy.pdparams # Pre-trained model path
|
||||
Optimizer:
|
||||
lr:
|
||||
name: Cosine
|
||||
learning_rate: 0.001 # learning rate
|
||||
warmup_epoch: 0
|
||||
regularizer:
|
||||
name: 'L2'
|
||||
factor: 0
|
||||
```
|
||||
|
||||
In the above configuration file, you first need to set the `pretrained_model` field to the path of the `best_accuracy.pdparams` file.
|
||||
|
||||
The configuration file provided by PaddleOCR is for 4-card training (equivalent to a total batch size of `4*48=192`) with no pre-trained model loaded. Therefore, in your scenario, the learning rate needs to be adjusted linearly together with the total batch size, for example:
|
||||
|
||||
* If you train on a single card with batch_size=48, the total batch_size is 48, and it is recommended to set the learning rate to about `0.00025`.
* If you train on a single card but, due to memory limitations, can only use batch_size=32, the total batch_size is 32, and it is recommended to set the learning rate to about `0.00017`; see the sketch below.
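As a quick check of these numbers, the linear scaling rule can be computed directly (a minimal sketch; the helper name is ours, not part of PaddleOCR):

```python
def scaled_lr(total_batch_size, base_lr=0.001, base_total_batch_size=192):
    """Linearly rescale the reference learning rate for a new total batch size."""
    return base_lr * total_batch_size / base_total_batch_size

print(scaled_lr(48))   # 0.00025   (single card, batch_size=48)
print(scaled_lr(32))   # ~0.00017  (single card, batch_size=32)
```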
|
||||
|
||||
# 3. Evaluation and Test
|
||||
|
||||
|
|
|
@ -261,7 +261,7 @@ Output will be a list, each item contains classification result and confidence
|
|||
|
||||
## 3 Use custom model
|
||||
When the built-in model cannot meet the needs, you need to use your own trained model.
|
||||
First, refer to the first section of [inference_en.md](./inference_en.md) to convert your det and rec model to inference model, and then use it as follows
|
||||
First, refer to [export](./detection_en.md#4-inference) doc to convert your det and rec model to inference model, and then use it as follows
|
||||
|
||||
### 3.1 Use by code
|
||||
|
||||
|
@ -335,7 +335,7 @@ ocr = PaddleOCR(use_angle_cls=True, lang="ch") # need to run only once to downlo
|
|||
img_path = 'PaddleOCR/doc/imgs/11.jpg'
|
||||
img = cv2.imread(img_path)
|
||||
# img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # uncomment this line if your own trained model supports grayscale images
|
||||
result = ocr.ocr(img_path, cls=True)
|
||||
result = ocr.ocr(img, cls=True)
|
||||
for idx in range(len(result)):
|
||||
res = result[idx]
|
||||
for line in res:
|
||||
|
|
|
@ -89,7 +89,7 @@ paddleocr --image_dir /your/test/image.jpg --lang=japan # change for i18n abbr
|
|||
| Chinese & English ultra-lightweight PP-OCRv3 model (16.2M) | ch_PP-OCRv3_xx | Mobile & Server | [inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_distill_train.tar) | [inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_train.tar) |
|
||||
|
||||
|
||||
- For more model downloads (including multilingual), please refer to [PP-OCR series model downloads] (../doc_en/models_list_en.md).
- For more model downloads (including multilingual), please refer to [PP-OCR series model downloads](../doc_en/models_list_en.md).
|
||||
- For new language requests, please refer to the [guidelines for new language requests](#language_requests).
|
||||
- For structured document analysis models, please refer to [PP-Structure models](../../ppstructure/docs/models_list_en.md).
|
||||
|
||||
|
|
BIN doc/joinus.PNG: binary file not shown (Before: 100 KiB | After: 18 KiB)
paddleocr.py: 73 changed lines
|
@ -27,15 +27,23 @@ import logging
|
|||
import numpy as np
|
||||
from pathlib import Path
|
||||
|
||||
tools = importlib.import_module('.', 'tools')
|
||||
ppocr = importlib.import_module('.', 'ppocr')
|
||||
ppstructure = importlib.import_module('.', 'ppstructure')
|
||||
def _import_file(module_name, file_path, make_importable=False):
|
||||
spec = importlib.util.spec_from_file_location(module_name, file_path)
|
||||
module = importlib.util.module_from_spec(spec)
|
||||
spec.loader.exec_module(module)
|
||||
if make_importable:
|
||||
sys.modules[module_name] = module
|
||||
return module
|
||||
|
||||
tools = _import_file('tools', os.path.join(__dir__, 'tools/__init__.py'), make_importable=True)
|
||||
ppocr = importlib.import_module('ppocr', 'paddleocr')
|
||||
ppstructure = importlib.import_module('ppstructure', 'paddleocr')
|
||||
|
||||
from tools.infer import predict_system
|
||||
from ppocr.utils.logging import get_logger
|
||||
|
||||
logger = get_logger()
|
||||
from ppocr.utils.utility import check_and_read, get_image_file_list
|
||||
from ppocr.utils.utility import check_and_read, get_image_file_list, alpha_to_color, binarize_img
|
||||
from ppocr.utils.network import maybe_download, download_with_progressbar, is_link, confirm_model_dir_url
|
||||
from tools.infer.utility import draw_ocr, str2bool, check_gpu
|
||||
from ppstructure.utility import init_args, draw_structure_result
|
||||
|
@ -416,7 +424,7 @@ def get_model_config(type, version, model_type, lang):
|
|||
|
||||
def img_decode(content: bytes):
|
||||
np_arr = np.frombuffer(content, dtype=np.uint8)
|
||||
return cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
|
||||
return cv2.imdecode(np_arr, cv2.IMREAD_UNCHANGED)
|
||||
|
||||
|
||||
def check_img(img):
|
||||
|
@ -502,14 +510,17 @@ class PaddleOCR(predict_system.TextSystem):
|
|||
super().__init__(params)
|
||||
self.page_num = params.page_num
|
||||
|
||||
def ocr(self, img, det=True, rec=True, cls=True):
|
||||
def ocr(self, img, det=True, rec=True, cls=True, bin=False, inv=False, alpha_color=(255, 255, 255)):
|
||||
"""
|
||||
ocr with paddleocr
|
||||
OCR with PaddleOCR
|
||||
args:
|
||||
img: img for ocr, support ndarray, img_path and list or ndarray
|
||||
det: use text detection or not. If false, only rec will be exec. Default is True
|
||||
rec: use text recognition or not. If false, only det will be exec. Default is True
|
||||
cls: use angle classifier or not. Default is True. If true, the text with rotation of 180 degrees can be recognized. If no text is rotated by 180 degrees, use cls=False to get better performance. Text with rotation of 90 or 270 degrees can be recognized even if cls=False.
|
||||
img: image for OCR, supporting ndarray, img_path, and a list of ndarray
|
||||
det: use text detection or not. If False, only rec will be exec. Default is True
|
||||
rec: use text recognition or not. If False, only det will be exec. Default is True
|
||||
cls: use angle classifier or not. Default is True. If True, the text with rotation of 180 degrees can be recognized. If no text is rotated by 180 degrees, use cls=False to get better performance. Text with rotation of 90 or 270 degrees can be recognized even if cls=False.
|
||||
bin: binarize image to black and white. Default is False.
|
||||
inv: invert image colors. Default is False.
|
||||
alpha_color: set RGB color Tuple for transparent parts replacement. Default is pure white.
|
||||
"""
|
||||
assert isinstance(img, (np.ndarray, list, str, bytes))
|
||||
if isinstance(img, list) and det == True:
|
||||
|
@ -517,21 +528,35 @@ class PaddleOCR(predict_system.TextSystem):
|
|||
exit(0)
|
||||
if cls == True and self.use_angle_cls == False:
|
||||
logger.warning(
|
||||
'Since the angle classifier is not initialized, the angle classifier will not be uesd during the forward process'
|
||||
'Since the angle classifier is not initialized, it will not be used during the forward process'
|
||||
)
|
||||
|
||||
img = check_img(img)
|
||||
# for inferring PDF files
|
||||
if isinstance(img, list):
|
||||
if self.page_num > len(img) or self.page_num == 0:
|
||||
self.page_num = len(img)
|
||||
imgs = img[:self.page_num]
|
||||
imgs = img
|
||||
else:
|
||||
imgs = img[:self.page_num]
|
||||
else:
|
||||
imgs = [img]
|
||||
|
||||
def preprocess_image(_image):
|
||||
_image = alpha_to_color(_image, alpha_color)
|
||||
if inv:
|
||||
_image = cv2.bitwise_not(_image)
|
||||
if bin:
|
||||
_image = binarize_img(_image)
|
||||
return _image
|
||||
|
||||
if det and rec:
|
||||
ocr_res = []
|
||||
for idx, img in enumerate(imgs):
|
||||
img = preprocess_image(img)
|
||||
dt_boxes, rec_res, _ = self.__call__(img, cls)
|
||||
if not dt_boxes and not rec_res:
|
||||
ocr_res.append(None)
|
||||
continue
|
||||
tmp_res = [[box.tolist(), res]
|
||||
for box, res in zip(dt_boxes, rec_res)]
|
||||
ocr_res.append(tmp_res)
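The new `bin`, `inv`, and `alpha_color` keyword arguments surface the `preprocess_image` helper above to callers. A minimal usage sketch (the input path is hypothetical; the parameter names come from the signature in this hunk):

```python
from paddleocr import PaddleOCR

ocr = PaddleOCR(use_angle_cls=True, lang='en')  # downloads models on first run
result = ocr.ocr(
    'scan_with_alpha.png',         # hypothetical input image
    cls=True,
    bin=True,                      # Otsu-binarize before detection/recognition
    inv=False,                     # set True for light-on-dark text
    alpha_color=(255, 255, 255))   # flatten any alpha channel onto white
```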
|
||||
|
@ -539,7 +564,11 @@ class PaddleOCR(predict_system.TextSystem):
|
|||
elif det and not rec:
|
||||
ocr_res = []
|
||||
for idx, img in enumerate(imgs):
|
||||
img = preprocess_image(img)
|
||||
dt_boxes, elapse = self.text_detector(img)
|
||||
if not dt_boxes:
|
||||
ocr_res.append(None)
|
||||
continue
|
||||
tmp_res = [box.tolist() for box in dt_boxes]
|
||||
ocr_res.append(tmp_res)
|
||||
return ocr_res
|
||||
|
@ -548,6 +577,7 @@ class PaddleOCR(predict_system.TextSystem):
|
|||
cls_res = []
|
||||
for idx, img in enumerate(imgs):
|
||||
if not isinstance(img, list):
|
||||
img = preprocess_image(img)
|
||||
img = [img]
|
||||
if self.use_angle_cls and cls:
|
||||
img, cls_res_tmp, elapse = self.text_classifier(img)
|
||||
|
@ -649,10 +679,15 @@ def main():
|
|||
img_name = os.path.basename(img_path).split('.')[0]
|
||||
logger.info('{}{}{}'.format('*' * 10, img_path, '*' * 10))
|
||||
if args.type == 'ocr':
|
||||
result = engine.ocr(img_path,
|
||||
det=args.det,
|
||||
rec=args.rec,
|
||||
cls=args.use_angle_cls)
|
||||
result = engine.ocr(
|
||||
img_path,
|
||||
det=args.det,
|
||||
rec=args.rec,
|
||||
cls=args.use_angle_cls,
|
||||
bin=args.binarize,
|
||||
inv=args.invert,
|
||||
alpha_color=args.alphacolor
|
||||
)
|
||||
if result is not None:
|
||||
for idx in range(len(result)):
|
||||
res = result[idx]
|
||||
|
@ -694,7 +729,7 @@ def main():
|
|||
logger.info('processing {}/{} page:'.format(index + 1,
|
||||
len(img_paths)))
|
||||
new_img_name = os.path.basename(new_img_path).split('.')[0]
|
||||
result = engine(new_img_path, img_idx=index)
|
||||
result = engine(img, img_idx=index)
|
||||
save_structure_res(result, args.output, img_name, index)
|
||||
|
||||
if args.recovery and result != []:
|
||||
|
|
|
@ -27,7 +27,7 @@ from .make_pse_gt import MakePseGt
|
|||
from .rec_img_aug import BaseDataAugmentation, RecAug, RecConAug, RecResizeImg, ClsResizeImg, \
|
||||
SRNRecResizeImg, GrayRecResizeImg, SARRecResizeImg, PRENResizeImg, \
|
||||
ABINetRecResizeImg, SVTRRecResizeImg, ABINetRecAug, VLRecResizeImg, SPINRecResizeImg, RobustScannerRecResizeImg, \
|
||||
RFLRecResizeImg
|
||||
RFLRecResizeImg, SVTRRecAug
|
||||
from .ssl_img_aug import SSLRotateResize
|
||||
from .randaugment import RandAugment
|
||||
from .copy_paste import CopyPaste
|
||||
|
|
|
@ -405,3 +405,54 @@ class CVColorJitter(object):
|
|||
def __call__(self, img):
|
||||
if random.random() < self.p: return self.transforms(img)
|
||||
else: return img
|
||||
|
||||
|
||||
class SVTRDeterioration(object):
|
||||
def __init__(self, var, degrees, factor, p=0.5):
|
||||
self.p = p
|
||||
transforms = []
|
||||
if var is not None:
|
||||
transforms.append(CVGaussianNoise(var=var))
|
||||
if degrees is not None:
|
||||
transforms.append(CVMotionBlur(degrees=degrees))
|
||||
if factor is not None:
|
||||
transforms.append(CVRescale(factor=factor))
|
||||
self.transforms = transforms
|
||||
|
||||
def __call__(self, img):
|
||||
if random.random() < self.p:
|
||||
random.shuffle(self.transforms)
|
||||
transforms = Compose(self.transforms)
|
||||
return transforms(img)
|
||||
else:
|
||||
return img
|
||||
|
||||
|
||||
class SVTRGeometry(object):
|
||||
def __init__(self,
|
||||
aug_type=0,
|
||||
degrees=15,
|
||||
translate=(0.3, 0.3),
|
||||
scale=(0.5, 2.),
|
||||
shear=(45, 15),
|
||||
distortion=0.5,
|
||||
p=0.5):
|
||||
self.aug_type = aug_type
|
||||
self.p = p
|
||||
self.transforms = []
|
||||
self.transforms.append(CVRandomRotation(degrees=degrees))
|
||||
self.transforms.append(CVRandomAffine(
|
||||
degrees=degrees, translate=translate, scale=scale, shear=shear))
|
||||
self.transforms.append(CVRandomPerspective(distortion=distortion))
|
||||
|
||||
def __call__(self, img):
|
||||
if random.random() < self.p:
|
||||
if self.aug_type:
|
||||
random.shuffle(self.transforms)
|
||||
transforms = Compose(self.transforms[:random.randint(1, 3)])
|
||||
img = transforms(img)
|
||||
else:
|
||||
img = self.transforms[random.randint(0, 2)](img)
|
||||
return img
|
||||
else:
|
||||
return img
|
|
@ -20,7 +20,6 @@ import paddle
|
|||
|
||||
import numpy as np
|
||||
import Polygon as plg
|
||||
import scipy.io as scio
|
||||
|
||||
from PIL import Image
|
||||
import paddle.vision.transforms as transforms
|
||||
|
|
|
@ -64,7 +64,7 @@ class DetLabelEncode(object):
|
|||
return None
|
||||
boxes = self.expand_points_num(boxes)
|
||||
boxes = np.array(boxes, dtype=np.float32)
|
||||
txt_tags = np.array(txt_tags, dtype=np.bool)
|
||||
txt_tags = np.array(txt_tags, dtype=bool)
|
||||
|
||||
data['polys'] = boxes
|
||||
data['texts'] = txts
|
||||
|
@ -218,7 +218,7 @@ class E2ELabelEncodeTest(BaseRecLabelEncode):
|
|||
else:
|
||||
txt_tags.append(False)
|
||||
boxes = np.array(boxes, dtype=np.float32)
|
||||
txt_tags = np.array(txt_tags, dtype=np.bool)
|
||||
txt_tags = np.array(txt_tags, dtype=bool)
|
||||
data['polys'] = boxes
|
||||
data['ignore_tags'] = txt_tags
|
||||
temp_texts = []
|
||||
|
@ -254,7 +254,7 @@ class E2ELabelEncodeTrain(object):
|
|||
else:
|
||||
txt_tags.append(False)
|
||||
boxes = np.array(boxes, dtype=np.float32)
|
||||
txt_tags = np.array(txt_tags, dtype=np.bool)
|
||||
txt_tags = np.array(txt_tags, dtype=bool)
|
||||
|
||||
data['polys'] = boxes
|
||||
data['texts'] = txts
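These `np.bool` replacements (and the matching `np.int` replacements in the DB post-processing hunks further down) track NumPy's removal of its deprecated scalar aliases. A minimal sketch of the portable spellings:

```python
import numpy as np

# np.bool and np.int were deprecated in NumPy 1.20 and removed in 1.24;
# use the builtin type or an explicit dtype string instead.
tags = np.array([True, False], dtype=bool)
boxes = np.round(np.array([[1.2, 3.7]])).astype("int32")
```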
|
||||
|
@ -1396,10 +1396,9 @@ class VLLabelEncode(BaseRecLabelEncode):
|
|||
max_text_length,
|
||||
character_dict_path=None,
|
||||
use_space_char=False,
|
||||
lower=True,
|
||||
**kwargs):
|
||||
super(VLLabelEncode, self).__init__(
|
||||
max_text_length, character_dict_path, use_space_char, lower)
|
||||
super(VLLabelEncode, self).__init__(max_text_length,
|
||||
character_dict_path, use_space_char)
|
||||
self.dict = {}
|
||||
for i, char in enumerate(self.character):
|
||||
self.dict[char] = i
|
||||
|
|
|
@ -19,7 +19,7 @@ import random
|
|||
import copy
|
||||
from PIL import Image
|
||||
from .text_image_aug import tia_perspective, tia_stretch, tia_distort
|
||||
from .abinet_aug import CVGeometry, CVDeterioration, CVColorJitter
|
||||
from .abinet_aug import CVGeometry, CVDeterioration, CVColorJitter, SVTRGeometry, SVTRDeterioration
|
||||
from paddle.vision.transforms import Compose
|
||||
|
||||
|
||||
|
@ -169,6 +169,38 @@ class RecConAug(object):
|
|||
return data
|
||||
|
||||
|
||||
class SVTRRecAug(object):
|
||||
def __init__(self,
|
||||
aug_type=0,
|
||||
geometry_p=0.5,
|
||||
deterioration_p=0.25,
|
||||
colorjitter_p=0.25,
|
||||
**kwargs):
|
||||
self.transforms = Compose([
|
||||
SVTRGeometry(
|
||||
aug_type=aug_type,
|
||||
degrees=45,
|
||||
translate=(0.0, 0.0),
|
||||
scale=(0.5, 2.),
|
||||
shear=(45, 15),
|
||||
distortion=0.5,
|
||||
p=geometry_p), SVTRDeterioration(
|
||||
var=20, degrees=6, factor=4, p=deterioration_p),
|
||||
CVColorJitter(
|
||||
brightness=0.5,
|
||||
contrast=0.5,
|
||||
saturation=0.5,
|
||||
hue=0.1,
|
||||
p=colorjitter_p)
|
||||
])
|
||||
|
||||
def __call__(self, data):
|
||||
img = data['image']
|
||||
img = self.transforms(img)
|
||||
data['image'] = img
|
||||
return data
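A minimal sketch of driving the new `SVTRRecAug` outside a training config, assuming the usual `imaug` convention of an HWC uint8 image under `data['image']`:

```python
import numpy as np
from ppocr.data.imaug.rec_img_aug import SVTRRecAug

aug = SVTRRecAug(aug_type=0, geometry_p=0.5, deterioration_p=0.25, colorjitter_p=0.25)
img = np.random.randint(0, 255, (48, 320, 3), dtype=np.uint8)  # dummy text-line crop
out = aug({'image': img})['image']  # randomly warped / degraded / color-jittered
```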
|
||||
|
||||
|
||||
class ClsResizeImg(object):
|
||||
def __init__(self, image_shape, **kwargs):
|
||||
self.image_shape = image_shape
|
||||
|
@ -538,7 +570,7 @@ def resize_norm_img_chinese(img, image_shape):
|
|||
max_wh_ratio = imgW * 1.0 / imgH
|
||||
h, w = img.shape[0], img.shape[1]
|
||||
ratio = w * 1.0 / h
|
||||
max_wh_ratio = min(max(max_wh_ratio, ratio), max_wh_ratio)
|
||||
max_wh_ratio = max(max_wh_ratio, ratio)
|
||||
imgW = int(imgH * max_wh_ratio)
|
||||
if math.ceil(imgH * ratio) > imgW:
|
||||
resized_w = imgW
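The replaced line was effectively a no-op: for any ratio `r`, `min(max(m, r), m) == m`, so wide crops were always squeezed back to the default aspect ratio. A one-line check of the before/after behavior:

```python
m, r = 4.0, 7.5                    # default max_wh_ratio, a wide crop's w/h ratio
assert min(max(m, r), m) == 4.0    # old expression: ratio silently capped
assert max(m, r) == 7.5            # fixed expression: imgW now grows with the crop
```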
|
||||
|
|
|
@ -28,7 +28,7 @@ class CosineEmbeddingLoss(nn.Layer):
|
|||
|
||||
def forward(self, x1, x2, target):
|
||||
similarity = paddle.sum(
|
||||
x1 * x2, dim=-1) / (paddle.norm(
|
||||
x1 * x2, axis=-1) / (paddle.norm(
|
||||
x1, axis=-1) * paddle.norm(
|
||||
x2, axis=-1) + self.epsilon)
|
||||
one_list = paddle.full_like(target, fill_value=1)
|
||||
|
|
|
@ -32,7 +32,7 @@ def drop_path(x, drop_prob=0., training=False):
|
|||
"""
|
||||
if drop_prob == 0. or not training:
|
||||
return x
|
||||
keep_prob = paddle.to_tensor(1 - drop_prob)
|
||||
keep_prob = paddle.to_tensor(1 - drop_prob, dtype=x.dtype)
|
||||
shape = (paddle.shape(x)[0], ) + (1, ) * (x.ndim - 1)
|
||||
random_tensor = keep_prob + paddle.rand(shape, dtype=x.dtype)
|
||||
random_tensor = paddle.floor(random_tensor) # binarize
|
||||
|
|
|
@ -31,7 +31,7 @@ def get_bias_attr(k):
|
|||
|
||||
|
||||
class Head(nn.Layer):
|
||||
def __init__(self, in_channels, name_list, kernel_list=[3, 2, 2], **kwargs):
|
||||
def __init__(self, in_channels, kernel_list=[3, 2, 2], **kwargs):
|
||||
super(Head, self).__init__()
|
||||
|
||||
self.conv1 = nn.Conv2D(
|
||||
|
@ -93,16 +93,8 @@ class DBHead(nn.Layer):
|
|||
def __init__(self, in_channels, k=50, **kwargs):
|
||||
super(DBHead, self).__init__()
|
||||
self.k = k
|
||||
binarize_name_list = [
|
||||
'conv2d_56', 'batch_norm_47', 'conv2d_transpose_0', 'batch_norm_48',
|
||||
'conv2d_transpose_1', 'binarize'
|
||||
]
|
||||
thresh_name_list = [
|
||||
'conv2d_57', 'batch_norm_49', 'conv2d_transpose_2', 'batch_norm_50',
|
||||
'conv2d_transpose_3', 'thresh'
|
||||
]
|
||||
self.binarize = Head(in_channels, binarize_name_list, **kwargs)
|
||||
self.thresh = Head(in_channels, thresh_name_list, **kwargs)
|
||||
self.binarize = Head(in_channels, **kwargs)
|
||||
self.thresh = Head(in_channels, **kwargs)
|
||||
|
||||
def step_function(self, x, y):
|
||||
return paddle.reciprocal(1 + paddle.exp(-self.k * (x - y)))
|
||||
|
|
|
@ -17,7 +17,6 @@ import paddle
|
|||
from paddle import nn
|
||||
import paddle.nn.functional as F
|
||||
from paddle.nn import LayerList
|
||||
# from paddle.nn.initializer import XavierNormal as xavier_uniform_
|
||||
from paddle.nn import Dropout, Linear, LayerNorm
|
||||
import numpy as np
|
||||
from ppocr.modeling.backbones.rec_svtrnet import Mlp, zeros_, ones_
|
||||
|
@ -30,7 +29,6 @@ class Transformer(nn.Layer):
|
|||
Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and
|
||||
Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information
|
||||
Processing Systems, pages 6000-6010.
|
||||
|
||||
Args:
|
||||
d_model: the number of expected features in the encoder/decoder inputs (default=512).
|
||||
nhead: the number of heads in the multiheadattention models (default=8).
|
||||
|
@ -162,7 +160,7 @@ class Transformer(nn.Layer):
|
|||
memory = src
|
||||
dec_seq = paddle.full((bs, 1), 2, dtype=paddle.int64)
|
||||
dec_prob = paddle.full((bs, 1), 1., dtype=paddle.float32)
|
||||
for len_dec_seq in range(1, self.max_len):
|
||||
for len_dec_seq in range(1, paddle.to_tensor(self.max_len)):
|
||||
dec_seq_embed = self.embedding(dec_seq)
|
||||
dec_seq_embed = self.positional_encoding(dec_seq_embed)
|
||||
tgt_mask = self.generate_square_subsequent_mask(
|
||||
|
@ -304,7 +302,7 @@ class Transformer(nn.Layer):
|
|||
inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(
|
||||
active_inst_idx_list)
|
||||
# Decode
|
||||
for len_dec_seq in range(1, self.max_len):
|
||||
for len_dec_seq in range(1, paddle.to_tensor(self.max_len)):
|
||||
src_enc_copy = src_enc.clone()
|
||||
active_inst_idx_list = beam_decode_step(
|
||||
inst_dec_beams, len_dec_seq, src_enc_copy,
|
||||
|
@ -348,15 +346,12 @@ class MultiheadAttention(nn.Layer):
|
|||
"""Allows the model to jointly attend to information
|
||||
from different representation subspaces.
|
||||
See reference: Attention Is All You Need
|
||||
|
||||
.. math::
|
||||
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
|
||||
\text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
|
||||
|
||||
Args:
|
||||
embed_dim: total dimension of the model
|
||||
num_heads: parallel attention layers, or heads
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, embed_dim, num_heads, dropout=0., self_attn=False):
|
||||
|
|
|
@ -144,9 +144,9 @@ class DBPostProcess(object):
|
|||
np.round(box[:, 0] / width * dest_width), 0, dest_width)
|
||||
box[:, 1] = np.clip(
|
||||
np.round(box[:, 1] / height * dest_height), 0, dest_height)
|
||||
boxes.append(box.astype(np.int16))
|
||||
boxes.append(box.astype("int32"))
|
||||
scores.append(score)
|
||||
return np.array(boxes, dtype=np.int16), scores
|
||||
return np.array(boxes, dtype="int32"), scores
|
||||
|
||||
def unclip(self, box, unclip_ratio):
|
||||
poly = Polygon(box)
|
||||
|
@ -185,15 +185,15 @@ class DBPostProcess(object):
|
|||
'''
|
||||
h, w = bitmap.shape[:2]
|
||||
box = _box.copy()
|
||||
xmin = np.clip(np.floor(box[:, 0].min()).astype(np.int), 0, w - 1)
|
||||
xmax = np.clip(np.ceil(box[:, 0].max()).astype(np.int), 0, w - 1)
|
||||
ymin = np.clip(np.floor(box[:, 1].min()).astype(np.int), 0, h - 1)
|
||||
ymax = np.clip(np.ceil(box[:, 1].max()).astype(np.int), 0, h - 1)
|
||||
xmin = np.clip(np.floor(box[:, 0].min()).astype("int32"), 0, w - 1)
|
||||
xmax = np.clip(np.ceil(box[:, 0].max()).astype("int32"), 0, w - 1)
|
||||
ymin = np.clip(np.floor(box[:, 1].min()).astype("int32"), 0, h - 1)
|
||||
ymax = np.clip(np.ceil(box[:, 1].max()).astype("int32"), 0, h - 1)
|
||||
|
||||
mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
|
||||
box[:, 0] = box[:, 0] - xmin
|
||||
box[:, 1] = box[:, 1] - ymin
|
||||
cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)
|
||||
cv2.fillPoly(mask, box.reshape(1, -1, 2).astype("int32"), 1)
|
||||
return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]
|
||||
|
||||
def box_score_slow(self, bitmap, contour):
|
||||
|
@ -214,7 +214,7 @@ class DBPostProcess(object):
|
|||
contour[:, 0] = contour[:, 0] - xmin
|
||||
contour[:, 1] = contour[:, 1] - ymin
|
||||
|
||||
cv2.fillPoly(mask, contour.reshape(1, -1, 2).astype(np.int32), 1)
|
||||
cv2.fillPoly(mask, contour.reshape(1, -1, 2).astype("int32"), 1)
|
||||
return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]
|
||||
|
||||
def __call__(self, outs_dict, shape_list):
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from bidi.algorithm import get_display
|
||||
import numpy as np
|
||||
import paddle
|
||||
from paddle.nn import functional as F
|
||||
|
@ -49,25 +50,71 @@ class BaseRecLabelDecode(object):
|
|||
self.character = dict_character
|
||||
|
||||
def pred_reverse(self, pred):
|
||||
pred_re = []
|
||||
c_current = ''
|
||||
for c in pred:
|
||||
if not bool(re.search('[a-zA-Z0-9 :*./%+-]', c)):
|
||||
if c_current != '':
|
||||
pred_re.append(c_current)
|
||||
pred_re.append(c)
|
||||
c_current = ''
|
||||
else:
|
||||
c_current += c
|
||||
if c_current != '':
|
||||
pred_re.append(c_current)
|
||||
|
||||
return ''.join(pred_re[::-1])
|
||||
return get_display(pred)
|
||||
|
||||
def add_special_char(self, dict_character):
|
||||
return dict_character
|
||||
|
||||
def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
|
||||
def get_word_info(self, text, selection):
|
||||
"""
|
||||
Group the decoded characters and record the corresponding decoded positions.
|
||||
|
||||
Args:
|
||||
text: the decoded text
|
||||
selection: the bool array that identifies which columns of features are decoded as non-separated characters
|
||||
Returns:
|
||||
word_list: list of the grouped words
|
||||
word_col_list: list of decoding positions corresponding to each character in the grouped word
|
||||
state_list: list of marker to identify the type of grouping words, including two types of grouping words:
|
||||
- 'cn': continuous Chinese characters (e.g., 你好啊)
- 'en&num': continuous English characters (e.g., hello), numbers (e.g., 123, 1.123), or mixtures of them connected by '-' (e.g., VGG-16)
|
||||
The remaining characters in text are treated as separators between groups (e.g., space, '(', ')', etc.).
|
||||
"""
|
||||
state = None
|
||||
word_content = []
|
||||
word_col_content = []
|
||||
word_list = []
|
||||
word_col_list = []
|
||||
state_list = []
|
||||
valid_col = np.where(selection==True)[0]
|
||||
|
||||
for c_i, char in enumerate(text):
|
||||
if '\u4e00' <= char <= '\u9fff':
|
||||
c_state = 'cn'
|
||||
elif bool(re.search('[a-zA-Z0-9]', char)):
|
||||
c_state = 'en&num'
|
||||
else:
|
||||
c_state = 'splitter'
|
||||
|
||||
if char == '.' and state == 'en&num' and c_i + 1 < len(text) and bool(re.search('[0-9]', text[c_i+1])): # grouping floating-point numbers
|
||||
c_state = 'en&num'
|
||||
if char == '-' and state == "en&num": # grouping words with '-', such as 'state-of-the-art'
|
||||
c_state = 'en&num'
|
||||
|
||||
if state == None:
|
||||
state = c_state
|
||||
|
||||
if state != c_state:
|
||||
if len(word_content) != 0:
|
||||
word_list.append(word_content)
|
||||
word_col_list.append(word_col_content)
|
||||
state_list.append(state)
|
||||
word_content = []
|
||||
word_col_content = []
|
||||
state = c_state
|
||||
|
||||
if state != "splitter":
|
||||
word_content.append(char)
|
||||
word_col_content.append(valid_col[c_i])
|
||||
|
||||
if len(word_content) != 0:
|
||||
word_list.append(word_content)
|
||||
word_col_list.append(word_col_content)
|
||||
state_list.append(state)
|
||||
|
||||
return word_list, word_col_list, state_list
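A minimal sketch of calling the new grouping helper directly (requires `python-bidi` to be installed, since this module now imports `get_display`; the sample text and dummy mask are ours):

```python
import numpy as np
from ppocr.postprocess.rec_postprocess import BaseRecLabelDecode

decoder = BaseRecLabelDecode()              # default 36-char lowercase dict
text = "VGG-16 acc 0.95"
selection = np.ones(len(text), dtype=bool)  # dummy mask: every column kept
words, cols, states = decoder.get_word_info(text, selection)
# words  -> [['V','G','G','-','1','6'], ['a','c','c'], ['0','.','9','5']]
# states -> ['en&num', 'en&num', 'en&num']; the spaces act as splitters
```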
|
||||
|
||||
def decode(self, text_index, text_prob=None, is_remove_duplicate=False, return_word_box=False):
|
||||
""" convert text-index into text-label. """
|
||||
result_list = []
|
||||
ignored_tokens = self.get_ignored_tokens()
|
||||
|
@ -96,7 +143,11 @@ class BaseRecLabelDecode(object):
|
|||
if self.reverse: # for arabic rec
|
||||
text = self.pred_reverse(text)
|
||||
|
||||
result_list.append((text, np.mean(conf_list).tolist()))
|
||||
if return_word_box:
|
||||
word_list, word_col_list, state_list = self.get_word_info(text, selection)
|
||||
result_list.append((text, np.mean(conf_list).tolist(), [len(text_index[batch_idx]), word_list, word_col_list, state_list]))
|
||||
else:
|
||||
result_list.append((text, np.mean(conf_list).tolist()))
|
||||
return result_list
|
||||
|
||||
def get_ignored_tokens(self):
|
||||
|
@ -111,14 +162,19 @@ class CTCLabelDecode(BaseRecLabelDecode):
|
|||
super(CTCLabelDecode, self).__init__(character_dict_path,
|
||||
use_space_char)
|
||||
|
||||
def __call__(self, preds, label=None, *args, **kwargs):
|
||||
def __call__(self, preds, label=None, return_word_box=False, *args, **kwargs):
|
||||
if isinstance(preds, tuple) or isinstance(preds, list):
|
||||
preds = preds[-1]
|
||||
if isinstance(preds, paddle.Tensor):
|
||||
preds = preds.numpy()
|
||||
preds_idx = preds.argmax(axis=2)
|
||||
preds_prob = preds.max(axis=2)
|
||||
text = self.decode(preds_idx, preds_prob, is_remove_duplicate=True)
|
||||
text = self.decode(preds_idx, preds_prob, is_remove_duplicate=True, return_word_box=return_word_box)
|
||||
if return_word_box:
|
||||
for rec_idx, rec in enumerate(text):
|
||||
wh_ratio = kwargs['wh_ratio_list'][rec_idx]
|
||||
max_wh_ratio = kwargs['max_wh_ratio']
|
||||
rec[2][0] = rec[2][0]*(wh_ratio/max_wh_ratio)
|
||||
if label is None:
|
||||
return text
|
||||
label = self.decode(label)
|
||||
|
|
|
@ -14,7 +14,6 @@
|
|||
|
||||
import logging
|
||||
import os
|
||||
import imghdr
|
||||
import cv2
|
||||
import random
|
||||
import numpy as np
|
||||
|
@ -59,7 +58,6 @@ def get_image_file_list(img_file):
|
|||
if img_file is None or not os.path.exists(img_file):
|
||||
raise Exception("not found any img file in {}".format(img_file))
|
||||
|
||||
img_end = {'jpg', 'bmp', 'png', 'jpeg', 'rgb', 'tif', 'tiff', 'gif', 'pdf'}
|
||||
if os.path.isfile(img_file) and _check_image_file(img_file):
|
||||
imgs_lists.append(img_file)
|
||||
elif os.path.isdir(img_file):
|
||||
|
@ -72,9 +70,28 @@ def get_image_file_list(img_file):
|
|||
imgs_lists = sorted(imgs_lists)
|
||||
return imgs_lists
|
||||
|
||||
def binarize_img(img):
|
||||
if len(img.shape) == 3 and img.shape[2] == 3:
|
||||
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # conversion to grayscale image
|
||||
# use cv2 threshold binarization
|
||||
_, gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
||||
img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
|
||||
return img
|
||||
|
||||
def alpha_to_color(img, alpha_color=(255, 255, 255)):
|
||||
if len(img.shape) == 3 and img.shape[2] == 4:
|
||||
B, G, R, A = cv2.split(img)
|
||||
alpha = A / 255
|
||||
|
||||
R = (alpha_color[0] * (1 - alpha) + R * alpha).astype(np.uint8)
|
||||
G = (alpha_color[1] * (1 - alpha) + G * alpha).astype(np.uint8)
|
||||
B = (alpha_color[2] * (1 - alpha) + B * alpha).astype(np.uint8)
|
||||
|
||||
img = cv2.merge((B, G, R))
|
||||
return img
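A minimal sketch of the two new helpers on a single image (the file name is hypothetical):

```python
import cv2
from ppocr.utils.utility import alpha_to_color, binarize_img

img = cv2.imread('logo.png', cv2.IMREAD_UNCHANGED)      # may be 4-channel BGRA
img = alpha_to_color(img, alpha_color=(255, 255, 255))  # composite alpha onto white
img = binarize_img(img)                                 # Otsu threshold, still 3-channel BGR
```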
|
||||
|
||||
def check_and_read(img_path):
|
||||
if os.path.basename(img_path)[-3:] in ['gif', 'GIF']:
|
||||
if os.path.basename(img_path)[-3:].lower() == 'gif':
|
||||
gif = cv2.VideoCapture(img_path)
|
||||
ret, frame = gif.read()
|
||||
if not ret:
|
||||
|
@ -85,19 +102,19 @@ def check_and_read(img_path):
|
|||
frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
|
||||
imgvalue = frame[:, :, ::-1]
|
||||
return imgvalue, True, False
|
||||
elif os.path.basename(img_path)[-3:] in ['pdf']:
|
||||
elif os.path.basename(img_path)[-3:].lower() == 'pdf':
|
||||
import fitz
|
||||
from PIL import Image
|
||||
imgs = []
|
||||
with fitz.open(img_path) as pdf:
|
||||
for pg in range(0, pdf.pageCount):
|
||||
for pg in range(0, pdf.page_count):
|
||||
page = pdf[pg]
|
||||
mat = fitz.Matrix(2, 2)
|
||||
pm = page.getPixmap(matrix=mat, alpha=False)
|
||||
pm = page.get_pixmap(matrix=mat, alpha=False)
|
||||
|
||||
# if width or height > 2000 pixels, don't enlarge the image
|
||||
if pm.width > 2000 or pm.height > 2000:
|
||||
pm = page.getPixmap(matrix=fitz.Matrix(1, 1), alpha=False)
|
||||
pm = page.get_pixmap(matrix=fitz.Matrix(1, 1), alpha=False)
|
||||
|
||||
img = Image.frombytes("RGB", [pm.width, pm.height], pm.samples)
|
||||
img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
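With the PyMuPDF API renames above (`page_count`, `get_pixmap`), the helper can be exercised as follows (a sketch; the path is hypothetical and we assume the helper's `(imgs, is_gif, is_pdf)` return convention):

```python
from ppocr.utils.utility import check_and_read

imgs, is_gif, is_pdf = check_and_read('manual.pdf')
if is_pdf:
    # each page is a BGR ndarray rendered at 2x zoom (1x if a side exceeds 2000 px)
    print('rendered', len(imgs), 'pages')
```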
|
||||
|
|
|
@ -104,19 +104,6 @@ paddleocr --image_dir=ppstructure/recovery/UnrealText.pdf --type=structure --rec
|
|||
|
||||
Via OCR:
|
||||
|
||||
Layout recovery provides two methods; for a detailed introduction, please refer to the [Layout Recovery Tutorial](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/ppstructure/recovery/README_ch.md):
|
||||
|
||||
- PDF parsing
- OCR
|
||||
|
||||
Via PDF parsing (only PDF input is supported):
|
||||
|
||||
```bash
|
||||
paddleocr --image_dir=ppstructure/recovery/UnrealText.pdf --type=structure --recovery=true --use_pdf2docx_api=true
|
||||
```
|
||||
|
||||
Via OCR:
|
||||
|
||||
```bash
|
||||
# Chinese test image
|
||||
paddleocr --image_dir=ppstructure/docs/table/1.png --type=structure --recovery=true
|
||||
|
|
|
@ -311,7 +311,7 @@ Please refer to: [Key Information Extraction](../kie/README.md) .
|
|||
| save_pdf | Whether to convert docx to pdf when recovery| False |
|
||||
| structure_version | Structure version, optional PP-structure and PP-structurev2 | PP-structure |
|
||||
|
||||
Most of the parameters are consistent with the PaddleOCR whl package, see [whl package documentation](../../doc/doc_en/whl.md)
|
||||
Most of the parameters are consistent with the PaddleOCR whl package, see [whl package documentation](../../doc/doc_en/whl_en.md)
|
||||
|
||||
<a name="3"></a>
|
||||
## 3. Summary
|
||||
|
|
|
@ -89,7 +89,7 @@ Boxes of different colors in the image represent different categories.
|
|||
|
||||
The invoice and application form images have three categories: `question`, `answer` and `header`. The `question` and `answer` can be used to extract the relationship.
|
||||
|
||||
For the ID card image, the mdoel can be directly identify the key information such as `name`, `gender`, `nationality`, so that the subsequent relationship extraction process is not required, and the key information extraction task can be completed using only on model.
|
||||
For the ID card image, the model can directly identify key information such as `name`, `gender`, and `nationality`, so that the subsequent relationship extraction process is not required and the key information extraction task can be completed using only one model.
|
||||
|
||||
### 3.2 RE
|
||||
|
||||
|
|
|
@ -34,7 +34,7 @@ from ppocr.utils.visual import draw_ser_results, draw_re_results
|
|||
from tools.infer.predict_system import TextSystem
|
||||
from ppstructure.layout.predict_layout import LayoutPredictor
|
||||
from ppstructure.table.predict_table import TableSystem, to_excel
|
||||
from ppstructure.utility import parse_args, draw_structure_result
|
||||
from ppstructure.utility import parse_args, draw_structure_result, cal_ocr_word_box
|
||||
|
||||
logger = get_logger()
|
||||
|
||||
|
@ -79,6 +79,8 @@ class StructureSystem(object):
|
|||
from ppstructure.kie.predict_kie_token_ser_re import SerRePredictor
|
||||
self.kie_predictor = SerRePredictor(args)
|
||||
|
||||
self.return_word_box = args.return_word_box
|
||||
|
||||
def __call__(self, img, return_ocr_result_in_table=False, img_idx=0):
|
||||
time_dict = {
|
||||
'image_orientation': 0,
|
||||
|
@ -156,17 +158,27 @@ class StructureSystem(object):
|
|||
]
|
||||
res = []
|
||||
for box, rec_res in zip(filter_boxes, filter_rec_res):
|
||||
rec_str, rec_conf = rec_res
|
||||
rec_str, rec_conf = rec_res[0], rec_res[1]
|
||||
for token in style_token:
|
||||
if token in rec_str:
|
||||
rec_str = rec_str.replace(token, '')
|
||||
if not self.recovery:
|
||||
box += [x1, y1]
|
||||
res.append({
|
||||
'text': rec_str,
|
||||
'confidence': float(rec_conf),
|
||||
'text_region': box.tolist()
|
||||
})
|
||||
if self.return_word_box:
|
||||
word_box_content_list, word_box_list = cal_ocr_word_box(rec_str, box, rec_res[2])
|
||||
res.append({
|
||||
'text': rec_str,
|
||||
'confidence': float(rec_conf),
|
||||
'text_region': box.tolist(),
|
||||
'text_word': word_box_content_list,
|
||||
'text_word_region': word_box_list
|
||||
})
|
||||
else:
|
||||
res.append({
|
||||
'text': rec_str,
|
||||
'confidence': float(rec_conf),
|
||||
'text_region': box.tolist()
|
||||
})
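# Illustration only (not part of this commit): hypothetical shape of one region
# entry when return_word_box is enabled; the keys come from the append above,
# the values are made up.
#
# {'text': 'VGG-16 backbone',
#  'confidence': 0.98,
#  'text_region': [[10, 12], [220, 12], [220, 40], [10, 40]],
#  'text_word': ['VGG-16', 'backbone'],                # words from get_word_info
#  'text_word_region': [[[10, 12], [95, 12], [95, 40], [10, 40]],
#                       [[105, 12], [220, 12], [220, 40], [105, 40]]]}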
|
||||
res_list.append({
|
||||
'type': region['label'].lower(),
|
||||
'bbox': [x1, y1, x2, y2],
|
||||
|
@ -229,7 +241,9 @@ def main(args):
|
|||
|
||||
if args.recovery and args.use_pdf2docx_api and flag_pdf:
|
||||
from pdf2docx.converter import Converter
|
||||
docx_file = os.path.join(args.output, '{}.docx'.format(img_name))
|
||||
os.makedirs(args.output, exist_ok=True)
|
||||
docx_file = os.path.join(args.output,
|
||||
'{}_api.docx'.format(img_name))
|
||||
cv = Converter(image_file)
|
||||
cv.convert(docx_file)
|
||||
cv.close()
|
||||
|
|
|
@ -152,7 +152,7 @@ cd PaddleOCR/ppstructure
|
|||
# download model
|
||||
mkdir inference && cd inference
|
||||
# Download the detection model of the ultra-lightweight English PP-OCRv3 model and unzip it
|
||||
https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_infer.tar && tar xf en_PP-OCRv3_det_infer.tar
|
||||
wget https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_infer.tar && tar xf en_PP-OCRv3_det_infer.tar
|
||||
# Download the recognition model of the ultra-lightweight English PP-OCRv3 model and unzip it
|
||||
wget https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_infer.tar && tar xf en_PP-OCRv3_rec_infer.tar
|
||||
# Download the ultra-lightweight English table inch model and unzip it
|
||||
|
|
|
@ -73,7 +73,7 @@ def convert_info_docx(img, res, save_folder, img_name):
|
|||
text_run.font.size = shared.Pt(10)
|
||||
|
||||
# save to docx
|
||||
docx_path = os.path.join(save_folder, '{}.docx'.format(img_name))
|
||||
docx_path = os.path.join(save_folder, '{}_ocr.docx'.format(img_name))
|
||||
doc.save(docx_path)
|
||||
logger.info('docx save to {}'.format(docx_path))
|
||||
|
||||
|
|
Some files were not shown because too many files have changed in this diff.