[Docs] Improve docs style (#474)

* new theme

* add theme

* update zh_cn

* improve docs style

* use customized

* fix

* update req

* docs

* Update docs

* update conf

* update

* update layout

* disable logo url

* free version limit

* update conf

* Fix api ref

* fix version
Tong Gao 2021-09-08 11:40:51 +08:00 committed by GitHub
parent 76c9570b39
commit e23b765956
13 changed files with 149 additions and 31 deletions


@@ -1,3 +1,6 @@
.wy-nav-content {
max-width: none;
}
.header-logo {
background-image: url("../images/mmocr.png");
background-size: 110px 40px;
height: 40px;
width: 110px;
}

BIN docs/_static/images/mmocr.png vendored 100755 (new binary file, 28 KiB; not shown)


@@ -1,6 +1,3 @@
API Reference
=============
mmocr.apis
-------------
.. automodule:: mmocr.apis


@@ -15,6 +15,8 @@ import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
@@ -36,15 +38,16 @@ release = __version__
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'recommonmark',
'sphinx_markdown_tables',
'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode',
'sphinx_markdown_tables', 'sphinx_copybutton', 'myst_parser'
]
autodoc_mock_imports = ['mmcv._ext']
# Ignore >>> and ... prompts when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
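# Illustrative effect (not part of this commit): with the regex above,
# sphinx-copybutton strips doctest prompts from copied snippets, e.g.
#   displayed: >>> print('hello')  /  ... print('world')
#   copied:    print('hello')      /  print('world')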
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -69,7 +72,97 @@ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
# 'logo_url': 'https://mmocr.readthedocs.io/en/latest/',
'menu': [
{
'name':
'Tutorial',
'url':
'https://colab.research.google.com/github/'
'open-mmlab/mmocr/blob/main/demo/MMOCR_Tutorial.ipynb'
},
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmocr'
},
{
'name':
'Upstream',
'children': [
{
'name': 'MMCV',
'url': 'https://github.com/open-mmlab/mmcv',
'description': 'Foundational library for computer vision'
},
{
'name': 'MMDetection',
'url': 'https://github.com/open-mmlab/mmdetection',
'description': 'Object detection toolbox and benchmark'
},
]
},
{
'name':
'Projects',
'children': [
{
'name': 'MMAction2',
'url': 'https://github.com/open-mmlab/mmaction2',
},
{
'name': 'MMClassification',
'url': 'https://github.com/open-mmlab/mmclassification',
},
{
'name': 'MMSegmentation',
'url': 'https://github.com/open-mmlab/mmsegmentation',
},
{
'name': 'MMDetection3D',
'url': 'https://github.com/open-mmlab/mmdetection3d',
},
{
'name': 'MMEditing',
'url': 'https://github.com/open-mmlab/mmediting',
},
{
'name': 'MMPose',
'url': 'https://github.com/open-mmlab/mmpose',
},
{
'name': 'MMTracking',
'url': 'https://github.com/open-mmlab/mmtracking',
},
{
'name': 'MMGeneration',
'url': 'https://github.com/open-mmlab/mmgeneration',
},
]
},
{
'name':
'OpenMMLab',
'children': [
{
'name': 'Homepage',
'url': 'https://openmmlab.com/'
},
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/'
},
]
},
]
}
language = 'en'
@@ -81,6 +174,9 @@ master_doc = 'index'
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# Enable ::: for myst
myst_enable_extensions = ['colon_fence']
def builder_inited_handler(app):
subprocess.run(['./merge_docs.sh'])
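# Assumed wiring (standard Sphinx pattern; not shown in this truncated hunk):
# register the handler so merge_docs.sh runs once the builder is initialized.
def setup(app):
    app.connect('builder-inited', builder_inited_handler)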


@@ -76,7 +76,9 @@ test=dict(
```
You can check the content of the annotation file in `tests/data/toy_dataset/instances_test.json`.
Note: Icdar 2015/2017 and ctw1500 annotations need to be converted into the COCO format following the steps in [datasets.md](datasets.md).
:::{note}
ICDAR 2015/2017 and CTW1500 annotations need to be converted into the COCO format following the steps in [datasets.md](datasets.md); a minimal sketch of that format follows this note.
:::
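For a rough idea of the COCO format, here is a minimal sketch with made-up values (real MMOCR annotation files contain more fields):

```python
# Minimal COCO-style layout; all values below are placeholders.
coco_like = {
    'images': [{'id': 1, 'file_name': 'img_1.jpg', 'width': 1280, 'height': 720}],
    'annotations': [{
        'id': 1,
        'image_id': 1,
        'category_id': 1,
        'bbox': [100, 50, 200, 80],  # [x, y, width, height]
        'segmentation': [[100, 50, 300, 50, 300, 130, 100, 130]],
        'iscrowd': 0,
    }],
    'categories': [{'id': 1, 'name': 'text'}],
}
```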
## Text Recognition Task


@@ -46,9 +46,11 @@ The structure of the text detection dataset directory is organized as follows.
## Important Note
**Note: For users who want to train models on CTW1500, ICDAR 2015/2017, and Totaltext dataset,** there might be some images containing orientation info in EXIF data. The default OpenCV
:::{note}
**For users who want to train models on the CTW1500, ICDAR 2015/2017, and Totaltext datasets,** some images may contain orientation info in their EXIF data. The default OpenCV
backend used in MMCV reads this info and rotates the images accordingly. However, the gold annotations are made on the raw pixels, and such
inconsistency results in false examples in the training set. Therefore, users should use `dict(type='LoadImageFromFile', color_type='color_ignore_orientation')` in pipelines to change MMCV's default loading behaviour, as in the sketch after this note (see [DBNet's config](https://github.com/open-mmlab/mmocr/blob/main/configs/textdet/dbnet/dbnet_r18_fpnc_1200e_icdar2015.py) for a full example).
:::
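A minimal sketch of the loading step under that setting (only the `LoadImageFromFile` entry is the point here; the other transforms are placeholders, not MMOCR's exact pipeline):

```python
# Only the LoadImageFromFile line matters for the EXIF issue; everything
# else is illustrative and should follow your actual config.
train_pipeline = [
    dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
    dict(type='LoadAnnotations', with_bbox=True),  # placeholder transform
    # ... remaining transforms unchanged
]
```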
## Preparation Steps
### ICDAR 2015


@@ -181,7 +181,7 @@ cd /path/to/mmocr/data/mixture
ln -s /path/to/SynthAdd SynthAdd
```
**Note:**
:::{note}
To convert a label file from `txt` format to `lmdb` format, run:
```bash
python tools/data/utils/txt2lmdb.py -i <txt_label_path> -o <lmdb_label_path>
@@ -190,6 +190,7 @@ For example,
```bash
python tools/data/utils/txt2lmdb.py -i data/mixture/Syn90k/label.txt -o data/mixture/Syn90k/label.lmdb
```
:::
### TextOCR
- Step1: Download [train_val_images.zip](https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip), [TextOCR_0.1_train.json](https://dl.fbaipublicfiles.com/textvqa/data/textocr/TextOCR_0.1_train.json) and [TextOCR_0.1_val.json](https://dl.fbaipublicfiles.com/textvqa/data/textocr/TextOCR_0.1_val.json) to `textocr/`.


@@ -37,7 +37,9 @@ Description of arguments:
| `--show`| bool | Determines whether to visualize outputs of ONNXRuntime and PyTorch. Defaults to `False`. |
| `--dynamic-export`| bool | Determines whether to export ONNX model with dynamic input and output shapes. Defaults to `False`. |
**Note**: This tool is still experimental. For now, some customized operators are not supported, and we only support a subset of detection and recognition algorithms.
:::{note}
This tool is still experimental. For now, some customized operators are not supported, and we only support a subset of detection and recognition algorithms.
:::
### List of supported models exportable to ONNX
@@ -52,11 +54,11 @@ The table below lists the models that are guaranteed to be exportable to ONNX an
| PANet | [panet_r18_fpem_ffm_600e_icdar2015.py](https://github.com/open-mmlab/mmocr/blob/main/configs/textdet/panet/panet_r18_fpem_ffm_600e_icdar2015.py) | Y | Y | |
| CRNN | [crnn_academic_dataset.py](https://github.com/open-mmlab/mmocr/blob/main/configs/textrecog/crnn/crnn_academic_dataset.py) | Y | Y | CRNN only accepts input with height 32 |
**Notes**:
:::{note}
- *All models above are tested with PyTorch==1.8.1 and onnxruntime==1.7.0*
- If you meet any problems with the models listed above, please create an issue and it will be addressed soon.
- Because this feature is experimental and may change quickly, please always try with the latest `mmcv` and `mmocr`.
:::
## Convert ONNX to TensorRT (experimental)
@@ -96,7 +98,9 @@ Description of arguments:
| `--show`| bool | Determines whether to show the output of ONNX and TensorRT. Defaults to `False`. |
| `--verbose`| bool | Determines whether to verbose logging messages while creating TensorRT engine. Defaults to `False`. |
**Note**: This tool is still experimental. For now, some customized operators are not supported, and we only support a subset of detection and recognition algorithms.
:::{note}
This tool is still experimental. For now, some customized operators are not supported, and we only support a subset of detection and recognition algorithms.
:::
### List of supported models exportable to TensorRT
@@ -111,11 +115,11 @@ The table below lists the models that are guaranteed to be exportable to TensorR
| PANet | [panet_r18_fpem_ffm_600e_icdar2015.py](https://github.com/open-mmlab/mmocr/blob/main/configs/textdet/panet/panet_r18_fpem_ffm_600e_icdar2015.py) | Y | Y | |
| CRNN | [crnn_academic_dataset.py](https://github.com/open-mmlab/mmocr/blob/main/configs/textrecog/crnn/crnn_academic_dataset.py) | Y | Y | CRNN only accepts input with height 32 |
**Notes**:
:::{note}
- *All models above are tested with PyTorch==1.8.1, onnxruntime==1.7.0 and tensorrt==7.2.1.6*
- If you meet any problems with the models listed above, please create an issue and it will be addressed soon.
- Because this feature is experimental and may change quickly, please always try with the latest `mmcv` and `mmocr`.
:::
## Evaluate ONNX and TensorRT Models (experimental)
@@ -298,8 +302,9 @@ python tools/deploy_test.py \
</tbody>
</table>
**Notes**:
:::{note}
- TensorRT's upsampling operation is a little different from PyTorch's. For DBNet and PANet, we suggest replacing upsampling operations in the nearest mode with upsampling operations in the bilinear mode ([here](https://github.com/open-mmlab/mmocr/blob/50a25e718a028c8b9d96f497e241767dbe9617d1/mmocr/models/textdet/necks/fpem_ffm.py#L33) for PANet, [here](https://github.com/open-mmlab/mmocr/blob/50a25e718a028c8b9d96f497e241767dbe9617d1/mmocr/models/textdet/necks/fpn_cat.py#L111) and [here](https://github.com/open-mmlab/mmocr/blob/50a25e718a028c8b9d96f497e241767dbe9617d1/mmocr/models/textdet/necks/fpn_cat.py#L121) for DBNet). As shown in the above table, networks tagged with * have their upsampling mode changed (see the sketch after this note).
- Note that switching to the bilinear mode causes a smaller performance drop than keeping the nearest mode. However, the weights of the networks were trained with the nearest mode, so to pursue the best performance, using the bilinear mode for both training and TensorRT deployment is recommended.
- All ONNX and TensorRT models are evaluated with dynamic shapes on the datasets, and images are preprocessed according to the original config file.
- This tool is still experimental, and we only support a subset of detection and recognition algorithms for now.
:::
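In plain PyTorch terms, the swap discussed in the first bullet amounts to changing the `mode` argument of the upsampling call (a generic sketch, not MMOCR's exact code):

```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 64, 32, 32)
# Mode used when the released weights were trained:
up_nearest = F.interpolate(x, scale_factor=2, mode='nearest')
# TensorRT-friendlier variant suggested in the note above:
up_bilinear = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
```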


@@ -38,8 +38,11 @@ b. Install PyTorch and torchvision following the [official instructions](https:/
```shell
conda install pytorch==1.6.0 torchvision==0.7.0 cudatoolkit=10.1 -c pytorch
```
Note: Make sure that your compilation CUDA version and runtime CUDA version match.
:::{note}
Make sure that your compilation CUDA version and runtime CUDA version match.
You can check the supported CUDA version for precompiled packages on the [PyTorch website](https://pytorch.org/); a quick runtime check follows this note.
:::
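One quick sanity check on the runtime side (compare the printed version with the CUDA toolkit you compile against, e.g. the output of `nvcc --version`):

```python
import torch

# CUDA version this PyTorch build was compiled against, e.g. '10.1'.
print(torch.version.cuda)
# Whether a CUDA device is visible at runtime.
print(torch.cuda.is_available())
```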
c. Install mmcv. We recommend installing the pre-built mmcv as below.


@@ -93,4 +93,6 @@ data = dict(
```
will test the model with 16 images in a batch.
**Warning:** Batch testing may incur performance decrease of the model due to the different behavior of the data preprocessing pipeline.
:::{warning}
Batch testing may degrade model performance due to the different behavior of the data preprocessing pipeline (a config sketch follows this warning).
:::
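For reference, the batch size in such configs is usually controlled by a key like `samples_per_gpu` (a hedged sketch; check the `data` dict in your own config):

```python
# Hypothetical snippet: in MMDetection-style configs, samples_per_gpu sets
# how many images form one batch on each GPU during testing.
data = dict(
    samples_per_gpu=16,
    workers_per_gpu=2,
)
```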


@@ -15,6 +15,8 @@ import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
@@ -56,8 +58,6 @@ source_suffix = {
'.md': 'markdown',
}
language = 'zh_CN'
# The master toctree document.
master_doc = 'index'
@@ -71,14 +71,19 @@ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
language = 'en'
master_doc = 'index'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# html_static_path = ['_static']
# html_css_files = ['css/readthedocs.css']
def builder_inited_handler(app):


@@ -1,4 +1,6 @@
recommonmark
sphinx==v2.4.4
docutils==0.16.0
myst-parser
-e git+https://github.com/gaotongxiao/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
sphinx==4.0.2
sphinx_copybutton
sphinx_markdown_tables
sphinx_rtd_theme


@@ -20,7 +20,7 @@ line_length = 79
multi_line_output = 0
known_standard_library = setuptools
known_first_party = mmocr
known_third_party = PIL,cv2,imgaug,lanms,lmdb,matplotlib,mmcv,mmdet,numpy,packaging,pyclipper,pytest,rapidfuzz,scipy,shapely,skimage,titlecase,torch,torchvision,yaml
known_third_party = PIL,cv2,imgaug,lanms,lmdb,matplotlib,mmcv,mmdet,numpy,packaging,pyclipper,pytest,pytorch_sphinx_theme,rapidfuzz,scipy,shapely,skimage,titlecase,torch,torchvision,yaml
no_lines_before = STDLIB,LOCALFOLDER
default_section = THIRDPARTY