From b031934129a6d90ebd26c3f14fe6357da1420597 Mon Sep 17 00:00:00 2001 From: quincylin1 Date: Sat, 3 Apr 2021 00:41:23 +0800 Subject: [PATCH] documentation and dbnet related code --- docs/Makefile | 20 ++ docs/api.rst | 15 + docs/changelog.md | 1 + docs/code_of_conduct.md | 76 +++++ docs/conf.py | 83 +++++ docs/contributing.md | 134 ++++++++ docs/datasets.md | 182 ++++++++++ docs/getting_started.md | 319 ++++++++++++++++++ docs/index.rst | 38 +++ docs/install.md | 231 +++++++++++++ docs/make.bat | 36 ++ docs/merge_docs.sh | 10 + docs/requirements.txt | 4 + docs/res/git-workflow-feature.png | Bin 0 -> 24491 bytes docs/res/git-workflow-master-develop.png | Bin 0 -> 17594 bytes docs/stats.py | 94 ++++++ docs/technical_details.md | 226 +++++++++++++ mmocr/datasets/pipelines/dbnet_transforms.py | 272 +++++++++++++++ .../textdet_targets/dbnet_targets.py | 238 +++++++++++++ mmocr/models/textdet/dense_heads/db_head.py | 86 +++++ mmocr/models/textdet/detectors/dbnet.py | 23 ++ mmocr/models/textdet/losses/db_loss.py | 169 ++++++++++ mmocr_gitlab/mmocr | 1 + 23 files changed, 2258 insertions(+) create mode 100644 docs/Makefile create mode 100644 docs/api.rst create mode 100644 docs/changelog.md create mode 100644 docs/code_of_conduct.md create mode 100644 docs/conf.py create mode 100644 docs/contributing.md create mode 100644 docs/datasets.md create mode 100644 docs/getting_started.md create mode 100644 docs/index.rst create mode 100644 docs/install.md create mode 100644 docs/make.bat create mode 100755 docs/merge_docs.sh create mode 100644 docs/requirements.txt create mode 100644 docs/res/git-workflow-feature.png create mode 100644 docs/res/git-workflow-master-develop.png create mode 100755 docs/stats.py create mode 100644 docs/technical_details.md create mode 100644 mmocr/datasets/pipelines/dbnet_transforms.py create mode 100644 mmocr/datasets/pipelines/textdet_targets/dbnet_targets.py create mode 100644 mmocr/models/textdet/dense_heads/db_head.py create mode 100644 mmocr/models/textdet/detectors/dbnet.py create mode 100644 mmocr/models/textdet/losses/db_loss.py create mode 160000 mmocr_gitlab/mmocr diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 00000000..d4bb2cbb --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/api.rst b/docs/api.rst new file mode 100644 index 00000000..a23ab961 --- /dev/null +++ b/docs/api.rst @@ -0,0 +1,15 @@ +API Reference +============= + +mmocr.apis +------------- +.. automodule:: mmocr.apis + :members: + +mmocr.core +------------- + +evaluation +^^^^^^^^^^ +.. 
automodule:: mmocr.core.evaluation + :members: diff --git a/docs/changelog.md b/docs/changelog.md new file mode 100644 index 00000000..8a802039 --- /dev/null +++ b/docs/changelog.md @@ -0,0 +1 @@ +## Changelog diff --git a/docs/code_of_conduct.md b/docs/code_of_conduct.md new file mode 100644 index 00000000..efd43057 --- /dev/null +++ b/docs/code_of_conduct.md @@ -0,0 +1,76 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at chenkaidev@gmail.com. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 00000000..79beb055 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,83 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. + +import os +import subprocess +import sys + +sys.path.insert(0, os.path.abspath('..')) + +# -- Project information ----------------------------------------------------- + +project = 'MMOCR' +copyright = '2020-2030, OpenMMLab' +author = 'OpenMMLab' + +# The full version, including alpha/beta/rc tags +release = '0.1.0' + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode', + 'recommonmark', + 'sphinx_markdown_tables', +] + +autodoc_mock_imports = ['torch', 'torchvision', 'mmcv', 'mmocr.version'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +# The master toctree document. +master_doc = 'index' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' + +master_doc = 'index' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + + +def builder_inited_handler(app): + subprocess.run(['./merge_docs.sh']) + subprocess.run(['./stats.py']) + + +def setup(app): + app.connect('builder-inited', builder_inited_handler) diff --git a/docs/contributing.md b/docs/contributing.md new file mode 100644 index 00000000..6313d4b5 --- /dev/null +++ b/docs/contributing.md @@ -0,0 +1,134 @@ +# Contributing to mmocr + +All kinds of contributions are welcome, including but not limited to the following. 
+
+- Fixes (typos, bugs)
+- New features and components
+
+## Workflow
+
+This document describes the fork & merge request workflow that should be used when contributing to **MMOCR**.
+
+The official public [repository](https://github.com/open-mmlab/mmocr) holds only two branches with an infinite lifetime:
++ master
++ develop
+
+The *master* branch is the main branch where the source code of **HEAD** always reflects a *production-ready state*.
+
+The *develop* branch is the branch where the source code of **HEAD** always reflects a state with the latest development changes for the next release.
+
+Feature branches are used to develop new features for the upcoming or a distant future release.
+
+![](res/git-workflow-master-develop.png)
+
+All new contributors to **MMOCR** should follow these steps:
+
+### Step 1: creating a Fork
+
+1. Fork the repo on GitHub or GitLab to your personal account. Click the `Fork` button on the [project page](https://github.com/open-mmlab/mmocr).
+
+2. Clone your new forked repo to your computer.
+```
+git clone https://github.com/<your-username>/mmocr.git
+```
+3. Add the official repo as an upstream:
+```
+git remote add upstream https://github.com/open-mmlab/mmocr.git
+```
+
+### Step 2: develop a new feature
+
+#### Step 2.1: keeping your fork up to date
+
+Whenever you want to update your fork with the latest upstream changes, you need to fetch the upstream repo's branches and latest commits to bring them into your repository:
+
+```
+# Fetch from upstream remote
+git fetch upstream
+
+# Update your master branch
+git checkout master
+git rebase upstream/master
+git push origin master
+
+# Update your develop branch
+git checkout develop
+git rebase upstream/develop
+git push origin develop
+```
+
+#### Step 2.2: creating a feature branch
+
+```
+git checkout -b <your-feature-branch> develop
+```
+By now, your fork has three branches as follows:
+
+![](res/git-workflow-feature.png)
+
+#### Step 2.3: develop and test
+
+Develop your new feature and test it to make sure it works well.
+
+Please run
+```
+pre-commit run --all-files
+pytest tests
+```
+and fix all failures before every git commit.
+
+#### Step 2.4: prepare to PR
+
+##### Merge official repo updates into your fork
+
+```
+# fetch from the upstream remote, i.e., the official repo
+git fetch upstream
+
+# update the develop branch of your fork
+git checkout develop
+git rebase upstream/develop
+git push origin develop
+
+# update your feature branch
+git checkout <your-feature-branch>
+git rebase develop
+# solve conflicts if any, then test again
+```
+
+##### Push the feature branch to your remote forked repo
+```
+git checkout <your-feature-branch>
+git push origin <your-feature-branch>
+```
+#### Step 2.5: send a PR
+
+Go to the page for your fork on GitHub, select your new feature branch, and click the pull request button to integrate your feature branch into the upstream remote's develop branch.
+
+#### Step 2.6: review code
+
+#### Step 2.7: revise (optional)
+If the PR is not accepted, please follow Steps 2.1, 2.3, 2.4 and 2.5 until it is accepted.
+
+#### Step 2.8: delete the branch once your PR is accepted
+```
+git branch -d <your-feature-branch>
+git push origin :<your-feature-branch>
+```
+
+## Code style
+
+### Python
+We adopt [PEP8](https://www.python.org/dev/peps/pep-0008/) as the preferred code style.
+
+We use the following tools for linting and formatting:
+- [flake8](http://flake8.pycqa.org/en/latest/): linter
+- [yapf](https://github.com/google/yapf): formatter
+- [isort](https://github.com/timothycrosley/isort): sort imports
+
+>Before you create a PR, make sure that your code lints and is formatted by yapf.
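+
+For a quick local check, a minimal sketch (assuming `flake8` and `yapf` are installed, e.g. via the pre-commit hooks above) is to run them directly on the files you touched, for example one of the files added in this patch:
+
+```shell
+# show the formatting changes yapf would make (use -i instead of -d to apply them in place)
+yapf -d mmocr/models/textdet/dense_heads/db_head.py
+# lint the same file
+flake8 mmocr/models/textdet/dense_heads/db_head.py
+```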
+ +### C++ and CUDA +We follow the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html). diff --git a/docs/datasets.md b/docs/datasets.md new file mode 100644 index 00000000..7c13f24d --- /dev/null +++ b/docs/datasets.md @@ -0,0 +1,182 @@ +# Datasets Preparation +This page lists the datasets which are commonly used in text detection, text recognition and key information extraction, and their download links. + +## Text Detection +**The structure of the text detection dataset directory is organized as follows.** +``` +├── ctw1500 +│   ├── imgs +│   ├── instances_test.json +│   └── instances_training.json +├── icdar2015 +│   ├── imgs +│   ├── instances_test.json +│   └── instances_training.json +├── icdar2017 +│   ├── imgs +│   ├── instances_training.json +│   └── instances_val.json +├── synthtext +│   ├── imgs +│   ├── instances_training.json +│   ├── instances_training.txt +│   └── instances_training.lmdb +``` +| Dataset | | Images | | | Annotation Files | | | Note | | +|:---------:|:-:|:--------------------------:|:-:|:--------------------------------------------:|:---------------------------------------:|:----------------------------------------:|:-:|:----:|---| +| | | | | training | validation | testing | | | | +| CTW1500 | | [link](https://github.com/Yuliang-Liu/Curve-Text-Detector) | | [instances_training.json](https://download.openmmlab.com/mmocr/data/ctw1500/instances_training.json) | - | [instances_test.json](https://download.openmmlab.com/mmocr/data/ctw1500/instances_test.json) | | | | +| ICDAR2015 | | [link](https://rrc.cvc.uab.es/?ch=4&com=downloads) | | [instances_training.json](https://download.openmmlab.com/mmocr/data/icdar2015/instances_training.json) | - | [instances_test.json](https://download.openmmlab.com/mmocr/data/icdar2015/instances_test.json) | | | | +| ICDAR2017 | | [link](https://rrc.cvc.uab.es/?ch=8&com=downloads) | | [instances_training.json](https://download.openmmlab.com/mmocr/data/icdar2017/instances_training.json) | [instances_val.json](https://openmmlab) | [instances_test.json](https://download.openmmlab.com/mmocr/data/icdar2017/instances_test.json) | | | | +| Synthtext | | [link](https://www.robots.ox.ac.uk/~vgg/data/scenetext/) | | [instances_training.json](https://download.openmmlab.com/mmocr/data/synthtext/instances_training.json) [instances_training.txt](https://download.openmmlab.com/mmocr/data/synthtext/instances_training.txt)|-| | | | + +- For `icdar2015`: + - Step1: Download `ch4_training_images.zip` and `ch4_test_images.zip` from this [link](https://rrc.cvc.uab.es/?ch=4&com=downloads) + - Step2: Download [instances_training.json](https://download.openmmlab.com/mmocr/data/icdar2015/instances_training.json) and [instances_test.json](https://download.openmmlab.com/mmocr/data/icdar2015/instances_test.json) + - Step3: + ```bash + mkdir icdar2015 && cd icdar2015 + mv /path/to/instances_training.json . + mv /path/to/instances_test.json . 
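+  # the json files above are the annotations; next, collect the raw images
+  # downloaded in Step1 under imgs/ (the /path/to/... paths are placeholders)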
+ + mkdir imgs && cd imgs + ln -s /path/to/ch4_training_images training + ln -s /path/to/ch4_test_images test + ``` + +## Text Recognition +**The structure of the text recognition dataset directory is organized as follows.** + +``` +├── mixture +│   ├── coco_text +│ │ ├── train_label.txt +│ │ ├── train_words +│   ├── icdar_2011 +│ │ ├── training_label.txt +│ │ ├── Challenge1_Training_Task3_Images_GT +│   ├── icdar_2013 +│ │ ├── train_label.txt +│ │ ├── test_label_1015.txt +│ │ ├── test_label_1095.txt +│ │ ├── Challenge2_Training_Task3_Images_GT +│ │ ├── Challenge2_Test_Task3_Images +│   ├── icdar_2015 +│ │ ├── train_label.txt +│ │ ├── test_label.txt +│ │ ├── ch4_training_word_images_gt +│ │ ├── ch4_test_word_images_gt +│   ├── III5K +│ │ ├── train_label.txt +│ │ ├── test_label.txt +│ │ ├── train +│ │ ├── test +│   ├── ct80 +│ │ ├── test_label.txt +│ │ ├── image +│   ├── svt +│ │ ├── test_label.txt +│ │ ├── image +│   ├── svtp +│ │ ├── test_label.txt +│ │ ├── image +│   ├── Synth90k +│ │ ├── shuffle_labels.txt +│ │ ├── mnt +│   ├── SynthText +│ │ ├── shuffle_labels.txt +│ │ ├── instances_train.txt +│ │ ├── synthtext +│   ├── SynthAdd +│ │ ├── label.txt +│ │ ├── SynthText_Add + +``` +| Dataset | | images | annotation file | annotation file | Note | +|:----------:|:-:|:---------------------------------------------------------------------------------:|:----------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------:|:----:| +|| | |training | test | | +| coco_text ||[link](https://rrc.cvc.uab.es/?ch=5&com=downloads) |[train_label.txt](https://download.openmmlab.com/mmocr/data/mixture/coco_text/train_label.txt) |- | | +| icdar_2011 ||[link](http://www.cvc.uab.es/icdar2011competition/?com=downloads) |[train_label.txt](https://download.openmmlab.com/mmocr/data/mixture/icdar_2015/train_label.txt) |- | | +| icdar_2013 | | [link](https://rrc.cvc.uab.es/?ch=2&com=downloads) | [train_label.txt](https://download.openmmlab.com/mmocr/data/mixture/icdar_2013/train_label.txt) | [test_label_1015.txt](https://download.openmmlab.com/mmocr/data/mixture/icdar_2013/test_label_1015.txt) | | +| icdar_2015 | | [link](https://rrc.cvc.uab.es/?ch=4&com=downloads) | [train_label.txt](https://download.openmmlab.com/mmocr/data/mixture/icdar_2015/train_label.txt) | [test_label.txt](https://download.openmmlab.com/mmocr/data/mixture/icdar_2015/test_label.txt) | | +| IIIT5K | | [link](http://cvit.iiit.ac.in/projects/SceneTextUnderstanding/IIIT5K.html) | [train_label.txt](https://download.openmmlab.com/mmocr/data/mixture/IIIT5K/train_label.txt) | [test_label.txt](https://download.openmmlab.com/mmocr/data/mixture/IIIT5K/test_label.txt) | | +| ct80 | | - |-|[test_label.txt](https://download.openmmlab.com/mmocr/data/mixture/ct80/test_label.txt)|| +| svt | | [link](http://www.iapr-tc11.org/mediawiki/index.php/The_Street_View_Text_Dataset) | - | [test_label.txt](https://download.openmmlab.com/mmocr/data/mixture/svt/test_label.txt) | | +| svtp | | - | - | [test_label.txt](https://download.openmmlab.com/mmocr/data/mixture/svtp/test_label.txt) | | +| Synth90k | | [link](https://www.robots.ox.ac.uk/~vgg/data/text/) | [shuffle_labels.txt](https://download.openmmlab.com/mmocr/data/mixture/Synth90k/shuffle_labels.txt) | - | | +| SynthText | | [link](https://www.robots.ox.ac.uk/~vgg/data/scenetext/) | 
[shuffle_labels.txt](https://download.openmmlab.com/mmocr/data/mixture/SynthText/shuffle_labels.txt) | [instances_train.txt](https://download.openmmlab.com/mmocr/data/mixture/SynthText/instances_train.txt) | - | | +| SynthAdd | | [link](https://download.openmmlab.com/mmocr/data/mixture/SynthAdd/SynthText_Add.zip) | [label.txt](https://download.openmmlab.com/mmocr/data/mixture/SynthAdd/label.txt)|- | | + +- For `icdar_2013`: + - Step1: Download `Challenge2_Test_Task3_Images.zip` and `Challenge2_Training_Task3_Images_GT.zip` from this [link](https://rrc.cvc.uab.es/?ch=2&com=downloads) + - Step2: Download [test_label_1015.txt](https://download.openmmlab.com/mmocr/data/mixture/icdar_2013/test_label_1015.txt) and [train_label.txt](https://download.openmmlab.com/mmocr/data/mixture/icdar_2013/train_label.txt) +- For `icdar_2015`: + - Step1: Download `ch4_training_word_images_gt.zip` and `ch4_test_word_images_gt.zip` from this [link](https://rrc.cvc.uab.es/?ch=4&com=downloads) + - Step2: Download [train_label.txt](https://download.openmmlab.com/mmocr/data/mixture/icdar_2015/train_label.txt) and [test_label.txt](https://download.openmmlab.com/mmocr/data/mixture/icdar_2015/test_label.txt) +- For `IIIT5K`: + - Step1: Download `IIIT5K-Word_V3.0.tar.gz` from this [link](http://cvit.iiit.ac.in/projects/SceneTextUnderstanding/IIIT5K.html) + - Step2: Download [train_label.txt](https://download.openmmlab.com/mmocr/data/mixture/IIIT5K/train_label.txt) and [test_label.txt](https://download.openmmlab.com/mmocr/data/mixture/IIIT5K/test_label.txt) +- For `svt`: + - Step1: Download `svt.zip` form this [link](http://www.iapr-tc11.org/mediawiki/index.php/The_Street_View_Text_Dataset) + - Step2: Download [test_label.txt](https://download.openmmlab.com/mmocr/data/mixture/svt/test_label.txt) +- For `ct80`: + - Step1: Download [test_label.txt](https://download.openmmlab.com/mmocr/data/mixture/ct80/test_label.txt) +- For `svtp`: + - Step1: Download [test_label.txt](https://download.openmmlab.com/mmocr/data/mixture/svtp/test_label.txt) +- For `coco_text`: + - Step1: Download from this [link](https://rrc.cvc.uab.es/?ch=5&com=downloads) + - Step2: Download [train_label.txt](https://download.openmmlab.com/mmocr/data/mixture/coco_text/train_label.txt) + +- For `Syn90k`: + - Step1: Download `mjsynth.tar.gz` from this [link](https://www.robots.ox.ac.uk/~vgg/data/text/) + - Step2: Download [shuffle_labels.txt](https://download.openmmlab.com/mmocr/data/mixture/Synth90k/shuffle_labels.txt) + - Step3: + ```bash + mkdir Syn90k && cd Syn90k + + mv /path/to/mjsynth.tar.gz . + + tar -xzf mjsynth.tar.gz + + mv /path/to/shuffle_labels.txt . + + # create soft link + cd /path/to/mmocr/data/mixture + + ln -s /path/to/Syn90k Syn90k + ``` +- For `SynthText`: + - Step1: Download `SynthText.zip` from this [link](https://www.robots.ox.ac.uk/~vgg/data/scenetext/) + - Step2: Download [shuffle_labels.txt](https://download.openmmlab.com/mmocr/data/mixture/SynthText/shuffle_labels.txt) + - Step3: Download [instances_train.txt](https://download.openmmlab.com/mmocr/data/mixture/SynthText/instances_train.txt) + - Step4: + ```bash + unzip SynthText.zip + + cd SynthText + + mv /path/to/shuffle_labels.txt . 
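+  # instances_train.txt from Step3 belongs in this folder as well
+  mv /path/to/instances_train.txt .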
+ + # create soft link + cd /path/to/mmocr/data/mixture + + ln -s /path/to/SynthText SynthText + ``` +- For `SynthAdd`: + - Step1: Download `SynthText_Add.zip` from this [link](https://download.openmmlab.com/mmocr/data/mixture/SynthAdd/SynthText_Add.zip) + - Step2: Download [label.txt](https://download.openmmlab.com/mmocr/data/mixture/SynthAdd/label.txt) + - Step3: + ```bash + mkdir SynthAdd && cd SynthAdd + + mv /path/to/SynthText_Add.zip . + + unzip SynthText_Add.zip + + mv /path/to/label.txt . + + # create soft link + cd /path/to/mmocr/data/mixture + + ln -s /path/to/SynthAdd SynthAdd + ``` diff --git a/docs/getting_started.md b/docs/getting_started.md new file mode 100644 index 00000000..81d4febf --- /dev/null +++ b/docs/getting_started.md @@ -0,0 +1,319 @@ +# Getting Started + +This page provides basic tutorials on the usage of MMOCR. +For the installation instructions, please see [INSTALL.md](INSTALL.md). + +## Inference with Pretrained Models + +We provide testing scripts to evaluate a full dataset, as well as some task-specific image demos. + +### Test a Single Image + +You can use the following command to test a single image with one GPU. + +```shell +python demo/image_demo.py ${TEST_IMG} ${CONFIG_FILE} ${CHECKPOINT_FILE} ${SAVE_PATH} [--imshow] [--device ${GPU_ID}] +``` + +If `--imshow` is specified, the demo will also show the image with OpenCV. For example: + +```shell +python demo/image_demo.py demo/demo_text_det.jpg configs/xxx.py xxx.pth demo/demo_text_det_pred.jpg +``` + +The predicted result will be saved as `demo/demo_text_det_pred.jpg`. + +### Test Multiple Images + +```shell +# for text detection +sh tools/test_imgs.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${IMG_ROOT_PATH} ${IMG_LIST} ${RESULTS_DIR} + +# for text recognition +sh tools/ocr_test_imgs.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${IMG_ROOT_PATH} ${IMG_LIST} ${RESULTS_DIR} +``` +It will save both the prediction results and visualized images to `${RESULTS_DIR}` + +### Test a Dataset + +MMOCR implements **distributed** testing with `MMDistributedDataParallel`. (Please refer to [datasets.md](datasets.md) to prepare your datasets) + +#### Test with Single/Multiple GPUs + +You can use the following command to test a dataset with single/multiple GPUs. + +```shell +./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [--eval ${EVAL_METRIC}] +``` +For example, + +```shell +./tools/dist_test.sh configs/example_config.py work_dirs/example_exp/example_model_20200202.pth 1 --eval hmean-iou +``` +##### Optional Arguments + +- `--eval`: Specify the evaluation metric. For text detection, the metric should be either 'hmean-ic13' or 'hmean-iou'. For text recognition, the metric should be 'acc'. + +#### Test with Slurm + +If you run MMOCR on a cluster managed with [Slurm](https://slurm.schedmd.com/), you can use the script `slurm_test.sh`. + +```shell +[GPUS=${GPUS}] ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${CHECKPOINT_FILE} [--eval ${EVAL_METRIC}] +``` +Here is an example of using 8 GPUs to test an example model on the 'dev' partition with job name 'test_job'. + +```shell +GPUS=8 ./tools/slurm_test.sh dev test_job configs/example_config.py work_dirs/example_exp/example_model_20200202.pth --eval hmean-iou +``` + +You can check [slurm_test.sh](https://github.com/open-mmlab/mmocr/blob/master/tools/slurm_test.sh) for full arguments and environment variables. + + +##### Optional Arguments + +- `--eval`: Specify the evaluation metric. 
For text detection, the metric should be either 'hmean-ic13' or 'hmean-iou'. For text recognition, the metric should be 'acc'.
+
+## Train a Model
+
+MMOCR implements **distributed** training with `MMDistributedDataParallel`. (Please refer to [datasets.md](datasets.md) to prepare your datasets.)
+
+All outputs (log files and checkpoints) will be saved to a working directory specified by `work_dir` in the config file.
+
+By default, we evaluate the model on the validation set after several iterations. You can change the evaluation interval by adding the interval argument in the training config as follows:
+```python
+evaluation = dict(interval=1, by_epoch=True)  # This evaluates the model per epoch.
+```
+
+### Train with Single/Multiple GPUs
+
+```shell
+./tools/dist_train.sh ${CONFIG_FILE} ${WORK_DIR} ${GPU_NUM} [optional arguments]
+```
+
+Optional Arguments:
+
+- `--no-validate` (**not suggested**): By default, the codebase will perform evaluation at every k-th iteration during training. To disable this behavior, use `--no-validate`.
+
+#### Train with a Toy Dataset
+We provide a toy dataset under `tests/data` so that you can train a toy model directly before the academic datasets are prepared.
+
+For example, to train a text recognition model with the `seg` method on the toy dataset:
+```
+./tools/dist_train.sh configs/textrecog/seg/seg_r31_1by16_fpnocr_toy_dataset.py work_dirs/seg 1
+```
+
+To train a text recognition model with the `sar` method on the toy dataset:
+```
+./tools/dist_train.sh configs/textrecog/sar/sar_r31_parallel_decoder_toy_dataset.py work_dirs/sar 1
+```
+
+### Train with Slurm
+
+If you run MMOCR on a cluster managed with [Slurm](https://slurm.schedmd.com/), you can use the script `slurm_train.sh`.
+
+```shell
+[GPUS=${GPUS}] ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${WORK_DIR}
+```
+
+Here is an example of using 8 GPUs to train a text detection model on the dev partition.
+
+```shell
+GPUS=8 ./tools/slurm_train.sh dev psenet-ic15 configs/textdet/psenet/psenet_r50_fpnf_sbn_1x_icdar2015.py /nfs/xxxx/psenet-ic15
+```
+
+You can check [slurm_train.sh](https://github.com/open-mmlab/mmocr/blob/master/tools/slurm_train.sh) for full arguments and environment variables.
+
+### Launch Multiple Jobs on a Single Machine
+
+If you launch multiple jobs on a single machine, e.g., 2 jobs of 4-GPU training on a machine with 8 GPUs,
+you need to specify different ports (29500 by default) for each job to avoid communication conflicts.
+
+If you use `dist_train.sh` to launch training jobs, you can set the ports in the command shell.
+
+```shell
+CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 ./tools/dist_train.sh ${CONFIG_FILE} 4
+CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 ./tools/dist_train.sh ${CONFIG_FILE} 4
+```
+
+If you launch training jobs with Slurm, you need to modify the config files to set different communication ports.
+
+In `config1.py`,
+```python
+dist_params = dict(backend='nccl', port=29500)
+```
+
+In `config2.py`,
+```python
+dist_params = dict(backend='nccl', port=29501)
+```
+
+Then you can launch two jobs with `config1.py` and `config2.py`.
+
+```shell
+CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR}
+CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR}
+```
+
+## Useful Tools
+
+We provide numerous useful tools under the `mmocr/tools` directory.
+ +### Publish a Model + +Before you upload a model to AWS, you may want to +(1) convert the model weights to CPU tensors, (2) delete the optimizer states and +(3) compute the hash of the checkpoint file and append the hash id to the filename. + +```shell +python tools/publish_model.py ${INPUT_FILENAME} ${OUTPUT_FILENAME} +``` + +E.g., + +```shell +python tools/publish_model.py work_dirs/psenet/latest.pth psenet_r50_fpnf_sbn_1x_20190801.pth +``` + +The final output filename will be `psenet_r50_fpnf_sbn_1x_20190801-{hash id}.pth`. + +## Customized Settings + +### Flexible Dataset +To support the tasks of `text detection`, `text recognition` and `key information extraction`, we have designed a new type of dataset which consists of `loader` and `parser` to load and parse different types of annotation files. +- **loader**: Load the annotation file. There are two types of loader, `HardDiskLoader` and `LmdbLoader` + - `HardDiskLoader`: Load `txt` format annotation file from hard disk to memory. + - `LmdbLoader`: Load `lmdb` format annotation file with lmdb backend, which is very useful for **extremely large** annotation files to avoid out-of-memory problem when ten or more GPUs are used, since each GPU will start multiple processes to load annotation file to memory. +- **parser**: Parse the annotation file line-by-line and return with `dict` format. There are two types of parser, `LineStrParser` and `LineJsonParser`. + - `LineStrParser`: Parse one line in ann file while treating it as a string and separating it to several parts by a `separator`. It can be used on tasks with simple annotation files such as text recognition where each line of the annotation files contains the `filename` and `label` attribute only. + - `LineJsonParser`: Parse one line in ann file while treating it as a json-string and using `json.loads` to convert it to `dict`. It can be used on tasks with complex annotation files such as text detection where each line of the annotation files contains multiple attributes (e.g. `filename`, `height`, `width`, `box`, `segmentation`, `iscrowd`, `category_id`, etc.). + +Here we show some examples of using different combination of `loader` and `parser`. + +#### Encoder-Decoder-Based Text Recognition Task +```python +dataset_type = 'OCRDataset' +img_prefix = 'tests/data/ocr_toy_dataset/imgs' +train_anno_file = 'tests/data/ocr_toy_dataset/label.txt' +train = dict( + type=dataset_type, + img_prefix=img_prefix, + ann_file=train_anno_file, + loader=dict( + type='HardDiskLoader', + repeat=10, + parser=dict( + type='LineStrParser', + keys=['filename', 'text'], + keys_idx=[0, 1], + separator=' ')), + pipeline=train_pipeline, + test_mode=False) +``` +You can check the content of the annotation file in `tests/data/ocr_toy_dataset/label.txt`. +The combination of `HardDiskLoader` and `LineStrParser` will return a dict for each file by calling `__getitem__`: `{'filename': '1223731.jpg', 'text': 'GRAND'}`. + +##### Optional Arguments: + +- `repeat`: The number of repeated lines in the annotation files. For example, if there are `10` lines in the annotation file, setting `repeat=10` will generate a corresponding annotation file with size `100`. + +If the annotation file is extreme large, you can convert it from txt format to lmdb format with the following command: +```python +python tools/data_converter/txt2lmdb.py -i ann_file.txt -o ann_file.lmdb +``` + +After that, you can use `LmdbLoader` in dataset like below. 
+```python +img_prefix = 'tests/data/ocr_toy_dataset/imgs' +train_anno_file = 'tests/data/ocr_toy_dataset/label.lmdb' +train = dict( + type=dataset_type, + img_prefix=img_prefix, + ann_file=train_anno_file, + loader=dict( + type='LmdbLoader', + repeat=10, + parser=dict( + type='LineStrParser', + keys=['filename', 'text'], + keys_idx=[0, 1], + separator=' ')), + pipeline=train_pipeline, + test_mode=False) +``` + +#### Segmentation-Based Text Recognition Task +```python +prefix = 'tests/data/ocr_char_ann_toy_dataset/' +train = dict( + type='OCRSegDataset', + img_prefix=prefix + 'imgs', + ann_file=prefix + 'instances_train.txt', + loader=dict( + type='HardDiskLoader', + repeat=10, + parser=dict( + type='LineJsonParser', + keys=['file_name', 'annotations', 'text'])), + pipeline=train_pipeline, + test_mode=True) +``` +You can check the content of the annotation file in `tests/data/ocr_char_ann_toy_dataset/instances_train.txt`. +The combination of `HardDiskLoader` and `LineJsonParser` will return a dict for each file by calling `__getitem__` each time: +```python +{"file_name": "resort_88_101_1.png", "annotations": [{"char_text": "F", "char_box": [11.0, 0.0, 22.0, 0.0, 12.0, 12.0, 0.0, 12.0]}, {"char_text": "r", "char_box": [23.0, 2.0, 31.0, 1.0, 24.0, 11.0, 16.0, 11.0]}, {"char_text": "o", "char_box": [33.0, 2.0, 43.0, 2.0, 36.0, 12.0, 25.0, 12.0]}, {"char_text": "m", "char_box": [46.0, 2.0, 61.0, 2.0, 53.0, 12.0, 39.0, 12.0]}, {"char_text": ":", "char_box": [61.0, 2.0, 69.0, 2.0, 63.0, 12.0, 55.0, 12.0]}], "text": "From:"} +``` + +#### Text Detection Task +```python +dataset_type = 'TextDetDataset' +img_prefix = 'tests/data/toy_dataset/imgs' +test_anno_file = 'tests/data/toy_dataset/instances_test.txt' +test = dict( + type=dataset_type, + img_prefix=img_prefix, + ann_file=test_anno_file, + loader=dict( + type='HardDiskLoader', + repeat=4, + parser=dict( + type='LineJsonParser', + keys=['file_name', 'height', 'width', 'annotations'])), + pipeline=test_pipeline, + test_mode=True) +``` +The results are generated in the same way as the segmentation-based text recognition task above. +You can check the content of the annotation file in `tests/data/toy_dataset/instances_test.txt`. 
+The combination of `HardDiskLoader` and `LineJsonParser` will return a dict for each file by calling `__getitem__`: +```python +{"file_name": "test/img_10.jpg", "height": 720, "width": 1280, "annotations": [{"iscrowd": 1, "category_id": 1, "bbox": [260.0, 138.0, 24.0, 20.0], "segmentation": [[261, 138, 284, 140, 279, 158, 260, 158]]}, {"iscrowd": 0, "category_id": 1, "bbox": [288.0, 138.0, 129.0, 23.0], "segmentation": [[288, 138, 417, 140, 416, 161, 290, 157]]}, {"iscrowd": 0, "category_id": 1, "bbox": [743.0, 145.0, 37.0, 18.0], "segmentation": [[743, 145, 779, 146, 780, 163, 746, 163]]}, {"iscrowd": 0, "category_id": 1, "bbox": [783.0, 129.0, 50.0, 26.0], "segmentation": [[783, 129, 831, 132, 833, 155, 785, 153]]}, {"iscrowd": 1, "category_id": 1, "bbox": [831.0, 133.0, 43.0, 23.0], "segmentation": [[831, 133, 870, 135, 874, 156, 835, 155]]}, {"iscrowd": 1, "category_id": 1, "bbox": [159.0, 204.0, 72.0, 15.0], "segmentation": [[159, 205, 230, 204, 231, 218, 159, 219]]}, {"iscrowd": 1, "category_id": 1, "bbox": [785.0, 158.0, 75.0, 21.0], "segmentation": [[785, 158, 856, 158, 860, 178, 787, 179]]}, {"iscrowd": 1, "category_id": 1, "bbox": [1011.0, 157.0, 68.0, 16.0], "segmentation": [[1011, 157, 1079, 160, 1076, 173, 1011, 170]]}]} +``` + + +### COCO-like Dataset +For text detection, you can also use an annotation file in a COCO format that is defined in [mmdet](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/coco.py): +```python +dataset_type = 'IcdarDataset' +prefix = 'tests/data/toy_dataset/' +test=dict( + type=dataset_type, + ann_file=prefix + 'instances_test.json', + img_prefix=prefix + 'imgs', + pipeline=test_pipeline) +``` +You can check the content of the annotation file in `tests/data/toy_dataset/instances_test.json` +- The icdar2015/2017 annotations have to be converted into the COCO format using `tools/data_converter/icdar_converter.py`: + + ```shell + python tools/data_converter/icdar_converter.py ${src_root_path} -o ${out_path} -d ${data_type} --split-list training validation test + ``` + +- The ctw1500 annotations have to be converted into the COCO format using `tools/data_converter/ctw1500_converter.py`: + + ```shell + python tools/data_converter/ctw1500_converter.py ${src_root_path} -o ${out_path} --split-list training test + ``` +``` diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 00000000..8242dffc --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,38 @@ +Welcome to MMOCR's documentation! +======================================= + +.. toctree:: + :maxdepth: 2 + :caption: Get Started + + install.md + getting_started.md + technical_details.md + contributing.md + +.. toctree:: + :maxdepth: 2 + :caption: Model Zoo + + modelzoo.md + textdet_models.md + textrecog_models.md + kie_models.md + +.. toctree:: + :maxdepth: 2 + :caption: Notes + + changelog.md + faq.md + +.. 
toctree::
+   :caption: API Reference
+
+   api.rst
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`search`
diff --git a/docs/install.md b/docs/install.md
new file mode 100644
index 00000000..7f2bbb8e
--- /dev/null
+++ b/docs/install.md
@@ -0,0 +1,231 @@
+# Installation
+
+## Prerequisites
+
+- Linux (Windows is not officially supported)
+- Python 3.7
+- PyTorch 1.5
+- torchvision 0.6.0
+- CUDA 10.1
+- NCCL 2
+- GCC 5.4.0 or higher
+- [mmcv](https://github.com/open-mmlab/mmcv) 1.2.6
+
+We have tested the following versions of OS and software:
+
+- OS: Ubuntu 16.04
+- CUDA: 10.1
+- GCC(G++): 5.4.0
+- mmcv 1.2.6
+- PyTorch 1.5
+- torchvision 0.6.0
+
+MMOCR depends on PyTorch and mmdetection v2.9.0.
+
+## Step-by-Step Installation Instructions
+
+a. Create a conda virtual environment and activate it.
+
+```shell
+conda create -n open-mmlab python=3.7 -y
+conda activate open-mmlab
+```
+
+b. Install PyTorch and torchvision following the [official instructions](https://pytorch.org/), e.g.,
+
+```shell
+conda install pytorch==1.5.0 torchvision==0.6.0 cudatoolkit=10.1 -c pytorch
+```
+Note: Make sure that your compilation CUDA version and runtime CUDA version match.
+You can check the supported CUDA version for precompiled packages on the [PyTorch website](https://pytorch.org/).
+
+`E.g. 1` If you have CUDA 10.1 installed under `/usr/local/cuda` and would like to install
+PyTorch 1.5, you need to install the prebuilt PyTorch with CUDA 10.1.
+
+```shell
+conda install pytorch cudatoolkit=10.1 torchvision -c pytorch
+```
+
+`E.g. 2` If you have CUDA 9.2 installed under `/usr/local/cuda` and would like to install
+PyTorch 1.3.1, you need to install the prebuilt PyTorch with CUDA 9.2.
+
+```shell
+conda install pytorch=1.3.1 cudatoolkit=9.2 torchvision=0.4.2 -c pytorch
+```
+
+If you build PyTorch from source instead of installing the prebuilt package,
+you can use more CUDA versions such as 9.0.
+
+c. Create a folder called `code` and clone the mmcv repository into it.
+
+```shell
+mkdir code
+cd code
+git clone https://github.com/open-mmlab/mmcv.git
+cd mmcv
+git checkout -b v1.2.6 v1.2.6
+pip install -r requirements.txt
+MMCV_WITH_OPS=1 pip install -v -e .
+```
+
+d. Clone the mmdetection repository into `code`, alongside the mmcv repo.
+
+```shell
+cd ..
+git clone https://github.com/open-mmlab/mmdetection.git
+cd mmdetection
+git checkout -b v2.9.0 v2.9.0
+pip install -r requirements.txt
+pip install -v -e .
+export PYTHONPATH=$(pwd):$PYTHONPATH
+```
+
+Note that we have tested mmdetection v2.9.0 only. Other versions might be incompatible.
+
+e. Clone the mmocr repository into `code`, alongside the mmcv and mmdetection repos.
+
+```shell
+cd ..
+git clone git@gitlab.sz.sensetime.com:kuangzhh/mmocr.git
+cd mmocr
+```
+
+f. Install build requirements and then install MMOCR.
+
+```shell
+pip install -r requirements.txt
+pip install -v -e .  # or "python setup.py build_ext --inplace"
+export PYTHONPATH=$(pwd):$PYTHONPATH
+```
+
+## Full Set-up Script
+
+Here is the full script for setting up mmocr with conda.
+ +```shell +conda create -n open-mmlab python=3.7 -y +conda activate open-mmlab + +# install latest pytorch prebuilt with the default prebuilt CUDA version (usually the latest) +conda install pytorch==1.5.0 torchvision==0.6.0 cudatoolkit=10.1 -c pytorch + +# install mmcv +mkdir code +cd code +git clone https://github.com/open-mmlab/mmcv.git +cd mmcv # code/mmcv +git checkout -b v1.2.6 v1.2.6 +pip install -r requirements.txt +MMCV_WITH_OPS=1 pip install -v -e . + +# install mmdetection +cd .. # exit to code +git clone https://github.com/open-mmlab/mmdetection.git +cd mmdetection # code/mmdetection +git checkout -b v2.9.0 v2.9.0 +pip install -r requirements.txt +pip install -v -e . +export PYTHONPATH=$(pwd):$PYTHONPATH + +# install mmocr +cd .. +git clone git@gitlab.sz.sensetime.com:kuangzhh/mmocr.git +cd mmocr # code/mmocr + +pip install -r requirements.txt +pip install -v -e . # or "python setup.py build_ext --inplace" +export PYTHONPATH=$(pwd):$PYTHONPATH +``` + +## Another option: Docker Image + +We provide a [Dockerfile](https://github.com/open-mmlab/mmocr/blob/master/docker/Dockerfile) to build an image. + +```shell +# build an image with PyTorch 1.5, CUDA 10.1 +docker build -t mmocr docker/ +``` + +Run it with + +```shell +docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmocr/data mmocr +``` + +## Prepare Datasets + +It is recommended to symlink the dataset root to `mmocr/data`. Please refer to [datasets.md](datasets.md) to prepare your datasets. +If your folder structure is different, you may need to change the corresponding paths in config files. + +The `mmocr` folder is organized as follows: +``` +mmocr +├── configs +│ ├── _base_ +│ ├── textdet +│ └── textrecog +├── data +│ ├── icdar2015 +│ ├── icdar2017 +│ └── synthtext +├── demo +│ ├── demo_text_det.jpg +│ ├── demo_text_recog.jpg +│ ├── image_demo.py +│ └── webcam_demo.py +├── docs +│ ├── CHANGELOG.md +│ ├── CODE_OF_CONDUCT.md +│ ├── conf.py +│ ├── CONTRIBUTING.md +│ ├── GETTING_STARTED.md +│ ├── index.rst +│ ├── INSTALL.md +│ ├── make.bat +│ ├── Makefile +│ ├── MODEL_ZOO.md +│ ├── requirements.txt +│ ├── res +│ └── TECHNICAL_DETAILS.md +├── mmocr +│ ├── core +│ ├── datasets +│ ├── __init__.py +│ ├── models +│ ├── utils +│ └── version.py +├── README.md +├── requirements +│ ├── build.txt +│ ├── optional.txt +│ ├── runtime.txt +│ └── tests.txt +├── requirements.txt +├── resources +│ ├── illustration.jpg +│ └── mmocr-logo.jpg +├── setup.cfg +├── setup.py +├── tests +│ ├── data +│ ├── test_dataset +│ ├── test_metrics +│ ├── test_models +│ ├── test_tools +│ └── test_utils +└── tools + ├── data_converter + ├── dist_train.sh + ├── dist_test.sh + ├── ocr_test_imgs.py + ├── ocr_test_imgs.sh + ├── publish_model.py + ├── slurm_test.sh + ├── slurm_train.sh + ├── test_imgs.py + ├── test_imgs.sh + ├── test.py + └── train.py +``` + +The icdar2017 official annotations can be converted into the coco format that mmocr supports using `code/mmocr/tools/data_converter/icdar_converter.py`. diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 00000000..8a3a0e25 --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,36 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. 
Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/merge_docs.sh b/docs/merge_docs.sh new file mode 100755 index 00000000..4d9a9b66 --- /dev/null +++ b/docs/merge_docs.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +sed -i '$a\\n' ../configs/kie/*/*.md +sed -i '$a\\n' ../configs/textdet/*/*.md +sed -i '$a\\n' ../configs/textrecog/*/*.md + +# gather models +cat ../configs/kie/*/*.md | sed "s/md###t/html#t/g" | sed "s/#/#&/" | sed '1i\# Kie Models' | sed 's/](\/docs\//](/g' | sed 's=](/=](https://github.com/open-mmlab/mmediting/tree/master/=g' >kie_models.md +cat ../configs/textdet/*/*.md | sed "s/md###t/html#t/g" | sed "s/#/#&/" | sed '1i\# Text Detection Models' | sed 's/](\/docs\//](/g' | sed 's=](/=](https://github.com/open-mmlab/mmediting/tree/master/=g' >textdet_models.md +cat ../configs/textrecog/*/*.md | sed "s/md###t/html#t/g" | sed "s/#/#&/" | sed '1i\# Text Recognition Models' | sed 's/](\/docs\//](/g' | sed 's=](/=](https://github.com/open-mmlab/mmediting/tree/master/=g' >textrecog_models.md diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 00000000..89fbf86c --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,4 @@ +recommonmark +sphinx +sphinx_markdown_tables +sphinx_rtd_theme diff --git a/docs/res/git-workflow-feature.png b/docs/res/git-workflow-feature.png new file mode 100644 index 0000000000000000000000000000000000000000..4d9f9083a8f39dfb99e9d1366ddf111d44879ea1 GIT binary patch literal 24491 zcmce7by!tf*Du{E-QBh62I=kwi4AN*noT!ID5=s?0s;m|N=Qlz8&E(RL_iQ}kVd!@ z&pGe=efN29{d0X3mTQeU=cqa67{9UBBYj;pd>m>VBqSt!4RvKhBqS6kaQy`f1N{8G zBNGIEVSB4vz>$z}NpAlkBR$KfL_)&cb~QHlGuP3Ua`5uthdO%M!}x5l8k(i zH`Kum=ErCcb9VKVW!`J;WM*`Alw~#()q&`EE5Te`)kAz?Mj^V!4k2z1l8(&sa*Q%T zQeXlPm>-le$iv+eE)^up{AXS%aD97QfSK_RiJzM+v%>9yjOIG}j7naK$XWh9SS0|g{I?AM7lTy-V4i;e8V*$dzaRY1I*tziqVx9m zb^oI#M+X6zJIn*j2M00<{!1Wl7cW0AxQo~SV(NcA{-cQ71%S4s^n6``(?H#oy&U{+ zr_@lEWd=|99bFxz#DpN?N-C0q5LF?Fn1rC9gpiPevWTdVsIY{Jgs_6(KXd%ov48oH zqn861|F0FLgoVVRVnV>CPlAyb#V2(RTS5M839v<`oUzGg&;x@K8PqEMABGLL`qOtN?4Ew zA|VBV{6(tc<>=}Z{NG6h1&xIuQlb)4!Xp2T6l@_!s2}wINbKk!<>ck-0R`UV>H&3z z33z)tGc*2MbxL0DUcO*qz@6}ay|1C9r0?tHL{ za(4y1fej@Eb-3NzKp-gKnfdRV|A}E?VM%eAlcOk~I8+kK2ax~{Az=>_=MxeWw-*!> zgE>MSfkXW@X`l-XO!<##|AW2$B7=K5`2|9KVG7P*8~g{~QDWpYbMa;5{{JkBz^yEQ8A#wSNBXzz{_UH_upmEeXfVtd{ zZ~qgN6hJ7s^}7GWqTD}`PFz(?L67pqPZnUrW2Y z{x6yMPpkc5_rDxj{|pA_;O*tV4q)(~e;wg4PvA|y;DBbpgnU3kG6PXi!8j;?ryw}& zp2_*m*Fg4Ap+&1XA@m{B#2C`o>fAVC_&68nICsz^nK8`JCI_MZAL4Lm9}bm&t-kZS zB&Jc3kD2iav)uzuL*=qb`{`oz_s(_BPP=<)@V&KB$2vq-F6-Fkv8MFtLg-ky)93#EJob?KqKZc;9>qz*73OKrKUbCgm*#dDFFyr$VzI zx@?92t~io(E~i!*?&i(EPqs{Jrwgs3dMfrD{me?4 zCck(F@9K{Els*j(#Y~n$tq09;qX)5*iZHyrUyM(TRtyno!stX3A)8ATu^?<~1^1nb@ipQ+=7lmW5a5|s z)c0gQ1B`XE34uJT2g0}>>a`OK)W1-9D6LBHp3|PGz};$c+CyU)*B-4;Ezd{K<9MiH z@FHC|eNx3!(J00zieJ-SRu3$#^Skq<%&3-Aj<5pXPp!M*bT=RgrHdoV3dd=3X1Tq~ zY9VFlsfKYjnHSBe@*MW*u5Z%)D6J?j3IwMD5l$aP*Un2`b~D<$xz!SCLJqxHr`sjs zbDQJ^iwo5GU-fC`jMZB)lwvJ>;4zooUYU^6pN~tz>{^X8b;||jbo3f}_@tmOSJAtU zsLm_zI2or!i>3Nc(76UCy+}ca+TZCsbsH)dY-wvQxF%y0ZJRo&Yx;rqoHH&L4@NGY 
zzDN;a>uMlhW374heQD(ILn_HiMvH)EzdFrjIBaR;Ra~7CQEZIiW(VgnS)|!sljea% zgqML%u{#w3vD2;f8L?Di@L;+`Q*R&DgqSca7O)rZyE0jxAK4C;8*O{; zKI0JAUFYDItQI*toLVJ_M9zN*-(`Dn^VGVPUC~X8;WA+MV0EnRd2-_GmD4%w7|6AG z`|C7+H($ zpTe}nx29BQn}*jX>vd@Ka-p>@8rW{r$}2X80=t3T$FZZf%g*1JZyrjiDiFm!5n4FH zUhW^33SPTj$_O_R0+i;>gAAvSzf9#ucT)5ld}Zr)r-i`c`>xO<#M;OA7Uvq|Q~_Jv zDiLDUX+q96_C^eKUGgu*5yvI>BArcacF4z9&)C=w0?;Q~JS-Vt-%73<^WplD(yi5j z?ld!_mjQh_QY=1$<8oIv8zbI5PZkTrq#OJ1T(l%|CrM(S2Zht7 z>^_9ha3JJ|2Fi~=pDBI$eK;yPsAg6%lJO$JyUgXS-~bcuRVt-0-Q&xf5;iu!3YXPH zrnxgql5-icBw;T%jHLXeT?e0DzDyiC_~QHs%+70RztFPV zGv|Mbvx?co0&jQNvVnNyS3S0DW$%`4@di@5zTuo4XX3;#T=cW}qG0G2{xS*_%RD4}Blz8*Z@6pYB+O_Gm z4r?*sHZ{OsRkSo6*Q*$@f}wz|kmH=2^djd)YcZqpw4#mNAVfd$QQBd>Yk2a=Rw=@E zIgQX`q;MnY#?VJ_K>a99l$>9vC(Dzu>hcZzCms)dpKEEIlHMRBvijHV+Ex%$rG3@MTyBrccgZvtEJF zxiv>ORPjCP$-$+_P2JUS&YA$53hcpNXI{36xp6fWvU@=m!K!pG7_k(k@J_-svsbe- zYCJtMbhb*PG`5U^pC)JxpssEdVEx`*dG|T?;8GGZ7p?Y<*>PV5M^Qk4YZwM#QY>)s zljzEedPS&3P}^5T|1UpJG86I*v5cc10--GjLc;Ma-cRIy@2bSBTi~dP!Q#j-{wewS6u^3wRG&ySBOAZ0ESzMN?l~4bh{|YeRO~R(S*)Ve?{H zq7h4|GOb-{Y7u12h|Yi z?E$91Nv@njCLX#-@x+`#tgq;-USMu#ahrZyf=O2>G$%7*DX|K8n^f+4f0UZZc zcQd;jVVk(=m327l)Yfqpas$~=*0;x$^(*TFyGc28Q0D>F*P0b}24PBLVxQ=+CunpA zXZP<#BdD~3<|Kyl3TR&O7g#oUEReWXaLj$R@)1e2R%efr#r$sjj$r6eF{ z!)f2^vNv0B?_NWAHWrxtv`ee27HRQGzdMJ^ox=24B`F$eU zq`XL8feP%eD$+1vDJq=Fc@w9mVx8`CSBh@>L@If38oRJyD8&mU<;2hFn~Xmpn|o|OFwt*?aBQZ0oGvgiGm_#V6sf)2N*q{y6<^P1S1x@$~f$=OHyKH#2-r$ zC8FhKN-BJ|)I-)qzAnj_voUvz4SW8sFxGW-qbvnofs)6iiUK78xzkMZm~5`_v#N{} z2~pKM^ad23=9W2{=>5d-zL8BbX`P~*HR=_f1j^Yn!=E2SM#Fjb zSr}2m?s2gtjRODScB#rn3PklnDbm-B17n4jaqL9*CCet8t@f>CoK~}OqY06`kWKGb z3DADju-qbvmPhi!TtW}y@*>BXH6+Y`;JQj?xs{f>##)h5gtGL=b(QA`_8c&d(}7Qm z+eUF#Iwd9XG`(SJ8Y$2&A~^!%ZOm}(~7f9nHlyq z3@@6?sL%cVnay0C=bPK(~1M*k>9Qz7}4~6X#PtDabtarG6qRPF<2;uBBB_{yMCa;#M8`YRgm3#trgWsbk zoI}#l++&Ro>jdK)Y8kN{@T>gE_95P{D}pE>J;95p8KEcss0{RDC5YQRl_ZXoaQDMw z%o9qLIdh`e+J43rUNY-0%uK^KM_wl1YCNpsg*b4y;@b?F2)6~3|IC=85HFgZOH_hT ziJe8W&%tbxsTn{X?~h?j7L6x%Ceh4@#;V62U?lVmwo79HX`KS48y@p0dp;*B_H|K> z1h%ul2gGEn3jNF~p&pGqa+uof%Ju_~BrgfPS@%6S_&&vcX!O`%_&a%juD;*-mv<5~ z{mVN!xf+nWe~jNIz?i7@(L#(U+h}57|AyeY<~^n{Z15qS=%=~eZk(pitR9$jh1r!G zO`^Mdfz8j8Z&I3ucB6*`9vnrbq3mJmqH!D2lB3(8SwL+x12i>?PFZw7CqiQ-FJL?uWj`uDV#%nS^b zR6eo}{mwiKhix_-h-uSw=G+7_Vs$IOXb}yhxY@?21Mv%P%8Pkc;eHe zT@jPWY}y$ZiKS?Kujf3-#J5sIq~6(`)v(&rb(daW{DP6cq)!vVU!jWY`mOirZ(<3A zm5q^@Y`fU*qKeg6-mAGY1UPkLHGgnoDPDuh%?4>9NzLl4>JGaZqWrxdca~__(}zng z6dtl(pJtongUxvMJHW{k%J0FChTUBWq^4^oA$h~>Dq_b_gV*fO+pGcVFQaJZ12ny? 
zi=kW_e+cY)s9`Iq|6=CLySPI2gr*=u{ds*F%c81w#S2imaiF?IH8sQ7I((fRKlit` zHjdM2{1`q#b#g@z*x&!hi zy9jet3`q}dB0b1FFfN+RgIp@65(EUj8U4%#Izma$@%WGQ{UUKEF^A%!Ty>AiCu_z;9>c2`B1HUTjF~I+kYm_oWr7<@ww> z3+-m+?GQU?H^Hzx(~V)XQ8h4Kp#*ix6Lbdj(E#zG-zJF4XGI6u9SljXnMW`A3x4v2 z@$4V-@)YWW69JhfiqCRuLg*E6u`}qhOkgX*wy*NF5`3~>;WY{R+2=0tx=Iit1XHIj z?CePP6Q?e28m4mnB;?UnIZAUTK+DY|q+q%pfJ&w|y zH$2eSk&UyohcrK?VuqAjx6Rgm-eUi412(i)T!SlwcJGU-wva)XDK`9mYVRHo-9@iR zO>J;#IBNT6{w7Wo+RtET<<}=AJ2Xsf3F02cXylwJluiD8Qn-hx3n$y4KemWipX413 zq*0451skS%eVZ9XL#~D?2K0zsuJ?q^>Re=WAJ0Kjaf+PQGH+6ro@?N*uIsU#5^xxD z`R0>+@unWw3F+{VCkDN(rt30I5PdlZ#U!C9p-489GVD^_1k%BbC!eBNhLFllM@Ke+ zb$#2o=`|s))Ui(vjK;P!m%oX~Ix`Hw({x`An$e8)q~naFfoQGmSJ+iaz2eKXA2k(n zW}zCtwLl>^JT}?8p{~n$WX2Y{y!2&(IxKKhKqT?SLk=wg|Nio01JAa~^!c|xOIqP} z$zex)i=cYs7;F3myYG;Hx%&guma5Jrj)pi->_<5cxlUDHbutIf2s(pE@{+q?h412J z@3R!VvdO@xJXw{^#QJNo6BE{f-`|sgFkc>dgcbKb!|v`?;X0Pux9#HBJDBaNP9Sjx z%3+CazKfGF*6a11W47nTp~XjnGCll&ZF5HF=xb2OZB4Olo!b_OTwyQuZ`%Docjp^y6p=H}E`}gTq?q{1;Q|XyFU7xBa}UY*^iB zU}?f-kvn~}k-}{fm-;taNp~%-FZheT>jh}$gSbyy&(-P6jwnae%>2?Ufy}qbX{wl% z)VqT6{Z@QX-L6L3c+Jg;5_`73=IPk`5E}AhS7gK8k|8DZ$36PCiq5y|R_|vUmS&de zH*B_^Tn>Dn8^I<#e(`NO-)Og5mtJvL41N^LtmG4r7|8O~(|!|6>3Hdp0}cv6k20USvZCtQk5P#lRsCE$y5oJm=%V*Ni& z+P7!OSC7PveF1WiAF3eM3nA4d4&Jes*~4g~v?m{Jkv%uBsHPT)D(lxWUir!AlB;WQim1drJZm zql0(z`(#VXI()B8JspM(pG@`-5Cr5iVv%5$O?C>(ld5r#u7sNfYv@FzpblxiuDMg1 z+VB1~v!3HT|3Pv;ye~&OIr!&nnauonlz*OE5GpM`9&k;qV>9xf4e)V-0;mz zUbHQBSUdgr1I z2yO_d=YMPY3n`*WlZB(x5uHAi4bme7?Eej{L`~z{4T!<7ADrUJQrVp-%u1s>w2KPA zo7%8jUzK~C@pIg=li{WJP(*dc&y$S4{5txo8qC=}LT` ztm@b@DpkiV&~z+Pssr92BzqQCwv1KpG5{jrb+=@fZKh$hYZV3RZo{XI3`6OLDKAU{ zzo9}C&2~_!5a+CByyGrgnzZapzuv}l0|f7;IFK}Qor0mJWkAGZTwxhkmcO>MJO3# zZM4VbFkR@qv1`YRqE*k?7o@OPsd+-$xo^!9^D(sM0wK3=wfi<0-y%QXO783e-~wEx;8yv&l>&&4(Ah>` z6#y~yeBszV*?_!uGn10S#)h848~hvvAj>na&GlQHv3XFY73isDBg?abecrP<>36K97PPi9_K>5q=wYxz~@4i>}*fBcGsssO4P0LZpy!l z<4MA)H*NICn!Rz%{5KgPGokt*rDXFaSmTg%;06O6ZBGdTkB&joMY|VKb^NN!`H2{L z>?3tHC*~+|%43aX?J{F=knli|8d{{CP3aV2g&h3I+gQ~V0#iGlO8`lKFSJK7A_z)= zsn%}<@^bUz0~u7P34#@fnwKx@%$vl9H2ll9XQ&;HUtiBY1+Zw)T11|+)rG{Xw<3ix zJ!CdWi`J@20M#1+(ap5_Cv3D0hx>K1_$c11ixs7U)%=v*Bz1Sc!`5RJlDGA2!JUS$ z(d%<6Ay}4MZu`8&al|kNO>!84)hcF+DGi5>v(aG$2LZn%xh_$xPwJEu*42~flI;g? zIZ*-X5C?L>Q1AGy3wzm8qR00+ z1){bRgf!D28LWUy_q_G8gk$`PeW2&&qX7F;fnhX<=DndghjkrW*AfXgp=At_g`2#CsN~eSk=HI@0@#6ApKj$kF+} zLv7@kfw+4hwX(!)Tk%W?4cs^m(Krr7=^WcaA0Vbm6v|iWUA3t{k!kWV{Joe%)g8mA ztuI6hLn3SzaNlk_v2LU*Z4az&`tc38TFwaf9!`DG-7Hz09;idKIqx_4U_|lQl~39w zXiwx`y@mF3p(;N6Viv)Wq{73Z%Vw3Zu!+*3+wd-PtP zQ9(X-6;=$epb!Ac3~FZpMlUe;+gj;%XLFmp^BR{5K0@i`>9=QUqokdh34(CDI(F&I zv%HM!1}S*jKaZ{@e0`6SD|9CU>)r#6FCPkbr*n)9(;c#FM+!%Yex-dKh_ZqQMIVfA z$W~8~!R&fZr@ei@ZSTzKs%Yq>?iu9CTF_G`QWd8mcAB{=X6FlumE)^*W;{8&_K>Ew3y zsS}j$emgqyQQ^jH?ZqL=3IS{>rSyIrntj&>$d6GCF-WOy9&o6)O5bq`+{juJa8uph zTGDL}%x-dF@%ES^)8pmG)I}lCGs_5W#?+&Jx~ty0`FMMKqI-KQqQHAh*Xy&>^3O^h zax|!RX+{WHI{d)8>vDn=CNdfCzAStCTeFd`t$tNAM+j|i#k~wv_5};?u`q2Rw zRQUT?+B9SS!x!^G{SQ}0-QA@^PHQ?Z{YxHsETHCt(lms1zzM){A3z2jL4Fq9RM~6R zpOGjDnsihKbf8EFweX9>=HI9+Lv?|DWH}0P7Tj+wTH!MehaYv4oCqQx-`1;;WvI5c z5iM@g(D+B=EHqA@S0%mNJ#}F-Gs+I(AUJHMHcZSm34B517IcjR_k3m$LeotI>S4kU zy!ZF4$6f7cE{6(iOT_@D4MH0T5mJ{Y6`IDaG%Q1fx=T!0F^_RU9Y=vQ@~fH(JL;>M zrvE1C5CAMLhPVzgQOE_QHCd;$^co(#?GSsUM9SBt)rc5j95* zV{jQ%@aOC51IFK3Qmi@-6)bG|uKCJdmqUmjh;NGe6hK@Kp3_z4gkma9XZeBY>b)F+*hLU^{^@?hGxYa zup;gO0xwiKj5Y=i+eJoqWO~e9)IwjUNxZvmzeSg63Ym$W(ZLZN#9N*+X7@vEDe)j? 
[binary data for docs/res/git-workflow-feature.png (24491 bytes) omitted]

diff --git a/docs/res/git-workflow-master-develop.png b/docs/res/git-workflow-master-develop.png
new file mode 100644
index 0000000000000000000000000000000000000000..624111c882bcd61add8fa6223960163e41f6b16c
GIT binary patch
[binary data for docs/res/git-workflow-master-develop.png (17594 bytes) omitted]

diff --git a/docs/stats.py b/docs/stats.py
new file mode 100755
index 00000000..ef337ba5
--- /dev/null
+++ b/docs/stats.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+import functools as func
+import glob
+import re
+from os.path import basename, splitext
+
+import numpy as np
+import titlecase
+
+
+def anchor(name):
+    return re.sub(r'-+', '-', re.sub(r'[^a-zA-Z0-9]', '-',
+                                     name.strip().lower())).strip('-')
+
+
+# Count algorithms
+
+files = sorted(glob.glob('*_models.md'))
+# files = sorted(glob.glob('docs/*_models.md'))
+
+stats = []
+
+for f in files:
+    with open(f, 'r') as content_file:
+        content = content_file.read()
+
+    # title
+    title = content.split('\n')[0].replace('#', '')
+
+    # count papers
+    papers = set((papertype, titlecase.titlecase(paper.lower().strip()))
+                 for (papertype, paper) in re.findall(
+                     r'\n\s*\[([A-Z]+?)\]\s*\n.*?\btitle\s*=\s*{(.*?)}',
+                     content, re.DOTALL))
+    # paper links
+    revcontent = '\n'.join(list(reversed(content.splitlines())))
+    paperlinks = {}
+    for _, p in papers:
+        print(p)
+        q = p.replace('\\', '\\\\').replace('?', '\\?')
+        paperlinks[p] = ' '.join(
+            (f'[⇨]({splitext(basename(f))[0]}.html#{anchor(paperlink)})'
+             for paperlink in re.findall(
+                 rf'\btitle\s*=\s*{{\s*{q}\s*}}.*?\n## (.*?)\s*[,;]?\s*\n',
+                 revcontent, re.DOTALL | re.IGNORECASE)))
+        print(' ', paperlinks[p])
+    paperlist = '\n'.join(
+        sorted(f' - [{t}] {x} ({paperlinks[x]})' for t, x in
papers)) + # count configs + configs = set(x.lower().strip() + for x in re.findall(r'https.*configs/.*\.py', content)) + + # count ckpts + ckpts = set(x.lower().strip() + for x in re.findall(r'https://download.*\.pth', content) + if 'mmaction' in x) + + statsmsg = f""" +## [{title}]({f}) + +* Number of checkpoints: {len(ckpts)} +* Number of configs: {len(configs)} +* Number of papers: {len(papers)} +{paperlist} + + """ + + stats.append((papers, configs, ckpts, statsmsg)) + +allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _, _ in stats]) +allconfigs = func.reduce(lambda a, b: a.union(b), [c for _, c, _, _ in stats]) +allckpts = func.reduce(lambda a, b: a.union(b), [c for _, _, c, _ in stats]) +msglist = '\n'.join(x for _, _, _, x in stats) + +papertypes, papercounts = np.unique([t for t, _ in allpapers], + return_counts=True) +countstr = '\n'.join( + [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) + +modelzoo = f""" +# Overview + +* Number of checkpoints: {len(allckpts)} +* Number of configs: {len(allconfigs)} +* Number of papers: {len(allpapers)} +{countstr} + +For supported datasets, see [datasets overview](datasets.md). + +{msglist} +""" + +with open('modelzoo.md', 'w') as f: + f.write(modelzoo) diff --git a/docs/technical_details.md b/docs/technical_details.md new file mode 100644 index 00000000..91b0cfb9 --- /dev/null +++ b/docs/technical_details.md @@ -0,0 +1,226 @@ +# Technical Details + +In this section, we will introduce the main units of training a detector: +data pipeline, model and iteration pipeline. + +## Data pipeline + +Following typical conventions, we use `Dataset` and `DataLoader` for data loading +with multiple workers. `Dataset` returns a dict of data items corresponding +the arguments of models' forward method. +Since the data in object detection may not be the same size (image size, gt bbox size, etc.), +we introduce a new `DataContainer` type in MMCV to help collect and distribute +data of different size. +See [here](https://github.com/open-mmlab/mmcv/blob/master/mmcv/parallel/data_container.py) for more details. + +The data preparation pipeline and the dataset is decomposed. Usually a dataset +defines how to process the annotations and a data pipeline defines all the steps to prepare a data dict. +A pipeline consists of a sequence of operations. Each operation takes a dict as input and also output a dict for the next transform. + +We present a classical pipeline in the following figure. The blue blocks are pipeline operations. With the pipeline going on, each operator can add new keys (marked as green) to the result dict or update the existing keys (marked as orange). +![pipeline figure](../demo/data_pipeline.png) + +The operations are categorized into data loading, pre-processing, formatting and test-time augmentation. + +Here is an pipeline example for Faster R-CNN. 
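+Before the full example, note that a pipeline operation is simply a callable
+that accepts the result dict and returns it, possibly with new or updated
+keys, so a custom step needs very little code. A minimal, hypothetical sketch
+(`AddImageArea` and the `img_area` key are illustrative only and not part of
+this patch):
+
+```python
+from mmdet.datasets.builder import PIPELINES
+
+
+@PIPELINES.register_module()
+class AddImageArea:
+    """Toy transform: record the image area for later pipeline steps."""
+
+    def __call__(self, results):
+        h, w = results['img_shape'][:2]
+        results['img_area'] = h * w  # add a new key to the result dict
+        return results
+```
+
+The Faster R-CNN pipelines below chain such operations purely through the config: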
+```python +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +``` + +For each operation, we list the related dict fields that are added/updated/removed. + +### Data loading + +`LoadImageFromFile` +- add: img, img_shape, ori_shape + +`LoadAnnotations` +- add: gt_bboxes, gt_bboxes_ignore, gt_labels, gt_masks, gt_semantic_seg, bbox_fields, mask_fields + +`LoadProposals` +- add: proposals + +### Pre-processing + +`Resize` +- add: scale, scale_idx, pad_shape, scale_factor, keep_ratio +- update: img, img_shape, *bbox_fields, *mask_fields, *seg_fields + +`RandomFlip` +- add: flip +- update: img, *bbox_fields, *mask_fields, *seg_fields + +`Pad` +- add: pad_fixed_size, pad_size_divisor +- update: img, pad_shape, *mask_fields, *seg_fields + +`RandomCrop` +- update: img, pad_shape, gt_bboxes, gt_labels, gt_masks, *bbox_fields + +`Normalize` +- add: img_norm_cfg +- update: img + +`SegRescale` +- update: gt_semantic_seg + +`PhotoMetricDistortion` +- update: img + +`Expand` +- update: img, gt_bboxes + +`MinIoURandomCrop` +- update: img, gt_bboxes, gt_labels + +`Corrupt` +- update: img + +### Formatting + +`ToTensor` +- update: specified by `keys`. + +`ImageToTensor` +- update: specified by `keys`. + +`Transpose` +- update: specified by `keys`. + +`ToDataContainer` +- update: specified by `fields`. + +`DefaultFormatBundle` +- update: img, proposals, gt_bboxes, gt_bboxes_ignore, gt_labels, gt_masks, gt_semantic_seg + +`Collect` +- add: img_meta (the keys of img_meta is specified by `meta_keys`) +- remove: all other keys except for those specified by `keys` + +### Test time augmentation + +`MultiScaleFlipAug` + +## Model + +In MMDetection, model components are basically categorized as 4 types. + +- backbone: usually a FCN network to extract feature maps, e.g., ResNet. +- neck: the part between backbones and heads, e.g., FPN, ASPP. +- head: the part for specific tasks, e.g., bbox prediction and mask prediction. +- roi extractor: the part for extracting features from feature maps, e.g., RoI Align. + +We also write implement some general detection pipelines with the above components, +such as `SingleStageDetector` and `TwoStageDetector`. + +### Build a model with basic components + +Following some basic pipelines (e.g., two-stage detectors), the model structure +can be customized through config files with no pains. + +If we want to implement some new components, e.g, the path aggregation +FPN structure in [Path Aggregation Network for Instance Segmentation](https://arxiv.org/abs/1803.01534), there are two things to do. + +1. create a new file in `mmdet/models/necks/pafpn.py`. 
+ + ```python + from ..registry import NECKS + + @NECKS.register + class PAFPN(nn.Module): + + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + add_extra_convs=False): + pass + + def forward(self, inputs): + # implementation is ignored + pass + ``` + +2. Import the module in `mmdet/models/necks/__init__.py`. + + ```python + from .pafpn import PAFPN + ``` + +2. modify the config file from + + ```python + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5) + ``` + + to + + ```python + neck=dict( + type='PAFPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5) + ``` + +We will release more components (backbones, necks, heads) for research purpose. + +### Write a new model + +To write a new detection pipeline, you need to inherit from `BaseDetector`, +which defines the following abstract methods. + +- `extract_feat()`: given an image batch of shape (n, c, h, w), extract the feature map(s). +- `forward_train()`: forward method of the training mode +- `simple_test()`: single scale testing without augmentation +- `aug_test()`: testing with augmentation (multi-scale, flip, etc.) + +[TwoStageDetector](https://github.com/hellock/mmdetection/blob/master/mmdet/models/detectors/two_stage.py) +is a good example which shows how to do that. + +## Iteration pipeline + +We adopt distributed training for both single machine and multiple machines. +Supposing that the server has 8 GPUs, 8 processes will be started and each process runs on a single GPU. + +Each process keeps an isolated model, data loader, and optimizer. +Model parameters are only synchronized once at the beginning. +After a forward and backward pass, gradients will be allreduced among all GPUs, +and the optimizer will update model parameters. +Since the gradients are allreduced, the model parameter stays the same for all processes after the iteration. + +## Other information + +For more information, please refer to our [technical report](https://arxiv.org/abs/1906.07155). diff --git a/mmocr/datasets/pipelines/dbnet_transforms.py b/mmocr/datasets/pipelines/dbnet_transforms.py new file mode 100644 index 00000000..1975793a --- /dev/null +++ b/mmocr/datasets/pipelines/dbnet_transforms.py @@ -0,0 +1,272 @@ +import cv2 +import imgaug +import imgaug.augmenters as iaa +import numpy as np + +from mmdet.core.mask import PolygonMasks +from mmdet.datasets.builder import PIPELINES + + +class AugmenterBuilder: + """Build imgaug object according ImgAug argmentations.""" + + def __init__(self): + pass + + def build(self, args, root=True): + if args is None: + return None + elif isinstance(args, (int, float, str)): + return args + elif isinstance(args, list): + if root: + sequence = [self.build(value, root=False) for value in args] + return iaa.Sequential(sequence) + arg_list = [self.to_tuple_if_list(a) for a in args[1:]] + return getattr(iaa, args[0])(*arg_list) + elif isinstance(args, dict): + if 'cls' in args: + cls = getattr(iaa, args['cls']) + return cls( + **{ + k: self.to_tuple_if_list(v) + for k, v in args.items() if not k == 'cls' + }) + else: + return { + key: self.build(value, root=False) + for key, value in args.items() + } + else: + raise RuntimeError('unknown augmenter arg: ' + str(args)) + + def to_tuple_if_list(self, obj): + if isinstance(obj, list): + return tuple(obj) + return obj + + +@PIPELINES.register_module() +class ImgAug: + """A wrapper to use imgaug https://github.com/aleju/imgaug. 
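+    The augmenter is built once from the args list in __init__ and is made
+    deterministic for each sample, so polygon and bbox annotations are
+    transformed consistently with the image.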
+ + Args: + args ([list[list|dict]]): The argumentation list. For details, please + refer to imgaug document. Take args=[['Fliplr', 0.5], + dict(cls='Affine', rotate=[-10, 10]), ['Resize', [0.5, 3.0]]] as an + example. The args horizontally flip images with probability 0.5, + followed by random rotation with angles in range [-10, 10], and + resize with an independent scale in range [0.5, 3.0] for each + side of images. + """ + + def __init__(self, args=None): + self.augmenter_args = args + self.augmenter = AugmenterBuilder().build(self.augmenter_args) + + def __call__(self, results): + # img is bgr + image = results['img'] + aug = None + shape = image.shape + + if self.augmenter: + aug = self.augmenter.to_deterministic() + results['img'] = aug.augment_image(image) + results['img_shape'] = results['img'].shape + results['flip'] = 'unknown' # it's unknown + results['flip_direction'] = 'unknown' # it's unknown + target_shape = results['img_shape'] + + self.may_augment_annotation(aug, shape, target_shape, results) + + return results + + def may_augment_annotation(self, aug, shape, target_shape, results): + if aug is None: + return results + for key in results['mask_fields']: + # augment polygon mask + masks = [] + for mask in results[key]: + masks.append( + [self.may_augment_poly(aug, shape, target_shape, mask[0])]) + if len(masks) > 0: + results[key] = PolygonMasks(masks, *target_shape[:2]) + + for key in results['bbox_fields']: + # augment bbox + bboxes = [] + for bbox in results[key]: + bbox = self.may_augment_poly(aug, shape, target_shape, bbox) + bboxes.append(bbox) + results[key] = np.zeros(0) + if len(bboxes) > 0: + results[key] = np.stack(bboxes) + + return results + + def may_augment_poly(self, aug, img_shape, target_shape, poly): + # poly n x 2 + poly = poly.reshape(-1, 2) + keypoints = [imgaug.Keypoint(p[0], p[1]) for p in poly] + keypoints = aug.augment_keypoints( + [imgaug.KeypointsOnImage(keypoints, shape=img_shape)])[0].keypoints + poly = [[p.x, p.y] for p in keypoints] + poly = np.array(poly).flatten() + return poly + + def __repr__(self): + repr_str = self.__class__.__name__ + return repr_str + + +@PIPELINES.register_module() +class EastRandomCrop: + + def __init__(self, + target_size=(640, 640), + max_tries=10, + min_crop_side_ratio=0.1): + self.target_size = target_size + self.max_tries = max_tries + self.min_crop_side_ratio = min_crop_side_ratio + + def __call__(self, results): + # sampling crop + # crop image, boxes, masks + img = results['img'] + crop_x, crop_y, crop_w, crop_h = self.crop_area( + img, results['gt_masks']) + scale_w = self.target_size[0] / crop_w + scale_h = self.target_size[1] / crop_h + scale = min(scale_w, scale_h) + h = int(crop_h * scale) + w = int(crop_w * scale) + padimg = np.zeros( + (self.target_size[1], self.target_size[0], img.shape[2]), + img.dtype) + padimg[:h, :w] = cv2.resize( + img[crop_y:crop_y + crop_h, crop_x:crop_x + crop_w], (w, h)) + + # for bboxes + for key in results['bbox_fields']: + lines = [] + for box in results[key]: + box = box.reshape(2, 2) + poly = ((box - (crop_x, crop_y)) * scale) + if not self.is_poly_outside_rect(poly, 0, 0, w, h): + lines.append(poly.flatten()) + results[key] = np.array(lines) + # for masks + for key in results['mask_fields']: + polys = [] + polys_label = [] + for poly in results[key]: + poly = np.array(poly).reshape(-1, 2) + poly = ((poly - (crop_x, crop_y)) * scale) + if not self.is_poly_outside_rect(poly, 0, 0, w, h): + polys.append([poly]) + polys_label.append(0) + results[key] = PolygonMasks(polys, 
*self.target_size) + if key == 'gt_masks': + results['gt_labels'] = polys_label + + results['img'] = padimg + results['img_shape'] = padimg.shape + + return results + + def is_poly_in_rect(self, poly, x, y, w, h): + poly = np.array(poly) + if poly[:, 0].min() < x or poly[:, 0].max() > x + w: + return False + if poly[:, 1].min() < y or poly[:, 1].max() > y + h: + return False + return True + + def is_poly_outside_rect(self, poly, x, y, w, h): + poly = np.array(poly).reshape(-1, 2) + if poly[:, 0].max() < x or poly[:, 0].min() > x + w: + return True + if poly[:, 1].max() < y or poly[:, 1].min() > y + h: + return True + return False + + def split_regions(self, axis): + regions = [] + min_axis = 0 + for i in range(1, axis.shape[0]): + if axis[i] != axis[i - 1] + 1: + region = axis[min_axis:i] + min_axis = i + regions.append(region) + return regions + + def random_select(self, axis, max_size): + xx = np.random.choice(axis, size=2) + xmin = np.min(xx) + xmax = np.max(xx) + xmin = np.clip(xmin, 0, max_size - 1) + xmax = np.clip(xmax, 0, max_size - 1) + return xmin, xmax + + def region_wise_random_select(self, regions, max_size): + selected_index = list(np.random.choice(len(regions), 2)) + selected_values = [] + for index in selected_index: + axis = regions[index] + xx = int(np.random.choice(axis, size=1)) + selected_values.append(xx) + xmin = min(selected_values) + xmax = max(selected_values) + return xmin, xmax + + def crop_area(self, img, polys): + h, w, _ = img.shape + h_array = np.zeros(h, dtype=np.int32) + w_array = np.zeros(w, dtype=np.int32) + for points in polys: + points = np.round( + points, decimals=0).astype(np.int32).reshape(-1, 2) + minx = np.min(points[:, 0]) + maxx = np.max(points[:, 0]) + w_array[minx:maxx] = 1 + miny = np.min(points[:, 1]) + maxy = np.max(points[:, 1]) + h_array[miny:maxy] = 1 + # ensure the cropped area not across a text + h_axis = np.where(h_array == 0)[0] + w_axis = np.where(w_array == 0)[0] + + if len(h_axis) == 0 or len(w_axis) == 0: + return 0, 0, w, h + + h_regions = self.split_regions(h_axis) + w_regions = self.split_regions(w_axis) + + for i in range(self.max_tries): + if len(w_regions) > 1: + xmin, xmax = self.region_wise_random_select(w_regions, w) + else: + xmin, xmax = self.random_select(w_axis, w) + if len(h_regions) > 1: + ymin, ymax = self.region_wise_random_select(h_regions, h) + else: + ymin, ymax = self.random_select(h_axis, h) + + if xmax - xmin < self.min_crop_side_ratio * w or \ + ymax - ymin < self.min_crop_side_ratio * h: + # area too small + continue + num_poly_in_rect = 0 + for poly in polys: + if not self.is_poly_outside_rect(poly, xmin, ymin, xmax - xmin, + ymax - ymin): + num_poly_in_rect += 1 + break + + if num_poly_in_rect > 0: + return xmin, ymin, xmax - xmin, ymax - ymin + + return 0, 0, w, h diff --git a/mmocr/datasets/pipelines/textdet_targets/dbnet_targets.py b/mmocr/datasets/pipelines/textdet_targets/dbnet_targets.py new file mode 100644 index 00000000..fcbe69bb --- /dev/null +++ b/mmocr/datasets/pipelines/textdet_targets/dbnet_targets.py @@ -0,0 +1,238 @@ +import cv2 +import numpy as np +import pyclipper +from shapely.geometry import Polygon + +from mmdet.core import BitmapMasks +from mmdet.datasets.builder import PIPELINES +from . 
import BaseTextDetTargets + + +@PIPELINES.register_module() +class DBNetTargets(BaseTextDetTargets): + """Generate gt shrinked text, gt threshold map, and their effective region + masks to learn DBNet: Real-time Scene Text Detection with Differentiable + Binarization [https://arxiv.org/abs/1911.08947]. This was partially adapted + from https://github.com/MhLiao/DB. + + Args: + shrink_ratio (float): The area shrinked ratio between text + kernels and their text masks. + thr_min (float): The minimum value of the threshold map. + thr_max (float): The maximum value of the threshold map. + min_short_size (int): The minimum size of polygon below which + the polygon is invalid. + """ + + def __init__(self, + shrink_ratio=0.4, + thr_min=0.3, + thr_max=0.7, + min_short_size=8): + super().__init__() + self.shrink_ratio = shrink_ratio + self.thr_min = thr_min + self.thr_max = thr_max + self.min_short_size = min_short_size + + def find_invalid(self, results): + """Find invalid polygons. + + Args: + results (dict): The dict containing gt_mask. + + Returns: + ignore_tags (list[bool]): The indicators for ignoring polygons. + """ + texts = results['gt_masks'].masks + ignore_tags = [False] * len(texts) + + for inx, text in enumerate(texts): + if self.invalid_polygon(text[0]): + ignore_tags[inx] = True + return ignore_tags + + def invalid_polygon(self, poly): + """Judge the input polygon is invalid or not. It is invalid if its area + smaller than 1 or the shorter side of its minimum bounding box smaller + than min_short_size. + + Args: + poly (ndarray): The polygon boundary point sequence. + + Returns: + True/False (bool): Whether the polygon is invalid. + """ + area = self.polygon_area(poly) + if abs(area) < 1: + return True + short_size = min(self.polygon_size(poly)) + if short_size < self.min_short_size: + return True + + return False + + def ignore_texts(self, results, ignore_tags): + """Ignore gt masks and gt_labels while padding gt_masks_ignore in + results given ignore_tags. + + Args: + results (dict): Result for one image. + ignore_tags (list[int]): Indicate whether to ignore its + corresponding ground truth text. + + Returns: + results (dict): Results after filtering. + """ + flag_len = len(ignore_tags) + assert flag_len == len(results['gt_masks'].masks) + assert flag_len == len(results['gt_labels']) + + results['gt_masks_ignore'].masks += [ + mask for i, mask in enumerate(results['gt_masks'].masks) + if ignore_tags[i] + ] + results['gt_masks'].masks = [ + mask for i, mask in enumerate(results['gt_masks'].masks) + if not ignore_tags[i] + ] + results['gt_labels'] = np.array([ + mask for i, mask in enumerate(results['gt_labels']) + if not ignore_tags[i] + ]) + + return results + + def generate_thr_map(self, img_size, polygons): + """Generate threshold map. + + Args: + img_size (tuple(int)): The image size (h,w) + polygons (list(ndarray)): The polygon list. + + Returns: + thr_map (ndarray): The generated threshold map. + thr_mask (ndarray): The effective mask of threshold map. + """ + thr_map = np.zeros(img_size, dtype=np.float32) + thr_mask = np.zeros(img_size, dtype=np.uint8) + + for polygon in polygons: + self.draw_border_map(polygon[0], thr_map, mask=thr_mask) + thr_map = thr_map * (self.thr_max - self.thr_min) + self.thr_min + + return thr_map, thr_mask + + def draw_border_map(self, polygon, canvas, mask): + """Generate threshold map for one polygon. + + Args: + polygon(ndarray): The polygon boundary ndarray. + canvas(ndarray): The generated threshold map. 
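+                The border map values are written into it in place.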
+ mask(ndarray): The generated threshold mask. + """ + polygon = polygon.reshape(-1, 2) + assert polygon.ndim == 2 + assert polygon.shape[1] == 2 + + polygon_shape = Polygon(polygon) + distance = polygon_shape.area * \ + (1 - np.power(self.shrink_ratio, 2)) / polygon_shape.length + subject = [tuple(p) for p in polygon] + padding = pyclipper.PyclipperOffset() + padding.AddPath(subject, pyclipper.JT_ROUND, + pyclipper.ET_CLOSEDPOLYGON) + padded_polygon = padding.Execute(distance) + if len(padded_polygon) > 0: + padded_polygon = np.array(padded_polygon[0]) + else: + print(f'padding {polygon} with {distance} gets {padded_polygon}') + padded_polygon = polygon.copy().astype(np.int32) + cv2.fillPoly(mask, [padded_polygon.astype(np.int32)], 1.0) + + x_min = padded_polygon[:, 0].min() + x_max = padded_polygon[:, 0].max() + y_min = padded_polygon[:, 1].min() + y_max = padded_polygon[:, 1].max() + width = x_max - x_min + 1 + height = y_max - y_min + 1 + + polygon[:, 0] = polygon[:, 0] - x_min + polygon[:, 1] = polygon[:, 1] - y_min + + xs = np.broadcast_to( + np.linspace(0, width - 1, num=width).reshape(1, width), + (height, width)) + ys = np.broadcast_to( + np.linspace(0, height - 1, num=height).reshape(height, 1), + (height, width)) + + distance_map = np.zeros((polygon.shape[0], height, width), + dtype=np.float32) + for i in range(polygon.shape[0]): + j = (i + 1) % polygon.shape[0] + absolute_distance = self.point2line(xs, ys, polygon[i], polygon[j]) + distance_map[i] = np.clip(absolute_distance / distance, 0, 1) + distance_map = distance_map.min(axis=0) + + x_min_valid = min(max(0, x_min), canvas.shape[1] - 1) + x_max_valid = min(max(0, x_max), canvas.shape[1] - 1) + y_min_valid = min(max(0, y_min), canvas.shape[0] - 1) + y_max_valid = min(max(0, y_max), canvas.shape[0] - 1) + canvas[y_min_valid:y_max_valid + 1, + x_min_valid:x_max_valid + 1] = np.fmax( + 1 - distance_map[y_min_valid - y_min:y_max_valid - y_max + + height, x_min_valid - x_min:x_max_valid - + x_max + width], + canvas[y_min_valid:y_max_valid + 1, + x_min_valid:x_max_valid + 1]) + + def generate_targets(self, results): + """Generate the gt targets for DBNet. + + Args: + results (dict): The input result dictionary. + + Returns: + results (dict): The output result dictionary. + """ + assert isinstance(results, dict) + polygons = results['gt_masks'].masks + if 'bbox_fields' in results: + results['bbox_fields'].clear() + ignore_tags = self.find_invalid(results) + h, w, _ = results['img_shape'] + + gt_shrink, ignore_tags = self.generate_kernels((h, w), + polygons, + self.shrink_ratio, + ignore_tags=ignore_tags) + + results = self.ignore_texts(results, ignore_tags) + + # polygons and polygons_ignore reassignment. 
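+        # ignore_texts() above filtered the masks, so fetch the kept and the
+        # ignored polygon lists again before building the targets.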
+ polygons = results['gt_masks'].masks + polygons_ignore = results['gt_masks_ignore'].masks + + gt_shrink_mask = self.generate_effective_mask((h, w), polygons_ignore) + + gt_thr, gt_thr_mask = self.generate_thr_map((h, w), polygons) + + results['mask_fields'].clear() # rm gt_masks encoded by polygons + results.pop('gt_labels', None) + results.pop('gt_masks', None) + results.pop('gt_bboxes', None) + results.pop('gt_bboxes_ignore', None) + + mapping = { + 'gt_shrink': gt_shrink, + 'gt_shrink_mask': gt_shrink_mask, + 'gt_thr': gt_thr, + 'gt_thr_mask': gt_thr_mask + } + for key, value in mapping.items(): + value = value if isinstance(value, list) else [value] + results[key] = BitmapMasks(value, h, w) + results['mask_fields'].append(key) + + return results diff --git a/mmocr/models/textdet/dense_heads/db_head.py b/mmocr/models/textdet/dense_heads/db_head.py new file mode 100644 index 00000000..f32b296c --- /dev/null +++ b/mmocr/models/textdet/dense_heads/db_head.py @@ -0,0 +1,86 @@ +import torch +import torch.nn as nn + +from mmdet.models.builder import HEADS, build_loss +from .head_mixin import HeadMixin + + +@HEADS.register_module() +class DBHead(HeadMixin, nn.Module): + """The class for DBNet head. + + This was partially adapted from https://github.com/MhLiao/DB + """ + + def __init__(self, + in_channels, + with_bias=False, + decoding_type='db', + text_repr_type='poly', + downsample_ratio=1.0, + loss=dict(type='DBLoss'), + train_cfg=None, + test_cfg=None): + """Initialization. + + Args: + in_channels (int): The number of input channels of the db head. + decoding_type (str): The type of decoder for dbnet. + text_repr_type (str): Boundary encoding type 'poly' or 'quad'. + downsample_ratio (float): The downsample ratio of ground truths. + loss (dict): The type of loss for dbnet. + """ + super().__init__() + + assert isinstance(in_channels, int) + + self.in_channels = in_channels + self.text_repr_type = text_repr_type + self.loss_module = build_loss(loss) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.downsample_ratio = downsample_ratio + self.decoding_type = decoding_type + + self.binarize = nn.Sequential( + nn.Conv2d( + in_channels, in_channels // 4, 3, bias=with_bias, padding=1), + nn.BatchNorm2d(in_channels // 4), nn.ReLU(inplace=True), + nn.ConvTranspose2d(in_channels // 4, in_channels // 4, 2, 2), + nn.BatchNorm2d(in_channels // 4), nn.ReLU(inplace=True), + nn.ConvTranspose2d(in_channels // 4, 1, 2, 2), nn.Sigmoid()) + + self.threshold = self._init_thr(in_channels) + + def init_weights(self): + self.binarize.apply(self.init_class_parameters) + self.threshold.apply(self.init_class_parameters) + + def init_class_parameters(self, m): + classname = m.__class__.__name__ + if classname.find('Conv') != -1: + nn.init.kaiming_normal_(m.weight.data) + elif classname.find('BatchNorm') != -1: + m.weight.data.fill_(1.) 
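+            # BatchNorm weights are set to 1 and biases to a small positive
+            # value (1e-4), following the reference DB implementation cited above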
+ m.bias.data.fill_(1e-4) + + def diff_binarize(self, prob_map, thr_map, k): + return torch.reciprocal(1.0 + torch.exp(-k * (prob_map - thr_map))) + + def forward(self, inputs): + prob_map = self.binarize(inputs) + thr_map = self.threshold(inputs) + binary_map = self.diff_binarize(prob_map, thr_map, k=50) + outputs = torch.cat((prob_map, thr_map, binary_map), dim=1) + return (outputs, ) + + def _init_thr(self, inner_channels, bias=False): + in_channels = inner_channels + seq = nn.Sequential( + nn.Conv2d( + in_channels, inner_channels // 4, 3, padding=1, bias=bias), + nn.BatchNorm2d(inner_channels // 4), nn.ReLU(inplace=True), + nn.ConvTranspose2d(inner_channels // 4, inner_channels // 4, 2, 2), + nn.BatchNorm2d(inner_channels // 4), nn.ReLU(inplace=True), + nn.ConvTranspose2d(inner_channels // 4, 1, 2, 2), nn.Sigmoid()) + return seq diff --git a/mmocr/models/textdet/detectors/dbnet.py b/mmocr/models/textdet/detectors/dbnet.py new file mode 100644 index 00000000..468c4259 --- /dev/null +++ b/mmocr/models/textdet/detectors/dbnet.py @@ -0,0 +1,23 @@ +from mmdet.models.builder import DETECTORS +from . import SingleStageTextDetector, TextDetectorMixin + + +@DETECTORS.register_module() +class DBNet(TextDetectorMixin, SingleStageTextDetector): + """The class for implementing DBNet text detector: Real-time Scene Text + Detection with Differentiable Binarization. + + [https://arxiv.org/abs/1911.08947]. + """ + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None, + show_score=False): + SingleStageTextDetector.__init__(self, backbone, neck, bbox_head, + train_cfg, test_cfg, pretrained) + TextDetectorMixin.__init__(self, show_score) diff --git a/mmocr/models/textdet/losses/db_loss.py b/mmocr/models/textdet/losses/db_loss.py new file mode 100644 index 00000000..0081ecda --- /dev/null +++ b/mmocr/models/textdet/losses/db_loss.py @@ -0,0 +1,169 @@ +import torch +import torch.nn.functional as F +from torch import nn + +from mmdet.models.builder import LOSSES +from mmocr.core.visualize import show_feature # noqa F401 +from mmocr.models.common.losses.dice_loss import DiceLoss + + +@LOSSES.register_module() +class DBLoss(nn.Module): + """The class for implementing DBNet loss. + + This is partially adapted from https://github.com/MhLiao/DB. + """ + + def __init__(self, + alpha=1, + beta=1, + reduction='mean', + negative_ratio=3.0, + eps=1e-6, + bbce_loss=False): + """Initialization. + + Args: + alpha (float): The binary loss coef. + beta (float): The threshold loss coef. + reduction (str): The way to reduce the loss. + negative_ratio (float): The ratio of positives to negatives. + eps (float): Epsilon in the threshold loss function. + bbce_loss (bool): Whether to use balanced bce for probability loss. + If False, dice loss will be used instead. + """ + super().__init__() + assert reduction in ['mean', + 'sum'], " reduction must in ['mean','sum']" + self.alpha = alpha + self.beta = beta + self.reduction = reduction + self.negative_ratio = negative_ratio + self.eps = eps + self.bbce_loss = bbce_loss + self.dice_loss = DiceLoss(eps=eps) + + def bitmasks2tensor(self, bitmasks, target_sz): + """Convert Bitmasks to tensor. + + Args: + bitmasks (list[BitMasks]): The BitMasks list. Each item is for + one img. + target_sz (tuple(int, int)): The target tensor size of KxHxW + with K being the number of kernels. + + Returns + result_tensors (list[tensor]): The list of kernel tensors. Each + element is for one kernel level. 
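+                Each mask is zero-padded on its right and bottom up to
+                target_sz before being stacked.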
+ """ + assert isinstance(bitmasks, list) + assert isinstance(target_sz, tuple) + + batch_size = len(bitmasks) + num_levels = len(bitmasks[0]) + + result_tensors = [] + + for level_inx in range(num_levels): + kernel = [] + for batch_inx in range(batch_size): + mask = torch.from_numpy(bitmasks[batch_inx].masks[level_inx]) + mask_sz = mask.shape + pad = [ + 0, target_sz[1] - mask_sz[1], 0, target_sz[0] - mask_sz[0] + ] + mask = F.pad(mask, pad, mode='constant', value=0) + kernel.append(mask) + kernel = torch.stack(kernel) + result_tensors.append(kernel) + + return result_tensors + + def balance_bce_loss(self, pred, gt, mask): + + positive = (gt * mask) + negative = ((1 - gt) * mask) + positive_count = int(positive.float().sum()) + negative_count = min( + int(negative.float().sum()), + int(positive_count * self.negative_ratio)) + + assert gt.max() <= 1 and gt.min() >= 0 + assert pred.max() <= 1 and pred.min() >= 0 + loss = F.binary_cross_entropy(pred, gt, reduction='none') + positive_loss = loss * positive.float() + negative_loss = loss * negative.float() + + negative_loss, _ = torch.topk(negative_loss.view(-1), negative_count) + + balance_loss = (positive_loss.sum() + negative_loss.sum()) / ( + positive_count + negative_count + self.eps) + + return balance_loss + + def l1_thr_loss(self, pred, gt, mask): + thr_loss = torch.abs((pred - gt) * mask).sum() / ( + mask.sum() + self.eps) + return thr_loss + + def forward(self, preds, downsample_ratio, gt_shrink, gt_shrink_mask, + gt_thr, gt_thr_mask): + """Compute DBNet loss. + + Args: + preds (tensor): The output tensor with size of Nx3xHxW. + downsample_ratio (float): The downsample ratio for the + ground truths. + gt_shrink (list[BitmapMasks]): The mask list with each element + being the shrinked text mask for one img. + gt_shrink_mask (list[BitmapMasks]): The effective mask list with + each element being the shrinked effective mask for one img. + gt_thr (list[BitmapMasks]): The mask list with each element + being the threshold text mask for one img. + gt_thr_mask (list[BitmapMasks]): The effective mask list with + each element being the threshold effective mask for one img. + + Returns: + results(dict): The dict for dbnet losses with loss_prob, + loss_db and loss_thresh. 
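+                The ground-truth masks are rescaled by downsample_ratio and
+                padded to the prediction map size before the losses are
+                computed.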
+ """ + assert isinstance(downsample_ratio, float) + + assert isinstance(gt_shrink, list) + assert isinstance(gt_shrink_mask, list) + assert isinstance(gt_thr, list) + assert isinstance(gt_thr_mask, list) + + preds = preds[0] + + pred_prob = preds[:, 0, :, :] + pred_thr = preds[:, 1, :, :] + pred_db = preds[:, 2, :, :] + feature_sz = preds.size() + + keys = ['gt_shrink', 'gt_shrink_mask', 'gt_thr', 'gt_thr_mask'] + gt = {} + for k in keys: + gt[k] = eval(k) + gt[k] = [item.rescale(downsample_ratio) for item in gt[k]] + gt[k] = self.bitmasks2tensor(gt[k], feature_sz[2:]) + gt[k] = [item.to(preds.device) for item in gt[k]] + gt['gt_shrink'][0] = (gt['gt_shrink'][0] > 0).float() + if self.bbce_loss: + loss_prob = self.balance_bce_loss(pred_prob, gt['gt_shrink'][0], + gt['gt_shrink_mask'][0]) + else: + loss_prob = self.dice_loss(pred_prob, gt['gt_shrink'][0], + gt['gt_shrink_mask'][0]) + + loss_db = self.dice_loss(pred_db, gt['gt_shrink'][0], + gt['gt_shrink_mask'][0]) + loss_thr = self.l1_thr_loss(pred_thr, gt['gt_thr'][0], + gt['gt_thr_mask'][0]) + + results = dict( + loss_prob=self.alpha * loss_prob, + loss_db=loss_db, + loss_thr=self.beta * loss_thr) + + return results diff --git a/mmocr_gitlab/mmocr b/mmocr_gitlab/mmocr new file mode 160000 index 00000000..4c96c275 --- /dev/null +++ b/mmocr_gitlab/mmocr @@ -0,0 +1 @@ +Subproject commit 4c96c2754d6785fa19663f3c62e54470ec185862