[Docs] Add model-pages in Model Zoo (#480)
* Add model-pages * Add shortname in configs * Use link directly instead of `switch_language.md` * Auto collapse model-zoo pages. * Fix link in RepVGG * Add link replace * fix lint
pull/488/head
parent
63a2211315
commit
fd0f5cce92
|
@ -1,4 +1,5 @@
|
|||
# Backpropagation Applied to Handwritten Zip Code Recognition
|
||||
<!-- {LeNet} -->
|
||||
|
||||
## Introduction
|
||||
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
# MobileNetV2: Inverted Residuals and Linear Bottlenecks
|
||||
<!-- {MobileNet V2} -->
|
||||
|
||||
## Introduction
|
||||
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
# Searching for MobileNetV3
|
||||
<!-- {MobileNet V3} -->
|
||||
|
||||
## Introduction
|
||||
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
# Designing Network Design Spaces
|
||||
<!-- {RegNet} -->
|
||||
|
||||
## Introduction
|
||||
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
# Repvgg: Making vgg-style convnets great again
|
||||
<!-- {RepVGG} -->
|
||||
|
||||
## Introduction
|
||||
|
||||
|
@ -43,7 +44,7 @@ python ./tools/convert_models/reparameterize_repvgg.py ${CFG_PATH} ${SRC_CKPT_PA
|
|||
|
||||
`${CFG_PATH}` is the config file, `${SRC_CKPT_PATH}` is the source checkpoint file, `${TARGET_CKPT_PATH}` is the target deploy weight file path.
|
||||
|
||||
To use reparameterized repvgg weight, the config file must switch to [the deploy config files](./configs/repvgg/deploy) as below:
|
||||
To use reparameterized repvgg weight, the config file must switch to [the deploy config files](./deploy) as below:
|
||||
|
||||
```bash
|
||||
python ./tools/test.py ${RepVGG_Deploy_CFG} ${CHECK_POINT}
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
# ResNeSt: Split-Attention Networks
|
||||
<!-- {ResNeSt} -->
|
||||
|
||||
## Introduction
|
||||
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
# Deep Residual Learning for Image Recognition
|
||||
<!-- {ResNet} -->
|
||||
|
||||
## Introduction
|
||||
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
# Aggregated Residual Transformations for Deep Neural Networks
|
||||
<!-- {ResNeXt} -->
|
||||
|
||||
## Introduction
|
||||
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
# Squeeze-and-Excitation Networks
|
||||
<!-- {SE-ResNet} -->
|
||||
|
||||
## Introduction
|
||||
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
# Squeeze-and-Excitation Networks
|
||||
<!-- {SE-ResNeXt} -->
|
||||
|
||||
## Introduction
|
||||
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
# ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices
|
||||
<!-- {ShuffleNet V1} -->
|
||||
|
||||
## Introduction
|
||||
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
# Shufflenet v2: Practical guidelines for efficient cnn architecture design
|
||||
<!-- {ShuffleNet V2} -->
|
||||
|
||||
## Introduction
|
||||
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
# Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
|
||||
<!-- {Swin Transformer} -->
|
||||
|
||||
## Introduction
|
||||
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
# Very Deep Convolutional Networks for Large-Scale Image Recognition
|
||||
<!-- {VGG} -->
|
||||
|
||||
## Introduction
|
||||
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
# An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale
|
||||
<!-- {Vision Transformer} -->
|
||||
|
||||
## Introduction
|
||||
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
var collapsedSections = ['Model zoo'];
|
|
@ -161,6 +161,7 @@ html_theme_options = {
|
|||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
html_css_files = ['css/readthedocs.css']
|
||||
html_js_files = ['js/custom.js']
|
||||
|
||||
master_doc = 'index'
|
||||
|
||||
|
@ -270,4 +271,5 @@ def setup(app):
|
|||
'enable_eval_rst': True,
|
||||
}, True)
|
||||
app.add_transform(AutoStructify)
|
||||
app.add_js_file('./_static/js/custom.js')
|
||||
app.connect('builder-inited', builder_inited_handler)
|
||||
|
|
|
@ -13,14 +13,6 @@ You can switch between Chinese and English documents in the lower-left corner of
|
|||
getting_started.md
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:caption: Model zoo
|
||||
|
||||
modelzoo_statistics.md
|
||||
model_zoo.md
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:caption: Tutorials
|
||||
|
@ -31,6 +23,9 @@ You can switch between Chinese and English documents in the lower-left corner of
|
|||
tutorials/new_modules.md
|
||||
|
||||
|
||||
.. include:: _model_zoo.rst
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:caption: Useful Tools and Scripts
|
||||
|
@ -57,8 +52,8 @@ You can switch between Chinese and English documents in the lower-left corner of
|
|||
.. toctree::
|
||||
:caption: Language Switch
|
||||
|
||||
switch_language.md
|
||||
|
||||
English <https://mmclassification.readthedocs.io/en/latest/>
|
||||
简体中文 <https://mmclassification.readthedocs.io/zh_CN/latest/>
|
||||
|
||||
|
||||
Indices and tables
|
||||
|
|
72
docs/stat.py
72
docs/stat.py
|
@ -1,14 +1,18 @@
|
|||
#!/usr/bin/env python
|
||||
import functools as func
|
||||
import glob
|
||||
import os.path as osp
|
||||
import os
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
import numpy as np
|
||||
|
||||
MMCLS_ROOT = Path(__file__).absolute().parents[1]
|
||||
url_prefix = 'https://github.com/open-mmlab/mmclassification/blob/master/'
|
||||
|
||||
files = sorted(glob.glob('../configs/*/README.md'))
|
||||
papers_root = Path('papers')
|
||||
papers_root.mkdir(exist_ok=True)
|
||||
files = [Path(f) for f in sorted(glob.glob('../configs/*/README.md'))]
|
||||
|
||||
stats = []
|
||||
titles = []
|
||||
|
@ -16,35 +20,59 @@ num_ckpts = 0
|
|||
num_configs = 0
|
||||
|
||||
for f in files:
|
||||
url = osp.dirname(f.replace('../', url_prefix))
|
||||
|
||||
with open(f, 'r') as content_file:
|
||||
content = content_file.read()
|
||||
|
||||
title = content.split('\n')[0].replace('# ', '').strip()
|
||||
|
||||
# Extract checkpoints
|
||||
ckpts = set(x.lower().strip()
|
||||
for x in re.findall(r'\[model\]\((https?.*)\)', content))
|
||||
|
||||
if len(ckpts) == 0:
|
||||
continue
|
||||
num_ckpts += len(ckpts)
|
||||
|
||||
# Extract paper title
|
||||
title = content.split('\n')[0].replace('# ', '').strip()
|
||||
titles.append(title)
|
||||
|
||||
# Extract paper abbreviation
|
||||
abbr = [x for x in re.findall(r'<!-- {(.+)} -->', content)]
|
||||
abbr = abbr[0] if len(abbr) > 0 else title
|
||||
|
||||
# Extract paper type
|
||||
_papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)]
|
||||
assert len(_papertype) > 0
|
||||
papertype = _papertype[0]
|
||||
|
||||
paper = set([(papertype, title)])
|
||||
|
||||
num_ckpts += len(ckpts)
|
||||
titles.append(title)
|
||||
# Write a copy of README
|
||||
copy = papers_root / (f.parent.name + '.md')
|
||||
if copy.exists():
|
||||
os.remove(copy)
|
||||
|
||||
def replace_link(matchobj):
|
||||
# Replace relative link to GitHub link.
|
||||
name = matchobj.group(1)
|
||||
link = matchobj.group(2)
|
||||
if not link.startswith('http') and (f.parent / link).exists():
|
||||
rel_link = (f.parent / link).absolute().relative_to(MMCLS_ROOT)
|
||||
link = url_prefix + str(rel_link)
|
||||
return f'[{name}]({link})'
|
||||
|
||||
content = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', replace_link, content)
|
||||
|
||||
with open(copy, 'w') as copy_file:
|
||||
copy_file.write(content)
|
||||
|
||||
statsmsg = f"""
|
||||
\t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts)
|
||||
\t* [{papertype}] [{title}]({copy}) ({len(ckpts)} ckpts)
|
||||
"""
|
||||
stats.append((paper, ckpts, statsmsg))
|
||||
stats.append(
|
||||
dict(
|
||||
paper=paper, ckpts=ckpts, statsmsg=statsmsg, abbr=abbr, copy=copy))
|
||||
|
||||
allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats])
|
||||
msglist = '\n'.join(x for _, _, x in stats)
|
||||
allpapers = func.reduce(lambda a, b: a.union(b),
|
||||
[stat['paper'] for stat in stats])
|
||||
msglist = '\n'.join(stat['statsmsg'] for stat in stats)
|
||||
|
||||
papertypes, papercounts = np.unique([t for t, _ in allpapers],
|
||||
return_counts=True)
|
||||
|
@ -52,7 +80,7 @@ countstr = '\n'.join(
|
|||
[f' - {t}: {c}' for t, c in zip(papertypes, papercounts)])
|
||||
|
||||
modelzoo = f"""
|
||||
# Model Zoo Statistics
|
||||
# Model Zoo Summary
|
||||
|
||||
* Number of papers: {len(set(titles))}
|
||||
{countstr}
|
||||
|
@ -63,3 +91,17 @@ modelzoo = f"""
|
|||
|
||||
with open('modelzoo_statistics.md', 'w') as f:
|
||||
f.write(modelzoo)
|
||||
|
||||
toctree = """
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:caption: Model zoo
|
||||
:glob:
|
||||
|
||||
modelzoo_statistics.md
|
||||
model_zoo.md
|
||||
"""
|
||||
with open('_model_zoo.rst', 'w') as f:
|
||||
f.write(toctree)
|
||||
for stat in stats:
|
||||
f.write(f' {stat["abbr"]} <{stat["copy"]}>\n')
|
||||
|
|
|
@ -1,3 +0,0 @@
|
|||
# <a href='https://mmclassification.readthedocs.io/en/latest/'>English</a>
|
||||
|
||||
# <a href='https://mmclassification.readthedocs.io/zh_CN/latest/'>简体中文</a>
|
|
@ -13,14 +13,6 @@ You can switch between Chinese and English documents in the lower-left corner of
|
|||
getting_started.md
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:caption: 模型库
|
||||
|
||||
modelzoo_statistics.md
|
||||
model_zoo.md
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:caption: 教程
|
||||
|
@ -31,6 +23,9 @@ You can switch between Chinese and English documents in the lower-left corner of
|
|||
tutorials/new_modules.md
|
||||
|
||||
|
||||
.. include:: _model_zoo.rst
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:caption: 实用工具
|
||||
|
@ -57,8 +52,8 @@ You can switch between Chinese and English documents in the lower-left corner of
|
|||
.. toctree::
|
||||
:caption: 语言切换
|
||||
|
||||
switch_language.md
|
||||
|
||||
English <https://mmclassification.readthedocs.io/en/latest/>
|
||||
简体中文 <https://mmclassification.readthedocs.io/zh_CN/latest/>
|
||||
|
||||
|
||||
索引与表格
|
||||
|
|
|
@ -1,14 +1,18 @@
|
|||
#!/usr/bin/env python
|
||||
import functools as func
|
||||
import glob
|
||||
import os.path as osp
|
||||
import os
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
import numpy as np
|
||||
|
||||
MMCLS_ROOT = Path(__file__).absolute().parents[1]
|
||||
url_prefix = 'https://github.com/open-mmlab/mmclassification/blob/master/'
|
||||
|
||||
files = sorted(glob.glob('../configs/*/README.md'))
|
||||
papers_root = Path('papers')
|
||||
papers_root.mkdir(exist_ok=True)
|
||||
files = [Path(f) for f in sorted(glob.glob('../configs/*/README.md'))]
|
||||
|
||||
stats = []
|
||||
titles = []
|
||||
|
@ -16,35 +20,59 @@ num_ckpts = 0
|
|||
num_configs = 0
|
||||
|
||||
for f in files:
|
||||
url = osp.dirname(f.replace('../', url_prefix))
|
||||
|
||||
with open(f, 'r') as content_file:
|
||||
content = content_file.read()
|
||||
|
||||
title = content.split('\n')[0].replace('# ', '').strip()
|
||||
|
||||
# Extract checkpoints
|
||||
ckpts = set(x.lower().strip()
|
||||
for x in re.findall(r'\[model\]\((https?.*)\)', content))
|
||||
|
||||
if len(ckpts) == 0:
|
||||
continue
|
||||
num_ckpts += len(ckpts)
|
||||
|
||||
# Extract paper title
|
||||
title = content.split('\n')[0].replace('# ', '').strip()
|
||||
titles.append(title)
|
||||
|
||||
# Extract paper abbreviation
|
||||
abbr = [x for x in re.findall(r'<!-- {(.+)} -->', content)]
|
||||
abbr = abbr[0] if len(abbr) > 0 else title
|
||||
|
||||
# Extract paper type
|
||||
_papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)]
|
||||
assert len(_papertype) > 0
|
||||
papertype = _papertype[0]
|
||||
|
||||
paper = set([(papertype, title)])
|
||||
|
||||
num_ckpts += len(ckpts)
|
||||
titles.append(title)
|
||||
# Write a copy of README
|
||||
copy = papers_root / (f.parent.name + '.md')
|
||||
if copy.exists():
|
||||
os.remove(copy)
|
||||
|
||||
def replace_link(matchobj):
|
||||
# Replace relative link to GitHub link.
|
||||
name = matchobj.group(1)
|
||||
link = matchobj.group(2)
|
||||
if not link.startswith('http') and (f.parent / link).exists():
|
||||
rel_link = (f.parent / link).absolute().relative_to(MMCLS_ROOT)
|
||||
link = url_prefix + str(rel_link)
|
||||
return f'[{name}]({link})'
|
||||
|
||||
content = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', replace_link, content)
|
||||
|
||||
with open(copy, 'w') as copy_file:
|
||||
copy_file.write(content)
|
||||
|
||||
statsmsg = f"""
|
||||
\t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts)
|
||||
\t* [{papertype}] [{title}]({copy}) ({len(ckpts)} ckpts)
|
||||
"""
|
||||
stats.append((paper, ckpts, statsmsg))
|
||||
stats.append(
|
||||
dict(
|
||||
paper=paper, ckpts=ckpts, statsmsg=statsmsg, abbr=abbr, copy=copy))
|
||||
|
||||
allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats])
|
||||
msglist = '\n'.join(x for _, _, x in stats)
|
||||
allpapers = func.reduce(lambda a, b: a.union(b),
|
||||
[stat['paper'] for stat in stats])
|
||||
msglist = '\n'.join(stat['statsmsg'] for stat in stats)
|
||||
|
||||
papertypes, papercounts = np.unique([t for t, _ in allpapers],
|
||||
return_counts=True)
|
||||
|
@ -63,3 +91,17 @@ modelzoo = f"""
|
|||
|
||||
with open('modelzoo_statistics.md', 'w') as f:
|
||||
f.write(modelzoo)
|
||||
|
||||
toctree = """
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:caption: 模型库
|
||||
:glob:
|
||||
|
||||
modelzoo_statistics.md
|
||||
model_zoo.md
|
||||
"""
|
||||
with open('_model_zoo.rst', 'w') as f:
|
||||
f.write(toctree)
|
||||
for stat in stats:
|
||||
f.write(f' {stat["abbr"]} <{stat["copy"]}>\n')
|
||||
|
|
|
@ -1,3 +0,0 @@
|
|||
# <a href='https://mmclassification.readthedocs.io/en/latest/'>English</a>
|
||||
|
||||
# <a href='https://mmclassification.readthedocs.io/zh_CN/latest/'>简体中文</a>
|
Loading…
Reference in New Issue