diff --git a/.dev_scripts/generate_readme.py b/.dev_scripts/generate_readme.py index 4dc43ed5..695c2f61 100644 --- a/.dev_scripts/generate_readme.py +++ b/.dev_scripts/generate_readme.py @@ -61,7 +61,7 @@ print(type(feats)) TRAIN_TEST_TEMPLATE = """\ **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: @@ -79,7 +79,7 @@ python tools/test.py {test_config} {test_weights} TEST_ONLY_TEMPLATE = """\ **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index ca2a4926..20a74222 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,6 +1,6 @@ blank_issues_enabled: false contact_links: - - name: MMClassification Documentation - url: https://mmclassification.readthedocs.io/en/latest/ + - name: MMPreTrain Documentation + url: https://mmpretrain.readthedocs.io/en/latest/ about: Check if your question is answered in docs diff --git a/CITATION.cff b/CITATION.cff index 0c0d7730..81ea8f79 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -1,9 +1,9 @@ cff-version: 1.2.0 message: "If you use this software, please cite it as below." -title: "OpenMMLab's Image Classification Toolbox and Benchmark" +title: "OpenMMLab's Pre-training Toolbox and Benchmark" authors: - - name: "MMClassification Contributors" + - name: "MMPreTrain Contributors" version: 0.15.0 -date-released: 2020-07-09 -repository-code: "https://github.com/open-mmlab/mmclassification" +date-released: 2023-04-06 +repository-code: "https://github.com/open-mmlab/mmpretrain" license: Apache-2.0 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 58b520d0..ce84c2a0 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,13 +1,13 @@ -# Contributing to MMClassification +# Contributing to MMPreTrain -- [Contributing to MMClassification](#contributing-to-mmclassification) +- [Contributing to MMPreTrain](#contributing-to-mmpretrain) - [Workflow](#workflow) - [Code style](#code-style) - [Python](#python) - [C++ and CUDA](#c-and-cuda) - [Pre-commit Hook](#pre-commit-hook) -Thanks for your interest in contributing to MMClassification! All kinds of contributions are welcome, including but not limited to the following. +Thanks for your interest in contributing to MMPreTrain! All kinds of contributions are welcome, including but not limited to the following. - Fix typo or bugs - Add documentation or translate the documentation into other languages @@ -17,7 +17,7 @@ Thanks for your interest in contributing to MMClassification! All kinds of contr We recommend the potential contributors follow this workflow for contribution. -1. Fork and pull the latest MMClassification repository, follow [get started](https://mmclassification.readthedocs.io/en/1.x/get_started.html) to setup the environment. +1. Fork and pull the latest MMPreTrain repository, follow [get started](https://mmpretrain.readthedocs.io/en/latest/get_started.html) to setup the environment. 2. 
Checkout a new branch (**do not use the master or dev branch** for PRs) ```bash @@ -44,7 +44,7 @@ We use the following tools for linting and formatting: - [mdformat](https://github.com/executablebooks/mdformat): Mdformat is an opinionated Markdown formatter that can be used to enforce a consistent style in Markdown files. - [docformatter](https://github.com/myint/docformatter): A formatter to format docstring. -Style configurations of yapf and isort can be found in [setup.cfg](https://github.com/open-mmlab/mmclassification/blob/1.x/setup.cfg). +Style configurations of yapf and isort can be found in [setup.cfg](https://github.com/open-mmlab/mmpretrain/blob/main/setup.cfg). ### C++ and CUDA @@ -54,7 +54,7 @@ We follow the [Google C++ Style Guide](https://google.github.io/styleguide/cppgu We use [pre-commit hook](https://pre-commit.com/) that checks and formats for `flake8`, `yapf`, `isort`, `trailing whitespaces`, `markdown files`, fixes `end-of-files`, `double-quoted-strings`, `python-encoding-pragma`, `mixed-line-ending`, sorts `requirments.txt` automatically on every commit. -The config for a pre-commit hook is stored in [.pre-commit-config](https://github.com/open-mmlab/mmclassification/blob/1.x/.pre-commit-config.yaml). +The config for a pre-commit hook is stored in [.pre-commit-config](https://github.com/open-mmlab/mmpretrain/blob/main/.pre-commit-config.yaml). After you clone the repository, you will need to install initialize pre-commit hook. diff --git a/LICENSE b/LICENSE index f731325b..ae873437 100644 --- a/LICENSE +++ b/LICENSE @@ -188,7 +188,7 @@ Copyright (c) OpenMMLab. All rights reserved same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2020 MMClassification Authors. + Copyright 2020 MMPreTrain Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/README.md b/README.md index 2b8481cd..d813bfc0 100644 --- a/README.md +++ b/README.md @@ -20,18 +20,18 @@
 
[![PyPI](https://img.shields.io/pypi/v/mmpretrain)](https://pypi.org/project/mmpretrain) -[![Docs](https://img.shields.io/badge/docs-latest-blue)](https://mmclassification.readthedocs.io/en/1.x/) -[![Build Status](https://github.com/open-mmlab/mmclassification/workflows/build/badge.svg)](https://github.com/open-mmlab/mmclassification/actions) -[![codecov](https://codecov.io/gh/open-mmlab/mmclassification/branch/1.x/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmclassification) -[![license](https://img.shields.io/github/license/open-mmlab/mmclassification.svg)](https://github.com/open-mmlab/mmclassification/blob/1.x/LICENSE) -[![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmclassification.svg)](https://github.com/open-mmlab/mmclassification/issues) -[![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmclassification.svg)](https://github.com/open-mmlab/mmclassification/issues) +[![Docs](https://img.shields.io/badge/docs-latest-blue)](https://mmpretrain.readthedocs.io/en/latest/) +[![Build Status](https://github.com/open-mmlab/mmpretrain/workflows/build/badge.svg)](https://github.com/open-mmlab/mmpretrain/actions) +[![codecov](https://codecov.io/gh/open-mmlab/mmpretrain/branch/main/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmpretrain) +[![license](https://img.shields.io/github/license/open-mmlab/mmpretrain.svg)](https://github.com/open-mmlab/mmpretrain/blob/main/LICENSE) +[![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmpretrain.svg)](https://github.com/open-mmlab/mmpretrain/issues) +[![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmpretrain.svg)](https://github.com/open-mmlab/mmpretrain/issues) -[📘 Documentation](https://mmclassification.readthedocs.io/en/1.x/) | -[🛠️ Installation](https://mmclassification.readthedocs.io/en/dev-1.x/get_started.html#installation) | -[👀 Model Zoo](https://mmclassification.readthedocs.io/en/1.x/modelzoo_statistics.html) | -[🆕 Update News](https://mmclassification.readthedocs.io/en/1.x/notes/changelog.html) | -[🤔 Reporting Issues](https://github.com/open-mmlab/mmclassification/issues/new/choose) +[📘 Documentation](https://mmpretrain.readthedocs.io/en/latest/) | +[🛠️ Installation](https://mmpretrain.readthedocs.io/en/latest/get_started.html#installation) | +[👀 Model Zoo](https://mmpretrain.readthedocs.io/en/latest/modelzoo_statistics.html) | +[🆕 Update News](https://mmpretrain.readthedocs.io/en/latest/notes/changelog.html) | +[🤔 Reporting Issues](https://github.com/open-mmlab/mmpretrain/issues/new/choose) @@ -91,15 +91,15 @@ Previous version update - Support confusion matrix calculation and plot. - Support **multi-task** training and testing. - Support Test-time Augmentation. -- Upgrade API to get pre-defined models of MMClassification. +- Upgrade API to get pre-defined models of MMPreTrain. - Refactor BEiT backbone and support v1/v2 inference. This release introduced a brand new and flexible training & test engine, but it's still in progress. Welcome -to try according to [the documentation](https://mmclassification.readthedocs.io/en/1.x/). +to try according to [the documentation](https://mmpretrain.readthedocs.io/en/latest/). -And there are some BC-breaking changes. Please check [the migration tutorial](https://mmclassification.readthedocs.io/en/1.x/migration.html). +And there are some BC-breaking changes. Please check [the migration tutorial](https://mmpretrain.readthedocs.io/en/latest/migration.html). 
-Please refer to [changelog](https://mmclassification.readthedocs.io/en/1.x/notes/changelog.html) for more details and other release history. +Please refer to [changelog](https://mmpretrain.readthedocs.io/en/latest/notes/changelog.html) for more details and other release history. ## Installation @@ -114,24 +114,24 @@ cd mmpretrain mim install -e . ``` -Please refer to [installation documentation](https://mmclassification.readthedocs.io/en/1.x/get_started.html) for more detailed installation and dataset preparation. +Please refer to [installation documentation](https://mmpretrain.readthedocs.io/en/latest/get_started.html) for more detailed installation and dataset preparation. ## User Guides We provided a series of tutorials about the basic usage of MMPreTrain for new users: -- [Learn about Configs](https://mmclassification.readthedocs.io/en/1.x/user_guides/config.html) -- [Prepare Dataset](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html) -- [Inference with existing models](https://mmclassification.readthedocs.io/en/1.x/user_guides/inference.html) -- [Train](https://mmclassification.readthedocs.io/en/pretrain/user_guides/train.html) -- [Test](https://mmclassification.readthedocs.io/en/pretrain/user_guides/test.html) -- [Downstream tasks](https://mmclassification.readthedocs.io/en/pretrain/user_guides/downstream.html) +- [Learn about Configs](https://mmpretrain.readthedocs.io/en/latest/user_guides/config.html) +- [Prepare Dataset](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html) +- [Inference with existing models](https://mmpretrain.readthedocs.io/en/latest/user_guides/inference.html) +- [Train](https://mmpretrain.readthedocs.io/en/pretrain/user_guides/train.html) +- [Test](https://mmpretrain.readthedocs.io/en/pretrain/user_guides/test.html) +- [Downstream tasks](https://mmpretrain.readthedocs.io/en/pretrain/user_guides/downstream.html) -For more information, please refer to [our documentation](https://mmclassification.readthedocs.io/en/pretrain/). +For more information, please refer to [our documentation](https://mmpretrain.readthedocs.io/en/pretrain/). ## Model zoo -Results and models are available in the [model zoo](https://mmclassification.readthedocs.io/en/1.x/modelzoo_statistics.html). +Results and models are available in the [model zoo](https://mmpretrain.readthedocs.io/en/latest/modelzoo_statistics.html).
Overview @@ -242,7 +242,7 @@ Results and models are available in the [model zoo](https://mmclassification.rea ## Contributing We appreciate all contributions to improve MMPreTrain. -Please refer to [CONTRUBUTING](https://mmclassification.readthedocs.io/en/1.x/notes/contribution_guide.html) for the contributing guideline. +Please refer to [CONTRUBUTING](https://mmpretrain.readthedocs.io/en/latest/notes/contribution_guide.html) for the contributing guideline. ## Acknowledgement @@ -254,11 +254,11 @@ We wish that the toolbox and benchmark could serve the growing research communit If you find this project useful in your research, please consider cite: ```BibTeX -@misc{2020mmclassification, - title={OpenMMLab's Image Classification Toolbox and Benchmark}, - author={MMClassification Contributors}, - howpublished = {\url{https://github.com/open-mmlab/mmclassification}}, - year={2020} +@misc{2023mmpretrain, + title={OpenMMLab's Pre-training Toolbox and Benchmark}, + author={MMPreTrain Contributors}, + howpublished = {\url{https://github.com/open-mmlab/mmpretrain}}, + year={2023} } ``` diff --git a/README_zh-CN.md b/README_zh-CN.md index e20eb09f..a62da9f9 100644 --- a/README_zh-CN.md +++ b/README_zh-CN.md @@ -20,18 +20,18 @@
 
[![PyPI](https://img.shields.io/pypi/v/mmpretrain)](https://pypi.org/project/mmpretrain) -[![Docs](https://img.shields.io/badge/docs-latest-blue)](https://mmclassification.readthedocs.io/zh_CN/1.x/) -[![Build Status](https://github.com/open-mmlab/mmclassification/workflows/build/badge.svg)](https://github.com/open-mmlab/mmclassification/actions) -[![codecov](https://codecov.io/gh/open-mmlab/mmclassification/branch/1.x/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmclassification) -[![license](https://img.shields.io/github/license/open-mmlab/mmclassification.svg)](https://github.com/open-mmlab/mmclassification/blob/1.x/LICENSE) -[![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmclassification.svg)](https://github.com/open-mmlab/mmclassification/issues) -[![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmclassification.svg)](https://github.com/open-mmlab/mmclassification/issues) +[![Docs](https://img.shields.io/badge/docs-latest-blue)](https://mmpretrain.readthedocs.io/zh_CN/latest/) +[![Build Status](https://github.com/open-mmlab/mmpretrain/workflows/build/badge.svg)](https://github.com/open-mmlab/mmpretrain/actions) +[![codecov](https://codecov.io/gh/open-mmlab/mmpretrain/branch/main/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmpretrain) +[![license](https://img.shields.io/github/license/open-mmlab/mmpretrain.svg)](https://github.com/open-mmlab/mmpretrain/blob/main/LICENSE) +[![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmpretrain.svg)](https://github.com/open-mmlab/mmpretrain/issues) +[![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmpretrain.svg)](https://github.com/open-mmlab/mmpretrain/issues) -[📘 中文文档](https://mmclassification.readthedocs.io/zh_CN/1.x/) | -[🛠️ 安装教程](https://mmclassification.readthedocs.io/zh_CN/1.x/get_started.html) | -[👀 模型库](https://mmclassification.readthedocs.io/zh_CN/1.x/modelzoo_statistics.html) | -[🆕 更新日志](https://mmclassification.readthedocs.io/zh_CN/1.x/notes/changelog.html) | -[🤔 报告问题](https://github.com/open-mmlab/mmclassification/issues/new/choose) +[📘 中文文档](https://mmpretrain.readthedocs.io/zh_CN/latest/) | +[🛠️ 安装教程](https://mmpretrain.readthedocs.io/zh_CN/latest/get_started.html) | +[👀 模型库](https://mmpretrain.readthedocs.io/zh_CN/latest/modelzoo_statistics.html) | +[🆕 更新日志](https://mmpretrain.readthedocs.io/zh_CN/latest/notes/changelog.html) | +[🤔 报告问题](https://github.com/open-mmlab/mmpretrain/issues/new/choose) @@ -89,14 +89,14 @@ MMPreTrain 是一款基于 PyTorch 的开源深度学习预训练工具箱,是 - 支持混淆矩阵计算和画图。 - 支持了 **多任务** 训练和测试。 - 支持了测试时增强(TTA)。 -- 更新了主要 API 接口,用以方便地获取 MMClassification 中预定义的模型。 +- 更新了主要 API 接口,用以方便地获取 MMPreTrain 中预定义的模型。 - 重构 BEiT 主干网络结构,并支持 v1 和 v2 模型的推理。 -这个版本引入一个全新的,可扩展性强的训练和测试引擎,但目前仍在开发中。欢迎根据 [文档](https://mmclassification.readthedocs.io/zh_CN/1.x/) 进行试用。 +这个版本引入一个全新的,可扩展性强的训练和测试引擎,但目前仍在开发中。欢迎根据 [文档](https://mmpretrain.readthedocs.io/zh_CN/latest/) 进行试用。 -同时,新版本中存在一些与旧版本不兼容的修改。请查看 [迁移文档](https://mmclassification.readthedocs.io/zh_CN/1.x/migration.html) 来详细了解这些变动。 +同时,新版本中存在一些与旧版本不兼容的修改。请查看 [迁移文档](https://mmpretrain.readthedocs.io/zh_CN/latest/migration.html) 来详细了解这些变动。 -发布历史和更新细节请参考 [更新日志](https://mmclassification.readthedocs.io/zh_CN/1.x/notes/changelog.html)。 +发布历史和更新细节请参考 [更新日志](https://mmpretrain.readthedocs.io/zh_CN/latest/notes/changelog.html)。 ## 安装 @@ -111,24 +111,24 @@ cd mmpretrain mim install -e . 
``` -更详细的步骤请参考 [安装指南](https://mmclassification.readthedocs.io/zh_CN/1.x/get_started.html) 进行安装。 +更详细的步骤请参考 [安装指南](https://mmpretrain.readthedocs.io/zh_CN/latest/get_started.html) 进行安装。 ## 基础教程 我们为新用户提供了一系列基础教程: -- [学习配置文件](https://mmclassification.readthedocs.io/zh_CN/1.x/user_guides/config.html) -- [准备数据集](https://mmclassification.readthedocs.io/zh_CN/1.x/user_guides/dataset_prepare.html) -- [使用现有模型推理](https://mmclassification.readthedocs.io/zh_CN/1.x/user_guides/inference.html) -- [训练](https://mmclassification.readthedocs.io/zh_CN/pretrain/user_guides/train.html) -- [测试](https://mmclassification.readthedocs.io/zh_CN/pretrain/user_guides/test.html) -- [下游任务](https://mmclassification.readthedocs.io/zh_CN/pretrain/user_guides/downstream.html) +- [学习配置文件](https://mmpretrain.readthedocs.io/zh_CN/latest/user_guides/config.html) +- [准备数据集](https://mmpretrain.readthedocs.io/zh_CN/latest/user_guides/dataset_prepare.html) +- [使用现有模型推理](https://mmpretrain.readthedocs.io/zh_CN/latest/user_guides/inference.html) +- [训练](https://mmpretrain.readthedocs.io/zh_CN/pretrain/user_guides/train.html) +- [测试](https://mmpretrain.readthedocs.io/zh_CN/pretrain/user_guides/test.html) +- [下游任务](https://mmpretrain.readthedocs.io/zh_CN/pretrain/user_guides/downstream.html) -关于更多的信息,请查阅我们的 [相关文档](https://mmclassification.readthedocs.io/zh_CN/pretrain/)。 +关于更多的信息,请查阅我们的 [相关文档](https://mmpretrain.readthedocs.io/zh_CN/pretrain/)。 ## 模型库 -相关结果和模型可在 [模型库](https://mmclassification.readthedocs.io/zh_CN/1.x/modelzoo_statistics.html) 中获得。 +相关结果和模型可在 [模型库](https://mmpretrain.readthedocs.io/zh_CN/latest/modelzoo_statistics.html) 中获得。
概览 @@ -237,7 +237,7 @@ mim install -e . ## 参与贡献 -我们非常欢迎任何有助于提升 MMPreTrain 的贡献,请参考 [贡献指南](https://mmclassification.readthedocs.io/zh_CN/1.x/notes/contribution_guide.html) 来了解如何参与贡献。 +我们非常欢迎任何有助于提升 MMPreTrain 的贡献,请参考 [贡献指南](https://mmpretrain.readthedocs.io/zh_CN/latest/notes/contribution_guide.html) 来了解如何参与贡献。 ## 致谢 @@ -249,11 +249,11 @@ MMPreTrain 是一款由不同学校和公司共同贡献的开源项目。我们 如果你在研究中使用了本项目的代码或者性能基准,请参考如下 bibtex 引用 MMPreTrain。 ```BibTeX -@misc{2020mmclassification, - title={OpenMMLab's Image Classification Toolbox and Benchmark}, - author={MMClassification Contributors}, - howpublished = {\url{https://github.com/open-mmlab/mmclassification}}, - year={2020} +@misc{2023mmpretrain, + title={OpenMMLab's Pre-training Toolbox and Benchmark}, + author={MMPreTrain Contributors}, + howpublished = {\url{https://github.com/open-mmlab/mmpretrain}}, + year={2023} } ``` diff --git a/configs/arcface/README.md b/configs/arcface/README.md index 37cd0f91..c1384da7 100644 --- a/configs/arcface/README.md +++ b/configs/arcface/README.md @@ -44,7 +44,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/arcface/metafile.yml b/configs/arcface/metafile.yml index b8a133d1..20080ddd 100644 --- a/configs/arcface/metafile.yml +++ b/configs/arcface/metafile.yml @@ -10,7 +10,7 @@ Collections: README: configs/arcface/README.md Code: Version: v1.0.0rc3 - URL: https://github.com/open-mmlab/mmclassification/blob/v1.0.0rc3/mmcls/models/heads/margin_head.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v1.0.0rc3/mmcls/models/heads/margin_head.py Models: - Name: resnet50-arcface_8xb32_inshop diff --git a/configs/barlowtwins/README.md b/configs/barlowtwins/README.md index d022c0e7..515d1388 100644 --- a/configs/barlowtwins/README.md +++ b/configs/barlowtwins/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/beit/README.md b/configs/beit/README.md index 037057e9..8116bd78 100644 --- a/configs/beit/README.md +++ b/configs/beit/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). 
Train: diff --git a/configs/beit/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py b/configs/beit/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py index e7c54379..2a467e65 100644 --- a/configs/beit/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py +++ b/configs/beit/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py @@ -1,4 +1,3 @@ -# mmcls:: means we use the default settings from MMClassification _base_ = [ '../../_base_/datasets/imagenet_bs64_swin_224.py', '../../_base_/schedules/imagenet_bs1024_adamw_swin.py', diff --git a/configs/beit/metafile.yml b/configs/beit/metafile.yml index 6243815d..e4524fae 100644 --- a/configs/beit/metafile.yml +++ b/configs/beit/metafile.yml @@ -16,7 +16,7 @@ Collections: URL: https://arxiv.org/abs/2106.08254 README: configs/beit/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/dev-1.x/mmcls/models/backbones/beit.py + URL: https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/models/backbones/beit.py Version: v1.0.0rc4 Models: diff --git a/configs/beitv2/README.md b/configs/beitv2/README.md index c46eb459..3a7cb4f7 100644 --- a/configs/beitv2/README.md +++ b/configs/beitv2/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/beitv2/metafile.yml b/configs/beitv2/metafile.yml index 78f1f957..74c3885e 100644 --- a/configs/beitv2/metafile.yml +++ b/configs/beitv2/metafile.yml @@ -16,7 +16,7 @@ Collections: URL: https://arxiv.org/abs/2208.06366 README: configs/beitv2/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/dev-1.x/mmcls/models/backbones/beit.py + URL: https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/models/backbones/beit.py Version: v1.0.0rc4 Models: diff --git a/configs/byol/README.md b/configs/byol/README.md index 3f23598b..2bfc8d06 100644 --- a/configs/byol/README.md +++ b/configs/byol/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/cae/README.md b/configs/cae/README.md index 8e90b540..dc1c818d 100644 --- a/configs/cae/README.md +++ b/configs/cae/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/clip/README.md b/configs/clip/README.md index 7dcd1e61..116107b2 100644 --- a/configs/clip/README.md +++ b/configs/clip/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). 
Test: diff --git a/configs/clip/metafile.yml b/configs/clip/metafile.yml index 38853268..c6989429 100644 --- a/configs/clip/metafile.yml +++ b/configs/clip/metafile.yml @@ -16,7 +16,7 @@ Collections: URL: https://arxiv.org/abs/2103.00020 README: configs/clip/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/dev-1.x/mmcls/models/backbones/vision_transformer.py + URL: https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/models/backbones/vision_transformer.py Version: v1.0.0 Models: diff --git a/configs/conformer/README.md b/configs/conformer/README.md index e56d42a2..6b149a8a 100644 --- a/configs/conformer/README.md +++ b/configs/conformer/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/conformer/metafile.yml b/configs/conformer/metafile.yml index 8721c444..c0821bad 100644 --- a/configs/conformer/metafile.yml +++ b/configs/conformer/metafile.yml @@ -11,7 +11,7 @@ Collections: Title: "Conformer: Local Features Coupling Global Representations for Visual Recognition" README: configs/conformer/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.19.0/mmcls/models/backbones/conformer.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.19.0/mmcls/models/backbones/conformer.py Version: v0.19.0 Models: diff --git a/configs/convmixer/README.md b/configs/convmixer/README.md index 8b9de427..597c5781 100644 --- a/configs/convmixer/README.md +++ b/configs/convmixer/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/convnext/README.md b/configs/convnext/README.md index fa6127db..79c09c49 100644 --- a/configs/convnext/README.md +++ b/configs/convnext/README.md @@ -55,7 +55,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/convnext/metafile.yml b/configs/convnext/metafile.yml index 542bbcd7..16896629 100644 --- a/configs/convnext/metafile.yml +++ b/configs/convnext/metafile.yml @@ -11,7 +11,7 @@ Collections: README: configs/convnext/README.md Code: Version: v0.20.1 - URL: https://github.com/open-mmlab/mmclassification/blob/v0.20.1/mmcls/models/backbones/convnext.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.20.1/mmcls/models/backbones/convnext.py Models: - Name: convnext-tiny_32xb128_in1k diff --git a/configs/convnext_v2/README.md b/configs/convnext_v2/README.md index a6d13e49..614fb7fa 100644 --- a/configs/convnext_v2/README.md +++ b/configs/convnext_v2/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). 
+Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/cspnet/README.md b/configs/cspnet/README.md index f4e88d69..41b31447 100644 --- a/configs/cspnet/README.md +++ b/configs/cspnet/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/cspnet/metafile.yml b/configs/cspnet/metafile.yml index 815ca56a..31036325 100644 --- a/configs/cspnet/metafile.yml +++ b/configs/cspnet/metafile.yml @@ -10,7 +10,7 @@ Collections: README: configs/cspnet/README.md Code: Version: v0.22.0 - URL: https://github.com/open-mmlab/mmclassification/blob/v0.22.0/mmcls/models/backbones/cspnet.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.22.0/mmcls/models/backbones/cspnet.py Models: - Name: cspdarknet50_3rdparty_8xb32_in1k diff --git a/configs/csra/README.md b/configs/csra/README.md index 99f71067..99b29571 100644 --- a/configs/csra/README.md +++ b/configs/csra/README.md @@ -33,7 +33,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/csra/metafile.yml b/configs/csra/metafile.yml index 543063b8..112f50c9 100644 --- a/configs/csra/metafile.yml +++ b/configs/csra/metafile.yml @@ -10,7 +10,7 @@ Collections: README: configs/csra/README.md Code: Version: v0.24.0 - URL: https://github.com/open-mmlab/mmclassification/blob/v0.24.0/mmcls/models/heads/multi_label_csra_head.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.24.0/mmcls/models/heads/multi_label_csra_head.py Models: - Name: resnet101-csra_1xb16_voc07-448px diff --git a/configs/davit/README.md b/configs/davit/README.md index 08a412e1..7f4a4777 100644 --- a/configs/davit/README.md +++ b/configs/davit/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/davit/metafile.yml b/configs/davit/metafile.yml index 02b4933d..588c18fd 100644 --- a/configs/davit/metafile.yml +++ b/configs/davit/metafile.yml @@ -11,7 +11,7 @@ Collections: Title: 'DaViT: Dual Attention Vision Transformers' README: configs/davit/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v1.0.0rc3/mmcls/models/backbones/davit.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v1.0.0rc3/mmcls/models/backbones/davit.py Version: v1.0.0rc3 Models: diff --git a/configs/deit/README.md b/configs/deit/README.md index d23f3bbc..7b2f58a5 100644 --- a/configs/deit/README.md +++ b/configs/deit/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). 
+Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/deit/metafile.yml b/configs/deit/metafile.yml index ba643b47..f6f0c5e5 100644 --- a/configs/deit/metafile.yml +++ b/configs/deit/metafile.yml @@ -13,7 +13,7 @@ Collections: README: configs/deit/README.md Code: URL: v0.19.0 - Version: https://github.com/open-mmlab/mmclassification/blob/v0.19.0/mmcls/models/backbones/deit.py + Version: https://github.com/open-mmlab/mmpretrain/blob/v0.19.0/mmcls/models/backbones/deit.py Models: - Name: deit-tiny_4xb256_in1k diff --git a/configs/deit3/README.md b/configs/deit3/README.md index 1bb79f13..18f678e2 100644 --- a/configs/deit3/README.md +++ b/configs/deit3/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/deit3/metafile.yml b/configs/deit3/metafile.yml index fd5593da..6f50fdc3 100644 --- a/configs/deit3/metafile.yml +++ b/configs/deit3/metafile.yml @@ -16,7 +16,7 @@ Collections: Title: 'DeiT III: Revenge of the ViT' README: configs/deit3/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v1.0.0rc2/mmcls/models/backbones/deit3.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v1.0.0rc2/mmcls/models/backbones/deit3.py Version: v1.0.0rc2 Models: diff --git a/configs/densecl/README.md b/configs/densecl/README.md index 911ca2c9..d1e1295d 100644 --- a/configs/densecl/README.md +++ b/configs/densecl/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/densenet/README.md b/configs/densenet/README.md index 58c4b67e..774ba574 100644 --- a/configs/densenet/README.md +++ b/configs/densenet/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/edgenext/README.md b/configs/edgenext/README.md index 5564e3d5..2906a171 100644 --- a/configs/edgenext/README.md +++ b/configs/edgenext/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). 
Test: diff --git a/configs/edgenext/metafile.yml b/configs/edgenext/metafile.yml index 0a332b52..e69ac174 100644 --- a/configs/edgenext/metafile.yml +++ b/configs/edgenext/metafile.yml @@ -12,7 +12,7 @@ Collections: README: configs/edgenext/README.md Code: Version: v1.0.0rc1 - URL: https://github.com/open-mmlab/mmclassification/blob/v0.23.2/mmcls/models/backbones/edgenext.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.23.2/mmcls/models/backbones/edgenext.py Models: - Name: edgenext-xxsmall_3rdparty_in1k diff --git a/configs/efficientformer/README.md b/configs/efficientformer/README.md index 557a5b54..4ef79225 100644 --- a/configs/efficientformer/README.md +++ b/configs/efficientformer/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/efficientformer/metafile.yml b/configs/efficientformer/metafile.yml index 757567f3..5c70f07e 100644 --- a/configs/efficientformer/metafile.yml +++ b/configs/efficientformer/metafile.yml @@ -13,7 +13,7 @@ Collections: README: configs/efficientformer/README.md Code: Version: v1.0.0rc1 - URL: https://github.com/open-mmlab/mmclassification/blob/v1.0.0rc1/configs/efficientformer/metafile.yml + URL: https://github.com/open-mmlab/mmpretrain/blob/v1.0.0rc1/configs/efficientformer/metafile.yml Models: - Name: efficientformer-l1_3rdparty_8xb128_in1k diff --git a/configs/efficientnet/README.md b/configs/efficientnet/README.md index 07caea95..e4417403 100644 --- a/configs/efficientnet/README.md +++ b/configs/efficientnet/README.md @@ -56,7 +56,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/efficientnet/metafile.yml b/configs/efficientnet/metafile.yml index ddfa71db..21130c4f 100644 --- a/configs/efficientnet/metafile.yml +++ b/configs/efficientnet/metafile.yml @@ -18,7 +18,7 @@ Collections: README: configs/efficientnet/README.md Code: Version: v0.20.1 - URL: https://github.com/open-mmlab/mmclassification/blob/v0.20.1/mmcls/models/backbones/efficientnet.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.20.1/mmcls/models/backbones/efficientnet.py Models: - Name: efficientnet-b0_3rdparty_8xb32_in1k diff --git a/configs/efficientnet_v2/README.md b/configs/efficientnet_v2/README.md index ea833075..4b8ccee4 100644 --- a/configs/efficientnet_v2/README.md +++ b/configs/efficientnet_v2/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). 
Test: diff --git a/configs/efficientnet_v2/metafile.yml b/configs/efficientnet_v2/metafile.yml index cfbdd5f3..6c927dce 100644 --- a/configs/efficientnet_v2/metafile.yml +++ b/configs/efficientnet_v2/metafile.yml @@ -17,7 +17,7 @@ Collections: Title: "EfficientNetV2: Smaller Models and Faster Training" README: configs/efficientnet_v2/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/dev-1.x/mmcls/models/backbones/beit.py + URL: https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/models/backbones/beit.py Version: v1.0.0rc4 Models: diff --git a/configs/eva/README.md b/configs/eva/README.md index 2498f3a2..5d2820ff 100644 --- a/configs/eva/README.md +++ b/configs/eva/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/hornet/README.md b/configs/hornet/README.md index 528cb421..20367c18 100644 --- a/configs/hornet/README.md +++ b/configs/hornet/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/hornet/metafile.yml b/configs/hornet/metafile.yml index 06b0ccbb..eba0ed2f 100644 --- a/configs/hornet/metafile.yml +++ b/configs/hornet/metafile.yml @@ -14,7 +14,7 @@ Collections: README: configs/hornet/README.md Code: Version: v0.24.0 - URL: https://github.com/open-mmlab/mmclassification/blob/v0.24.0/mmcls/models/backbones/hornet.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.24.0/mmcls/models/backbones/hornet.py Models: - Name: hornet-tiny_3rdparty_in1k diff --git a/configs/hrnet/README.md b/configs/hrnet/README.md index 6642efd9..4ada7810 100644 --- a/configs/hrnet/README.md +++ b/configs/hrnet/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/hrnet/metafile.yml b/configs/hrnet/metafile.yml index 64fe1422..3a17b125 100644 --- a/configs/hrnet/metafile.yml +++ b/configs/hrnet/metafile.yml @@ -12,7 +12,7 @@ Collections: Title: "Deep High-Resolution Representation Learning for Visual Recognition" README: configs/hrnet/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.20.1/mmcls/models/backbones/hrnet.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.20.1/mmcls/models/backbones/hrnet.py Version: v0.20.1 Models: diff --git a/configs/inception_v3/README.md b/configs/inception_v3/README.md index 3ebe8d22..4fea38c9 100644 --- a/configs/inception_v3/README.md +++ b/configs/inception_v3/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). 
+Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/inception_v3/metafile.yml b/configs/inception_v3/metafile.yml index c127ca66..0b556dec 100644 --- a/configs/inception_v3/metafile.yml +++ b/configs/inception_v3/metafile.yml @@ -15,7 +15,7 @@ Collections: Title: "Rethinking the Inception Architecture for Computer Vision" README: configs/inception_v3/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v1.0.0rc1/configs/inception_v3/metafile.yml + URL: https://github.com/open-mmlab/mmpretrain/blob/v1.0.0rc1/configs/inception_v3/metafile.yml Version: v1.0.0rc1 Models: diff --git a/configs/levit/README.md b/configs/levit/README.md index 6594910d..9586ac28 100644 --- a/configs/levit/README.md +++ b/configs/levit/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/levit/metafile.yml b/configs/levit/metafile.yml index 41e9fb15..78b62c5c 100644 --- a/configs/levit/metafile.yml +++ b/configs/levit/metafile.yml @@ -10,7 +10,7 @@ Collections: URL: https://arxiv.org/abs/2104.01136 README: configs/levit/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/dev-1.x/mmcls/models/backbones/levit.py + URL: https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/models/backbones/levit.py Version: v1.0.0rc5 Models: diff --git a/configs/mae/README.md b/configs/mae/README.md index 66001264..69f5f9bf 100644 --- a/configs/mae/README.md +++ b/configs/mae/README.md @@ -58,7 +58,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/maskfeat/README.md b/configs/maskfeat/README.md index 603a55dc..d25b32bb 100644 --- a/configs/maskfeat/README.md +++ b/configs/maskfeat/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/milan/README.md b/configs/milan/README.md index 13655ef8..e1fe2289 100644 --- a/configs/milan/README.md +++ b/configs/milan/README.md @@ -61,7 +61,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). 
Train: diff --git a/configs/mixmim/README.md b/configs/mixmim/README.md index 95fe5e31..e07f5011 100644 --- a/configs/mixmim/README.md +++ b/configs/mixmim/README.md @@ -60,7 +60,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/mixmim/metafile.yml b/configs/mixmim/metafile.yml index 66ebcd2d..5bf87bda 100644 --- a/configs/mixmim/metafile.yml +++ b/configs/mixmim/metafile.yml @@ -17,7 +17,7 @@ Collections: URL: https://arxiv.org/abs/2205.13137 README: configs/mixmim/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/dev-1.x/mmcls/models/backbones/mixmim.py + URL: https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/models/backbones/mixmim.py Version: v1.0.0rc4 Models: diff --git a/configs/mlp_mixer/README.md b/configs/mlp_mixer/README.md index 3d39ccd6..3f10a583 100644 --- a/configs/mlp_mixer/README.md +++ b/configs/mlp_mixer/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/mlp_mixer/metafile.yml b/configs/mlp_mixer/metafile.yml index e8efa085..8b632db1 100644 --- a/configs/mlp_mixer/metafile.yml +++ b/configs/mlp_mixer/metafile.yml @@ -11,7 +11,7 @@ Collections: Title: "MLP-Mixer: An all-MLP Architecture for Vision" README: configs/mlp_mixer/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.18.0/mmcls/models/backbones/mlp_mixer.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.18.0/mmcls/models/backbones/mlp_mixer.py Version: v0.18.0 Models: diff --git a/configs/mobilenet_v2/README.md b/configs/mobilenet_v2/README.md index 02e65840..74548e19 100644 --- a/configs/mobilenet_v2/README.md +++ b/configs/mobilenet_v2/README.md @@ -57,7 +57,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). 
Train: diff --git a/configs/mobilenet_v2/metafile.yml b/configs/mobilenet_v2/metafile.yml index e16557fb..aaa490ae 100644 --- a/configs/mobilenet_v2/metafile.yml +++ b/configs/mobilenet_v2/metafile.yml @@ -15,7 +15,7 @@ Collections: Title: "MobileNetV2: Inverted Residuals and Linear Bottlenecks" README: configs/mobilenet_v2/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/mobilenet_v2.py#L101 + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/mobilenet_v2.py#L101 Version: v0.15.0 Models: diff --git a/configs/mobilenet_v3/README.md b/configs/mobilenet_v3/README.md index 1fd59fea..dcf7f4c4 100644 --- a/configs/mobilenet_v3/README.md +++ b/configs/mobilenet_v3/README.md @@ -55,7 +55,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/mobilenet_v3/metafile.yml b/configs/mobilenet_v3/metafile.yml index 048e5f47..53f16536 100644 --- a/configs/mobilenet_v3/metafile.yml +++ b/configs/mobilenet_v3/metafile.yml @@ -15,7 +15,7 @@ Collections: Title: Searching for MobileNetV3 README: configs/mobilenet_v3/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/mobilenet_v3.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/mobilenet_v3.py Version: v0.15.0 Models: diff --git a/configs/mobileone/README.md b/configs/mobileone/README.md index 3c3fac5d..e753aff9 100644 --- a/configs/mobileone/README.md +++ b/configs/mobileone/README.md @@ -58,7 +58,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/mobileone/metafile.yml b/configs/mobileone/metafile.yml index 2a480dcd..70370da0 100644 --- a/configs/mobileone/metafile.yml +++ b/configs/mobileone/metafile.yml @@ -12,7 +12,7 @@ Collections: Title: 'An Improved One millisecond Mobile Backbone' README: configs/mobileone/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v1.0.0rc1/configs/mobileone/metafile.yml + URL: https://github.com/open-mmlab/mmpretrain/blob/v1.0.0rc1/configs/mobileone/metafile.yml Version: v1.0.0rc1 Models: diff --git a/configs/mobilevit/README.md b/configs/mobilevit/README.md index 28319d0c..28f6c050 100644 --- a/configs/mobilevit/README.md +++ b/configs/mobilevit/README.md @@ -62,7 +62,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). 
Test: diff --git a/configs/mocov2/README.md b/configs/mocov2/README.md index bc8bc17e..cb0ae4ee 100644 --- a/configs/mocov2/README.md +++ b/configs/mocov2/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/mocov3/README.md b/configs/mocov3/README.md index 22a99c3a..a9477e8a 100644 --- a/configs/mocov3/README.md +++ b/configs/mocov3/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/mvit/README.md b/configs/mvit/README.md index e3354166..8428e14f 100644 --- a/configs/mvit/README.md +++ b/configs/mvit/README.md @@ -50,7 +50,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/mvit/metafile.yml b/configs/mvit/metafile.yml index 4dfae023..c16f4f88 100644 --- a/configs/mvit/metafile.yml +++ b/configs/mvit/metafile.yml @@ -14,7 +14,7 @@ Collections: Title: 'MViTv2: Improved Multiscale Vision Transformers for Classification and Detection' README: configs/mvit/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.24.0/mmcls/models/backbones/mvit.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.24.0/mmcls/models/backbones/mvit.py Version: v0.24.0 Models: diff --git a/configs/poolformer/README.md b/configs/poolformer/README.md index ac79feaa..fda14627 100644 --- a/configs/poolformer/README.md +++ b/configs/poolformer/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/poolformer/metafile.yml b/configs/poolformer/metafile.yml index d0e40d2e..55285ddd 100644 --- a/configs/poolformer/metafile.yml +++ b/configs/poolformer/metafile.yml @@ -12,7 +12,7 @@ Collections: README: configs/poolformer/README.md Code: Version: v0.22.1 - URL: https://github.com/open-mmlab/mmclassification/blob/v0.22.1/mmcls/models/backbones/poolformer.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.22.1/mmcls/models/backbones/poolformer.py Models: - Name: poolformer-s12_3rdparty_32xb128_in1k diff --git a/configs/regnet/README.md b/configs/regnet/README.md index 59c88363..63031f4e 100644 --- a/configs/regnet/README.md +++ b/configs/regnet/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). 
+Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/regnet/metafile.yml b/configs/regnet/metafile.yml index 6b301abb..4796a9f4 100644 --- a/configs/regnet/metafile.yml +++ b/configs/regnet/metafile.yml @@ -12,7 +12,7 @@ Collections: Title: Designing Network Design Spaces README: configs/regnet/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.18.0/mmcls/models/backbones/regnet.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.18.0/mmcls/models/backbones/regnet.py Version: v0.18.0 Models: diff --git a/configs/replknet/README.md b/configs/replknet/README.md index 23a6b0b9..49aaa77b 100644 --- a/configs/replknet/README.md +++ b/configs/replknet/README.md @@ -45,7 +45,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/replknet/metafile.yml b/configs/replknet/metafile.yml index 05f19b79..f9f37449 100644 --- a/configs/replknet/metafile.yml +++ b/configs/replknet/metafile.yml @@ -10,7 +10,7 @@ Collections: Title: 'Scaling Up Your Kernels to 31x31: Revisiting Large Kernel Design in CNNs' README: configs/replknet/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v1.0.0rc3/mmcls/models/backbones/replknet.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v1.0.0rc3/mmcls/models/backbones/replknet.py Version: v1.0.0rc3 Models: diff --git a/configs/repmlp/README.md b/configs/repmlp/README.md index f40350f4..73cb6123 100644 --- a/configs/repmlp/README.md +++ b/configs/repmlp/README.md @@ -45,7 +45,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/repmlp/metafile.yml b/configs/repmlp/metafile.yml index f498e494..7f391e04 100644 --- a/configs/repmlp/metafile.yml +++ b/configs/repmlp/metafile.yml @@ -10,7 +10,7 @@ Collections: Title: 'RepMLP: Re-parameterizing Convolutions into Fully-connected Layers for Image Recognition' README: configs/repmlp/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.21.0/mmcls/models/backbones/repmlp.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.21.0/mmcls/models/backbones/repmlp.py Version: v0.21.0 Models: diff --git a/configs/repvgg/README.md b/configs/repvgg/README.md index 59c7733f..c4b73c69 100644 --- a/configs/repvgg/README.md +++ b/configs/repvgg/README.md @@ -61,7 +61,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). 
Train: diff --git a/configs/repvgg/metafile.yml b/configs/repvgg/metafile.yml index 8c550729..e93250ae 100644 --- a/configs/repvgg/metafile.yml +++ b/configs/repvgg/metafile.yml @@ -10,7 +10,7 @@ Collections: Title: 'RepVGG: Making VGG-style ConvNets Great Again' README: configs/repvgg/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.16.0/mmcls/models/backbones/repvgg.py#L257 + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.16.0/mmcls/models/backbones/repvgg.py#L257 Version: v0.16.0 Models: diff --git a/configs/res2net/README.md b/configs/res2net/README.md index 1c32d517..bb8b5f1b 100644 --- a/configs/res2net/README.md +++ b/configs/res2net/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/res2net/metafile.yml b/configs/res2net/metafile.yml index 8d850b69..b19b102f 100644 --- a/configs/res2net/metafile.yml +++ b/configs/res2net/metafile.yml @@ -16,7 +16,7 @@ Collections: URL: https://arxiv.org/abs/1904.01169 README: configs/res2net/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.17.0/mmcls/models/backbones/res2net.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.17.0/mmcls/models/backbones/res2net.py Version: v0.17.0 Models: diff --git a/configs/resnet/README.md b/configs/resnet/README.md index 8ff262c6..286b7738 100644 --- a/configs/resnet/README.md +++ b/configs/resnet/README.md @@ -67,7 +67,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/resnet/metafile.yml b/configs/resnet/metafile.yml index 29aa84df..16387248 100644 --- a/configs/resnet/metafile.yml +++ b/configs/resnet/metafile.yml @@ -15,7 +15,7 @@ Collections: Title: "Deep Residual Learning for Image Recognition" README: configs/resnet/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/resnet.py#L383 + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/resnet.py#L383 Version: v0.15.0 Models: diff --git a/configs/resnext/README.md b/configs/resnext/README.md index 6923dc1d..b901b31b 100644 --- a/configs/resnext/README.md +++ b/configs/resnext/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). 
Train: diff --git a/configs/resnext/metafile.yml b/configs/resnext/metafile.yml index c68e7f9d..71283288 100644 --- a/configs/resnext/metafile.yml +++ b/configs/resnext/metafile.yml @@ -15,7 +15,7 @@ Collections: Title: "Aggregated Residual Transformations for Deep Neural Networks" README: configs/resnext/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/resnext.py#L90 + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/resnext.py#L90 Version: v0.15.0 Models: diff --git a/configs/revvit/README.md b/configs/revvit/README.md index 2faaa306..ac2415cc 100644 --- a/configs/revvit/README.md +++ b/configs/revvit/README.md @@ -57,7 +57,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/revvit/metafile.yml b/configs/revvit/metafile.yml index 7c3eb4d1..842de071 100644 --- a/configs/revvit/metafile.yml +++ b/configs/revvit/metafile.yml @@ -11,7 +11,7 @@ Collections: README: configs/revvit/README.md Code: Version: v1.0.0rc5 - URL: https://github.com/open-mmlab/mmclassification/blob/1.0.0rc5/mmcls/models/backbones/revvit.py + URL: https://github.com/open-mmlab/mmpretrain/blob/1.0.0rc5/mmcls/models/backbones/revvit.py Models: - Name: revvit-small_3rdparty_in1k diff --git a/configs/seresnet/README.md b/configs/seresnet/README.md index e135dc15..b5151ccd 100644 --- a/configs/seresnet/README.md +++ b/configs/seresnet/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/seresnet/metafile.yml b/configs/seresnet/metafile.yml index 7d2a3810..1a9f116d 100644 --- a/configs/seresnet/metafile.yml +++ b/configs/seresnet/metafile.yml @@ -15,7 +15,7 @@ Collections: Title: "Squeeze-and-Excitation Networks" README: configs/seresnet/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/seresnet.py#L58 + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/seresnet.py#L58 Version: v0.15.0 Models: diff --git a/configs/shufflenet_v1/README.md b/configs/shufflenet_v1/README.md index 080ddf18..618a22d7 100644 --- a/configs/shufflenet_v1/README.md +++ b/configs/shufflenet_v1/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). 
Train: diff --git a/configs/shufflenet_v1/metafile.yml b/configs/shufflenet_v1/metafile.yml index 2cfffa10..e3ca1393 100644 --- a/configs/shufflenet_v1/metafile.yml +++ b/configs/shufflenet_v1/metafile.yml @@ -16,7 +16,7 @@ Collections: Title: "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" README: configs/shufflenet_v1/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/shufflenet_v1.py#L152 + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/shufflenet_v1.py#L152 Version: v0.15.0 Models: diff --git a/configs/shufflenet_v2/README.md b/configs/shufflenet_v2/README.md index ee3724f3..804aac18 100644 --- a/configs/shufflenet_v2/README.md +++ b/configs/shufflenet_v2/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/shufflenet_v2/metafile.yml b/configs/shufflenet_v2/metafile.yml index a06322dd..9c1eebc5 100644 --- a/configs/shufflenet_v2/metafile.yml +++ b/configs/shufflenet_v2/metafile.yml @@ -16,7 +16,7 @@ Collections: Title: "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" README: configs/shufflenet_v2/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/shufflenet_v2.py#L134 + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/shufflenet_v2.py#L134 Version: v0.15.0 Models: diff --git a/configs/simclr/README.md b/configs/simclr/README.md index 0bcdeb77..17d0de2b 100644 --- a/configs/simclr/README.md +++ b/configs/simclr/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/simmim/README.md b/configs/simmim/README.md index 7ac960d0..3e44b079 100644 --- a/configs/simmim/README.md +++ b/configs/simmim/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/simsiam/README.md b/configs/simsiam/README.md index 6c575d5c..117e45bf 100644 --- a/configs/simsiam/README.md +++ b/configs/simsiam/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). 
Train: diff --git a/configs/swav/README.md b/configs/swav/README.md index 71f91943..fdcdfeb2 100644 --- a/configs/swav/README.md +++ b/configs/swav/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/swin_transformer/README.md b/configs/swin_transformer/README.md index 3ee74242..7e00b13c 100644 --- a/configs/swin_transformer/README.md +++ b/configs/swin_transformer/README.md @@ -57,7 +57,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/swin_transformer/metafile.yml b/configs/swin_transformer/metafile.yml index 1bc44f9d..8bff5992 100644 --- a/configs/swin_transformer/metafile.yml +++ b/configs/swin_transformer/metafile.yml @@ -15,7 +15,7 @@ Collections: Title: "Swin Transformer: Hierarchical Vision Transformer using Shifted Windows" README: configs/swin_transformer/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/swin_transformer.py#L176 + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/swin_transformer.py#L176 Version: v0.15.0 Models: diff --git a/configs/swin_transformer_v2/README.md b/configs/swin_transformer_v2/README.md index b9e6a2b7..b1bfd30e 100644 --- a/configs/swin_transformer_v2/README.md +++ b/configs/swin_transformer_v2/README.md @@ -6,7 +6,7 @@ ## Introduction -**Swin Transformer V2** is a work on the scale up visual model based on [Swin Transformer](https://github.com/open-mmlab/mmclassification/tree/1.x/configs/swin_transformer). In the visual field, We can not increase the performance by just simply scaling up the visual model like NLP models. The possible reasons mentioned in the article are: +**Swin Transformer V2** is a work on the scale up visual model based on [Swin Transformer](https://github.com/open-mmlab/mmpretrain/tree/main/configs/swin_transformer). In the visual field, We can not increase the performance by just simply scaling up the visual model like NLP models. The possible reasons mentioned in the article are: - Training instability when increasing the vision model - Migrating the model trained at low resolution to a larger scale resolution task @@ -67,7 +67,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/t2t_vit/README.md b/configs/t2t_vit/README.md index ed5b5798..bf0967cf 100644 --- a/configs/t2t_vit/README.md +++ b/configs/t2t_vit/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). 
+Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/t2t_vit/metafile.yml b/configs/t2t_vit/metafile.yml index f2125426..72cb2dfc 100644 --- a/configs/t2t_vit/metafile.yml +++ b/configs/t2t_vit/metafile.yml @@ -13,7 +13,7 @@ Collections: Title: "Tokens-to-Token ViT: Training Vision Transformers from Scratch on ImageNet" README: configs/t2t_vit/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.17.0/mmcls/models/backbones/t2t_vit.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.17.0/mmcls/models/backbones/t2t_vit.py Version: v0.17.0 Models: diff --git a/configs/tinyvit/README.md b/configs/tinyvit/README.md index 8b4fabf2..3354788f 100644 --- a/configs/tinyvit/README.md +++ b/configs/tinyvit/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/tinyvit/metafile.yml b/configs/tinyvit/metafile.yml index 402a7fc4..a1c5438a 100644 --- a/configs/tinyvit/metafile.yml +++ b/configs/tinyvit/metafile.yml @@ -11,7 +11,7 @@ Collections: README: configs/tinyvit/README.md Code: Version: v1.0.0rc1 - URL: https://github.com/open-mmlab/mmclassification/blob/v0.23.2/mmcls/models/backbones/tinyvit.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.23.2/mmcls/models/backbones/tinyvit.py Models: - Name: tinyvit-5m_3rdparty_in1k diff --git a/configs/tnt/README.md b/configs/tnt/README.md index afa4f626..bb21f791 100644 --- a/configs/tnt/README.md +++ b/configs/tnt/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/tnt/metafile.yml b/configs/tnt/metafile.yml index 67f3c782..dcc2eddb 100644 --- a/configs/tnt/metafile.yml +++ b/configs/tnt/metafile.yml @@ -7,7 +7,7 @@ Collections: Title: "Transformer in Transformer" README: configs/tnt/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/tnt.py#L203 + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/tnt.py#L203 Version: v0.15.0 Models: diff --git a/configs/twins/README.md b/configs/twins/README.md index 82383c8c..f8a97202 100644 --- a/configs/twins/README.md +++ b/configs/twins/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). 
Test: diff --git a/configs/twins/metafile.yml b/configs/twins/metafile.yml index f8a7d819..d0d8ff4a 100644 --- a/configs/twins/metafile.yml +++ b/configs/twins/metafile.yml @@ -12,7 +12,7 @@ Collections: Title: "Twins: Revisiting the Design of Spatial Attention in Vision Transformers" README: configs/twins/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.20.1/mmcls/models/backbones/twins.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.20.1/mmcls/models/backbones/twins.py Version: v0.20.1 Models: diff --git a/configs/van/README.md b/configs/van/README.md index a71f10cb..b0272022 100644 --- a/configs/van/README.md +++ b/configs/van/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/van/metafile.yml b/configs/van/metafile.yml index fc3a6f84..db5a6e64 100644 --- a/configs/van/metafile.yml +++ b/configs/van/metafile.yml @@ -12,7 +12,7 @@ Collections: Title: "Visual Attention Network" README: configs/van/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.23.0/mmcls/models/backbones/van.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.23.0/mmcls/models/backbones/van.py Version: v0.23.0 Models: diff --git a/configs/vgg/README.md b/configs/vgg/README.md index 0782399b..7af69ce6 100644 --- a/configs/vgg/README.md +++ b/configs/vgg/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/vgg/metafile.yml b/configs/vgg/metafile.yml index ff8d8987..ce3af191 100644 --- a/configs/vgg/metafile.yml +++ b/configs/vgg/metafile.yml @@ -15,7 +15,7 @@ Collections: Title: "Very Deep Convolutional Networks for Large-Scale Image Recognition" README: configs/vgg/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/vgg.py#L39 + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/vgg.py#L39 Version: v0.15.0 Models: diff --git a/configs/vig/README.md b/configs/vig/README.md index 7798c350..ae6cf566 100644 --- a/configs/vig/README.md +++ b/configs/vig/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/vision_transformer/README.md b/configs/vision_transformer/README.md index a93eccae..b97336ec 100644 --- a/configs/vision_transformer/README.md +++ b/configs/vision_transformer/README.md @@ -58,7 +58,7 @@ print(type(feats)) **Train/Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). 
+Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Train: diff --git a/configs/vision_transformer/metafile.yml b/configs/vision_transformer/metafile.yml index 51519fdb..891c413a 100644 --- a/configs/vision_transformer/metafile.yml +++ b/configs/vision_transformer/metafile.yml @@ -17,7 +17,7 @@ Collections: URL: https://arxiv.org/abs/2010.11929 README: configs/vision_transformer/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.17.0/mmcls/models/backbones/vision_transformer.py + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.17.0/mmcls/models/backbones/vision_transformer.py Version: v0.17.0 Models: diff --git a/configs/wrn/README.md b/configs/wrn/README.md index 34d775ad..28e178f2 100644 --- a/configs/wrn/README.md +++ b/configs/wrn/README.md @@ -43,7 +43,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/configs/wrn/metafile.yml b/configs/wrn/metafile.yml index cc37eefd..75e34672 100644 --- a/configs/wrn/metafile.yml +++ b/configs/wrn/metafile.yml @@ -23,7 +23,7 @@ Collections: Title: "Wide Residual Networks" README: configs/wrn/README.md Code: - URL: https://github.com/open-mmlab/mmclassification/blob/v0.20.1/mmcls/models/backbones/resnet.py#L383 + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.20.1/mmcls/models/backbones/resnet.py#L383 Version: v0.20.1 Models: diff --git a/configs/xcit/README.md b/configs/xcit/README.md index 2dc4c8f7..9008c1fc 100644 --- a/configs/xcit/README.md +++ b/configs/xcit/README.md @@ -33,7 +33,7 @@ print(type(feats)) **Test Command** -Prepare your dataset according to the [docs](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset). +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). Test: diff --git a/demo/image_demo.py b/demo/image_demo.py index 9c96a74f..01587350 100644 --- a/demo/image_demo.py +++ b/demo/image_demo.py @@ -32,7 +32,7 @@ def main(): raise ValueError( f'Unavailable model "{args.model}", you can specify find a model ' 'name or a config file or find a model name from ' - 'https://mmpretrain.readthedocs.io/en/1.x/modelzoo_statistics.html#all-checkpoints' # noqa: E501 + 'https://mmpretrain.readthedocs.io/en/latest/modelzoo_statistics.html#all-checkpoints' # noqa: E501 ) result = inferencer(args.img, show=args.show, show_dir=args.show_dir)[0] # show the results diff --git a/docker/Dockerfile b/docker/Dockerfile index a1687d60..5f7df525 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -21,6 +21,6 @@ RUN pip install openmim # Install MMPretrain RUN conda clean --all -RUN git clone -b pretrain https://github.com/open-mmlab/mmclassification.git mmpretrain +RUN git clone https://github.com/open-mmlab/mmpretrain.git WORKDIR ./mmpretrain RUN mim install --no-cache-dir -e . diff --git a/docs/en/_templates/404.html b/docs/en/_templates/404.html index 869dba17..639d2559 100644 --- a/docs/en/_templates/404.html +++ b/docs/en/_templates/404.html @@ -7,10 +7,12 @@ The page you are looking for cannot be found.

- If you just switched documentation versions, it is likely that the page you were on is moved. You can look for it in the content table left, or go to the homepage. + If you just switched documentation versions, it is likely that the page you were on has been moved. You can look for it in + the table of contents on the left, or go to the homepage.

- If you cannot find documentation you want, please open an issue to tell us! + If you cannot find the documentation you want, please open an issue to tell us!

{% endblock %} diff --git a/docs/en/advanced_guides/datasets.md b/docs/en/advanced_guides/datasets.md index 1a8d993d..1a018e44 100644 --- a/docs/en/advanced_guides/datasets.md +++ b/docs/en/advanced_guides/datasets.md @@ -1,7 +1,7 @@ # Adding New Dataset You can write a new dataset class inherited from `BaseDataset`, and overwrite `load_data_list(self)`, -like [CIFAR10](https://github.com/open-mmlab/mmclassification/blob/pretrain/mmpretrain/datasets/cifar.py) and [ImageNet](https://github.com/open-mmlab/mmclassification/blob/pretrain/mmpretrain/datasets/imagenet.py). +like [CIFAR10](https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/datasets/cifar.py) and [ImageNet](https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/datasets/imagenet.py). Typically, this function returns a list, where each sample is a dict, containing necessary data information, e.g., `img` and `gt_label`. Assume we are going to implement a `Filelist` dataset, which takes filelists for both training and testing. The format of annotation list is as follows: @@ -65,7 +65,7 @@ train_dataloader = dict( ) ``` -All the dataset classes inherit from [`BaseDataset`](https://github.com/open-mmlab/mmclassification/blob/pretrain/mmpretrain/datasets/base_dataset.py) have **lazy loading** and **memory saving** features, you can refer to related documents of {external+mmengine:doc}`BaseDataset `. +All the dataset classes inherit from [`BaseDataset`](https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/datasets/base_dataset.py) have **lazy loading** and **memory saving** features, you can refer to related documents of {external+mmengine:doc}`BaseDataset `. ```{note} If the dictionary of the data sample contains 'img_path' but not 'img', then 'LoadImgFromFile' transform must be added in the pipeline. diff --git a/docs/en/advanced_guides/schedule.md b/docs/en/advanced_guides/schedule.md index a2530ae9..f0207592 100644 --- a/docs/en/advanced_guides/schedule.md +++ b/docs/en/advanced_guides/schedule.md @@ -1,6 +1,6 @@ # Customize Training Schedule -In our codebase, [default training schedules](https://github.com/open-mmlab/mmclassification/blob/pretrain/configs/_base_/schedules) have been provided for common datasets such as CIFAR, ImageNet, etc. If we attempt to experiment on these datasets for higher accuracy or on different new methods and datasets, we might possibly need to modify the strategies. +In our codebase, [default training schedules](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/schedules) have been provided for common datasets such as CIFAR, ImageNet, etc. If we attempt to experiment on these datasets for higher accuracy or on different new methods and datasets, we might possibly need to modify the strategies. In this tutorial, we will introduce how to modify configs to construct optimizers, use parameter-wise finely configuration, gradient clipping, gradient accumulation as well as customize learning rate and momentum schedules. Furthermore, introduce a template to customize self-implemented optimizationmethods for the project. diff --git a/docs/en/api/data_process.rst b/docs/en/api/data_process.rst index 29f817b2..8c015909 100644 --- a/docs/en/api/data_process.rst +++ b/docs/en/api/data_process.rst @@ -4,7 +4,7 @@ Data Process ================= -In MMClassification, the data process and the dataset is decomposed. The +In MMPreTrain, the data process and the dataset is decomposed. The datasets only define how to get samples' basic information from the file system. 
These basic information includes the ground-truth label and raw images data / the paths of images.The data process includes data transforms, @@ -154,7 +154,7 @@ The data preprocessor is also a component to process the data before feeding dat Comparing with the data transforms, the data preprocessor is a module of the classifier, and it takes a batch of data to process, which means it can use GPU and batch to accelebrate the processing. -The default data preprocessor in MMClassification could do the pre-processing like following: +The default data preprocessor in MMPreTrain could do the pre-processing like following: 1. Move data to the target device. 2. Pad inputs to the maximum size of current batch. @@ -235,7 +235,7 @@ You can also specify the probabilities of every batch augmentation by the ``prob ], probs=[0.3, 0.7]) ) -Here is a list of batch augmentations can be used in MMClassification. +Here is a list of batch augmentations can be used in MMPreTrain. .. autosummary:: :toctree: generated diff --git a/docs/en/api/datasets.rst b/docs/en/api/datasets.rst index daac3ce2..fb390e9c 100644 --- a/docs/en/api/datasets.rst +++ b/docs/en/api/datasets.rst @@ -71,7 +71,7 @@ Dataset Wrappers .. autoclass:: KFoldDataset -The dataset wrappers in the MMEngine can be directly used in MMClassification. +The dataset wrappers in the MMEngine can be directly used in MMPreTrain. .. list-table:: diff --git a/docs/en/api/models.rst b/docs/en/api/models.rst index 32c0a349..cc4397cb 100644 --- a/docs/en/api/models.rst +++ b/docs/en/api/models.rst @@ -13,7 +13,7 @@ The ``models`` package contains several sub-packages for addressing the differen - :mod:`~mmpretrain.models.retrievers`: The top-level module which defines the whole process of a retrieval model. - :mod:`~mmpretrain.models.backbones`: Usually a feature extraction network, e.g., ResNet, MobileNet. - :mod:`~mmpretrain.models.necks`: The component between backbones and heads, e.g., GlobalAveragePooling. -- :mod:`~mmpretrain.models.heads`: The component for specific tasks. In MMClassification, we provides heads for classification. +- :mod:`~mmpretrain.models.heads`: The component for specific tasks. - :mod:`~mmpretrain.models.losses`: Loss functions. - :mod:`~mmpretrain.models.utils`: Some helper functions and common components used in various networks. 
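The docs/en/advanced_guides/datasets.md hunk above describes subclassing `BaseDataset` and overriding `load_data_list(self)` to add a new dataset such as the `Filelist` example, but the class body itself falls outside the diff context. Below is a minimal sketch, assuming each annotation line has the form `relative/path.jpg <int label>` and that the registry lives at `mmpretrain.registry.DATASETS`; attribute names follow the mmengine `BaseDataset` convention and are not taken from this patch:

```python
import os.path as osp

from mmpretrain.datasets import BaseDataset
from mmpretrain.registry import DATASETS


@DATASETS.register_module()
class Filelist(BaseDataset):
    """Toy dataset reading `path label` pairs from an annotation file."""

    def load_data_list(self):
        # `ann_file` and `data_prefix` are set up by the BaseDataset __init__.
        img_prefix = self.data_prefix.get('img_path', '')
        data_list = []
        with open(self.ann_file) as f:
            for line in f:
                if not line.strip():
                    continue
                filename, gt_label = line.strip().rsplit(' ', 1)
                data_list.append(
                    dict(
                        img_path=osp.join(img_prefix, filename),
                        gt_label=int(gt_label),
                    ))
        return data_list
```

Once registered, such a class can be referenced from `train_dataloader` in a config via `dataset=dict(type='Filelist', ...)`.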
diff --git a/docs/en/conf.py b/docs/en/conf.py index 384922ef..31cc6f10 100644 --- a/docs/en/conf.py +++ b/docs/en/conf.py @@ -93,7 +93,7 @@ html_theme_options = { 'menu': [ { 'name': 'GitHub', - 'url': 'https://github.com/open-mmlab/mmclassification/tree/pretrain' + 'url': 'https://github.com/open-mmlab/mmpretrain' }, { 'name': 'Colab Tutorials', @@ -107,12 +107,12 @@ html_theme_options = { { 'name': 'Version', 'children': [ - {'name': 'MMPretrain 0.x', + {'name': 'MMPreTrain 0.x', + 'url': 'https://mmpretrain.readthedocs.io/en/0.x/', + 'description': '0.x branch'}, + {'name': 'MMPreTrain 1.x', 'url': 'https://mmpretrain.readthedocs.io/en/latest/', - 'description': 'master branch'}, - {'name': 'MMPretrain 1.x', - 'url': 'https://mmpretrain.readthedocs.io/en/dev-1.x/', - 'description': '1.x branch'}, + 'description': 'Main branch'}, ], } ], @@ -177,7 +177,7 @@ man_pages = [(root_doc, 'mmpretrain', 'MMPretrain Documentation', [author], 1)] # dir menu entry, description, category) texinfo_documents = [ (root_doc, 'mmpretrain', 'MMPretrain Documentation', author, 'mmpretrain', - 'OpenMMLab image classification toolbox and benchmark.', 'Miscellaneous'), + 'OpenMMLab pre-training toolbox and benchmark.', 'Miscellaneous'), ] # -- Options for Epub output ------------------------------------------------- diff --git a/docs/en/get_started.md b/docs/en/get_started.md index 9a66bd9a..4c4bfdc2 100644 --- a/docs/en/get_started.md +++ b/docs/en/get_started.md @@ -49,7 +49,7 @@ According to your needs, we support two install modes: In this case, install mmpretrain from source: ```shell -git clone -b pretrain https://github.com/open-mmlab/mmclassification.git mmpretrain +git clone https://github.com/open-mmlab/mmpretrain.git cd mmpretrain pip install -U openmim && mim install -e . ``` @@ -63,7 +63,7 @@ pip install -U openmim && mim install -e . Just install with mim. ```shell -pip install -U openmim && mim install "mmpretrain>=1.0.0rc5" +pip install -U openmim && mim install "mmpretrain>=1.0.0rc6" ``` ```{note} @@ -129,7 +129,7 @@ See [the Colab tutorial](https://colab.research.google.com/github/mzr1996/mmclas ### Using MMPretrain with Docker -We provide a [Dockerfile](https://github.com/open-mmlab/mmclassification/blob/pretrain/docker/Dockerfile) +We provide a [Dockerfile](https://github.com/open-mmlab/mmpretrain/blob/main/docker/Dockerfile) to build an image. Ensure that your [docker version](https://docs.docker.com/engine/install/) >=19.03. ```shell @@ -147,5 +147,5 @@ docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmpretrain/data mmpretrai ## Trouble shooting If you have some issues during the installation, please first view the [FAQ](./notes/faq.md) page. -You may [open an issue](https://github.com/open-mmlab/mmclassification/issues/new/choose) +You may [open an issue](https://github.com/open-mmlab/mmpretrain/issues/new/choose) on GitHub if no solution is found. diff --git a/docs/en/notes/faq.md b/docs/en/notes/faq.md index 10353b3f..23399501 100644 --- a/docs/en/notes/faq.md +++ b/docs/en/notes/faq.md @@ -4,7 +4,7 @@ We list some common troubles faced by many users and their corresponding solutions here. Feel free to enrich the list if you find any frequent issues and have ways to help others to solve them. 
If the contents here do not cover your issue, please create an issue using the -[provided templates](https://github.com/open-mmlab/mmclassification/issues/new/choose) +[provided templates](https://github.com/open-mmlab/mmpretrain/issues/new/choose) and make sure you fill in all required information in the template. ## Installation diff --git a/docs/en/notes/finetune_custom_dataset.md b/docs/en/notes/finetune_custom_dataset.md index 6f1b4512..4000268c 100644 --- a/docs/en/notes/finetune_custom_dataset.md +++ b/docs/en/notes/finetune_custom_dataset.md @@ -81,7 +81,7 @@ freeze the first two stages' parameters, just use the following configs: ```{note} Not all backbones support the `frozen_stages` argument by now. Please check -[the docs](https://mmpretrain.readthedocs.io/en/main/api.html#module-mmpretrain.models.backbones) +[the docs](https://mmpretrain.readthedocs.io/en/latest/api.html#module-mmpretrain.models.backbones) to confirm if your backbone supports it. ``` diff --git a/docs/en/stat.py b/docs/en/stat.py index 666e3fbb..86766be9 100755 --- a/docs/en/stat.py +++ b/docs/en/stat.py @@ -10,7 +10,7 @@ from tabulate import tabulate MMPT_ROOT = Path(__file__).absolute().parents[2] PAPERS_ROOT = Path('papers') # Path to save generated paper pages. -GITHUB_PREFIX = 'https://github.com/open-mmlab/mmpretrain/blob/1.x/' +GITHUB_PREFIX = 'https://github.com/open-mmlab/mmpretrain/blob/main/' MODELZOO_TEMPLATE = """\ # Model Zoo Summary diff --git a/docs/en/user_guides/config.md b/docs/en/user_guides/config.md index 62c6eb9c..6077c707 100644 --- a/docs/en/user_guides/config.md +++ b/docs/en/user_guides/config.md @@ -4,7 +4,7 @@ To manage various configurations in a deep-learning experiment, we use a kind of these configurations. This config system has a modular and inheritance design, and more details can be found in {external+mmengine:doc}`the tutorial in MMEngine `. -Usually, we use python files as config file. All configuration files are placed under the [`configs`](https://github.com/open-mmlab/mmclassification/tree/pretrain/configs) folder, and the directory structure is as follows: +Usually, we use python files as config file. All configuration files are placed under the [`configs`](https://github.com/open-mmlab/mmpretrain/tree/main/configs) folder, and the directory structure is as follows: ```text MMPretrain/ @@ -26,20 +26,20 @@ MMPretrain/ If you wish to inspect the config file, you may run `python tools/misc/print_config.py /PATH/TO/CONFIG` to see the complete config. -This article mainly explains the structure of configuration files, and how to modify it based on the existing configuration files. We will take [ResNet50 config file](https://github.com/open-mmlab/mmclassification/blob/pretrain/configs/resnet/resnet50_8xb32_in1k.py) as an example and explain it line by line. +This article mainly explains the structure of configuration files, and how to modify it based on the existing configuration files. We will take [ResNet50 config file](https://github.com/open-mmlab/mmpretrain/blob/main/configs/resnet/resnet50_8xb32_in1k.py) as an example and explain it line by line. 
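The docs/en/notes/finetune_custom_dataset.md hunk above points to the `frozen_stages` argument for freezing the first two stages of the backbone, but the config it introduces sits outside the diff context. A minimal sketch of such a fine-tuning config, assuming a ResNet50 backbone; the `_base_` files are those referenced elsewhere in this patch, while the checkpoint path and the 10-class head are placeholders:

```python
# Hypothetical fine-tuning config (a sketch, not part of this patch).
_base_ = [
    '../_base_/models/resnet50.py',
    '../_base_/datasets/imagenet_bs32.py',
    '../_base_/schedules/imagenet_bs256.py',
    '../_base_/default_runtime.py',
]

model = dict(
    backbone=dict(
        frozen_stages=2,  # freeze the parameters of the first two stages
        init_cfg=dict(
            type='Pretrained',
            checkpoint='PATH/OR/URL/TO/PRETRAINED_CHECKPOINT',  # placeholder
            prefix='backbone',
        )),
    head=dict(num_classes=10),  # placeholder: adapt the head to the new dataset
)
```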
## Config Structure There are four kinds of basic component files in the `configs/_base_` folders, namely: -- [models](https://github.com/open-mmlab/mmclassification/tree/pretrain/configs/_base_/models) -- [datasets](https://github.com/open-mmlab/mmclassification/tree/pretrain/configs/_base_/datasets) -- [schedules](https://github.com/open-mmlab/mmclassification/tree/pretrain/configs/_base_/schedules) -- [runtime](https://github.com/open-mmlab/mmclassification/blob/pretrain/configs/_base_/default_runtime.py) +- [models](https://github.com/open-mmlab/mmpretrain/tree/main/configs/_base_/models) +- [datasets](https://github.com/open-mmlab/mmpretrain/tree/main/configs/_base_/datasets) +- [schedules](https://github.com/open-mmlab/mmpretrain/tree/main/configs/_base_/schedules) +- [runtime](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/default_runtime.py) We call all the config files in the `_base_` folder as _primitive_ config files. You can easily build your training config file by inheriting some primitive config files. -For easy understanding, we use [ResNet50 config file](https://github.com/open-mmlab/mmclassification/blob/pretrain/configs/resnet/resnet50_8xb32_in1k.py) as an example and comment on each line. +For easy understanding, we use [ResNet50 config file](https://github.com/open-mmlab/mmpretrain/blob/main/configs/resnet/resnet50_8xb32_in1k.py) as an example and comment on each line. ```python _base_ = [ # This config file will inherit all config files in `_base_`. @@ -75,7 +75,7 @@ describe the initialization arguments as below: - `data_preprocessor`: The component before the model forwarding to preprocess the inputs. See the [documentation](mmpretrain.models.utils.data_preprocessor) for more details. - `train_cfg`: The extra settings of `ImageClassifier` during training. In `ImageClassifier`, we mainly use it to specify batch augmentation settings, like `Mixup` and `CutMix`. See the [documentation](mmpretrain.models.utils.batch_augments) for more details. -Following is the model primitive config of the ResNet50 config file in [`configs/_base_/models/resnet50.py`](https://github.com/open-mmlab/mmclassification/blob/pretrain/configs/_base_/models/resnet50.py): +Following is the model primitive config of the ResNet50 config file in [`configs/_base_/models/resnet50.py`](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/models/resnet50.py): ```python model = dict( @@ -83,7 +83,7 @@ model = dict( backbone=dict( type='ResNet', # The type of the backbone module. # All fields except `type` come from the __init__ method of class `ResNet` - # and you can find them from https://mmclassification.readthedocs.io/en/pretrain/api/generated/mmpretrain.models.backbones.ResNet.html + # and you can find them from https://mmpretrain.readthedocs.io/en/latest/api/generated/mmpretrain.models.backbones.ResNet.html depth=50, num_stages=4, out_indices=(3, ), @@ -93,7 +93,7 @@ model = dict( head=dict( type='LinearClsHead', # The type of the classification head module. 
# All fields except `type` come from the __init__ method of class `LinearClsHead` - # and you can find them from https://mmclassification.readthedocs.io/en/pretrain/api/generated/mmpretrain.models.heads.LinearClsHead.html + # and you can find them from https://mmpretrain.readthedocs.io/en/latest/api/generated/mmpretrain.models.heads.LinearClsHead.html num_classes=1000, in_channels=2048, loss=dict(type='CrossEntropyLoss', loss_weight=1.0), @@ -113,9 +113,9 @@ This primitive config file includes information to construct the dataloader and - `persistent_workers`: Whether to persistent workers after finishing one epoch. - `dataset`: The settings of the dataset. - `type`: The type of the dataset, we support `CustomDataset`, `ImageNet` and many other datasets, refer to [documentation](mmpretrain.datasets). - - `pipeline`: The data transform pipeline. You can find how to design a pipeline in [this tutorial](https://mmpretrain.readthedocs.io/en/1.x/tutorials/data_pipeline.html). + - `pipeline`: The data transform pipeline. You can find how to design a pipeline in [this tutorial](https://mmpretrain.readthedocs.io/en/latest/tutorials/data_pipeline.html). -Following is the data primitive config of the ResNet50 config in [`configs/_base_/datasets/imagenet_bs32.py`](https://github.com/open-mmlab/mmclassification/blob/pretrain/configs/_base_/datasets/imagenet_bs32.py): +Following is the data primitive config of the ResNet50 config in [`configs/_base_/datasets/imagenet_bs32.py`](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/datasets/imagenet_bs32.py): ```python dataset_type = 'ImageNet' @@ -193,7 +193,7 @@ test loops: - `param_scheduler`: Optimizer parameters policy. You can use it to specify learning rate and momentum curves during training. See the {external+mmengine:doc}`documentation ` in MMEngine for more details. - `train_cfg | val_cfg | test_cfg`: The settings of the training, validation and test loops, refer to the relevant {external+mmengine:doc}`MMEngine documentation `. -Following is the schedule primitive config of the ResNet50 config in [`configs/_base_/datasets/imagenet_bs32.py`](https://github.com/open-mmlab/mmclassification/blob/pretrain/configs/_base_/datasets/imagenet_bs32.py): +Following is the schedule primitive config of the ResNet50 config in [`configs/_base_/datasets/imagenet_bs32.py`](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/datasets/imagenet_bs32.py): ```python optim_wrapper = dict( @@ -223,7 +223,7 @@ auto_scale_lr = dict(base_batch_size=256) This part mainly includes saving the checkpoint strategy, log configuration, training parameters, breakpoint weight path, working directory, etc. -Here is the runtime primitive config file ['configs/_base_/default_runtime.py'](https://github.com/open-mmlab/mmclassification/blob/pretrain/configs/_base_/default_runtime.py) file used by almost all configs: +Here is the runtime primitive config file ['configs/_base_/default_runtime.py'](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/default_runtime.py) file used by almost all configs: ```python # defaults to use registries in mmpretrain @@ -371,7 +371,7 @@ param_scheduler = dict(type='CosineAnnealingLR', by_epoch=True, _delete_=True) Sometimes, you may refer to some fields in the `_base_` config, to avoid duplication of definitions. You can refer to {external+mmengine:doc}`MMEngine ` for some more instructions. 
-The following is an example of using auto augment in the training data preprocessing pipeline, refer to [`configs/resnest/resnest50_32xb64_in1k.py`](https://github.com/open-mmlab/mmclassification/blob/pretrain/configs/resnest/resnest50_32xb64_in1k.py). When defining `train_pipeline`, just add the definition file name of auto augment to `_base_`, and then use `_base_.auto_increasing_policies` to reference the variables in the primitive config: +The following is an example of using auto augment in the training data preprocessing pipeline, refer to [`configs/resnest/resnest50_32xb64_in1k.py`](https://github.com/open-mmlab/mmpretrain/blob/main/configs/resnest/resnest50_32xb64_in1k.py). When defining `train_pipeline`, just add the definition file name of auto augment to `_base_`, and then use `_base_.auto_increasing_policies` to reference the variables in the primitive config: ```python _base_ = [ diff --git a/docs/en/user_guides/inference.md b/docs/en/user_guides/inference.md index 80e5aa7d..86f5ec46 100644 --- a/docs/en/user_guides/inference.md +++ b/docs/en/user_guides/inference.md @@ -15,7 +15,7 @@ MMPretrain provides high-level Python APIs for inference on a given image: Here is an example of building the model and inference on a given image by using ImageNet-1k pre-trained checkpoint. ```{note} -You can use `wget https://github.com/open-mmlab/mmclassification/raw/master/demo/demo.JPEG` to download the example image or use your own image. +You can use `wget https://github.com/open-mmlab/mmpretrain/raw/main/demo/demo.JPEG` to download the example image or use your own image. ``` ```python @@ -35,4 +35,4 @@ result = inference_model(model, img_path) {"pred_label":65,"pred_score":0.6649366617202759,"pred_class":"sea snake", "pred_scores": [..., 0.6649366617202759, ...]} ``` -An image demo can be found in [demo/image_demo.py](https://github.com/open-mmlab/mmclassification/blob/pretrain/demo/image_demo.py). +An image demo can be found in [demo/image_demo.py](https://github.com/open-mmlab/mmpretrain/blob/main/demo/image_demo.py). diff --git a/docs/en/user_guides/test.md b/docs/en/user_guides/test.md index 0c4aa3aa..65ec073e 100644 --- a/docs/en/user_guides/test.md +++ b/docs/en/user_guides/test.md @@ -23,7 +23,7 @@ CUDA_VISIBLE_DEVICES=-1 python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [ | ARGS | Description | | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `CONFIG_FILE` | The path to the config file. | -| `CHECKPOINT_FILE` | The path to the checkpoint file (It can be a http link, and you can find checkpoints [here](https://mmclassification.readthedocs.io/en/1.x/modelzoo_statistics.html)). | +| `CHECKPOINT_FILE` | The path to the checkpoint file (It can be a http link, and you can find checkpoints [here](https://mmpretrain.readthedocs.io/en/latest/modelzoo_statistics.html)). | | `--work-dir WORK_DIR` | The directory to save the file containing evaluation metrics. | | `--out OUT` | The path to save the file containing test results. | | `--out-item OUT_ITEM` | To specify the content of the test results file, and it can be "pred" or "metrics". If "pred", save the outputs of the model for offline evaluation. If "metrics", save the evaluation metrics. Defaults to "pred". 
| @@ -44,12 +44,12 @@ We provide a shell script to start a multi-GPUs task with `torch.distributed.lau bash ./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [PY_ARGS] ``` -| ARGS | Description | -| ----------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `CONFIG_FILE` | The path to the config file. | -| `CHECKPOINT_FILE` | The path to the checkpoint file (It can be a http link, and you can find checkpoints [here](https://mmclassification.readthedocs.io/en/1.x/modelzoo_statistics.html)). | -| `GPU_NUM` | The number of GPUs to be used. | -| `[PY_ARGS]` | The other optional arguments of `tools/test.py`, see [here](#test-with-your-pc). | +| ARGS | Description | +| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `CONFIG_FILE` | The path to the config file. | +| `CHECKPOINT_FILE` | The path to the checkpoint file (It can be a http link, and you can find checkpoints [here](https://mmpretrain.readthedocs.io/en/latest/modelzoo_statistics.html)). | +| `GPU_NUM` | The number of GPUs to be used. | +| `[PY_ARGS]` | The other optional arguments of `tools/test.py`, see [here](#test-with-your-pc). | You can also specify extra arguments of the launcher by environment variables. For example, change the communication port of the launcher to 29666 by the below command: @@ -105,13 +105,13 @@ If you run MMPretrain on a cluster managed with [slurm](https://slurm.schedmd.co Here are the arguments description of the script. -| ARGS | Description | -| ----------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `PARTITION` | The partition to use in your cluster. | -| `JOB_NAME` | The name of your job, you can name it as you like. | -| `CONFIG_FILE` | The path to the config file. | -| `CHECKPOINT_FILE` | The path to the checkpoint file (It can be a http link, and you can find checkpoints [here](https://mmclassification.readthedocs.io/en/1.x/modelzoo_statistics.html)). | -| `[PY_ARGS]` | The other optional arguments of `tools/test.py`, see [here](#test-with-your-pc). | +| ARGS | Description | +| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `PARTITION` | The partition to use in your cluster. | +| `JOB_NAME` | The name of your job, you can name it as you like. | +| `CONFIG_FILE` | The path to the config file. | +| `CHECKPOINT_FILE` | The path to the checkpoint file (It can be a http link, and you can find checkpoints [here](https://mmpretrain.readthedocs.io/en/latest/modelzoo_statistics.html)). | +| `[PY_ARGS]` | The other optional arguments of `tools/test.py`, see [here](#test-with-your-pc). | Here are the environment variables can be used to configure the slurm job. diff --git a/docs/zh_CN/_templates/404.html b/docs/zh_CN/_templates/404.html index 2e894f1d..abf3356c 100644 --- a/docs/zh_CN/_templates/404.html +++ b/docs/zh_CN/_templates/404.html @@ -10,7 +10,7 @@ 如果你是从旧版本文档跳转至此,可能是对应的页面被移动了。请从左侧的目录中寻找新版本文档,或者跳转至首页

- 如果你找不到希望打开的文档,欢迎在 Issue 中告诉我们! + 如果你找不到希望打开的文档,欢迎在 Issue 中告诉我们!

{% endblock %} diff --git a/docs/zh_CN/advanced_guides/datasets.md b/docs/zh_CN/advanced_guides/datasets.md index 999501f4..83b7959b 100644 --- a/docs/zh_CN/advanced_guides/datasets.md +++ b/docs/zh_CN/advanced_guides/datasets.md @@ -1,6 +1,6 @@ # 添加新数据集 -用户可以编写一个继承自 [BasesDataset](https://mmclassification.readthedocs.io/zh_CN/latest/_modules/mmpretrain/datasets/base_dataset.html#BaseDataset) 的新数据集类,并重载 `load_data_list(self)` 方法,类似 [CIFAR10](https://github.com/open-mmlab/mmclassification/blob/pretrain/mmpretrain/datasets/cifar.py) 和 [ImageNet](https://github.com/open-mmlab/mmclassification/blob/pretrain/mmpretrain/datasets/imagenet.py)。 +用户可以编写一个继承自 [BasesDataset](https://mmpretrain.readthedocs.io/zh_CN/latest/_modules/mmpretrain/datasets/base_dataset.html#BaseDataset) 的新数据集类,并重载 `load_data_list(self)` 方法,类似 [CIFAR10](https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/datasets/cifar.py) 和 [ImageNet](https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/datasets/imagenet.py)。 通常,此方法返回一个包含所有样本的列表,其中的每个样本都是一个字典。字典中包含了必要的数据信息,例如 `img` 和 `gt_label`。 @@ -66,7 +66,7 @@ train_dataloader = dict( ) ``` -所有继承 [`BaseDataset`](https://github.com/open-mmlab/mmclassification/blob/pretrain/mmpretrain/datasets/base_dataset.py) 的数据集类都具有**懒加载**以及**节省内存**的特性,可以参考相关文档 {external+mmengine:doc}`BaseDataset `。 +所有继承 [`BaseDataset`](https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/datasets/base_dataset.py) 的数据集类都具有**懒加载**以及**节省内存**的特性,可以参考相关文档 {external+mmengine:doc}`BaseDataset `。 ```{note} 如果数据样本时获取的字典中,只包含了 'img_path' 不包含 'img', 则在 pipeline 中必须包含 'LoadImgFromFile'。 diff --git a/docs/zh_CN/advanced_guides/schedule.md b/docs/zh_CN/advanced_guides/schedule.md index 9c37b591..d1c347d1 100644 --- a/docs/zh_CN/advanced_guides/schedule.md +++ b/docs/zh_CN/advanced_guides/schedule.md @@ -1,6 +1,6 @@ # 自定义训练优化策略 -在我们的算法库中,已经提供了通用数据集(如ImageNet,CIFAR)的[默认训练策略配置](https://github.com/open-mmlab/mmclassification/blob/pretrain/configs/_base_/schedules)。如果想要在这些数据集上继续提升模型性能,或者在不同数据集和方法上进行新的尝试,我们通常需要修改这些默认的策略。 +在我们的算法库中,已经提供了通用数据集(如ImageNet,CIFAR)的[默认训练策略配置](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/schedules)。如果想要在这些数据集上继续提升模型性能,或者在不同数据集和方法上进行新的尝试,我们通常需要修改这些默认的策略。 在本教程中,我们将介绍如何在运行自定义训练时,通过修改配置文件进行构造优化器、参数化精细配置、梯度裁剪、梯度累计以及定制动量调整策略等。同时也会通过模板简单介绍如何自定义开发优化器和构造器。 diff --git a/docs/zh_CN/conf.py b/docs/zh_CN/conf.py index 10ac2ffe..b01d0c6c 100644 --- a/docs/zh_CN/conf.py +++ b/docs/zh_CN/conf.py @@ -93,7 +93,7 @@ html_theme_options = { 'menu': [ { 'name': 'GitHub', - 'url': 'https://github.com/open-mmlab/mmclassification/tree/pretrain' + 'url': 'https://github.com/open-mmlab/mmpretrain' }, { 'name': 'Colab 教程', @@ -108,11 +108,11 @@ html_theme_options = { 'name': 'Version', 'children': [ {'name': 'MMPretrain 0.x', - 'url': 'https://mmpretrain.readthedocs.io/zh_CN/latest/', - 'description': 'master branch'}, + 'url': 'https://mmpretrain.readthedocs.io/zh_CN/0.x/', + 'description': '0.x branch'}, {'name': 'MMPretrain 1.x', - 'url': 'https://mmpretrain.readthedocs.io/zh_CN/dev-1.x/', - 'description': '1.x branch'}, + 'url': 'https://mmpretrain.readthedocs.io/zh_CN/latest/', + 'description': 'Main branch'}, ], } ], @@ -181,7 +181,7 @@ man_pages = [(root_doc, 'mmpretrain', 'MMPretrain Documentation', [author], 1)] # dir menu entry, description, category) texinfo_documents = [ (root_doc, 'mmpretrain', 'MMPretrain Documentation', author, 'mmpretrain', - 'OpenMMLab image classification toolbox and benchmark.', 'Miscellaneous'), + 'OpenMMLab pre-training toolbox and benchmark.', 
'Miscellaneous'), ] # -- Options for Epub output ------------------------------------------------- diff --git a/docs/zh_CN/get_started.md b/docs/zh_CN/get_started.md index 974242cd..ef682c1f 100644 --- a/docs/zh_CN/get_started.md +++ b/docs/zh_CN/get_started.md @@ -53,7 +53,7 @@ conda install pytorch torchvision cpuonly -c pytorch 这种情况下,从源码按如下方式安装 mmpretrain: ```shell -git clone -b pretrain https://github.com/open-mmlab/mmclassification.git mmpretrain +git clone https://github.com/open-mmlab/mmpretrain.git cd mmpretrain pip install -U openmim && mim install -e . ``` @@ -67,7 +67,7 @@ pip install -U openmim && mim install -e . 直接使用 mim 安装即可。 ```shell -pip install -U openmim && mim install "mmpretrain>=1.0rc5" +pip install -U openmim && mim install "mmpretrain>=1.0.0rc6" ``` ```{note} @@ -130,7 +130,7 @@ MMPretrain 可以仅在 CPU 环境中安装,在 CPU 模式下,你可以完 ### 通过 Docker 使用 MMPretrain -MMPretrain 提供 [Dockerfile](https://github.com/open-mmlab/mmclassification/blob/pretrain/docker/Dockerfile) +MMPretrain 提供 [Dockerfile](https://github.com/open-mmlab/mmpretrain/blob/main/docker/Dockerfile) 用于构建镜像。请确保你的 [Docker 版本](https://docs.docker.com/engine/install/) >=19.03。 ```shell @@ -148,4 +148,4 @@ docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmpretrain/data mmpretrai ## 故障解决 如果你在安装过程中遇到了什么问题,请先查阅[常见问题](./notes/faq.md)。如果没有找到解决方法,可以在 GitHub -上[提出 issue](https://github.com/open-mmlab/mmclassification/issues/new/choose)。 +上[提出 issue](https://github.com/open-mmlab/mmpretrain/issues/new/choose)。 diff --git a/docs/zh_CN/index.rst b/docs/zh_CN/index.rst index 7f23c013..cceff12b 100644 --- a/docs/zh_CN/index.rst +++ b/docs/zh_CN/index.rst @@ -130,8 +130,8 @@ MMPretrain 上手路线 .. toctree:: :caption: 切换语言 - English - 简体中文 + English + 简体中文 索引与表格 diff --git a/docs/zh_CN/notes/contribution_guide.md b/docs/zh_CN/notes/contribution_guide.md index 37e48cef..2549cc28 100644 --- a/docs/zh_CN/notes/contribution_guide.md +++ b/docs/zh_CN/notes/contribution_guide.md @@ -8,7 +8,7 @@ ## 工作流程 -1. fork 并 pull 最新的 OpenMMLab 仓库 (MMClassification) +1. fork 并 pull 最新的 OpenMMLab 仓库 (MMPreTrain) 2. 签出到一个新分支(不要使用 master 分支提交 PR) 3. 进行修改并提交至 fork 出的自己的远程仓库 4. 
在我们的仓库中创建一个 PR @@ -32,12 +32,12 @@ - [mdformat](https://github.com/executablebooks/mdformat): 检查 markdown 文件的工具 - [docformatter](https://github.com/myint/docformatter): 一个 docstring 格式化工具。 -yapf 和 isort 的格式设置位于 [setup.cfg](https://github.com/open-mmlab/mmclassification/blob/1.x/setup.cfg) +yapf 和 isort 的格式设置位于 [setup.cfg](https://github.com/open-mmlab/mmpretrain/blob/main/setup.cfg) 我们使用 [pre-commit hook](https://pre-commit.com/) 来保证每次提交时自动进行代 码检查和格式化,启用的功能包括 `flake8`, `yapf`, `isort`, `trailing whitespaces`, `markdown files`, 修复 `end-of-files`, `double-quoted-strings`, `python-encoding-pragma`, `mixed-line-ending`, 对 `requirments.txt`的排序等。 -pre-commit hook 的配置文件位于 [.pre-commit-config](https://github.com/open-mmlab/mmclassification/blob/1.x/.pre-commit-config.yaml) +pre-commit hook 的配置文件位于 [.pre-commit-config](https://github.com/open-mmlab/mmpretrain/blob/main/.pre-commit-config.yaml) 在你克隆仓库后,你需要按照如下步骤安装并初始化 pre-commit hook。 diff --git a/docs/zh_CN/notes/faq.md b/docs/zh_CN/notes/faq.md index 4173eef3..90172eea 100644 --- a/docs/zh_CN/notes/faq.md +++ b/docs/zh_CN/notes/faq.md @@ -2,7 +2,7 @@ 我们在这里列出了一些常见问题及其相应的解决方案。如果您发现任何常见问题并有方法 帮助解决,欢迎随时丰富列表。如果这里的内容没有涵盖您的问题,请按照 -[提问模板](https://github.com/open-mmlab/mmclassification/issues/new/choose) +[提问模板](https://github.com/open-mmlab/mmpretrain/issues/new/choose) 在 GitHub 上提出问题,并补充模板中需要的信息。 ## 安装 diff --git a/docs/zh_CN/notes/finetune_custom_dataset.md b/docs/zh_CN/notes/finetune_custom_dataset.md index 9bf426b3..2b8cbd68 100644 --- a/docs/zh_CN/notes/finetune_custom_dataset.md +++ b/docs/zh_CN/notes/finetune_custom_dataset.md @@ -75,7 +75,7 @@ _base_ = [ 络的参数,只需要在上面的配置中添加一行: ```{note} -注意,目前并非所有的主干网络都支持 `frozen_stages` 参数。请检查[文档](https://mmpretrain.readthedocs.io/en/main/api.html#module-mmpretrain.models.backbones) +注意,目前并非所有的主干网络都支持 `frozen_stages` 参数。请检查[文档](https://mmpretrain.readthedocs.io/en/latest/api.html#module-mmpretrain.models.backbones) 确认使用的主干网络是否支持这一参数。 ``` diff --git a/docs/zh_CN/stat.py b/docs/zh_CN/stat.py index 253e7e47..70ea692d 100755 --- a/docs/zh_CN/stat.py +++ b/docs/zh_CN/stat.py @@ -10,7 +10,7 @@ from tabulate import tabulate MMPT_ROOT = Path(__file__).absolute().parents[2] PAPERS_ROOT = Path('papers') # Path to save generated paper pages. 
-GITHUB_PREFIX = 'https://github.com/open-mmlab/mmpretrain/blob/1.x/' +GITHUB_PREFIX = 'https://github.com/open-mmlab/mmpretrain/blob/main/' MODELZOO_TEMPLATE = """\ # 模型库统计 diff --git a/docs/zh_CN/useful_tools/verify_dataset.md b/docs/zh_CN/useful_tools/verify_dataset.md index acb7b22b..655ce977 100644 --- a/docs/zh_CN/useful_tools/verify_dataset.md +++ b/docs/zh_CN/useful_tools/verify_dataset.md @@ -19,7 +19,7 @@ python tools/print_config.py \ - `--out-path` : 输出结果路径,默认为 ‘brokenfiles.log’。 - `--phase` : 检查哪个阶段的数据集,可用值为 “train” 、”test” 或者 “val”, 默认为 “train”。 - `--num-process` : 指定的进程数,默认为 1。 -- `--cfg-options`: 额外的配置选项,会被合入配置文件,参考[教程 1:如何编写配置文件](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/config.html)。 +- `--cfg-options`: 额外的配置选项,会被合入配置文件,参考[教程 1:如何编写配置文件](https://mmpretrain.readthedocs.io/zh_CN/latest/tutorials/config.html)。 ## 示例: diff --git a/docs/zh_CN/user_guides/config.md b/docs/zh_CN/user_guides/config.md index bd024d2a..795ec13c 100644 --- a/docs/zh_CN/user_guides/config.md +++ b/docs/zh_CN/user_guides/config.md @@ -2,7 +2,7 @@ 为了管理深度学习实验的各种设置,我们使用配置文件来记录所有这些配置。这种配置文件系统具有模块化和继承特性,更多细节可以在{external+mmengine:doc}`MMEngine 中的教程 `。 -MMPretrain 主要使用 python 文件作为配置文件,所有配置文件都放置在 [`configs`](https://github.com/open-mmlab/mmclassification/tree/pretrain/configs) 文件夹下,目录结构如下所示: +MMPretrain 主要使用 python 文件作为配置文件,所有配置文件都放置在 [`configs`](https://github.com/open-mmlab/mmpretrain/tree/main/configs) 文件夹下,目录结构如下所示: ```text MMPretrain/ @@ -24,20 +24,20 @@ MMPretrain/ 可以使用 `python tools/misc/print_config.py /PATH/TO/CONFIG` 命令来查看完整的配置信息,从而方便检查所对应的配置文件。 -本文主要讲解 MMPretrain 配置文件的命名和结构,以及如何基于已有的配置文件修改,并以 [ResNet50 配置文件](https://github.com/open-mmlab/mmclassification/blob/main/configs/resnet/resnet50_8xb32_in1k.py) 逐行解释。 +本文主要讲解 MMPretrain 配置文件的命名和结构,以及如何基于已有的配置文件修改,并以 [ResNet50 配置文件](https://github.com/open-mmlab/mmpretrain/blob/main/configs/resnet/resnet50_8xb32_in1k.py) 逐行解释。 ## 配置文件结构 在 `configs/_base_` 文件夹下有 4 个基本组件类型,分别是: -- [模型(model)](https://github.com/open-mmlab/mmclassification/tree/pretrain/configs/_base_/models) -- [数据(data)](https://github.com/open-mmlab/mmclassification/tree/pretrain/configs/_base_/datasets) -- [训练策略(schedule)](https://github.com/open-mmlab/mmclassification/tree/pretrain/configs/_base_/schedules) -- [运行设置(runtime)](https://github.com/open-mmlab/mmclassification/blob/pretrain/configs/_base_/default_runtime.py) +- [模型(model)](https://github.com/open-mmlab/mmpretrain/tree/main/configs/_base_/models) +- [数据(data)](https://github.com/open-mmlab/mmpretrain/tree/main/configs/_base_/datasets) +- [训练策略(schedule)](https://github.com/open-mmlab/mmpretrain/tree/main/configs/_base_/schedules) +- [运行设置(runtime)](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/default_runtime.py) 你可以通过继承一些基本配置文件轻松构建自己的训练配置文件。我们称这些被继承的配置文件为 _原始配置文件_,如 `_base_` 文件夹中的文件一般仅作为原始配置文件。 -下面使用 [ResNet50 配置文件](https://github.com/open-mmlab/mmclassification/blob/pretrain/configs/resnet/resnet50_8xb32_in1k.py) 作为案例进行说明并注释每一行含义。 +下面使用 [ResNet50 配置文件](https://github.com/open-mmlab/mmpretrain/blob/main/configs/resnet/resnet50_8xb32_in1k.py) 作为案例进行说明并注释每一行含义。 ```python _base_ = [ # 此配置文件将继承所有 `_base_` 中的配置 @@ -72,7 +72,7 @@ _base_ = [ # 此配置文件将继承所有 ` - `train_cfg`:训练模型时的额外设置。在 MMCLS 中,我们主要使用它来配置批量增强,例如 `Mixup` 和 `CutMix`。有关详细信息,请参阅 [文档](mmpretrain.models.utils.batch_augments)。 - `train_cfg`: `ImageClassifier` 的额外训练配置。在 `ImageClassifier` 中,我们使用这一参数指定批数据增强设置,比如 `Mixup` 和 `CutMix`。详见[文档](mmpretrain.models.utils.batch_augments)。 -以下是 ResNet50 
的模型配置['configs/_base_/models/resnet50.py'](https://github.com/open-mmlab/mmclassification/blob/pretrain/configs/_base_/models/resnet50.py): +以下是 ResNet50 的模型配置['configs/_base_/models/resnet50.py'](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/models/resnet50.py): ```python model = dict( @@ -80,7 +80,7 @@ model = dict( backbone=dict( type='ResNet', # 主干网络类型 # 除了 `type` 之外的所有字段都来自 `ResNet` 类的 __init__ 方法 - # 可查阅 https://mmclassification.readthedocs.io/zh_CN/pretrain/api/generated/mmpretrain.models.backbones.ResNet.html + # 可查阅 https://mmpretrain.readthedocs.io/zh_CN/latest/api/generated/mmpretrain.models.backbones.ResNet.html depth=50, num_stages=4, # 主干网络状态(stages)的数目,这些状态产生的特征图作为后续的 head 的输入。 out_indices=(3, ), # 输出的特征图输出索引。 @@ -90,7 +90,7 @@ model = dict( head=dict( type='LinearClsHead', # 分类颈网络类型 # 除了 `type` 之外的所有字段都来自 `LinearClsHead` 类的 __init__ 方法 - # 可查阅 https://mmclassification.readthedocs.io/zh_CN/pretrain/api/generated/mmpretrain.models.heads.LinearClsHead.html + # 可查阅 https://mmpretrain.readthedocs.io/zh_CN/latest/api/generated/mmpretrain.models.heads.LinearClsHead.html num_classes=1000, in_channels=2048, loss=dict(type='CrossEntropyLoss', loss_weight=1.0), # 损失函数配置信息 @@ -112,7 +112,7 @@ model = dict( - `type`: 数据集类型, MMPretrain 支持 `ImageNet`、 `Cifar` 等数据集 ,参考 [API 文档](mmpretrain.datasets) - `pipeline`: 数据处理流水线,参考相关教程文档 [如何设计数据处理流水线](../advanced_guides/pipeline.md) -以下是 ResNet50 的数据配置 ['configs/_base_/datasets/imagenet_bs32.py'](https://github.com/open-mmlab/mmclassification/blob/pretrain/configs/_base_/datasets/imagenet_bs32.py): +以下是 ResNet50 的数据配置 ['configs/_base_/datasets/imagenet_bs32.py'](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/datasets/imagenet_bs32.py): ```python dataset_type = 'ImageNet' @@ -188,7 +188,7 @@ test_evaluator = val_evaluator # 测试集的评估配置,这里直接与 v - `param_scheduler` : 学习率策略,你可以指定训练期间的学习率和动量曲线。有关详细信息,请参阅 MMEngine 中的 {external+mmengine:doc}`文档 `。 - `train_cfg | val_cfg | test_cfg`: 训练、验证以及测试的循环执行器配置,请参考相关的{external+mmengine:doc}`MMEngine 文档 `。 -以下是 ResNet50 的训练策略配置['configs/_base_/schedules/imagenet_bs256.py'](https://github.com/open-mmlab/mmclassification/blob/pretrain/configs/_base_/schedules/imagenet_bs256.py): +以下是 ResNet50 的训练策略配置['configs/_base_/schedules/imagenet_bs256.py'](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/schedules/imagenet_bs256.py): ```python optim_wrapper = dict( @@ -218,7 +218,7 @@ auto_scale_lr = dict(base_batch_size=256) 本部分主要包括保存权重策略、日志配置、训练参数、断点权重路径和工作目录等等。 -以下是几乎所有算法都使用的运行配置['configs/_base_/default_runtime.py'](https://github.com/open-mmlab/mmclassification/blob/pretrain//configs/_base_/default_runtime.py): +以下是几乎所有算法都使用的运行配置['configs/_base_/default_runtime.py'](https://github.com/open-mmlab/mmpretrain/blob/main//configs/_base_/default_runtime.py): ```python # 默认所有注册器使用的域 @@ -362,7 +362,7 @@ param_scheduler = dict(type='CosineAnnealingLR', by_epoch=True, _delete_=True) 有时,您可以引用 `_base_` 配置信息的一些域内容,这样可以避免重复定义。可以查看 {external+mmengine:doc}`MMEngine 文档 ` 进一步了解该设计。 -以下是一个简单应用案例,在训练数据预处理流水线中使用 `auto augment` 数据增强,参考配置文件 [`configs/resnest/resnest50_32xb64_in1k.py`](https://github.com/open-mmlab/mmclassification/blob/pretrain/configs/resnest/resnest50_32xb64_in1k.py)。 在定义 `train_pipeline` 时,可以直接在 `_base_` 中加入定义 auto augment 数据增强的文件命名,再通过 `{{_base_.auto_increasing_policies}}` 引用变量: +以下是一个简单应用案例,在训练数据预处理流水线中使用 `auto augment` 数据增强,参考配置文件 [`configs/resnest/resnest50_32xb64_in1k.py`](https://github.com/open-mmlab/mmpretrain/blob/main/configs/resnest/resnest50_32xb64_in1k.py)。 在定义 
`train_pipeline` 时,可以直接在 `_base_` 中加入定义 auto augment 数据增强的文件命名,再通过 `{{_base_.auto_increasing_policies}}` 引用变量: ```python _base_ = [ diff --git a/docs/zh_CN/user_guides/inference.md b/docs/zh_CN/user_guides/inference.md index 7018c159..5bd07344 100644 --- a/docs/zh_CN/user_guides/inference.md +++ b/docs/zh_CN/user_guides/inference.md @@ -15,7 +15,7 @@ MMPretrain 为图像推理提供高级 Python API: 下面是一个示例,如何使用一个 ImageNet-1k 预训练权重初始化模型并推理给定图像。 ```{note} -可以运行 `wget https://github.com/open-mmlab/mmclassification/raw/master/demo/demo.JPEG` 下载样例图片,或使用其他图片。 +可以运行 `wget https://github.com/open-mmlab/mmpretrain/raw/main/demo/demo.JPEG` 下载样例图片,或使用其他图片。 ``` ```python diff --git a/docs/zh_CN/user_guides/test.md b/docs/zh_CN/user_guides/test.md index 34fedd8d..054e1e41 100644 --- a/docs/zh_CN/user_guides/test.md +++ b/docs/zh_CN/user_guides/test.md @@ -21,7 +21,7 @@ CUDA_VISIBLE_DEVICES=-1 python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [ | 参数 | 描述 | | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `CONFIG_FILE` | 配置文件的路径。 | -| `CHECKPOINT_FILE` | 权重文件路径(支持 http 链接,你可以在[这里](https://mmclassification.readthedocs.io/en/1.x/modelzoo_statistics.html)寻找需要的权重文件)。 | +| `CHECKPOINT_FILE` | 权重文件路径(支持 http 链接,你可以在[这里](https://mmpretrain.readthedocs.io/en/latest/modelzoo_statistics.html)寻找需要的权重文件)。 | | `--work-dir WORK_DIR` | 用来保存测试指标结果的文件夹。 | | `--out OUT` | 用来保存测试输出的文件。 | | `--out-item OUT_ITEM` | 指定测试输出文件的内容,可以为 "pred" 或 "metrics",其中 "pred" 表示保存所有模型输出,这些数据可以用于离线测评;"metrics" 表示输出测试指标。默认为 "pred"。 | @@ -42,12 +42,12 @@ CUDA_VISIBLE_DEVICES=-1 python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [ bash ./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [PY_ARGS] ``` -| 参数 | 描述 | -| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | -| `CONFIG_FILE` | 配置文件的路径。 | -| `CHECKPOINT_FILE` | 权重文件路径(支持 http 链接,你可以在[这里](https://mmclassification.readthedocs.io/en/1.x/modelzoo_statistics.html)寻找需要的权重文件)。 | -| `GPU_NUM` | 使用的 GPU 数量。 | -| `[PY_ARGS]` | `tools/test.py` 支持的其他可选参数,参见[上文](#单机单卡测试)。 | +| 参数 | 描述 | +| ----------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | +| `CONFIG_FILE` | 配置文件的路径。 | +| `CHECKPOINT_FILE` | 权重文件路径(支持 http 链接,你可以在[这里](https://mmpretrain.readthedocs.io/en/latest/modelzoo_statistics.html)寻找需要的权重文件)。 | +| `GPU_NUM` | 使用的 GPU 数量。 | +| `[PY_ARGS]` | `tools/test.py` 支持的其他可选参数,参见[上文](#单机单卡测试)。 | 你还可以使用环境变量来指定启动器的额外参数,比如用如下命令将启动器的通讯端口变更为 29666: @@ -99,13 +99,13 @@ NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_ 这里是该脚本的一些参数: -| 参数 | 描述 | -| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | -| `PARTITION` | 使用的集群分区。 | -| `JOB_NAME` | 任务的名称,你可以随意起一个名字。 | -| `CONFIG_FILE` | 配置文件路径。 | -| `CHECKPOINT_FILE` | 权重文件路径(支持 http 链接,你可以在[这里](https://mmclassification.readthedocs.io/en/1.x/modelzoo_statistics.html)寻找需要的权重文件)。 | -| `[PY_ARGS]` | `tools/test.py` 支持的其他可选参数,参见[上文](#单机单卡测试)。 | +| 参数 | 描述 | +| ----------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | +| `PARTITION` | 使用的集群分区。 | +| `JOB_NAME` 
| 任务的名称,你可以随意起一个名字。 | +| `CONFIG_FILE` | 配置文件路径。 | +| `CHECKPOINT_FILE` | 权重文件路径(支持 http 链接,你可以在[这里](https://mmpretrain.readthedocs.io/en/latest/modelzoo_statistics.html)寻找需要的权重文件)。 | +| `[PY_ARGS]` | `tools/test.py` 支持的其他可选参数,参见[上文](#单机单卡测试)。 | 这里是一些你可以用来配置 slurm 任务的环境变量: diff --git a/mmpretrain/apis/image_classification.py b/mmpretrain/apis/image_classification.py index 7edbc42f..712abb2e 100644 --- a/mmpretrain/apis/image_classification.py +++ b/mmpretrain/apis/image_classification.py @@ -35,7 +35,7 @@ class ImageClassificationInferencer(BaseInferencer): the device of the input model. Defaults to None. Example: - 1. Use a pre-trained model in MMClassification to inference an image. + 1. Use a pre-trained model in MMPreTrain to inference an image. >>> from mmpretrain import ImageClassificationInferencer >>> inferencer = ImageClassificationInferencer('resnet50_8xb32_in1k') diff --git a/mmpretrain/apis/image_retrieval.py b/mmpretrain/apis/image_retrieval.py index 99507495..f233877d 100644 --- a/mmpretrain/apis/image_retrieval.py +++ b/mmpretrain/apis/image_retrieval.py @@ -35,7 +35,7 @@ class ImageRetrievalInferencer(BaseInferencer): the device of the input model. Defaults to None. Example: - 1. Use a pre-trained model in MMClassification to inference an image. + 1. Use a pre-trained model in MMPreTrain to inference an image. >>> from mmpretrain import ImageClassificationInferencer >>> inferencer = ImageClassificationInferencer('resnet50_8xb32_in1k') diff --git a/mmpretrain/models/utils/clip_generator_helper.py b/mmpretrain/models/utils/clip_generator_helper.py index 80cc74a7..90d7b483 100644 --- a/mmpretrain/models/utils/clip_generator_helper.py +++ b/mmpretrain/models/utils/clip_generator_helper.py @@ -30,8 +30,8 @@ class QuickGELU(nn.Module): class ResidualAttentionBlock(nn.Module): """Residual Attention Block (RAB). - This module implements the same function as the MultiheadAttention in - MMClassification, but with a different interface, which is mainly used + This module implements the same function as the MultiheadAttention, + but with a different interface, which is mainly used in CLIP. Args: diff --git a/projects/README.md b/projects/README.md index 77098dff..5122e4b6 100644 --- a/projects/README.md +++ b/projects/README.md @@ -1,21 +1,21 @@ -# Welcome to Projects of MMClassification +# Welcome to Projects of MMPreTrain In this folder, we welcome all contribution of vision deep-learning backbone from community. -Here, these requirements, e.g. code standards, are not that strict as in core package. Thus, developers from the community can implement their algorithms much more easily and efficiently in MMClassification. We appreciate all contributions from community to make MMClassification greater. +Here, these requirements, e.g. code standards, are not that strict as in core package. Thus, developers from the community can implement their algorithms much more easily and efficiently in MMPreTrain. We appreciate all contributions from community to make MMPreTrain greater. Here is an [example project](./example_project) about how to add your algorithms easily. We also provide some documentation listed below: -- [New Model Guide](https://mmclassification.readthedocs.io/en/dev-1.x/advanced_guides/modules.html) +- [New Model Guide](https://mmpretrain.readthedocs.io/en/latest/advanced_guides/modules.html) The documentation of adding new models. 
-- [Contribution Guide](https://mmclassification.readthedocs.io/en/dev-1.x/notes/contribution_guide.html) +- [Contribution Guide](https://mmpretrain.readthedocs.io/en/latest/notes/contribution_guide.html) - The guides for new contributors about how to add your projects to MMClassification. + The guides for new contributors about how to add your projects to MMPreTrain. -- [Discussions](https://github.com/open-mmlab/mmclassification/discussions) +- [Discussions](https://github.com/open-mmlab/mmpretrain/discussions) Welcome to start discussion! diff --git a/projects/example_project/README.md b/projects/example_project/README.md index 32325b24..b9532e44 100644 --- a/projects/example_project/README.md +++ b/projects/example_project/README.md @@ -8,8 +8,8 @@ according to your project. ### Setup Environment -Please refer to [Get Started](https://mmclassification.readthedocs.io/en/1.x/get_started.html) to install -MMClassification. +Please refer to [Get Started](https://mmpretrain.readthedocs.io/en/latest/get_started.html) to install +MMPreTrain. At first, add the current folder to `PYTHONPATH`, so that Python can find your code. Run command in the current directory to add it. @@ -21,26 +21,26 @@ export PYTHONPATH=`pwd`:$PYTHONPATH ### Data Preparation -Prepare the ImageNet-2012 dataset according to the [instruction](https://mmclassification.readthedocs.io/en/dev-1.x/user_guides/dataset_prepare.html#imagenet). +Prepare the ImageNet-2012 dataset according to the [instruction](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#imagenet). ### Training commands **To train with single GPU:** ```bash -mim train mmcls configs/examplenet_8xb32_in1k.py +mim train mmpretrain configs/examplenet_8xb32_in1k.py ``` **To train with multiple GPUs:** ```bash -mim train mmcls configs/examplenet_8xb32_in1k.py --launcher pytorch --gpus 8 +mim train mmpretrain configs/examplenet_8xb32_in1k.py --launcher pytorch --gpus 8 ``` **To train with multiple GPUs by slurm:** ```bash -mim train mmcls configs/examplenet_8xb32_in1k.py --launcher slurm \ +mim train mmpretrain configs/examplenet_8xb32_in1k.py --launcher slurm \ --gpus 16 --gpus-per-node 8 --partition $PARTITION ``` @@ -49,19 +49,19 @@ mim train mmcls configs/examplenet_8xb32_in1k.py --launcher slurm \ **To test with single GPU:** ```bash -mim test mmcls configs/examplenet_8xb32_in1k.py $CHECKPOINT +mim test mmpretrain configs/examplenet_8xb32_in1k.py $CHECKPOINT ``` **To test with multiple GPUs:** ```bash -mim test mmcls configs/examplenet_8xb32_in1k.py $CHECKPOINT --launcher pytorch --gpus 8 +mim test mmpretrain configs/examplenet_8xb32_in1k.py $CHECKPOINT --launcher pytorch --gpus 8 ``` **To test with multiple GPUs by slurm:** ```bash -mim test mmcls configs/examplenet_8xb32_in1k.py $CHECKPOINT --launcher slurm \ +mim test mmpretrain configs/examplenet_8xb32_in1k.py $CHECKPOINT --launcher slurm \ --gpus 16 --gpus-per-node 8 --partition $PARTITION ``` @@ -79,25 +79,25 @@ mim test mmcls configs/examplenet_8xb32_in1k.py $CHECKPOINT --launcher slurm \ -```bibtex -@misc{2020mmclassification, - title={OpenMMLab's Image Classification Toolbox and Benchmark}, - author={MMClassification Contributors}, - howpublished = {\url{https://github.com/open-mmlab/mmclassification}}, - year={2020} +```BibTeX +@misc{2023mmpretrain, + title={OpenMMLab's Pre-training Toolbox and Benchmark}, + author={MMPreTrain Contributors}, + howpublished = {\url{https://github.com/open-mmlab/mmpretrain}}, + year={2023} } ``` ## Checklist Here is a checklist of this project's 
progress. And you can ignore this part if you don't plan to contribute -to MMClassification projects. +to MMPreTrain projects. - [ ] Milestone 1: PR-ready, and acceptable to be one of the `projects/`. - [ ] Finish the code - + - [ ] Basic docstrings & proper citation @@ -117,7 +117,7 @@ to MMClassification projects. - [ ] Unit tests - + - [ ] Code style @@ -125,4 +125,4 @@ to MMClassification projects. - [ ] `metafile.yml` and `README.md` - + diff --git a/projects/fgia_accv2022_1st/README.md b/projects/fgia_accv2022_1st/README.md index 6b3ff24d..f929fb70 100644 --- a/projects/fgia_accv2022_1st/README.md +++ b/projects/fgia_accv2022_1st/README.md @@ -78,7 +78,7 @@ those provided by the official website of the competition. ### Start pre-training -First, you should install all these requirements, following this [page](https://mmpretrain.readthedocs.io/en/main/get_started.html). +First, you should install all these requirements, following this [page](https://mmpretrain.readthedocs.io/en/latest/get_started.html). Then change your current directory to the root of MMPretrain ```shell diff --git a/projects/maskfeat_video/README.md b/projects/maskfeat_video/README.md index ca2424af..6a8ce03e 100644 --- a/projects/maskfeat_video/README.md +++ b/projects/maskfeat_video/README.md @@ -37,7 +37,7 @@ Requirements: - MMPretrain >= 1.0.0rc0 - MMAction2 >= 1.0.0rc3 -Please refer to [Get Started](https://mmpretrain.readthedocs.io/en/main/get_started.html) documentation of MMPretrain to finish installation. +Please refer to [Get Started](https://mmpretrain.readthedocs.io/en/latest/get_started.html) documentation of MMPretrain to finish installation. Besides, to process the video data, we apply transforms in MMAction2. The instruction to install MMAction2 can be found in [Get Started documentation](https://mmaction2.readthedocs.io/en/1.x/get_started.html). 
diff --git a/tools/analysis_tools/analyze_results.py b/tools/analysis_tools/analyze_results.py index 68557eed..5f2feb3c 100644 --- a/tools/analysis_tools/analyze_results.py +++ b/tools/analysis_tools/analyze_results.py @@ -15,7 +15,7 @@ from mmpretrain.visualization import UniversalVisualizer def parse_args(): parser = argparse.ArgumentParser( - description='MMCls evaluate prediction success/fail') + description='MMPreTrain evaluate prediction success/fail') parser.add_argument('config', help='test config file path') parser.add_argument('result', help='test result json/pkl file') parser.add_argument( diff --git a/tools/analysis_tools/eval_metric.py b/tools/analysis_tools/eval_metric.py index e9cf1425..4b2fec11 100644 --- a/tools/analysis_tools/eval_metric.py +++ b/tools/analysis_tools/eval_metric.py @@ -9,7 +9,7 @@ from mmengine.evaluator import Evaluator from mmpretrain.registry import METRICS HELP_URL = ( - 'https://mmpretrain.readthedocs.io/en/dev-1.x/useful_tools/' + 'https://mmpretrain.readthedocs.io/en/latest/useful_tools/' 'log_result_analysis.html#how-to-conduct-offline-metric-evaluation') prog_description = f"""\ diff --git a/tools/test.py b/tools/test.py index 42664495..230d39ca 100644 --- a/tools/test.py +++ b/tools/test.py @@ -12,7 +12,7 @@ from mmengine.runner import Runner def parse_args(): parser = argparse.ArgumentParser( - description='MMCLS test (and eval) a model') + description='MMPreTrain test (and eval) a model') parser.add_argument('config', help='test config file path') parser.add_argument('checkpoint', help='checkpoint file') parser.add_argument( diff --git a/tools/train.py b/tools/train.py index 35413f96..7a149e74 100644 --- a/tools/train.py +++ b/tools/train.py @@ -11,7 +11,7 @@ from mmengine.utils.dl_utils import TORCH_VERSION def parse_args(): - parser = argparse.ArgumentParser(description='Train a classifier') + parser = argparse.ArgumentParser(description='Train a model') parser.add_argument('config', help='train config file path') parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument(