diff --git a/.dev_scripts/benchmark_regression/1-benchmark_valid.py b/.dev_scripts/benchmark_regression/1-benchmark_valid.py index 250b44a5..e8de6c40 100644 --- a/.dev_scripts/benchmark_regression/1-benchmark_valid.py +++ b/.dev_scripts/benchmark_regression/1-benchmark_valid.py @@ -17,10 +17,10 @@ from modelindex.load_model_index import load from rich.console import Console from rich.table import Table -from mmcls.apis import init_model -from mmcls.datasets import CIFAR10, CIFAR100, ImageNet -from mmcls.utils import register_all_modules -from mmcls.visualization import ClsVisualizer +from mmpretrain.apis import init_model +from mmpretrain.datasets import CIFAR10, CIFAR100, ImageNet +from mmpretrain.utils import register_all_modules +from mmpretrain.visualization import ClsVisualizer console = Console() MMCLS_ROOT = Path(__file__).absolute().parents[2] diff --git a/.dev_scripts/benchmark_regression/4-benchmark_speed.py b/.dev_scripts/benchmark_regression/4-benchmark_speed.py index 71ec017a..a1521a7a 100644 --- a/.dev_scripts/benchmark_regression/4-benchmark_speed.py +++ b/.dev_scripts/benchmark_regression/4-benchmark_speed.py @@ -18,9 +18,9 @@ from modelindex.load_model_index import load from rich.console import Console from rich.table import Table -from mmcls.datasets.builder import build_dataloader -from mmcls.datasets.pipelines import Compose -from mmcls.models.builder import build_classifier +from mmpretrain.datasets.builder import build_dataloader +from mmpretrain.datasets.pipelines import Compose +from mmpretrain.models.builder import build_classifier console = Console() MMCLS_ROOT = Path(__file__).absolute().parents[2] diff --git a/.dev_scripts/ckpt_tree.py b/.dev_scripts/ckpt_tree.py index 787020e1..c7ff1074 100644 --- a/.dev_scripts/ckpt_tree.py +++ b/.dev_scripts/ckpt_tree.py @@ -47,7 +47,7 @@ def ckpt_to_state_dict(checkpoint, key=None): if key is not None: state_dict = checkpoint[key] elif 'state_dict' in checkpoint: - # try mmcls style + # try 
mmpretrain style state_dict = checkpoint['state_dict'] elif 'model' in checkpoint: state_dict = checkpoint['model'] @@ -149,7 +149,7 @@ def main(): if args.path.suffix in ['.json', '.py', '.yml']: from mmengine.runner import get_state_dict - from mmcls.apis import init_model + from mmpretrain.apis import init_model model = init_model(args.path, device='cpu') state_dict = get_state_dict(model) else: diff --git a/.dev_scripts/compare_init.py b/.dev_scripts/compare_init.py index 71030f67..fcb97266 100644 --- a/.dev_scripts/compare_init.py +++ b/.dev_scripts/compare_init.py @@ -55,7 +55,7 @@ def state_dict_from_cfg_or_ckpt(path, state_key=None): if path.suffix in ['.json', '.py', '.yml']: from mmengine.runner import get_state_dict - from mmcls.apis import init_model + from mmpretrain.apis import init_model model = init_model(path, device='cpu') model.init_weights() return get_state_dict(model) diff --git a/.dev_scripts/fill_metafile.py b/.dev_scripts/fill_metafile.py index 3749df86..ed39819b 100644 --- a/.dev_scripts/fill_metafile.py +++ b/.dev_scripts/fill_metafile.py @@ -84,8 +84,8 @@ def get_flops(config_path): from mmengine.dataset import Compose from mmengine.registry import DefaultScope - import mmcls.datasets # noqa: F401 - from mmcls.apis import init_model + import mmpretrain.datasets # noqa: F401 + from mmpretrain.apis import init_model cfg = Config.fromfile(config_path) @@ -98,7 +98,7 @@ def get_flops(config_path): # The image shape of CIFAR is (32, 32, 3) test_dataset.pipeline.insert(1, dict(type='Resize', scale=32)) - with DefaultScope.overwrite_default_scope('mmcls'): + with DefaultScope.overwrite_default_scope('mmpretrain'): data = Compose(test_dataset.pipeline)({ 'img': np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) diff --git a/.gitignore b/.gitignore index 6f33f4c2..b813dd9c 100644 --- a/.gitignore +++ b/.gitignore @@ -127,7 +127,7 @@ venv.bak/ /work_dirs /projects/*/work_dirs /projects/*/data -/mmcls/.mim +/mmpretrain/.mim .DS_Store # 
Pytorch diff --git a/MANIFEST.in b/MANIFEST.in index 17ddc8c7..c039b371 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,4 @@ include requirements/*.txt -include mmcls/.mim/model-index.yml -recursive-include mmcls/.mim/configs *.py *.yml -recursive-include mmcls/.mim/tools *.py *.sh +include mmpretrain/.mim/model-index.yml +recursive-include mmpretrain/.mim/configs *.py *.yml +recursive-include mmpretrain/.mim/tools *.py *.sh diff --git a/configs/_base_/default_runtime.py b/configs/_base_/default_runtime.py index 186f9454..6e66911c 100644 --- a/configs/_base_/default_runtime.py +++ b/configs/_base_/default_runtime.py @@ -1,5 +1,5 @@ -# defaults to use registries in mmcls -default_scope = 'mmcls' +# defaults to use registries in mmpretrain +default_scope = 'mmpretrain' # configure default hooks default_hooks = dict( diff --git a/configs/_base_/models/replknet-31B_in1k.py b/configs/_base_/models/replknet-31B_in1k.py index a6839537..0cc50959 100644 --- a/configs/_base_/models/replknet-31B_in1k.py +++ b/configs/_base_/models/replknet-31B_in1k.py @@ -1,4 +1,4 @@ -from mmcls.models import build_classifier +from mmpretrain.models import build_classifier model = dict( type='ImageClassifier', diff --git a/configs/csra/resnet101-csra_1xb16_voc07-448px.py b/configs/csra/resnet101-csra_1xb16_voc07-448px.py index 5163febb..b80a14ae 100644 --- a/configs/csra/resnet101-csra_1xb16_voc07-448px.py +++ b/configs/csra/resnet101-csra_1xb16_voc07-448px.py @@ -2,11 +2,11 @@ _base_ = ['../_base_/datasets/voc_bs16.py', '../_base_/default_runtime.py'] # Pre-trained Checkpoint Path checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.pth' # noqa -# If you want to use the pre-trained weight of ResNet101-CutMix from -# the originary repo(https://github.com/Kevinz-code/CSRA). Script of -# 'tools/convert_models/torchvision_to_mmcls.py' can help you convert weight -# into mmcls format. 
The mAP result would hit 95.5 by using the weight. -# checkpoint = 'PATH/TO/PRE-TRAINED_WEIGHT' +# If you want to use the pre-trained weight of ResNet101-CutMix from the +# originary repo(https://github.com/Kevinz-code/CSRA). Script of +# 'tools/model_converters/torchvision_to_mmpretrain.py' can help you convert +# weight into mmpretrain format. The mAP result would hit 95.5 by using the +# weight. checkpoint = 'PATH/TO/PRE-TRAINED_WEIGHT' # model settings model = dict( diff --git a/configs/lenet/lenet5_mnist.py b/configs/lenet/lenet5_mnist.py index feef609c..df53c94d 100644 --- a/configs/lenet/lenet5_mnist.py +++ b/configs/lenet/lenet5_mnist.py @@ -51,7 +51,7 @@ val_cfg = dict() test_cfg = dict() # runtime settings -default_scope = 'mmcls' +default_scope = 'mmpretrain' default_hooks = dict( # record the time of every iteration. diff --git a/demo/image_demo.py b/demo/image_demo.py index 73d21d0c..47c5695b 100644 --- a/demo/image_demo.py +++ b/demo/image_demo.py @@ -4,7 +4,7 @@ from argparse import ArgumentParser from mmengine.fileio import dump from rich import print_json -from mmcls.apis import ImageClassificationInferencer +from mmpretrain.apis import ImageClassificationInferencer def main(): @@ -30,7 +30,7 @@ def main(): raise ValueError( f'Unavailable model "{args.model}", you can specify find a model ' 'name or a config file or find a model name from ' - 'https://mmclassification.readthedocs.io/en/1.x/modelzoo_statistics.html#all-checkpoints' # noqa: E501 + 'https://mmpretrain.readthedocs.io/en/1.x/modelzoo_statistics.html#all-checkpoints' # noqa: E501 ) result = inferencer(args.img, show=args.show, show_dir=args.show_dir)[0] # show the results diff --git a/docs/en/conf.py b/docs/en/conf.py index 3925133b..f619cf07 100644 --- a/docs/en/conf.py +++ b/docs/en/conf.py @@ -22,12 +22,12 @@ sys.path.insert(0, os.path.abspath('../../')) # -- Project information ----------------------------------------------------- -project = 'MMClassification' +project = 'MMPretrain' 
copyright = '2020, OpenMMLab' -author = 'MMClassification Authors' +author = 'MMPretrain Authors' # The full version, including alpha/beta/rc tags -version_file = '../../mmcls/version.py' +version_file = '../../mmpretrain/version.py' def get_version(): @@ -92,25 +92,25 @@ html_theme_options = { 'menu': [ { 'name': 'GitHub', - 'url': 'https://github.com/open-mmlab/mmclassification' + 'url': 'https://github.com/open-mmlab/mmpretrain' }, { 'name': 'Colab Tutorials', 'children': [ {'name': 'Train and inference with shell commands', - 'url': 'https://colab.research.google.com/github/mzr1996/mmclassification-tutorial/blob/master/1.x/MMClassification_tools.ipynb'}, + 'url': 'https://colab.research.google.com/github/mzr1996/mmpretrain-tutorial/blob/master/1.x/MMPretrain_tools.ipynb'}, {'name': 'Train and inference with Python APIs', - 'url': 'https://colab.research.google.com/github/mzr1996/mmclassification-tutorial/blob/master/1.x/MMClassification_python.ipynb'}, + 'url': 'https://colab.research.google.com/github/mzr1996/mmpretrain-tutorial/blob/master/1.x/MMPretrain_python.ipynb'}, ] }, { 'name': 'Version', 'children': [ - {'name': 'MMClassification 0.x', - 'url': 'https://mmclassification.readthedocs.io/en/latest/', + {'name': 'MMPretrain 0.x', + 'url': 'https://mmpretrain.readthedocs.io/en/latest/', 'description': 'master branch'}, - {'name': 'MMClassification 1.x', - 'url': 'https://mmclassification.readthedocs.io/en/dev-1.x/', + {'name': 'MMPretrain 1.x', + 'url': 'https://mmpretrain.readthedocs.io/en/dev-1.x/', 'description': '1.x branch'}, ], } @@ -138,7 +138,7 @@ html_js_files = [ # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. 
-htmlhelp_basename = 'mmclsdoc' +htmlhelp_basename = 'mmpretraindoc' # -- Options for LaTeX output ------------------------------------------------ @@ -160,16 +160,14 @@ latex_elements = { # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (root_doc, 'mmcls.tex', 'MMClassification Documentation', author, - 'manual'), + (root_doc, 'mmpretrain.tex', 'MMPretrain Documentation', author, 'manual'), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [(root_doc, 'mmcls', 'MMClassification Documentation', [author], 1) - ] +man_pages = [(root_doc, 'mmpretrain', 'MMPretrain Documentation', [author], 1)] # -- Options for Texinfo output ---------------------------------------------- @@ -177,7 +175,7 @@ man_pages = [(root_doc, 'mmcls', 'MMClassification Documentation', [author], 1) # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (root_doc, 'mmcls', 'MMClassification Documentation', author, 'mmcls', + (root_doc, 'mmpretrain', 'MMPretrain Documentation', author, 'mmpretrain', 'OpenMMLab image classification toolbox and benchmark.', 'Miscellaneous'), ] diff --git a/docs/en/stat.py b/docs/en/stat.py index 77f6a5f1..84987be6 100755 --- a/docs/en/stat.py +++ b/docs/en/stat.py @@ -9,7 +9,7 @@ from tabulate import tabulate MMCLS_ROOT = Path(__file__).absolute().parents[2] PAPERS_ROOT = Path('papers') # Path to save generated paper pages. 
-GITHUB_PREFIX = 'https://github.com/open-mmlab/mmclassification/blob/1.x/' +GITHUB_PREFIX = 'https://github.com/open-mmlab/mmpretrain/blob/1.x/' MODELZOO_TEMPLATE = """ # Model Zoo Summary diff --git a/docs/zh_CN/conf.py b/docs/zh_CN/conf.py index 4b857da7..a1a87786 100644 --- a/docs/zh_CN/conf.py +++ b/docs/zh_CN/conf.py @@ -22,12 +22,12 @@ sys.path.insert(0, os.path.abspath('../..')) # -- Project information ----------------------------------------------------- -project = 'MMClassification' +project = 'MMPretrain' copyright = '2020, OpenMMLab' -author = 'MMClassification Authors' +author = 'MMPretrain Authors' # The full version, including alpha/beta/rc tags -version_file = '../../mmcls/version.py' +version_file = '../../mmpretrain/version.py' def get_version(): @@ -92,25 +92,25 @@ html_theme_options = { 'menu': [ { 'name': 'GitHub', - 'url': 'https://github.com/open-mmlab/mmclassification' + 'url': 'https://github.com/open-mmlab/mmpretrain' }, { 'name': 'Colab 教程', 'children': [ {'name': '用命令行工具训练和推理', - 'url': 'https://colab.research.google.com/github/mzr1996/mmclassification-tutorial/blob/master/1.x/MMClassification_tools.ipynb'}, + 'url': 'https://colab.research.google.com/github/mzr1996/mmpretrain-tutorial/blob/master/1.x/MMPretrain_tools.ipynb'}, {'name': '用 Python API 训练和推理', - 'url': 'https://colab.research.google.com/github/mzr1996/mmclassification-tutorial/blob/master/1.x/MMClassification_python.ipynb'}, + 'url': 'https://colab.research.google.com/github/mzr1996/mmpretrain-tutorial/blob/master/1.x/MMPretrain_python.ipynb'}, ] }, { 'name': 'Version', 'children': [ - {'name': 'MMClassification 0.x', - 'url': 'https://mmclassification.readthedocs.io/zh_CN/latest/', + {'name': 'MMPretrain 0.x', + 'url': 'https://mmpretrain.readthedocs.io/zh_CN/latest/', 'description': 'master branch'}, - {'name': 'MMClassification 1.x', - 'url': 'https://mmclassification.readthedocs.io/zh_CN/dev-1.x/', + {'name': 'MMPretrain 1.x', + 'url': 
'https://mmpretrain.readthedocs.io/zh_CN/dev-1.x/', 'description': '1.x branch'}, ], } @@ -138,7 +138,7 @@ html_js_files = [ # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. -htmlhelp_basename = 'mmclsdoc' +htmlhelp_basename = 'mmpretraindoc' # -- Options for LaTeX output ------------------------------------------------ @@ -164,16 +164,14 @@ latex_elements = { # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (root_doc, 'mmcls.tex', 'MMClassification Documentation', author, - 'manual'), + (root_doc, 'mmpretrain.tex', 'MMPretrain Documentation', author, 'manual'), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [(root_doc, 'mmcls', 'MMClassification Documentation', [author], 1) - ] +man_pages = [(root_doc, 'mmpretrain', 'MMPretrain Documentation', [author], 1)] # -- Options for Texinfo output ---------------------------------------------- @@ -181,7 +179,7 @@ man_pages = [(root_doc, 'mmcls', 'MMClassification Documentation', [author], 1) # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (root_doc, 'mmcls', 'MMClassification Documentation', author, 'mmcls', + (root_doc, 'mmpretrain', 'MMPretrain Documentation', author, 'mmpretrain', 'OpenMMLab image classification toolbox and benchmark.', 'Miscellaneous'), ] diff --git a/docs/zh_CN/stat.py b/docs/zh_CN/stat.py index 86210157..0f3edb8b 100755 --- a/docs/zh_CN/stat.py +++ b/docs/zh_CN/stat.py @@ -9,7 +9,7 @@ from tabulate import tabulate MMCLS_ROOT = Path(__file__).absolute().parents[2] PAPERS_ROOT = Path('papers') # Path to save generated paper pages. 
-GITHUB_PREFIX = 'https://github.com/open-mmlab/mmclassification/blob/1.x/' +GITHUB_PREFIX = 'https://github.com/open-mmlab/mmpretrain/blob/1.x/' MODELZOO_TEMPLATE = """ # 模型库统计 diff --git a/mmcls/utils/setup_env.py b/mmcls/utils/setup_env.py deleted file mode 100644 index 2d959d23..00000000 --- a/mmcls/utils/setup_env.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import datetime -import warnings - -from mmengine import DefaultScope - - -def register_all_modules(init_default_scope: bool = True) -> None: - """Register all modules in mmcls into the registries. - - Args: - init_default_scope (bool): Whether initialize the mmcls default scope. - If True, the global default scope will be set to `mmcls`, and all - registries will build modules from mmcls's registry node. To - understand more about the registry, please refer to - https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md - Defaults to True. - """ # noqa - import mmcls.datasets # noqa: F401,F403 - import mmcls.engine # noqa: F401,F403 - import mmcls.evaluation # noqa: F401,F403 - import mmcls.models # noqa: F401,F403 - import mmcls.structures # noqa: F401,F403 - import mmcls.visualization # noqa: F401,F403 - - if not init_default_scope: - return - - current_scope = DefaultScope.get_current_instance() - if current_scope is None: - DefaultScope.get_instance('mmcls', scope_name='mmcls') - elif current_scope.scope_name != 'mmcls': - warnings.warn(f'The current default scope "{current_scope.scope_name}"' - ' is not "mmcls", `register_all_modules` will force the ' - 'current default scope to be "mmcls". 
If this is not ' - 'expected, please set `init_default_scope=False`.') - # avoid name conflict - new_instance_name = f'mmcls-{datetime.datetime.now()}' - DefaultScope.get_instance(new_instance_name, scope_name='mmcls') diff --git a/mmcls/__init__.py b/mmpretrain/__init__.py similarity index 100% rename from mmcls/__init__.py rename to mmpretrain/__init__.py diff --git a/mmcls/apis/__init__.py b/mmpretrain/apis/__init__.py similarity index 100% rename from mmcls/apis/__init__.py rename to mmpretrain/apis/__init__.py diff --git a/mmcls/apis/inference.py b/mmpretrain/apis/inference.py similarity index 96% rename from mmcls/apis/inference.py rename to mmpretrain/apis/inference.py index 5d5e8a08..a55fa12d 100644 --- a/mmcls/apis/inference.py +++ b/mmpretrain/apis/inference.py @@ -11,8 +11,8 @@ from mmengine.infer import BaseInferencer from mmengine.model import BaseModel from mmengine.runner import load_checkpoint -from mmcls.registry import TRANSFORMS -from mmcls.structures import ClsDataSample +from mmpretrain.registry import TRANSFORMS +from mmpretrain.structures import ClsDataSample from .model import get_model, init_model, list_models ModelType = Union[BaseModel, str, Config] @@ -63,7 +63,7 @@ class ImageClassificationInferencer(BaseInferencer): Example: 1. Use a pre-trained model in MMClassification to inference an image. - >>> from mmcls import ImageClassificationInferencer + >>> from mmpretrain import ImageClassificationInferencer >>> inferencer = ImageClassificationInferencer('resnet50_8xb32_in1k') >>> inferencer('demo/demo.JPEG') [{'pred_score': array([...]), @@ -74,7 +74,7 @@ class ImageClassificationInferencer(BaseInferencer): 2. Use a config file and checkpoint to inference multiple images on GPU, and save the visualization results in a folder. 
- >>> from mmcls import ImageClassificationInferencer + >>> from mmpretrain import ImageClassificationInferencer >>> inferencer = ImageClassificationInferencer( model='configs/resnet/resnet50_8xb32_in1k.py', weights='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', @@ -107,7 +107,7 @@ class ImageClassificationInferencer(BaseInferencer): else: raise TypeError( 'The `model` can be a name of model and you can use ' - '`mmcls.list_models` to get an available name. It can ' + '`mmpretrain.list_models` to get an available name. It can ' 'also be a Config object or a path to the config file.') model.eval() @@ -185,7 +185,7 @@ class ImageClassificationInferencer(BaseInferencer): return None if self.visualizer is None: - from mmcls.visualization import ClsVisualizer + from mmpretrain.visualization import ClsVisualizer self.visualizer = ClsVisualizer() if self.classes is not None: self.visualizer._dataset_meta = dict(classes=self.classes) diff --git a/mmcls/apis/model.py b/mmpretrain/apis/model.py similarity index 88% rename from mmcls/apis/model.py rename to mmpretrain/apis/model.py index 31bd4777..c08d13c8 100644 --- a/mmcls/apis/model.py +++ b/mmpretrain/apis/model.py @@ -15,7 +15,7 @@ from modelindex.models.Model import Model class ModelHub: """A hub to host the meta information of all pre-defined models.""" _models_dict = {} - __mmcls_registered = False + __mmpretrain_registered = False @classmethod def register_model_index(cls, @@ -52,7 +52,7 @@ class ModelHub: Returns: modelindex.models.Model: The metainfo of the specified model. 
""" - cls._register_mmcls_models() + cls._register_mmpretrain_models() # lazy load config metainfo = copy.deepcopy(cls._models_dict.get(model_name.lower())) if metainfo is None: @@ -75,15 +75,15 @@ class ModelHub: return config_path @classmethod - def _register_mmcls_models(cls): - # register models in mmcls - if not cls.__mmcls_registered: + def _register_mmpretrain_models(cls): + # register models in mmpretrain + if not cls.__mmpretrain_registered: from mmengine.utils import get_installed_path - mmcls_root = Path(get_installed_path('mmcls')) - model_index_path = mmcls_root / '.mim' / 'model-index.yml' + mmpretrain_root = Path(get_installed_path('mmpretrain')) + model_index_path = mmpretrain_root / '.mim' / 'model-index.yml' ModelHub.register_model_index( - model_index_path, config_prefix=mmcls_root / '.mim') - cls.__mmcls_registered = True + model_index_path, config_prefix=mmpretrain_root / '.mim') + cls.__mmpretrain_registered = True @classmethod def has(cls, model_name): @@ -118,7 +118,7 @@ def init_model(config, checkpoint=None, device=None, **kwargs): config.model.setdefault('data_preprocessor', config.get('data_preprocessor', None)) - from mmcls.registry import MODELS + from mmpretrain.registry import MODELS model = MODELS.build(config.model) if checkpoint is not None: @@ -130,13 +130,13 @@ def init_model(config, checkpoint=None, device=None, **kwargs): # Don't set CLASSES if the model is headless. 
pass elif 'dataset_meta' in checkpoint.get('meta', {}): - # mmcls 1.x + # mmpretrain 1.x model.CLASSES = checkpoint['meta']['dataset_meta'].get('classes') elif 'CLASSES' in checkpoint.get('meta', {}): - # mmcls < 1.x + # mmpretrain < 1.x or mmselfsup < 1.x model.CLASSES = checkpoint['meta']['CLASSES'] else: - from mmcls.datasets.categories import IMAGENET_CATEGORIES + from mmpretrain.datasets.categories import IMAGENET_CATEGORIES warnings.simplefilter('once') warnings.warn('Class names are not saved in the checkpoint\'s ' 'meta data, use imagenet by default.') @@ -165,7 +165,7 @@ def get_model(model_name, pretrained=False, device=None, **kwargs): Get a ResNet-50 model and extract images feature: >>> import torch - >>> from mmcls import get_model + >>> from mmpretrain import get_model >>> inputs = torch.rand(16, 3, 224, 224) >>> model = get_model('resnet50_8xb32_in1k', pretrained=True, backbone=dict(out_indices=(0, 1, 2, 3))) >>> feats = model.extract_feat(inputs) @@ -178,7 +178,7 @@ def get_model(model_name, pretrained=False, device=None, **kwargs): Get Swin-Transformer model with pre-trained weights and inference: - >>> from mmcls import get_model, inference_model + >>> from mmpretrain import get_model, inference_model >>> model = get_model('swin-base_16xb64_in1k', pretrained=True) >>> result = inference_model(model, 'demo/demo.JPEG') >>> print(result['pred_class']) @@ -201,7 +201,7 @@ def get_model(model_name, pretrained=False, device=None, **kwargs): def list_models(pattern=None) -> List[str]: - """List all models available in MMClassification. + """List all models available in MMPretrain. Args: pattern (str | None): A wildcard pattern to match model names. 
@@ -212,12 +212,12 @@ def list_models(pattern=None) -> List[str]: Examples: List all models: - >>> from mmcls import list_models + >>> from mmpretrain import list_models >>> print(list_models()) List ResNet-50 models on ImageNet-1k dataset: - >>> from mmcls import list_models + >>> from mmpretrain import list_models >>> print(list_models('resnet*in1k')) ['resnet50_8xb32_in1k', 'resnet50_8xb32-fp16_in1k', @@ -225,7 +225,7 @@ def list_models(pattern=None) -> List[str]: 'resnet50_8xb256-rsb-a2-300e_in1k', 'resnet50_8xb256-rsb-a3-100e_in1k'] """ - ModelHub._register_mmcls_models() + ModelHub._register_mmpretrain_models() if pattern is None: return sorted(list(ModelHub._models_dict.keys())) # Always match keys with any postfix. diff --git a/mmcls/datasets/__init__.py b/mmpretrain/datasets/__init__.py similarity index 100% rename from mmcls/datasets/__init__.py rename to mmpretrain/datasets/__init__.py diff --git a/mmcls/datasets/base_dataset.py b/mmpretrain/datasets/base_dataset.py similarity index 99% rename from mmcls/datasets/base_dataset.py rename to mmpretrain/datasets/base_dataset.py index 584b27ea..bd876350 100644 --- a/mmcls/datasets/base_dataset.py +++ b/mmpretrain/datasets/base_dataset.py @@ -7,7 +7,7 @@ import mmengine import numpy as np from mmengine.dataset import BaseDataset as _BaseDataset -from mmcls.registry import DATASETS, TRANSFORMS +from mmpretrain.registry import DATASETS, TRANSFORMS def expanduser(path): diff --git a/mmcls/datasets/builder.py b/mmpretrain/datasets/builder.py similarity index 88% rename from mmcls/datasets/builder.py rename to mmpretrain/datasets/builder.py index bc7fcc1c..dfa3872f 100644 --- a/mmcls/datasets/builder.py +++ b/mmpretrain/datasets/builder.py @@ -1,12 +1,12 @@ # Copyright (c) OpenMMLab. All rights reserved. -from mmcls.registry import DATASETS +from mmpretrain.registry import DATASETS def build_dataset(cfg): """Build dataset. 
Examples: - >>> from mmcls.datasets import build_dataset + >>> from mmpretrain.datasets import build_dataset >>> mnist_train = build_dataset( ... dict(type='MNIST', data_prefix='data/mnist/', test_mode=False)) >>> print(mnist_train) diff --git a/mmcls/datasets/categories.py b/mmpretrain/datasets/categories.py similarity index 100% rename from mmcls/datasets/categories.py rename to mmpretrain/datasets/categories.py diff --git a/mmcls/datasets/cifar.py b/mmpretrain/datasets/cifar.py similarity index 99% rename from mmcls/datasets/cifar.py rename to mmpretrain/datasets/cifar.py index 25d9d058..aa8f50c9 100644 --- a/mmcls/datasets/cifar.py +++ b/mmpretrain/datasets/cifar.py @@ -7,7 +7,7 @@ import numpy as np from mmengine.fileio import (LocalBackend, exists, get, get_file_backend, join_path) -from mmcls.registry import DATASETS +from mmpretrain.registry import DATASETS from .base_dataset import BaseDataset from .categories import CIFAR10_CATEGORIES, CIFAR100_CATEGORIES from .utils import check_md5, download_and_extract_archive diff --git a/mmcls/datasets/cub.py b/mmpretrain/datasets/cub.py similarity index 98% rename from mmcls/datasets/cub.py rename to mmpretrain/datasets/cub.py index 5248df09..b8466a59 100644 --- a/mmcls/datasets/cub.py +++ b/mmpretrain/datasets/cub.py @@ -3,7 +3,7 @@ from typing import List from mmengine import get_file_backend, list_from_file -from mmcls.registry import DATASETS +from mmpretrain.registry import DATASETS from .base_dataset import BaseDataset from .categories import CUB_CATEGORIES @@ -51,7 +51,7 @@ class CUB(BaseDataset): Examples: - >>> from mmcls.datasets import CUB + >>> from mmpretrain.datasets import CUB >>> cub_train_cfg = dict(data_root='data/CUB_200_2011', test_mode=True) >>> cub_train = CUB(**cub_train_cfg) >>> cub_train diff --git a/mmcls/datasets/custom.py b/mmpretrain/datasets/custom.py similarity index 99% rename from mmcls/datasets/custom.py rename to mmpretrain/datasets/custom.py index af1c0c14..3ed40b3d 100644 --- 
a/mmcls/datasets/custom.py +++ b/mmpretrain/datasets/custom.py @@ -5,7 +5,7 @@ from mmengine.fileio import (BaseStorageBackend, get_file_backend, list_from_file) from mmengine.logging import MMLogger -from mmcls.registry import DATASETS +from mmpretrain.registry import DATASETS from .base_dataset import BaseDataset diff --git a/mmcls/datasets/dataset_wrappers.py b/mmpretrain/datasets/dataset_wrappers.py similarity index 99% rename from mmcls/datasets/dataset_wrappers.py rename to mmpretrain/datasets/dataset_wrappers.py index 9010d526..1adff10b 100644 --- a/mmcls/datasets/dataset_wrappers.py +++ b/mmpretrain/datasets/dataset_wrappers.py @@ -4,7 +4,7 @@ import copy import numpy as np from mmengine.dataset import BaseDataset, force_full_init -from mmcls.registry import DATASETS +from mmpretrain.registry import DATASETS @DATASETS.register_module() diff --git a/mmcls/datasets/imagenet.py b/mmpretrain/datasets/imagenet.py similarity index 98% rename from mmcls/datasets/imagenet.py rename to mmpretrain/datasets/imagenet.py index 63878776..e1a8619e 100644 --- a/mmcls/datasets/imagenet.py +++ b/mmpretrain/datasets/imagenet.py @@ -3,7 +3,7 @@ from typing import Optional, Union from mmengine.logging import MMLogger -from mmcls.registry import DATASETS +from mmpretrain.registry import DATASETS from .categories import IMAGENET_CATEGORIES from .custom import CustomDataset diff --git a/mmcls/datasets/inshop.py b/mmpretrain/datasets/inshop.py similarity index 97% rename from mmcls/datasets/inshop.py rename to mmpretrain/datasets/inshop.py index 2efdb638..691f6fd8 100644 --- a/mmcls/datasets/inshop.py +++ b/mmpretrain/datasets/inshop.py @@ -1,8 +1,8 @@ # Copyright (c) OpenMMLab. All rights reserved. 
from mmengine import get_file_backend, list_from_file -from mmcls.datasets.base_dataset import BaseDataset -from mmcls.registry import DATASETS +from mmpretrain.registry import DATASETS +from .base_dataset import BaseDataset @DATASETS.register_module() @@ -35,7 +35,7 @@ class InShop(BaseDataset): **kwargs: Other keyword arguments in :class:`BaseDataset`. Examples: - >>> from mmcls.datasets import InShop + >>> from mmpretrain.datasets import InShop >>> >>> # build train InShop dataset >>> inshop_train_cfg = dict(data_root='data/inshop', split='train') diff --git a/mmcls/datasets/mnist.py b/mmpretrain/datasets/mnist.py similarity index 99% rename from mmcls/datasets/mnist.py rename to mmpretrain/datasets/mnist.py index 71d980df..2617146f 100644 --- a/mmcls/datasets/mnist.py +++ b/mmpretrain/datasets/mnist.py @@ -8,7 +8,7 @@ import numpy as np import torch from mmengine.fileio import LocalBackend, exists, get_file_backend, join_path -from mmcls.registry import DATASETS +from mmpretrain.registry import DATASETS from .base_dataset import BaseDataset from .categories import FASHIONMNIST_CATEGORITES, MNIST_CATEGORITES from .utils import (download_and_extract_archive, open_maybe_compressed_file, diff --git a/mmcls/datasets/multi_label.py b/mmpretrain/datasets/multi_label.py similarity index 98% rename from mmcls/datasets/multi_label.py rename to mmpretrain/datasets/multi_label.py index b5e45cf3..40aeef0b 100644 --- a/mmcls/datasets/multi_label.py +++ b/mmpretrain/datasets/multi_label.py @@ -1,7 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. 
from typing import List -from mmcls.registry import DATASETS +from mmpretrain.registry import DATASETS from .base_dataset import BaseDataset diff --git a/mmcls/datasets/multi_task.py b/mmpretrain/datasets/multi_task.py similarity index 97% rename from mmcls/datasets/multi_task.py rename to mmpretrain/datasets/multi_task.py index a28b4982..577647db 100644 --- a/mmcls/datasets/multi_task.py +++ b/mmpretrain/datasets/multi_task.py @@ -60,7 +60,7 @@ class MultiTaskDataset: Assume we put our dataset in the ``data/mydataset`` folder in the repository and organize it as the below format: :: - mmclassification/ + mmpretrain/ └── data └── mydataset ├── annotation @@ -81,7 +81,7 @@ class MultiTaskDataset: .. code:: python - >>> from mmcls.datasets import build_dataset + >>> from mmpretrain.datasets import build_dataset >>> train_cfg = dict( ... type="MultiTaskDataset", ... ann_file="annotation/train.json", @@ -94,7 +94,7 @@ class MultiTaskDataset: Or we can put all files in the same folder: :: - mmclassification/ + mmpretrain/ └── data └── mydataset ├── train.json @@ -109,7 +109,7 @@ class MultiTaskDataset: .. code:: python - >>> from mmcls.datasets import build_dataset + >>> from mmpretrain.datasets import build_dataset >>> train_cfg = dict( ... type="MultiTaskDataset", ... ann_file="train.json", @@ -133,8 +133,8 @@ class MultiTaskDataset: ``data_root`` for the ``"img_path"`` field in the annotation file. Defaults to None. pipeline (Sequence[dict]): A list of dict, where each element - represents a operation defined in :mod:`mmcls.datasets.pipelines`. - Defaults to an empty tuple. + represents a operation defined in + :mod:`mmpretrain.datasets.pipelines`. Defaults to an empty tuple. test_mode (bool): in train mode or test mode. Defaults to False. file_client_args (dict, optional): Arguments to instantiate a FileClient. See :class:`mmengine.fileio.FileClient` for details. 
diff --git a/mmcls/datasets/samplers/__init__.py b/mmpretrain/datasets/samplers/__init__.py similarity index 100% rename from mmcls/datasets/samplers/__init__.py rename to mmpretrain/datasets/samplers/__init__.py diff --git a/mmcls/datasets/samplers/repeat_aug.py b/mmpretrain/datasets/samplers/repeat_aug.py similarity index 98% rename from mmcls/datasets/samplers/repeat_aug.py rename to mmpretrain/datasets/samplers/repeat_aug.py index d4b7e1e9..d833a195 100644 --- a/mmcls/datasets/samplers/repeat_aug.py +++ b/mmpretrain/datasets/samplers/repeat_aug.py @@ -5,7 +5,7 @@ import torch from mmengine.dist import get_dist_info, is_main_process, sync_random_seed from torch.utils.data import Sampler -from mmcls.registry import DATA_SAMPLERS +from mmpretrain.registry import DATA_SAMPLERS @DATA_SAMPLERS.register_module() diff --git a/mmcls/datasets/transforms/__init__.py b/mmpretrain/datasets/transforms/__init__.py similarity index 100% rename from mmcls/datasets/transforms/__init__.py rename to mmpretrain/datasets/transforms/__init__.py diff --git a/mmcls/datasets/transforms/auto_augment.py b/mmpretrain/datasets/transforms/auto_augment.py similarity index 99% rename from mmcls/datasets/transforms/auto_augment.py rename to mmpretrain/datasets/transforms/auto_augment.py index b444dd27..9b9987b9 100644 --- a/mmcls/datasets/transforms/auto_augment.py +++ b/mmpretrain/datasets/transforms/auto_augment.py @@ -11,7 +11,7 @@ from mmcv.transforms import BaseTransform, Compose, RandomChoice from mmcv.transforms.utils import cache_randomness from mmengine.utils import is_list_of, is_seq_of -from mmcls.registry import TRANSFORMS +from mmpretrain.registry import TRANSFORMS def merge_hparams(policy: dict, hparams: dict) -> dict: @@ -125,7 +125,7 @@ class RandAugment(BaseTransform): time, and magnitude_level of every policy is 6 (total is 10 by default) >>> import numpy as np - >>> from mmcls.datasets import RandAugment + >>> from mmpretrain.datasets import RandAugment >>> transform = 
RandAugment( ... policies='timm_increasing', ... num_policies=2, diff --git a/mmcls/datasets/transforms/formatting.py b/mmpretrain/datasets/transforms/formatting.py similarity index 97% rename from mmcls/datasets/transforms/formatting.py rename to mmpretrain/datasets/transforms/formatting.py index d96ffed9..9723833a 100644 --- a/mmcls/datasets/transforms/formatting.py +++ b/mmpretrain/datasets/transforms/formatting.py @@ -9,8 +9,8 @@ from mmcv.transforms import BaseTransform from mmengine.utils import is_str from PIL import Image -from mmcls.registry import TRANSFORMS -from mmcls.structures import ClsDataSample, MultiTaskDataSample +from mmpretrain.registry import TRANSFORMS +from mmpretrain.structures import ClsDataSample, MultiTaskDataSample def to_tensor(data): @@ -53,8 +53,8 @@ class PackClsInputs(BaseTransform): **Added Keys:** - inputs (:obj:`torch.Tensor`): The forward data of models. - - data_samples (:obj:`~mmcls.structures.ClsDataSample`): The annotation - info of the sample. + - data_samples (:obj:`~mmpretrain.structures.ClsDataSample`): The + annotation info of the sample. 
Args: meta_keys (Sequence[str]): The meta keys to be saved in the diff --git a/mmcls/datasets/transforms/processing.py b/mmpretrain/datasets/transforms/processing.py similarity index 99% rename from mmcls/datasets/transforms/processing.py rename to mmpretrain/datasets/transforms/processing.py index 20b7b0b4..c66a1339 100644 --- a/mmcls/datasets/transforms/processing.py +++ b/mmpretrain/datasets/transforms/processing.py @@ -11,7 +11,7 @@ import numpy as np from mmcv.transforms import BaseTransform from mmcv.transforms.utils import cache_randomness -from mmcls.registry import TRANSFORMS +from mmpretrain.registry import TRANSFORMS try: import albumentations @@ -1008,7 +1008,7 @@ class Lighting(BaseTransform): return repr_str -# 'Albu' is used in previous versions of mmcls, here is for compatibility +# 'Albu' is used in previous versions of mmpretrain, here is for compatibility # users can use both 'Albumentations' and 'Albu'. @TRANSFORMS.register_module(['Albumentations', 'Albu']) class Albumentations(BaseTransform): @@ -1055,13 +1055,13 @@ class Albumentations(BaseTransform): Args: transforms (List[Dict]): List of albumentations transform configs. - keymap (Optional[Dict]): Mapping of mmcls to albumentations fields, - in format {'input key':'albumentation-style key'}. Defaults to - None. + keymap (Optional[Dict]): Mapping of mmpretrain to albumentations + fields, in format {'input key':'albumentation-style key'}. + Defaults to None. Example: >>> import mmcv - >>> from mmcls.datasets import Albumentations + >>> from mmpretrain.datasets import Albumentations >>> transforms = [ ... dict( ... 
type='ShiftScaleRotate', diff --git a/mmcls/datasets/utils.py b/mmpretrain/datasets/utils.py similarity index 100% rename from mmcls/datasets/utils.py rename to mmpretrain/datasets/utils.py diff --git a/mmcls/datasets/voc.py b/mmpretrain/datasets/voc.py similarity index 99% rename from mmcls/datasets/voc.py rename to mmpretrain/datasets/voc.py index 346ff330..fa8475c5 100644 --- a/mmcls/datasets/voc.py +++ b/mmpretrain/datasets/voc.py @@ -4,7 +4,7 @@ from typing import List, Optional, Union from mmengine import get_file_backend, list_from_file -from mmcls.registry import DATASETS +from mmpretrain.registry import DATASETS from .base_dataset import expanduser from .categories import VOC2007_CATEGORIES from .multi_label import MultiLabelDataset diff --git a/mmcls/engine/__init__.py b/mmpretrain/engine/__init__.py similarity index 100% rename from mmcls/engine/__init__.py rename to mmpretrain/engine/__init__.py diff --git a/mmcls/engine/hooks/__init__.py b/mmpretrain/engine/hooks/__init__.py similarity index 100% rename from mmcls/engine/hooks/__init__.py rename to mmpretrain/engine/hooks/__init__.py diff --git a/mmcls/engine/hooks/class_num_check_hook.py b/mmpretrain/engine/hooks/class_num_check_hook.py similarity index 98% rename from mmcls/engine/hooks/class_num_check_hook.py rename to mmpretrain/engine/hooks/class_num_check_hook.py index 4414836c..38170d66 100644 --- a/mmcls/engine/hooks/class_num_check_hook.py +++ b/mmpretrain/engine/hooks/class_num_check_hook.py @@ -2,7 +2,7 @@ from mmengine.hooks import Hook from mmengine.utils import is_seq_of -from mmcls.registry import HOOKS +from mmpretrain.registry import HOOKS @HOOKS.register_module() diff --git a/mmcls/engine/hooks/ema_hook.py b/mmpretrain/engine/hooks/ema_hook.py similarity index 99% rename from mmcls/engine/hooks/ema_hook.py rename to mmpretrain/engine/hooks/ema_hook.py index c1d270c3..284d211b 100644 --- a/mmcls/engine/hooks/ema_hook.py +++ b/mmpretrain/engine/hooks/ema_hook.py @@ -8,7 +8,7 @@ from 
mmengine.hooks import EMAHook as BaseEMAHook from mmengine.logging import MMLogger from mmengine.runner import Runner -from mmcls.registry import HOOKS +from mmpretrain.registry import HOOKS @HOOKS.register_module() diff --git a/mmcls/engine/hooks/margin_head_hooks.py b/mmpretrain/engine/hooks/margin_head_hooks.py similarity index 96% rename from mmcls/engine/hooks/margin_head_hooks.py rename to mmpretrain/engine/hooks/margin_head_hooks.py index 7ca87843..fbeae7a3 100644 --- a/mmcls/engine/hooks/margin_head_hooks.py +++ b/mmpretrain/engine/hooks/margin_head_hooks.py @@ -3,8 +3,8 @@ import numpy as np from mmengine.hooks import Hook from mmengine.model import is_model_wrapper -from mmcls.models.heads import ArcFaceClsHead -from mmcls.registry import HOOKS +from mmpretrain.models.heads import ArcFaceClsHead +from mmpretrain.registry import HOOKS @HOOKS.register_module() diff --git a/mmcls/engine/hooks/precise_bn_hook.py b/mmpretrain/engine/hooks/precise_bn_hook.py similarity index 99% rename from mmcls/engine/hooks/precise_bn_hook.py rename to mmpretrain/engine/hooks/precise_bn_hook.py index d72ae9c2..9f5f5841 100644 --- a/mmcls/engine/hooks/precise_bn_hook.py +++ b/mmpretrain/engine/hooks/precise_bn_hook.py @@ -20,7 +20,7 @@ from torch.nn.modules.batchnorm import _BatchNorm from torch.nn.modules.instancenorm import _InstanceNorm from torch.utils.data import DataLoader -from mmcls.registry import HOOKS +from mmpretrain.registry import HOOKS DATA_BATCH = Optional[Sequence[dict]] diff --git a/mmcls/engine/hooks/retriever_hooks.py b/mmpretrain/engine/hooks/retriever_hooks.py similarity index 74% rename from mmcls/engine/hooks/retriever_hooks.py rename to mmpretrain/engine/hooks/retriever_hooks.py index f09c5ff0..6bd7c7aa 100644 --- a/mmcls/engine/hooks/retriever_hooks.py +++ b/mmpretrain/engine/hooks/retriever_hooks.py @@ -4,8 +4,8 @@ import warnings from mmengine.hooks import Hook from mmengine.model import is_model_wrapper -from mmcls.models import BaseRetriever -from 
mmcls.registry import HOOKS +from mmpretrain.models import BaseRetriever +from mmpretrain.registry import HOOKS @HOOKS.register_module() @@ -27,5 +27,6 @@ class PrepareProtoBeforeValLoopHook(Hook): model.prepare_prototype() else: warnings.warn( - 'Only the `mmcls.models.retrievers.BaseRetriever` can execute ' - f'`PrepareRetrieverPrototypeHook`, but got `{type(model)}`') + 'Only the `mmpretrain.models.retrievers.BaseRetriever` ' + 'can execute `PrepareRetrieverPrototypeHook`, but got ' + f'`{type(model)}`') diff --git a/mmcls/engine/hooks/switch_recipe_hook.py b/mmpretrain/engine/hooks/switch_recipe_hook.py similarity index 95% rename from mmcls/engine/hooks/switch_recipe_hook.py rename to mmpretrain/engine/hooks/switch_recipe_hook.py index b8739e5a..914b9572 100644 --- a/mmcls/engine/hooks/switch_recipe_hook.py +++ b/mmpretrain/engine/hooks/switch_recipe_hook.py @@ -6,8 +6,8 @@ from mmcv.transforms import Compose from mmengine.hooks import Hook from mmengine.model import is_model_wrapper -from mmcls.models.utils import RandomBatchAugment -from mmcls.registry import HOOKS, MODEL_WRAPPERS, MODELS +from mmpretrain.models.utils import RandomBatchAugment +from mmpretrain.registry import HOOKS, MODEL_WRAPPERS, MODELS @HOOKS.register_module() @@ -25,9 +25,9 @@ class SwitchRecipeHook(Hook): train dataset. If not specified, keep the original settings. - ``batch_augments`` (dict | None, optional): The new batch augmentations of during training. See :mod:`Batch Augmentations - ` for more details. If None, - disable batch augmentations. If not specified, keep the original - settings. + ` for more details. + If None, disable batch augmentations. If not specified, keep the + original settings. - ``loss`` (dict, optional): The new loss module config. If not specified, keep the original settings. 
diff --git a/mmcls/engine/hooks/visualization_hook.py b/mmpretrain/engine/hooks/visualization_hook.py similarity index 96% rename from mmcls/engine/hooks/visualization_hook.py rename to mmpretrain/engine/hooks/visualization_hook.py index 921804fe..5a392357 100644 --- a/mmcls/engine/hooks/visualization_hook.py +++ b/mmpretrain/engine/hooks/visualization_hook.py @@ -8,8 +8,8 @@ from mmengine.hooks import Hook from mmengine.runner import EpochBasedTrainLoop, Runner from mmengine.visualization import Visualizer -from mmcls.registry import HOOKS -from mmcls.structures import ClsDataSample +from mmpretrain.registry import HOOKS +from mmpretrain.structures import ClsDataSample @HOOKS.register_module() @@ -30,7 +30,7 @@ class VisualizationHook(Hook): in the testing process. If None, handle with the backends of the visualizer. Defaults to None. **kwargs: other keyword arguments of - :meth:`mmcls.visualization.ClsVisualizer.add_datasample`. + :meth:`mmpretrain.visualization.ClsVisualizer.add_datasample`. 
""" def __init__(self, diff --git a/mmcls/engine/optimizers/__init__.py b/mmpretrain/engine/optimizers/__init__.py similarity index 100% rename from mmcls/engine/optimizers/__init__.py rename to mmpretrain/engine/optimizers/__init__.py diff --git a/mmcls/engine/optimizers/adan_t.py b/mmpretrain/engine/optimizers/adan_t.py similarity index 99% rename from mmcls/engine/optimizers/adan_t.py rename to mmpretrain/engine/optimizers/adan_t.py index f031c523..571a71b6 100644 --- a/mmcls/engine/optimizers/adan_t.py +++ b/mmpretrain/engine/optimizers/adan_t.py @@ -19,7 +19,7 @@ import torch from torch import Tensor from torch.optim.optimizer import Optimizer -from mmcls.registry import OPTIMIZERS +from mmpretrain.registry import OPTIMIZERS @OPTIMIZERS.register_module() diff --git a/mmcls/engine/optimizers/lamb.py b/mmpretrain/engine/optimizers/lamb.py similarity index 99% rename from mmcls/engine/optimizers/lamb.py rename to mmpretrain/engine/optimizers/lamb.py index d39e5779..0b44a1c1 100644 --- a/mmcls/engine/optimizers/lamb.py +++ b/mmpretrain/engine/optimizers/lamb.py @@ -61,7 +61,7 @@ import math import torch from torch.optim import Optimizer -from mmcls.registry import OPTIMIZERS +from mmpretrain.registry import OPTIMIZERS @OPTIMIZERS.register_module() diff --git a/mmcls/evaluation/__init__.py b/mmpretrain/evaluation/__init__.py similarity index 66% rename from mmcls/evaluation/__init__.py rename to mmpretrain/evaluation/__init__.py index 7cbc4f2d..f70dc226 100644 --- a/mmcls/evaluation/__init__.py +++ b/mmpretrain/evaluation/__init__.py @@ -1,2 +1,3 @@ # Copyright (c) OpenMMLab. All rights reserved. 
+from .functional import * # noqa: F401,F403 from .metrics import * # noqa: F401,F403 diff --git a/mmcls/evaluation/metrics/__init__.py b/mmpretrain/evaluation/metrics/__init__.py similarity index 100% rename from mmcls/evaluation/metrics/__init__.py rename to mmpretrain/evaluation/metrics/__init__.py diff --git a/mmcls/evaluation/metrics/multi_label.py b/mmpretrain/evaluation/metrics/multi_label.py similarity index 98% rename from mmcls/evaluation/metrics/multi_label.py rename to mmpretrain/evaluation/metrics/multi_label.py index 7f8aa312..7027b9e1 100644 --- a/mmcls/evaluation/metrics/multi_label.py +++ b/mmpretrain/evaluation/metrics/multi_label.py @@ -7,7 +7,7 @@ from mmengine.evaluator import BaseMetric from mmengine.logging import MMLogger from mmengine.structures import LabelData -from mmcls.registry import METRICS +from mmpretrain.registry import METRICS from .single_label import _precision_recall_f1_support, to_tensor @@ -77,7 +77,7 @@ class MultiLabelMetric(BaseMetric): Examples: >>> import torch - >>> from mmcls.evaluation import MultiLabelMetric + >>> from mmpretrain.evaluation import MultiLabelMetric >>> # ------ The Basic Usage for category indices labels ------- >>> y_pred = [[0], [1], [0, 1], [3]] >>> y_true = [[0, 3], [0, 2], [1], [3]] @@ -114,7 +114,7 @@ class MultiLabelMetric(BaseMetric): (tensor(62.5000), tensor(31.2500), tensor(39.1667), tensor(8)) >>> >>> # ------------------- Use with Evalutor ------------------- - >>> from mmcls.structures import ClsDataSample + >>> from mmpretrain.structures import ClsDataSample >>> from mmengine.evaluator import Evaluator >>> data_sampels = [ ... ClsDataSample().set_pred_score(pred).set_gt_score(gt) @@ -466,7 +466,7 @@ class AveragePrecision(BaseMetric): Examples: >>> import torch - >>> from mmcls.evaluation import AveragePrecision + >>> from mmpretrain.evaluation import AveragePrecision >>> # --------- The Basic Usage for one-hot pred scores --------- >>> y_pred = torch.Tensor([[0.9, 0.8, 0.3, 0.2], ... 
[0.1, 0.2, 0.2, 0.1], @@ -479,7 +479,7 @@ class AveragePrecision(BaseMetric): >>> AveragePrecision.calculate(y_pred, y_true) tensor(70.833) >>> # ------------------- Use with Evalutor ------------------- - >>> from mmcls.structures import ClsDataSample + >>> from mmpretrain.structures import ClsDataSample >>> from mmengine.evaluator import Evaluator >>> data_samples = [ ... ClsDataSample().set_pred_score(i).set_gt_score(j) diff --git a/mmcls/evaluation/metrics/multi_task.py b/mmpretrain/evaluation/metrics/multi_task.py similarity index 97% rename from mmcls/evaluation/metrics/multi_task.py rename to mmpretrain/evaluation/metrics/multi_task.py index 5f07bdd0..0e6af768 100644 --- a/mmcls/evaluation/metrics/multi_task.py +++ b/mmpretrain/evaluation/metrics/multi_task.py @@ -3,7 +3,7 @@ from typing import Dict, Sequence from mmengine.evaluator import BaseMetric -from mmcls.registry import METRICS +from mmpretrain.registry import METRICS @METRICS.register_module() @@ -14,7 +14,7 @@ class MultiTasksMetric(BaseMetric): and the values is a list of the metric corresponds to this task Examples: >>> import torch - >>> from mmcls.evaluation import MultiTasksMetric + >>> from mmpretrain.evaluation import MultiTasksMetric # -------------------- The Basic Usage -------------------- >>>task_metrics = { 'task0': [dict(type='Accuracy', topk=(1, ))], diff --git a/mmcls/evaluation/metrics/retrieval.py b/mmpretrain/evaluation/metrics/retrieval.py similarity index 98% rename from mmcls/evaluation/metrics/retrieval.py rename to mmpretrain/evaluation/metrics/retrieval.py index 8dba9421..92a83980 100644 --- a/mmcls/evaluation/metrics/retrieval.py +++ b/mmpretrain/evaluation/metrics/retrieval.py @@ -8,7 +8,7 @@ from mmengine.evaluator import BaseMetric from mmengine.structures import LabelData from mmengine.utils import is_seq_of -from mmcls.registry import METRICS +from mmpretrain.registry import METRICS from .single_label import to_tensor @@ -33,7 +33,7 @@ class 
RetrievalRecall(BaseMetric): Use in the code: >>> import torch - >>> from mmcls.evaluation import RetrievalRecall + >>> from mmpretrain.evaluation import RetrievalRecall >>> # -------------------- The Basic Usage -------------------- >>> y_pred = [[0], [1], [2], [3]] >>> y_true = [[0, 1], [2], [1], [0, 3]] @@ -48,7 +48,7 @@ class RetrievalRecall(BaseMetric): [tensor(9.3000), tensor(48.4000)] >>> >>> # ------------------- Use with Evalutor ------------------- - >>> from mmcls.structures import ClsDataSample + >>> from mmpretrain.structures import ClsDataSample >>> from mmengine.evaluator import Evaluator >>> data_samples = [ ... ClsDataSample().set_gt_label([0, 1]).set_pred_score( diff --git a/mmcls/evaluation/metrics/single_label.py b/mmpretrain/evaluation/metrics/single_label.py similarity index 98% rename from mmcls/evaluation/metrics/single_label.py rename to mmpretrain/evaluation/metrics/single_label.py index eda31eb4..44584ad3 100644 --- a/mmcls/evaluation/metrics/single_label.py +++ b/mmpretrain/evaluation/metrics/single_label.py @@ -8,7 +8,7 @@ import torch import torch.nn.functional as F from mmengine.evaluator import BaseMetric -from mmcls.registry import METRICS +from mmpretrain.registry import METRICS def to_tensor(value): @@ -91,7 +91,7 @@ class Accuracy(BaseMetric): Examples: >>> import torch - >>> from mmcls.evaluation import Accuracy + >>> from mmpretrain.evaluation import Accuracy >>> # -------------------- The Basic Usage -------------------- >>> y_pred = [0, 2, 1, 3] >>> y_true = [0, 1, 2, 3] @@ -104,7 +104,7 @@ class Accuracy(BaseMetric): [[tensor([9.9000])], [tensor([51.5000])]] >>> >>> # ------------------- Use with Evalutor ------------------- - >>> from mmcls.structures import ClsDataSample + >>> from mmpretrain.structures import ClsDataSample >>> from mmengine.evaluator import Evaluator >>> data_samples = [ ... 
ClsDataSample().set_gt_label(0).set_pred_score(torch.rand(10)) @@ -343,7 +343,7 @@ class SingleLabelMetric(BaseMetric): Examples: >>> import torch - >>> from mmcls.evaluation import SingleLabelMetric + >>> from mmpretrain.evaluation import SingleLabelMetric >>> # -------------------- The Basic Usage -------------------- >>> y_pred = [0, 1, 1, 3] >>> y_true = [0, 2, 1, 3] @@ -358,7 +358,7 @@ class SingleLabelMetric(BaseMetric): (tensor(10.), tensor(0.5500), tensor(1.0427), tensor(1000))] >>> >>> # ------------------- Use with Evalutor ------------------- - >>> from mmcls.structures import ClsDataSample + >>> from mmpretrain.structures import ClsDataSample >>> from mmengine.evaluator import Evaluator >>> data_samples = [ ... ClsDataSample().set_gt_label(i%5).set_pred_score(torch.rand(5)) @@ -606,7 +606,7 @@ class ConfusionMatrix(BaseMetric): 1. The basic usage. >>> import torch - >>> from mmcls.evaluation import ConfusionMatrix + >>> from mmpretrain.evaluation import ConfusionMatrix >>> y_pred = [0, 1, 1, 3] >>> y_true = [0, 2, 1, 3] >>> ConfusionMatrix.calculate(y_pred, y_true, num_classes=4) diff --git a/mmcls/evaluation/metrics/voc_multi_label.py b/mmpretrain/evaluation/metrics/voc_multi_label.py similarity index 99% rename from mmcls/evaluation/metrics/voc_multi_label.py rename to mmpretrain/evaluation/metrics/voc_multi_label.py index 3566adeb..ecbf1d03 100644 --- a/mmcls/evaluation/metrics/voc_multi_label.py +++ b/mmpretrain/evaluation/metrics/voc_multi_label.py @@ -3,7 +3,7 @@ from typing import Optional, Sequence from mmengine.structures import LabelData -from mmcls.registry import METRICS +from mmpretrain.registry import METRICS from .multi_label import AveragePrecision, MultiLabelMetric diff --git a/mmcls/models/__init__.py b/mmpretrain/models/__init__.py similarity index 88% rename from mmcls/models/__init__.py rename to mmpretrain/models/__init__.py index b3ba9232..5d7c95ae 100644 --- a/mmcls/models/__init__.py +++ b/mmpretrain/models/__init__.py @@ -8,6 
+8,8 @@ from .heads import * # noqa: F401,F403 from .losses import * # noqa: F401,F403 from .necks import * # noqa: F401,F403 from .retrievers import * # noqa: F401,F403 +from .selfsup import * # noqa: F401,F403 +from .target_generators import * # noqa: F401,F403 from .tta import * # noqa: F401,F403 from .utils import * # noqa: F401,F403 diff --git a/mmcls/models/backbones/__init__.py b/mmpretrain/models/backbones/__init__.py similarity index 100% rename from mmcls/models/backbones/__init__.py rename to mmpretrain/models/backbones/__init__.py diff --git a/mmcls/models/backbones/alexnet.py b/mmpretrain/models/backbones/alexnet.py similarity index 97% rename from mmcls/models/backbones/alexnet.py rename to mmpretrain/models/backbones/alexnet.py index 4e115107..f7c2891f 100644 --- a/mmcls/models/backbones/alexnet.py +++ b/mmpretrain/models/backbones/alexnet.py @@ -1,7 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .base_backbone import BaseBackbone diff --git a/mmcls/models/backbones/base_backbone.py b/mmpretrain/models/backbones/base_backbone.py similarity index 100% rename from mmcls/models/backbones/base_backbone.py rename to mmpretrain/models/backbones/base_backbone.py diff --git a/mmcls/models/backbones/beit.py b/mmpretrain/models/backbones/beit.py similarity index 99% rename from mmcls/models/backbones/beit.py rename to mmpretrain/models/backbones/beit.py index 607c11b3..03011265 100644 --- a/mmcls/models/backbones/beit.py +++ b/mmpretrain/models/backbones/beit.py @@ -9,7 +9,7 @@ from mmcv.cnn.bricks.drop import build_dropout from mmcv.cnn.bricks.transformer import FFN, PatchEmbed from mmengine.model import BaseModule, ModuleList -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from ..utils import (BEiTAttention, resize_pos_embed, resize_relative_position_bias_table, to_2tuple) from .vision_transformer import 
TransformerEncoderLayer, VisionTransformer diff --git a/mmcls/models/backbones/conformer.py b/mmpretrain/models/backbones/conformer.py similarity index 99% rename from mmcls/models/backbones/conformer.py rename to mmpretrain/models/backbones/conformer.py index a563dc08..eda72b05 100644 --- a/mmcls/models/backbones/conformer.py +++ b/mmpretrain/models/backbones/conformer.py @@ -10,7 +10,7 @@ from mmcv.cnn.bricks.transformer import AdaptivePadding from mmengine.model import BaseModule from mmengine.model.weight_init import trunc_normal_ -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .base_backbone import BaseBackbone from .vision_transformer import TransformerEncoderLayer diff --git a/mmcls/models/backbones/convmixer.py b/mmpretrain/models/backbones/convmixer.py similarity index 99% rename from mmcls/models/backbones/convmixer.py rename to mmpretrain/models/backbones/convmixer.py index 108601ab..480050d5 100644 --- a/mmcls/models/backbones/convmixer.py +++ b/mmpretrain/models/backbones/convmixer.py @@ -7,7 +7,7 @@ from mmcv.cnn.bricks import (Conv2dAdaptivePadding, build_activation_layer, build_norm_layer) from mmengine.utils import digit_version -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .base_backbone import BaseBackbone diff --git a/mmcls/models/backbones/convnext.py b/mmpretrain/models/backbones/convnext.py similarity index 99% rename from mmcls/models/backbones/convnext.py rename to mmpretrain/models/backbones/convnext.py index 50387d37..f9c29cf2 100644 --- a/mmcls/models/backbones/convnext.py +++ b/mmpretrain/models/backbones/convnext.py @@ -9,7 +9,7 @@ import torch.utils.checkpoint as cp from mmcv.cnn.bricks import DropPath from mmengine.model import BaseModule, ModuleList, Sequential -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from ..utils import GRN, build_norm_layer from .base_backbone import BaseBackbone diff --git a/mmcls/models/backbones/cspnet.py 
b/mmpretrain/models/backbones/cspnet.py similarity index 98% rename from mmcls/models/backbones/cspnet.py rename to mmpretrain/models/backbones/cspnet.py index e1b7da4a..7492e977 100644 --- a/mmcls/models/backbones/cspnet.py +++ b/mmpretrain/models/backbones/cspnet.py @@ -9,7 +9,7 @@ from mmcv.cnn.bricks import DropPath from mmengine.model import BaseModule, Sequential from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from ..utils import to_ntuple from .resnet import Bottleneck as ResNetBottleneck from .resnext import Bottleneck as ResNeXtBottleneck @@ -278,8 +278,8 @@ class CSPNet(BaseModule): >>> from functools import partial >>> import torch >>> import torch.nn as nn - >>> from mmcls.models import CSPNet - >>> from mmcls.models.backbones.resnet import Bottleneck + >>> from mmpretrain.models import CSPNet + >>> from mmpretrain.models.backbones.resnet import Bottleneck >>> >>> # A simple example to build CSPNet. >>> arch = dict( @@ -427,7 +427,7 @@ class CSPDarkNet(CSPNet): Default: None. Example: - >>> from mmcls.models import CSPDarkNet + >>> from mmpretrain.models import CSPDarkNet >>> import torch >>> model = CSPDarkNet(depth=53, out_indices=(0, 1, 2, 3, 4)) >>> model.eval() @@ -523,7 +523,7 @@ class CSPResNet(CSPNet): init_cfg (dict or list[dict], optional): Initialization config dict. Default: None. Example: - >>> from mmcls.models import CSPResNet + >>> from mmpretrain.models import CSPResNet >>> import torch >>> model = CSPResNet(depth=50, out_indices=(0, 1, 2, 3)) >>> model.eval() @@ -645,7 +645,7 @@ class CSPResNeXt(CSPResNet): init_cfg (dict or list[dict], optional): Initialization config dict. Default: None. 
Example: - >>> from mmcls.models import CSPResNeXt + >>> from mmpretrain.models import CSPResNeXt >>> import torch >>> model = CSPResNeXt(depth=50, out_indices=(0, 1, 2, 3)) >>> model.eval() diff --git a/mmcls/models/backbones/davit.py b/mmpretrain/models/backbones/davit.py similarity index 99% rename from mmcls/models/backbones/davit.py rename to mmpretrain/models/backbones/davit.py index 2febe5da..cf25e2ed 100644 --- a/mmcls/models/backbones/davit.py +++ b/mmpretrain/models/backbones/davit.py @@ -12,8 +12,8 @@ from mmengine.model import BaseModule, ModuleList from mmengine.utils import to_2tuple from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm -from mmcls.models.backbones.base_backbone import BaseBackbone -from mmcls.registry import MODELS +from mmpretrain.models.backbones.base_backbone import BaseBackbone +from mmpretrain.registry import MODELS from ..utils import ShiftWindowMSA diff --git a/mmcls/models/backbones/deit.py b/mmpretrain/models/backbones/deit.py similarity index 99% rename from mmcls/models/backbones/deit.py rename to mmpretrain/models/backbones/deit.py index 72c1d319..20cd32e5 100644 --- a/mmcls/models/backbones/deit.py +++ b/mmpretrain/models/backbones/deit.py @@ -3,7 +3,7 @@ import torch import torch.nn as nn from mmengine.model.weight_init import trunc_normal_ -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .vision_transformer import VisionTransformer diff --git a/mmcls/models/backbones/deit3.py b/mmpretrain/models/backbones/deit3.py similarity index 99% rename from mmcls/models/backbones/deit3.py rename to mmpretrain/models/backbones/deit3.py index 5361d30a..68ee0cb0 100644 --- a/mmcls/models/backbones/deit3.py +++ b/mmpretrain/models/backbones/deit3.py @@ -10,7 +10,7 @@ from mmengine.model import BaseModule, ModuleList, Sequential from mmengine.utils import deprecated_api_warning from torch import nn -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from ..utils import 
LayerScale, MultiheadAttention, resize_pos_embed, to_2tuple from .vision_transformer import VisionTransformer diff --git a/mmcls/models/backbones/densenet.py b/mmpretrain/models/backbones/densenet.py similarity index 99% rename from mmcls/models/backbones/densenet.py rename to mmpretrain/models/backbones/densenet.py index 6321221f..c9f05302 100644 --- a/mmcls/models/backbones/densenet.py +++ b/mmpretrain/models/backbones/densenet.py @@ -10,7 +10,7 @@ import torch.utils.checkpoint as cp from mmcv.cnn.bricks import build_activation_layer, build_norm_layer from torch.jit.annotations import List -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .base_backbone import BaseBackbone diff --git a/mmcls/models/backbones/edgenext.py b/mmpretrain/models/backbones/edgenext.py similarity index 99% rename from mmcls/models/backbones/edgenext.py rename to mmpretrain/models/backbones/edgenext.py index 14f4b486..ad4e768e 100644 --- a/mmcls/models/backbones/edgenext.py +++ b/mmpretrain/models/backbones/edgenext.py @@ -8,7 +8,7 @@ import torch.nn as nn from mmcv.cnn.bricks import DropPath from mmengine.model import BaseModule, ModuleList, Sequential -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from ..utils import (ChannelMultiheadAttention, PositionEncodingFourier, build_norm_layer) from .base_backbone import BaseBackbone diff --git a/mmcls/models/backbones/efficientformer.py b/mmpretrain/models/backbones/efficientformer.py similarity index 99% rename from mmcls/models/backbones/efficientformer.py rename to mmpretrain/models/backbones/efficientformer.py index 79757106..c2525c8f 100644 --- a/mmcls/models/backbones/efficientformer.py +++ b/mmpretrain/models/backbones/efficientformer.py @@ -8,7 +8,7 @@ from mmcv.cnn.bricks import (ConvModule, DropPath, build_activation_layer, build_norm_layer) from mmengine.model import BaseModule, ModuleList, Sequential -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS 
from ..utils import LayerScale from .base_backbone import BaseBackbone from .poolformer import Pooling @@ -376,7 +376,7 @@ class EfficientFormer(BaseBackbone): Defaults to None. Example: - >>> from mmcls.models import EfficientFormer + >>> from mmpretrain.models import EfficientFormer >>> import torch >>> inputs = torch.rand((1, 3, 224, 224)) >>> # build EfficientFormer backbone for classification task diff --git a/mmcls/models/backbones/efficientnet.py b/mmpretrain/models/backbones/efficientnet.py similarity index 98% rename from mmcls/models/backbones/efficientnet.py rename to mmpretrain/models/backbones/efficientnet.py index b7ea5a82..9ec7ee81 100644 --- a/mmcls/models/backbones/efficientnet.py +++ b/mmpretrain/models/backbones/efficientnet.py @@ -9,9 +9,9 @@ import torch.utils.checkpoint as cp from mmcv.cnn.bricks import ConvModule, DropPath from mmengine.model import BaseModule, Sequential -from mmcls.models.backbones.base_backbone import BaseBackbone -from mmcls.models.utils import InvertedResidual, SELayer, make_divisible -from mmcls.registry import MODELS +from mmpretrain.models.backbones.base_backbone import BaseBackbone +from mmpretrain.models.utils import InvertedResidual, SELayer, make_divisible +from mmpretrain.registry import MODELS class EdgeResidual(BaseModule): diff --git a/mmcls/models/backbones/efficientnet_v2.py b/mmpretrain/models/backbones/efficientnet_v2.py similarity index 98% rename from mmcls/models/backbones/efficientnet_v2.py rename to mmpretrain/models/backbones/efficientnet_v2.py index 84539c49..fec002a4 100644 --- a/mmcls/models/backbones/efficientnet_v2.py +++ b/mmpretrain/models/backbones/efficientnet_v2.py @@ -7,10 +7,10 @@ from mmcv.cnn.bricks import ConvModule, DropPath from mmengine.model import Sequential from torch import Tensor -from mmcls.models.backbones.base_backbone import BaseBackbone -from mmcls.models.backbones.efficientnet import EdgeResidual as FusedMBConv -from mmcls.models.utils import InvertedResidual as MBConv 
-from mmcls.registry import MODELS +from mmpretrain.registry import MODELS +from ..utils import InvertedResidual as MBConv +from .base_backbone import BaseBackbone +from .efficientnet import EdgeResidual as FusedMBConv class EnhancedConvModule(ConvModule): diff --git a/mmcls/models/backbones/hornet.py b/mmpretrain/models/backbones/hornet.py similarity index 99% rename from mmcls/models/backbones/hornet.py rename to mmpretrain/models/backbones/hornet.py index 7e563e24..460f2dc5 100644 --- a/mmcls/models/backbones/hornet.py +++ b/mmpretrain/models/backbones/hornet.py @@ -16,8 +16,8 @@ import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from mmcv.cnn.bricks import DropPath -from mmcls.models.backbones.base_backbone import BaseBackbone -from mmcls.registry import MODELS +from mmpretrain.models.backbones.base_backbone import BaseBackbone +from mmpretrain.registry import MODELS from ..utils import LayerScale diff --git a/mmcls/models/backbones/hrnet.py b/mmpretrain/models/backbones/hrnet.py similarity index 99% rename from mmcls/models/backbones/hrnet.py rename to mmpretrain/models/backbones/hrnet.py index 950a1cfb..99afa908 100644 --- a/mmcls/models/backbones/hrnet.py +++ b/mmpretrain/models/backbones/hrnet.py @@ -4,7 +4,7 @@ from mmcv.cnn import build_conv_layer, build_norm_layer from mmengine.model import BaseModule, ModuleList, Sequential from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .resnet import BasicBlock, Bottleneck, ResLayer, get_expansion @@ -232,7 +232,7 @@ class HRNet(BaseModule): Example: >>> import torch - >>> from mmcls.models import HRNet + >>> from mmpretrain.models import HRNet >>> extra = dict( >>> stage1=dict( >>> num_modules=1, diff --git a/mmcls/models/backbones/inception_v3.py b/mmpretrain/models/backbones/inception_v3.py similarity index 99% rename from mmcls/models/backbones/inception_v3.py rename to 
mmpretrain/models/backbones/inception_v3.py index 814672a6..1d6c04b9 100644 --- a/mmcls/models/backbones/inception_v3.py +++ b/mmpretrain/models/backbones/inception_v3.py @@ -6,7 +6,7 @@ import torch.nn as nn from mmcv.cnn import build_conv_layer from mmengine.model import BaseModule -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .base_backbone import BaseBackbone @@ -389,7 +389,7 @@ class InceptionV3(BaseBackbone): Example: >>> import torch - >>> from mmcls.models import build_backbone + >>> from mmpretrain.models import build_backbone >>> >>> inputs = torch.rand(2, 3, 299, 299) >>> cfg = dict(type='InceptionV3', num_classes=100) diff --git a/mmcls/models/backbones/lenet.py b/mmpretrain/models/backbones/lenet.py similarity index 97% rename from mmcls/models/backbones/lenet.py rename to mmpretrain/models/backbones/lenet.py index 377a48af..8e423c0b 100644 --- a/mmcls/models/backbones/lenet.py +++ b/mmpretrain/models/backbones/lenet.py @@ -1,7 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. 
import torch.nn as nn -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .base_backbone import BaseBackbone diff --git a/mmcls/models/backbones/levit.py b/mmpretrain/models/backbones/levit.py similarity index 99% rename from mmcls/models/backbones/levit.py rename to mmpretrain/models/backbones/levit.py index b696ab42..5f7aa324 100644 --- a/mmcls/models/backbones/levit.py +++ b/mmpretrain/models/backbones/levit.py @@ -7,8 +7,8 @@ from mmcv.cnn import build_activation_layer, fuse_conv_bn from mmcv.cnn.bricks import DropPath from mmengine.model import BaseModule, ModuleList, Sequential -from mmcls.models.backbones.base_backbone import BaseBackbone -from mmcls.registry import MODELS +from mmpretrain.models.backbones.base_backbone import BaseBackbone +from mmpretrain.registry import MODELS from ..utils import build_norm_layer diff --git a/mmcls/models/backbones/mixmim.py b/mmpretrain/models/backbones/mixmim.py similarity index 98% rename from mmcls/models/backbones/mixmim.py rename to mmpretrain/models/backbones/mixmim.py index 6bed2cf4..d594f633 100644 --- a/mmcls/models/backbones/mixmim.py +++ b/mmpretrain/models/backbones/mixmim.py @@ -9,11 +9,10 @@ from mmengine.model import BaseModule from torch import nn from torch.utils.checkpoint import checkpoint -from mmcls.models.backbones.base_backbone import BaseBackbone -from mmcls.models.backbones.vision_transformer import TransformerEncoderLayer -from mmcls.models.utils.attention import WindowMSA -from mmcls.models.utils.helpers import to_2tuple -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS +from ..utils import WindowMSA, to_2tuple +from .base_backbone import BaseBackbone +from .vision_transformer import TransformerEncoderLayer class MixMIMWindowAttention(WindowMSA): diff --git a/mmcls/models/backbones/mlp_mixer.py b/mmpretrain/models/backbones/mlp_mixer.py similarity index 99% rename from mmcls/models/backbones/mlp_mixer.py rename to 
mmpretrain/models/backbones/mlp_mixer.py index e8494f7f..af714fea 100644 --- a/mmcls/models/backbones/mlp_mixer.py +++ b/mmpretrain/models/backbones/mlp_mixer.py @@ -6,7 +6,7 @@ from mmcv.cnn import build_norm_layer from mmcv.cnn.bricks.transformer import FFN, PatchEmbed from mmengine.model import BaseModule, ModuleList -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from ..utils import to_2tuple from .base_backbone import BaseBackbone diff --git a/mmcls/models/backbones/mobilenet_v2.py b/mmpretrain/models/backbones/mobilenet_v2.py similarity index 99% rename from mmcls/models/backbones/mobilenet_v2.py rename to mmpretrain/models/backbones/mobilenet_v2.py index 0583208e..bca1418a 100644 --- a/mmcls/models/backbones/mobilenet_v2.py +++ b/mmpretrain/models/backbones/mobilenet_v2.py @@ -5,8 +5,8 @@ from mmcv.cnn import ConvModule from mmengine.model import BaseModule from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.utils import make_divisible -from mmcls.registry import MODELS +from mmpretrain.models.utils import make_divisible +from mmpretrain.registry import MODELS from .base_backbone import BaseBackbone diff --git a/mmcls/models/backbones/mobilenet_v3.py b/mmpretrain/models/backbones/mobilenet_v3.py similarity index 99% rename from mmcls/models/backbones/mobilenet_v3.py rename to mmpretrain/models/backbones/mobilenet_v3.py index 8ca7b4b4..577dba94 100644 --- a/mmcls/models/backbones/mobilenet_v3.py +++ b/mmpretrain/models/backbones/mobilenet_v3.py @@ -2,7 +2,7 @@ from mmcv.cnn import ConvModule from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from ..utils import InvertedResidual from .base_backbone import BaseBackbone diff --git a/mmcls/models/backbones/mobileone.py b/mmpretrain/models/backbones/mobileone.py similarity index 99% rename from mmcls/models/backbones/mobileone.py rename to mmpretrain/models/backbones/mobileone.py index 
c82cb8b5..1111441a 100644 --- a/mmcls/models/backbones/mobileone.py +++ b/mmpretrain/models/backbones/mobileone.py @@ -9,7 +9,7 @@ from mmcv.cnn import build_activation_layer, build_conv_layer, build_norm_layer from mmengine.model import BaseModule, ModuleList, Sequential from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from ..utils.se_layer import SELayer from .base_backbone import BaseBackbone @@ -302,7 +302,7 @@ class MobileOne(BaseBackbone): init_cfg (dict or list[dict], optional): Initialization config dict. Example: - >>> from mmcls.models import MobileOne + >>> from mmpretrain.models import MobileOne >>> import torch >>> x = torch.rand(1, 3, 224, 224) >>> model = MobileOne("s0", out_indices=(0, 1, 2, 3)) diff --git a/mmcls/models/backbones/mobilevit.py b/mmpretrain/models/backbones/mobilevit.py similarity index 99% rename from mmcls/models/backbones/mobilevit.py rename to mmpretrain/models/backbones/mobilevit.py index d5edb6de..9e4043fe 100644 --- a/mmcls/models/backbones/mobilevit.py +++ b/mmpretrain/models/backbones/mobilevit.py @@ -7,7 +7,7 @@ import torch.nn.functional as F from mmcv.cnn import ConvModule, build_norm_layer from torch import nn -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .base_backbone import BaseBackbone from .mobilenet_v2 import InvertedResidual from .vision_transformer import TransformerEncoderLayer diff --git a/mmcls/models/backbones/mvit.py b/mmpretrain/models/backbones/mvit.py similarity index 99% rename from mmcls/models/backbones/mvit.py rename to mmpretrain/models/backbones/mvit.py index 03043539..68aee97d 100644 --- a/mmcls/models/backbones/mvit.py +++ b/mmpretrain/models/backbones/mvit.py @@ -489,7 +489,7 @@ class MViT(BaseBackbone): Examples: >>> import torch - >>> from mmcls.models import build_backbone + >>> from mmpretrain.models import build_backbone >>> >>> cfg = dict(type='MViT', arch='tiny', out_scales=[0, 1, 
2, 3]) >>> model = build_backbone(cfg) diff --git a/mmcls/models/backbones/poolformer.py b/mmpretrain/models/backbones/poolformer.py similarity index 99% rename from mmcls/models/backbones/poolformer.py rename to mmpretrain/models/backbones/poolformer.py index da69b756..e2ad6704 100644 --- a/mmcls/models/backbones/poolformer.py +++ b/mmpretrain/models/backbones/poolformer.py @@ -6,7 +6,7 @@ import torch.nn as nn from mmcv.cnn.bricks import DropPath, build_activation_layer, build_norm_layer from mmengine.model import BaseModule -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .base_backbone import BaseBackbone diff --git a/mmcls/models/backbones/regnet.py b/mmpretrain/models/backbones/regnet.py similarity index 99% rename from mmcls/models/backbones/regnet.py rename to mmpretrain/models/backbones/regnet.py index e7140fad..85dbdef0 100644 --- a/mmcls/models/backbones/regnet.py +++ b/mmpretrain/models/backbones/regnet.py @@ -3,7 +3,7 @@ import numpy as np import torch.nn as nn from mmcv.cnn import build_conv_layer, build_norm_layer -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .resnet import ResNet from .resnext import Bottleneck @@ -43,7 +43,7 @@ class RegNet(ResNet): in resblocks to let them behave as identity. Default: True. 
Example: - >>> from mmcls.models import RegNet + >>> from mmpretrain.models import RegNet >>> import torch >>> self = RegNet( arch=dict( diff --git a/mmcls/models/backbones/replknet.py b/mmpretrain/models/backbones/replknet.py similarity index 99% rename from mmcls/models/backbones/replknet.py rename to mmpretrain/models/backbones/replknet.py index 3611c8b7..4dce4154 100644 --- a/mmcls/models/backbones/replknet.py +++ b/mmpretrain/models/backbones/replknet.py @@ -7,7 +7,7 @@ from mmcv.cnn.bricks import DropPath from mmengine.model import BaseModule from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .base_backbone import BaseBackbone diff --git a/mmcls/models/backbones/repmlp.py b/mmpretrain/models/backbones/repmlp.py similarity index 99% rename from mmcls/models/backbones/repmlp.py rename to mmpretrain/models/backbones/repmlp.py index 19431235..f7c06c48 100644 --- a/mmcls/models/backbones/repmlp.py +++ b/mmpretrain/models/backbones/repmlp.py @@ -8,8 +8,8 @@ from mmcv.cnn import (ConvModule, build_activation_layer, build_conv_layer, from mmcv.cnn.bricks.transformer import PatchEmbed as _PatchEmbed from mmengine.model import BaseModule, ModuleList, Sequential -from mmcls.models.utils import SELayer, to_2tuple -from mmcls.registry import MODELS +from mmpretrain.models.utils import SELayer, to_2tuple +from mmpretrain.registry import MODELS def fuse_bn(conv_or_fc, bn): @@ -88,7 +88,7 @@ class PatchEmbed(_PatchEmbed): class GlobalPerceptron(SELayer): - """GlobalPerceptron implemented by using ``mmcls.modes.SELayer``. + """GlobalPerceptron implemented by using ``mmpretrain.modes.SELayer``. 
Args: input_channels (int): The number of input (and output) channels diff --git a/mmcls/models/backbones/repvgg.py b/mmpretrain/models/backbones/repvgg.py similarity index 99% rename from mmcls/models/backbones/repvgg.py rename to mmpretrain/models/backbones/repvgg.py index 8dd38e45..67c9d147 100644 --- a/mmcls/models/backbones/repvgg.py +++ b/mmpretrain/models/backbones/repvgg.py @@ -8,7 +8,7 @@ from mmengine.model import BaseModule, Sequential from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm from torch import nn -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from ..utils.se_layer import SELayer from .base_backbone import BaseBackbone diff --git a/mmcls/models/backbones/res2net.py b/mmpretrain/models/backbones/res2net.py similarity index 99% rename from mmcls/models/backbones/res2net.py rename to mmpretrain/models/backbones/res2net.py index 70f048ca..33293104 100644 --- a/mmcls/models/backbones/res2net.py +++ b/mmpretrain/models/backbones/res2net.py @@ -7,7 +7,7 @@ import torch.utils.checkpoint as cp from mmcv.cnn import build_conv_layer, build_norm_layer from mmengine.model import ModuleList, Sequential -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .resnet import Bottleneck as _Bottleneck from .resnet import ResNet @@ -258,7 +258,7 @@ class Res2Net(ResNet): Defaults to None. Example: - >>> from mmcls.models import Res2Net + >>> from mmpretrain.models import Res2Net >>> import torch >>> model = Res2Net(depth=50, ... 
scales=4, diff --git a/mmcls/models/backbones/resnest.py b/mmpretrain/models/backbones/resnest.py similarity index 99% rename from mmcls/models/backbones/resnest.py rename to mmpretrain/models/backbones/resnest.py index 006ecea6..4bb438f0 100644 --- a/mmcls/models/backbones/resnest.py +++ b/mmpretrain/models/backbones/resnest.py @@ -5,7 +5,7 @@ import torch.nn.functional as F import torch.utils.checkpoint as cp from mmcv.cnn import build_conv_layer, build_norm_layer -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .resnet import Bottleneck as _Bottleneck from .resnet import ResLayer, ResNetV1d diff --git a/mmcls/models/backbones/resnet.py b/mmpretrain/models/backbones/resnet.py similarity index 99% rename from mmcls/models/backbones/resnet.py rename to mmpretrain/models/backbones/resnet.py index a96e568f..704d4955 100644 --- a/mmcls/models/backbones/resnet.py +++ b/mmpretrain/models/backbones/resnet.py @@ -9,7 +9,7 @@ from mmengine.model import BaseModule from mmengine.model.weight_init import constant_init from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .base_backbone import BaseBackbone eps = 1.0e-5 @@ -438,7 +438,7 @@ class ResNet(BaseBackbone): in resblocks to let them behave as identity. Default: True. 
Example: - >>> from mmcls.models import ResNet + >>> from mmpretrain.models import ResNet >>> import torch >>> self = ResNet(depth=18) >>> self.eval() diff --git a/mmcls/models/backbones/resnet_cifar.py b/mmpretrain/models/backbones/resnet_cifar.py similarity index 98% rename from mmcls/models/backbones/resnet_cifar.py rename to mmpretrain/models/backbones/resnet_cifar.py index c97e3152..9f17f92f 100644 --- a/mmcls/models/backbones/resnet_cifar.py +++ b/mmpretrain/models/backbones/resnet_cifar.py @@ -2,7 +2,7 @@ import torch.nn as nn from mmcv.cnn import build_conv_layer, build_norm_layer -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .resnet import ResNet diff --git a/mmcls/models/backbones/resnext.py b/mmpretrain/models/backbones/resnext.py similarity index 99% rename from mmcls/models/backbones/resnext.py rename to mmpretrain/models/backbones/resnext.py index c87bf7cb..8858b7d3 100644 --- a/mmcls/models/backbones/resnext.py +++ b/mmpretrain/models/backbones/resnext.py @@ -1,7 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. 
from mmcv.cnn import build_conv_layer, build_norm_layer -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .resnet import Bottleneck as _Bottleneck from .resnet import ResLayer, ResNet diff --git a/mmcls/models/backbones/revvit.py b/mmpretrain/models/backbones/revvit.py similarity index 99% rename from mmcls/models/backbones/revvit.py rename to mmpretrain/models/backbones/revvit.py index 56454774..ec3b2ce6 100644 --- a/mmcls/models/backbones/revvit.py +++ b/mmpretrain/models/backbones/revvit.py @@ -12,8 +12,8 @@ from mmengine.model.weight_init import trunc_normal_ from torch import nn from torch.autograd import Function as Function -from mmcls.models.backbones.base_backbone import BaseBackbone -from mmcls.registry import MODELS +from mmpretrain.models.backbones.base_backbone import BaseBackbone +from mmpretrain.registry import MODELS from ..utils import MultiheadAttention, resize_pos_embed, to_2tuple diff --git a/mmcls/models/backbones/seresnet.py b/mmpretrain/models/backbones/seresnet.py similarity index 98% rename from mmcls/models/backbones/seresnet.py rename to mmpretrain/models/backbones/seresnet.py index 2de4101b..4437c17f 100644 --- a/mmcls/models/backbones/seresnet.py +++ b/mmpretrain/models/backbones/seresnet.py @@ -1,7 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. import torch.utils.checkpoint as cp -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from ..utils.se_layer import SELayer from .resnet import Bottleneck, ResLayer, ResNet @@ -95,7 +95,7 @@ class SEResNet(ResNet): in resblocks to let them behave as identity. Default: True. 
Example: - >>> from mmcls.models import SEResNet + >>> from mmpretrain.models import SEResNet >>> import torch >>> self = SEResNet(depth=50) >>> self.eval() diff --git a/mmcls/models/backbones/seresnext.py b/mmpretrain/models/backbones/seresnext.py similarity index 99% rename from mmcls/models/backbones/seresnext.py rename to mmpretrain/models/backbones/seresnext.py index 79744137..6a283807 100644 --- a/mmcls/models/backbones/seresnext.py +++ b/mmpretrain/models/backbones/seresnext.py @@ -1,7 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. from mmcv.cnn import build_conv_layer, build_norm_layer -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .resnet import ResLayer from .seresnet import SEBottleneck as _SEBottleneck from .seresnet import SEResNet diff --git a/mmcls/models/backbones/shufflenet_v1.py b/mmpretrain/models/backbones/shufflenet_v1.py similarity index 99% rename from mmcls/models/backbones/shufflenet_v1.py rename to mmpretrain/models/backbones/shufflenet_v1.py index 4dca5e39..2cc3617f 100644 --- a/mmcls/models/backbones/shufflenet_v1.py +++ b/mmpretrain/models/backbones/shufflenet_v1.py @@ -7,8 +7,8 @@ from mmengine.model import BaseModule from mmengine.model.weight_init import constant_init, normal_init from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.utils import channel_shuffle, make_divisible -from mmcls.registry import MODELS +from mmpretrain.models.utils import channel_shuffle, make_divisible +from mmpretrain.registry import MODELS from .base_backbone import BaseBackbone diff --git a/mmcls/models/backbones/shufflenet_v2.py b/mmpretrain/models/backbones/shufflenet_v2.py similarity index 99% rename from mmcls/models/backbones/shufflenet_v2.py rename to mmpretrain/models/backbones/shufflenet_v2.py index 7ea7c611..02f9c749 100644 --- a/mmcls/models/backbones/shufflenet_v2.py +++ b/mmpretrain/models/backbones/shufflenet_v2.py @@ -7,8 +7,8 @@ from mmengine.model import BaseModule from 
mmengine.model.weight_init import constant_init, normal_init from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.utils import channel_shuffle -from mmcls.registry import MODELS +from mmpretrain.models.utils import channel_shuffle +from mmpretrain.registry import MODELS from .base_backbone import BaseBackbone diff --git a/mmcls/models/backbones/swin_transformer.py b/mmpretrain/models/backbones/swin_transformer.py similarity index 99% rename from mmcls/models/backbones/swin_transformer.py rename to mmpretrain/models/backbones/swin_transformer.py index baeafac7..77e03f65 100644 --- a/mmcls/models/backbones/swin_transformer.py +++ b/mmpretrain/models/backbones/swin_transformer.py @@ -12,7 +12,7 @@ from mmengine.model import BaseModule, ModuleList from mmengine.model.weight_init import trunc_normal_ from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from ..utils import (ShiftWindowMSA, resize_pos_embed, resize_relative_position_bias_table, to_2tuple) from .base_backbone import BaseBackbone @@ -260,7 +260,7 @@ class SwinTransformer(BaseBackbone): Defaults to None. Examples: - >>> from mmcls.models import SwinTransformer + >>> from mmpretrain.models import SwinTransformer >>> import torch >>> extra_config = dict( >>> arch='tiny', diff --git a/mmcls/models/backbones/swin_transformer_v2.py b/mmpretrain/models/backbones/swin_transformer_v2.py similarity index 99% rename from mmcls/models/backbones/swin_transformer_v2.py rename to mmpretrain/models/backbones/swin_transformer_v2.py index cb30ed47..142505a8 100644 --- a/mmcls/models/backbones/swin_transformer_v2.py +++ b/mmpretrain/models/backbones/swin_transformer_v2.py @@ -287,7 +287,7 @@ class SwinTransformerV2(BaseBackbone): Defaults to None. 
Examples: - >>> from mmcls.models import SwinTransformerV2 + >>> from mmpretrain.models import SwinTransformerV2 >>> import torch >>> extra_config = dict( >>> arch='tiny', diff --git a/mmcls/models/backbones/t2t_vit.py b/mmpretrain/models/backbones/t2t_vit.py similarity index 99% rename from mmcls/models/backbones/t2t_vit.py rename to mmpretrain/models/backbones/t2t_vit.py index 6d90751c..4195a7de 100644 --- a/mmcls/models/backbones/t2t_vit.py +++ b/mmpretrain/models/backbones/t2t_vit.py @@ -10,7 +10,7 @@ from mmcv.cnn.bricks.transformer import FFN from mmengine.model import BaseModule, ModuleList from mmengine.model.weight_init import trunc_normal_ -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from ..utils import MultiheadAttention, resize_pos_embed, to_2tuple from .base_backbone import BaseBackbone diff --git a/mmcls/models/backbones/timm_backbone.py b/mmpretrain/models/backbones/timm_backbone.py similarity index 99% rename from mmcls/models/backbones/timm_backbone.py rename to mmpretrain/models/backbones/timm_backbone.py index 90a8b161..69169b4a 100644 --- a/mmcls/models/backbones/timm_backbone.py +++ b/mmpretrain/models/backbones/timm_backbone.py @@ -3,7 +3,7 @@ import warnings from mmengine.logging import MMLogger -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .base_backbone import BaseBackbone diff --git a/mmcls/models/backbones/tinyvit.py b/mmpretrain/models/backbones/tinyvit.py similarity index 99% rename from mmcls/models/backbones/tinyvit.py rename to mmpretrain/models/backbones/tinyvit.py index 18353248..52798321 100644 --- a/mmcls/models/backbones/tinyvit.py +++ b/mmpretrain/models/backbones/tinyvit.py @@ -8,7 +8,7 @@ from mmcv.cnn.bricks import DropPath, build_activation_layer, build_norm_layer from mmengine.model import BaseModule, ModuleList, Sequential from torch.nn import functional as F -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from ..utils import 
LeAttention from .base_backbone import BaseBackbone @@ -139,8 +139,8 @@ class PatchMerging(nn.Module): Adapted from https://github.com/microsoft/Cream/blob/main/TinyViT/models/tiny_vit.py - Different from `mmcls.models.utils.PatchMerging`, this module use Conv2d - and BatchNorm2d to implement PatchMerging. + Different from `mmpretrain.models.utils.PatchMerging`, this module use + Conv2d and BatchNorm2d to implement PatchMerging. Args: in_channels (int): The number of input channels. diff --git a/mmcls/models/backbones/tnt.py b/mmpretrain/models/backbones/tnt.py similarity index 99% rename from mmcls/models/backbones/tnt.py rename to mmpretrain/models/backbones/tnt.py index 24201d4a..e1b241c1 100644 --- a/mmcls/models/backbones/tnt.py +++ b/mmpretrain/models/backbones/tnt.py @@ -8,7 +8,7 @@ from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention from mmengine.model import BaseModule, ModuleList from mmengine.model.weight_init import trunc_normal_ -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from ..utils import to_2tuple from .base_backbone import BaseBackbone diff --git a/mmcls/models/backbones/twins.py b/mmpretrain/models/backbones/twins.py similarity index 99% rename from mmcls/models/backbones/twins.py rename to mmpretrain/models/backbones/twins.py index 7c9b784d..42916706 100644 --- a/mmcls/models/backbones/twins.py +++ b/mmpretrain/models/backbones/twins.py @@ -12,9 +12,8 @@ from mmengine.model.weight_init import (constant_init, normal_init, trunc_normal_init) from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.utils.attention import MultiheadAttention -from mmcls.models.utils.position_encoding import ConditionalPositionEncoding -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS +from ..utils import ConditionalPositionEncoding, MultiheadAttention class GlobalSubsampledAttention(MultiheadAttention): @@ -386,7 +385,7 @@ class PCPVT(BaseModule): Defaults to None. 
Examples: - >>> from mmcls.models import PCPVT + >>> from mmpretrain.models import PCPVT >>> import torch >>> pcpvt_cfg = {'arch': "small", >>> 'norm_after_stage': [False, False, False, True]} @@ -633,7 +632,7 @@ class SVT(PCPVT): Defaults to None. Examples: - >>> from mmcls.models import SVT + >>> from mmpretrain.models import SVT >>> import torch >>> svt_cfg = {'arch': "small", >>> 'norm_after_stage': [False, False, False, True]} diff --git a/mmcls/models/backbones/van.py b/mmpretrain/models/backbones/van.py similarity index 99% rename from mmcls/models/backbones/van.py rename to mmpretrain/models/backbones/van.py index e3814b24..c34dc336 100644 --- a/mmcls/models/backbones/van.py +++ b/mmpretrain/models/backbones/van.py @@ -7,7 +7,7 @@ from mmcv.cnn.bricks.transformer import PatchEmbed from mmengine.model import BaseModule, ModuleList from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .base_backbone import BaseBackbone @@ -293,7 +293,7 @@ class VAN(BaseBackbone): Defaults to None. 
Examples: - >>> from mmcls.models import VAN + >>> from mmpretrain.models import VAN >>> import torch >>> cfg = dict(arch='tiny') >>> model = VAN(**cfg) diff --git a/mmcls/models/backbones/vgg.py b/mmpretrain/models/backbones/vgg.py similarity index 99% rename from mmcls/models/backbones/vgg.py rename to mmpretrain/models/backbones/vgg.py index 876ebbe9..026b9162 100644 --- a/mmcls/models/backbones/vgg.py +++ b/mmpretrain/models/backbones/vgg.py @@ -3,7 +3,7 @@ import torch.nn as nn from mmcv.cnn import ConvModule from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .base_backbone import BaseBackbone diff --git a/mmcls/models/backbones/vig.py b/mmpretrain/models/backbones/vig.py similarity index 99% rename from mmcls/models/backbones/vig.py rename to mmpretrain/models/backbones/vig.py index d9466aed..c1a7879b 100644 --- a/mmcls/models/backbones/vig.py +++ b/mmpretrain/models/backbones/vig.py @@ -12,8 +12,8 @@ from mmcv.cnn.bricks import DropPath from mmengine.model import ModuleList, Sequential from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.backbones.base_backbone import BaseBackbone -from mmcls.registry import MODELS +from mmpretrain.models.backbones.base_backbone import BaseBackbone +from mmpretrain.registry import MODELS from ..utils import build_norm_layer diff --git a/mmcls/models/backbones/vision_transformer.py b/mmpretrain/models/backbones/vision_transformer.py similarity index 99% rename from mmcls/models/backbones/vision_transformer.py rename to mmpretrain/models/backbones/vision_transformer.py index 655f8edc..41c61521 100644 --- a/mmcls/models/backbones/vision_transformer.py +++ b/mmpretrain/models/backbones/vision_transformer.py @@ -9,7 +9,7 @@ from mmcv.cnn.bricks.transformer import FFN, PatchEmbed from mmengine.model import BaseModule, ModuleList from mmengine.model.weight_init import trunc_normal_ -from mmcls.registry import MODELS +from 
mmpretrain.registry import MODELS from ..utils import MultiheadAttention, resize_pos_embed, to_2tuple from .base_backbone import BaseBackbone diff --git a/mmcls/models/backbones/xcit.py b/mmpretrain/models/backbones/xcit.py similarity index 99% rename from mmcls/models/backbones/xcit.py rename to mmpretrain/models/backbones/xcit.py index 46dd6ff2..392ebbed 100644 --- a/mmcls/models/backbones/xcit.py +++ b/mmpretrain/models/backbones/xcit.py @@ -12,7 +12,7 @@ from mmengine.model import BaseModule, Sequential from mmengine.model.weight_init import trunc_normal_ from mmengine.utils import digit_version -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from ..utils import build_norm_layer, to_2tuple from .base_backbone import BaseBackbone diff --git a/mmcls/models/builder.py b/mmpretrain/models/builder.py similarity index 94% rename from mmcls/models/builder.py rename to mmpretrain/models/builder.py index 4fb663bd..2ea4e25c 100644 --- a/mmcls/models/builder.py +++ b/mmpretrain/models/builder.py @@ -1,5 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. 
-from mmcls.registry import MODELS +from mmpretrain.registry import MODELS BACKBONES = MODELS NECKS = MODELS diff --git a/mmcls/models/classifiers/__init__.py b/mmpretrain/models/classifiers/__init__.py similarity index 100% rename from mmcls/models/classifiers/__init__.py rename to mmpretrain/models/classifiers/__init__.py diff --git a/mmcls/models/classifiers/base.py b/mmpretrain/models/classifiers/base.py similarity index 100% rename from mmcls/models/classifiers/base.py rename to mmpretrain/models/classifiers/base.py diff --git a/mmcls/models/classifiers/hugging_face.py b/mmpretrain/models/classifiers/hugging_face.py similarity index 96% rename from mmcls/models/classifiers/hugging_face.py rename to mmpretrain/models/classifiers/hugging_face.py index 2873d775..b99f431e 100644 --- a/mmcls/models/classifiers/hugging_face.py +++ b/mmpretrain/models/classifiers/hugging_face.py @@ -7,8 +7,8 @@ import torch import torch.nn as nn import torch.nn.functional as F -from mmcls.registry import MODELS -from mmcls.structures import ClsDataSample +from mmpretrain.registry import MODELS +from mmpretrain.structures import ClsDataSample from .base import BaseClassifier @@ -41,7 +41,7 @@ class HuggingFaceClassifier(BaseClassifier): fields are: - augments (List[dict]): The batch augmentation methods to use. - More details can be found in :mod:`mmcls.model.utils.augment`. + More details can be found in :mod:`mmpretrain.model.utils.augment`. Defaults to None. with_cp (bool): Use checkpoint or not. 
Using checkpoint will save some @@ -57,7 +57,7 @@ class HuggingFaceClassifier(BaseClassifier): Examples: >>> import torch - >>> from mmcls.models import build_classifier + >>> from mmpretrain.models import build_classifier >>> cfg = dict(type='HuggingFaceClassifier', model_name='microsoft/resnet-50', pretrained=True) >>> model = build_classifier(cfg) >>> inputs = torch.rand(1, 3, 224, 224) @@ -79,7 +79,7 @@ class HuggingFaceClassifier(BaseClassifier): if data_preprocessor is None: data_preprocessor = {} # The build process is in MMEngine, so we need to add scope here. - data_preprocessor.setdefault('type', 'mmcls.ClsDataPreprocessor') + data_preprocessor.setdefault('type', 'mmpretrain.ClsDataPreprocessor') if train_cfg is not None and 'augments' in train_cfg: # Set batch augmentations by `train_cfg` diff --git a/mmcls/models/classifiers/image.py b/mmpretrain/models/classifiers/image.py similarity index 92% rename from mmcls/models/classifiers/image.py rename to mmpretrain/models/classifiers/image.py index 54cc19b5..2b80a8b3 100644 --- a/mmcls/models/classifiers/image.py +++ b/mmpretrain/models/classifiers/image.py @@ -4,8 +4,8 @@ from typing import List, Optional import torch import torch.nn as nn -from mmcls.registry import MODELS -from mmcls.structures import ClsDataSample +from mmpretrain.registry import MODELS +from mmpretrain.structures import ClsDataSample from .base import BaseClassifier @@ -15,11 +15,11 @@ class ImageClassifier(BaseClassifier): Args: backbone (dict): The backbone module. See - :mod:`mmcls.models.backbones`. + :mod:`mmpretrain.models.backbones`. neck (dict, optional): The neck module to process features from - backbone. See :mod:`mmcls.models.necks`. Defaults to None. + backbone. See :mod:`mmpretrain.models.necks`. Defaults to None. head (dict, optional): The head module to do prediction and calculate - loss from processed features. See :mod:`mmcls.models.heads`. + loss from processed features. See :mod:`mmpretrain.models.heads`. 
Notice that if the head is not set, almost all methods cannot be used except :meth:`extract_feat`. Defaults to None. pretrained (str, optional): The pretrained checkpoint path, support @@ -28,7 +28,8 @@ class ImageClassifier(BaseClassifier): fields are: - augments (List[dict]): The batch augmentation methods to use. - More details can be found in :mod:`mmcls.model.utils.augment`. + More details can be found in + :mod:`mmpretrain.model.utils.augment`. Defaults to None. data_preprocessor (dict, optional): The config for preprocessing input @@ -53,7 +54,7 @@ class ImageClassifier(BaseClassifier): if data_preprocessor is None: data_preprocessor = {} # The build process is in MMEngine, so we need to add scope here. - data_preprocessor.setdefault('type', 'mmcls.ClsDataPreprocessor') + data_preprocessor.setdefault('type', 'mmpretrain.ClsDataPreprocessor') if train_cfg is not None and 'augments' in train_cfg: # Set batch augmentations by `train_cfg` @@ -104,7 +105,7 @@ class ImageClassifier(BaseClassifier): - If ``mode="tensor"``, return a tensor or a tuple of tensor. - If ``mode="predict"``, return a list of - :obj:`mmcls.structures.ClsDataSample`. + :obj:`mmpretrain.structures.ClsDataSample`. - If ``mode="loss"``, return a dict of tensor. 
""" if mode == 'tensor': @@ -145,7 +146,7 @@ class ImageClassifier(BaseClassifier): >>> import torch >>> from mmengine import Config - >>> from mmcls.models import build_classifier + >>> from mmpretrain.models import build_classifier >>> >>> cfg = Config.fromfile('configs/resnet/resnet18_8xb32_in1k.py').model >>> cfg.backbone.out_indices = (0, 1, 2, 3) # Output multi-scale feature maps @@ -162,7 +163,7 @@ class ImageClassifier(BaseClassifier): >>> import torch >>> from mmengine import Config - >>> from mmcls.models import build_classifier + >>> from mmpretrain.models import build_classifier >>> >>> cfg = Config.fromfile('configs/resnet/resnet18_8xb32_in1k.py').model >>> cfg.backbone.out_indices = (0, 1, 2, 3) # Output multi-scale feature maps @@ -180,7 +181,7 @@ class ImageClassifier(BaseClassifier): >>> import torch >>> from mmengine import Config - >>> from mmcls.models import build_classifier + >>> from mmpretrain.models import build_classifier >>> >>> cfg = Config.fromfile('configs/vision_transformer/vit-base-p16_pt-64xb64_in1k-224.py').model >>> model = build_classifier(cfg) diff --git a/mmcls/models/classifiers/timm.py b/mmpretrain/models/classifiers/timm.py similarity index 96% rename from mmcls/models/classifiers/timm.py rename to mmpretrain/models/classifiers/timm.py index fa4c4d0a..0336d1c4 100644 --- a/mmcls/models/classifiers/timm.py +++ b/mmpretrain/models/classifiers/timm.py @@ -7,8 +7,8 @@ import torch import torch.nn as nn import torch.nn.functional as F -from mmcls.registry import MODELS -from mmcls.structures import ClsDataSample +from mmpretrain.registry import MODELS +from mmpretrain.structures import ClsDataSample from .base import BaseClassifier @@ -34,7 +34,7 @@ class TimmClassifier(BaseClassifier): fields are: - augments (List[dict]): The batch augmentation methods to use. - More details can be found in :mod:`mmcls.model.utils.augment`. + More details can be found in :mod:`mmpretrain.model.utils.augment`. Defaults to None. 
with_cp (bool): Use checkpoint or not. Using checkpoint will save some @@ -50,7 +50,7 @@ class TimmClassifier(BaseClassifier): Examples: >>> import torch - >>> from mmcls.models import build_classifier + >>> from mmpretrain.models import build_classifier >>> cfg = dict(type='TimmClassifier', model_name='resnet50', pretrained=True) >>> model = build_classifier(cfg) >>> inputs = torch.rand(1, 3, 224, 224) @@ -70,7 +70,7 @@ class TimmClassifier(BaseClassifier): if data_preprocessor is None: data_preprocessor = {} # The build process is in MMEngine, so we need to add scope here. - data_preprocessor.setdefault('type', 'mmcls.ClsDataPreprocessor') + data_preprocessor.setdefault('type', 'mmpretrain.ClsDataPreprocessor') if train_cfg is not None and 'augments' in train_cfg: # Set batch augmentations by `train_cfg` diff --git a/mmcls/models/heads/__init__.py b/mmpretrain/models/heads/__init__.py similarity index 100% rename from mmcls/models/heads/__init__.py rename to mmpretrain/models/heads/__init__.py diff --git a/mmcls/models/heads/base_head.py b/mmpretrain/models/heads/base_head.py similarity index 100% rename from mmcls/models/heads/base_head.py rename to mmpretrain/models/heads/base_head.py diff --git a/mmcls/models/heads/cls_head.py b/mmpretrain/models/heads/cls_head.py similarity index 97% rename from mmcls/models/heads/cls_head.py rename to mmpretrain/models/heads/cls_head.py index 1338947b..5c5d015f 100644 --- a/mmcls/models/heads/cls_head.py +++ b/mmpretrain/models/heads/cls_head.py @@ -5,9 +5,9 @@ import torch import torch.nn as nn import torch.nn.functional as F -from mmcls.evaluation.metrics import Accuracy -from mmcls.registry import MODELS -from mmcls.structures import ClsDataSample +from mmpretrain.evaluation.metrics import Accuracy +from mmpretrain.registry import MODELS +from mmpretrain.structures import ClsDataSample from .base_head import BaseHead diff --git a/mmcls/models/heads/conformer_head.py b/mmpretrain/models/heads/conformer_head.py similarity 
index 97% rename from mmcls/models/heads/conformer_head.py rename to mmpretrain/models/heads/conformer_head.py index 87e8a4ab..a01fecdc 100644 --- a/mmcls/models/heads/conformer_head.py +++ b/mmpretrain/models/heads/conformer_head.py @@ -4,9 +4,9 @@ from typing import List, Sequence, Tuple import torch import torch.nn as nn -from mmcls.evaluation.metrics import Accuracy -from mmcls.registry import MODELS -from mmcls.structures import ClsDataSample +from mmpretrain.evaluation.metrics import Accuracy +from mmpretrain.registry import MODELS +from mmpretrain.structures import ClsDataSample from .cls_head import ClsHead diff --git a/mmcls/models/heads/deit_head.py b/mmpretrain/models/heads/deit_head.py similarity index 96% rename from mmcls/models/heads/deit_head.py rename to mmpretrain/models/heads/deit_head.py index f6458e7d..c2ce9d4c 100644 --- a/mmcls/models/heads/deit_head.py +++ b/mmpretrain/models/heads/deit_head.py @@ -5,7 +5,7 @@ from typing import List, Tuple import torch import torch.nn as nn -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .vision_transformer_head import VisionTransformerClsHead @@ -58,7 +58,7 @@ class DeiTClsHead(VisionTransformerClsHead): def forward(self, feats: Tuple[List[torch.Tensor]]) -> torch.Tensor: """The forward process.""" if self.training: - warnings.warn('MMClassification cannot train the ' + warnings.warn('MMPretrain cannot train the ' 'distilled version DeiT.') cls_token, dist_token = self.pre_logits(feats) # The final classification head. 
diff --git a/mmcls/models/heads/efficientformer_head.py b/mmpretrain/models/heads/efficientformer_head.py similarity index 95% rename from mmcls/models/heads/efficientformer_head.py rename to mmpretrain/models/heads/efficientformer_head.py index 1b67c1b3..9d1d74dc 100644 --- a/mmcls/models/heads/efficientformer_head.py +++ b/mmpretrain/models/heads/efficientformer_head.py @@ -4,8 +4,8 @@ from typing import List, Tuple import torch import torch.nn as nn -from mmcls.registry import MODELS -from mmcls.structures import ClsDataSample +from mmpretrain.registry import MODELS +from mmpretrain.structures import ClsDataSample from .cls_head import ClsHead @@ -83,7 +83,7 @@ class EfficientFormerClsHead(ClsHead): """ if self.dist: raise NotImplementedError( - "MMClassification doesn't support to train" + "MMPretrain doesn't support to train" ' the distilled version EfficientFormer.') else: return super().loss(feats, data_samples, **kwargs) diff --git a/mmcls/models/heads/levit_head.py b/mmpretrain/models/heads/levit_head.py similarity index 93% rename from mmcls/models/heads/levit_head.py rename to mmpretrain/models/heads/levit_head.py index b9a3bfeb..a74d7ecc 100644 --- a/mmcls/models/heads/levit_head.py +++ b/mmpretrain/models/heads/levit_head.py @@ -3,8 +3,8 @@ import torch import torch.nn as nn from mmengine.model import BaseModule -from mmcls.models.heads import ClsHead -from mmcls.registry import MODELS +from mmpretrain.models.heads import ClsHead +from mmpretrain.registry import MODELS from ..utils import build_norm_layer @@ -74,7 +74,7 @@ class LeViTClsHead(ClsHead): if not self.training: x = (x[0] + x[1]) / 2 else: - raise NotImplementedError("MMClassification doesn't support " + raise NotImplementedError("MMPretrain doesn't support " 'training in distillation mode.') else: x = self.head(x) diff --git a/mmcls/models/heads/linear_head.py b/mmpretrain/models/heads/linear_head.py similarity index 98% rename from mmcls/models/heads/linear_head.py rename to 
mmpretrain/models/heads/linear_head.py index 0bd746dd..90b4c2b1 100644 --- a/mmcls/models/heads/linear_head.py +++ b/mmpretrain/models/heads/linear_head.py @@ -4,7 +4,7 @@ from typing import Optional, Tuple import torch import torch.nn as nn -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .cls_head import ClsHead diff --git a/mmcls/models/heads/margin_head.py b/mmpretrain/models/heads/margin_head.py similarity index 98% rename from mmcls/models/heads/margin_head.py rename to mmpretrain/models/heads/margin_head.py index 2875c841..2441bae6 100644 --- a/mmcls/models/heads/margin_head.py +++ b/mmpretrain/models/heads/margin_head.py @@ -9,9 +9,9 @@ from mmengine.fileio import list_from_file from mmengine.runner import autocast from mmengine.utils import is_seq_of -from mmcls.models.losses import convert_to_one_hot -from mmcls.registry import MODELS -from mmcls.structures import ClsDataSample +from mmpretrain.models.losses import convert_to_one_hot +from mmpretrain.registry import MODELS +from mmpretrain.structures import ClsDataSample from .cls_head import ClsHead diff --git a/mmcls/models/heads/multi_label_cls_head.py b/mmpretrain/models/heads/multi_label_cls_head.py similarity index 98% rename from mmcls/models/heads/multi_label_cls_head.py rename to mmpretrain/models/heads/multi_label_cls_head.py index 746bb6f7..0eb6a769 100644 --- a/mmcls/models/heads/multi_label_cls_head.py +++ b/mmpretrain/models/heads/multi_label_cls_head.py @@ -5,8 +5,8 @@ import torch import torch.nn as nn from mmengine.structures import LabelData -from mmcls.registry import MODELS -from mmcls.structures import ClsDataSample +from mmpretrain.registry import MODELS +from mmpretrain.structures import ClsDataSample from .base_head import BaseHead diff --git a/mmcls/models/heads/multi_label_csra_head.py b/mmpretrain/models/heads/multi_label_csra_head.py similarity index 99% rename from mmcls/models/heads/multi_label_csra_head.py rename to 
mmpretrain/models/heads/multi_label_csra_head.py index e42aecaf..95a3a0e8 100644 --- a/mmcls/models/heads/multi_label_csra_head.py +++ b/mmpretrain/models/heads/multi_label_csra_head.py @@ -6,7 +6,7 @@ import torch import torch.nn as nn from mmengine.model import BaseModule, ModuleList -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .multi_label_cls_head import MultiLabelClsHead diff --git a/mmcls/models/heads/multi_label_linear_head.py b/mmpretrain/models/heads/multi_label_linear_head.py similarity index 98% rename from mmcls/models/heads/multi_label_linear_head.py rename to mmpretrain/models/heads/multi_label_linear_head.py index 08742f9c..81217ec5 100644 --- a/mmcls/models/heads/multi_label_linear_head.py +++ b/mmpretrain/models/heads/multi_label_linear_head.py @@ -4,7 +4,7 @@ from typing import Dict, Optional, Tuple import torch import torch.nn as nn -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .multi_label_cls_head import MultiLabelClsHead diff --git a/mmcls/models/heads/multi_task_head.py b/mmpretrain/models/heads/multi_task_head.py similarity index 98% rename from mmcls/models/heads/multi_task_head.py rename to mmpretrain/models/heads/multi_task_head.py index 64167739..ecffcc83 100644 --- a/mmcls/models/heads/multi_task_head.py +++ b/mmpretrain/models/heads/multi_task_head.py @@ -5,8 +5,8 @@ import torch import torch.nn as nn from mmengine.model import ModuleDict -from mmcls.registry import MODELS -from mmcls.structures import MultiTaskDataSample +from mmpretrain.registry import MODELS +from mmpretrain.structures import MultiTaskDataSample from .base_head import BaseHead diff --git a/mmcls/models/heads/stacked_head.py b/mmpretrain/models/heads/stacked_head.py similarity index 99% rename from mmcls/models/heads/stacked_head.py rename to mmpretrain/models/heads/stacked_head.py index eceaccb6..6cd819de 100644 --- a/mmcls/models/heads/stacked_head.py +++ b/mmpretrain/models/heads/stacked_head.py 
@@ -6,7 +6,7 @@ import torch.nn as nn from mmcv.cnn import build_activation_layer, build_norm_layer from mmengine.model import BaseModule, ModuleList -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .cls_head import ClsHead diff --git a/mmcls/models/heads/vig_head.py b/mmpretrain/models/heads/vig_head.py similarity index 98% rename from mmcls/models/heads/vig_head.py rename to mmpretrain/models/heads/vig_head.py index e557f7f5..ecb984de 100644 --- a/mmcls/models/heads/vig_head.py +++ b/mmpretrain/models/heads/vig_head.py @@ -5,7 +5,7 @@ import torch import torch.nn as nn from mmcv.cnn import build_activation_layer -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .cls_head import ClsHead diff --git a/mmcls/models/heads/vision_transformer_head.py b/mmpretrain/models/heads/vision_transformer_head.py similarity index 98% rename from mmcls/models/heads/vision_transformer_head.py rename to mmpretrain/models/heads/vision_transformer_head.py index 6823d671..a7194d91 100644 --- a/mmcls/models/heads/vision_transformer_head.py +++ b/mmpretrain/models/heads/vision_transformer_head.py @@ -9,7 +9,7 @@ from mmcv.cnn import build_activation_layer from mmengine.model import Sequential from mmengine.model.weight_init import trunc_normal_ -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .cls_head import ClsHead diff --git a/mmcls/models/losses/__init__.py b/mmpretrain/models/losses/__init__.py similarity index 100% rename from mmcls/models/losses/__init__.py rename to mmpretrain/models/losses/__init__.py diff --git a/mmcls/models/losses/asymmetric_loss.py b/mmpretrain/models/losses/asymmetric_loss.py similarity index 99% rename from mmcls/models/losses/asymmetric_loss.py rename to mmpretrain/models/losses/asymmetric_loss.py index 91d2fb53..dcc9707d 100644 --- a/mmcls/models/losses/asymmetric_loss.py +++ b/mmpretrain/models/losses/asymmetric_loss.py @@ -2,7 +2,7 @@ import torch import torch.nn 
as nn -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .utils import convert_to_one_hot, weight_reduce_loss diff --git a/mmcls/models/losses/cross_entropy_loss.py b/mmpretrain/models/losses/cross_entropy_loss.py similarity index 99% rename from mmcls/models/losses/cross_entropy_loss.py rename to mmpretrain/models/losses/cross_entropy_loss.py index a3e51b28..b569713e 100644 --- a/mmcls/models/losses/cross_entropy_loss.py +++ b/mmpretrain/models/losses/cross_entropy_loss.py @@ -2,7 +2,7 @@ import torch.nn as nn import torch.nn.functional as F -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .utils import weight_reduce_loss diff --git a/mmcls/models/losses/focal_loss.py b/mmpretrain/models/losses/focal_loss.py similarity index 99% rename from mmcls/models/losses/focal_loss.py rename to mmpretrain/models/losses/focal_loss.py index 533cffcc..9d2cf503 100644 --- a/mmcls/models/losses/focal_loss.py +++ b/mmpretrain/models/losses/focal_loss.py @@ -2,7 +2,7 @@ import torch.nn as nn import torch.nn.functional as F -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .utils import convert_to_one_hot, weight_reduce_loss diff --git a/mmcls/models/losses/label_smooth_loss.py b/mmpretrain/models/losses/label_smooth_loss.py similarity index 99% rename from mmcls/models/losses/label_smooth_loss.py rename to mmpretrain/models/losses/label_smooth_loss.py index ae8adc7a..b53b9913 100644 --- a/mmcls/models/losses/label_smooth_loss.py +++ b/mmpretrain/models/losses/label_smooth_loss.py @@ -2,7 +2,7 @@ import torch import torch.nn as nn -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .cross_entropy_loss import CrossEntropyLoss from .utils import convert_to_one_hot diff --git a/mmcls/models/losses/seesaw_loss.py b/mmpretrain/models/losses/seesaw_loss.py similarity index 99% rename from mmcls/models/losses/seesaw_loss.py rename to mmpretrain/models/losses/seesaw_loss.py 
index 5f72da5c..4aaaa451 100644 --- a/mmcls/models/losses/seesaw_loss.py +++ b/mmpretrain/models/losses/seesaw_loss.py @@ -4,7 +4,7 @@ import torch import torch.nn as nn import torch.nn.functional as F -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .utils import weight_reduce_loss diff --git a/mmcls/models/losses/utils.py b/mmpretrain/models/losses/utils.py similarity index 100% rename from mmcls/models/losses/utils.py rename to mmpretrain/models/losses/utils.py diff --git a/mmcls/models/necks/__init__.py b/mmpretrain/models/necks/__init__.py similarity index 100% rename from mmcls/models/necks/__init__.py rename to mmpretrain/models/necks/__init__.py diff --git a/mmcls/models/necks/gap.py b/mmpretrain/models/necks/gap.py similarity index 97% rename from mmcls/models/necks/gap.py rename to mmpretrain/models/necks/gap.py index 13b5897d..0877743a 100644 --- a/mmcls/models/necks/gap.py +++ b/mmpretrain/models/necks/gap.py @@ -2,7 +2,7 @@ import torch import torch.nn as nn -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS @MODELS.register_module() diff --git a/mmcls/models/necks/gem.py b/mmpretrain/models/necks/gem.py similarity index 98% rename from mmcls/models/necks/gem.py rename to mmpretrain/models/necks/gem.py index ce6edb36..f5648be8 100644 --- a/mmcls/models/necks/gem.py +++ b/mmpretrain/models/necks/gem.py @@ -4,7 +4,7 @@ from torch import Tensor, nn from torch.nn import functional as F from torch.nn.parameter import Parameter -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS def gem(x: Tensor, p: Parameter, eps: float = 1e-6, clamp=True) -> Tensor: diff --git a/mmcls/models/necks/hr_fuse.py b/mmpretrain/models/necks/hr_fuse.py similarity index 98% rename from mmcls/models/necks/hr_fuse.py rename to mmpretrain/models/necks/hr_fuse.py index 94811cb5..4a97f86f 100644 --- a/mmcls/models/necks/hr_fuse.py +++ b/mmpretrain/models/necks/hr_fuse.py @@ -3,7 +3,7 @@ import torch.nn as nn 
from mmcv.cnn.bricks import ConvModule from mmengine.model import BaseModule -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from ..backbones.resnet import Bottleneck, ResLayer diff --git a/mmcls/models/necks/reduction.py b/mmpretrain/models/necks/reduction.py similarity index 98% rename from mmcls/models/necks/reduction.py rename to mmpretrain/models/necks/reduction.py index bebaebc5..5a59793e 100644 --- a/mmcls/models/necks/reduction.py +++ b/mmpretrain/models/necks/reduction.py @@ -7,7 +7,7 @@ import torch.nn as nn from mmcv.cnn import build_activation_layer, build_norm_layer from mmengine.model import BaseModule -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS @MODELS.register_module() diff --git a/mmcls/models/retrievers/__init__.py b/mmpretrain/models/retrievers/__init__.py similarity index 100% rename from mmcls/models/retrievers/__init__.py rename to mmpretrain/models/retrievers/__init__.py diff --git a/mmcls/models/retrievers/base.py b/mmpretrain/models/retrievers/base.py similarity index 99% rename from mmcls/models/retrievers/base.py rename to mmpretrain/models/retrievers/base.py index dd5e561a..32d38899 100644 --- a/mmcls/models/retrievers/base.py +++ b/mmpretrain/models/retrievers/base.py @@ -83,7 +83,7 @@ class BaseRetriever(BaseModel, metaclass=ABCMeta): - If ``mode="tensor"``, return a tensor. - If ``mode="predict"``, return a list of - :obj:`mmcls.structures.ClsDataSample`. + :obj:`mmpretrain.structures.ClsDataSample`. - If ``mode="loss"``, return a dict of tensor. 
""" pass diff --git a/mmcls/models/retrievers/image2image.py b/mmpretrain/models/retrievers/image2image.py similarity index 96% rename from mmcls/models/retrievers/image2image.py rename to mmpretrain/models/retrievers/image2image.py index e5c36360..6182ba51 100644 --- a/mmcls/models/retrievers/image2image.py +++ b/mmpretrain/models/retrievers/image2image.py @@ -7,9 +7,9 @@ import torch.nn as nn from mmengine.runner import Runner from torch.utils.data import DataLoader -from mmcls.registry import MODELS -from mmcls.structures import ClsDataSample -from mmcls.utils import track_on_main_process +from mmpretrain.registry import MODELS +from mmpretrain.structures import ClsDataSample +from mmpretrain.utils import track_on_main_process from .base import BaseRetriever @@ -29,7 +29,7 @@ class ImageToImageRetriever(BaseRetriever): - torch.Tensor: The saved tensor whose dimension should be dim. head (dict, optional): The head module to calculate loss from - processed features. See :mod:`mmcls.models.heads`. Notice + processed features. See :mod:`mmpretrain.models.heads`. Notice that if the head is not set, `loss` method cannot be used. Defaults to None. similarity_fn (Union[str, Callable]): The way that the similarity @@ -41,7 +41,8 @@ class ImageToImageRetriever(BaseRetriever): fields are: - augments (List[dict]): The batch augmentation methods to use. - More details can be found in :mod:`mmcls.model.utils.augment`. + More details can be found in + :mod:`mmpretrain.model.utils.augment`. Defaults to None. data_preprocessor (dict, optional): The config for preprocessing input @@ -67,7 +68,7 @@ class ImageToImageRetriever(BaseRetriever): if data_preprocessor is None: data_preprocessor = {} # The build process is in MMEngine, so we need to add scope here. 
- data_preprocessor.setdefault('type', 'mmcls.ClsDataPreprocessor') + data_preprocessor.setdefault('type', 'mmpretrain.ClsDataPreprocessor') if train_cfg is not None and 'augments' in train_cfg: # Set batch augmentations by `train_cfg` @@ -142,7 +143,7 @@ class ImageToImageRetriever(BaseRetriever): - If ``mode="tensor"``, return a tensor. - If ``mode="predict"``, return a list of - :obj:`mmcls.structures.ClsDataSample`. + :obj:`mmpretrain.structures.ClsDataSample`. - If ``mode="loss"``, return a dict of tensor. """ if mode == 'tensor': diff --git a/mmpretrain/models/selfsup/__init__.py b/mmpretrain/models/selfsup/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/mmpretrain/models/target_generators/__init__.py b/mmpretrain/models/target_generators/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/mmcls/models/tta/__init__.py b/mmpretrain/models/tta/__init__.py similarity index 100% rename from mmcls/models/tta/__init__.py rename to mmpretrain/models/tta/__init__.py diff --git a/mmcls/models/tta/score_tta.py b/mmpretrain/models/tta/score_tta.py similarity index 92% rename from mmcls/models/tta/score_tta.py rename to mmpretrain/models/tta/score_tta.py index cb348c55..121bdf57 100644 --- a/mmcls/models/tta/score_tta.py +++ b/mmpretrain/models/tta/score_tta.py @@ -3,8 +3,8 @@ from typing import List from mmengine.model import BaseTTAModel -from mmcls.registry import MODELS -from mmcls.structures import ClsDataSample +from mmpretrain.registry import MODELS +from mmpretrain.structures import ClsDataSample @MODELS.register_module() diff --git a/mmcls/models/utils/__init__.py b/mmpretrain/models/utils/__init__.py similarity index 100% rename from mmcls/models/utils/__init__.py rename to mmpretrain/models/utils/__init__.py diff --git a/mmcls/models/utils/attention.py b/mmpretrain/models/utils/attention.py similarity index 99% rename from mmcls/models/utils/attention.py rename to mmpretrain/models/utils/attention.py index 
8d78b59a..132b4da6 100644 --- a/mmcls/models/utils/attention.py +++ b/mmpretrain/models/utils/attention.py @@ -10,7 +10,7 @@ from mmengine.model import BaseModule from mmengine.model.weight_init import trunc_normal_ from mmengine.utils import digit_version -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS from .helpers import to_2tuple from .layer_scale import LayerScale diff --git a/mmcls/models/utils/batch_augments/__init__.py b/mmpretrain/models/utils/batch_augments/__init__.py similarity index 100% rename from mmcls/models/utils/batch_augments/__init__.py rename to mmpretrain/models/utils/batch_augments/__init__.py diff --git a/mmcls/models/utils/batch_augments/cutmix.py b/mmpretrain/models/utils/batch_augments/cutmix.py similarity index 99% rename from mmcls/models/utils/batch_augments/cutmix.py rename to mmpretrain/models/utils/batch_augments/cutmix.py index 5d0920e7..665427bf 100644 --- a/mmcls/models/utils/batch_augments/cutmix.py +++ b/mmpretrain/models/utils/batch_augments/cutmix.py @@ -4,7 +4,7 @@ from typing import List, Optional, Tuple import numpy as np import torch -from mmcls.registry import BATCH_AUGMENTS +from mmpretrain.registry import BATCH_AUGMENTS from .mixup import Mixup diff --git a/mmcls/models/utils/batch_augments/mixup.py b/mmpretrain/models/utils/batch_augments/mixup.py similarity index 97% rename from mmcls/models/utils/batch_augments/mixup.py rename to mmpretrain/models/utils/batch_augments/mixup.py index bbf249e4..bedb2c3e 100644 --- a/mmcls/models/utils/batch_augments/mixup.py +++ b/mmpretrain/models/utils/batch_augments/mixup.py @@ -4,7 +4,7 @@ from typing import Tuple import numpy as np import torch -from mmcls.registry import BATCH_AUGMENTS +from mmpretrain.registry import BATCH_AUGMENTS @BATCH_AUGMENTS.register_module() diff --git a/mmcls/models/utils/batch_augments/resizemix.py b/mmpretrain/models/utils/batch_augments/resizemix.py similarity index 98% rename from 
mmcls/models/utils/batch_augments/resizemix.py rename to mmpretrain/models/utils/batch_augments/resizemix.py index fe01532e..89cfb720 100644 --- a/mmcls/models/utils/batch_augments/resizemix.py +++ b/mmpretrain/models/utils/batch_augments/resizemix.py @@ -5,7 +5,7 @@ import numpy as np import torch import torch.nn.functional as F -from mmcls.registry import BATCH_AUGMENTS +from mmpretrain.registry import BATCH_AUGMENTS from .cutmix import CutMix diff --git a/mmcls/models/utils/batch_augments/wrapper.py b/mmpretrain/models/utils/batch_augments/wrapper.py similarity index 95% rename from mmcls/models/utils/batch_augments/wrapper.py rename to mmpretrain/models/utils/batch_augments/wrapper.py index 2b84dde5..10e5304c 100644 --- a/mmcls/models/utils/batch_augments/wrapper.py +++ b/mmpretrain/models/utils/batch_augments/wrapper.py @@ -4,7 +4,7 @@ from typing import Callable, Union import numpy as np import torch -from mmcls.registry import BATCH_AUGMENTS +from mmpretrain.registry import BATCH_AUGMENTS class RandomBatchAugment: @@ -19,7 +19,7 @@ class RandomBatchAugment: Example: >>> import torch >>> import torch.nn.functional as F - >>> from mmcls.models import RandomBatchAugment + >>> from mmpretrain.models import RandomBatchAugment >>> augments_cfg = [ ... dict(type='CutMix', alpha=1.), ... dict(type='Mixup', alpha=1.) 
diff --git a/mmcls/models/utils/channel_shuffle.py b/mmpretrain/models/utils/channel_shuffle.py similarity index 100% rename from mmcls/models/utils/channel_shuffle.py rename to mmpretrain/models/utils/channel_shuffle.py diff --git a/mmcls/models/utils/data_preprocessor.py b/mmpretrain/models/utils/data_preprocessor.py similarity index 96% rename from mmcls/models/utils/data_preprocessor.py rename to mmpretrain/models/utils/data_preprocessor.py index 716b0a1e..a3bab823 100644 --- a/mmcls/models/utils/data_preprocessor.py +++ b/mmpretrain/models/utils/data_preprocessor.py @@ -7,10 +7,10 @@ import torch import torch.nn.functional as F from mmengine.model import BaseDataPreprocessor, stack_batch -from mmcls.registry import MODELS -from mmcls.structures import (ClsDataSample, MultiTaskDataSample, - batch_label_to_onehot, cat_batch_labels, - stack_batch_scores, tensor_split) +from mmpretrain.registry import MODELS +from mmpretrain.structures import (ClsDataSample, MultiTaskDataSample, + batch_label_to_onehot, cat_batch_labels, + stack_batch_scores, tensor_split) from .batch_augments import RandomBatchAugment @@ -50,7 +50,7 @@ class ClsDataPreprocessor(BaseDataPreprocessor): num_classes (int, optional): The number of classes. Defaults to None. batch_augments (dict, optional): The batch augmentations settings, including "augments" and "probs". For more details, see - :class:`mmcls.models.RandomBatchAugment`. + :class:`mmpretrain.models.RandomBatchAugment`. """ def __init__(self, diff --git a/mmcls/models/utils/embed.py b/mmpretrain/models/utils/embed.py similarity index 99% rename from mmcls/models/utils/embed.py rename to mmpretrain/models/utils/embed.py index bd2b17dc..8299f9a0 100644 --- a/mmcls/models/utils/embed.py +++ b/mmpretrain/models/utils/embed.py @@ -140,7 +140,7 @@ class PatchEmbed(BaseModule): conv_cfg=None, init_cfg=None): super(PatchEmbed, self).__init__(init_cfg) - warnings.warn('The `PatchEmbed` in mmcls will be deprecated. 
' + warnings.warn('The `PatchEmbed` in mmpretrain will be deprecated. ' 'Please use `mmcv.cnn.bricks.transformer.PatchEmbed`. ' "It's more general and supports dynamic input shape") diff --git a/mmcls/models/utils/helpers.py b/mmpretrain/models/utils/helpers.py similarity index 100% rename from mmcls/models/utils/helpers.py rename to mmpretrain/models/utils/helpers.py diff --git a/mmcls/models/utils/inverted_residual.py b/mmpretrain/models/utils/inverted_residual.py similarity index 100% rename from mmcls/models/utils/inverted_residual.py rename to mmpretrain/models/utils/inverted_residual.py diff --git a/mmcls/models/utils/layer_scale.py b/mmpretrain/models/utils/layer_scale.py similarity index 100% rename from mmcls/models/utils/layer_scale.py rename to mmpretrain/models/utils/layer_scale.py diff --git a/mmcls/models/utils/make_divisible.py b/mmpretrain/models/utils/make_divisible.py similarity index 100% rename from mmcls/models/utils/make_divisible.py rename to mmpretrain/models/utils/make_divisible.py diff --git a/mmcls/models/utils/norm.py b/mmpretrain/models/utils/norm.py similarity index 99% rename from mmcls/models/utils/norm.py rename to mmpretrain/models/utils/norm.py index 4b0b45cb..158ccc6d 100644 --- a/mmcls/models/utils/norm.py +++ b/mmpretrain/models/utils/norm.py @@ -3,7 +3,7 @@ import torch import torch.nn as nn import torch.nn.functional as F -from mmcls.registry import MODELS +from mmpretrain.registry import MODELS @MODELS.register_module() diff --git a/mmcls/models/utils/position_encoding.py b/mmpretrain/models/utils/position_encoding.py similarity index 100% rename from mmcls/models/utils/position_encoding.py rename to mmpretrain/models/utils/position_encoding.py diff --git a/mmcls/models/utils/se_layer.py b/mmpretrain/models/utils/se_layer.py similarity index 100% rename from mmcls/models/utils/se_layer.py rename to mmpretrain/models/utils/se_layer.py diff --git a/mmcls/registry.py b/mmpretrain/registry.py similarity index 82% rename from 
mmcls/registry.py rename to mmpretrain/registry.py index aa346b79..45bd0cf5 100644 --- a/mmcls/registry.py +++ b/mmpretrain/registry.py @@ -1,5 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. -"""MMClassification provides 21 registry nodes to support using modules across +"""MMPretrain provides 21 registry nodes to support using modules across projects. Each node is a child of the root registry in MMEngine. More details can be found at @@ -40,151 +40,151 @@ __all__ = [ ] ####################################################################### -# mmcls.engine # +# mmpretrain.engine # ####################################################################### # Runners like `EpochBasedRunner` and `IterBasedRunner` RUNNERS = Registry( 'runner', parent=MMENGINE_RUNNERS, - locations=['mmcls.engine'], + locations=['mmpretrain.engine'], ) # Runner constructors that define how to initialize runners RUNNER_CONSTRUCTORS = Registry( 'runner constructor', parent=MMENGINE_RUNNER_CONSTRUCTORS, - locations=['mmcls.engine'], + locations=['mmpretrain.engine'], ) # Loops which define the training or test process, like `EpochBasedTrainLoop` LOOPS = Registry( 'loop', parent=MMENGINE_LOOPS, - locations=['mmcls.engine'], + locations=['mmpretrain.engine'], ) # Hooks to add additional functions during running, like `CheckpointHook` HOOKS = Registry( 'hook', parent=MMENGINE_HOOKS, - locations=['mmcls.engine'], + locations=['mmpretrain.engine'], ) # Log processors to process the scalar log data. LOG_PROCESSORS = Registry( 'log processor', parent=MMENGINE_LOG_PROCESSORS, - locations=['mmcls.engine'], + locations=['mmpretrain.engine'], ) # Optimizers to optimize the model weights, like `SGD` and `Adam`. OPTIMIZERS = Registry( 'optimizer', parent=MMENGINE_OPTIMIZERS, - locations=['mmcls.engine'], + locations=['mmpretrain.engine'], ) # Optimizer wrappers to enhance the optimization process. 
OPTIM_WRAPPERS = Registry( 'optimizer_wrapper', parent=MMENGINE_OPTIM_WRAPPERS, - locations=['mmcls.engine'], + locations=['mmpretrain.engine'], ) # Optimizer constructors to customize the hyperparameters of optimizers. OPTIM_WRAPPER_CONSTRUCTORS = Registry( 'optimizer wrapper constructor', parent=MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS, - locations=['mmcls.engine'], + locations=['mmpretrain.engine'], ) # Parameter schedulers to dynamically adjust optimization parameters. PARAM_SCHEDULERS = Registry( 'parameter scheduler', parent=MMENGINE_PARAM_SCHEDULERS, - locations=['mmcls.engine'], + locations=['mmpretrain.engine'], ) ####################################################################### -# mmcls.datasets # +# mmpretrain.datasets # ####################################################################### # Datasets like `ImageNet` and `CIFAR10`. DATASETS = Registry( 'dataset', parent=MMENGINE_DATASETS, - locations=['mmcls.datasets'], + locations=['mmpretrain.datasets'], ) # Samplers to sample the dataset. DATA_SAMPLERS = Registry( 'data sampler', parent=MMENGINE_DATA_SAMPLERS, - locations=['mmcls.datasets'], + locations=['mmpretrain.datasets'], ) # Transforms to process the samples from the dataset. TRANSFORMS = Registry( 'transform', parent=MMENGINE_TRANSFORMS, - locations=['mmcls.datasets'], + locations=['mmpretrain.datasets'], ) ####################################################################### -# mmcls.models # +# mmpretrain.models # ####################################################################### # Neural network modules inheriting `nn.Module`. MODELS = Registry( 'model', parent=MMENGINE_MODELS, - locations=['mmcls.models'], + locations=['mmpretrain.models'], ) # Model wrappers like 'MMDistributedDataParallel' MODEL_WRAPPERS = Registry( 'model_wrapper', parent=MMENGINE_MODEL_WRAPPERS, - locations=['mmcls.models'], + locations=['mmpretrain.models'], ) # Weight initialization methods like uniform, xavier. 
WEIGHT_INITIALIZERS = Registry( 'weight initializer', parent=MMENGINE_WEIGHT_INITIALIZERS, - locations=['mmcls.models'], + locations=['mmpretrain.models'], ) # Batch augmentations like `Mixup` and `CutMix`. BATCH_AUGMENTS = Registry( 'batch augment', - locations=['mmcls.models'], + locations=['mmpretrain.models'], ) # Task-specific modules like anchor generators and box coders TASK_UTILS = Registry( 'task util', parent=MMENGINE_TASK_UTILS, - locations=['mmcls.models'], + locations=['mmpretrain.models'], ) ####################################################################### -# mmcls.evaluation # +# mmpretrain.evaluation # ####################################################################### # Metrics to evaluate the model prediction results. METRICS = Registry( 'metric', parent=MMENGINE_METRICS, - locations=['mmcls.evaluation'], + locations=['mmpretrain.evaluation'], ) # Evaluators to define the evaluation process. EVALUATORS = Registry( 'evaluator', parent=MMENGINE_EVALUATOR, - locations=['mmcls.evaluation'], + locations=['mmpretrain.evaluation'], ) ####################################################################### -# mmcls.visualization # +# mmpretrain.visualization # ####################################################################### # Visualizers to display task-specific results. VISUALIZERS = Registry( 'visualizer', parent=MMENGINE_VISUALIZERS, - locations=['mmcls.visualization'], + locations=['mmpretrain.visualization'], ) # Backends to save the visualization results, like TensorBoard, WandB. 
VISBACKENDS = Registry( 'vis_backend', parent=MMENGINE_VISBACKENDS, - locations=['mmcls.visualization'], + locations=['mmpretrain.visualization'], ) diff --git a/mmcls/structures/__init__.py b/mmpretrain/structures/__init__.py similarity index 100% rename from mmcls/structures/__init__.py rename to mmpretrain/structures/__init__.py diff --git a/mmcls/structures/cls_data_sample.py b/mmpretrain/structures/cls_data_sample.py similarity index 99% rename from mmcls/structures/cls_data_sample.py rename to mmpretrain/structures/cls_data_sample.py index 9e319a7b..e983ce4b 100644 --- a/mmcls/structures/cls_data_sample.py +++ b/mmpretrain/structures/cls_data_sample.py @@ -92,7 +92,7 @@ class ClsDataSample(BaseDataElement): Examples: >>> import torch - >>> from mmcls.structures import ClsDataSample + >>> from mmpretrain.structures import ClsDataSample >>> >>> img_meta = dict(img_shape=(960, 720), num_classes=5) >>> data_sample = ClsDataSample(metainfo=img_meta) diff --git a/mmcls/structures/multi_task_data_sample.py b/mmpretrain/structures/multi_task_data_sample.py similarity index 100% rename from mmcls/structures/multi_task_data_sample.py rename to mmpretrain/structures/multi_task_data_sample.py diff --git a/mmcls/structures/utils.py b/mmpretrain/structures/utils.py similarity index 97% rename from mmcls/structures/utils.py rename to mmpretrain/structures/utils.py index 8c8f0f3d..5602557f 100644 --- a/mmcls/structures/utils.py +++ b/mmpretrain/structures/utils.py @@ -57,7 +57,7 @@ def batch_label_to_onehot(batch_label, split_indices, num_classes): Examples: >>> import torch - >>> from mmcls.structures import batch_label_to_onehot + >>> from mmpretrain.structures import batch_label_to_onehot >>> # Assume a concated label from 3 samples. 
>>> # label 1: [0, 1], label 2: [0, 2, 4], label 3: [3, 1] >>> batch_label = torch.tensor([0, 1, 0, 2, 4, 3, 1]) diff --git a/mmcls/utils/__init__.py b/mmpretrain/utils/__init__.py similarity index 100% rename from mmcls/utils/__init__.py rename to mmpretrain/utils/__init__.py diff --git a/mmcls/utils/analyze.py b/mmpretrain/utils/analyze.py similarity index 100% rename from mmcls/utils/analyze.py rename to mmpretrain/utils/analyze.py diff --git a/mmcls/utils/collect_env.py b/mmpretrain/utils/collect_env.py similarity index 79% rename from mmcls/utils/collect_env.py rename to mmpretrain/utils/collect_env.py index 2ad1c657..c5713a97 100644 --- a/mmcls/utils/collect_env.py +++ b/mmpretrain/utils/collect_env.py @@ -2,11 +2,11 @@ from mmengine.utils import get_git_hash from mmengine.utils.dl_utils import collect_env as collect_base_env -import mmcls +import mmpretrain def collect_env(): """Collect the information of the running environments.""" env_info = collect_base_env() - env_info['MMClassification'] = mmcls.__version__ + '+' + get_git_hash()[:7] + env_info['MMPretrain'] = mmpretrain.__version__ + '+' + get_git_hash()[:7] return env_info diff --git a/mmcls/utils/progress.py b/mmpretrain/utils/progress.py similarity index 100% rename from mmcls/utils/progress.py rename to mmpretrain/utils/progress.py diff --git a/mmpretrain/utils/setup_env.py b/mmpretrain/utils/setup_env.py new file mode 100644 index 00000000..1b57b848 --- /dev/null +++ b/mmpretrain/utils/setup_env.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import datetime +import warnings + +from mmengine import DefaultScope + + +def register_all_modules(init_default_scope: bool = True) -> None: + """Register all modules in mmpretrain into the registries. + + Args: + init_default_scope (bool): Whether initialize the mmpretrain default + scope. If True, the global default scope will be set to + `mmpretrain`, and all registries will build modules from + mmpretrain's registry node. 
To understand more about the registry, + please refer to + https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md + Defaults to True. + """ # noqa: E501 + import mmpretrain.datasets # noqa: F401,F403 + import mmpretrain.engine # noqa: F401,F403 + import mmpretrain.evaluation # noqa: F401,F403 + import mmpretrain.models # noqa: F401,F403 + import mmpretrain.structures # noqa: F401,F403 + import mmpretrain.visualization # noqa: F401,F403 + + if not init_default_scope: + return + + current_scope = DefaultScope.get_current_instance() + if current_scope is None: + DefaultScope.get_instance('mmpretrain', scope_name='mmpretrain') + elif current_scope.scope_name != 'mmpretrain': + warnings.warn( + f'The current default scope "{current_scope.scope_name}" ' + 'is not "mmpretrain", `register_all_modules` will force ' + 'the current default scope to be "mmpretrain". If this is ' + 'not expected, please set `init_default_scope=False`.') + # avoid name conflict + new_instance_name = f'mmpretrain-{datetime.datetime.now()}' + DefaultScope.get_instance(new_instance_name, scope_name='mmpretrain') diff --git a/mmcls/version.py b/mmpretrain/version.py similarity index 100% rename from mmcls/version.py rename to mmpretrain/version.py diff --git a/mmcls/visualization/__init__.py b/mmpretrain/visualization/__init__.py similarity index 100% rename from mmcls/visualization/__init__.py rename to mmpretrain/visualization/__init__.py diff --git a/mmcls/visualization/cls_visualizer.py b/mmpretrain/visualization/cls_visualizer.py similarity index 97% rename from mmcls/visualization/cls_visualizer.py rename to mmpretrain/visualization/cls_visualizer.py index 476058ca..3b61ddfa 100644 --- a/mmcls/visualization/cls_visualizer.py +++ b/mmpretrain/visualization/cls_visualizer.py @@ -6,8 +6,8 @@ import numpy as np from mmengine.dist import master_only from mmengine.visualization import Visualizer -from mmcls.registry import VISUALIZERS -from mmcls.structures import 
ClsDataSample +from mmpretrain.registry import VISUALIZERS +from mmpretrain.structures import ClsDataSample def _get_adaptive_scale(img_shape: Tuple[int, int], @@ -56,8 +56,8 @@ class ClsVisualizer(Visualizer): >>> import torch >>> import mmcv >>> from pathlib import Path - >>> from mmcls.visualization import ClsVisualizer - >>> from mmcls.structures import ClsDataSample + >>> from mmpretrain.visualization import ClsVisualizer + >>> from mmpretrain.structures import ClsDataSample >>> # Example image >>> img = mmcv.imread("./demo/bird.JPEG", channel_order='rgb') >>> # Example annotation diff --git a/projects/example_project/configs/examplenet_8xb32_in1k.py b/projects/example_project/configs/examplenet_8xb32_in1k.py index 5e5f89ea..99ab94d6 100644 --- a/projects/example_project/configs/examplenet_8xb32_in1k.py +++ b/projects/example_project/configs/examplenet_8xb32_in1k.py @@ -1,5 +1,5 @@ # Directly inherit the entire recipe you want to use. -_base_ = 'mmcls::resnet/resnet50_8xb32_in1k.py' +_base_ = 'mmpretrain::resnet/resnet50_8xb32_in1k.py' # This line is to import your own modules. custom_imports = dict(imports='models') diff --git a/projects/example_project/models/example_net.py b/projects/example_project/models/example_net.py index b6ff35dc..ec3aab81 100644 --- a/projects/example_project/models/example_net.py +++ b/projects/example_project/models/example_net.py @@ -1,5 +1,5 @@ -from mmcls.models import ResNet -from mmcls.registry import MODELS +from mmpretrain.models import ResNet +from mmpretrain.registry import MODELS # Register your model to the `MODELS`. @@ -12,7 +12,7 @@ class ExampleNet(ResNet): def __init__(self, **kwargs) -> None: print('#############################\n' - '# Hello MMClassification! #\n' + '# Hello MMPretrain! 
#\n' '#############################') super().__init__(**kwargs) diff --git a/requirements/runtime.txt b/requirements/runtime.txt index e0acc881..ae30211a 100644 --- a/requirements/runtime.txt +++ b/requirements/runtime.txt @@ -1,5 +1,7 @@ +attrs +einops +importlib-metadata;python_version<'3.8' matplotlib modelindex numpy -packaging rich diff --git a/setup.cfg b/setup.cfg index 8c24f5ff..13c91624 100644 --- a/setup.cfg +++ b/setup.cfg @@ -13,7 +13,7 @@ split_before_expression_after_opening_paren = true line_length = 79 multi_line_output = 0 extra_standard_library = pkg_resources,setuptools -known_first_party = mmcls +known_first_party = mmpretrain no_lines_before = STDLIB,LOCALFOLDER default_section = THIRDPARTY diff --git a/setup.py b/setup.py index 28f57577..6d34bb39 100644 --- a/setup.py +++ b/setup.py @@ -13,7 +13,7 @@ def readme(): def get_version(): - version_file = 'mmcls/version.py' + version_file = 'mmpretrain/version.py' with open(version_file, 'r', encoding='utf-8') as f: exec(compile(f.read(), version_file, 'exec')) return locals()['__version__'] @@ -119,7 +119,7 @@ def add_mim_extension(): filenames = ['tools', 'configs', 'model-index.yml'] repo_path = osp.dirname(__file__) - mim_path = osp.join(repo_path, 'mmcls', '.mim') + mim_path = osp.join(repo_path, 'mmpretrain', '.mim') os.makedirs(mim_path, exist_ok=True) for filename in filenames: @@ -161,12 +161,13 @@ def add_mim_extension(): if __name__ == '__main__': add_mim_extension() setup( - name='mmcls', + name='mmpretrain', version=get_version(), - description='OpenMMLab Image Classification Toolbox and Benchmark', + description='OpenMMLab Model Pretraining Toolbox and Benchmark', long_description=readme(), long_description_content_type='text/markdown', - keywords='computer vision, image classification', + keywords='computer vision, image classification, ' + 'unsupervised learning, self-supervised learning', packages=find_packages(exclude=('configs', 'tools', 'demo')), include_package_data=True, 
python_requires='>=3.7', @@ -178,10 +179,12 @@ if __name__ == '__main__': 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', 'Topic :: Scientific/Engineering :: Artificial Intelligence', ], - url='https://github.com/open-mmlab/mmclassification', - author='MMClassification Contributors', + url='https://github.com/open-mmlab/mmpretrain', + author='MMPretrain Contributors', author_email='openmmlab@gmail.com', license='Apache License 2.0', install_requires=parse_requirements('requirements/runtime.txt'), diff --git a/tests/test_apis/test_inference.py b/tests/test_apis/test_inference.py index b41bb5a1..40340b06 100644 --- a/tests/test_apis/test_inference.py +++ b/tests/test_apis/test_inference.py @@ -6,11 +6,11 @@ from unittest.mock import ANY, MagicMock, patch from mmcv.image import imread -from mmcls.apis import (ImageClassificationInferencer, ModelHub, get_model, - inference_model) -from mmcls.models import MobileNetV3 -from mmcls.structures import ClsDataSample -from mmcls.visualization import ClsVisualizer +from mmpretrain.apis import (ImageClassificationInferencer, ModelHub, + get_model, inference_model) +from mmpretrain.models import MobileNetV3 +from mmpretrain.structures import ClsDataSample +from mmpretrain.visualization import ClsVisualizer MODEL = 'mobilenet-v3-small-050_3rdparty_in1k' WEIGHT = 'https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/mobilenet-v3-small-050_3rdparty_in1k_20221114-e0b86be1.pth' # noqa: E501 diff --git a/tests/test_apis/test_model.py b/tests/test_apis/test_model.py index f44b7733..fb763285 100644 --- a/tests/test_apis/test_model.py +++ b/tests/test_apis/test_model.py @@ -5,13 +5,13 @@ from unittest.mock import patch from mmengine import Config -from mmcls.apis import ModelHub, get_model, init_model, list_models -from mmcls.models import ImageClassifier, 
MobileNetV2 +from mmpretrain.apis import ModelHub, get_model, init_model, list_models +from mmpretrain.models import ImageClassifier, MobileNetV2 class TestModelHub(TestCase): - def test_mmcls_models(self): + def test_mmpretrain_models(self): self.assertIn('resnet18_8xb32_in1k', ModelHub._models_dict) def test_register_model_index(self): @@ -57,7 +57,7 @@ class TestHubAPIs(TestCase): self.assertIsInstance(model, ImageClassifier) self.assertIsInstance(model.backbone, MobileNetV2) - with patch('mmcls.apis.model.init_model') as mock: + with patch('mmpretrain.apis.model.init_model') as mock: model = get_model('mobilenet-v2_8xb32_in1k', pretrained=True) model = get_model('mobilenet-v2_8xb32_in1k', pretrained='test.pth') diff --git a/tests/test_datasets/test_dataset_utils.py b/tests/test_datasets/test_dataset_utils.py index 7208ba27..6e924248 100644 --- a/tests/test_datasets/test_dataset_utils.py +++ b/tests/test_datasets/test_dataset_utils.py @@ -6,8 +6,8 @@ from unittest.mock import patch import pytest -from mmcls.datasets.utils import (check_integrity, open_maybe_compressed_file, - rm_suffix) +from mmpretrain.datasets.utils import (check_integrity, + open_maybe_compressed_file, rm_suffix) def test_dataset_utils(): diff --git a/tests/test_datasets/test_datasets.py b/tests/test_datasets/test_datasets.py index 8341451d..488750ef 100644 --- a/tests/test_datasets/test_datasets.py +++ b/tests/test_datasets/test_datasets.py @@ -10,7 +10,7 @@ from unittest.mock import MagicMock, call, patch import numpy as np from mmengine.logging import MMLogger -from mmcls.registry import DATASETS, TRANSFORMS +from mmpretrain.registry import DATASETS, TRANSFORMS ASSETS_ROOT = osp.abspath(osp.join(osp.dirname(__file__), '../data/dataset')) @@ -411,8 +411,8 @@ class TestCIFAR10(TestBaseDataset): dataset.full_init() # Test automatically download - with patch( - 'mmcls.datasets.cifar.download_and_extract_archive') as mock: + with patch('mmpretrain.datasets.cifar.download_and_extract_archive' + 
) as mock: cfg = {**self.DEFAULT_ARGS, 'lazy_init': True, 'test_mode': True} dataset = dataset_class(**cfg) dataset.test_list = [['invalid_batch', None]] @@ -720,8 +720,8 @@ class TestMNIST(TestBaseDataset): np.testing.assert_equal(data_info['gt_label'], self.fake_label) # Test automatically download - with patch( - 'mmcls.datasets.mnist.download_and_extract_archive') as mock: + with patch('mmpretrain.datasets.mnist.download_and_extract_archive' + ) as mock: cfg = {**self.DEFAULT_ARGS, 'lazy_init': True, 'test_mode': True} dataset = dataset_class(**cfg) dataset.train_list = [['invalid_train_file', None]] diff --git a/tests/test_datasets/test_samplers/test_repeat_aug.py b/tests/test_datasets/test_samplers/test_repeat_aug.py index 1fce3510..01926e93 100644 --- a/tests/test_datasets/test_samplers/test_repeat_aug.py +++ b/tests/test_datasets/test_samplers/test_repeat_aug.py @@ -7,9 +7,9 @@ from unittest.mock import patch import torch from mmengine.logging import MMLogger -from mmcls.datasets import RepeatAugSampler +from mmpretrain.datasets import RepeatAugSampler -file = 'mmcls.datasets.samplers.repeat_aug.' +file = 'mmpretrain.datasets.samplers.repeat_aug.' 
class MockDist: diff --git a/tests/test_datasets/test_transforms/test_auto_augment.py b/tests/test_datasets/test_transforms/test_auto_augment.py index 58ef50b5..1720b728 100644 --- a/tests/test_datasets/test_transforms/test_auto_augment.py +++ b/tests/test_datasets/test_transforms/test_auto_augment.py @@ -6,7 +6,7 @@ from unittest.mock import ANY, patch import numpy as np -from mmcls.registry import TRANSFORMS +from mmpretrain.registry import TRANSFORMS def construct_toy_data(): diff --git a/tests/test_datasets/test_transforms/test_formatting.py b/tests/test_datasets/test_transforms/test_formatting.py index 37cb5485..19781160 100644 --- a/tests/test_datasets/test_transforms/test_formatting.py +++ b/tests/test_datasets/test_transforms/test_formatting.py @@ -9,8 +9,8 @@ import torch from mmengine.structures import LabelData from PIL import Image -from mmcls.registry import TRANSFORMS -from mmcls.structures import ClsDataSample, MultiTaskDataSample +from mmpretrain.registry import TRANSFORMS +from mmpretrain.structures import ClsDataSample, MultiTaskDataSample class TestPackClsInputs(unittest.TestCase): diff --git a/tests/test_datasets/test_transforms/test_processing.py b/tests/test_datasets/test_transforms/test_processing.py index 12bf8c3b..878ff5aa 100644 --- a/tests/test_datasets/test_transforms/test_processing.py +++ b/tests/test_datasets/test_transforms/test_processing.py @@ -8,7 +8,7 @@ import albumentations import mmengine import numpy as np -from mmcls.registry import TRANSFORMS +from mmpretrain.registry import TRANSFORMS def construct_toy_data(): @@ -560,7 +560,7 @@ class TestColorJitter(TestCase): cfg = {**self.DEFAULT_ARGS, 'contrast': 0.} transform = TRANSFORMS.build(cfg) with patch('numpy.random', np.random.RandomState(0)): - mmcv_module = 'mmcls.datasets.transforms.processing.mmcv' + mmcv_module = 'mmpretrain.datasets.transforms.processing.mmcv' call_list = [ call.adjust_color(ANY, alpha=ANY), call.adjust_hue(ANY, ANY), @@ -753,8 +753,8 @@ class 
TestAlbumentations(TestCase): p=0.1), ] cfg['transforms'] = nested_transform_cfg - mmcls_module = TRANSFORMS.build(cfg) - mmcls_module(results) + mmpretrain_module = TRANSFORMS.build(cfg) + mmpretrain_module(results) # test to be same with albumentations 3rd package np.random.seed(0) @@ -777,9 +777,10 @@ class TestAlbumentations(TestCase): dict(type='HorizontalFlip', p=0.5), dict(type='RandomBrightnessContrast', p=0.2) ] - mmcls_module = TRANSFORMS.build(cfg) - transformed_image_mmcls = mmcls_module(results)['img'] - assert np.equal(transformed_image_3rd, transformed_image_mmcls).all() + mmpretrain_module = TRANSFORMS.build(cfg) + transformed_image_mmpretrain = mmpretrain_module(results)['img'] + assert np.equal(transformed_image_3rd, + transformed_image_mmpretrain).all() # Test class obj case results = dict(img=np.random.randint(0, 256, (200, 300, 3), np.uint8)) diff --git a/tests/test_engine/test_hooks/test_arcface_hooks.py b/tests/test_engine/test_hooks/test_arcface_hooks.py index 041d36a4..6f2831f5 100644 --- a/tests/test_engine/test_hooks/test_arcface_hooks.py +++ b/tests/test_engine/test_hooks/test_arcface_hooks.py @@ -60,7 +60,7 @@ class TestSetAdaptiveMarginsHook(TestCase): optimizer=dict(type='SGD', lr=0.1, momentum=0.9)), param_scheduler=dict( type='MultiStepLR', milestones=[1, 2], gamma=0.1), - default_scope='mmcls', + default_scope='mmpretrain', default_hooks=default_hooks, experiment_name='test_construct_with_arcface', custom_hooks=[self.DEFAULT_HOOK_CFG]) @@ -94,7 +94,7 @@ class TestSetAdaptiveMarginsHook(TestCase): optimizer=dict(type='SGD', lr=0.1, momentum=0.9)), param_scheduler=dict( type='MultiStepLR', milestones=[1, 2], gamma=0.1), - default_scope='mmcls', + default_scope='mmpretrain', default_hooks=default_hooks, experiment_name='test_construct_wo_arcface', custom_hooks=[self.DEFAULT_HOOK_CFG]) diff --git a/tests/test_engine/test_hooks/test_class_num_check_hook.py b/tests/test_engine/test_hooks/test_class_num_check_hook.py index 
ac26e773..5663c603 100644 --- a/tests/test_engine/test_hooks/test_class_num_check_hook.py +++ b/tests/test_engine/test_hooks/test_class_num_check_hook.py @@ -2,7 +2,7 @@ from unittest import TestCase from unittest.mock import MagicMock, patch -from mmcls.engine import ClassNumCheckHook +from mmpretrain.engine import ClassNumCheckHook class TestClassNumCheckHook(TestCase): diff --git a/tests/test_engine/test_hooks/test_ema_hook.py b/tests/test_engine/test_hooks/test_ema_hook.py index e0b54dee..c7eead14 100644 --- a/tests/test_engine/test_hooks/test_ema_hook.py +++ b/tests/test_engine/test_hooks/test_ema_hook.py @@ -16,7 +16,7 @@ from mmengine.runner import Runner from mmengine.testing import assert_allclose from torch.utils.data import Dataset -from mmcls.engine import EMAHook +from mmpretrain.engine import EMAHook class SimpleModel(BaseModel): @@ -88,7 +88,7 @@ class TestEMAHook(TestCase): load_from=self.ckpt, default_hooks=dict(logger=None), custom_hooks=[ema_hook], - default_scope='mmcls', + default_scope='mmpretrain', experiment_name='load_state_dict') runner.train() assert_allclose(runner.model.para, torch.tensor([1.], device=device)) @@ -113,7 +113,7 @@ class TestEMAHook(TestCase): load_from=self.ckpt, default_hooks=dict(logger=None), custom_hooks=[dict(type='EMAHook')], - default_scope='mmcls', + default_scope='mmpretrain', experiment_name='validate_on_ema') runner.val() evaluator.metrics[0].process.assert_has_calls([ @@ -138,7 +138,7 @@ class TestEMAHook(TestCase): load_from=self.ckpt, default_hooks=dict(logger=None), custom_hooks=[dict(type='EMAHook')], - default_scope='mmcls', + default_scope='mmpretrain', experiment_name='test_on_ema') runner.test() evaluator.metrics[0].process.assert_has_calls([ @@ -163,7 +163,7 @@ class TestEMAHook(TestCase): load_from=self.ckpt, default_hooks=dict(logger=None), custom_hooks=[dict(type='EMAHook', evaluate_on_origin=True)], - default_scope='mmcls', + default_scope='mmpretrain', experiment_name='validate_on_ema_false', ) 
runner.val() @@ -187,7 +187,7 @@ class TestEMAHook(TestCase): load_from=self.ckpt, default_hooks=dict(logger=None), custom_hooks=[dict(type='EMAHook', evaluate_on_origin=True)], - default_scope='mmcls', + default_scope='mmpretrain', experiment_name='test_on_ema_false', ) runner.test() @@ -212,7 +212,7 @@ class TestEMAHook(TestCase): load_from=self.ckpt, default_hooks=dict(logger=None), custom_hooks=[dict(type='EMAHook', evaluate_on_ema=False)], - default_scope='mmcls', + default_scope='mmpretrain', experiment_name='not_test_on_ema') runner.test() evaluator.metrics[0].process.assert_has_calls([ diff --git a/tests/test_engine/test_hooks/test_precise_bn_hook.py b/tests/test_engine/test_hooks/test_precise_bn_hook.py index b3b11d64..f8a202fe 100644 --- a/tests/test_engine/test_hooks/test_precise_bn_hook.py +++ b/tests/test_engine/test_hooks/test_precise_bn_hook.py @@ -13,8 +13,8 @@ from mmengine.model import BaseDataPreprocessor, BaseModel from mmengine.runner import Runner from torch.utils.data import DataLoader, Dataset -from mmcls.registry import HOOKS -from mmcls.structures import ClsDataSample +from mmpretrain.registry import HOOKS +from mmpretrain.structures import ClsDataSample class ExampleDataset(Dataset): @@ -129,7 +129,7 @@ class TestPreciseBNHookHook(TestCase): log_level='WARNING', optim_wrapper=self.optim_wrapper, param_scheduler=self.epoch_param_scheduler, - default_scope='mmcls', + default_scope='mmpretrain', default_hooks=self.default_hooks, experiment_name='test_construct', custom_hooks=None) @@ -164,7 +164,7 @@ class TestPreciseBNHookHook(TestCase): log_level='WARNING', optim_wrapper=self.optim_wrapper, param_scheduler=self.epoch_param_scheduler, - default_scope='mmcls', + default_scope='mmpretrain', default_hooks=self.default_hooks, experiment_name='test_after_train_epoch_multi_machines', custom_hooks=[self.preciseBN_cfg]) @@ -180,7 +180,7 @@ class TestPreciseBNHookHook(TestCase): log_level='WARNING', optim_wrapper=self.optim_wrapper, 
param_scheduler=self.epoch_param_scheduler, - default_scope='mmcls', + default_scope='mmpretrain', default_hooks=self.default_hooks, experiment_name='test_after_train_epoch', custom_hooks=[self.preciseBN_cfg]) @@ -217,7 +217,7 @@ class TestPreciseBNHookHook(TestCase): log_level='WARNING', optim_wrapper=self.optim_wrapper, param_scheduler=self.iter_param_scheduler, - default_scope='mmcls', + default_scope='mmpretrain', default_hooks=self.default_hooks, experiment_name='test_after_train_iter', custom_hooks=[self.preciseBN_cfg]) diff --git a/tests/test_engine/test_hooks/test_retrievers_hooks.py b/tests/test_engine/test_hooks/test_retrievers_hooks.py index 055803f1..c14e70ca 100644 --- a/tests/test_engine/test_hooks/test_retrievers_hooks.py +++ b/tests/test_engine/test_hooks/test_retrievers_hooks.py @@ -4,8 +4,8 @@ from unittest.mock import MagicMock import torch -from mmcls.engine import PrepareProtoBeforeValLoopHook -from mmcls.models.retrievers import BaseRetriever +from mmpretrain.engine import PrepareProtoBeforeValLoopHook +from mmpretrain.models.retrievers import BaseRetriever class ToyRetriever(BaseRetriever): diff --git a/tests/test_engine/test_hooks/test_switch_recipe_hook.py b/tests/test_engine/test_hooks/test_switch_recipe_hook.py index d4ab4141..e1b4ebff 100644 --- a/tests/test_engine/test_hooks/test_switch_recipe_hook.py +++ b/tests/test_engine/test_hooks/test_switch_recipe_hook.py @@ -15,11 +15,11 @@ from mmengine.model import BaseDataPreprocessor, BaseModel from mmengine.optim import OptimWrapper from mmengine.runner import Runner -from mmcls.engine import SwitchRecipeHook -from mmcls.models import CrossEntropyLoss -from mmcls.models.heads.cls_head import ClsHead -from mmcls.models.losses import LabelSmoothLoss -from mmcls.models.utils.batch_augments import RandomBatchAugment +from mmpretrain.engine import SwitchRecipeHook +from mmpretrain.models import CrossEntropyLoss +from mmpretrain.models.heads.cls_head import ClsHead +from mmpretrain.models.losses 
import LabelSmoothLoss +from mmpretrain.models.utils.batch_augments import RandomBatchAugment class SimpleDataPreprocessor(BaseDataPreprocessor): @@ -162,7 +162,7 @@ class TestSwitchRecipeHook(TestCase): work_dir=self.tmpdir.name, default_hooks=dict(logger=None), custom_hooks=[switch_hook], - default_scope='mmcls', + default_scope='mmpretrain', experiment_name='test_switch') runner.train() self.assertEqual(switch_hook.schedule[2]['batch_augments'].call_count, @@ -197,7 +197,7 @@ class TestSwitchRecipeHook(TestCase): # work_dir=self.tmpdir.name, # default_hooks=dict(logger=None), # custom_hooks=[switch_hook], - # default_scope='mmcls', + # default_scope='mmpretrain', # experiment_name='test_switch_multi_workers') # with self.assertRaisesRegex(AssertionError, 'No `input` in data.'): # # If the pipeline switch works, the data_preprocessor cannot @@ -239,7 +239,7 @@ class TestSwitchRecipeHook(TestCase): work_dir=self.tmpdir.name, default_hooks=dict(logger=None), custom_hooks=[switch_hook], - default_scope='mmcls', + default_scope='mmpretrain', experiment_name='test_resume1') runner.train() @@ -260,7 +260,7 @@ class TestSwitchRecipeHook(TestCase): work_dir=self.tmpdir.name, default_hooks=dict(logger=None), custom_hooks=[switch_hook], - default_scope='mmcls', + default_scope='mmpretrain', experiment_name='test_resume2') with self.assertLogs(runner.logger, 'INFO') as logs: @@ -293,7 +293,7 @@ class TestSwitchRecipeHook(TestCase): train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=10), work_dir=self.tmpdir.name, default_hooks=dict(logger=None), - default_scope='mmcls', + default_scope='mmpretrain', experiment_name='test_concat_dataset') pipeline = MagicMock() SwitchRecipeHook._switch_train_pipeline(runner, pipeline) @@ -316,7 +316,7 @@ class TestSwitchRecipeHook(TestCase): train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=10), work_dir=self.tmpdir.name, default_hooks=dict(logger=None), - default_scope='mmcls', + default_scope='mmpretrain', 
experiment_name='test_repeat_dataset') pipeline = MagicMock() SwitchRecipeHook._switch_train_pipeline(runner, pipeline) @@ -341,7 +341,7 @@ class TestSwitchRecipeHook(TestCase): train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=10), work_dir=self.tmpdir.name, default_hooks=dict(logger=None), - default_scope='mmcls', + default_scope='mmpretrain', experiment_name='test_model_loss') loss = CrossEntropyLoss(use_soft=True) SwitchRecipeHook._switch_loss(runner, loss) @@ -363,7 +363,7 @@ class TestSwitchRecipeHook(TestCase): train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=10), work_dir=self.tmpdir.name, default_hooks=dict(logger=None), - default_scope='mmcls', + default_scope='mmpretrain', experiment_name='test_head_loss') loss = CrossEntropyLoss(use_soft=True) SwitchRecipeHook._switch_loss(runner, loss) diff --git a/tests/test_engine/test_hooks/test_visualization_hook.py b/tests/test_engine/test_hooks/test_visualization_hook.py index 31d3ae6a..f9fbdfd1 100644 --- a/tests/test_engine/test_hooks/test_visualization_hook.py +++ b/tests/test_engine/test_hooks/test_visualization_hook.py @@ -7,10 +7,10 @@ from unittest.mock import ANY, MagicMock, patch import torch from mmengine.runner import EpochBasedTrainLoop, IterBasedTrainLoop -from mmcls.engine import VisualizationHook -from mmcls.registry import HOOKS -from mmcls.structures import ClsDataSample -from mmcls.visualization import ClsVisualizer +from mmpretrain.engine import VisualizationHook +from mmpretrain.registry import HOOKS +from mmpretrain.structures import ClsDataSample +from mmpretrain.visualization import ClsVisualizer class TestVisualizationHook(TestCase): diff --git a/tests/test_evaluation/test_metrics/test_metric_utils.py b/tests/test_evaluation/test_metrics/test_metric_utils.py index bc221d7b..3102ac54 100644 --- a/tests/test_evaluation/test_metrics/test_metric_utils.py +++ b/tests/test_evaluation/test_metrics/test_metric_utils.py @@ -2,7 +2,7 @@ import pytest import torch -from 
mmcls.models.losses.utils import convert_to_one_hot +from mmpretrain.models.losses.utils import convert_to_one_hot def ori_convert_to_one_hot(targets: torch.Tensor, classes) -> torch.Tensor: diff --git a/tests/test_evaluation/test_metrics/test_multi_label.py b/tests/test_evaluation/test_metrics/test_multi_label.py index c0407394..64016323 100644 --- a/tests/test_evaluation/test_metrics/test_multi_label.py +++ b/tests/test_evaluation/test_metrics/test_multi_label.py @@ -5,9 +5,12 @@ import numpy as np import sklearn.metrics import torch from mmengine.evaluator import Evaluator +from mmengine.registry import init_default_scope -from mmcls.evaluation.metrics import AveragePrecision, MultiLabelMetric -from mmcls.structures import ClsDataSample +from mmpretrain.evaluation.metrics import AveragePrecision, MultiLabelMetric +from mmpretrain.structures import ClsDataSample + +init_default_scope('mmpretrain') class TestMultiLabel(TestCase): diff --git a/tests/test_evaluation/test_metrics/test_multi_task_metrics.py b/tests/test_evaluation/test_metrics/test_multi_task_metrics.py index 29e4d96d..b28605e3 100644 --- a/tests/test_evaluation/test_metrics/test_multi_task_metrics.py +++ b/tests/test_evaluation/test_metrics/test_multi_task_metrics.py @@ -3,8 +3,8 @@ from unittest import TestCase import torch -from mmcls.evaluation.metrics import MultiTasksMetric -from mmcls.structures import ClsDataSample +from mmpretrain.evaluation.metrics import MultiTasksMetric +from mmpretrain.structures import ClsDataSample class MultiTaskMetric(TestCase): diff --git a/tests/test_evaluation/test_metrics/test_retrieval.py b/tests/test_evaluation/test_metrics/test_retrieval.py index 78d5bf18..58d68852 100644 --- a/tests/test_evaluation/test_metrics/test_retrieval.py +++ b/tests/test_evaluation/test_metrics/test_retrieval.py @@ -4,9 +4,9 @@ from unittest import TestCase import numpy as np import torch -from mmcls.evaluation.metrics import RetrievalRecall -from mmcls.registry import METRICS -from 
mmcls.structures import ClsDataSample +from mmpretrain.evaluation.metrics import RetrievalRecall +from mmpretrain.registry import METRICS +from mmpretrain.structures import ClsDataSample class TestRetrievalRecall(TestCase): diff --git a/tests/test_evaluation/test_metrics/test_single_label.py b/tests/test_evaluation/test_metrics/test_single_label.py index 3b443fcc..08958580 100644 --- a/tests/test_evaluation/test_metrics/test_single_label.py +++ b/tests/test_evaluation/test_metrics/test_single_label.py @@ -5,10 +5,10 @@ from unittest import TestCase import numpy as np import torch -from mmcls.evaluation.metrics import (Accuracy, ConfusionMatrix, - SingleLabelMetric) -from mmcls.registry import METRICS -from mmcls.structures import ClsDataSample +from mmpretrain.evaluation.metrics import (Accuracy, ConfusionMatrix, + SingleLabelMetric) +from mmpretrain.registry import METRICS +from mmpretrain.structures import ClsDataSample class TestAccuracy(TestCase): diff --git a/tests/test_evaluation/test_metrics/test_voc_metrics.py b/tests/test_evaluation/test_metrics/test_voc_metrics.py index 74b1f764..1ef2e9b7 100644 --- a/tests/test_evaluation/test_metrics/test_voc_metrics.py +++ b/tests/test_evaluation/test_metrics/test_voc_metrics.py @@ -5,8 +5,11 @@ import numpy as np import sklearn.metrics import torch from mmengine.evaluator import Evaluator +from mmengine.registry import init_default_scope -from mmcls.structures import ClsDataSample +from mmpretrain.structures import ClsDataSample + +init_default_scope('mmpretrain') class TestVOCMultiLabel(TestCase): diff --git a/tests/test_models/test_backbones/test_beit.py b/tests/test_models/test_backbones/test_beit.py index cb9512b6..b10adb9e 100644 --- a/tests/test_models/test_backbones/test_beit.py +++ b/tests/test_models/test_backbones/test_beit.py @@ -4,7 +4,7 @@ from unittest import TestCase import torch -from mmcls.models.backbones import BEiT +from mmpretrain.models.backbones import BEiT class TestBEiT(TestCase): diff --git 
a/tests/test_models/test_backbones/test_conformer.py b/tests/test_models/test_backbones/test_conformer.py index 0b1958c5..d28ad5ac 100644 --- a/tests/test_models/test_backbones/test_conformer.py +++ b/tests/test_models/test_backbones/test_conformer.py @@ -6,7 +6,7 @@ import torch from torch.nn.modules import GroupNorm from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.backbones import Conformer +from mmpretrain.models.backbones import Conformer def is_norm(modules): diff --git a/tests/test_models/test_backbones/test_convmixer.py b/tests/test_models/test_backbones/test_convmixer.py index 26296615..abe6c138 100644 --- a/tests/test_models/test_backbones/test_convmixer.py +++ b/tests/test_models/test_backbones/test_convmixer.py @@ -2,7 +2,7 @@ import pytest import torch -from mmcls.models.backbones import ConvMixer +from mmpretrain.models.backbones import ConvMixer def test_assertion(): diff --git a/tests/test_models/test_backbones/test_convnext.py b/tests/test_models/test_backbones/test_convnext.py index d92478ba..5f63795c 100644 --- a/tests/test_models/test_backbones/test_convnext.py +++ b/tests/test_models/test_backbones/test_convnext.py @@ -2,7 +2,7 @@ import pytest import torch -from mmcls.models.backbones import ConvNeXt +from mmpretrain.models.backbones import ConvNeXt def test_assertion(): diff --git a/tests/test_models/test_backbones/test_cspnet.py b/tests/test_models/test_backbones/test_cspnet.py index 2caefff4..656e9d00 100644 --- a/tests/test_models/test_backbones/test_cspnet.py +++ b/tests/test_models/test_backbones/test_cspnet.py @@ -7,9 +7,10 @@ import torch from mmcv.cnn import ConvModule from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm -from mmcls.models.backbones import CSPDarkNet, CSPResNet, CSPResNeXt -from mmcls.models.backbones.cspnet import (CSPNet, DarknetBottleneck, - ResNetBottleneck, ResNeXtBottleneck) +from mmpretrain.models.backbones import CSPDarkNet, CSPResNet, CSPResNeXt +from 
mmpretrain.models.backbones.cspnet import (CSPNet, DarknetBottleneck, + ResNetBottleneck, + ResNeXtBottleneck) class TestCSPNet(TestCase): diff --git a/tests/test_models/test_backbones/test_davit.py b/tests/test_models/test_backbones/test_davit.py index d417341c..726db741 100644 --- a/tests/test_models/test_backbones/test_davit.py +++ b/tests/test_models/test_backbones/test_davit.py @@ -4,8 +4,8 @@ from unittest import TestCase import torch -from mmcls.models.backbones import DaViT -from mmcls.models.backbones.davit import SpatialBlock +from mmpretrain.models.backbones import DaViT +from mmpretrain.models.backbones.davit import SpatialBlock class TestDaViT(TestCase): diff --git a/tests/test_models/test_backbones/test_deit.py b/tests/test_models/test_backbones/test_deit.py index 9209f6c8..21914651 100644 --- a/tests/test_models/test_backbones/test_deit.py +++ b/tests/test_models/test_backbones/test_deit.py @@ -8,7 +8,7 @@ from unittest import TestCase import torch from mmengine.runner import load_checkpoint, save_checkpoint -from mmcls.models.backbones import DistilledVisionTransformer +from mmpretrain.models.backbones import DistilledVisionTransformer from .utils import timm_resize_pos_embed diff --git a/tests/test_models/test_backbones/test_deit3.py b/tests/test_models/test_backbones/test_deit3.py index 7d7d485e..7e7aa485 100644 --- a/tests/test_models/test_backbones/test_deit3.py +++ b/tests/test_models/test_backbones/test_deit3.py @@ -8,7 +8,7 @@ from unittest import TestCase import torch from mmengine.runner import load_checkpoint, save_checkpoint -from mmcls.models.backbones import DeiT3 +from mmpretrain.models.backbones import DeiT3 class TestDeiT3(TestCase): diff --git a/tests/test_models/test_backbones/test_densenet.py b/tests/test_models/test_backbones/test_densenet.py index 5e4c73bc..6b02bd1e 100644 --- a/tests/test_models/test_backbones/test_densenet.py +++ b/tests/test_models/test_backbones/test_densenet.py @@ -2,7 +2,7 @@ import pytest import torch 
-from mmcls.models.backbones import DenseNet +from mmpretrain.models.backbones import DenseNet def test_assertion(): diff --git a/tests/test_models/test_backbones/test_edgenext.py b/tests/test_models/test_backbones/test_edgenext.py index 4b39e3d8..93b48a40 100644 --- a/tests/test_models/test_backbones/test_edgenext.py +++ b/tests/test_models/test_backbones/test_edgenext.py @@ -2,7 +2,7 @@ import pytest import torch -from mmcls.models.backbones import EdgeNeXt +from mmpretrain.models.backbones import EdgeNeXt def test_assertion(): diff --git a/tests/test_models/test_backbones/test_efficientformer.py b/tests/test_models/test_backbones/test_efficientformer.py index 88aad529..36876dcb 100644 --- a/tests/test_models/test_backbones/test_efficientformer.py +++ b/tests/test_models/test_backbones/test_efficientformer.py @@ -6,10 +6,10 @@ import torch from mmcv.cnn import ConvModule from torch import nn -from mmcls.models.backbones import EfficientFormer -from mmcls.models.backbones.efficientformer import (AttentionWithBias, Flat, - Meta3D, Meta4D) -from mmcls.models.backbones.poolformer import Pooling +from mmpretrain.models.backbones import EfficientFormer +from mmpretrain.models.backbones.efficientformer import (AttentionWithBias, + Flat, Meta3D, Meta4D) +from mmpretrain.models.backbones.poolformer import Pooling class TestEfficientFormer(TestCase): diff --git a/tests/test_models/test_backbones/test_efficientnet.py b/tests/test_models/test_backbones/test_efficientnet.py index d424b230..37551ffd 100644 --- a/tests/test_models/test_backbones/test_efficientnet.py +++ b/tests/test_models/test_backbones/test_efficientnet.py @@ -4,7 +4,7 @@ import torch from torch.nn.modules import GroupNorm from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.backbones import EfficientNet +from mmpretrain.models.backbones import EfficientNet def is_norm(modules): diff --git a/tests/test_models/test_backbones/test_efficientnet_v2.py 
b/tests/test_models/test_backbones/test_efficientnet_v2.py index 240688b0..ca5c9b07 100644 --- a/tests/test_models/test_backbones/test_efficientnet_v2.py +++ b/tests/test_models/test_backbones/test_efficientnet_v2.py @@ -4,7 +4,7 @@ import torch from torch.nn.modules import GroupNorm from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.backbones import EfficientNetV2 +from mmpretrain.models.backbones import EfficientNetV2 def is_norm(modules): diff --git a/tests/test_models/test_backbones/test_hornet.py b/tests/test_models/test_backbones/test_hornet.py index c676e810..8031d1b3 100644 --- a/tests/test_models/test_backbones/test_hornet.py +++ b/tests/test_models/test_backbones/test_hornet.py @@ -10,7 +10,7 @@ from mmengine.utils import digit_version from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm from torch import nn -from mmcls.models.backbones import HorNet +from mmpretrain.models.backbones import HorNet def check_norm_state(modules, train_state): diff --git a/tests/test_models/test_backbones/test_hrnet.py b/tests/test_models/test_backbones/test_hrnet.py index cb9909a8..96fec469 100644 --- a/tests/test_models/test_backbones/test_hrnet.py +++ b/tests/test_models/test_backbones/test_hrnet.py @@ -4,7 +4,7 @@ import torch from torch.nn.modules import GroupNorm from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.backbones import HRNet +from mmpretrain.models.backbones import HRNet def is_norm(modules): diff --git a/tests/test_models/test_backbones/test_inception_v3.py b/tests/test_models/test_backbones/test_inception_v3.py index 2d8de45a..4450dd27 100644 --- a/tests/test_models/test_backbones/test_inception_v3.py +++ b/tests/test_models/test_backbones/test_inception_v3.py @@ -4,8 +4,8 @@ from unittest import TestCase import torch -from mmcls.models import InceptionV3 -from mmcls.models.backbones.inception_v3 import InceptionAux +from mmpretrain.models import InceptionV3 +from mmpretrain.models.backbones.inception_v3 import 
InceptionAux class TestInceptionV3(TestCase): diff --git a/tests/test_models/test_backbones/test_levit.py b/tests/test_models/test_backbones/test_levit.py index 05b7944d..af274f1c 100644 --- a/tests/test_models/test_backbones/test_levit.py +++ b/tests/test_models/test_backbones/test_levit.py @@ -9,8 +9,9 @@ from torch import nn from torch.nn.modules import GroupNorm from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.backbones import levit -from mmcls.models.backbones.levit import Attention, AttentionSubsample, LeViT +from mmpretrain.models.backbones import levit +from mmpretrain.models.backbones.levit import (Attention, AttentionSubsample, + LeViT) def check_norm_state(modules, train_state): diff --git a/tests/test_models/test_backbones/test_mixmim.py b/tests/test_models/test_backbones/test_mixmim.py index e21d143c..8d349639 100644 --- a/tests/test_models/test_backbones/test_mixmim.py +++ b/tests/test_models/test_backbones/test_mixmim.py @@ -4,7 +4,7 @@ from unittest import TestCase import torch -from mmcls.models.backbones import MixMIMTransformer +from mmpretrain.models.backbones import MixMIMTransformer class TestMixMIM(TestCase): diff --git a/tests/test_models/test_backbones/test_mlp_mixer.py b/tests/test_models/test_backbones/test_mlp_mixer.py index 5606c855..8a4f176c 100644 --- a/tests/test_models/test_backbones/test_mlp_mixer.py +++ b/tests/test_models/test_backbones/test_mlp_mixer.py @@ -6,7 +6,7 @@ import torch from torch.nn.modules import GroupNorm from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.backbones import MlpMixer +from mmpretrain.models.backbones import MlpMixer def is_norm(modules): diff --git a/tests/test_models/test_backbones/test_mobilenet_v2.py b/tests/test_models/test_backbones/test_mobilenet_v2.py index 9ea75570..ffe43ff8 100644 --- a/tests/test_models/test_backbones/test_mobilenet_v2.py +++ b/tests/test_models/test_backbones/test_mobilenet_v2.py @@ -4,8 +4,8 @@ import torch from torch.nn.modules 
import GroupNorm from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.backbones import MobileNetV2 -from mmcls.models.backbones.mobilenet_v2 import InvertedResidual +from mmpretrain.models.backbones import MobileNetV2 +from mmpretrain.models.backbones.mobilenet_v2 import InvertedResidual def is_block(modules): diff --git a/tests/test_models/test_backbones/test_mobilenet_v3.py b/tests/test_models/test_backbones/test_mobilenet_v3.py index b122dbd7..560b948c 100644 --- a/tests/test_models/test_backbones/test_mobilenet_v3.py +++ b/tests/test_models/test_backbones/test_mobilenet_v3.py @@ -4,8 +4,8 @@ import torch from torch.nn.modules import GroupNorm from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.backbones import MobileNetV3 -from mmcls.models.utils import InvertedResidual +from mmpretrain.models.backbones import MobileNetV3 +from mmpretrain.models.utils import InvertedResidual def is_norm(modules): diff --git a/tests/test_models/test_backbones/test_mobileone.py b/tests/test_models/test_backbones/test_mobileone.py index 32e6751f..93a13f15 100644 --- a/tests/test_models/test_backbones/test_mobileone.py +++ b/tests/test_models/test_backbones/test_mobileone.py @@ -9,9 +9,9 @@ from torch import nn from torch.nn.modules import GroupNorm from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.backbones import MobileOne -from mmcls.models.backbones.mobileone import MobileOneBlock -from mmcls.models.utils import SELayer +from mmpretrain.models.backbones import MobileOne +from mmpretrain.models.backbones.mobileone import MobileOneBlock +from mmpretrain.models.utils import SELayer def check_norm_state(modules, train_state): diff --git a/tests/test_models/test_backbones/test_mobilevit.py b/tests/test_models/test_backbones/test_mobilevit.py index f9ff4e54..2b7d8d9a 100644 --- a/tests/test_models/test_backbones/test_mobilevit.py +++ b/tests/test_models/test_backbones/test_mobilevit.py @@ -2,7 +2,7 @@ import pytest import torch 
-from mmcls.models.backbones import MobileViT +from mmpretrain.models.backbones import MobileViT def test_assertion(): diff --git a/tests/test_models/test_backbones/test_mvit.py b/tests/test_models/test_backbones/test_mvit.py index 7757ab81..0a5e1269 100644 --- a/tests/test_models/test_backbones/test_mvit.py +++ b/tests/test_models/test_backbones/test_mvit.py @@ -5,7 +5,7 @@ from unittest import TestCase import torch -from mmcls.models import MViT +from mmpretrain.models import MViT class TestMViT(TestCase): diff --git a/tests/test_models/test_backbones/test_poolformer.py b/tests/test_models/test_backbones/test_poolformer.py index 8e60b81f..f61b3040 100644 --- a/tests/test_models/test_backbones/test_poolformer.py +++ b/tests/test_models/test_backbones/test_poolformer.py @@ -4,8 +4,8 @@ from unittest import TestCase import torch -from mmcls.models.backbones import PoolFormer -from mmcls.models.backbones.poolformer import PoolFormerBlock +from mmpretrain.models.backbones import PoolFormer +from mmpretrain.models.backbones.poolformer import PoolFormerBlock class TestPoolFormer(TestCase): diff --git a/tests/test_models/test_backbones/test_regnet.py b/tests/test_models/test_backbones/test_regnet.py index 67de1c87..bed26fea 100644 --- a/tests/test_models/test_backbones/test_regnet.py +++ b/tests/test_models/test_backbones/test_regnet.py @@ -2,7 +2,7 @@ import pytest import torch -from mmcls.models.backbones import RegNet +from mmpretrain.models.backbones import RegNet regnet_test_data = [ ('regnetx_400mf', diff --git a/tests/test_models/test_backbones/test_replknet.py b/tests/test_models/test_backbones/test_replknet.py index a7ad48ab..ed9305c4 100644 --- a/tests/test_models/test_backbones/test_replknet.py +++ b/tests/test_models/test_backbones/test_replknet.py @@ -9,8 +9,8 @@ from torch import nn from torch.nn.modules import GroupNorm from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.backbones import RepLKNet -from mmcls.models.backbones.replknet 
import ReparamLargeKernelConv +from mmpretrain.models.backbones import RepLKNet +from mmpretrain.models.backbones.replknet import ReparamLargeKernelConv def check_norm_state(modules, train_state): diff --git a/tests/test_models/test_backbones/test_repmlp.py b/tests/test_models/test_backbones/test_repmlp.py index 57a60589..bfcb5dfc 100644 --- a/tests/test_models/test_backbones/test_repmlp.py +++ b/tests/test_models/test_backbones/test_repmlp.py @@ -7,7 +7,7 @@ from unittest import TestCase import torch from mmengine.runner import load_checkpoint, save_checkpoint -from mmcls.models.backbones import RepMLPNet +from mmpretrain.models.backbones import RepMLPNet class TestRepMLP(TestCase): diff --git a/tests/test_models/test_backbones/test_repvgg.py b/tests/test_models/test_backbones/test_repvgg.py index 4976fdb3..a558dbc9 100644 --- a/tests/test_models/test_backbones/test_repvgg.py +++ b/tests/test_models/test_backbones/test_repvgg.py @@ -9,9 +9,9 @@ from torch import nn from torch.nn.modules import GroupNorm from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.backbones import RepVGG -from mmcls.models.backbones.repvgg import RepVGGBlock -from mmcls.models.utils import SELayer +from mmpretrain.models.backbones import RepVGG +from mmpretrain.models.backbones.repvgg import RepVGGBlock +from mmpretrain.models.utils import SELayer def check_norm_state(modules, train_state): diff --git a/tests/test_models/test_backbones/test_res2net.py b/tests/test_models/test_backbones/test_res2net.py index 261e6109..365f5f1e 100644 --- a/tests/test_models/test_backbones/test_res2net.py +++ b/tests/test_models/test_backbones/test_res2net.py @@ -3,7 +3,7 @@ import pytest import torch from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm -from mmcls.models.backbones import Res2Net +from mmpretrain.models.backbones import Res2Net def check_norm_state(modules, train_state): diff --git a/tests/test_models/test_backbones/test_resnest.py 
b/tests/test_models/test_backbones/test_resnest.py index 7a0b250d..7c265cb1 100644 --- a/tests/test_models/test_backbones/test_resnest.py +++ b/tests/test_models/test_backbones/test_resnest.py @@ -2,8 +2,8 @@ import pytest import torch -from mmcls.models.backbones import ResNeSt -from mmcls.models.backbones.resnest import Bottleneck as BottleneckS +from mmpretrain.models.backbones import ResNeSt +from mmpretrain.models.backbones.resnest import Bottleneck as BottleneckS def test_bottleneck(): diff --git a/tests/test_models/test_backbones/test_resnet.py b/tests/test_models/test_backbones/test_resnet.py index 95a0d1fc..bf2900db 100644 --- a/tests/test_models/test_backbones/test_resnet.py +++ b/tests/test_models/test_backbones/test_resnet.py @@ -5,9 +5,9 @@ import torch.nn as nn from mmcv.cnn import ConvModule from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm -from mmcls.models.backbones import ResNet, ResNetV1c, ResNetV1d -from mmcls.models.backbones.resnet import (BasicBlock, Bottleneck, ResLayer, - get_expansion) +from mmpretrain.models.backbones import ResNet, ResNetV1c, ResNetV1d +from mmpretrain.models.backbones.resnet import (BasicBlock, Bottleneck, + ResLayer, get_expansion) def is_block(modules): diff --git a/tests/test_models/test_backbones/test_resnet_cifar.py b/tests/test_models/test_backbones/test_resnet_cifar.py index 3d01fcac..45865669 100644 --- a/tests/test_models/test_backbones/test_resnet_cifar.py +++ b/tests/test_models/test_backbones/test_resnet_cifar.py @@ -3,7 +3,7 @@ import pytest import torch from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm -from mmcls.models.backbones import ResNet_CIFAR +from mmpretrain.models.backbones import ResNet_CIFAR def check_norm_state(modules, train_state): diff --git a/tests/test_models/test_backbones/test_resnext.py b/tests/test_models/test_backbones/test_resnext.py index 4ee15f93..5c33f9a2 100644 --- a/tests/test_models/test_backbones/test_resnext.py +++ 
b/tests/test_models/test_backbones/test_resnext.py @@ -2,8 +2,8 @@ import pytest import torch -from mmcls.models.backbones import ResNeXt -from mmcls.models.backbones.resnext import Bottleneck as BottleneckX +from mmpretrain.models.backbones import ResNeXt +from mmpretrain.models.backbones.resnext import Bottleneck as BottleneckX def test_bottleneck(): diff --git a/tests/test_models/test_backbones/test_revvit.py b/tests/test_models/test_backbones/test_revvit.py index d50bffc1..1f234949 100644 --- a/tests/test_models/test_backbones/test_revvit.py +++ b/tests/test_models/test_backbones/test_revvit.py @@ -7,7 +7,7 @@ from unittest import TestCase import torch from mmengine.runner import load_checkpoint, save_checkpoint -from mmcls.models.backbones import RevVisionTransformer +from mmpretrain.models.backbones import RevVisionTransformer from .utils import timm_resize_pos_embed diff --git a/tests/test_models/test_backbones/test_seresnet.py b/tests/test_models/test_backbones/test_seresnet.py index 32670209..d7f9dffe 100644 --- a/tests/test_models/test_backbones/test_seresnet.py +++ b/tests/test_models/test_backbones/test_seresnet.py @@ -4,9 +4,9 @@ import torch from torch.nn.modules import AvgPool2d from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.backbones import SEResNet -from mmcls.models.backbones.resnet import ResLayer -from mmcls.models.backbones.seresnet import SEBottleneck, SELayer +from mmpretrain.models.backbones import SEResNet +from mmpretrain.models.backbones.resnet import ResLayer +from mmpretrain.models.backbones.seresnet import SEBottleneck, SELayer def all_zeros(modules): diff --git a/tests/test_models/test_backbones/test_seresnext.py b/tests/test_models/test_backbones/test_seresnext.py index 2431c070..7b84f84e 100644 --- a/tests/test_models/test_backbones/test_seresnext.py +++ b/tests/test_models/test_backbones/test_seresnext.py @@ -2,8 +2,8 @@ import pytest import torch -from mmcls.models.backbones import SEResNeXt -from 
mmcls.models.backbones.seresnext import SEBottleneck as SEBottleneckX +from mmpretrain.models.backbones import SEResNeXt +from mmpretrain.models.backbones.seresnext import SEBottleneck as SEBottleneckX def test_bottleneck(): diff --git a/tests/test_models/test_backbones/test_shufflenet_v1.py b/tests/test_models/test_backbones/test_shufflenet_v1.py index 97beee7a..3a55acfd 100644 --- a/tests/test_models/test_backbones/test_shufflenet_v1.py +++ b/tests/test_models/test_backbones/test_shufflenet_v1.py @@ -4,8 +4,8 @@ import torch from torch.nn.modules import GroupNorm from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.backbones import ShuffleNetV1 -from mmcls.models.backbones.shufflenet_v1 import ShuffleUnit +from mmpretrain.models.backbones import ShuffleNetV1 +from mmpretrain.models.backbones.shufflenet_v1 import ShuffleUnit def is_block(modules): diff --git a/tests/test_models/test_backbones/test_shufflenet_v2.py b/tests/test_models/test_backbones/test_shufflenet_v2.py index b7ab4955..84bcec1f 100644 --- a/tests/test_models/test_backbones/test_shufflenet_v2.py +++ b/tests/test_models/test_backbones/test_shufflenet_v2.py @@ -4,8 +4,8 @@ import torch from torch.nn.modules import GroupNorm from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.backbones import ShuffleNetV2 -from mmcls.models.backbones.shufflenet_v2 import InvertedResidual +from mmpretrain.models.backbones import ShuffleNetV2 +from mmpretrain.models.backbones.shufflenet_v2 import InvertedResidual def is_block(modules): diff --git a/tests/test_models/test_backbones/test_swin_transformer.py b/tests/test_models/test_backbones/test_swin_transformer.py index 613e9b54..1437dac7 100644 --- a/tests/test_models/test_backbones/test_swin_transformer.py +++ b/tests/test_models/test_backbones/test_swin_transformer.py @@ -10,8 +10,8 @@ import torch from mmengine.runner import load_checkpoint, save_checkpoint from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm -from 
mmcls.models.backbones import SwinTransformer -from mmcls.models.backbones.swin_transformer import SwinBlock +from mmpretrain.models.backbones import SwinTransformer +from mmpretrain.models.backbones.swin_transformer import SwinBlock from .utils import timm_resize_pos_embed diff --git a/tests/test_models/test_backbones/test_swin_transformer_v2.py b/tests/test_models/test_backbones/test_swin_transformer_v2.py index 03c93449..02e238c2 100644 --- a/tests/test_models/test_backbones/test_swin_transformer_v2.py +++ b/tests/test_models/test_backbones/test_swin_transformer_v2.py @@ -10,8 +10,8 @@ import torch from mmengine.runner import load_checkpoint, save_checkpoint from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm -from mmcls.models.backbones import SwinTransformerV2 -from mmcls.models.backbones.swin_transformer import SwinBlock +from mmpretrain.models.backbones import SwinTransformerV2 +from mmpretrain.models.backbones.swin_transformer import SwinBlock from .utils import timm_resize_pos_embed diff --git a/tests/test_models/test_backbones/test_t2t_vit.py b/tests/test_models/test_backbones/test_t2t_vit.py index f35ff9d4..f0466ba1 100644 --- a/tests/test_models/test_backbones/test_t2t_vit.py +++ b/tests/test_models/test_backbones/test_t2t_vit.py @@ -8,7 +8,7 @@ from unittest import TestCase import torch from mmengine.runner import load_checkpoint, save_checkpoint -from mmcls.models.backbones import T2T_ViT +from mmpretrain.models.backbones import T2T_ViT from .utils import timm_resize_pos_embed diff --git a/tests/test_models/test_backbones/test_timm_backbone.py b/tests/test_models/test_backbones/test_timm_backbone.py index 8dfee934..cfc659bd 100644 --- a/tests/test_models/test_backbones/test_timm_backbone.py +++ b/tests/test_models/test_backbones/test_timm_backbone.py @@ -6,7 +6,7 @@ import torch from torch import nn from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.backbones import TIMMBackbone +from mmpretrain.models.backbones import 
TIMMBackbone def has_timm() -> bool: diff --git a/tests/test_models/test_backbones/test_tinyvit.py b/tests/test_models/test_backbones/test_tinyvit.py index 50b0218a..9747b76b 100644 --- a/tests/test_models/test_backbones/test_tinyvit.py +++ b/tests/test_models/test_backbones/test_tinyvit.py @@ -3,7 +3,7 @@ import pytest import torch -from mmcls.models.backbones import TinyViT +from mmpretrain.models.backbones import TinyViT def test_assertion(): diff --git a/tests/test_models/test_backbones/test_tnt.py b/tests/test_models/test_backbones/test_tnt.py index 2feffd6a..83b997da 100644 --- a/tests/test_models/test_backbones/test_tnt.py +++ b/tests/test_models/test_backbones/test_tnt.py @@ -3,7 +3,7 @@ import pytest import torch from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.backbones import TNT +from mmpretrain.models.backbones import TNT def check_norm_state(modules, train_state): diff --git a/tests/test_models/test_backbones/test_twins.py b/tests/test_models/test_backbones/test_twins.py index 7b81aa5f..e7ca43ee 100644 --- a/tests/test_models/test_backbones/test_twins.py +++ b/tests/test_models/test_backbones/test_twins.py @@ -5,9 +5,9 @@ import pytest import torch import torch.nn as nn -from mmcls.models.backbones.twins import (PCPVT, SVT, - GlobalSubsampledAttention, - LocallyGroupedSelfAttention) +from mmpretrain.models.backbones.twins import (PCPVT, SVT, + GlobalSubsampledAttention, + LocallyGroupedSelfAttention) def test_LSA_module(): diff --git a/tests/test_models/test_backbones/test_van.py b/tests/test_models/test_backbones/test_van.py index e80853ad..fed9e3e5 100644 --- a/tests/test_models/test_backbones/test_van.py +++ b/tests/test_models/test_backbones/test_van.py @@ -8,7 +8,7 @@ import torch from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm from torch import nn -from mmcls.models.backbones import VAN +from mmpretrain.models.backbones import VAN def check_norm_state(modules, train_state): diff --git 
a/tests/test_models/test_backbones/test_vgg.py b/tests/test_models/test_backbones/test_vgg.py index 2a2dd1e9..dd3910fe 100644 --- a/tests/test_models/test_backbones/test_vgg.py +++ b/tests/test_models/test_backbones/test_vgg.py @@ -3,7 +3,7 @@ import pytest import torch from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm -from mmcls.models.backbones import VGG +from mmpretrain.models.backbones import VGG def check_norm_state(modules, train_state): diff --git a/tests/test_models/test_backbones/test_vision_transformer.py b/tests/test_models/test_backbones/test_vision_transformer.py index 5f0fbbf5..31a049fd 100644 --- a/tests/test_models/test_backbones/test_vision_transformer.py +++ b/tests/test_models/test_backbones/test_vision_transformer.py @@ -8,7 +8,7 @@ from unittest import TestCase import torch from mmengine.runner import load_checkpoint, save_checkpoint -from mmcls.models.backbones import VisionTransformer +from mmpretrain.models.backbones import VisionTransformer from .utils import timm_resize_pos_embed diff --git a/tests/test_models/test_backbones/test_xcit.py b/tests/test_models/test_backbones/test_xcit.py index dc0ad694..95a8cfdf 100644 --- a/tests/test_models/test_backbones/test_xcit.py +++ b/tests/test_models/test_backbones/test_xcit.py @@ -2,7 +2,7 @@ # The basic forward/backward tests are in ../test_models.py import torch -from mmcls.apis import get_model +from mmpretrain.apis import get_model def test_out_type(): diff --git a/tests/test_models/test_classifiers.py b/tests/test_models/test_classifiers.py index 7ed76b09..dcbad704 100644 --- a/tests/test_models/test_classifiers.py +++ b/tests/test_models/test_classifiers.py @@ -7,9 +7,9 @@ import torch import torch.nn as nn from mmengine import ConfigDict -from mmcls.models import ImageClassifier -from mmcls.registry import MODELS -from mmcls.structures import ClsDataSample +from mmpretrain.models import ImageClassifier +from mmpretrain.registry import MODELS +from mmpretrain.structures import 
ClsDataSample def has_timm() -> bool: diff --git a/tests/test_models/test_heads.py b/tests/test_models/test_heads.py index fe803346..43adf0a0 100644 --- a/tests/test_models/test_heads.py +++ b/tests/test_models/test_heads.py @@ -9,8 +9,8 @@ import numpy as np import torch from mmengine import is_seq_of -from mmcls.registry import MODELS -from mmcls.structures import ClsDataSample, MultiTaskDataSample +from mmpretrain.registry import MODELS +from mmpretrain.structures import ClsDataSample, MultiTaskDataSample def setup_seed(seed): @@ -460,7 +460,7 @@ class EfficientFormerClsHead(TestClsHead): cfg = copy.deepcopy(self.DEFAULT_ARGS) cfg['distillation'] = True head = MODELS.build(cfg) - with self.assertRaisesRegex(NotImplementedError, 'MMClassification '): + with self.assertRaisesRegex(NotImplementedError, 'MMPretrain '): head.loss(feats, data_samples) # test without distillation head diff --git a/tests/test_models/test_losses.py b/tests/test_models/test_losses.py index 442da9df..b25fe2f1 100644 --- a/tests/test_models/test_losses.py +++ b/tests/test_models/test_losses.py @@ -2,7 +2,7 @@ import pytest import torch -from mmcls.models import build_loss +from mmpretrain.models import build_loss def test_asymmetric_loss(): diff --git a/tests/test_models/test_models.py b/tests/test_models/test_models.py index e7473ac9..dbf7e413 100644 --- a/tests/test_models/test_models.py +++ b/tests/test_models/test_models.py @@ -4,8 +4,8 @@ from dataclasses import dataclass import pytest import torch -import mmcls.models -from mmcls.apis import ModelHub, get_model +import mmpretrain.models +from mmpretrain.apis import ModelHub, get_model @dataclass @@ -20,9 +20,10 @@ class Cfg: test_list = [ - Cfg(name='xcit-small-12-p16_3rdparty_in1k', backbone=mmcls.models.XCiT), + Cfg(name='xcit-small-12-p16_3rdparty_in1k', + backbone=mmpretrain.models.XCiT), Cfg(name='xcit-nano-12-p8_3rdparty-dist_in1k-384px', - backbone=mmcls.models.XCiT, + backbone=mmpretrain.models.XCiT, input_shape=(1, 3, 384, 
384)), ] @@ -33,7 +34,7 @@ def test_build(cfg: Cfg): return model_name = cfg.name - ModelHub._register_mmcls_models() + ModelHub._register_mmpretrain_models() assert ModelHub.has(model_name) model = get_model(model_name) diff --git a/tests/test_models/test_necks.py b/tests/test_models/test_necks.py index 8fa2156e..c912b4ec 100644 --- a/tests/test_models/test_necks.py +++ b/tests/test_models/test_necks.py @@ -2,8 +2,9 @@ import pytest import torch -from mmcls.models.necks import (GeneralizedMeanPooling, GlobalAveragePooling, - HRFuseScales, LinearReduction) +from mmpretrain.models.necks import (GeneralizedMeanPooling, + GlobalAveragePooling, HRFuseScales, + LinearReduction) def test_gap_neck(): diff --git a/tests/test_models/test_retrievers.py b/tests/test_models/test_retrievers.py index 2880374a..cde78c45 100644 --- a/tests/test_models/test_retrievers.py +++ b/tests/test_models/test_retrievers.py @@ -11,9 +11,9 @@ from mmengine import ConfigDict from mmengine.dataset.utils import default_collate from torch.utils.data import DataLoader, Dataset -from mmcls.datasets.transforms import PackClsInputs -from mmcls.registry import MODELS -from mmcls.structures import ClsDataSample +from mmpretrain.datasets.transforms import PackClsInputs +from mmpretrain.registry import MODELS +from mmpretrain.structures import ClsDataSample class ExampleDataset(Dataset): diff --git a/tests/test_models/test_tta.py b/tests/test_models/test_tta.py index 85b05d32..d52b6d05 100644 --- a/tests/test_models/test_tta.py +++ b/tests/test_models/test_tta.py @@ -4,10 +4,13 @@ from unittest import TestCase import torch from mmengine import ConfigDict +from mmengine.registry import init_default_scope -from mmcls.models import AverageClsScoreTTA, ImageClassifier -from mmcls.registry import MODELS -from mmcls.structures import ClsDataSample +from mmpretrain.models import AverageClsScoreTTA, ImageClassifier +from mmpretrain.registry import MODELS +from mmpretrain.structures import ClsDataSample + 
+init_default_scope('mmpretrain') class TestAverageClsScoreTTA(TestCase): diff --git a/tests/test_models/test_utils/test_attention.py b/tests/test_models/test_utils/test_attention.py index 84941d1b..27c0e093 100644 --- a/tests/test_models/test_utils/test_attention.py +++ b/tests/test_models/test_utils/test_attention.py @@ -5,8 +5,8 @@ from unittest.mock import ANY, MagicMock import pytest import torch -from mmcls.models.utils.attention import (ShiftWindowMSA, WindowMSA, - torch_meshgrid) +from mmpretrain.models.utils.attention import (ShiftWindowMSA, WindowMSA, + torch_meshgrid) def get_relative_position_index(window_size): diff --git a/tests/test_models/test_utils/test_batch_augments.py b/tests/test_models/test_utils/test_batch_augments.py index 65eef588..b4ba3779 100644 --- a/tests/test_models/test_utils/test_batch_augments.py +++ b/tests/test_models/test_utils/test_batch_augments.py @@ -5,8 +5,8 @@ from unittest.mock import MagicMock, patch import numpy as np import torch -from mmcls.models import Mixup, RandomBatchAugment -from mmcls.registry import BATCH_AUGMENTS +from mmpretrain.models import Mixup, RandomBatchAugment +from mmpretrain.registry import BATCH_AUGMENTS class TestRandomBatchAugment(TestCase): diff --git a/tests/test_models/test_utils/test_data_preprocessor.py b/tests/test_models/test_utils/test_data_preprocessor.py index 1a0c3a32..fb550fba 100644 --- a/tests/test_models/test_utils/test_data_preprocessor.py +++ b/tests/test_models/test_utils/test_data_preprocessor.py @@ -3,9 +3,9 @@ from unittest import TestCase import torch -from mmcls.models import ClsDataPreprocessor, RandomBatchAugment -from mmcls.registry import MODELS -from mmcls.structures import ClsDataSample +from mmpretrain.models import ClsDataPreprocessor, RandomBatchAugment +from mmpretrain.registry import MODELS +from mmpretrain.structures import ClsDataSample class TestClsDataPreprocessor(TestCase): diff --git a/tests/test_models/test_utils/test_embed.py 
b/tests/test_models/test_utils/test_embed.py index eb7356b1..cb282008 100644 --- a/tests/test_models/test_utils/test_embed.py +++ b/tests/test_models/test_utils/test_embed.py @@ -2,8 +2,8 @@ import pytest import torch -from mmcls.models.backbones import VGG -from mmcls.models.utils import HybridEmbed, PatchEmbed, PatchMerging +from mmpretrain.models.backbones import VGG +from mmpretrain.models.utils import HybridEmbed, PatchEmbed, PatchMerging def cal_unfold_dim(dim, kernel_size, stride, padding=0, dilation=1): diff --git a/tests/test_models/test_utils/test_inverted_residual.py b/tests/test_models/test_utils/test_inverted_residual.py index 8c363279..e61ceb1f 100644 --- a/tests/test_models/test_utils/test_inverted_residual.py +++ b/tests/test_models/test_utils/test_inverted_residual.py @@ -4,7 +4,7 @@ import torch from torch.nn.modules import GroupNorm from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.utils import InvertedResidual, SELayer +from mmpretrain.models.utils import InvertedResidual, SELayer def is_norm(modules): diff --git a/tests/test_models/test_utils/test_layer_scale.py b/tests/test_models/test_utils/test_layer_scale.py index c33b529d..54b6b60a 100644 --- a/tests/test_models/test_utils/test_layer_scale.py +++ b/tests/test_models/test_utils/test_layer_scale.py @@ -3,7 +3,7 @@ from unittest import TestCase import torch -from mmcls.models.utils import LayerScale +from mmpretrain.models.utils import LayerScale class TestLayerScale(TestCase): diff --git a/tests/test_models/test_utils/test_misc.py b/tests/test_models/test_utils/test_misc.py index 1376d23a..49d233e3 100644 --- a/tests/test_models/test_utils/test_misc.py +++ b/tests/test_models/test_utils/test_misc.py @@ -3,7 +3,7 @@ import pytest import torch from mmengine.utils import digit_version -from mmcls.models.utils import channel_shuffle, is_tracing, make_divisible +from mmpretrain.models.utils import channel_shuffle, is_tracing, make_divisible def test_make_divisible(): diff --git 
a/tests/test_models/test_utils/test_norm.py b/tests/test_models/test_utils/test_norm.py index ddbaafd9..a4d3a8b7 100644 --- a/tests/test_models/test_utils/test_norm.py +++ b/tests/test_models/test_utils/test_norm.py @@ -4,7 +4,7 @@ from unittest import TestCase import torch import torch.nn.functional as F -from mmcls.models.utils import GRN, LayerNorm2d +from mmpretrain.models.utils import GRN, LayerNorm2d class TestGRN(TestCase): diff --git a/tests/test_models/test_utils/test_position_encoding.py b/tests/test_models/test_utils/test_position_encoding.py index feb171c2..221a20df 100644 --- a/tests/test_models/test_utils/test_position_encoding.py +++ b/tests/test_models/test_utils/test_position_encoding.py @@ -1,7 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. import torch -from mmcls.models.utils import ConditionalPositionEncoding +from mmpretrain.models.utils import ConditionalPositionEncoding def test_conditional_position_encoding_module(): diff --git a/tests/test_models/test_utils/test_se.py b/tests/test_models/test_utils/test_se.py index 8cb8c509..447eb085 100644 --- a/tests/test_models/test_utils/test_se.py +++ b/tests/test_models/test_utils/test_se.py @@ -4,7 +4,7 @@ import torch from torch.nn.modules import GroupNorm from torch.nn.modules.batchnorm import _BatchNorm -from mmcls.models.utils import SELayer +from mmpretrain.models.utils import SELayer def is_norm(modules): diff --git a/tests/test_structures/test_datasample.py b/tests/test_structures/test_datasample.py index e02c95fc..8b45909a 100644 --- a/tests/test_structures/test_datasample.py +++ b/tests/test_structures/test_datasample.py @@ -5,7 +5,7 @@ import numpy as np import torch from mmengine.structures import LabelData -from mmcls.structures import ClsDataSample, MultiTaskDataSample +from mmpretrain.structures import ClsDataSample, MultiTaskDataSample class TestClsDataSample(TestCase): diff --git a/tests/test_structures/test_utils.py b/tests/test_structures/test_utils.py index 
998e8b7c..d03fa263 100644 --- a/tests/test_structures/test_utils.py +++ b/tests/test_structures/test_utils.py @@ -4,8 +4,8 @@ from unittest import TestCase import torch from mmengine.structures import LabelData -from mmcls.structures import (batch_label_to_onehot, cat_batch_labels, - stack_batch_scores, tensor_split) +from mmpretrain.structures import (batch_label_to_onehot, cat_batch_labels, + stack_batch_scores, tensor_split) class TestStructureUtils(TestCase): diff --git a/tests/test_utils/test_analyze.py b/tests/test_utils/test_analyze.py index c89ab5f2..d1bb2c45 100644 --- a/tests/test_utils/test_analyze.py +++ b/tests/test_utils/test_analyze.py @@ -2,7 +2,7 @@ import os.path as osp import tempfile -from mmcls.utils import load_json_log +from mmpretrain.utils import load_json_log def test_load_json_log(): diff --git a/tests/test_utils/test_setup_env.py b/tests/test_utils/test_setup_env.py index 22841de7..4f8adee4 100644 --- a/tests/test_utils/test_setup_env.py +++ b/tests/test_utils/test_setup_env.py @@ -5,35 +5,36 @@ from unittest import TestCase from mmengine import DefaultScope -from mmcls.utils import register_all_modules +from mmpretrain.utils import register_all_modules class TestSetupEnv(TestCase): def test_register_all_modules(self): - from mmcls.registry import DATASETS + from mmpretrain.registry import DATASETS # not init default scope - sys.modules.pop('mmcls.datasets', None) - sys.modules.pop('mmcls.datasets.custom', None) + sys.modules.pop('mmpretrain.datasets', None) + sys.modules.pop('mmpretrain.datasets.custom', None) DATASETS._module_dict.pop('CustomDataset', None) self.assertFalse('CustomDataset' in DATASETS.module_dict) register_all_modules(init_default_scope=False) self.assertTrue('CustomDataset' in DATASETS.module_dict) # init default scope - sys.modules.pop('mmcls.datasets') - sys.modules.pop('mmcls.datasets.custom') + sys.modules.pop('mmpretrain.datasets') + sys.modules.pop('mmpretrain.datasets.custom') 
DATASETS._module_dict.pop('CustomDataset', None) self.assertFalse('CustomDataset' in DATASETS.module_dict) register_all_modules(init_default_scope=True) self.assertTrue('CustomDataset' in DATASETS.module_dict) self.assertEqual(DefaultScope.get_current_instance().scope_name, - 'mmcls') + 'mmpretrain') # init default scope when another scope is init name = f'test-{datetime.datetime.now()}' DefaultScope.get_instance(name, scope_name='test') with self.assertWarnsRegex( - Warning, 'The current default scope "test" is not "mmcls"'): + Warning, + 'The current default scope "test" is not "mmpretrain"'): register_all_modules(init_default_scope=True) diff --git a/tests/test_utils/test_version_utils.py b/tests/test_utils/test_version_utils.py index f4bb3892..07105e08 100644 --- a/tests/test_utils/test_version_utils.py +++ b/tests/test_utils/test_version_utils.py @@ -1,5 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. -from mmcls import digit_version +from mmpretrain import digit_version def test_digit_version(): diff --git a/tests/test_visualizations/test_visualizer.py b/tests/test_visualizations/test_visualizer.py index ab8059ba..d0921843 100644 --- a/tests/test_visualizations/test_visualizer.py +++ b/tests/test_visualizations/test_visualizer.py @@ -7,8 +7,8 @@ from unittest.mock import patch import numpy as np import torch -from mmcls.structures import ClsDataSample -from mmcls.visualization import ClsVisualizer +from mmpretrain.structures import ClsDataSample +from mmpretrain.visualization import ClsVisualizer class TestClsVisualizer(TestCase): diff --git a/tools/analysis_tools/analyze_logs.py b/tools/analysis_tools/analyze_logs.py index f4f54cf1..f51d8877 100644 --- a/tools/analysis_tools/analyze_logs.py +++ b/tools/analysis_tools/analyze_logs.py @@ -7,7 +7,7 @@ from itertools import groupby import matplotlib.pyplot as plt import numpy as np -from mmcls.utils import load_json_log +from mmpretrain.utils import load_json_log def cal_train_time(log_dicts, args): diff 
--git a/tools/analysis_tools/analyze_results.py b/tools/analysis_tools/analyze_results.py index 3f7bc511..b4837ece 100644 --- a/tools/analysis_tools/analyze_results.py +++ b/tools/analysis_tools/analyze_results.py @@ -8,9 +8,9 @@ import mmengine import torch from mmengine import DictAction -from mmcls.datasets import build_dataset -from mmcls.structures import ClsDataSample -from mmcls.visualization import ClsVisualizer +from mmpretrain.datasets import build_dataset +from mmpretrain.structures import ClsDataSample +from mmpretrain.visualization import ClsVisualizer def parse_args(): diff --git a/tools/analysis_tools/confusion_matrix.py b/tools/analysis_tools/confusion_matrix.py index 214d0a9b..0e6382cf 100644 --- a/tools/analysis_tools/confusion_matrix.py +++ b/tools/analysis_tools/confusion_matrix.py @@ -7,9 +7,9 @@ from mmengine.config import Config, DictAction from mmengine.evaluator import Evaluator from mmengine.runner import Runner -from mmcls.evaluation import ConfusionMatrix -from mmcls.registry import DATASETS -from mmcls.utils import register_all_modules +from mmpretrain.evaluation import ConfusionMatrix +from mmpretrain.registry import DATASETS +from mmpretrain.utils import register_all_modules def parse_args(): @@ -54,7 +54,7 @@ def parse_args(): def main(): args = parse_args() - # register all modules in mmcls into the registries + # register all modules in mmpretrain into the registries # do not init the default scope here because it will be init in the runner register_all_modules(init_default_scope=False) diff --git a/tools/analysis_tools/eval_metric.py b/tools/analysis_tools/eval_metric.py index 17d346e4..cea1a9aa 100644 --- a/tools/analysis_tools/eval_metric.py +++ b/tools/analysis_tools/eval_metric.py @@ -6,10 +6,10 @@ import rich from mmengine import DictAction from mmengine.evaluator import Evaluator -from mmcls.registry import METRICS +from mmpretrain.registry import METRICS HELP_URL = ( - 
'https://mmclassification.readthedocs.io/en/dev-1.x/useful_tools/' + 'https://mmpretrain.readthedocs.io/en/dev-1.x/useful_tools/' 'log_result_analysis.html#how-to-conduct-offline-metric-evaluation') prog_description = f"""\ diff --git a/tools/analysis_tools/get_flops.py b/tools/analysis_tools/get_flops.py index 2c50d18f..2f2a66d4 100644 --- a/tools/analysis_tools/get_flops.py +++ b/tools/analysis_tools/get_flops.py @@ -12,7 +12,7 @@ except ImportError: from fvcore.nn.print_model_statistics import _format_size from mmengine import Config -from mmcls.models import build_classifier +from mmpretrain.models import build_classifier def parse_args(): diff --git a/tools/misc/verify_dataset.py b/tools/misc/verify_dataset.py index 05e7bb93..c45e20bd 100644 --- a/tools/misc/verify_dataset.py +++ b/tools/misc/verify_dataset.py @@ -7,8 +7,8 @@ from pathlib import Path from mmengine import (Config, DictAction, track_parallel_progress, track_progress) -from mmcls.datasets import build_dataset -from mmcls.registry import TRANSFORMS +from mmpretrain.datasets import build_dataset +from mmpretrain.registry import TRANSFORMS def parse_args(): diff --git a/tools/model_converters/clip_to_mmcls.py b/tools/model_converters/clip_to_mmpretrain.py similarity index 93% rename from tools/model_converters/clip_to_mmcls.py rename to tools/model_converters/clip_to_mmpretrain.py index 6c179cfb..54426281 100644 --- a/tools/model_converters/clip_to_mmcls.py +++ b/tools/model_converters/clip_to_mmpretrain.py @@ -45,12 +45,10 @@ def convert_clip(ckpt): return new_ckpt -'${ls /mnt/lustre/lirongjie/tmp/clip_ckpt/download_ckpt}' - - def main(): parser = argparse.ArgumentParser( - description='Convert keys in pretrained clip models to mmcls style.') + description='Convert keys in pretrained clip ' + 'models to mmpretrain style.') parser.add_argument('src', help='src model path or url') # The dst path must be a full path of the new checkpoint. 
parser.add_argument('dst', help='save path') diff --git a/tools/model_converters/convnext_to_mmcls.py b/tools/model_converters/convnext_to_mmpretrain.py similarity index 94% rename from tools/model_converters/convnext_to_mmcls.py rename to tools/model_converters/convnext_to_mmpretrain.py index da24aca8..82f62361 100644 --- a/tools/model_converters/convnext_to_mmcls.py +++ b/tools/model_converters/convnext_to_mmpretrain.py @@ -38,7 +38,8 @@ def convert_convnext(ckpt): def main(): parser = argparse.ArgumentParser( - description='Convert keys in pretrained van models to mmcls style.') + description='Convert keys in pretrained convnext ' + 'models to mmpretrain style.') parser.add_argument('src', help='src model path or url') # The dst path must be a full path of the new checkpoint. parser.add_argument('dst', help='save path') diff --git a/tools/model_converters/davit_to_mmcls.py b/tools/model_converters/davit_to_mmpretrain.py similarity index 97% rename from tools/model_converters/davit_to_mmcls.py rename to tools/model_converters/davit_to_mmpretrain.py index acd76ed5..c5780262 100644 --- a/tools/model_converters/davit_to_mmcls.py +++ b/tools/model_converters/davit_to_mmpretrain.py @@ -62,7 +62,8 @@ def convert_davit(ckpt): def main(): parser = argparse.ArgumentParser( - description='Convert keys in pretrained van models to mmcls style.') + description='Convert keys in pretrained davit ' + 'models to mmpretrain style.') parser.add_argument('src', help='src model path or url') # The dst path must be a full path of the new checkpoint. 
parser.add_argument('dst', help='save path') diff --git a/tools/model_converters/deit3_to_mmcls.py b/tools/model_converters/deit3_to_mmpretrain.py similarity index 95% rename from tools/model_converters/deit3_to_mmcls.py rename to tools/model_converters/deit3_to_mmpretrain.py index 73427870..0ceed1f0 100644 --- a/tools/model_converters/deit3_to_mmcls.py +++ b/tools/model_converters/deit3_to_mmpretrain.py @@ -50,7 +50,8 @@ def convert_deit3(ckpt): def main(): parser = argparse.ArgumentParser( - description='Convert keys in pretrained van models to mmcls style.') + description='Convert keys in pretrained deit3 ' + 'models to mmpretrain style.') parser.add_argument('src', help='src model path or url') # The dst path must be a full path of the new checkpoint. parser.add_argument('dst', help='save path') diff --git a/tools/model_converters/edgenext_to_mmcls.py b/tools/model_converters/edgenext_to_mmpretrain.py similarity index 95% rename from tools/model_converters/edgenext_to_mmcls.py rename to tools/model_converters/edgenext_to_mmpretrain.py index 52224905..64a54680 100644 --- a/tools/model_converters/edgenext_to_mmcls.py +++ b/tools/model_converters/edgenext_to_mmpretrain.py @@ -8,11 +8,11 @@ import torch def convert_weights(weight): """Weight Converter. 
- Converts the weights from timm to mmcls + Converts the weights from timm to mmpretrain Args: weight (dict): weight dict from timm Returns: - Converted weight dict for mmcls + Converted weight dict for mmpretrain """ result = dict() result['meta'] = dict() diff --git a/tools/model_converters/efficientnet_to_mmcls.py b/tools/model_converters/efficientnet_to_mmpretrain.py similarity index 99% rename from tools/model_converters/efficientnet_to_mmcls.py rename to tools/model_converters/efficientnet_to_mmpretrain.py index c193cc54..f1541e38 100644 --- a/tools/model_converters/efficientnet_to_mmcls.py +++ b/tools/model_converters/efficientnet_to_mmpretrain.py @@ -7,7 +7,7 @@ import torch from mmengine.model import Sequential from tensorflow.python.training import py_checkpoint_reader -from mmcls.models.backbones.efficientnet import EfficientNet +from mmpretrain.models.backbones.efficientnet import EfficientNet def tf2pth(v): diff --git a/tools/model_converters/efficientnetv2_to_mmcls.py b/tools/model_converters/efficientnetv2_to_mmpretrain.py similarity index 96% rename from tools/model_converters/efficientnetv2_to_mmcls.py rename to tools/model_converters/efficientnetv2_to_mmpretrain.py index b6ae4ec1..5ada7ecc 100644 --- a/tools/model_converters/efficientnetv2_to_mmcls.py +++ b/tools/model_converters/efficientnetv2_to_mmpretrain.py @@ -1,6 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. 
"""convert the weights of efficientnetv2 in -timm(https://github.com/rwightman/pytorch-image-models) to mmcls format.""" +timm(https://github.com/rwightman/pytorch-image-models) to mmpretrain +format.""" import argparse import os.path as osp @@ -75,7 +76,7 @@ def convert_from_efficientnetv2_timm(param): def main(): parser = argparse.ArgumentParser( description='Convert pretrained efficientnetv2 ' - 'models in timm to mmcls style.') + 'models in timm to mmpretrain style.') parser.add_argument('src', help='src model path or url') # The dst path must be a full path of the new checkpoint. parser.add_argument('dst', help='save path') diff --git a/tools/model_converters/eva_to_mmcls.py b/tools/model_converters/eva_to_mmpretrain.py similarity index 95% rename from tools/model_converters/eva_to_mmcls.py rename to tools/model_converters/eva_to_mmpretrain.py index 14ac24ec..227e377a 100644 --- a/tools/model_converters/eva_to_mmcls.py +++ b/tools/model_converters/eva_to_mmpretrain.py @@ -51,7 +51,8 @@ def convert_eva(ckpt): def main(): parser = argparse.ArgumentParser( - description='Convert keys in pretrained van models to mmcls style.') + description='Convert keys in pretrained eva ' + 'models to mmpretrain style.') parser.add_argument('src', help='src model path or url') # The dst path must be a full path of the new checkpoint. 
parser.add_argument('dst', help='save path') diff --git a/tools/model_converters/hornet2mmcls.py b/tools/model_converters/hornet2mmpretrain.py similarity index 94% rename from tools/model_converters/hornet2mmcls.py rename to tools/model_converters/hornet2mmpretrain.py index 6866a890..667a94c0 100644 --- a/tools/model_converters/hornet2mmcls.py +++ b/tools/model_converters/hornet2mmpretrain.py @@ -37,7 +37,8 @@ def convert_hornet(ckpt): def main(): parser = argparse.ArgumentParser( - description='Convert keys in pretrained van models to mmcls style.') + description='Convert keys in pretrained hornet ' + 'models to mmpretrain style.') parser.add_argument('src', help='src model path or url') # The dst path must be a full path of the new checkpoint. parser.add_argument('dst', help='save path') diff --git a/tools/model_converters/levit2mmcls.py b/tools/model_converters/levit2mmpretrain.py similarity index 98% rename from tools/model_converters/levit2mmcls.py rename to tools/model_converters/levit2mmpretrain.py index 8e2e6454..4e28e281 100644 --- a/tools/model_converters/levit2mmcls.py +++ b/tools/model_converters/levit2mmpretrain.py @@ -57,7 +57,7 @@ def convert_levit(args, ckpt): def main(): parser = argparse.ArgumentParser( description='Convert keys in timm pretrained vit models to ' - 'MMClassification style.') + 'MMPretrain style.') parser.add_argument('src', help='src model path or url') # The dst path must be a full path of the new checkpoint. 
parser.add_argument('dst', help='save path') diff --git a/tools/model_converters/mixmimx_to_mmcls.py b/tools/model_converters/mixmim_to_mmpretrain.py similarity index 96% rename from tools/model_converters/mixmimx_to_mmcls.py rename to tools/model_converters/mixmim_to_mmpretrain.py index dcf9858b..b31bb005 100644 --- a/tools/model_converters/mixmimx_to_mmcls.py +++ b/tools/model_converters/mixmim_to_mmpretrain.py @@ -73,7 +73,8 @@ def convert_mixmim(ckpt): def main(): parser = argparse.ArgumentParser( - description='Convert keys in pretrained van models to mmcls style.') + description='Convert keys in pretrained mixmim ' + 'models to mmpretrain style.') parser.add_argument('src', help='src model path or url') # The dst path must be a full path of the new checkpoint. parser.add_argument('dst', help='save path') diff --git a/tools/model_converters/mlpmixer_to_mmcls.py b/tools/model_converters/mlpmixer_to_mmpretrain.py similarity index 94% rename from tools/model_converters/mlpmixer_to_mmcls.py rename to tools/model_converters/mlpmixer_to_mmpretrain.py index 6096c138..e1015148 100644 --- a/tools/model_converters/mlpmixer_to_mmcls.py +++ b/tools/model_converters/mlpmixer_to_mmpretrain.py @@ -8,12 +8,12 @@ import torch def convert_weights(weight): """Weight Converter. 
- Converts the weights from timm to mmcls + Converts the weights from timm to mmpretrain Args: weight (dict): weight dict from timm - Returns: converted weight dict for mmcls + Returns: converted weight dict for mmpretrain """ result = dict() result['meta'] = dict() diff --git a/tools/model_converters/mobilenetv2_to_mmcls.py b/tools/model_converters/mobilenetv2_to_mmpretrain.py similarity index 99% rename from tools/model_converters/mobilenetv2_to_mmcls.py rename to tools/model_converters/mobilenetv2_to_mmpretrain.py index 7f6654ed..878f7378 100644 --- a/tools/model_converters/mobilenetv2_to_mmcls.py +++ b/tools/model_converters/mobilenetv2_to_mmpretrain.py @@ -93,7 +93,7 @@ def convert_block(model_key, model_weight, state_dict, converted_names): def convert(src, dst): - """Convert keys in torchvision pretrained MobileNetV2 models to mmcls + """Convert keys in torchvision pretrained MobileNetV2 models to mmpretrain style.""" # load pytorch model diff --git a/tools/model_converters/reparameterize_model.py b/tools/model_converters/reparameterize_model.py index ba6eea06..f2f3e7c4 100644 --- a/tools/model_converters/reparameterize_model.py +++ b/tools/model_converters/reparameterize_model.py @@ -4,8 +4,8 @@ from pathlib import Path import torch -from mmcls.apis import init_model -from mmcls.models.classifiers import ImageClassifier +from mmpretrain.apis import init_model +from mmpretrain.models.classifiers import ImageClassifier def convert_classifier_to_deploy(model, checkpoint, save_path): @@ -47,7 +47,7 @@ def main(): model = init_model( args.config_path, checkpoint=args.checkpoint_path, device='cpu') assert isinstance(model, ImageClassifier), \ - '`model` must be a `mmcls.classifiers.ImageClassifier` instance.' + '`model` must be a `mmpretrain.classifiers.ImageClassifier` instance.' 
checkpoint = torch.load(args.checkpoint_path) convert_classifier_to_deploy(model, checkpoint, args.save_path) diff --git a/tools/model_converters/replknet_to_mmcls.py b/tools/model_converters/replknet_to_mmpretrain.py similarity index 100% rename from tools/model_converters/replknet_to_mmcls.py rename to tools/model_converters/replknet_to_mmpretrain.py diff --git a/tools/model_converters/repvgg_to_mmcls.py b/tools/model_converters/repvgg_to_mmpretrain.py similarity index 100% rename from tools/model_converters/repvgg_to_mmcls.py rename to tools/model_converters/repvgg_to_mmpretrain.py diff --git a/tools/model_converters/revvit_to_mmcls.py b/tools/model_converters/revvit_to_mmpretrain.py similarity index 97% rename from tools/model_converters/revvit_to_mmcls.py rename to tools/model_converters/revvit_to_mmpretrain.py index 8b5f1dd5..b3908d31 100644 --- a/tools/model_converters/revvit_to_mmcls.py +++ b/tools/model_converters/revvit_to_mmpretrain.py @@ -80,7 +80,8 @@ def convert_revvit(ckpt): def main(): parser = argparse.ArgumentParser( - description='Convert keys in pretrained van models to mmcls style.') + description='Convert keys in pretrained revvit' + ' models to mmpretrain style.') parser.add_argument('src', help='src model path or url') # The dst path must be a full path of the new checkpoint. 
parser.add_argument('dst', help='save path') diff --git a/tools/model_converters/shufflenetv2_to_mmcls.py b/tools/model_converters/shufflenetv2_to_mmpretrain.py similarity index 99% rename from tools/model_converters/shufflenetv2_to_mmcls.py rename to tools/model_converters/shufflenetv2_to_mmpretrain.py index 69046c36..f909e41a 100644 --- a/tools/model_converters/shufflenetv2_to_mmcls.py +++ b/tools/model_converters/shufflenetv2_to_mmpretrain.py @@ -71,7 +71,7 @@ def convert_block(model_key, model_weight, state_dict, converted_names): def convert(src, dst): - """Convert keys in torchvision pretrained ShuffleNetV2 models to mmcls + """Convert keys in torchvision pretrained ShuffleNetV2 models to mmpretrain style.""" # load pytorch model diff --git a/tools/model_converters/tinyvit_to_mmcls.py b/tools/model_converters/tinyvit_to_mmpretrain.py similarity index 94% rename from tools/model_converters/tinyvit_to_mmcls.py rename to tools/model_converters/tinyvit_to_mmpretrain.py index e2791998..0aad47cd 100644 --- a/tools/model_converters/tinyvit_to_mmcls.py +++ b/tools/model_converters/tinyvit_to_mmpretrain.py @@ -8,11 +8,11 @@ import torch def convert_weights(weight): """Weight Converter. 
- Converts the weights from timm to mmcls + Converts the weights from timm to mmpretrain Args: weight (dict): weight dict from timm Returns: - Converted weight dict for mmcls + Converted weight dict for mmpretrain """ result = dict() result['meta'] = dict() diff --git a/tools/model_converters/torchvision_to_mmcls.py b/tools/model_converters/torchvision_to_mmpretrain.py similarity index 100% rename from tools/model_converters/torchvision_to_mmcls.py rename to tools/model_converters/torchvision_to_mmpretrain.py diff --git a/tools/model_converters/twins2mmcls.py b/tools/model_converters/twins2mmpretrain.py similarity index 98% rename from tools/model_converters/twins2mmcls.py rename to tools/model_converters/twins2mmpretrain.py index e0ea04c2..84891305 100644 --- a/tools/model_converters/twins2mmcls.py +++ b/tools/model_converters/twins2mmpretrain.py @@ -50,7 +50,7 @@ def convert_twins(args, ckpt): def main(): parser = argparse.ArgumentParser( description='Convert keys in timm pretrained vit models to ' - 'MMClassification style.') + 'MMPretrain style.') parser.add_argument('src', help='src model path or url') # The dst path must be a full path of the new checkpoint. 
parser.add_argument('dst', help='save path') diff --git a/tools/model_converters/van2mmcls.py b/tools/model_converters/van2mmpretrain.py similarity index 89% rename from tools/model_converters/van2mmcls.py rename to tools/model_converters/van2mmpretrain.py index 5ea7d9ca..563f3d95 100644 --- a/tools/model_converters/van2mmcls.py +++ b/tools/model_converters/van2mmpretrain.py @@ -3,9 +3,9 @@ import argparse import os.path as osp from collections import OrderedDict -import mmcv +import mmengine import torch -from mmcv.runner import CheckpointLoader +from mmengine.runner import CheckpointLoader def convert_van(ckpt): @@ -41,7 +41,8 @@ def convert_van(ckpt): def main(): parser = argparse.ArgumentParser( - description='Convert keys in pretrained van models to mmcls style.') + description='Convert keys in pretrained van ' + 'models to mmpretrain style.') parser.add_argument('src', help='src model path or url') # The dst path must be a full path of the new checkpoint. parser.add_argument('dst', help='save path') @@ -55,7 +56,7 @@ def main(): state_dict = checkpoint weight = convert_van(state_dict) - mmcv.mkdir_or_exist(osp.dirname(args.dst)) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) torch.save(weight, args.dst) print('Done!!') diff --git a/tools/model_converters/vgg_to_mmcls.py b/tools/model_converters/vgg_to_mmpretrain.py similarity index 97% rename from tools/model_converters/vgg_to_mmcls.py rename to tools/model_converters/vgg_to_mmpretrain.py index b5ab87f6..46178db5 100644 --- a/tools/model_converters/vgg_to_mmcls.py +++ b/tools/model_converters/vgg_to_mmpretrain.py @@ -57,7 +57,8 @@ def get_layer_maps(layer_num, with_bn): def convert(src, dst, layer_num, with_bn=False): - """Convert keys in torchvision pretrained VGG models to mmcls style.""" + """Convert keys in torchvision pretrained VGG models to mmpretrain + style.""" # load pytorch model assert os.path.isfile(src), f'no checkpoint found at {src}' diff --git a/tools/model_converters/vig_to_mmcls.py 
b/tools/model_converters/vig_to_mmpretrain.py similarity index 97% rename from tools/model_converters/vig_to_mmcls.py rename to tools/model_converters/vig_to_mmpretrain.py index 28ebd491..2642c7d8 100644 --- a/tools/model_converters/vig_to_mmcls.py +++ b/tools/model_converters/vig_to_mmpretrain.py @@ -69,7 +69,8 @@ def convert_pvig(ckpt): def main(): parser = argparse.ArgumentParser( - description='Convert keys in pretrained vig models to mmcls style.') + description='Convert keys in pretrained vig models to ' + 'mmpretrain style.') parser.add_argument('src', help='src model path or url') # The dst path must be a full path of the new checkpoint. parser.add_argument('dst', help='save path') diff --git a/tools/test.py b/tools/test.py index 37693ca5..3d62a95b 100644 --- a/tools/test.py +++ b/tools/test.py @@ -104,7 +104,7 @@ def merge_args(cfg, args): # -------------------- TTA related args -------------------- if args.tta: if 'tta_model' not in cfg: - cfg.tta_model = dict(type='mmcls.AverageClsScoreTTA') + cfg.tta_model = dict(type='mmpretrain.AverageClsScoreTTA') if 'tta_pipeline' not in cfg: test_pipeline = cfg.test_dataloader.dataset.pipeline cfg.tta_pipeline = deepcopy(test_pipeline) diff --git a/tools/torchserve/mmcls2torchserve.py b/tools/torchserve/mmcls2torchserve.py index a8a17bde..8d53bf3f 100644 --- a/tools/torchserve/mmcls2torchserve.py +++ b/tools/torchserve/mmcls2torchserve.py @@ -14,7 +14,7 @@ except ImportError: 'install required third-party libraries.') -def mmcls2torchserve( +def mmpretrain2torchserve( config_file: str, checkpoint_file: str, output_folder: str, @@ -22,15 +22,14 @@ def mmcls2torchserve( model_version: str = '1.0', force: bool = False, ): - """Converts mmclassification model (config + checkpoint) to TorchServe - `.mar`. + """Converts mmpretrain model (config + checkpoint) to TorchServe `.mar`. Args: config_file: - In MMClassification config format. + In MMPretrain config format. The contents vary for each task repository. 
checkpoint_file: - In MMClassification checkpoint format. + In MMPretrain checkpoint format. The contents vary for each task repository. output_folder: Folder where `{model_name}.mar` will be created. @@ -56,7 +55,7 @@ def mmcls2torchserve( **{ 'model_file': f'{tmpdir}/config.py', 'serialized_file': checkpoint_file, - 'handler': f'{Path(__file__).parent}/mmcls_handler.py', + 'handler': f'{Path(__file__).parent}/mmpretrain_handler.py', 'model_name': model_name or Path(checkpoint_file).stem, 'version': model_version, 'export_path': output_folder, @@ -72,7 +71,7 @@ def mmcls2torchserve( def parse_args(): parser = ArgumentParser( - description='Convert mmcls models to TorchServe `.mar` format.') + description='Convert mmpretrain models to TorchServe `.mar` format.') parser.add_argument('config', type=str, help='config file path') parser.add_argument('checkpoint', type=str, help='checkpoint file path') parser.add_argument( @@ -109,5 +108,5 @@ if __name__ == '__main__': raise ImportError('`torch-model-archiver` is required.' 
'Try: pip install torch-model-archiver') - mmcls2torchserve(args.config, args.checkpoint, args.output_folder, - args.model_name, args.model_version, args.force) + mmpretrain2torchserve(args.config, args.checkpoint, args.output_folder, + args.model_name, args.model_version, args.force) diff --git a/tools/torchserve/mmcls_handler.py b/tools/torchserve/mmcls_handler.py index 68815e96..061ca800 100644 --- a/tools/torchserve/mmcls_handler.py +++ b/tools/torchserve/mmcls_handler.py @@ -6,7 +6,7 @@ import mmcv import torch from ts.torch_handler.base_handler import BaseHandler -from mmcls.apis import inference_model, init_model +from mmpretrain.apis import inference_model, init_model class MMclsHandler(BaseHandler): diff --git a/tools/torchserve/test_torchserver.py b/tools/torchserve/test_torchserver.py index c964722d..08cc9238 100644 --- a/tools/torchserve/test_torchserver.py +++ b/tools/torchserve/test_torchserver.py @@ -4,7 +4,7 @@ from argparse import ArgumentParser import numpy as np import requests -from mmcls.apis import inference_model, init_model +from mmpretrain.apis import inference_model, init_model def parse_args(): diff --git a/tools/visualizations/browse_dataset.py b/tools/visualizations/browse_dataset.py index 9aacc8c7..b2611d5f 100644 --- a/tools/visualizations/browse_dataset.py +++ b/tools/visualizations/browse_dataset.py @@ -12,9 +12,9 @@ from mmengine.registry import init_default_scope from mmengine.utils import ProgressBar from mmengine.visualization import Visualizer -from mmcls.datasets.builder import build_dataset -from mmcls.visualization import ClsVisualizer -from mmcls.visualization.cls_visualizer import _get_adaptive_scale +from mmpretrain.datasets.builder import build_dataset +from mmpretrain.visualization import ClsVisualizer +from mmpretrain.visualization.cls_visualizer import _get_adaptive_scale def parse_args(): @@ -170,7 +170,7 @@ def main(): if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) - 
init_default_scope('mmcls') # Use mmcls as default scope. + init_default_scope('mmpretrain') # Use mmpretrain as default scope. dataset_cfg = cfg.get(args.phase + '_dataloader').get('dataset') dataset = build_dataset(dataset_cfg) diff --git a/tools/visualizations/vis_cam.py b/tools/visualizations/vis_cam.py index e1231d4f..0392752d 100644 --- a/tools/visualizations/vis_cam.py +++ b/tools/visualizations/vis_cam.py @@ -15,8 +15,8 @@ from mmengine.registry import init_default_scope from mmengine.utils import to_2tuple from torch.nn import BatchNorm1d, BatchNorm2d, GroupNorm, LayerNorm -from mmcls import digit_version -from mmcls.apis import init_model +from mmpretrain import digit_version +from mmpretrain.apis import init_model try: from pytorch_grad_cam import (EigenCAM, EigenGradCAM, GradCAM, @@ -159,8 +159,8 @@ def build_reshape_transform(model, args): def init_cam(method, model, target_layers, use_cuda, reshape_transform): - """Construct the CAM object once, In order to be compatible with mmcls, - here we modify the ActivationsAndGradients object.""" + """Construct the CAM object once, In order to be compatible with + mmpretrain, here we modify the ActivationsAndGradients object.""" GradCAM_Class = METHOD_MAP[method.lower()] cam = GradCAM_Class( model=model, target_layers=target_layers, use_cuda=use_cuda) @@ -265,7 +265,7 @@ def main(): if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) - init_default_scope('mmcls') + init_default_scope('mmpretrain') # build the model from a config file and a checkpoint file model = init_model(cfg, args.checkpoint, device=args.device) if args.preview_model: diff --git a/tools/visualizations/vis_scheduler.py b/tools/visualizations/vis_scheduler.py index 674a0502..cdf99ca8 100644 --- a/tools/visualizations/vis_scheduler.py +++ b/tools/visualizations/vis_scheduler.py @@ -234,7 +234,7 @@ def main(): raise ValueError('please set `train_cfg`.') if args.dataset_size is None and by_epoch: - from mmcls.datasets import 
build_dataset + from mmpretrain.datasets import build_dataset dataset_size = len(build_dataset(cfg.train_dataloader.dataset)) else: dataset_size = args.dataset_size or batch_size