diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 1889a066..a4e208de 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,7 +1,4 @@
-default_language_version:
- ruby: 2.7.1
-
exclude: ^tests/data/
repos:
- repo: https://github.com/PyCQA/flake8
@@ -33,6 +30,15 @@ repos:
rev: v2.1.0
hooks:
- id: codespell
+ - repo: https://github.com/executablebooks/mdformat
+ rev: 0.7.14
+ hooks:
+ - id: mdformat
+ args: ["--number"]
+ additional_dependencies:
+ - mdformat-gfm
+ - mdformat_frontmatter
+ - linkify-it-py
- repo: https://github.com/myint/docformatter
rev: v1.3.1
hooks:
@@ -53,3 +59,12 @@ repos:
- id: check-algo-readme
- id: check-copyright
args: [ "mmrazor", "tests", "tools"]
+ - repo: https://github.com/pre-commit/mirrors-mypy
+ rev: v0.812
+ hooks:
+ - id: mypy
+ exclude: |-
+ (?x)(
+ ^test
+ | ^docs
+ )
diff --git a/README_zh-CN.md b/README_zh-CN.md
index 9bc20b49..cf5fd0a5 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -85,12 +85,11 @@ MMRazor v0.3.1 版本已经在 2022.5.4 发布。
## 基准测试和模型库
-测试结果可以在 [模型库](docs/en/model_zoo.md) 中找到。
+测试结果可以在 [模型库](docs/en/model_zoo.md) 中找到。
已经支持的算法:
-
-Neural Architecture Search
+Neural Architecture Search
- [x] [DARTS(ICLR'2019)](configs/nas/darts)
@@ -98,24 +97,16 @@ MMRazor v0.3.1 版本已经在 2022.5.4 发布。
- [x] [SPOS(ECCV'2020)](configs/nas/spos)
-
-
-
-Pruning
+Pruning
- [x] [AutoSlim(NeurIPS'2019)](/configs/pruning/autoslim)
-
-
-
-Knowledge Distillation
+Knowledge Distillation
- [x] [CWD(ICCV'2021)](/configs/distill/cwd)
- [x] [WSLD(ICLR'2021)](/configs/distill/wsld)
-
-
## 安装
MMRazor 依赖 [PyTorch](https://pytorch.org/) 和 [MMCV](https://github.com/open-mmlab/mmcv)。
@@ -124,7 +115,7 @@ MMRazor 依赖 [PyTorch](https://pytorch.org/) 和 [MMCV](https://github.com/ope
## 快速入门
-请参考 [train.md](/docs/en/train.md) 和 [test.md](/docs/en/test.md) 学习 MMRazor 的基本使用。 我们也提供了一些进阶教程:
+请参考 [get_started.md](/docs/en/get_started.md) 学习 MMRazor 的基本使用。 我们也提供了一些进阶教程:
- [overview](/docs/en/tutorials/Tutorial_1_overview.md)
- [learn about configs](/docs/en/tutorials/Tutorial_2_learn_about_configs.md)
diff --git a/configs/_base_/datasets/mmcls/cifar100_bs16.py b/configs/_base_/datasets/mmcls/cifar100_bs16.py
deleted file mode 100644
index d4f8db75..00000000
--- a/configs/_base_/datasets/mmcls/cifar100_bs16.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# dataset settings
-dataset_type = 'CIFAR100'
-img_norm_cfg = dict(
- mean=[129.304, 124.070, 112.434],
- std=[68.170, 65.392, 70.418],
- to_rgb=False)
-train_pipeline = [
- dict(type='RandomCrop', size=32, padding=4),
- dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='ToTensor', keys=['gt_label']),
- dict(type='Collect', keys=['img', 'gt_label'])
-]
-test_pipeline = [
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
-]
-data = dict(
- samples_per_gpu=16,
- workers_per_gpu=2,
- train=dict(
- type=dataset_type,
- data_prefix='data/cifar100',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_prefix='data/cifar100',
- pipeline=test_pipeline,
- test_mode=True),
- test=dict(
- type=dataset_type,
- data_prefix='data/cifar100',
- pipeline=test_pipeline,
- test_mode=True))
diff --git a/configs/_base_/datasets/mmcls/cifar10_bs16.py b/configs/_base_/datasets/mmcls/cifar10_bs16.py
deleted file mode 100644
index 0d28adf5..00000000
--- a/configs/_base_/datasets/mmcls/cifar10_bs16.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# dataset settings
-dataset_type = 'CIFAR10'
-img_norm_cfg = dict(
- mean=[125.307, 122.961, 113.8575],
- std=[51.5865, 50.847, 51.255],
- to_rgb=False)
-train_pipeline = [
- dict(type='RandomCrop', size=32, padding=4),
- dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='ToTensor', keys=['gt_label']),
- dict(type='Collect', keys=['img', 'gt_label'])
-]
-test_pipeline = [
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
-]
-data = dict(
- samples_per_gpu=16,
- workers_per_gpu=2,
- train=dict(
- type=dataset_type, data_prefix='data/cifar10',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_prefix='data/cifar10',
- pipeline=test_pipeline,
- test_mode=True),
- test=dict(
- type=dataset_type,
- data_prefix='data/cifar10',
- pipeline=test_pipeline,
- test_mode=True))
diff --git a/configs/_base_/datasets/mmcls/cifar10_bs96_cutout.py b/configs/_base_/datasets/mmcls/cifar10_bs96_cutout.py
deleted file mode 100644
index 0abf51ad..00000000
--- a/configs/_base_/datasets/mmcls/cifar10_bs96_cutout.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# dataset settings
-dataset_type = 'CIFAR10'
-img_norm_cfg = dict(
- mean=[125.307, 122.961, 113.8575],
- std=[51.5865, 50.847, 51.255],
- to_rgb=False)
-train_pipeline = [
- dict(type='RandomCrop', size=32, padding=4),
- dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Cutout', shape=16, pad_val=0, prob=1.0),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='ToTensor', keys=['gt_label']),
- dict(type='Collect', keys=['img', 'gt_label'])
-]
-test_pipeline = [
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
-]
-data = dict(
- samples_per_gpu=96,
- workers_per_gpu=2,
- train=dict(
- type=dataset_type, data_prefix='data/cifar10',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_prefix='data/cifar10',
- pipeline=test_pipeline,
- test_mode=True),
- test=dict(
- type=dataset_type,
- data_prefix='data/cifar10',
- pipeline=test_pipeline,
- test_mode=True))
diff --git a/configs/_base_/datasets/mmcls/imagenet_bs128_colorjittor.py b/configs/_base_/datasets/mmcls/imagenet_bs128_colorjittor.py
deleted file mode 100644
index c0cf884b..00000000
--- a/configs/_base_/datasets/mmcls/imagenet_bs128_colorjittor.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# dataset settings
-dataset_type = 'ImageNet'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='RandomResizedCrop', size=224),
- dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4),
- dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='ToTensor', keys=['gt_label']),
- dict(type='Collect', keys=['img', 'gt_label'])
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='Resize', size=(256, -1)),
- dict(type='CenterCrop', crop_size=224),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
-]
-data = dict(
- samples_per_gpu=128,
- workers_per_gpu=8,
- train=dict(
- type=dataset_type,
- data_prefix='data/imagenet/train',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline),
- test=dict(
- # replace `data/val` with `data/test` for standard test
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline))
-evaluation = dict(interval=1, metric='accuracy')
diff --git a/configs/_base_/datasets/mmcls/imagenet_bs256_autoslim.py b/configs/_base_/datasets/mmcls/imagenet_bs256_autoslim.py
deleted file mode 100644
index 262d9478..00000000
--- a/configs/_base_/datasets/mmcls/imagenet_bs256_autoslim.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# dataset settings
-dataset_type = 'ImageNet'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='RandomResizedCrop',
- size=224,
- scale=(0.25, 1.0),
- backend='pillow'),
- dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4),
- dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='ToTensor', keys=['gt_label']),
- dict(type='Collect', keys=['img', 'gt_label'])
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='Resize', size=(256, -1), backend='pillow'),
- dict(type='CenterCrop', crop_size=224),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
-]
-data = dict(
- samples_per_gpu=256,
- workers_per_gpu=4,
- train=dict(
- type=dataset_type,
- data_prefix='data/imagenet/train',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline),
- test=dict(
- # replace `data/val` with `data/test` for standard test
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline))
-evaluation = dict(interval=1, metric='accuracy')
diff --git a/configs/_base_/datasets/mmcls/imagenet_bs32.py b/configs/_base_/datasets/mmcls/imagenet_bs32.py
deleted file mode 100644
index 8a546590..00000000
--- a/configs/_base_/datasets/mmcls/imagenet_bs32.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# dataset settings
-dataset_type = 'ImageNet'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='RandomResizedCrop', size=224),
- dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='ToTensor', keys=['gt_label']),
- dict(type='Collect', keys=['img', 'gt_label'])
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='Resize', size=(256, -1)),
- dict(type='CenterCrop', crop_size=224),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
-]
-data = dict(
- samples_per_gpu=32,
- workers_per_gpu=2,
- train=dict(
- type=dataset_type,
- data_prefix='data/imagenet/train',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline),
- test=dict(
- # replace `data/val` with `data/test` for standard test
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline))
-evaluation = dict(interval=1, metric='accuracy')
diff --git a/configs/_base_/datasets/mmcls/imagenet_bs32_pil_resize.py b/configs/_base_/datasets/mmcls/imagenet_bs32_pil_resize.py
deleted file mode 100644
index 22b74f76..00000000
--- a/configs/_base_/datasets/mmcls/imagenet_bs32_pil_resize.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# dataset settings
-dataset_type = 'ImageNet'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='RandomResizedCrop', size=224, backend='pillow'),
- dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='ToTensor', keys=['gt_label']),
- dict(type='Collect', keys=['img', 'gt_label'])
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='Resize', size=(256, -1), backend='pillow'),
- dict(type='CenterCrop', crop_size=224),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
-]
-data = dict(
- samples_per_gpu=32,
- workers_per_gpu=2,
- train=dict(
- type=dataset_type,
- data_prefix='data/imagenet/train',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline),
- test=dict(
- # replace `data/val` with `data/test` for standard test
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline))
-evaluation = dict(interval=1, metric='accuracy')
diff --git a/configs/_base_/datasets/mmcls/imagenet_bs64.py b/configs/_base_/datasets/mmcls/imagenet_bs64.py
deleted file mode 100644
index b9f866a4..00000000
--- a/configs/_base_/datasets/mmcls/imagenet_bs64.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# dataset settings
-dataset_type = 'ImageNet'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='RandomResizedCrop', size=224),
- dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='ToTensor', keys=['gt_label']),
- dict(type='Collect', keys=['img', 'gt_label'])
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='Resize', size=(256, -1)),
- dict(type='CenterCrop', crop_size=224),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
-]
-data = dict(
- samples_per_gpu=64,
- workers_per_gpu=2,
- train=dict(
- type=dataset_type,
- data_prefix='data/imagenet/train',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline),
- test=dict(
- # replace `data/val` with `data/test` for standard test
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline))
-evaluation = dict(interval=1, metric='accuracy')
diff --git a/configs/_base_/datasets/mmcls/imagenet_bs64_autoaug.py b/configs/_base_/datasets/mmcls/imagenet_bs64_autoaug.py
deleted file mode 100644
index a1092a31..00000000
--- a/configs/_base_/datasets/mmcls/imagenet_bs64_autoaug.py
+++ /dev/null
@@ -1,43 +0,0 @@
-_base_ = ['./pipelines/auto_aug.py']
-
-# dataset settings
-dataset_type = 'ImageNet'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='RandomResizedCrop', size=224),
- dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
- dict(type='AutoAugment', policies={{_base_.auto_increasing_policies}}),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='ToTensor', keys=['gt_label']),
- dict(type='Collect', keys=['img', 'gt_label'])
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='Resize', size=(256, -1)),
- dict(type='CenterCrop', crop_size=224),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
-]
-data = dict(
- samples_per_gpu=64,
- workers_per_gpu=2,
- train=dict(
- type=dataset_type,
- data_prefix='data/imagenet/train',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline),
- test=dict(
- # replace `data/val` with `data/test` for standard test
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline))
-evaluation = dict(interval=1, metric='accuracy')
diff --git a/configs/_base_/datasets/mmcls/imagenet_bs64_pil_resize.py b/configs/_base_/datasets/mmcls/imagenet_bs64_pil_resize.py
deleted file mode 100644
index 95d0e1f2..00000000
--- a/configs/_base_/datasets/mmcls/imagenet_bs64_pil_resize.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# dataset settings
-dataset_type = 'ImageNet'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='RandomResizedCrop', size=224, backend='pillow'),
- dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='ToTensor', keys=['gt_label']),
- dict(type='Collect', keys=['img', 'gt_label'])
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='Resize', size=(256, -1), backend='pillow'),
- dict(type='CenterCrop', crop_size=224),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
-]
-data = dict(
- samples_per_gpu=64,
- workers_per_gpu=2,
- train=dict(
- type=dataset_type,
- data_prefix='data/imagenet/train',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline),
- test=dict(
- # replace `data/val` with `data/test` for standard test
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline))
-evaluation = dict(interval=1, metric='accuracy')
diff --git a/configs/_base_/datasets/mmcls/imagenet_bs64_pil_resize_autoaug.py b/configs/_base_/datasets/mmcls/imagenet_bs64_pil_resize_autoaug.py
deleted file mode 100644
index f9c50267..00000000
--- a/configs/_base_/datasets/mmcls/imagenet_bs64_pil_resize_autoaug.py
+++ /dev/null
@@ -1,45 +0,0 @@
-_base_ = [
- 'pipelines/auto_aug.py',
-]
-
-# dataset settings
-dataset_type = 'ImageNet'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='RandomResizedCrop', size=224, backend='pillow'),
- dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
- dict(type='AutoAugment', policies={{_base_.policy_imagenet}}),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='ToTensor', keys=['gt_label']),
- dict(type='Collect', keys=['img', 'gt_label'])
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='Resize', size=(256, -1), backend='pillow'),
- dict(type='CenterCrop', crop_size=224),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
-]
-data = dict(
- samples_per_gpu=64,
- workers_per_gpu=2,
- train=dict(
- type=dataset_type,
- data_prefix='data/imagenet/train',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline),
- test=dict(
- # replace `data/val` with `data/test` for standard test
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline))
-evaluation = dict(interval=1, metric='accuracy')
diff --git a/configs/_base_/datasets/mmcls/imagenet_bs64_swin_224.py b/configs/_base_/datasets/mmcls/imagenet_bs64_swin_224.py
deleted file mode 100644
index 4a059a33..00000000
--- a/configs/_base_/datasets/mmcls/imagenet_bs64_swin_224.py
+++ /dev/null
@@ -1,71 +0,0 @@
-_base_ = ['./pipelines/rand_aug.py']
-
-# dataset settings
-dataset_type = 'ImageNet'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='RandomResizedCrop',
- size=224,
- backend='pillow',
- interpolation='bicubic'),
- dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
- dict(
- type='RandAugment',
- policies={{_base_.rand_increasing_policies}},
- num_policies=2,
- total_level=10,
- magnitude_level=9,
- magnitude_std=0.5,
- hparams=dict(
- pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
- interpolation='bicubic')),
- dict(
- type='RandomErasing',
- erase_prob=0.25,
- mode='rand',
- min_area_ratio=0.02,
- max_area_ratio=1 / 3,
- fill_color=img_norm_cfg['mean'][::-1],
- fill_std=img_norm_cfg['std'][::-1]),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='ToTensor', keys=['gt_label']),
- dict(type='Collect', keys=['img', 'gt_label'])
-]
-
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='Resize',
- size=(256, -1),
- backend='pillow',
- interpolation='bicubic'),
- dict(type='CenterCrop', crop_size=224),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
-]
-data = dict(
- samples_per_gpu=64,
- workers_per_gpu=8,
- train=dict(
- type=dataset_type,
- data_prefix='data/imagenet/train',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline),
- test=dict(
- # replace `data/val` with `data/test` for standard test
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline))
-
-evaluation = dict(interval=10, metric='accuracy')
diff --git a/configs/_base_/datasets/mmcls/imagenet_bs64_swin_384.py b/configs/_base_/datasets/mmcls/imagenet_bs64_swin_384.py
deleted file mode 100644
index d2639399..00000000
--- a/configs/_base_/datasets/mmcls/imagenet_bs64_swin_384.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# dataset settings
-dataset_type = 'ImageNet'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='RandomResizedCrop',
- size=384,
- backend='pillow',
- interpolation='bicubic'),
- dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='ToTensor', keys=['gt_label']),
- dict(type='Collect', keys=['img', 'gt_label'])
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='Resize', size=384, backend='pillow', interpolation='bicubic'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
-]
-data = dict(
- samples_per_gpu=64,
- workers_per_gpu=8,
- train=dict(
- type=dataset_type,
- data_prefix='data/imagenet/train',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline),
- test=dict(
- # replace `data/val` with `data/test` for standard test
- type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
- pipeline=test_pipeline))
-evaluation = dict(interval=10, metric='accuracy')
diff --git a/configs/_base_/datasets/mmdet/cityscapes_detection.py b/configs/_base_/datasets/mmdet/cityscapes_detection.py
deleted file mode 100644
index e341b59d..00000000
--- a/configs/_base_/datasets/mmdet/cityscapes_detection.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# dataset settings
-dataset_type = 'CityscapesDataset'
-data_root = 'data/cityscapes/'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', with_bbox=True),
- dict(
- type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(2048, 1024),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- samples_per_gpu=1,
- workers_per_gpu=2,
- train=dict(
- type='RepeatDataset',
- times=8,
- dataset=dict(
- type=dataset_type,
- ann_file=data_root +
- 'annotations/instancesonly_filtered_gtFine_train.json',
- img_prefix=data_root + 'leftImg8bit/train/',
- pipeline=train_pipeline)),
- val=dict(
- type=dataset_type,
- ann_file=data_root +
- 'annotations/instancesonly_filtered_gtFine_val.json',
- img_prefix=data_root + 'leftImg8bit/val/',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- ann_file=data_root +
- 'annotations/instancesonly_filtered_gtFine_test.json',
- img_prefix=data_root + 'leftImg8bit/test/',
- pipeline=test_pipeline))
-evaluation = dict(interval=1, metric='bbox')
diff --git a/configs/_base_/datasets/mmdet/cityscapes_instance.py b/configs/_base_/datasets/mmdet/cityscapes_instance.py
deleted file mode 100644
index 4e3c34e2..00000000
--- a/configs/_base_/datasets/mmdet/cityscapes_instance.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# dataset settings
-dataset_type = 'CityscapesDataset'
-data_root = 'data/cityscapes/'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
- dict(
- type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(2048, 1024),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- samples_per_gpu=1,
- workers_per_gpu=2,
- train=dict(
- type='RepeatDataset',
- times=8,
- dataset=dict(
- type=dataset_type,
- ann_file=data_root +
- 'annotations/instancesonly_filtered_gtFine_train.json',
- img_prefix=data_root + 'leftImg8bit/train/',
- pipeline=train_pipeline)),
- val=dict(
- type=dataset_type,
- ann_file=data_root +
- 'annotations/instancesonly_filtered_gtFine_val.json',
- img_prefix=data_root + 'leftImg8bit/val/',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- ann_file=data_root +
- 'annotations/instancesonly_filtered_gtFine_test.json',
- img_prefix=data_root + 'leftImg8bit/test/',
- pipeline=test_pipeline))
-evaluation = dict(metric=['bbox', 'segm'])
diff --git a/configs/_base_/datasets/mmdet/coco_detection.py b/configs/_base_/datasets/mmdet/coco_detection.py
deleted file mode 100644
index 149f590b..00000000
--- a/configs/_base_/datasets/mmdet/coco_detection.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# dataset settings
-dataset_type = 'CocoDataset'
-data_root = 'data/coco/'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', with_bbox=True),
- dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(1333, 800),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- samples_per_gpu=2,
- workers_per_gpu=2,
- train=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/instances_train2017.json',
- img_prefix=data_root + 'train2017/',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/instances_val2017.json',
- img_prefix=data_root + 'val2017/',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/instances_val2017.json',
- img_prefix=data_root + 'val2017/',
- pipeline=test_pipeline))
-evaluation = dict(interval=1, metric='bbox')
diff --git a/configs/_base_/datasets/mmdet/coco_instance.py b/configs/_base_/datasets/mmdet/coco_instance.py
deleted file mode 100644
index 9901a858..00000000
--- a/configs/_base_/datasets/mmdet/coco_instance.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# dataset settings
-dataset_type = 'CocoDataset'
-data_root = 'data/coco/'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
- dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(1333, 800),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- samples_per_gpu=2,
- workers_per_gpu=2,
- train=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/instances_train2017.json',
- img_prefix=data_root + 'train2017/',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/instances_val2017.json',
- img_prefix=data_root + 'val2017/',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/instances_val2017.json',
- img_prefix=data_root + 'val2017/',
- pipeline=test_pipeline))
-evaluation = dict(metric=['bbox', 'segm'])
diff --git a/configs/_base_/datasets/mmdet/coco_instance_semantic.py b/configs/_base_/datasets/mmdet/coco_instance_semantic.py
deleted file mode 100644
index 6c8bf07b..00000000
--- a/configs/_base_/datasets/mmdet/coco_instance_semantic.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# dataset settings
-dataset_type = 'CocoDataset'
-data_root = 'data/coco/'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
- dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='SegRescale', scale_factor=1 / 8),
- dict(type='DefaultFormatBundle'),
- dict(
- type='Collect',
- keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(1333, 800),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- samples_per_gpu=2,
- workers_per_gpu=2,
- train=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/instances_train2017.json',
- img_prefix=data_root + 'train2017/',
- seg_prefix=data_root + 'stuffthingmaps/train2017/',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/instances_val2017.json',
- img_prefix=data_root + 'val2017/',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/instances_val2017.json',
- img_prefix=data_root + 'val2017/',
- pipeline=test_pipeline))
-evaluation = dict(metric=['bbox', 'segm'])
diff --git a/configs/_base_/datasets/mmdet/coco_panoptic.py b/configs/_base_/datasets/mmdet/coco_panoptic.py
deleted file mode 100644
index dbade7c0..00000000
--- a/configs/_base_/datasets/mmdet/coco_panoptic.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# dataset settings
-dataset_type = 'CocoPanopticDataset'
-data_root = 'data/coco/'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='LoadPanopticAnnotations',
- with_bbox=True,
- with_mask=True,
- with_seg=True),
- dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='SegRescale', scale_factor=1 / 4),
- dict(type='DefaultFormatBundle'),
- dict(
- type='Collect',
- keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(1333, 800),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- samples_per_gpu=2,
- workers_per_gpu=2,
- train=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/panoptic_train2017.json',
- img_prefix=data_root + 'train2017/',
- seg_prefix=data_root + 'annotations/panoptic_train2017/',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/panoptic_val2017.json',
- img_prefix=data_root + 'val2017/',
- seg_prefix=data_root + 'annotations/panoptic_val2017/',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/panoptic_val2017.json',
- img_prefix=data_root + 'val2017/',
- seg_prefix=data_root + 'annotations/panoptic_val2017/',
- pipeline=test_pipeline))
-evaluation = dict(interval=1, metric=['PQ'])
diff --git a/configs/_base_/datasets/mmdet/deepfashion.py b/configs/_base_/datasets/mmdet/deepfashion.py
deleted file mode 100644
index 308b4b2a..00000000
--- a/configs/_base_/datasets/mmdet/deepfashion.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# dataset settings
-dataset_type = 'DeepFashionDataset'
-data_root = 'data/DeepFashion/In-shop/'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
- dict(type='Resize', img_scale=(750, 1101), keep_ratio=True),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(750, 1101),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- imgs_per_gpu=2,
- workers_per_gpu=1,
- train=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json',
- img_prefix=data_root + 'Img/',
- pipeline=train_pipeline,
- data_root=data_root),
- val=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json',
- img_prefix=data_root + 'Img/',
- pipeline=test_pipeline,
- data_root=data_root),
- test=dict(
- type=dataset_type,
- ann_file=data_root +
- 'annotations/DeepFashion_segmentation_gallery.json',
- img_prefix=data_root + 'Img/',
- pipeline=test_pipeline,
- data_root=data_root))
-evaluation = dict(interval=5, metric=['bbox', 'segm'])
diff --git a/configs/_base_/datasets/mmdet/lvis_v0.5_instance.py b/configs/_base_/datasets/mmdet/lvis_v0.5_instance.py
deleted file mode 100644
index 207e0053..00000000
--- a/configs/_base_/datasets/mmdet/lvis_v0.5_instance.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# dataset settings
-_base_ = 'coco_instance.py'
-dataset_type = 'LVISV05Dataset'
-data_root = 'data/lvis_v0.5/'
-data = dict(
- samples_per_gpu=2,
- workers_per_gpu=2,
- train=dict(
- _delete_=True,
- type='ClassBalancedDataset',
- oversample_thr=1e-3,
- dataset=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/lvis_v0.5_train.json',
- img_prefix=data_root + 'train2017/')),
- val=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/lvis_v0.5_val.json',
- img_prefix=data_root + 'val2017/'),
- test=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/lvis_v0.5_val.json',
- img_prefix=data_root + 'val2017/'))
-evaluation = dict(metric=['bbox', 'segm'])
diff --git a/configs/_base_/datasets/mmdet/lvis_v1_instance.py b/configs/_base_/datasets/mmdet/lvis_v1_instance.py
deleted file mode 100644
index be791edd..00000000
--- a/configs/_base_/datasets/mmdet/lvis_v1_instance.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# dataset settings
-_base_ = 'coco_instance.py'
-dataset_type = 'LVISV1Dataset'
-data_root = 'data/lvis_v1/'
-data = dict(
- samples_per_gpu=2,
- workers_per_gpu=2,
- train=dict(
- _delete_=True,
- type='ClassBalancedDataset',
- oversample_thr=1e-3,
- dataset=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/lvis_v1_train.json',
- img_prefix=data_root)),
- val=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/lvis_v1_val.json',
- img_prefix=data_root),
- test=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/lvis_v1_val.json',
- img_prefix=data_root))
-evaluation = dict(metric=['bbox', 'segm'])
diff --git a/configs/_base_/datasets/mmdet/voc0712.py b/configs/_base_/datasets/mmdet/voc0712.py
deleted file mode 100644
index ae09acdd..00000000
--- a/configs/_base_/datasets/mmdet/voc0712.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# dataset settings
-dataset_type = 'VOCDataset'
-data_root = 'data/VOCdevkit/'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', with_bbox=True),
- dict(type='Resize', img_scale=(1000, 600), keep_ratio=True),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(1000, 600),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- samples_per_gpu=2,
- workers_per_gpu=2,
- train=dict(
- type='RepeatDataset',
- times=3,
- dataset=dict(
- type=dataset_type,
- ann_file=[
- data_root + 'VOC2007/ImageSets/Main/trainval.txt',
- data_root + 'VOC2012/ImageSets/Main/trainval.txt'
- ],
- img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'],
- pipeline=train_pipeline)),
- val=dict(
- type=dataset_type,
- ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
- img_prefix=data_root + 'VOC2007/',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
- img_prefix=data_root + 'VOC2007/',
- pipeline=test_pipeline))
-evaluation = dict(interval=1, metric='mAP')
diff --git a/configs/_base_/datasets/mmdet/wider_face.py b/configs/_base_/datasets/mmdet/wider_face.py
deleted file mode 100644
index d1d649be..00000000
--- a/configs/_base_/datasets/mmdet/wider_face.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# dataset settings
-dataset_type = 'WIDERFaceDataset'
-data_root = 'data/WIDERFace/'
-img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile', to_float32=True),
- dict(type='LoadAnnotations', with_bbox=True),
- dict(
- type='PhotoMetricDistortion',
- brightness_delta=32,
- contrast_range=(0.5, 1.5),
- saturation_range=(0.5, 1.5),
- hue_delta=18),
- dict(
- type='Expand',
- mean=img_norm_cfg['mean'],
- to_rgb=img_norm_cfg['to_rgb'],
- ratio_range=(1, 4)),
- dict(
- type='MinIoURandomCrop',
- min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
- min_crop_size=0.3),
- dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(300, 300),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=False),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- samples_per_gpu=60,
- workers_per_gpu=2,
- train=dict(
- type='RepeatDataset',
- times=2,
- dataset=dict(
- type=dataset_type,
- ann_file=data_root + 'train.txt',
- img_prefix=data_root + 'WIDER_train/',
- min_size=17,
- pipeline=train_pipeline)),
- val=dict(
- type=dataset_type,
- ann_file=data_root + 'val.txt',
- img_prefix=data_root + 'WIDER_val/',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- ann_file=data_root + 'val.txt',
- img_prefix=data_root + 'WIDER_val/',
- pipeline=test_pipeline))
diff --git a/configs/_base_/datasets/mmseg/ade20k.py b/configs/_base_/datasets/mmseg/ade20k.py
deleted file mode 100644
index efc8b4bb..00000000
--- a/configs/_base_/datasets/mmseg/ade20k.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# dataset settings
-dataset_type = 'ADE20KDataset'
-data_root = 'data/ade/ADEChallengeData2016'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-crop_size = (512, 512)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', reduce_zero_label=True),
- dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
- dict(type='RandomFlip', prob=0.5),
- dict(type='PhotoMetricDistortion'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_semantic_seg']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(2048, 512),
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- samples_per_gpu=4,
- workers_per_gpu=4,
- train=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/training',
- ann_dir='annotations/training',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/validation',
- ann_dir='annotations/validation',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/validation',
- ann_dir='annotations/validation',
- pipeline=test_pipeline))
diff --git a/configs/_base_/datasets/mmseg/chase_db1.py b/configs/_base_/datasets/mmseg/chase_db1.py
deleted file mode 100644
index 298594ea..00000000
--- a/configs/_base_/datasets/mmseg/chase_db1.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# dataset settings
-dataset_type = 'ChaseDB1Dataset'
-data_root = 'data/CHASE_DB1'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-img_scale = (960, 999)
-crop_size = (128, 128)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations'),
- dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
- dict(type='RandomFlip', prob=0.5),
- dict(type='PhotoMetricDistortion'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_semantic_seg'])
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=img_scale,
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
- ])
-]
-
-data = dict(
- samples_per_gpu=4,
- workers_per_gpu=4,
- train=dict(
- type='RepeatDataset',
- times=40000,
- dataset=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/training',
- ann_dir='annotations/training',
- pipeline=train_pipeline)),
- val=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/validation',
- ann_dir='annotations/validation',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/validation',
- ann_dir='annotations/validation',
- pipeline=test_pipeline))
diff --git a/configs/_base_/datasets/mmseg/cityscapes.py b/configs/_base_/datasets/mmseg/cityscapes.py
deleted file mode 100644
index f21867c6..00000000
--- a/configs/_base_/datasets/mmseg/cityscapes.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# dataset settings
-dataset_type = 'CityscapesDataset'
-data_root = 'data/cityscapes/'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-crop_size = (512, 1024)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations'),
- dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
- dict(type='RandomFlip', prob=0.5),
- dict(type='PhotoMetricDistortion'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_semantic_seg']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(2048, 1024),
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- samples_per_gpu=2,
- workers_per_gpu=2,
- train=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='leftImg8bit/train',
- ann_dir='gtFine/train',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='leftImg8bit/val',
- ann_dir='gtFine/val',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='leftImg8bit/val',
- ann_dir='gtFine/val',
- pipeline=test_pipeline))
diff --git a/configs/_base_/datasets/mmseg/cityscapes_1024x1024.py b/configs/_base_/datasets/mmseg/cityscapes_1024x1024.py
deleted file mode 100644
index f98d9297..00000000
--- a/configs/_base_/datasets/mmseg/cityscapes_1024x1024.py
+++ /dev/null
@@ -1,35 +0,0 @@
-_base_ = './cityscapes.py'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-crop_size = (1024, 1024)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations'),
- dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
- dict(type='RandomFlip', prob=0.5),
- dict(type='PhotoMetricDistortion'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_semantic_seg']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(2048, 1024),
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- train=dict(pipeline=train_pipeline),
- val=dict(pipeline=test_pipeline),
- test=dict(pipeline=test_pipeline))
diff --git a/configs/_base_/datasets/mmseg/cityscapes_769x769.py b/configs/_base_/datasets/mmseg/cityscapes_769x769.py
deleted file mode 100644
index 336c7b25..00000000
--- a/configs/_base_/datasets/mmseg/cityscapes_769x769.py
+++ /dev/null
@@ -1,35 +0,0 @@
-_base_ = './cityscapes.py'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-crop_size = (769, 769)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations'),
- dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
- dict(type='RandomFlip', prob=0.5),
- dict(type='PhotoMetricDistortion'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_semantic_seg']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(2049, 1025),
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- train=dict(pipeline=train_pipeline),
- val=dict(pipeline=test_pipeline),
- test=dict(pipeline=test_pipeline))
diff --git a/configs/_base_/datasets/mmseg/cityscapes_832x832.py b/configs/_base_/datasets/mmseg/cityscapes_832x832.py
deleted file mode 100644
index b9325cc0..00000000
--- a/configs/_base_/datasets/mmseg/cityscapes_832x832.py
+++ /dev/null
@@ -1,35 +0,0 @@
-_base_ = './cityscapes.py'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-crop_size = (832, 832)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations'),
- dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
- dict(type='RandomFlip', prob=0.5),
- dict(type='PhotoMetricDistortion'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_semantic_seg']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(2048, 1024),
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- train=dict(pipeline=train_pipeline),
- val=dict(pipeline=test_pipeline),
- test=dict(pipeline=test_pipeline))
diff --git a/configs/_base_/datasets/mmseg/coco-stuff10k.py b/configs/_base_/datasets/mmseg/coco-stuff10k.py
deleted file mode 100644
index ec049692..00000000
--- a/configs/_base_/datasets/mmseg/coco-stuff10k.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# dataset settings
-dataset_type = 'COCOStuffDataset'
-data_root = 'data/coco_stuff10k'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-crop_size = (512, 512)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', reduce_zero_label=True),
- dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
- dict(type='RandomFlip', prob=0.5),
- dict(type='PhotoMetricDistortion'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_semantic_seg']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(2048, 512),
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- samples_per_gpu=4,
- workers_per_gpu=4,
- train=dict(
- type=dataset_type,
- data_root=data_root,
- reduce_zero_label=True,
- img_dir='images/train2014',
- ann_dir='annotations/train2014',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_root=data_root,
- reduce_zero_label=True,
- img_dir='images/test2014',
- ann_dir='annotations/test2014',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- data_root=data_root,
- reduce_zero_label=True,
- img_dir='images/test2014',
- ann_dir='annotations/test2014',
- pipeline=test_pipeline))
diff --git a/configs/_base_/datasets/mmseg/coco-stuff164k.py b/configs/_base_/datasets/mmseg/coco-stuff164k.py
deleted file mode 100644
index a6a38f2a..00000000
--- a/configs/_base_/datasets/mmseg/coco-stuff164k.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# dataset settings
-dataset_type = 'COCOStuffDataset'
-data_root = 'data/coco_stuff164k'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-crop_size = (512, 512)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations'),
- dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
- dict(type='RandomFlip', prob=0.5),
- dict(type='PhotoMetricDistortion'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_semantic_seg']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(2048, 512),
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- samples_per_gpu=4,
- workers_per_gpu=4,
- train=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/train2017',
- ann_dir='annotations/train2017',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/val2017',
- ann_dir='annotations/val2017',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/val2017',
- ann_dir='annotations/val2017',
- pipeline=test_pipeline))
diff --git a/configs/_base_/datasets/mmseg/drive.py b/configs/_base_/datasets/mmseg/drive.py
deleted file mode 100644
index 06e8ff60..00000000
--- a/configs/_base_/datasets/mmseg/drive.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# dataset settings
-dataset_type = 'DRIVEDataset'
-data_root = 'data/DRIVE'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-img_scale = (584, 565)
-crop_size = (64, 64)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations'),
- dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
- dict(type='RandomFlip', prob=0.5),
- dict(type='PhotoMetricDistortion'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_semantic_seg'])
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=img_scale,
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
- ])
-]
-
-data = dict(
- samples_per_gpu=4,
- workers_per_gpu=4,
- train=dict(
- type='RepeatDataset',
- times=40000,
- dataset=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/training',
- ann_dir='annotations/training',
- pipeline=train_pipeline)),
- val=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/validation',
- ann_dir='annotations/validation',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/validation',
- ann_dir='annotations/validation',
- pipeline=test_pipeline))
diff --git a/configs/_base_/datasets/mmseg/hrf.py b/configs/_base_/datasets/mmseg/hrf.py
deleted file mode 100644
index 242d790e..00000000
--- a/configs/_base_/datasets/mmseg/hrf.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# dataset settings
-dataset_type = 'HRFDataset'
-data_root = 'data/HRF'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-img_scale = (2336, 3504)
-crop_size = (256, 256)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations'),
- dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
- dict(type='RandomFlip', prob=0.5),
- dict(type='PhotoMetricDistortion'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_semantic_seg'])
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=img_scale,
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
- ])
-]
-
-data = dict(
- samples_per_gpu=4,
- workers_per_gpu=4,
- train=dict(
- type='RepeatDataset',
- times=40000,
- dataset=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/training',
- ann_dir='annotations/training',
- pipeline=train_pipeline)),
- val=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/validation',
- ann_dir='annotations/validation',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/validation',
- ann_dir='annotations/validation',
- pipeline=test_pipeline))
diff --git a/configs/_base_/datasets/mmseg/pascal_context.py b/configs/_base_/datasets/mmseg/pascal_context.py
deleted file mode 100644
index ff65bad1..00000000
--- a/configs/_base_/datasets/mmseg/pascal_context.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# dataset settings
-dataset_type = 'PascalContextDataset'
-data_root = 'data/VOCdevkit/VOC2010/'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-img_scale = (520, 520)
-crop_size = (480, 480)
-
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations'),
- dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
- dict(type='RandomFlip', prob=0.5),
- dict(type='PhotoMetricDistortion'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_semantic_seg']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=img_scale,
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- samples_per_gpu=4,
- workers_per_gpu=4,
- train=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='JPEGImages',
- ann_dir='SegmentationClassContext',
- split='ImageSets/SegmentationContext/train.txt',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='JPEGImages',
- ann_dir='SegmentationClassContext',
- split='ImageSets/SegmentationContext/val.txt',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='JPEGImages',
- ann_dir='SegmentationClassContext',
- split='ImageSets/SegmentationContext/val.txt',
- pipeline=test_pipeline))
diff --git a/configs/_base_/datasets/mmseg/pascal_context_59.py b/configs/_base_/datasets/mmseg/pascal_context_59.py
deleted file mode 100644
index 37585aba..00000000
--- a/configs/_base_/datasets/mmseg/pascal_context_59.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# dataset settings
-dataset_type = 'PascalContextDataset59'
-data_root = 'data/VOCdevkit/VOC2010/'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-img_scale = (520, 520)
-crop_size = (480, 480)
-
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', reduce_zero_label=True),
- dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
- dict(type='RandomFlip', prob=0.5),
- dict(type='PhotoMetricDistortion'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_semantic_seg']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=img_scale,
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- samples_per_gpu=4,
- workers_per_gpu=4,
- train=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='JPEGImages',
- ann_dir='SegmentationClassContext',
- split='ImageSets/SegmentationContext/train.txt',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='JPEGImages',
- ann_dir='SegmentationClassContext',
- split='ImageSets/SegmentationContext/val.txt',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='JPEGImages',
- ann_dir='SegmentationClassContext',
- split='ImageSets/SegmentationContext/val.txt',
- pipeline=test_pipeline))
diff --git a/configs/_base_/datasets/mmseg/pascal_voc12.py b/configs/_base_/datasets/mmseg/pascal_voc12.py
deleted file mode 100644
index ba1d42d0..00000000
--- a/configs/_base_/datasets/mmseg/pascal_voc12.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# dataset settings
-dataset_type = 'PascalVOCDataset'
-data_root = 'data/VOCdevkit/VOC2012'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-crop_size = (512, 512)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations'),
- dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
- dict(type='RandomFlip', prob=0.5),
- dict(type='PhotoMetricDistortion'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_semantic_seg']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(2048, 512),
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- samples_per_gpu=4,
- workers_per_gpu=4,
- train=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='JPEGImages',
- ann_dir='SegmentationClass',
- split='ImageSets/Segmentation/train.txt',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='JPEGImages',
- ann_dir='SegmentationClass',
- split='ImageSets/Segmentation/val.txt',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='JPEGImages',
- ann_dir='SegmentationClass',
- split='ImageSets/Segmentation/val.txt',
- pipeline=test_pipeline))
diff --git a/configs/_base_/datasets/mmseg/pascal_voc12_aug.py b/configs/_base_/datasets/mmseg/pascal_voc12_aug.py
deleted file mode 100644
index 3f23b671..00000000
--- a/configs/_base_/datasets/mmseg/pascal_voc12_aug.py
+++ /dev/null
@@ -1,9 +0,0 @@
-_base_ = './pascal_voc12.py'
-# dataset settings
-data = dict(
- train=dict(
- ann_dir=['SegmentationClass', 'SegmentationClassAug'],
- split=[
- 'ImageSets/Segmentation/train.txt',
- 'ImageSets/Segmentation/aug.txt'
- ]))
diff --git a/configs/_base_/datasets/mmseg/stare.py b/configs/_base_/datasets/mmseg/stare.py
deleted file mode 100644
index 3f71b254..00000000
--- a/configs/_base_/datasets/mmseg/stare.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# dataset settings
-dataset_type = 'STAREDataset'
-data_root = 'data/STARE'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-img_scale = (605, 700)
-crop_size = (128, 128)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations'),
- dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
- dict(type='RandomFlip', prob=0.5),
- dict(type='PhotoMetricDistortion'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_semantic_seg'])
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=img_scale,
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
- ])
-]
-
-data = dict(
- samples_per_gpu=4,
- workers_per_gpu=4,
- train=dict(
- type='RepeatDataset',
- times=40000,
- dataset=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/training',
- ann_dir='annotations/training',
- pipeline=train_pipeline)),
- val=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/validation',
- ann_dir='annotations/validation',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/validation',
- ann_dir='annotations/validation',
- pipeline=test_pipeline))
diff --git a/configs/_base_/mmcls_runtime.py b/configs/_base_/mmcls_runtime.py
deleted file mode 100644
index ba965a45..00000000
--- a/configs/_base_/mmcls_runtime.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# checkpoint saving
-checkpoint_config = dict(interval=1)
-# yapf:disable
-log_config = dict(
- interval=100,
- hooks=[
- dict(type='TextLoggerHook'),
- # dict(type='TensorboardLoggerHook')
- ])
-# yapf:enable
-
-dist_params = dict(backend='nccl')
-log_level = 'INFO'
-load_from = None
-resume_from = None
-workflow = [('train', 1)]
diff --git a/configs/_base_/mmdet_runtime.py b/configs/_base_/mmdet_runtime.py
deleted file mode 100644
index 55097c5b..00000000
--- a/configs/_base_/mmdet_runtime.py
+++ /dev/null
@@ -1,16 +0,0 @@
-checkpoint_config = dict(interval=1)
-# yapf:disable
-log_config = dict(
- interval=50,
- hooks=[
- dict(type='TextLoggerHook'),
- # dict(type='TensorboardLoggerHook')
- ])
-# yapf:enable
-custom_hooks = [dict(type='NumClassCheckHook')]
-
-dist_params = dict(backend='nccl')
-log_level = 'INFO'
-load_from = None
-resume_from = None
-workflow = [('train', 1)]
diff --git a/configs/_base_/mmseg_runtime.py b/configs/_base_/mmseg_runtime.py
deleted file mode 100644
index b564cc4e..00000000
--- a/configs/_base_/mmseg_runtime.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# yapf:disable
-log_config = dict(
- interval=50,
- hooks=[
- dict(type='TextLoggerHook', by_epoch=False),
- # dict(type='TensorboardLoggerHook')
- ])
-# yapf:enable
-dist_params = dict(backend='nccl')
-log_level = 'INFO'
-load_from = None
-resume_from = None
-workflow = [('train', 1)]
-cudnn_benchmark = True
diff --git a/configs/_base_/schedules/mmcls/cifar10_bs128.py b/configs/_base_/schedules/mmcls/cifar10_bs128.py
deleted file mode 100644
index f134dbce..00000000
--- a/configs/_base_/schedules/mmcls/cifar10_bs128.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
-optimizer_config = dict(grad_clip=None)
-# learning policy
-lr_config = dict(policy='step', step=[100, 150])
-runner = dict(type='EpochBasedRunner', max_epochs=200)
diff --git a/configs/_base_/schedules/mmcls/imagenet_bs1024_adamw_swin.py b/configs/_base_/schedules/mmcls/imagenet_bs1024_adamw_swin.py
deleted file mode 100644
index 1a523e44..00000000
--- a/configs/_base_/schedules/mmcls/imagenet_bs1024_adamw_swin.py
+++ /dev/null
@@ -1,30 +0,0 @@
-paramwise_cfg = dict(
- norm_decay_mult=0.0,
- bias_decay_mult=0.0,
- custom_keys={
- '.absolute_pos_embed': dict(decay_mult=0.0),
- '.relative_position_bias_table': dict(decay_mult=0.0)
- })
-
-# for batch in each gpu is 128, 8 gpu
-# lr = 5e-4 * 128 * 8 / 512 = 0.001
-optimizer = dict(
- type='AdamW',
- lr=5e-4 * 128 * 8 / 512,
- weight_decay=0.05,
- eps=1e-8,
- betas=(0.9, 0.999),
- paramwise_cfg=paramwise_cfg)
-optimizer_config = dict(grad_clip=dict(max_norm=5.0))
-
-# learning policy
-lr_config = dict(
- policy='CosineAnnealing',
- by_epoch=False,
- min_lr_ratio=1e-2,
- warmup='linear',
- warmup_ratio=1e-3,
- warmup_iters=20 * 1252,
- warmup_by_epoch=False)
-
-runner = dict(type='EpochBasedRunner', max_epochs=300)
diff --git a/configs/_base_/schedules/mmcls/imagenet_bs1024_linearlr_bn_nowd.py b/configs/_base_/schedules/mmcls/imagenet_bs1024_linearlr_bn_nowd.py
deleted file mode 100644
index 99fbdda9..00000000
--- a/configs/_base_/schedules/mmcls/imagenet_bs1024_linearlr_bn_nowd.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# optimizer
-optimizer = dict(
- type='SGD',
- lr=0.5,
- momentum=0.9,
- weight_decay=0.00004,
- paramwise_cfg=dict(norm_decay_mult=0))
-optimizer_config = dict(grad_clip=None)
-# learning policy
-lr_config = dict(
- policy='poly',
- min_lr=0,
- by_epoch=False,
- warmup='constant',
- warmup_iters=5000,
-)
-runner = dict(type='EpochBasedRunner', max_epochs=300)
diff --git a/configs/_base_/schedules/mmcls/imagenet_bs1024_spos.py b/configs/_base_/schedules/mmcls/imagenet_bs1024_spos.py
deleted file mode 100644
index 8ba0cddd..00000000
--- a/configs/_base_/schedules/mmcls/imagenet_bs1024_spos.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# optimizer
-paramwise_cfg = dict(bias_decay_mult=0.0, norm_decay_mult=0.0)
-optimizer = dict(
- type='SGD',
- lr=0.5,
- momentum=0.9,
- weight_decay=4e-5,
- paramwise_cfg=paramwise_cfg)
-optimizer_config = dict(grad_clip=None)
-# learning policy
-lr_config = dict(policy='poly', power=1.0, min_lr=0.0, by_epoch=False)
-runner = dict(type='IterBasedRunner', max_iters=300000)
diff --git a/configs/_base_/schedules/mmcls/imagenet_bs2048.py b/configs/_base_/schedules/mmcls/imagenet_bs2048.py
deleted file mode 100644
index 93fdebfd..00000000
--- a/configs/_base_/schedules/mmcls/imagenet_bs2048.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# optimizer
-optimizer = dict(
- type='SGD', lr=0.8, momentum=0.9, weight_decay=0.0001, nesterov=True)
-optimizer_config = dict(grad_clip=None)
-# learning policy
-lr_config = dict(
- policy='step',
- warmup='linear',
- warmup_iters=2500,
- warmup_ratio=0.25,
- step=[30, 60, 90])
-runner = dict(type='EpochBasedRunner', max_epochs=100)
diff --git a/configs/_base_/schedules/mmcls/imagenet_bs2048_AdamW.py b/configs/_base_/schedules/mmcls/imagenet_bs2048_AdamW.py
deleted file mode 100644
index 6d4f2081..00000000
--- a/configs/_base_/schedules/mmcls/imagenet_bs2048_AdamW.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# optimizer
-# In ClassyVision, the lr is set to 0.003 for bs4096.
-# In this implementation(bs2048), lr = 0.003 / 4096 * (32bs * 64gpus) = 0.0015
-optimizer = dict(type='AdamW', lr=0.0015, weight_decay=0.3)
-optimizer_config = dict(grad_clip=dict(max_norm=1.0))
-
-# specific to vit pretrain
-paramwise_cfg = dict(
- custom_keys={
- '.backbone.cls_token': dict(decay_mult=0.0),
- '.backbone.pos_embed': dict(decay_mult=0.0)
- })
-# learning policy
-lr_config = dict(
- policy='CosineAnnealing',
- min_lr=0,
- warmup='linear',
- warmup_iters=10000,
- warmup_ratio=1e-4)
-runner = dict(type='EpochBasedRunner', max_epochs=300)
diff --git a/configs/_base_/schedules/mmcls/imagenet_bs2048_autoslim.py b/configs/_base_/schedules/mmcls/imagenet_bs2048_autoslim.py
deleted file mode 100644
index 572ad7e9..00000000
--- a/configs/_base_/schedules/mmcls/imagenet_bs2048_autoslim.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# optimizer
-paramwise_cfg = dict(
- bias_decay_mult=0.0, norm_decay_mult=0.0, dwconv_decay_mult=0.0)
-optimizer = dict(
- type='SGD',
- lr=0.5,
- momentum=0.9,
- nesterov=True,
- weight_decay=0.0001,
- paramwise_cfg=paramwise_cfg)
-
-optimizer_config = None
-
-# learning policy
-lr_config = dict(policy='poly', power=1.0, min_lr=0.0, by_epoch=False)
-runner = dict(type='EpochBasedRunner', max_epochs=300)
diff --git a/configs/_base_/schedules/mmcls/imagenet_bs2048_coslr.py b/configs/_base_/schedules/mmcls/imagenet_bs2048_coslr.py
deleted file mode 100644
index b9e77f2c..00000000
--- a/configs/_base_/schedules/mmcls/imagenet_bs2048_coslr.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# optimizer
-optimizer = dict(
- type='SGD', lr=0.8, momentum=0.9, weight_decay=0.0001, nesterov=True)
-optimizer_config = dict(grad_clip=None)
-# learning policy
-lr_config = dict(
- policy='CosineAnnealing',
- min_lr=0,
- warmup='linear',
- warmup_iters=2500,
- warmup_ratio=0.25)
-runner = dict(type='EpochBasedRunner', max_epochs=100)
diff --git a/configs/_base_/schedules/mmcls/imagenet_bs256.py b/configs/_base_/schedules/mmcls/imagenet_bs256.py
deleted file mode 100644
index 3b5d1984..00000000
--- a/configs/_base_/schedules/mmcls/imagenet_bs256.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
-optimizer_config = dict(grad_clip=None)
-# learning policy
-lr_config = dict(policy='step', step=[30, 60, 90])
-runner = dict(type='EpochBasedRunner', max_epochs=100)
diff --git a/configs/_base_/schedules/mmcls/imagenet_bs256_140e.py b/configs/_base_/schedules/mmcls/imagenet_bs256_140e.py
deleted file mode 100644
index caba1577..00000000
--- a/configs/_base_/schedules/mmcls/imagenet_bs256_140e.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
-optimizer_config = dict(grad_clip=None)
-# learning policy
-lr_config = dict(policy='step', step=[40, 80, 120])
-runner = dict(type='EpochBasedRunner', max_epochs=140)
diff --git a/configs/_base_/schedules/mmcls/imagenet_bs256_200e_coslr_warmup.py b/configs/_base_/schedules/mmcls/imagenet_bs256_200e_coslr_warmup.py
deleted file mode 100644
index 49456b2c..00000000
--- a/configs/_base_/schedules/mmcls/imagenet_bs256_200e_coslr_warmup.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
-optimizer_config = dict(grad_clip=None)
-# learning policy
-lr_config = dict(
- policy='CosineAnnealing',
- min_lr=0,
- warmup='linear',
- warmup_iters=25025,
- warmup_ratio=0.25)
-runner = dict(type='EpochBasedRunner', max_epochs=200)
diff --git a/configs/_base_/schedules/mmcls/imagenet_bs256_coslr.py b/configs/_base_/schedules/mmcls/imagenet_bs256_coslr.py
deleted file mode 100644
index 779b4792..00000000
--- a/configs/_base_/schedules/mmcls/imagenet_bs256_coslr.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
-optimizer_config = dict(grad_clip=None)
-# learning policy
-lr_config = dict(policy='CosineAnnealing', min_lr=0)
-runner = dict(type='EpochBasedRunner', max_epochs=100)
diff --git a/configs/_base_/schedules/mmcls/imagenet_bs256_epochstep.py b/configs/_base_/schedules/mmcls/imagenet_bs256_epochstep.py
deleted file mode 100644
index 2347a043..00000000
--- a/configs/_base_/schedules/mmcls/imagenet_bs256_epochstep.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.045, momentum=0.9, weight_decay=0.00004)
-optimizer_config = dict(grad_clip=None)
-# learning policy
-lr_config = dict(policy='step', gamma=0.98, step=1)
-runner = dict(type='EpochBasedRunner', max_epochs=300)
diff --git a/configs/_base_/schedules/mmcls/imagenet_bs4096_AdamW.py b/configs/_base_/schedules/mmcls/imagenet_bs4096_AdamW.py
deleted file mode 100644
index 859cf4b2..00000000
--- a/configs/_base_/schedules/mmcls/imagenet_bs4096_AdamW.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# optimizer
-optimizer = dict(type='AdamW', lr=0.003, weight_decay=0.3)
-optimizer_config = dict(grad_clip=dict(max_norm=1.0))
-
-# specific to vit pretrain
-paramwise_cfg = dict(
- custom_keys={
- '.backbone.cls_token': dict(decay_mult=0.0),
- '.backbone.pos_embed': dict(decay_mult=0.0)
- })
-# learning policy
-lr_config = dict(
- policy='CosineAnnealing',
- min_lr=0,
- warmup='linear',
- warmup_iters=10000,
- warmup_ratio=1e-4)
-runner = dict(type='EpochBasedRunner', max_epochs=300)
diff --git a/configs/_base_/schedules/mmdet/schedule_1x.py b/configs/_base_/schedules/mmdet/schedule_1x.py
deleted file mode 100644
index 13b3783c..00000000
--- a/configs/_base_/schedules/mmdet/schedule_1x.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
-optimizer_config = dict(grad_clip=None)
-# learning policy
-lr_config = dict(
- policy='step',
- warmup='linear',
- warmup_iters=500,
- warmup_ratio=0.001,
- step=[8, 11])
-runner = dict(type='EpochBasedRunner', max_epochs=12)
diff --git a/configs/_base_/schedules/mmdet/schedule_2x.py b/configs/_base_/schedules/mmdet/schedule_2x.py
deleted file mode 100644
index 69dc9ee8..00000000
--- a/configs/_base_/schedules/mmdet/schedule_2x.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
-optimizer_config = dict(grad_clip=None)
-# learning policy
-lr_config = dict(
- policy='step',
- warmup='linear',
- warmup_iters=500,
- warmup_ratio=0.001,
- step=[16, 22])
-runner = dict(type='EpochBasedRunner', max_epochs=24)
diff --git a/configs/_base_/schedules/mmseg/schedule_160k.py b/configs/_base_/schedules/mmseg/schedule_160k.py
deleted file mode 100644
index 39630f21..00000000
--- a/configs/_base_/schedules/mmseg/schedule_160k.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
-optimizer_config = dict()
-# learning policy
-lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
-# runtime settings
-runner = dict(type='IterBasedRunner', max_iters=160000)
-checkpoint_config = dict(by_epoch=False, interval=16000)
-evaluation = dict(interval=16000, metric='mIoU', pre_eval=True)
diff --git a/configs/_base_/schedules/mmseg/schedule_20k.py b/configs/_base_/schedules/mmseg/schedule_20k.py
deleted file mode 100644
index 73c70219..00000000
--- a/configs/_base_/schedules/mmseg/schedule_20k.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
-optimizer_config = dict()
-# learning policy
-lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
-# runtime settings
-runner = dict(type='IterBasedRunner', max_iters=20000)
-checkpoint_config = dict(by_epoch=False, interval=2000)
-evaluation = dict(interval=2000, metric='mIoU', pre_eval=True)
diff --git a/configs/_base_/schedules/mmseg/schedule_320k.py b/configs/_base_/schedules/mmseg/schedule_320k.py
deleted file mode 100644
index a0b23062..00000000
--- a/configs/_base_/schedules/mmseg/schedule_320k.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
-optimizer_config = dict()
-# learning policy
-lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
-# runtime settings
-runner = dict(type='IterBasedRunner', max_iters=320000)
-checkpoint_config = dict(by_epoch=False, interval=32000)
-evaluation = dict(interval=32000, metric='mIoU')
diff --git a/configs/_base_/schedules/mmseg/schedule_40k.py b/configs/_base_/schedules/mmseg/schedule_40k.py
deleted file mode 100644
index d2c50232..00000000
--- a/configs/_base_/schedules/mmseg/schedule_40k.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
-optimizer_config = dict()
-# learning policy
-lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
-# runtime settings
-runner = dict(type='IterBasedRunner', max_iters=40000)
-checkpoint_config = dict(by_epoch=False, interval=4000)
-evaluation = dict(interval=4000, metric='mIoU', pre_eval=True)
diff --git a/configs/_base_/schedules/mmseg/schedule_80k.py b/configs/_base_/schedules/mmseg/schedule_80k.py
deleted file mode 100644
index 8365a878..00000000
--- a/configs/_base_/schedules/mmseg/schedule_80k.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
-optimizer_config = dict()
-# learning policy
-lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
-# runtime settings
-runner = dict(type='IterBasedRunner', max_iters=80000)
-checkpoint_config = dict(by_epoch=False, interval=8000)
-evaluation = dict(interval=8000, metric='mIoU', pre_eval=True)
diff --git a/configs/detnas_supernet_shufflenetv2_coco_1x_2.0_example.py b/configs/detnas_supernet_shufflenetv2_coco_1x_2.0_example.py
index 638d86eb..1e16951d 100644
--- a/configs/detnas_supernet_shufflenetv2_coco_1x_2.0_example.py
+++ b/configs/detnas_supernet_shufflenetv2_coco_1x_2.0_example.py
@@ -62,7 +62,7 @@ test_evaluator = val_evaluator
# training schedule for 1x
train_cfg = dict(by_epoch=True, max_epochs=12)
val_cfg = dict(interval=1)
-test_cfg = dict()
+test_cfg = dict() # type: ignore
# learning rate
param_scheduler = [
diff --git a/configs/distill/cwd/README.md b/configs/distill/cwd/README.md
index be3328d7..bcbff80d 100644
--- a/configs/distill/cwd/README.md
+++ b/configs/distill/cwd/README.md
@@ -14,9 +14,9 @@ Knowledge distillation (KD) has been proven to be a simple and effective tool fo
### Segmentation
-| Location | Dataset | Teacher | Student | mIoU | mIoU(T) | mIou(S) | Config | Download |
-| :------: | :--------: | :------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------: | :---: | :-----: | :-----: | :----------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| logits | cityscapes | [pspnet_r101](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py) | [pspnet_r18](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes.py) | 75.54 | 79.76 | 74.87 | [config](<>) | [teacher](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211-e1e1100f.pth) \|[model](https://download.openmmlab.com/mmrazor/v0.1/distill/cwd/cwd_cls_head_pspnet_r101_d8_pspnet_r18_d8_512x1024_cityscapes_80k/cwd_cls_head_pspnet_r101_d8_pspnet_r18_d8_512x1024_cityscapes_80k_mIoU-75.54_20211222-3a26ee1c.pth) \| [log](https://download.openmmlab.com/mmrazor/v0.1/distill/cwd/cwd_cls_head_pspnet_r101_d8_pspnet_r18_d8_512x1024_cityscapes_80k/cwd_cls_head_pspnet_r101_d8_pspnet_r18_d8_512x1024_cityscapes_80k_20211212_205711.log.json) |
+| Location | Dataset | Teacher | Student | mIoU | mIoU(T) | mIou(S) | Config | Download |
+| :------: | :--------: | :------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------: | :---: | :-----: | :-----: | :----------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| logits | cityscapes | [pspnet_r101](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py) | [pspnet_r18](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes.py) | 75.54 | 79.76 | 74.87 | [config](<>) | [teacher](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211-e1e1100f.pth) \|[model](https://download.openmmlab.com/mmrazor/v0.1/distill/cwd/cwd_cls_head_pspnet_r101_d8_pspnet_r18_d8_512x1024_cityscapes_80k/cwd_cls_head_pspnet_r101_d8_pspnet_r18_d8_512x1024_cityscapes_80k_mIoU-75.54_20211222-3a26ee1c.pth) \| [log](https://download.openmmlab.com/mmrazor/v0.1/distill/cwd/cwd_cls_head_pspnet_r101_d8_pspnet_r18_d8_512x1024_cityscapes_80k/cwd_cls_head_pspnet_r101_d8_pspnet_r18_d8_512x1024_cityscapes_80k_20211212_205711.log.json?) |
### Detection
diff --git a/mmrazor/apis/__init__.py b/mmrazor/apis/__init__.py
deleted file mode 100644
index 504f9ffb..00000000
--- a/mmrazor/apis/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from .mmcls import * # noqa: F401,F403
-from .mmdet import * # noqa: F401,F403
-from .mmseg import * # noqa: F401,F403
diff --git a/mmrazor/apis/mmcls/__init__.py b/mmrazor/apis/mmcls/__init__.py
deleted file mode 100644
index 8542177b..00000000
--- a/mmrazor/apis/mmcls/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from .inference import init_mmcls_model
-
-__all__ = ['init_mmcls_model']
diff --git a/mmrazor/apis/mmcls/inference.py b/mmrazor/apis/mmcls/inference.py
deleted file mode 100644
index fd4bbc8f..00000000
--- a/mmrazor/apis/mmcls/inference.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import warnings
-from typing import Dict, Optional, Union
-
-import mmcv
-from mmcv.runner import load_checkpoint
-from torch import nn
-
-from mmrazor.models import build_algorithm
-
-
-def init_mmcls_model(config: Union[str, mmcv.Config],
- checkpoint: Optional[str] = None,
- device: str = 'cuda:0',
- cfg_options: Optional[Dict] = None) -> nn.Module:
- """Initialize a mmcls model from config file.
-
- Args:
- config (str or :obj:`mmcv.Config`): Config file path or the config
- object.
- checkpoint (str, optional): Checkpoint path. If left as None, the model
- will not load any weights.
- cfg_options (dict): cfg_options to override some settings in the used
- config.
-
- Returns:
- nn.Module: The constructed classifier.
- """
- if isinstance(config, str):
- config = mmcv.Config.fromfile(config)
- elif not isinstance(config, mmcv.Config):
- raise TypeError('config must be a filename or Config object, '
- f'but got {type(config)}')
- if cfg_options is not None:
- config.merge_from_dict(cfg_options)
-
- model_cfg = config.algorithm.architecture.model
- model_cfg.pretrained = None
- algorithm = build_algorithm(config.algorithm)
- model = algorithm.architecture.model
-
- if checkpoint is not None:
- # Mapping the weights to GPU may cause unexpected video memory leak
- # which refers to https://github.com/open-mmlab/mmdetection/pull/6405
- checkpoint = load_checkpoint(algorithm, checkpoint, map_location='cpu')
- if 'CLASSES' in checkpoint.get('meta', {}):
- model.CLASSES = checkpoint['meta']['CLASSES']
- else:
- from mmcls.datasets import ImageNet
- warnings.simplefilter('once')
- warnings.warn('Class names are not saved in the checkpoint\'s '
- 'meta data, use imagenet by default.')
- model.CLASSES = ImageNet.CLASSES
- model.cfg = config # save the config in the model for convenience
- model.to(device)
- model.eval()
-
- return model
diff --git a/mmrazor/apis/mmdet/__init__.py b/mmrazor/apis/mmdet/__init__.py
deleted file mode 100644
index fd1b6cfb..00000000
--- a/mmrazor/apis/mmdet/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-try:
- import mmdet
-except (ImportError, ModuleNotFoundError):
- mmdet = None
-
-if mmdet is not None:
- from .inference import init_mmdet_model
-
- __all__ = ['init_mmdet_model']
diff --git a/mmrazor/apis/mmseg/__init__.py b/mmrazor/apis/mmseg/__init__.py
deleted file mode 100644
index 26343ef4..00000000
--- a/mmrazor/apis/mmseg/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-try:
- import mmseg
-except (ImportError, ModuleNotFoundError):
- mmseg = None
-
-if mmseg:
- from .inference import init_mmseg_model
-
- __all__ = ['init_mmseg_model']
diff --git a/mmrazor/apis/mmseg/inference.py b/mmrazor/apis/mmseg/inference.py
deleted file mode 100644
index df6b0ac4..00000000
--- a/mmrazor/apis/mmseg/inference.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from typing import Optional, Union
-
-import mmcv
-from mmcv.runner import load_checkpoint
-from torch import nn
-
-from mmrazor.models import build_algorithm
-
-
-def init_mmseg_model(config: Union[str, mmcv.Config],
- checkpoint: Optional[str] = None,
- device: str = 'cuda:0') -> nn.Module:
- """Initialize a mmseg model from config file.
-
- Args:
- config (str or :obj:`mmcv.Config`): Config file path or the config
- object.
- checkpoint (str, optional): Checkpoint path. If left as None, the model
- will not load any weights.
- device (str, optional) CPU/CUDA device option. Default 'cuda:0'.
- Use 'cpu' for loading model on CPU.
- Returns:
- nn.Module: The constructed segmentor.
- """
- if isinstance(config, str):
- config = mmcv.Config.fromfile(config)
- elif not isinstance(config, mmcv.Config):
- raise TypeError('config must be a filename or Config object, '
- 'but got {}'.format(type(config)))
-
- model_cfg = config.algorithm.architecture.model
- model_cfg.pretrained = None
- model_cfg.train_cfg = None
- algorithm = build_algorithm(config.algorithm)
- model = algorithm.architecture.model
-
- if checkpoint is not None:
- checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
- model.CLASSES = checkpoint['meta']['CLASSES']
- model.PALETTE = checkpoint['meta']['PALETTE']
- model.cfg = config # save the config in the model for convenience
- model.to(device)
- model.eval()
-
- return model
diff --git a/mmrazor/apis/utils.py b/mmrazor/apis/utils.py
deleted file mode 100644
index e00c5780..00000000
--- a/mmrazor/apis/utils.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import random
-
-import numpy as np
-import torch
-import torch.distributed as dist
-from mmcv.runner import get_dist_info
-
-
-def init_random_seed(seed=None, device='cuda'):
- """Initialize random seed.
-
- If the seed is not set, the seed will be automatically randomized,
- and then broadcast to all processes to prevent some potential bugs.
- Args:
- seed (int, Optional): The seed. Default to None.
- device (str): The device where the seed will be put on.
- Default to 'cuda'.
- Returns:
- int: Seed to be used.
- """
- if seed is not None:
- return seed
-
- # Make sure all ranks share the same random seed to prevent
- # some potential bugs. Please refer to
- # https://github.com/open-mmlab/mmdetection/issues/6339
- rank, world_size = get_dist_info()
- seed = np.random.randint(2**31)
- if world_size == 1:
- return seed
-
- if rank == 0:
- random_num = torch.tensor(seed, dtype=torch.int32, device=device)
- else:
- random_num = torch.tensor(0, dtype=torch.int32, device=device)
- dist.broadcast(random_num, src=0)
- return random_num.item()
-
-
-def set_random_seed(seed: int, deterministic: bool = False) -> None:
- """Set random seed.
-
- Args:
- seed (int): Seed to be used.
- deterministic (bool): Whether to set the deterministic option for
- CUDNN backend, i.e., set ``torch.backends.cudnn.deterministic``
- to True and ``torch.backends.cudnn.benchmark`` to False.
- Default: False.
- """
- random.seed(seed)
- np.random.seed(seed)
- torch.manual_seed(seed)
- torch.cuda.manual_seed_all(seed)
- if deterministic:
- torch.backends.cudnn.deterministic = True
- torch.backends.cudnn.benchmark = False
diff --git a/mmrazor/models/pruners/structure_pruning.py b/mmrazor/models/pruners/structure_pruning.py
index b27a8a80..e9b75b11 100644
--- a/mmrazor/models/pruners/structure_pruning.py
+++ b/mmrazor/models/pruners/structure_pruning.py
@@ -30,8 +30,8 @@ NON_PASS = CONV + FC
PASS = BN + GN
NORM = BN + GN
-BACKWARD_PARSER_DICT = dict()
-MAKE_GROUP_PARSER_DICT = dict()
+BACKWARD_PARSER_DICT = dict() # type: ignore
+MAKE_GROUP_PARSER_DICT = dict() # type: ignore
def register_parser(parser_dict, name=None, force=False):
diff --git a/tools/misc/get_flops.py b/tools/misc/get_flops.py
deleted file mode 100644
index 0f573851..00000000
--- a/tools/misc/get_flops.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import argparse
-
-import numpy as np
-import torch
-from mmcv import Config, DictAction
-from mmcv.cnn.utils import get_model_complexity_info
-
-from mmrazor.models import build_algorithm
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description='Get model flops and params')
- parser.add_argument('config', help='config file path')
- parser.add_argument(
- '--shape',
- type=int,
- nargs='+',
- default=[224, 224],
- help='input image size')
- parser.add_argument(
- '--cfg-options',
- nargs='+',
- action=DictAction,
- help='override some settings in the used config, the key-value pair '
- 'in xxx=yyy format will be merged into config file. If the value to '
- 'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
- 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
- 'Note that the quotation marks are necessary and that no white space '
- 'is allowed.')
- parser.add_argument(
- '--size-divisor',
- type=int,
- default=32,
- help='Pad the input image, the minimum size that is divisible '
- 'by size_divisor, -1 means do not pad the image.')
- args = parser.parse_args()
- return args
-
-
-def main():
- args = parse_args()
-
- if len(args.shape) == 1:
- h = w = args.shape[0]
- elif len(args.shape) == 2:
- h, w = args.shape
- else:
- raise ValueError('invalid input shape')
- orig_shape = (3, h, w)
- divisor = args.size_divisor
- if divisor > 0:
- h = int(np.ceil(h / divisor)) * divisor
- w = int(np.ceil(w / divisor)) * divisor
-
- input_shape = (3, h, w)
-
- cfg = Config.fromfile(args.config)
- if args.cfg_options is not None:
- cfg.merge_from_dict(args.cfg_options)
-
- algorithm = build_algorithm(cfg.algorithm)
- if torch.cuda.is_available():
- algorithm.cuda()
- algorithm.eval()
-
- if hasattr(algorithm.architecture, 'forward_dummy'):
- algorithm.architecture.forward = algorithm.architecture.forward_dummy
- else:
- raise NotImplementedError(
- 'FLOPs counter is currently not currently supported with {}'.
- format(algorithm.architecture.__class__.__name__))
-
- flops, params = get_model_complexity_info(algorithm.architecture,
- input_shape)
- split_line = '=' * 30
-
- if divisor > 0 and \
- input_shape != orig_shape:
- print(f'{split_line}\nUse size divisor set input shape '
- f'from {orig_shape} to {input_shape}\n')
- print(f'{split_line}\nInput shape: {input_shape}\n'
- f'Flops: {flops}\nParams: {params}\n{split_line}')
- print('!!!Please be cautious if you use the results in papers. '
- 'You may need to check if all ops are supported and verify that the '
- 'flops computation is correct.')
-
-
-if __name__ == '__main__':
- main()
diff --git a/tools/model_converters/split_checkpoint.py b/tools/model_converters/split_checkpoint.py
deleted file mode 100644
index b12d7c76..00000000
--- a/tools/model_converters/split_checkpoint.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import argparse
-import os.path as osp
-
-from mmcv import Config
-from mmcv.runner import load_checkpoint, save_checkpoint
-
-from mmrazor.models import build_algorithm
-from mmrazor.models.pruners.utils import SwitchableBatchNorm2d
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description='Split a slimmable trained'
- 'model checkpoint')
- parser.add_argument('config', type=str, help='path of train config file')
- parser.add_argument('checkpoint', type=str, help='checkpoint path')
- parser.add_argument(
- '--channel-cfgs',
- nargs='+',
- help='The path of the channel configs. '
- 'The order should be the same as that of train.')
- parser.add_argument('--output-dir', type=str, default='')
- args = parser.parse_args()
-
- return args
-
-
-def convert_bn(module, bn_ind):
-
- def traverse(module):
- for name, child in module.named_children():
- if isinstance(child, SwitchableBatchNorm2d):
- setattr(module, name, child.bns[bn_ind])
- else:
- traverse(child)
-
- traverse(module)
-
-
-def main():
- args = parse_args()
-
- cfg = Config.fromfile(args.config)
- cfg.merge_from_dict(dict(algorithm=dict(channel_cfg=args.channel_cfgs)))
-
- for i, channel_cfg in enumerate(args.channel_cfgs):
- algorithm = build_algorithm(cfg.algorithm)
- load_checkpoint(algorithm, args.checkpoint, map_location='cpu')
- convert_bn(algorithm, i)
- for module in algorithm.modules():
- if hasattr(module, 'out_mask'):
- del module.out_mask
- if hasattr(module, 'in_mask'):
- del module.in_mask
- assert algorithm.with_pruner, \
- 'The algorithm should has attr pruner. Please check your ' \
- 'config file.'
- algorithm.pruner.deploy_subnet(algorithm.architecture,
- algorithm.channel_cfg[i])
- filename = osp.join(args.output_dir, f'checkpoint_{i + 1}.pth')
- save_checkpoint(algorithm, filename)
-
- print(f'Successfully split the original checkpoint `{args.checkpoint}` to '
- f'{len(args.channel_cfgs)} different checkpoints.')
-
-
-if __name__ == '__main__':
- main()