diff --git a/mmpretrain/configs/_base_/models/swin_transformer/base_224.py b/mmpretrain/configs/_base_/models/swin_transformer/base_224.py
deleted file mode 100644
index 5ba4adac..00000000
--- a/mmpretrain/configs/_base_/models/swin_transformer/base_224.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# This is a BETA new format config file, and the usage may change recently.
-from mmengine.model import ConstantInit, TruncNormalInit
-
-from mmpretrain.models import (CutMix, GlobalAveragePooling, ImageClassifier,
-                               LabelSmoothLoss, LinearClsHead, Mixup,
-                               SwinTransformer)
-
-# model settings
-model = dict(
-    type=ImageClassifier,
-    backbone=dict(
-        type=SwinTransformer, arch='base', img_size=224, drop_path_rate=0.5),
-    neck=dict(type=GlobalAveragePooling),
-    head=dict(
-        type=LinearClsHead,
-        num_classes=1000,
-        in_channels=1024,
-        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
-        loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'),
-        cal_acc=False),
-    init_cfg=[
-        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
-        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
-    ],
-    train_cfg=dict(
-        augments=[dict(type=Mixup, alpha=0.8),
-                  dict(type=CutMix, alpha=1.0)]),
-)
diff --git a/mmpretrain/configs/_base_/models/swin_transformer/large_224.py b/mmpretrain/configs/_base_/models/swin_transformer/large_224.py
deleted file mode 100644
index 758600e7..00000000
--- a/mmpretrain/configs/_base_/models/swin_transformer/large_224.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# This is a BETA new format config file, and the usage may change recently.
-from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling,
-                               ImageClassifier, LinearClsHead, SwinTransformer)
-
-# model settings
-# Only for evaluation
-model = dict(
-    type=ImageClassifier,
-    backbone=dict(type=SwinTransformer, arch='large', img_size=224),
-    neck=dict(type=GlobalAveragePooling),
-    head=dict(
-        type=LinearClsHead,
-        num_classes=1000,
-        in_channels=1536,
-        loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
-        topk=(1, 5)))
diff --git a/mmpretrain/configs/_base_/models/swin_transformer/large_384.py b/mmpretrain/configs/_base_/models/swin_transformer/large_384.py
deleted file mode 100644
index 9cb01033..00000000
--- a/mmpretrain/configs/_base_/models/swin_transformer/large_384.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# This is a BETA new format config file, and the usage may change recently.
-from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling,
-                               ImageClassifier, LinearClsHead, SwinTransformer)
-
-# model settings
-# Only for evaluation
-model = dict(
-    type=ImageClassifier,
-    backbone=dict(
-        type=SwinTransformer,
-        arch='large',
-        img_size=384,
-        stage_cfgs=dict(block_cfgs=dict(window_size=12))),
-    neck=dict(type=GlobalAveragePooling),
-    head=dict(
-        type=LinearClsHead,
-        num_classes=1000,
-        in_channels=1536,
-        loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
-        topk=(1, 5)))
diff --git a/mmpretrain/configs/_base_/models/swin_transformer/small_224.py b/mmpretrain/configs/_base_/models/swin_transformer/small_224.py
deleted file mode 100644
index f6de6ac0..00000000
--- a/mmpretrain/configs/_base_/models/swin_transformer/small_224.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# This is a BETA new format config file, and the usage may change recently.
-from mmengine.model import ConstantInit, TruncNormalInit
-
-from mmpretrain.models import (CutMix, GlobalAveragePooling, ImageClassifier,
-                               LabelSmoothLoss, LinearClsHead, Mixup,
-                               SwinTransformer)
-
-# model settings
-model = dict(
-    type=ImageClassifier,
-    backbone=dict(
-        type=SwinTransformer, arch='small', img_size=224, drop_path_rate=0.3),
-    neck=dict(type=GlobalAveragePooling),
-    head=dict(
-        type=LinearClsHead,
-        num_classes=1000,
-        in_channels=768,
-        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
-        loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'),
-        cal_acc=False),
-    init_cfg=[
-        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
-        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
-    ],
-    train_cfg=dict(
-        augments=[dict(type=Mixup, alpha=0.8),
-                  dict(type=CutMix, alpha=1.0)]),
-)
diff --git a/mmpretrain/configs/_base_/models/swin_transformer/tiny_224.py b/mmpretrain/configs/_base_/models/swin_transformer/tiny_224.py
deleted file mode 100644
index fc976cc0..00000000
--- a/mmpretrain/configs/_base_/models/swin_transformer/tiny_224.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# This is a BETA new format config file, and the usage may change recently.
-from mmengine.model import ConstantInit, TruncNormalInit
-
-from mmpretrain.models import (CutMix, GlobalAveragePooling, ImageClassifier,
-                               LabelSmoothLoss, LinearClsHead, Mixup,
-                               SwinTransformer)
-
-# model settings
-model = dict(
-    type=ImageClassifier,
-    backbone=dict(
-        type=SwinTransformer, arch='tiny', img_size=224, drop_path_rate=0.2),
-    neck=dict(type=GlobalAveragePooling),
-    head=dict(
-        type=LinearClsHead,
-        num_classes=1000,
-        in_channels=768,
-        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
-        loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'),
-        cal_acc=False),
-    init_cfg=[
-        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
-        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
-    ],
-    train_cfg=dict(
-        augments=[dict(type=Mixup, alpha=0.8),
-                  dict(type=CutMix, alpha=1.0)]),
-)
diff --git a/mmpretrain/configs/_base_/models/swin_transformer/base_384.py b/mmpretrain/configs/_base_/models/swin_transformer_base.py
similarity index 100%
rename from mmpretrain/configs/_base_/models/swin_transformer/base_384.py
rename to mmpretrain/configs/_base_/models/swin_transformer_base.py
diff --git a/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k.py b/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k.py
index 12ec65ea..09af3d01 100644
--- a/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k.py
+++ b/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k.py
@@ -1,12 +1,35 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
 from mmengine.config import read_base
+from mmengine.model import ConstantInit, TruncNormalInit
+
+from mmpretrain.models import CutMix, LabelSmoothLoss, Mixup
 
 with read_base():
     from .._base_.datasets.imagenet_bs64_swin_224 import *
     from .._base_.default_runtime import *
-    from .._base_.models.swin_transformer.base_224 import *
+    from .._base_.models.swin_transformer_base import *
     from .._base_.schedules.imagenet_bs1024_adamw_swin import *
 
+# model settings
+model.update(
+    backbone=dict(img_size=224, drop_path_rate=0.5, stage_cfgs=None),
+    head=dict(
+        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
+        loss=dict(
+            type=LabelSmoothLoss,
+            label_smooth_val=0.1,
+            mode='original',
+            loss_weight=1.0),
+        topk=None,
+        cal_acc=False),
+    init_cfg=[
+        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
+        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
+    ],
+    train_cfg=dict(
+        augments=[dict(type=Mixup, alpha=0.8),
+                  dict(type=CutMix, alpha=1.0)]))
+
 # schedule settings
 optim_wrapper = dict(clip_grad=dict(max_norm=5.0))
diff --git a/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k_384px.py b/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k_384px.py
index 76548d93..aacdc327 100644
--- a/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k_384px.py
+++ b/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k_384px.py
@@ -5,7 +5,7 @@ from mmengine.config import read_base
 with read_base():
     from .._base_.datasets.imagenet_bs64_swin_384 import *
     from .._base_.default_runtime import *
-    from .._base_.models.swin_transformer.base_384 import *
+    from .._base_.models.swin_transformer_base import *
     from .._base_.schedules.imagenet_bs1024_adamw_swin import *
 
 # schedule settings
diff --git a/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k.py b/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k.py
index 4b22f5ae..b8fc2793 100644
--- a/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k.py
+++ b/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k.py
@@ -5,8 +5,14 @@ from mmengine.config import read_base
 with read_base():
     from .._base_.datasets.imagenet_bs64_swin_224 import *
     from .._base_.default_runtime import *
-    from .._base_.models.swin_transformer.large_224 import *
+    from .._base_.models.swin_transformer_base import *
     from .._base_.schedules.imagenet_bs1024_adamw_swin import *
 
+# model settings
+model.update(
+    backbone=dict(arch='large', img_size=224, stage_cfgs=None),
+    head=dict(in_channels=1536),
+)
+
 # schedule settings
 optim_wrapper = dict(clip_grad=dict(max_norm=5.0))
diff --git a/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k_384px.py b/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k_384px.py
index f4a6143b..9a449aa6 100644
--- a/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k_384px.py
+++ b/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k_384px.py
@@ -5,8 +5,14 @@ from mmengine.config import read_base
 with read_base():
     from .._base_.datasets.imagenet_bs64_swin_384 import *
     from .._base_.default_runtime import *
-    from .._base_.models.swin_transformer.large_384 import *
+    from .._base_.models.swin_transformer_base import *
     from .._base_.schedules.imagenet_bs1024_adamw_swin import *
 
+# model settings
+model.update(
+    backbone=dict(arch='large'),
+    head=dict(in_channels=1536),
+)
+
 # schedule settings
 optim_wrapper = dict(clip_grad=dict(max_norm=5.0))
diff --git a/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py b/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py
index 6156e306..779daaa3 100644
--- a/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py
+++ b/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py
@@ -10,11 +10,20 @@ from mmpretrain.models import ImageClassifier
 with read_base():
     from .._base_.datasets.cub_bs8_384 import *
     from .._base_.default_runtime import *
-    from .._base_.models.swin_transformer.large_384 import *
+    from .._base_.models.swin_transformer_base import *
     from .._base_.schedules.cub_bs64 import *
 
 # model settings
 checkpoint = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-large_3rdparty_in21k-384px.pth'  # noqa
+
+# FIXME(review): the full `model = dict(...)` assignment below rebinds the
+# name and discards this update; fold the large-arch overrides into that dict
+# (or remove the reassignment) so these settings take effect.
+model.update(
+    backbone=dict(arch='large'),
+    head=dict(in_channels=1536),
+)
+
 model = dict(
     type=ImageClassifier,
     backbone=dict(
diff --git a/mmpretrain/configs/swin_transformer/swin_small_16xb64_in1k.py b/mmpretrain/configs/swin_transformer/swin_small_16xb64_in1k.py
index 969edee7..59792528 100644
--- a/mmpretrain/configs/swin_transformer/swin_small_16xb64_in1k.py
+++ b/mmpretrain/configs/swin_transformer/swin_small_16xb64_in1k.py
@@ -1,12 +1,37 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
 from mmengine.config import read_base
+from mmengine.model import ConstantInit, TruncNormalInit
+
+from mmpretrain.models import CutMix, LabelSmoothLoss, Mixup
 
 with read_base():
     from .._base_.datasets.imagenet_bs64_swin_224 import *
     from .._base_.default_runtime import *
-    from .._base_.models.swin_transformer.small_224 import *
+    from .._base_.models.swin_transformer_base import *
     from .._base_.schedules.imagenet_bs1024_adamw_swin import *
 
+# model settings
+model.update(
+    backbone=dict(
+        arch='small', img_size=224, drop_path_rate=0.3, stage_cfgs=None),
+    head=dict(
+        in_channels=768,
+        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
+        loss=dict(
+            type=LabelSmoothLoss,
+            label_smooth_val=0.1,
+            mode='original',
+            loss_weight=1.0),
+        topk=None,
+        cal_acc=False),
+    init_cfg=[
+        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
+        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
+    ],
+    train_cfg=dict(
+        augments=[dict(type=Mixup, alpha=0.8),
+                  dict(type=CutMix, alpha=1.0)]))
+
 # schedule settings
 optim_wrapper = dict(clip_grad=dict(max_norm=5.0))
diff --git a/mmpretrain/configs/swin_transformer/swin_tiny_16xb64_in1k.py b/mmpretrain/configs/swin_transformer/swin_tiny_16xb64_in1k.py
index ded80639..733e1ef0 100644
--- a/mmpretrain/configs/swin_transformer/swin_tiny_16xb64_in1k.py
+++ b/mmpretrain/configs/swin_transformer/swin_tiny_16xb64_in1k.py
@@ -1,12 +1,37 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
 from mmengine.config import read_base
+from mmengine.model import ConstantInit, TruncNormalInit
+
+from mmpretrain.models import CutMix, LabelSmoothLoss, Mixup
 
 with read_base():
     from .._base_.datasets.imagenet_bs64_swin_224 import *
     from .._base_.default_runtime import *
-    from .._base_.models.swin_transformer.tiny_224 import *
+    from .._base_.models.swin_transformer_base import *
     from .._base_.schedules.imagenet_bs1024_adamw_swin import *
 
+# model settings
+model.update(
+    backbone=dict(
+        arch='tiny', img_size=224, drop_path_rate=0.2, stage_cfgs=None),
+    head=dict(
+        in_channels=768,
+        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
+        loss=dict(
+            type=LabelSmoothLoss,
+            label_smooth_val=0.1,
+            mode='original',
+            loss_weight=1.0),
+        topk=None,
+        cal_acc=False),
+    init_cfg=[
+        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
+        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
+    ],
+    train_cfg=dict(
+        augments=[dict(type=Mixup, alpha=0.8),
+                  dict(type=CutMix, alpha=1.0)]))
+
 # schedule settings
 optim_wrapper = dict(clip_grad=dict(max_norm=5.0))