Only keep one file to set the Swin Transformer model config

parent ed3b7f8ae6
commit f4d372ba7d
@@ -1,29 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# This is a BETA new format config file, and the usage may change recently.
-from mmengine.model import ConstantInit, TruncNormalInit
-
-from mmpretrain.models import (CutMix, GlobalAveragePooling, ImageClassifier,
-                               LabelSmoothLoss, LinearClsHead, Mixup,
-                               SwinTransformer)
-
-# model settings
-model = dict(
-    type=ImageClassifier,
-    backbone=dict(
-        type=SwinTransformer, arch='base', img_size=224, drop_path_rate=0.5),
-    neck=dict(type=GlobalAveragePooling),
-    head=dict(
-        type=LinearClsHead,
-        num_classes=1000,
-        in_channels=1024,
-        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
-        loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'),
-        cal_acc=False),
-    init_cfg=[
-        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
-        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
-    ],
-    train_cfg=dict(
-        augments=[dict(type=Mixup, alpha=0.8),
-                  dict(type=CutMix, alpha=1.0)]),
-)

@@ -1,17 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# This is a BETA new format config file, and the usage may change recently.
-from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling,
-                               ImageClassifier, LinearClsHead, SwinTransformer)
-
-# model settings
-# Only for evaluation
-model = dict(
-    type=ImageClassifier,
-    backbone=dict(type=SwinTransformer, arch='large', img_size=224),
-    neck=dict(type=GlobalAveragePooling),
-    head=dict(
-        type=LinearClsHead,
-        num_classes=1000,
-        in_channels=1536,
-        loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
-        topk=(1, 5)))

@@ -1,21 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# This is a BETA new format config file, and the usage may change recently.
-from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling,
-                               ImageClassifier, LinearClsHead, SwinTransformer)
-
-# model settings
-# Only for evaluation
-model = dict(
-    type=ImageClassifier,
-    backbone=dict(
-        type=SwinTransformer,
-        arch='large',
-        img_size=384,
-        stage_cfgs=dict(block_cfgs=dict(window_size=12))),
-    neck=dict(type=GlobalAveragePooling),
-    head=dict(
-        type=LinearClsHead,
-        num_classes=1000,
-        in_channels=1536,
-        loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
-        topk=(1, 5)))

@@ -1,29 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# This is a BETA new format config file, and the usage may change recently.
-from mmengine.model import ConstantInit, TruncNormalInit
-
-from mmpretrain.models import (CutMix, GlobalAveragePooling, ImageClassifier,
-                               LabelSmoothLoss, LinearClsHead, Mixup,
-                               SwinTransformer)
-
-# model settings
-model = dict(
-    type=ImageClassifier,
-    backbone=dict(
-        type=SwinTransformer, arch='small', img_size=224, drop_path_rate=0.3),
-    neck=dict(type=GlobalAveragePooling),
-    head=dict(
-        type=LinearClsHead,
-        num_classes=1000,
-        in_channels=768,
-        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
-        loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'),
-        cal_acc=False),
-    init_cfg=[
-        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
-        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
-    ],
-    train_cfg=dict(
-        augments=[dict(type=Mixup, alpha=0.8),
-                  dict(type=CutMix, alpha=1.0)]),
-)

@@ -1,29 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# This is a BETA new format config file, and the usage may change recently.
-from mmengine.model import ConstantInit, TruncNormalInit
-
-from mmpretrain.models import (CutMix, GlobalAveragePooling, ImageClassifier,
-                               LabelSmoothLoss, LinearClsHead, Mixup,
-                               SwinTransformer)
-
-# model settings
-model = dict(
-    type=ImageClassifier,
-    backbone=dict(
-        type=SwinTransformer, arch='tiny', img_size=224, drop_path_rate=0.2),
-    neck=dict(type=GlobalAveragePooling),
-    head=dict(
-        type=LinearClsHead,
-        num_classes=1000,
-        in_channels=768,
-        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
-        loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'),
-        cal_acc=False),
-    init_cfg=[
-        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
-        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
-    ],
-    train_cfg=dict(
-        augments=[dict(type=Mixup, alpha=0.8),
-                  dict(type=CutMix, alpha=1.0)]),
-)
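
Note: the per-architecture base files deleted above are replaced by a single shared base, imported below as `swin_transformer_base`. That file's contents are not part of this diff; judging from the overrides in the configs that follow (the 384px variants change almost nothing, while the 224px variants reset `img_size` and `stage_cfgs` and add the training-time head settings), it presumably carries the old base-384px evaluation defaults. A rough sketch under that assumption:

# Hypothetical contents of the consolidated swin_transformer_base.py,
# inferred from the deleted files and the overrides below -- not part of this diff.
from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling,
                               ImageClassifier, LinearClsHead, SwinTransformer)

model = dict(
    type=ImageClassifier,
    backbone=dict(
        type=SwinTransformer,
        arch='base',
        img_size=384,
        stage_cfgs=dict(block_cfgs=dict(window_size=12))),
    neck=dict(type=GlobalAveragePooling),
    head=dict(
        type=LinearClsHead,
        num_classes=1000,
        in_channels=1024,
        loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
        topk=(1, 5)))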

@@ -1,12 +1,35 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
 from mmengine.config import read_base
+from mmengine.model import ConstantInit, TruncNormalInit
+
+from mmpretrain.models import CutMix, LabelSmoothLoss, Mixup
 
 with read_base():
     from .._base_.datasets.imagenet_bs64_swin_224 import *
     from .._base_.default_runtime import *
-    from .._base_.models.swin_transformer.base_224 import *
+    from .._base_.models.swin_transformer_base import *
     from .._base_.schedules.imagenet_bs1024_adamw_swin import *
 
+# model settings
+model.update(
+    backbone=dict(img_size=224, drop_path_rate=0.5, stage_cfgs=None),
+    head=dict(
+        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
+        loss=dict(
+            type=LabelSmoothLoss,
+            label_smooth_val=0.1,
+            mode='original',
+            loss_weight=0),
+        topk=None,
+        cal_acc=False),
+    init_cfg=[
+        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
+        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
+    ],
+    train_cfg=dict(
+        augments=[dict(type=Mixup, alpha=0.8),
+                  dict(type=CutMix, alpha=1.0)]))
+
 # schedule settings
 optim_wrapper = dict(clip_grad=dict(max_norm=5.0))

@@ -5,7 +5,7 @@ from mmengine.config import read_base
 with read_base():
     from .._base_.datasets.imagenet_bs64_swin_384 import *
     from .._base_.default_runtime import *
-    from .._base_.models.swin_transformer.base_384 import *
+    from .._base_.models.swin_transformer_base import *
     from .._base_.schedules.imagenet_bs1024_adamw_swin import *
 
 # schedule settings

@@ -5,8 +5,14 @@ from mmengine.config import read_base
 with read_base():
     from .._base_.datasets.imagenet_bs64_swin_224 import *
     from .._base_.default_runtime import *
-    from .._base_.models.swin_transformer.large_224 import *
+    from .._base_.models.swin_transformer_base import *
     from .._base_.schedules.imagenet_bs1024_adamw_swin import *
 
+# model settings
+model.update(
+    backbone=dict(arch='large', img_size=224, stage_cfgs=None),
+    head=dict(in_channels=1536),
+)
+
 # schedule settings
 optim_wrapper = dict(clip_grad=dict(max_norm=5.0))

@@ -5,8 +5,14 @@ from mmengine.config import read_base
 with read_base():
     from .._base_.datasets.imagenet_bs64_swin_384 import *
     from .._base_.default_runtime import *
-    from .._base_.models.swin_transformer.large_384 import *
+    from .._base_.models.swin_transformer_base import *
     from .._base_.schedules.imagenet_bs1024_adamw_swin import *
 
+# model settings
+model.update(
+    backbone=dict(arch='large'),
+    head=dict(in_channels=1536),
+)
+
 # schedule settings
 optim_wrapper = dict(clip_grad=dict(max_norm=5.0))

@@ -10,11 +10,17 @@ from mmpretrain.models import ImageClassifier
 with read_base():
     from .._base_.datasets.cub_bs8_384 import *
     from .._base_.default_runtime import *
-    from .._base_.models.swin_transformer.large_384 import *
+    from .._base_.models.swin_transformer_base import *
     from .._base_.schedules.cub_bs64 import *
 
 # model settings
 checkpoint = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-large_3rdparty_in21k-384px.pth'  # noqa
+
+model.update(
+    backbone=dict(arch='large'),
+    head=dict(in_channels=1536),
+)
+
 model = dict(
     type=ImageClassifier,
     backbone=dict(

@@ -1,12 +1,37 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
 from mmengine.config import read_base
+from mmengine.model import ConstantInit, TruncNormalInit
+
+from mmpretrain.models import CutMix, LabelSmoothLoss, Mixup
 
 with read_base():
     from .._base_.datasets.imagenet_bs64_swin_224 import *
     from .._base_.default_runtime import *
-    from .._base_.models.swin_transformer.small_224 import *
+    from .._base_.models.swin_transformer_base import *
     from .._base_.schedules.imagenet_bs1024_adamw_swin import *
 
+# model settings
+model.update(
+    backbone=dict(
+        arch='small', img_size=224, drop_path_rate=0.3, stage_cfgs=None),
+    head=dict(
+        in_channels=768,
+        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
+        loss=dict(
+            type=LabelSmoothLoss,
+            label_smooth_val=0.1,
+            mode='original',
+            loss_weight=0),
+        topk=None,
+        cal_acc=False),
+    init_cfg=[
+        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
+        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
+    ],
+    train_cfg=dict(
+        augments=[dict(type=Mixup, alpha=0.8),
+                  dict(type=CutMix, alpha=1.0)]))
+
 # schedule settings
 optim_wrapper = dict(clip_grad=dict(max_norm=5.0))

@@ -1,12 +1,37 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
 from mmengine.config import read_base
+from mmengine.model import ConstantInit, TruncNormalInit
+
+from mmpretrain.models import CutMix, LabelSmoothLoss, Mixup
 
 with read_base():
     from .._base_.datasets.imagenet_bs64_swin_224 import *
     from .._base_.default_runtime import *
-    from .._base_.models.swin_transformer.tiny_224 import *
+    from .._base_.models.swin_transformer_base import *
     from .._base_.schedules.imagenet_bs1024_adamw_swin import *
 
+# model settings
+model.update(
+    backbone=dict(
+        arch='tiny', img_size=224, drop_path_rate=0.2, stage_cfgs=None),
+    head=dict(
+        in_channels=768,
+        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
+        loss=dict(
+            type=LabelSmoothLoss,
+            label_smooth_val=0.1,
+            mode='original',
+            loss_weight=0),
+        topk=None,
+        cal_acc=False),
+    init_cfg=[
+        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
+        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
+    ],
+    train_cfg=dict(
+        augments=[dict(type=Mixup, alpha=0.8),
+                  dict(type=CutMix, alpha=1.0)]))
+
 # schedule settings
 optim_wrapper = dict(clip_grad=dict(max_norm=5.0))
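
Since the variant configs now override the shared base via `model.update(...)` instead of importing standalone model files, a quick way to sanity-check what a given variant resolves to is to load it with mmengine and inspect the merged dict. A minimal sketch; the config path below is an assumed example, not taken from this diff:

# Check the merged settings after the update()-style overrides.
# The path is an assumed example; substitute any of the configs above.
from mmengine.config import Config

cfg = Config.fromfile('configs/swin_transformer/swin-tiny_16xb64_in1k.py')
print(cfg.model['backbone'])  # expect arch='tiny', img_size=224, drop_path_rate=0.2
print(cfg.model['head'])      # expect in_channels=768 with the LabelSmoothLoss settings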