mmpretrain/configs/deit/deit-base-distilled_16xb32_...

_base_ = [
    '../_base_/datasets/imagenet_bs64_swin_384.py',
    '../_base_/schedules/imagenet_bs4096_AdamW.py',
    '../_base_/default_runtime.py'
]
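
# The `_base_` files above provide, per mmpretrain's base-config naming
# convention, the 384px ImageNet dataset pipeline (64 samples per GPU),
# an AdamW schedule tuned for a total batch size of 4096, and the default
# runtime hooks; any keys set in this file override the inherited values.
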
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(
        type='DistilledVisionTransformer',
        arch='deit-base',
        img_size=384,
        patch_size=16,
    ),
    neck=None,
    head=dict(
        type='DeiTClsHead',
        num_classes=1000,
        in_channels=768,
        loss=dict(
            type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
    ),
    # Change to the path of the pretrained model
    # init_cfg=dict(type='Pretrained', checkpoint=''),
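    # A purely hypothetical illustration of a filled-in value (the path
    # below is a placeholder, not an official checkpoint):
    # init_cfg=dict(
    #     type='Pretrained',
    #     checkpoint='work_dirs/deit-base-distilled/epoch_300.pth'),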
)

# dataset settings
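# The base dataset config uses 64 samples per GPU; the override below drops
# it to 32, so the `16xb32` setup in the filename gives an effective batch
# size of 16 x 32 = 512.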
train_dataloader = dict(batch_size=32)

# schedule settings
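# The `clip_grad` kwargs below are forwarded to gradient clipping (by
# default, MMEngine's OptimWrapper passes them to
# torch.nn.utils.clip_grad_norm_), capping the global gradient norm at 1.0.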
optim_wrapper = dict(clip_grad=dict(max_norm=1.0))

# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (16 GPUs) x (32 samples per GPU)
auto_scale_lr = dict(base_batch_size=512)
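
# As a sketch of the scaling rule (assuming MMEngine's default linear
# scaling): when enabled, the optimizer LR is multiplied by
# actual_batch_size / base_batch_size. E.g. training on 8 GPUs with 32
# samples each (256 total) would scale the LR by 256 / 512 = 0.5.
# It is usually switched on from the command line (assuming mmpretrain's
# tools/train.py flag):
#   python tools/train.py <path to this config> --auto-scale-lr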