[Enhance] Add init_cfg with type='Pretrained' to downstream tasks. (#1717)

Yixiao Fang 2023-07-28 15:28:29 +08:00 committed by GitHub
parent b1cd05caf2
commit 0b96dcaa67
32 changed files with 67 additions and 31 deletions
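
Every downstream benchmark config in this commit gains the same backbone field: init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.'). The checkpoint path is deliberately left empty and is meant to be filled in with a self-supervised pre-training checkpoint, while prefix='backbone.' makes the weight loader keep only the checkpoint keys under 'backbone.' and strip that prefix before loading them into the backbone. A minimal sketch of a filled-in config (the checkpoint path below is a made-up example, not part of this commit):

model = dict(
    backbone=dict(
        frozen_stages=4,
        init_cfg=dict(
            # load pretrained weights into the backbone before fine-tuning
            type='Pretrained',
            # user-supplied path or URL to the pre-training checkpoint (hypothetical)
            checkpoint='work_dirs/selfsup/epoch_300.pth',
            # keep only checkpoint keys under 'backbone.' and drop that prefix
            prefix='backbone.')))

The empty checkpoint field can also be overridden at launch time instead of edited in place, e.g. with --cfg-options model.backbone.init_cfg.checkpoint=/path/to/ckpt.pth.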

View File

@@ -5,7 +5,10 @@ _base_ = [
     '../../_base_/default_runtime.py',
 ]

-model = dict(backbone=dict(frozen_stages=4))
+model = dict(
+    backbone=dict(
+        frozen_stages=4,
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))

 # runtime settings
 default_hooks = dict(

View File

@@ -23,7 +23,8 @@ model = dict(
         out_type='avg_featmap',
         use_abs_pos_emb=False,
         use_rel_pos_bias=True,
-        use_shared_rel_pos_bias=False),
+        use_shared_rel_pos_bias=False,
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     neck=None,
     head=dict(
         type='LinearClsHead',

View File

@@ -17,7 +17,8 @@ model = dict(
         out_type='avg_featmap',
         use_abs_pos_emb=False,
         use_rel_pos_bias=True,
-        use_shared_rel_pos_bias=False),
+        use_shared_rel_pos_bias=False,
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     neck=None,
     head=dict(
         type='LinearClsHead',

View File

@@ -5,7 +5,10 @@ _base_ = [
     '../../_base_/default_runtime.py',
 ]

-model = dict(backbone=dict(frozen_stages=4))
+model = dict(
+    backbone=dict(
+        frozen_stages=4,
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))

 # dataset summary
 train_dataloader = dict(batch_size=512)

View File

@@ -74,7 +74,7 @@ model = dict(
         use_abs_pos_emb=True,
         use_rel_pos_bias=True,
         use_shared_rel_pos_bias=False,
-        init_cfg=dict(type='Pretrained', checkpoint='')),
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     neck=None,
     head=dict(
         type='LinearClsHead',

View File

@@ -5,7 +5,10 @@ _base_ = [
     '../../_base_/default_runtime.py',
 ]

-model = dict(backbone=dict(frozen_stages=4))
+model = dict(
+    backbone=dict(
+        frozen_stages=4,
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))

 # optimizer
 optim_wrapper = dict(

View File

@@ -58,7 +58,7 @@ model = dict(
         drop_path_rate=0.1,
         out_type='avg_featmap',
         final_norm=False,
-        init_cfg=dict(type='Pretrained', checkpoint='')),
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     neck=None,
     head=dict(
         type='LinearClsHead',

View File

@@ -19,7 +19,7 @@ model = dict(
         frozen_stages=12,
         out_type='cls_token',
         final_norm=True,
-        init_cfg=dict(type='Pretrained', checkpoint='')),
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     neck=dict(type='ClsBatchNormNeck', input_features=768),
     head=dict(
         type='VisionTransformerClsHead',

View File

@@ -57,7 +57,7 @@ model = dict(
         drop_path_rate=0.1,
         out_type='avg_featmap',
         final_norm=False,
-        init_cfg=dict(type='Pretrained', checkpoint='')),
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     neck=None,
     head=dict(
         type='LinearClsHead',

View File

@@ -20,7 +20,7 @@ model = dict(
         frozen_stages=12,
         out_type='cls_token',
         final_norm=True,
-        init_cfg=dict(type='Pretrained', checkpoint='')),
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     neck=dict(type='ClsBatchNormNeck', input_features=768),
     head=dict(
         type='VisionTransformerClsHead',

View File

@@ -59,7 +59,7 @@ model = dict(
         drop_path_rate=0.3,  # set to 0.3
         out_type='avg_featmap',
         final_norm=False,
-        init_cfg=dict(type='Pretrained', checkpoint='')),
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     neck=None,
     head=dict(
         type='LinearClsHead',

View File

@@ -58,7 +58,7 @@ model = dict(
         drop_path_rate=0.3,  # set to 0.3
         out_type='avg_featmap',
         final_norm=False,
-        init_cfg=dict(type='Pretrained', checkpoint='')),
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     neck=None,
     head=dict(
         type='LinearClsHead',

View File

@@ -58,7 +58,7 @@ model = dict(
         drop_path_rate=0.2,  # set to 0.2
         out_type='avg_featmap',
         final_norm=False,
-        init_cfg=dict(type='Pretrained', checkpoint='')),
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     neck=None,
     head=dict(
         type='LinearClsHead',

View File

@@ -20,7 +20,7 @@ model = dict(
         frozen_stages=24,
         out_type='cls_token',
         final_norm=True,
-        init_cfg=dict(type='Pretrained', checkpoint='')),
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     neck=dict(type='ClsBatchNormNeck', input_features=1024),
     head=dict(
         type='VisionTransformerClsHead',

View File

@@ -56,7 +56,7 @@ model = dict(
         drop_path_rate=0.1,
         out_type='avg_featmap',
         final_norm=False,
-        init_cfg=dict(type='Pretrained', checkpoint='')),
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     neck=None,
     head=dict(
         type='LinearClsHead',

View File

@@ -58,7 +58,7 @@ model = dict(
         drop_path_rate=0.1,
         out_type='avg_featmap',
         final_norm=False,
-        init_cfg=dict(type='Pretrained', checkpoint='')),
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     neck=None,
     head=dict(
         type='LinearClsHead',

View File

@@ -19,7 +19,7 @@ model = dict(
         frozen_stages=12,
         out_type='cls_token',
         final_norm=True,
-        init_cfg=dict(type='Pretrained', checkpoint='')),
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     neck=dict(type='ClsBatchNormNeck', input_features=768),
     head=dict(
         type='VisionTransformerClsHead',

View File

@@ -86,6 +86,10 @@ val_dataloader = dict(
 )
 test_dataloader = val_dataloader

+model = dict(
+    backbone=dict(
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))
+
 # optimizer
 optim_wrapper = dict(
     type='OptimWrapper',

View File

@@ -5,7 +5,10 @@ _base_ = [
     '../../_base_/default_runtime.py',
 ]

-model = dict(backbone=dict(frozen_stages=4))
+model = dict(
+    backbone=dict(
+        frozen_stages=4,
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))

 # optimizer
 optim_wrapper = dict(

View File

@@ -8,7 +8,11 @@ _base_ = [
 # dataset settings
 train_dataloader = dict(batch_size=128)

-model = dict(backbone=dict(frozen_stages=4, norm_eval=True))
+model = dict(
+    backbone=dict(
+        frozen_stages=4,
+        norm_eval=True,
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))

 # optimizer
 optim_wrapper = dict(

View File

@@ -16,7 +16,8 @@ model = dict(
         patch_size=16,
         stop_grad_conv1=True,
         frozen_stages=12,
-        norm_eval=True),
+        norm_eval=True,
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     head=dict(
         type='VisionTransformerClsHead',
         num_classes=1000,

View File

@@ -12,7 +12,7 @@ model = dict(
         img_size=224,
         patch_size=16,
         drop_path_rate=0.1,
-    ),
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     neck=None,
     head=dict(
         type='VisionTransformerClsHead',

View File

@@ -12,7 +12,7 @@ model = dict(
         img_size=224,
         patch_size=16,
         drop_path_rate=0.5,
-    ),
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     neck=None,
     head=dict(
         type='VisionTransformerClsHead',

View File

@@ -16,7 +16,8 @@ model = dict(
         patch_size=16,
         stop_grad_conv1=True,
         frozen_stages=12,
-        norm_eval=True),
+        norm_eval=True,
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     head=dict(
         type='VisionTransformerClsHead',
         num_classes=1000,

View File

@@ -5,7 +5,10 @@ _base_ = [
     '../../_base_/default_runtime.py',
 ]

-model = dict(backbone=dict(frozen_stages=4))
+model = dict(
+    backbone=dict(
+        frozen_stages=4,
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))

 # dataset summary
 train_dataloader = dict(batch_size=512)

View File

@@ -9,7 +9,8 @@ model = dict(
     backbone=dict(
         img_size=192,
         drop_path_rate=0.1,
-        stage_cfgs=dict(block_cfgs=dict(window_size=6))))
+        stage_cfgs=dict(block_cfgs=dict(window_size=6)),
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))

 # optimizer settings
 optim_wrapper = dict(

View File

@@ -52,7 +52,8 @@ model = dict(
     backbone=dict(
         img_size=224,
         drop_path_rate=0.1,
-        stage_cfgs=dict(block_cfgs=dict(window_size=7))))
+        stage_cfgs=dict(block_cfgs=dict(window_size=7)),
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))

 # optimizer settings
 optim_wrapper = dict(

View File

@@ -54,7 +54,8 @@ model = dict(
         img_size=224,
         drop_path_rate=0.2,
         stage_cfgs=dict(block_cfgs=dict(window_size=14)),
-        pad_small_map=True),
+        pad_small_map=True,
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     head=dict(in_channels=1536))

 # optimizer settings

View File

@@ -5,7 +5,10 @@ _base_ = [
     '../../_base_/default_runtime.py',
 ]

-model = dict(backbone=dict(frozen_stages=4))
+model = dict(
+    backbone=dict(
+        frozen_stages=4,
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))

 # dataset summary
 train_dataloader = dict(batch_size=512)

View File

@@ -55,7 +55,7 @@ model = dict(
         drop_path_rate=0.1,
         layer_scale_init_value=0.,
         use_grn=True,
-    ),
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     head=dict(
         type='LinearClsHead',
         num_classes=1000,

View File

@@ -48,7 +48,7 @@ model = dict(
     backbone=dict(
         norm_cfg=dict(type='SyncBN', requires_grad=True),
         drop_path_rate=0.05,
-    ),
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
     head=dict(
         loss=dict(
             type='LabelSmoothLoss', label_smooth_val=0.1, use_sigmoid=True)),

View File

@@ -5,7 +5,10 @@ _base_ = [
     '../../_base_/default_runtime.py',
 ]

-model = dict(backbone=dict(frozen_stages=4))
+model = dict(
+    backbone=dict(
+        frozen_stages=4,
+        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))

 # dataset summary
 train_dataloader = dict(batch_size=512)