add init_cfg with type='pretrained'

pull/1717/head
fangyixiao18 2023-07-20 11:42:23 +08:00
parent 569324b180
commit f191ca5e73
32 changed files with 67 additions and 31 deletions

View File

@@ -5,7 +5,10 @@ _base_ = [
'../../_base_/default_runtime.py',
]
model = dict(backbone=dict(frozen_stages=4))
model = dict(
backbone=dict(
frozen_stages=4,
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))
# runtime settings
default_hooks = dict(

View File

@@ -23,7 +23,8 @@ model = dict(
out_type='avg_featmap',
use_abs_pos_emb=False,
use_rel_pos_bias=True,
use_shared_rel_pos_bias=False),
use_shared_rel_pos_bias=False,
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
neck=None,
head=dict(
type='LinearClsHead',

View File

@@ -17,7 +17,8 @@ model = dict(
out_type='avg_featmap',
use_abs_pos_emb=False,
use_rel_pos_bias=True,
use_shared_rel_pos_bias=False),
use_shared_rel_pos_bias=False,
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
neck=None,
head=dict(
type='LinearClsHead',

View File

@@ -5,7 +5,10 @@ _base_ = [
'../../_base_/default_runtime.py',
]
model = dict(backbone=dict(frozen_stages=4))
model = dict(
backbone=dict(
frozen_stages=4,
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))
# dataset summary
train_dataloader = dict(batch_size=512)

View File

@@ -74,7 +74,7 @@ model = dict(
use_abs_pos_emb=True,
use_rel_pos_bias=True,
use_shared_rel_pos_bias=False,
init_cfg=dict(type='Pretrained', checkpoint='')),
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
neck=None,
head=dict(
type='LinearClsHead',

View File

@@ -5,7 +5,10 @@ _base_ = [
'../../_base_/default_runtime.py',
]
model = dict(backbone=dict(frozen_stages=4))
model = dict(
backbone=dict(
frozen_stages=4,
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))
# optimizer
optim_wrapper = dict(

View File

@@ -58,7 +58,7 @@ model = dict(
drop_path_rate=0.1,
out_type='avg_featmap',
final_norm=False,
init_cfg=dict(type='Pretrained', checkpoint='')),
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
neck=None,
head=dict(
type='LinearClsHead',

View File

@@ -19,7 +19,7 @@ model = dict(
frozen_stages=12,
out_type='cls_token',
final_norm=True,
init_cfg=dict(type='Pretrained', checkpoint='')),
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
neck=dict(type='ClsBatchNormNeck', input_features=768),
head=dict(
type='VisionTransformerClsHead',

View File

@@ -57,7 +57,7 @@ model = dict(
drop_path_rate=0.1,
out_type='avg_featmap',
final_norm=False,
init_cfg=dict(type='Pretrained', checkpoint='')),
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
neck=None,
head=dict(
type='LinearClsHead',

View File

@@ -20,7 +20,7 @@ model = dict(
frozen_stages=12,
out_type='cls_token',
final_norm=True,
init_cfg=dict(type='Pretrained', checkpoint='')),
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
neck=dict(type='ClsBatchNormNeck', input_features=768),
head=dict(
type='VisionTransformerClsHead',

View File

@@ -59,7 +59,7 @@ model = dict(
drop_path_rate=0.3, # set to 0.3
out_type='avg_featmap',
final_norm=False,
init_cfg=dict(type='Pretrained', checkpoint='')),
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
neck=None,
head=dict(
type='LinearClsHead',

View File

@@ -58,7 +58,7 @@ model = dict(
drop_path_rate=0.3, # set to 0.3
out_type='avg_featmap',
final_norm=False,
init_cfg=dict(type='Pretrained', checkpoint='')),
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
neck=None,
head=dict(
type='LinearClsHead',

View File

@@ -58,7 +58,7 @@ model = dict(
drop_path_rate=0.2, # set to 0.2
out_type='avg_featmap',
final_norm=False,
init_cfg=dict(type='Pretrained', checkpoint='')),
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
neck=None,
head=dict(
type='LinearClsHead',

View File

@@ -20,7 +20,7 @@ model = dict(
frozen_stages=24,
out_type='cls_token',
final_norm=True,
init_cfg=dict(type='Pretrained', checkpoint='')),
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
neck=dict(type='ClsBatchNormNeck', input_features=1024),
head=dict(
type='VisionTransformerClsHead',

View File

@@ -56,7 +56,7 @@ model = dict(
drop_path_rate=0.1,
out_type='avg_featmap',
final_norm=False,
init_cfg=dict(type='Pretrained', checkpoint='')),
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
neck=None,
head=dict(
type='LinearClsHead',

View File

@@ -58,7 +58,7 @@ model = dict(
drop_path_rate=0.1,
out_type='avg_featmap',
final_norm=False,
init_cfg=dict(type='Pretrained', checkpoint='')),
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
neck=None,
head=dict(
type='LinearClsHead',

View File

@@ -19,7 +19,7 @@ model = dict(
frozen_stages=12,
out_type='cls_token',
final_norm=True,
init_cfg=dict(type='Pretrained', checkpoint='')),
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
neck=dict(type='ClsBatchNormNeck', input_features=768),
head=dict(
type='VisionTransformerClsHead',

View File

@@ -86,6 +86,10 @@ val_dataloader = dict(
)
test_dataloader = val_dataloader
model = dict(
backbone=dict(
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))
# optimizer
optim_wrapper = dict(
type='OptimWrapper',

View File

@@ -5,7 +5,10 @@ _base_ = [
'../../_base_/default_runtime.py',
]
model = dict(backbone=dict(frozen_stages=4))
model = dict(
backbone=dict(
frozen_stages=4,
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))
# optimizer
optim_wrapper = dict(

View File

@@ -8,7 +8,11 @@ _base_ = [
# dataset settings
train_dataloader = dict(batch_size=128)
model = dict(backbone=dict(frozen_stages=4, norm_eval=True))
model = dict(
backbone=dict(
frozen_stages=4,
norm_eval=True,
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))
# optimizer
optim_wrapper = dict(

View File

@@ -16,7 +16,8 @@ model = dict(
patch_size=16,
stop_grad_conv1=True,
frozen_stages=12,
norm_eval=True),
norm_eval=True,
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
head=dict(
type='VisionTransformerClsHead',
num_classes=1000,

View File

@@ -12,7 +12,7 @@ model = dict(
img_size=224,
patch_size=16,
drop_path_rate=0.1,
),
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
neck=None,
head=dict(
type='VisionTransformerClsHead',

View File

@@ -12,7 +12,7 @@ model = dict(
img_size=224,
patch_size=16,
drop_path_rate=0.5,
),
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
neck=None,
head=dict(
type='VisionTransformerClsHead',

View File

@@ -16,7 +16,8 @@ model = dict(
patch_size=16,
stop_grad_conv1=True,
frozen_stages=12,
norm_eval=True),
norm_eval=True,
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
head=dict(
type='VisionTransformerClsHead',
num_classes=1000,

View File

@@ -5,7 +5,10 @@ _base_ = [
'../../_base_/default_runtime.py',
]
model = dict(backbone=dict(frozen_stages=4))
model = dict(
backbone=dict(
frozen_stages=4,
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))
# dataset summary
train_dataloader = dict(batch_size=512)

View File

@@ -9,7 +9,8 @@ model = dict(
backbone=dict(
img_size=192,
drop_path_rate=0.1,
stage_cfgs=dict(block_cfgs=dict(window_size=6))))
stage_cfgs=dict(block_cfgs=dict(window_size=6)),
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))
# optimizer settings
optim_wrapper = dict(

View File

@@ -52,7 +52,8 @@ model = dict(
backbone=dict(
img_size=224,
drop_path_rate=0.1,
stage_cfgs=dict(block_cfgs=dict(window_size=7))))
stage_cfgs=dict(block_cfgs=dict(window_size=7)),
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))
# optimizer settings
optim_wrapper = dict(

View File

@@ -54,7 +54,8 @@ model = dict(
img_size=224,
drop_path_rate=0.2,
stage_cfgs=dict(block_cfgs=dict(window_size=14)),
pad_small_map=True),
pad_small_map=True,
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
head=dict(in_channels=1536))
# optimizer settings

View File

@@ -5,7 +5,10 @@ _base_ = [
'../../_base_/default_runtime.py',
]
model = dict(backbone=dict(frozen_stages=4))
model = dict(
backbone=dict(
frozen_stages=4,
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))
# dataset summary
train_dataloader = dict(batch_size=512)

View File

@@ -55,7 +55,7 @@ model = dict(
drop_path_rate=0.1,
layer_scale_init_value=0.,
use_grn=True,
),
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
head=dict(
type='LinearClsHead',
num_classes=1000,

View File

@@ -48,7 +48,7 @@ model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
drop_path_rate=0.05,
),
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
head=dict(
loss=dict(
type='LabelSmoothLoss', label_smooth_val=0.1, use_sigmoid=True)),

View File

@@ -5,7 +5,10 @@ _base_ = [
'../../_base_/default_runtime.py',
]
model = dict(backbone=dict(frozen_stages=4))
model = dict(
backbone=dict(
frozen_stages=4,
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))
# dataset summary
train_dataloader = dict(batch_size=512)