_base_ = [
    '../_base_/models/dpt_vit-b16.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
data_preprocessor = dict(size=crop_size)
model = dict(data_preprocessor=data_preprocessor)
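# `size=crop_size` tells the data preprocessor to pad batched inputs to a
# fixed 512x512, matching the crop size used by the ADE20K training pipeline.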
# AdamW optimizer, no weight decay for position embedding & layer norm
# in backbone
optim_wrapper = dict(
    _delete_=True,
    type='OptimWrapper',
    optimizer=dict(
        type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01),
    paramwise_cfg=dict(
        custom_keys={
            'pos_embed': dict(decay_mult=0.),
            'cls_token': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.)
        }))
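# `_delete_=True` discards the optimizer settings inherited from
# `schedule_160k.py` (SGD) instead of merging with them. Entries in
# `custom_keys` are matched as substrings of parameter names, so
# `decay_mult=0.` turns off weight decay for the position embedding, the
# class token, and every norm layer.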
param_scheduler = [
    dict(
        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
    dict(
        type='PolyLR',
        eta_min=0.0,
        power=1.0,
        begin=1500,
        end=160000,
        by_epoch=False,
    )
]
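# The lr warms up linearly from lr * 1e-6 to the base lr over the first
# 1500 iterations; PolyLR with power=1.0 then decays it linearly to 0
# by iteration 160k.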
# By default, models are trained on 8 GPUs with 2 images per GPU
train_dataloader = dict(batch_size=2, num_workers=2)
val_dataloader = dict(batch_size=1, num_workers=4)
test_dataloader = val_dataloader
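# `batch_size` is per GPU, so the effective train batch size is 8 x 2 = 16;
# with a different GPU count the effective batch size (and likely the lr)
# should be adjusted to match.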