# mirror of https://github.com/NVlabs/SegFormer.git
_base_ = [
    '../../_base_/models/segformer.py',
    '../../_base_/default_runtime.py',
    '../../_base_/schedules/schedule_160k_adamw.py'
]

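# Everything defined below overrides the matching entries inherited from the
# _base_ configs above (standard mmcv config inheritance).
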
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
find_unused_parameters = True
model = dict(
    type='EncoderDecoder',
    pretrained='pretrained/mit_b0.pth',
    backbone=dict(
        type='mit_b0',
        style='pytorch'),
    decode_head=dict(
        type='SegFormerHead',
        in_channels=[32, 64, 160, 256],
        in_index=[0, 1, 2, 3],
        feature_strides=[4, 8, 16, 32],
        channels=128,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        decoder_params=dict(embed_dim=256),
        loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(),
    # test_cfg=dict(mode='whole'))
    test_cfg=dict(mode='slide', crop_size=(768, 768), stride=(768, 768)))

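# The decode head fuses the four backbone feature maps (strides 4/8/16/32)
# through the all-MLP decoder (embed_dim=256) into the 19 Cityscapes classes.
# Inference runs in sliding-window mode with 768x768 windows and a 768-pixel
# stride, i.e. non-overlapping tiles; the commented-out 'whole' mode would
# instead feed the full image through the network in one pass.
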
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
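# Standard ImageNet mean/std (in 0-255 range), matching the ImageNet-pretrained
# MiT backbone; to_rgb=True converts the BGR images loaded by mmcv to RGB.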
crop_size = (768, 768)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(1536, 768), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]

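# Training augmentation: each image is rescaled by a random factor in
# [0.5, 2.0] of the 1536x768 base scale, a 768x768 crop is taken in which no
# single class may cover more than 75% of the pixels (cat_max_ratio), then
# random horizontal flipping and photometric jitter are applied. Padded label
# regions use the ignore index 255.
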
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1536, 768),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]

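# Single-scale evaluation: only the 1536x768 scale is used and flipping is
# disabled. Uncommenting img_ratios (and/or setting flip=True) enables
# multi-scale / flip test-time augmentation via MultiScaleFlipAug.
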
data = dict(
    samples_per_gpu=1,
    workers_per_gpu=2,
    train=dict(
        type='RepeatDataset',
        times=500,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            img_dir='leftImg8bit/train',
            ann_dir='gtFine/train',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='leftImg8bit/val',
        ann_dir='gtFine/val',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='leftImg8bit/val',
        ann_dir='gtFine/val',
        pipeline=test_pipeline))

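# samples_per_gpu=1 means the effective batch size equals the number of GPUs
# used for training. Wrapping the train split in RepeatDataset(times=500)
# repeats the annotation list so the iteration-based runner avoids frequent
# dataloader restarts between passes over the data.
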
evaluation = dict(interval=4000, metric='mIoU')
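# mIoU on the val split is computed every 4000 iterations, i.e. 40 times over
# the 160k-iteration schedule inherited from schedule_160k_adamw.py.
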
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
                 paramwise_cfg=dict(custom_keys={'pos_block': dict(decay_mult=0.),
                                                 'norm': dict(decay_mult=0.),
                                                 'head': dict(lr_mult=10.)
                                                 }))

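# _delete_=True replaces the optimizer inherited from the base schedule instead
# of merging with it. The custom_keys entries match parameter names: weight
# decay is disabled for normalization layers and the 'pos_block' modules, and
# the decode head is trained with a 10x learning rate.
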
lr_config = dict(_delete_=True, policy='poly',
                 warmup='linear',
                 warmup_iters=1500,
                 warmup_ratio=1e-6,
                 power=1.0, min_lr=0.0, by_epoch=False)
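
# A minimal sketch of how this config is consumed, kept as comments so the file
# remains a plain mmcv config; it assumes the mmcv/mmseg APIs this repo builds
# on and a pretrained MiT-B0 checkpoint at 'pretrained/mit_b0.pth':
#
#   from mmcv import Config
#   from mmseg.models import build_segmentor
#   from mmseg.datasets import build_dataset
#
#   cfg = Config.fromfile('local_configs/.../this_config.py')  # path is a placeholder
#   model = build_segmentor(cfg.model)            # EncoderDecoder: mit_b0 + SegFormerHead
#   datasets = [build_dataset(cfg.data.train)]    # RepeatDataset over Cityscapes train
#
# In practice training is launched through the repo's tools (tools/train.py or
# tools/dist_train.sh) with this config path as the argument.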