[Fix] Fix several config file errors in 2.0 (New) (#1994)

* [Fix] Fix several config file errors in 2.0

* change _base_ config file names in configs
MengzhangLI 2022-08-30 20:20:05 +08:00 committed by GitHub
parent ae5c13e927
commit bd1097ac02
17 changed files with 158 additions and 29 deletions

@@ -5,6 +5,7 @@ _base_ = [
]
crop_size = (512, 512)
data_preprocessor = dict(size=crop_size)
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
data_preprocessor=data_preprocessor,
backbone=dict(
@@ -14,8 +15,30 @@ model = dict(
backbone_cfg=dict(type='ResNet', depth=101)),
decode_head=dict(in_channels=1024, channels=1024, num_classes=171),
auxiliary_head=[
dict(in_channels=512, channels=256, num_classes=171),
dict(in_channels=512, channels=256, num_classes=171),
dict(
type='FCNHead',
in_channels=512,
channels=256,
num_convs=1,
num_classes=171,
in_index=1,
norm_cfg=norm_cfg,
concat_input=False,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
dict(
type='FCNHead',
in_channels=512,
channels=256,
num_convs=1,
num_classes=171,
in_index=2,
norm_cfg=norm_cfg,
concat_input=False,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
])
param_scheduler = [
dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000),
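The auxiliary_head entries above were previously abbreviated to dict(in_channels=512, channels=256, num_classes=171); the fix spells out the full FCNHead definitions because list-valued fields in these _base_-style configs are replaced as a whole rather than merged entry by entry, so abbreviated entries would lose the type, in_index, norm_cfg and loss settings from the base. A rough sketch of that merge rule, using an illustrative helper rather than the actual library code:

# Nested dicts merge key by key; anything else, including a list such as
# auxiliary_head, overrides the base value wholesale.
def merge(override, base):
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge(value, merged[key])  # dicts merge recursively
        else:
            merged[key] = value  # lists and scalars replace the base entirely
    return merged

base = dict(auxiliary_head=[
    dict(type='FCNHead', in_channels=128, num_classes=19),
    dict(type='FCNHead', in_channels=128, num_classes=19)])
override = dict(auxiliary_head=[dict(num_classes=171), dict(num_classes=171)])
# The FCNHead type and the other base settings are gone from the result, which
# is why the fixed config repeats the complete head definitions.
print(merge(override, base)['auxiliary_head'])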

@@ -5,12 +5,35 @@ _base_ = [
]
crop_size = (512, 512)
data_preprocessor = dict(size=crop_size)
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
data_preprocessor=data_preprocessor,
decode_head=dict(num_classes=171),
auxiliary_head=[
dict(num_classes=171),
dict(num_classes=171),
dict(
type='FCNHead',
in_channels=128,
channels=64,
num_convs=1,
num_classes=171,
in_index=1,
norm_cfg=norm_cfg,
concat_input=False,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
dict(
type='FCNHead',
in_channels=128,
channels=64,
num_convs=1,
num_classes=171,
in_index=2,
norm_cfg=norm_cfg,
concat_input=False,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
])
param_scheduler = [
dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000),

@@ -5,6 +5,7 @@ _base_ = [
]
crop_size = (512, 512)
data_preprocessor = dict(size=crop_size)
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
data_preprocessor=data_preprocessor,
backbone=dict(
@@ -14,8 +15,30 @@ model = dict(
backbone_cfg=dict(type='ResNet', depth=50)),
decode_head=dict(in_channels=1024, channels=1024, num_classes=171),
auxiliary_head=[
dict(in_channels=512, channels=256, num_classes=171),
dict(in_channels=512, channels=256, num_classes=171),
dict(
type='FCNHead',
in_channels=512,
channels=256,
num_convs=1,
num_classes=171,
in_index=1,
norm_cfg=norm_cfg,
concat_input=False,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
dict(
type='FCNHead',
in_channels=512,
channels=256,
num_convs=1,
num_classes=171,
in_index=2,
norm_cfg=norm_cfg,
concat_input=False,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
])
param_scheduler = [
dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000),

@@ -1,6 +1,2 @@
_base_ = './deeplabv3plus_r50-d8_4xb4-80k_loveda-512x512.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnet101_v1c')))
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
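Here the explicit backbone init_cfg is replaced by the top-level pretrained shortcut. A quick way to sanity-check one of these merged configs, assuming it is run from a checkout where the config files exist (the path below is a guess based on the _base_ reference, not something stated in this commit):

from mmengine import Config

cfg = Config.fromfile(
    'configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_loveda-512x512.py')
print(cfg.model.get('pretrained'))         # checkpoint declared at the model level
print(cfg.model.backbone.get('init_cfg'))  # should not point at the same weights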

@@ -1,9 +1,7 @@
_base_ = './deeplabv3plus_r50-d8_4xb4-80k_loveda-512x512.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnet18_v1c')),
pretrained='open-mmlab://resnet18_v1c',
backbone=dict(depth=18),
decode_head=dict(
c1_in_channels=64,
c1_channels=12,

@@ -1,9 +1,7 @@
_base_ = './fcn_hr18_4xb4-80k_loveda-512x512.py'
model = dict(
pretrained='open-mmlab://msra/hrnetv2_w18_small',
backbone=dict(
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://msra/hrnetv2_w18_small'),
extra=dict(
stage1=dict(num_blocks=(2, )),
stage2=dict(num_blocks=(2, 2)),

@@ -1,8 +1,7 @@
_base_ = './fcn_hr18_4xb4-80k_loveda-512x512.py'
model = dict(
pretrained='open-mmlab://msra/hrnetv2_w48',
backbone=dict(
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w48'),
extra=dict(
stage2=dict(num_channels=(48, 96)),
stage3=dict(num_channels=(48, 96, 192)),

@@ -1,6 +1,2 @@
_base_ = './pspnet_r50-d8_4xb4-80k_loveda-512x512.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnet101_v1c')))
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))

@@ -1,9 +1,7 @@
_base_ = './pspnet_r50-d8_4xb4-80k_loveda-512x512.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnet18_v1c')),
pretrained='open-mmlab://resnet18_v1c',
backbone=dict(depth=18),
decode_head=dict(
in_channels=512,
channels=128,

@@ -0,0 +1,10 @@
_base_ = [
'swin-large-patch4-window7-in22k-pre_upernet_'
'8xb2-160k_ade20k-512x512.py'
]
checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_large_patch4_window12_384_22k_20220412-6580f57d.pth' # noqa
model = dict(
backbone=dict(
init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file),
pretrain_img_size=384,
window_size=12))

@@ -0,0 +1,15 @@
_base_ = [
'swin-tiny-patch4-window7-in1k-pre_upernet_8xb2-160k_'
'ade20k-512x512.py'
]
checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_large_patch4_window7_224_22k_20220412-aeecf2aa.pth' # noqa
model = dict(
backbone=dict(
init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file),
pretrain_img_size=224,
embed_dims=192,
depths=[2, 2, 18, 2],
num_heads=[6, 12, 24, 48],
window_size=7),
decode_head=dict(in_channels=[192, 384, 768, 1536], num_classes=150),
auxiliary_head=dict(in_channels=768, num_classes=150))
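For reference, the overridden in_channels follow directly from the backbone: Swin doubles its embedding dimension at each of the four stages, so with embed_dims=192 the stage outputs are 192, 384, 768 and 1536, and the auxiliary head reads the third stage. A small sanity check in plain arithmetic:

embed_dims = 192  # Swin-Large
stage_channels = [embed_dims * 2 ** i for i in range(4)]
print(stage_channels)     # [192, 384, 768, 1536] == decode_head.in_channels
print(stage_channels[2])  # 768 == auxiliary_head.in_channels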

@@ -0,0 +1,6 @@
_base_ = './upernet_r50_4xb2-40k_cityscapes-512x1024.py'
model = dict(
pretrained='open-mmlab://resnet18_v1c',
backbone=dict(depth=18),
decode_head=dict(in_channels=[64, 128, 256, 512]),
auxiliary_head=dict(in_channels=256))
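The in_channels overrides in these ResNet-18 variants exist because ResNet-18 is built from BasicBlocks (expansion 1), so its four stages emit 64, 128, 256 and 512 channels instead of the 256, 512, 1024 and 2048 that the ResNet-50 base config assumes; the auxiliary head keeps reading the third stage. A small sketch of that relationship using the standard ResNet expansion factors:

base_widths = [64, 128, 256, 512]
expansion = {18: 1, 50: 4}  # BasicBlock vs Bottleneck
for depth in (18, 50):
    print(depth, [w * expansion[depth] for w in base_widths])
# 18 -> [64, 128, 256, 512]   (decode_head.in_channels; auxiliary head uses 256)
# 50 -> [256, 512, 1024, 2048]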

@@ -0,0 +1,6 @@
_base_ = './upernet_r50_4xb2-80k_cityscapes-512x1024.py'
model = dict(
pretrained='open-mmlab://resnet18_v1c',
backbone=dict(depth=18),
decode_head=dict(in_channels=[64, 128, 256, 512]),
auxiliary_head=dict(in_channels=256))

@@ -0,0 +1,9 @@
_base_ = [
'../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
pretrained='open-mmlab://resnet18_v1c',
backbone=dict(depth=18),
decode_head=dict(in_channels=[64, 128, 256, 512], num_classes=150),
auxiliary_head=dict(in_channels=256, num_classes=150))

@@ -0,0 +1,10 @@
_base_ = [
'../_base_/models/upernet_r50.py',
'../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
'../_base_/schedules/schedule_20k.py'
]
model = dict(
pretrained='open-mmlab://resnet18_v1c',
backbone=dict(depth=18),
decode_head=dict(in_channels=[64, 128, 256, 512], num_classes=21),
auxiliary_head=dict(in_channels=256, num_classes=21))

@@ -0,0 +1,10 @@
_base_ = [
'../_base_/models/upernet_r50.py',
'../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
'../_base_/schedules/schedule_40k.py'
]
model = dict(
pretrained='open-mmlab://resnet18_v1c',
backbone=dict(depth=18),
decode_head=dict(in_channels=[64, 128, 256, 512], num_classes=21),
auxiliary_head=dict(in_channels=256, num_classes=21))

@@ -0,0 +1,9 @@
_base_ = [
'../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
pretrained='open-mmlab://resnet18_v1c',
backbone=dict(depth=18),
decode_head=dict(in_channels=[64, 128, 256, 512], num_classes=150),
auxiliary_head=dict(in_channels=256, num_classes=150))
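Across the new UPerNet files, the model stays the same and only num_classes moves with the dataset. The values below are read directly off the diffs above (the Cityscapes variants keep whatever the upernet_r50 base model config defines):

# num_classes per dataset base file, as used in the configs in this commit.
num_classes = {
    '../_base_/datasets/ade20k.py': 150,
    '../_base_/datasets/pascal_voc12_aug.py': 21,  # 20 classes plus background
}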