mirror of
https://github.com/open-mmlab/mmsegmentation.git
synced 2025-06-03 22:03:48 +08:00
* BiSeNetV2 first commit * BiSeNetV2 unittest * remove pytest * add pytest module * fix ConvModule input name * fix pytest error * fix unittest * refactor * BiSeNetV2 Refactory * fix docstrings and add some small changes * use_sigmoid=False * fix potential bugs about upsampling * Use ConvModule instead * Use ConvModule instead * fix typos * fix typos * fix typos * discard nn.conv2d * discard nn.conv2d * discard nn.conv2d * delete **kwargs * uploading markdown and model * final commit * BiSeNetV2 adding Unittest for its modules * BiSeNetV2 adding Unittest for its modules * BiSeNetV2 adding Unittest for its modules * BiSeNetV2 adding Unittest for its modules * BiSeNetV2 adding Unittest for its modules * BiSeNetV2 adding Unittest for its modules * BiSeNetV2 adding Unittest for its modules * Fix README conflict * Fix unittest problem * Fix unittest problem * BiSeNetV2 * Fixing fps * Fixing typos * bisenetv2
81 lines
2.4 KiB
Python
81 lines
2.4 KiB
Python
# Model settings for BiSeNetV2 (OpenMMLab mmsegmentation config).
#
# NOTE(review): the scraped source had spurious "|" gutter lines interleaved
# with every statement, which made it invalid Python; this restores the
# original, runnable config with the values exactly as they appeared.

# SyncBN keeps batch-norm statistics synchronized across GPUs during
# distributed training; requires_grad=True makes affine params trainable.
norm_cfg = dict(type='SyncBN', requires_grad=True)

model = dict(
    type='EncoderDecoder',
    pretrained=None,  # BiSeNetV2 is trained from scratch here
    backbone=dict(
        type='BiSeNetV2',
        # Channels of the three stages of the detail (spatial) branch.
        detail_channels=(64, 64, 128),
        # Channels of the four stages of the semantic branch.
        semantic_channels=(16, 32, 64, 128),
        # Expansion ratio inside the semantic branch's gather-and-expand
        # layers (presumably per the BiSeNetV2 paper — confirm in backbone).
        semantic_expansion_ratio=6,
        # Channels of the bilateral guided aggregation (BGA) layer that
        # fuses the two branches.
        bga_channels=128,
        # Output all five feature maps: index 0 is the fused BGA output
        # (fed to the decode head), 1-4 are semantic-branch stages used
        # by the booster auxiliary heads below.
        out_indices=(0, 1, 2, 3, 4),
        init_cfg=None,
        align_corners=False),
    decode_head=dict(
        type='FCNHead',
        in_channels=128,  # matches bga_channels
        in_index=0,       # consumes the fused BGA feature map
        channels=1024,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=19,   # Cityscapes classes
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    # "Booster" auxiliary heads: one per semantic-branch stage (indices
    # 1-4), used only during training to strengthen supervision. Their
    # in_channels mirror semantic_channels above.
    auxiliary_head=[
        dict(
            type='FCNHead',
            in_channels=16,
            channels=16,
            num_convs=2,
            num_classes=19,
            in_index=1,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
        dict(
            type='FCNHead',
            in_channels=32,
            channels=64,
            num_convs=2,
            num_classes=19,
            in_index=2,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
        dict(
            type='FCNHead',
            in_channels=64,
            channels=256,
            num_convs=2,
            num_classes=19,
            in_index=3,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
        dict(
            type='FCNHead',
            in_channels=128,
            channels=1024,
            num_convs=2,
            num_classes=19,
            in_index=4,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    ],
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))  # whole-image inference (no sliding window)