# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained=None,
    backbone=dict(
        type='BiSeNetV2',
        detail_channels=(64, 64, 128),
        semantic_channels=(16, 32, 64, 128),
        semantic_expansion_ratio=6,
        bga_channels=128,
        out_indices=(0, 1, 2, 3, 4),
        init_cfg=None,
        align_corners=False),
    decode_head=dict(
        type='FCNHead',
        in_channels=128,
        in_index=0,
        channels=1024,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=[
        dict(
            type='FCNHead',
            in_channels=16,
            channels=16,
            num_convs=2,
            num_classes=19,
            in_index=1,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
        dict(
            type='FCNHead',
            in_channels=32,
            channels=64,
            num_convs=2,
            num_classes=19,
            in_index=2,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
        dict(
            type='FCNHead',
            in_channels=64,
            channels=256,
            num_convs=2,
            num_classes=19,
            in_index=3,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
        dict(
            type='FCNHead',
            in_channels=128,
            channels=1024,
            num_convs=2,
            num_classes=19,
            in_index=4,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    ],
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
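# ---------------------------------------------------------------------------
# Usage sketch (an assumption, not part of the original config): the guarded
# block below shows one way this config could be turned into a model with the
# MMSegmentation 0.x / mmcv 1.x API (`mmcv.Config.fromfile` plus
# `mmseg.models.build_segmentor`). The config path is hypothetical; point it
# at wherever this file actually lives. `revert_sync_batchnorm` swaps the
# SyncBN layers for plain BN so the model can be instantiated without a
# distributed process group (e.g. on CPU or a single GPU).
if __name__ == '__main__':
    from mmcv import Config
    from mmcv.cnn.utils import revert_sync_batchnorm
    from mmseg.models import build_segmentor

    # Hypothetical path to this config file.
    cfg = Config.fromfile('configs/_base_/models/bisenetv2.py')
    # train_cfg/test_cfg are already nested inside cfg.model in this config.
    segmentor = build_segmentor(cfg.model)
    segmentor = revert_sync_batchnorm(segmentor)
    segmentor.init_weights()
    print(segmentor)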