diff --git a/.gitignore b/.gitignore
index 04e7fa76f..2104106fb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -114,10 +114,11 @@ data
 *.log.json
 work_dirs/
 workdirs/
-configs_unify/
-results/
 
 # Pytorch
 *.pth
 
+# nohup output files
+*.file
+
 
diff --git a/configs/fastscnn/fast_scnn_4x3_1000e_cityscapes.py b/configs/fastscnn/fast_scnn_4x3_1000e_cityscapes.py
index bbb5361bd..6e6010e73 100644
--- a/configs/fastscnn/fast_scnn_4x3_1000e_cityscapes.py
+++ b/configs/fastscnn/fast_scnn_4x3_1000e_cityscapes.py
@@ -55,7 +55,7 @@ lr_config = dict(
 )
 # runtime settings
 # total_epochs = 1000
-total_iters = 247917
-evaluation = dict(interval=100, metric='mIoU')
-checkpoint_config = dict(interval=100)
+total_iters = 247000
+evaluation = dict(interval=1000, metric='mIoU')
+checkpoint_config = dict(interval=1000)
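+# Assuming the standard 2975-image Cityscapes fine train split and 4 GPUs x 3
+# imgs/GPU (12 imgs/iter), 1000 epochs is 2975 * 1000 / 12 ~= 247917 iters,
+# rounded down here to 247000.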
 
diff --git a/configs/fastscnn/fast_scnn_4x8_80k_cityscapes.py b/configs/fastscnn/fast_scnn_4x8_80k_cityscapes.py
new file mode 100644
index 000000000..51d2d61eb
--- /dev/null
+++ b/configs/fastscnn/fast_scnn_4x8_80k_cityscapes.py
@@ -0,0 +1,61 @@
+_base_ = [
+    '../_base_/models/fast_scnn.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py'
+]
+crop_size = (512, 1024)
+cudnn_benchmark = True
+# model training and testing settings
+train_cfg = dict()
+test_cfg = dict(mode='whole')
+
+# TODO: confirm whether 'LoadAnnotations' needs an explicit 'with_seg' flag here.
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
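+# ImageNet RGB mean/std scaled to the 0-255 range:
+# (0.485, 0.456, 0.406) * 255 and (0.229, 0.224, 0.225) * 255.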
+train_pipeline = [
+    dict(type='LoadImageFromFile', to_float32=True),
+    dict(type='LoadAnnotations'),   # with_seg=True
+    dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='PhotoMetricDistortion'),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='RandomCrop', crop_size=crop_size),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(2048, 1024),
+        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
+data = dict(
+    samples_per_gpu=8,
+    workers_per_gpu=4,
+    train=dict(pipeline=train_pipeline),
+    val=dict(pipeline=test_pipeline),
+    test=dict(pipeline=test_pipeline))
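+# Effective batch size is 32, assuming 4 GPUs as the '4x8' in the file name
+# suggests (4 GPUs x 8 samples_per_gpu).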
+
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=4e-5)
+optimizer_config = dict()
+# learning policy
+lr_config = dict(
+    policy='poly',
+    power=0.9,
+    by_epoch=False,
+)
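+# Poly schedule as implemented by mmcv's poly LR hook (min_lr defaults to 0):
+# lr = base_lr * (1 - iter / total_iters) ** power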
+# runtime settings
+# total_epochs = 1000
+total_iters = 80000
+evaluation = dict(interval=8000, metric='mIoU')
+checkpoint_config = dict(interval=8000)
+
diff --git a/configs_unify/_base_/datasets/ade.py b/configs_unify/_base_/datasets/ade.py
new file mode 100644
index 000000000..5ea2aac05
--- /dev/null
+++ b/configs_unify/_base_/datasets/ade.py
@@ -0,0 +1,52 @@
+# dataset settings
+dataset_type = 'ADEDataset'
+data_root = 'data/ade/ADEChallengeData2016'
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+crop_size = (521, 521)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_seg=True),
+    dict(type='Resize', img_scale=(2049, 521), ratio_range=(0.5, 2.0)),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='RandomCrop', crop_size=crop_size),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(2049, 521),
+        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
+data = dict(
+    samples_per_gpu=2,
+    workers_per_gpu=2,
+    train=dict(
+        type=dataset_type,
+        data_root=data_root,
+        img_dir='images/training',
+        ann_dir='annotations/training',
+        pipeline=train_pipeline),
+    val=dict(
+        type=dataset_type,
+        data_root=data_root,
+        img_dir='images/validation',
+        ann_dir='annotations/validation',
+        pipeline=test_pipeline),
+    test=dict(
+        type=dataset_type,
+        data_root=data_root,
+        img_dir='images/validation',
+        ann_dir='annotations/validation',
+        pipeline=test_pipeline))
diff --git a/configs_unify/_base_/datasets/cityscapes.py b/configs_unify/_base_/datasets/cityscapes.py
new file mode 100644
index 000000000..c256a1a62
--- /dev/null
+++ b/configs_unify/_base_/datasets/cityscapes.py
@@ -0,0 +1,56 @@
+# dataset settings
+dataset_type = 'CityscapesDataset'
+data_root = 'data/cityscapes/'
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+crop_size = (769, 769)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations'),
+    dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='PhotoMetricDistortion'),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+]
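+# cat_max_ratio=0.75 makes RandomCrop re-sample the crop window when a single
+# class would cover more than 75% of it.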
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(2049, 1025),
+        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
+data = dict(
+    samples_per_gpu=2,
+    workers_per_gpu=2,
+    train=dict(
+        type=dataset_type,
+        data_root=data_root,
+        img_dir='leftImg8bit/train',
+        ann_dir='gtFine/train',
+        split='splits/train.txt',
+        pipeline=train_pipeline),
+    val=dict(
+        type=dataset_type,
+        data_root=data_root,
+        img_dir='leftImg8bit/val',
+        ann_dir='gtFine/val',
+        split='splits/val.txt',
+        pipeline=test_pipeline),
+    test=dict(
+        type=dataset_type,
+        data_root=data_root,
+        img_dir='leftImg8bit/val',
+        ann_dir='gtFine/val',
+        split='splits/val.txt',
+        pipeline=test_pipeline))
diff --git a/configs_unify/_base_/datasets/pascal_context.py b/configs_unify/_base_/datasets/pascal_context.py
new file mode 100644
index 000000000..1c1870012
--- /dev/null
+++ b/configs_unify/_base_/datasets/pascal_context.py
@@ -0,0 +1,24 @@
+# dataset settings
+dataset_type = 'PascalContextDataset'
+data_root = 'data/VOCdevkit/VOC2010'
+data = dict(
+    samples_per_gpu=2,
+    workers_per_gpu=2,
+    train=dict(
+        type=dataset_type,
+        data_root=data_root,
+        img_dir='JPEGImages',
+        ann_dir='SegmentationClassContext',
+        split='ImageSets/SegmentationContext/train.txt'),
+    val=dict(
+        type=dataset_type,
+        data_root=data_root,
+        img_dir='JPEGImages',
+        ann_dir='SegmentationClassContext',
+        split='ImageSets/SegmentationContext/val.txt'),
+    test=dict(
+        type=dataset_type,
+        data_root=data_root,
+        img_dir='JPEGImages',
+        ann_dir='SegmentationClassContext',
+        split='ImageSets/SegmentationContext/val.txt'))
diff --git a/configs_unify/_base_/datasets/pascal_voc12.py b/configs_unify/_base_/datasets/pascal_voc12.py
new file mode 100644
index 000000000..8c2442330
--- /dev/null
+++ b/configs_unify/_base_/datasets/pascal_voc12.py
@@ -0,0 +1,55 @@
+# dataset settings
+dataset_type = 'PascalVOCDataset'
+data_root = 'data/VOCdevkit/VOC2012'
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+crop_size = (521, 521)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_seg=True),
+    dict(type='Resize', img_scale=(2049, 521), ratio_range=(0.5, 2.0)),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='RandomCrop', crop_size=crop_size),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(2049, 521),
+        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
+data = dict(
+    samples_per_gpu=2,
+    workers_per_gpu=2,
+    train=dict(
+        type=dataset_type,
+        data_root=data_root,
+        img_dir='JPEGImages',
+        ann_dir='SegmentationClass',
+        split='ImageSets/Segmentation/train.txt',
+        pipeline=train_pipeline),
+    val=dict(
+        type=dataset_type,
+        data_root=data_root,
+        img_dir='JPEGImages',
+        ann_dir='SegmentationClass',
+        split='ImageSets/Segmentation/val.txt',
+        pipeline=test_pipeline),
+    test=dict(
+        type=dataset_type,
+        data_root=data_root,
+        img_dir='JPEGImages',
+        ann_dir='SegmentationClass',
+        split='ImageSets/Segmentation/val.txt',
+        pipeline=test_pipeline))
diff --git a/configs_unify/_base_/datasets/pascal_voc12_aug.py b/configs_unify/_base_/datasets/pascal_voc12_aug.py
new file mode 100644
index 000000000..3f23b6717
--- /dev/null
+++ b/configs_unify/_base_/datasets/pascal_voc12_aug.py
@@ -0,0 +1,9 @@
+_base_ = './pascal_voc12.py'
+# dataset settings
+data = dict(
+    train=dict(
+        ann_dir=['SegmentationClass', 'SegmentationClassAug'],
+        split=[
+            'ImageSets/Segmentation/train.txt',
+            'ImageSets/Segmentation/aug.txt'
+        ]))
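+# Passing lists pairs ann_dir and split element-wise, so training uses both the
+# original VOC labels and the augmented (SBD-style) set listed in aug.txt.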
diff --git a/configs_unify/_base_/default_runtime.py b/configs_unify/_base_/default_runtime.py
new file mode 100644
index 000000000..a8434e3f4
--- /dev/null
+++ b/configs_unify/_base_/default_runtime.py
@@ -0,0 +1,14 @@
+# yapf:disable
+log_config = dict(
+    interval=50,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        # dict(type='TensorboardLoggerHook')
+    ])
+# yapf:enable
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
+cudnn_benchmark = True
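+# workflow [('train', 1)] runs training only (no val phase in the runner);
+# cudnn_benchmark helps here since the training crops have a fixed size.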
diff --git a/configs_unify/_base_/models/ann_r50.py b/configs_unify/_base_/models/ann_r50.py
new file mode 100644
index 000000000..812c29880
--- /dev/null
+++ b/configs_unify/_base_/models/ann_r50.py
@@ -0,0 +1,44 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+    type='EncoderDecoder',
+    pretrained='pretrain_model/resnet50_v1c-66047269.pth',
+    backbone=dict(
+        type='ResNetV1c',
+        depth=50,
+        num_stages=4,
+        out_indices=(0, 1, 2, 3),
+        dilations=(1, 1, 2, 4),
+        strides=(1, 2, 1, 1),
+        norm_cfg=norm_cfg,
+        norm_eval=False,
+        style='pytorch',
+        contract_dilation=True),
+    decode_head=dict(
+        type='ANNHead',
+        in_channels=[1024, 2048],
+        in_index=[2, 3],
+        channels=512,
+        project_channels=256,
+        query_scales=(1, ),
+        key_pool_scales=(1, 3, 6, 8),
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+    auxiliary_head=dict(
+        type='FCNHead',
+        in_channels=1024,
+        in_index=2,
+        channels=256,
+        num_convs=1,
+        concat_input=False,
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)))
+# model training and testing settings
+train_cfg = dict()
+test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513))
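+# 'slide' inference tiles the image into 769x769 crops with stride 513 and
+# averages logits in the overlapping regions; 'whole' would be a single pass.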
diff --git a/configs_unify/_base_/models/cc_r50.py b/configs_unify/_base_/models/cc_r50.py
new file mode 100644
index 000000000..5335bf8f4
--- /dev/null
+++ b/configs_unify/_base_/models/cc_r50.py
@@ -0,0 +1,42 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+    type='EncoderDecoder',
+    pretrained='pretrain_model/resnet50_v1c-66047269.pth',
+    backbone=dict(
+        type='ResNetV1c',
+        depth=50,
+        num_stages=4,
+        out_indices=(0, 1, 2, 3),
+        dilations=(1, 1, 2, 4),
+        strides=(1, 2, 1, 1),
+        norm_cfg=norm_cfg,
+        norm_eval=False,
+        style='pytorch',
+        contract_dilation=True),
+    decode_head=dict(
+        type='CCHead',
+        in_channels=2048,
+        in_index=3,
+        channels=512,
+        recurrence=2,
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+    auxiliary_head=dict(
+        type='FCNHead',
+        in_channels=1024,
+        in_index=2,
+        channels=256,
+        num_convs=1,
+        concat_input=False,
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)))
+# model training and testing settings
+train_cfg = dict()
+test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513))
diff --git a/configs_unify/_base_/models/da_r50.py b/configs_unify/_base_/models/da_r50.py
new file mode 100644
index 000000000..4d5cc7fe1
--- /dev/null
+++ b/configs_unify/_base_/models/da_r50.py
@@ -0,0 +1,42 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+    type='EncoderDecoder',
+    pretrained='pretrain_model/resnet50_v1c-66047269.pth',
+    backbone=dict(
+        type='ResNetV1c',
+        depth=50,
+        num_stages=4,
+        out_indices=(0, 1, 2, 3),
+        dilations=(1, 1, 2, 4),
+        strides=(1, 2, 1, 1),
+        norm_cfg=norm_cfg,
+        norm_eval=False,
+        style='pytorch',
+        contract_dilation=True),
+    decode_head=dict(
+        type='DAHead',
+        in_channels=2048,
+        in_index=3,
+        channels=512,
+        pam_channels=64,
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+    auxiliary_head=dict(
+        type='FCNHead',
+        in_channels=1024,
+        in_index=2,
+        channels=256,
+        num_convs=1,
+        concat_input=False,
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)))
+# model training and testing settings
+train_cfg = dict()
+test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513))
diff --git a/configs_unify/_base_/models/deeplabv3_r50.py b/configs_unify/_base_/models/deeplabv3_r50.py
new file mode 100644
index 000000000..7941824a8
--- /dev/null
+++ b/configs_unify/_base_/models/deeplabv3_r50.py
@@ -0,0 +1,42 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+    type='EncoderDecoder',
+    pretrained='pretrain_model/resnet50_v1c-66047269.pth',
+    backbone=dict(
+        type='ResNetV1c',
+        depth=50,
+        num_stages=4,
+        out_indices=(0, 1, 2, 3),
+        dilations=(1, 1, 2, 4),
+        strides=(1, 2, 1, 1),
+        norm_cfg=norm_cfg,
+        norm_eval=False,
+        style='pytorch',
+        contract_dilation=True),
+    decode_head=dict(
+        type='ASPPHead',
+        in_channels=2048,
+        in_index=3,
+        channels=512,
+        dilations=(1, 12, 24, 36),
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+    auxiliary_head=dict(
+        type='FCNHead',
+        in_channels=1024,
+        in_index=2,
+        channels=256,
+        num_convs=1,
+        concat_input=False,
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)))
+# model training and testing settings
+train_cfg = dict()
+test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513))
diff --git a/configs_unify/_base_/models/deeplabv3plus_r50.py b/configs_unify/_base_/models/deeplabv3plus_r50.py
new file mode 100644
index 000000000..8dc04ba9c
--- /dev/null
+++ b/configs_unify/_base_/models/deeplabv3plus_r50.py
@@ -0,0 +1,44 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+    type='EncoderDecoder',
+    pretrained='pretrain_model/resnet50_v1c-66047269.pth',
+    backbone=dict(
+        type='ResNetV1c',
+        depth=50,
+        num_stages=4,
+        out_indices=(0, 1, 2, 3),
+        dilations=(1, 1, 2, 4),
+        strides=(1, 2, 1, 1),
+        norm_cfg=norm_cfg,
+        norm_eval=False,
+        style='pytorch',
+        contract_dilation=True),
+    decode_head=dict(
+        type='SepASPPHead',
+        in_channels=2048,
+        in_index=3,
+        channels=512,
+        dilations=(1, 12, 24, 36),
+        c1_in_channels=256,
+        c1_channels=48,
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+    auxiliary_head=dict(
+        type='FCNHead',
+        in_channels=1024,
+        in_index=2,
+        channels=256,
+        num_convs=1,
+        concat_input=False,
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)))
+# model training and testing settings
+train_cfg = dict()
+test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513))
diff --git a/configs_unify/_base_/models/fast_scnn.py b/configs_unify/_base_/models/fast_scnn.py
new file mode 100644
index 000000000..6001c18fe
--- /dev/null
+++ b/configs_unify/_base_/models/fast_scnn.py
@@ -0,0 +1,57 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01)
+model = dict(
+    type='EncoderDecoder',
+    backbone=dict(
+        type='FastSCNN',
+        downsample_dw_channels1=32,
+        downsample_dw_channels2=48,
+        global_in_channels=64,
+        global_block_channels=(64, 96, 128),
+        global_out_channels=128,
+        higher_in_channels=64,
+        lower_in_channels=128,
+        fusion_out_channels=128,
+        scale_factor=4,
+        out_indices=(0, 1, 2),
+        norm_cfg=norm_cfg,
+        align_corners=False),
+    decode_head=dict(
+        type='SepFCNHead',
+        in_channels=128,
+        channels=128,
+        concat_input=False,
+        num_classes=19,
+        in_index=-1,
+        norm_cfg=norm_cfg,
+        align_corners=False,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.)),
+    auxiliary_head=[
+        dict(
+            type='FCNHead',
+            in_channels=128,
+            channels=32,
+            num_convs=1,
+            num_classes=19,
+            in_index=-2,
+            norm_cfg=norm_cfg,
+            concat_input=False,
+            align_corners=False,
+            loss_decode=dict(
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+        dict(
+            type='FCNHead',
+            in_channels=64,
+            channels=32,
+            num_convs=1,
+            num_classes=19,
+            in_index=-3,
+            norm_cfg=norm_cfg,
+            concat_input=False,
+            align_corners=False,
+            loss_decode=dict(
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+    ])
+
+total_iters = 10000
\ No newline at end of file
diff --git a/configs_unify/_base_/models/fcn_hr18.py b/configs_unify/_base_/models/fcn_hr18.py
new file mode 100644
index 000000000..40fa92ea9
--- /dev/null
+++ b/configs_unify/_base_/models/fcn_hr18.py
@@ -0,0 +1,49 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01)
+model = dict(
+    type='EncoderDecoder',
+    pretrained='open-mmlab://msra/hrnetv2_w18',
+    backbone=dict(
+        type='HRNet',
+        norm_cfg=norm_cfg,
+        norm_eval=False,
+        extra=dict(
+            stage1=dict(
+                num_modules=1,
+                num_branches=1,
+                block='BOTTLENECK',
+                num_blocks=(4, ),
+                num_channels=(64, )),
+            stage2=dict(
+                num_modules=1,
+                num_branches=2,
+                block='BASIC',
+                num_blocks=(4, 4),
+                num_channels=(18, 36)),
+            stage3=dict(
+                num_modules=4,
+                num_branches=3,
+                block='BASIC',
+                num_blocks=(4, 4, 4),
+                num_channels=(18, 36, 72)),
+            stage4=dict(
+                num_modules=3,
+                num_branches=4,
+                block='BASIC',
+                num_blocks=(4, 4, 4, 4),
+                num_channels=(18, 36, 72, 144)))),
+    decode_head=dict(
+        type='FCNHead',
+        in_channels=[18, 36, 72, 144],
+        in_index=(0, 1, 2, 3),
+        channels=sum([18, 36, 72, 144]),
+        input_transform='resize_concat',
+        kernel_size=1,
+        num_convs=1,
+        concat_input=False,
+        drop_out_ratio=-1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        align_corners=False,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
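+# 'resize_concat' upsamples the four HRNet branch outputs to the largest
+# resolution and concatenates them, hence channels = 18 + 36 + 72 + 144 = 270.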
diff --git a/configs_unify/_base_/models/fcn_r50.py b/configs_unify/_base_/models/fcn_r50.py
new file mode 100644
index 000000000..3db773a1f
--- /dev/null
+++ b/configs_unify/_base_/models/fcn_r50.py
@@ -0,0 +1,43 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+    type='EncoderDecoder',
+    pretrained='pretrain_model/resnet50_v1c-66047269.pth',
+    backbone=dict(
+        type='ResNetV1c',
+        depth=50,
+        num_stages=4,
+        out_indices=(0, 1, 2, 3),
+        dilations=(1, 1, 2, 4),
+        strides=(1, 2, 1, 1),
+        norm_cfg=norm_cfg,
+        norm_eval=False,
+        style='pytorch',
+        contract_dilation=True),
+    decode_head=dict(
+        type='FCNHead',
+        in_channels=2048,
+        in_index=3,
+        channels=512,
+        num_convs=2,
+        concat_input=True,
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+    auxiliary_head=dict(
+        type='FCNHead',
+        in_channels=1024,
+        in_index=2,
+        channels=256,
+        num_convs=1,
+        concat_input=False,
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)))
+# model training and testing settings
+train_cfg = dict()
+test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513))
diff --git a/configs_unify/_base_/models/gc_r50.py b/configs_unify/_base_/models/gc_r50.py
new file mode 100644
index 000000000..0388f77b6
--- /dev/null
+++ b/configs_unify/_base_/models/gc_r50.py
@@ -0,0 +1,44 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+    type='EncoderDecoder',
+    pretrained='pretrain_model/resnet50_v1c-66047269.pth',
+    backbone=dict(
+        type='ResNetV1c',
+        depth=50,
+        num_stages=4,
+        out_indices=(0, 1, 2, 3),
+        dilations=(1, 1, 2, 4),
+        strides=(1, 2, 1, 1),
+        norm_cfg=norm_cfg,
+        norm_eval=False,
+        style='pytorch',
+        contract_dilation=True),
+    decode_head=dict(
+        type='GCHead',
+        in_channels=2048,
+        in_index=3,
+        channels=512,
+        ratio=1 / 4.,
+        pooling_type='att',
+        fusion_types=('channel_add', ),
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+    auxiliary_head=dict(
+        type='FCNHead',
+        in_channels=1024,
+        in_index=2,
+        channels=256,
+        num_convs=1,
+        concat_input=False,
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)))
+# model training and testing settings
+train_cfg = dict()
+test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513))
diff --git a/configs_unify/_base_/models/nl_r50.py b/configs_unify/_base_/models/nl_r50.py
new file mode 100644
index 000000000..26ad02f20
--- /dev/null
+++ b/configs_unify/_base_/models/nl_r50.py
@@ -0,0 +1,44 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+    type='EncoderDecoder',
+    pretrained='pretrain_model/resnet50_v1c-66047269.pth',
+    backbone=dict(
+        type='ResNetV1c',
+        depth=50,
+        num_stages=4,
+        out_indices=(0, 1, 2, 3),
+        dilations=(1, 1, 2, 4),
+        strides=(1, 2, 1, 1),
+        norm_cfg=norm_cfg,
+        norm_eval=False,
+        style='pytorch',
+        contract_dilation=True),
+    decode_head=dict(
+        type='NLHead',
+        in_channels=2048,
+        in_index=3,
+        channels=512,
+        drop_out_ratio=0.1,
+        reduction=2,
+        use_scale=True,
+        mode='embedded_gaussian',
+        norm_cfg=norm_cfg,
+        num_classes=19,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+    auxiliary_head=dict(
+        type='FCNHead',
+        in_channels=1024,
+        in_index=2,
+        channels=256,
+        num_convs=1,
+        concat_input=False,
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)))
+# model training and testing settings
+train_cfg = dict()
+test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513))
diff --git a/configs_unify/_base_/models/ocr_hr18.py b/configs_unify/_base_/models/ocr_hr18.py
new file mode 100644
index 000000000..b3885a78d
--- /dev/null
+++ b/configs_unify/_base_/models/ocr_hr18.py
@@ -0,0 +1,65 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01)
+model = dict(
+    type='CascadeEncoderDecoder',
+    num_stages=2,
+    pretrained='open-mmlab://msra/hrnetv2_w18',
+    backbone=dict(
+        type='HRNet',
+        norm_cfg=norm_cfg,
+        norm_eval=False,
+        extra=dict(
+            stage1=dict(
+                num_modules=1,
+                num_branches=1,
+                block='BOTTLENECK',
+                num_blocks=(4, ),
+                num_channels=(64, )),
+            stage2=dict(
+                num_modules=1,
+                num_branches=2,
+                block='BASIC',
+                num_blocks=(4, 4),
+                num_channels=(18, 36)),
+            stage3=dict(
+                num_modules=4,
+                num_branches=3,
+                block='BASIC',
+                num_blocks=(4, 4, 4),
+                num_channels=(18, 36, 72)),
+            stage4=dict(
+                num_modules=3,
+                num_branches=4,
+                block='BASIC',
+                num_blocks=(4, 4, 4, 4),
+                num_channels=(18, 36, 72, 144)))),
+    decode_head=[
+        dict(
+            type='FCNHead',
+            in_channels=[18, 36, 72, 144],
+            channels=sum([18, 36, 72, 144]),
+            in_index=(0, 1, 2, 3),
+            input_transform='resize_concat',
+            kernel_size=1,
+            num_convs=1,
+            concat_input=False,
+            drop_out_ratio=-1,
+            num_classes=19,
+            norm_cfg=norm_cfg,
+            align_corners=False,
+            loss_decode=dict(
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+        dict(
+            type='OCRHead',
+            in_channels=[18, 36, 72, 144],
+            in_index=(0, 1, 2, 3),
+            input_transform='resize_concat',
+            channels=512,
+            ocr_channels=256,
+            drop_out_ratio=-1,
+            num_classes=19,
+            norm_cfg=norm_cfg,
+            align_corners=False,
+            loss_decode=dict(
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+    ])
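+# CascadeEncoderDecoder runs the heads in order: the FCN head's coarse
+# prediction is fed to the OCR head as soft object regions, and the final
+# output comes from the last (OCR) head.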
diff --git a/configs_unify/_base_/models/psa_r50.py b/configs_unify/_base_/models/psa_r50.py
new file mode 100644
index 000000000..4393b8d37
--- /dev/null
+++ b/configs_unify/_base_/models/psa_r50.py
@@ -0,0 +1,47 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+    type='EncoderDecoder',
+    pretrained='pretrain_model/resnet50_v1c-66047269.pth',
+    backbone=dict(
+        type='ResNetV1c',
+        depth=50,
+        num_stages=4,
+        out_indices=(0, 1, 2, 3),
+        dilations=(1, 1, 2, 4),
+        strides=(1, 2, 1, 1),
+        norm_cfg=norm_cfg,
+        norm_eval=False,
+        style='pytorch',
+        contract_dilation=True),
+    decode_head=dict(
+        type='PSAHead',
+        in_channels=2048,
+        in_index=3,
+        channels=512,
+        mask_size=(97, 97),
+        psa_type='bi-direction',
+        compact=False,
+        shrink_factor=2,
+        normalization_factor=1.0,
+        psa_softmax=True,
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+    auxiliary_head=dict(
+        type='FCNHead',
+        in_channels=1024,
+        in_index=2,
+        channels=256,
+        num_convs=1,
+        concat_input=False,
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)))
+# model training and testing settings
+train_cfg = dict()
+test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513))
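+# mask_size (97, 97) matches the stride-8 feature map of a 769x769 crop,
+# assuming the dilated ResNet keeps an output stride of 8: (769 - 1) / 8 + 1 = 97.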
diff --git a/configs_unify/_base_/models/psp_r50.py b/configs_unify/_base_/models/psp_r50.py
new file mode 100644
index 000000000..7a91e91bf
--- /dev/null
+++ b/configs_unify/_base_/models/psp_r50.py
@@ -0,0 +1,42 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+    type='EncoderDecoder',
+    pretrained='pretrain_model/resnet50_v1c-66047269.pth',
+    backbone=dict(
+        type='ResNetV1c',
+        depth=50,
+        num_stages=4,
+        out_indices=(0, 1, 2, 3),
+        dilations=(1, 1, 2, 4),
+        strides=(1, 2, 1, 1),
+        norm_cfg=norm_cfg,
+        norm_eval=False,
+        style='pytorch',
+        contract_dilation=True),
+    decode_head=dict(
+        type='PSPHead',
+        in_channels=2048,
+        in_index=3,
+        channels=512,
+        pool_scales=(1, 2, 3, 6),
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+    auxiliary_head=dict(
+        type='FCNHead',
+        in_channels=1024,
+        in_index=2,
+        channels=256,
+        num_convs=1,
+        concat_input=False,
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)))
+# model training and testing settings
+train_cfg = dict()
+test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513))
diff --git a/configs_unify/_base_/models/uper_r50.py b/configs_unify/_base_/models/uper_r50.py
new file mode 100644
index 000000000..006ccf7b7
--- /dev/null
+++ b/configs_unify/_base_/models/uper_r50.py
@@ -0,0 +1,42 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+    type='EncoderDecoder',
+    pretrained='pretrain_model/resnet50_v1c-66047269.pth',
+    backbone=dict(
+        type='ResNetV1c',
+        depth=50,
+        num_stages=4,
+        out_indices=(0, 1, 2, 3),
+        dilations=(1, 1, 1, 1),
+        strides=(1, 2, 2, 2),
+        norm_cfg=norm_cfg,
+        norm_eval=False,
+        style='pytorch',
+        contract_dilation=True),
+    decode_head=dict(
+        type='UPerHead',
+        in_channels=[256, 512, 1024, 2048],
+        in_index=[0, 1, 2, 3],
+        pool_scales=(1, 2, 3, 6),
+        channels=512,
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+    auxiliary_head=dict(
+        type='FCNHead',
+        in_channels=1024,
+        in_index=2,
+        channels=256,
+        num_convs=1,
+        concat_input=False,
+        drop_out_ratio=0.1,
+        num_classes=19,
+        norm_cfg=norm_cfg,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)))
+# model training and testing settings
+train_cfg = dict()
+test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513))
diff --git a/configs_unify/_base_/schedules/schedule_220e.py b/configs_unify/_base_/schedules/schedule_220e.py
new file mode 100644
index 000000000..e4db9a0f6
--- /dev/null
+++ b/configs_unify/_base_/schedules/schedule_220e.py
@@ -0,0 +1,14 @@
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
+optimizer_config = dict()
+# learning policy
+lr_config = dict(
+    policy='poly',
+    power=0.9,
+    by_epoch=False,
+)
+# runtime settings
+total_epochs = 220
+checkpoint_config = dict(interval=20)
+evaluation = dict(interval=20, metric='mIoU')
+runner_type = 'epoch'
diff --git a/configs_unify/_base_/schedules/schedule_40ki.py b/configs_unify/_base_/schedules/schedule_40ki.py
new file mode 100644
index 000000000..0a79250f5
--- /dev/null
+++ b/configs_unify/_base_/schedules/schedule_40ki.py
@@ -0,0 +1,14 @@
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
+optimizer_config = dict()
+# learning policy
+lr_config = dict(
+    policy='poly',
+    power=0.9,
+    by_epoch=False,
+)
+# runtime settings
+total_iters = 40000
+checkpoint_config = dict(by_epoch=False, interval=4000)
+evaluation = dict(interval=4000, metric='mIoU')
+runner_type = 'iter'
diff --git a/configs_unify/_base_/schedules/schedule_60ki.py b/configs_unify/_base_/schedules/schedule_60ki.py
new file mode 100644
index 000000000..a3c29e304
--- /dev/null
+++ b/configs_unify/_base_/schedules/schedule_60ki.py
@@ -0,0 +1,14 @@
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
+optimizer_config = dict()
+# learning policy
+lr_config = dict(
+    policy='poly',
+    power=0.9,
+    by_epoch=False,
+)
+# runtime settings
+total_iters = 60000
+checkpoint_config = dict(by_epoch=False, interval=4000)
+evaluation = dict(interval=4000, metric='mIoU')
+runner_type = 'iter'
diff --git a/configs_unify/annnet/ann_r101_40ki_cityscapes.py b/configs_unify/annnet/ann_r101_40ki_cityscapes.py
new file mode 100644
index 000000000..14b26f5a6
--- /dev/null
+++ b/configs_unify/annnet/ann_r101_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './ann_r50_40ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/annnet/ann_r101_60ki_cityscapes.py b/configs_unify/annnet/ann_r101_60ki_cityscapes.py
new file mode 100644
index 000000000..069e4d4d1
--- /dev/null
+++ b/configs_unify/annnet/ann_r101_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './ann_r50_60ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/annnet/ann_r50_40ki_cityscapes.py b/configs_unify/annnet/ann_r50_40ki_cityscapes.py
new file mode 100644
index 000000000..a29fdecb1
--- /dev/null
+++ b/configs_unify/annnet/ann_r50_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/ann_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py'
+]
diff --git a/configs_unify/annnet/ann_r50_60ki_cityscapes.py b/configs_unify/annnet/ann_r50_60ki_cityscapes.py
new file mode 100644
index 000000000..101b16829
--- /dev/null
+++ b/configs_unify/annnet/ann_r50_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/ann_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py'
+]
diff --git a/configs_unify/ccnet/cc_r101_40ki_cityscapes.py b/configs_unify/ccnet/cc_r101_40ki_cityscapes.py
new file mode 100644
index 000000000..8b32314bb
--- /dev/null
+++ b/configs_unify/ccnet/cc_r101_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './cc_r50_40ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/ccnet/cc_r101_60ki_cityscapes.py b/configs_unify/ccnet/cc_r101_60ki_cityscapes.py
new file mode 100644
index 000000000..78d544d19
--- /dev/null
+++ b/configs_unify/ccnet/cc_r101_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './cc_r50_60ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/ccnet/cc_r50_40ki_cityscapes.py b/configs_unify/ccnet/cc_r50_40ki_cityscapes.py
new file mode 100644
index 000000000..19569f68c
--- /dev/null
+++ b/configs_unify/ccnet/cc_r50_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/cc_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py'
+]
diff --git a/configs_unify/ccnet/cc_r50_60ki_cityscapes.py b/configs_unify/ccnet/cc_r50_60ki_cityscapes.py
new file mode 100644
index 000000000..e15e19b9d
--- /dev/null
+++ b/configs_unify/ccnet/cc_r50_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/cc_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py'
+]
diff --git a/configs_unify/danet/da_r101_40ki_cityscapes.py b/configs_unify/danet/da_r101_40ki_cityscapes.py
new file mode 100644
index 000000000..63c00d08f
--- /dev/null
+++ b/configs_unify/danet/da_r101_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './da_r50_40ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/danet/da_r101_60ki_cityscapes.py b/configs_unify/danet/da_r101_60ki_cityscapes.py
new file mode 100644
index 000000000..ccca5262e
--- /dev/null
+++ b/configs_unify/danet/da_r101_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './da_r50_60ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/danet/da_r50_40ki_cityscapes.py b/configs_unify/danet/da_r50_40ki_cityscapes.py
new file mode 100644
index 000000000..945df58e1
--- /dev/null
+++ b/configs_unify/danet/da_r50_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/da_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py'
+]
diff --git a/configs_unify/danet/da_r50_60ki_cityscapes.py b/configs_unify/danet/da_r50_60ki_cityscapes.py
new file mode 100644
index 000000000..4f5b9258d
--- /dev/null
+++ b/configs_unify/danet/da_r50_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/da_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py'
+]
diff --git a/configs_unify/deeplabv3/deeplabv3_r101_40ki_cityscapes.py b/configs_unify/deeplabv3/deeplabv3_r101_40ki_cityscapes.py
new file mode 100644
index 000000000..5bc38eb91
--- /dev/null
+++ b/configs_unify/deeplabv3/deeplabv3_r101_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './deeplabv3_r50_40ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/deeplabv3/deeplabv3_r101_60ki_cityscapes.py b/configs_unify/deeplabv3/deeplabv3_r101_60ki_cityscapes.py
new file mode 100644
index 000000000..1adef8f1b
--- /dev/null
+++ b/configs_unify/deeplabv3/deeplabv3_r101_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './deeplabv3_r50_60ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/deeplabv3/deeplabv3_r50_40ki_cityscapes.py b/configs_unify/deeplabv3/deeplabv3_r50_40ki_cityscapes.py
new file mode 100644
index 000000000..851af9ee8
--- /dev/null
+++ b/configs_unify/deeplabv3/deeplabv3_r50_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/deeplabv3_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py'
+]
diff --git a/configs_unify/deeplabv3/deeplabv3_r50_60ki_cityscapes.py b/configs_unify/deeplabv3/deeplabv3_r50_60ki_cityscapes.py
new file mode 100644
index 000000000..23b398522
--- /dev/null
+++ b/configs_unify/deeplabv3/deeplabv3_r50_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/deeplabv3_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py'
+]
diff --git a/configs_unify/deeplabv3plus/deeplabv3plus_r101_40ki_cityscapes.py b/configs_unify/deeplabv3plus/deeplabv3plus_r101_40ki_cityscapes.py
new file mode 100644
index 000000000..0ae1c3490
--- /dev/null
+++ b/configs_unify/deeplabv3plus/deeplabv3plus_r101_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './deeplabv3plus_r50_40ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/deeplabv3plus/deeplabv3plus_r101_60ki_cityscapes.py b/configs_unify/deeplabv3plus/deeplabv3plus_r101_60ki_cityscapes.py
new file mode 100644
index 000000000..268742b14
--- /dev/null
+++ b/configs_unify/deeplabv3plus/deeplabv3plus_r101_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './deeplabv3plus_r50_60ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/deeplabv3plus/deeplabv3plus_r50_40ki_cityscapes.py b/configs_unify/deeplabv3plus/deeplabv3plus_r50_40ki_cityscapes.py
new file mode 100644
index 000000000..eaec7a827
--- /dev/null
+++ b/configs_unify/deeplabv3plus/deeplabv3plus_r50_40ki_cityscapes.py
@@ -0,0 +1,5 @@
+_base_ = [
+    '../_base_/models/deeplabv3plus_r50.py',
+    '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py',
+    '../_base_/schedules/schedule_40ki.py'
+]
diff --git a/configs_unify/deeplabv3plus/deeplabv3plus_r50_60ki_cityscapes.py b/configs_unify/deeplabv3plus/deeplabv3plus_r50_60ki_cityscapes.py
new file mode 100644
index 000000000..3e297ff03
--- /dev/null
+++ b/configs_unify/deeplabv3plus/deeplabv3plus_r50_60ki_cityscapes.py
@@ -0,0 +1,5 @@
+_base_ = [
+    '../_base_/models/deeplabv3plus_r50.py',
+    '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py',
+    '../_base_/schedules/schedule_60ki.py'
+]
diff --git a/configs_unify/fastscnn/fast_scnn_4x3_1000e_cityscapes.py b/configs_unify/fastscnn/fast_scnn_4x3_1000e_cityscapes.py
new file mode 100644
index 000000000..e360324ab
--- /dev/null
+++ b/configs_unify/fastscnn/fast_scnn_4x3_1000e_cityscapes.py
@@ -0,0 +1,59 @@
+_base_ = [
+    '../_base_/models/fast_scnn.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py'
+]
+crop_size = (512, 1024)
+cudnn_benchmark = True
+# model training and testing settings
+train_cfg = dict()
+test_cfg = dict(mode='whole')
+
+# TODO: confirm whether 'LoadAnnotations' needs an explicit 'with_seg' flag here.
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile', to_float32=True),
+    dict(type='LoadAnnotations'),   # with_seg=True
+    dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='PhotoMetricDistortion'),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='RandomCrop', crop_size=crop_size),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(2048, 1024),
+        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
+data = dict(
+    samples_per_gpu=3,
+    workers_per_gpu=3,
+    train=dict(pipeline=train_pipeline),
+    val=dict(pipeline=test_pipeline),
+    test=dict(pipeline=test_pipeline))
+
+# optimizer
+optimizer = dict(type='SGD', lr=0.045, momentum=0.9, weight_decay=4e-5)
+optimizer_config = dict()
+# learning policy
+lr_config = dict(
+    policy='poly',
+    power=0.9,
+    by_epoch=False,
+)
+# runtime settings
+# total_epochs = 1000
+total_iters = 247000
+evaluation = dict(interval=1000, metric='mIoU')
+checkpoint_config = dict(interval=1000)
diff --git a/configs_unify/fastscnn/fast_scnn_4x8_80k_cityscapes.py b/configs_unify/fastscnn/fast_scnn_4x8_80k_cityscapes.py
new file mode 100644
index 000000000..51d2d61eb
--- /dev/null
+++ b/configs_unify/fastscnn/fast_scnn_4x8_80k_cityscapes.py
@@ -0,0 +1,61 @@
+_base_ = [
+    '../_base_/models/fast_scnn.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py'
+]
+crop_size = (512, 1024)
+cudnn_benchmark = True
+# model training and testing settings
+train_cfg = dict()
+test_cfg = dict(mode='whole')
+
+# TODO: confirm whether 'LoadAnnotations' needs an explicit 'with_seg' flag here.
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile', to_float32=True),
+    dict(type='LoadAnnotations'),   # with_seg=True
+    dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='PhotoMetricDistortion'),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='RandomCrop', crop_size=crop_size),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(2048, 1024),
+        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
+data = dict(
+    samples_per_gpu=8,
+    workers_per_gpu=4,
+    train=dict(pipeline=train_pipeline),
+    val=dict(pipeline=test_pipeline),
+    test=dict(pipeline=test_pipeline))
+
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=4e-5)
+optimizer_config = dict()
+# learning policy
+lr_config = dict(
+    policy='poly',
+    power=0.9,
+    by_epoch=False,
+)
+# runtime settings
+# total_epochs = 1000
+total_iters = 80000
+evaluation = dict(interval=8000, metric='mIoU')
+checkpoint_config = dict(interval=8000)
+
diff --git a/configs_unify/fcnnet/fcn_r101_40ki_cityscapes.py b/configs_unify/fcnnet/fcn_r101_40ki_cityscapes.py
new file mode 100644
index 000000000..e0694eb81
--- /dev/null
+++ b/configs_unify/fcnnet/fcn_r101_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './fcn_r50_40ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/fcnnet/fcn_r101_60ki_cityscapes.py b/configs_unify/fcnnet/fcn_r101_60ki_cityscapes.py
new file mode 100644
index 000000000..bb92bb56e
--- /dev/null
+++ b/configs_unify/fcnnet/fcn_r101_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './fcn_r50_60ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/fcnnet/fcn_r50_40ki_cityscapes.py b/configs_unify/fcnnet/fcn_r50_40ki_cityscapes.py
new file mode 100644
index 000000000..3db17d350
--- /dev/null
+++ b/configs_unify/fcnnet/fcn_r50_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/fcn_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py'
+]
diff --git a/configs_unify/fcnnet/fcn_r50_60ki_cityscapes.py b/configs_unify/fcnnet/fcn_r50_60ki_cityscapes.py
new file mode 100644
index 000000000..ac04a423c
--- /dev/null
+++ b/configs_unify/fcnnet/fcn_r50_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/fcn_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py'
+]
diff --git a/configs_unify/gcnet/gc_r101_40ki_cityscapes.py b/configs_unify/gcnet/gc_r101_40ki_cityscapes.py
new file mode 100644
index 000000000..28c01cce7
--- /dev/null
+++ b/configs_unify/gcnet/gc_r101_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './gc_r50_40ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/gcnet/gc_r101_60ki_cityscapes.py b/configs_unify/gcnet/gc_r101_60ki_cityscapes.py
new file mode 100644
index 000000000..b1ebffa6a
--- /dev/null
+++ b/configs_unify/gcnet/gc_r101_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './gc_r50_60ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/gcnet/gc_r50_40ki_cityscapes.py b/configs_unify/gcnet/gc_r50_40ki_cityscapes.py
new file mode 100644
index 000000000..4ad82b30d
--- /dev/null
+++ b/configs_unify/gcnet/gc_r50_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/gc_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py'
+]
diff --git a/configs_unify/gcnet/gc_r50_60ki_cityscapes.py b/configs_unify/gcnet/gc_r50_60ki_cityscapes.py
new file mode 100644
index 000000000..dea24b0a0
--- /dev/null
+++ b/configs_unify/gcnet/gc_r50_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/gc_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py'
+]
diff --git a/configs_unify/hrnet/fcn_hr18_4x3_484e_cityscapes.py b/configs_unify/hrnet/fcn_hr18_4x3_484e_cityscapes.py
new file mode 100644
index 000000000..5742c52ff
--- /dev/null
+++ b/configs_unify/hrnet/fcn_hr18_4x3_484e_cityscapes.py
@@ -0,0 +1,63 @@
+_base_ = [
+    '../_base_/models/fcn_hr18.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py'
+]
+model = dict(
+    decode_head=dict(classes_weight=[
+        0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489, 0.8786,
+        1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955, 1.0865, 1.1529,
+        1.0507
+    ]))
+crop_size = (512, 1024)
+cudnn_benchmark = True
+# model training and testing settings
+train_cfg = dict()
+test_cfg = dict(mode='whole')
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_seg=True),
+    dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='RandomCrop', crop_size=crop_size),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(2048, 1024),
+        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
+data = dict(
+    samples_per_gpu=3,
+    workers_per_gpu=3,
+    train=dict(pipeline=train_pipeline),
+    val=dict(pipeline=test_pipeline),
+    test=dict(pipeline=test_pipeline))
+
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
+optimizer_config = dict()
+# learning policy
+lr_config = dict(
+    policy='poly',
+    power=0.9,
+    by_epoch=False,
+)
+# runtime settings
+total_epochs = 484
+evaluation = dict(interval=11, metric='mIoU')
+checkpoint_config = dict(interval=11)
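+# Assuming 4 GPUs x 3 imgs/GPU and the 2975-image Cityscapes train split,
+# 484 epochs is roughly 2975 * 484 / 12 ~= 120k iters; evaluating and saving
+# every 11 epochs yields 44 checkpoints.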
diff --git a/configs_unify/hrnet/fcn_hr18s_4x3_484e_cityscapes.py b/configs_unify/hrnet/fcn_hr18s_4x3_484e_cityscapes.py
new file mode 100644
index 000000000..6dd0fbde1
--- /dev/null
+++ b/configs_unify/hrnet/fcn_hr18s_4x3_484e_cityscapes.py
@@ -0,0 +1,9 @@
+_base_ = './fcn_hr18_4x3_484e_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/hrnetv2_w18_small-b5a04e21.pth',
+    backbone=dict(
+        extra=dict(
+            stage1=dict(num_blocks=(2, )),
+            stage2=dict(num_blocks=(2, 2)),
+            stage3=dict(num_modules=3, num_blocks=(2, 2, 2)),
+            stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2)))))
diff --git a/configs_unify/hrnet/fcn_hr32_4x3_484e_cityscapes.py b/configs_unify/hrnet/fcn_hr32_4x3_484e_cityscapes.py
new file mode 100644
index 000000000..3779039c2
--- /dev/null
+++ b/configs_unify/hrnet/fcn_hr32_4x3_484e_cityscapes.py
@@ -0,0 +1,10 @@
+_base_ = './fcn_hr18_4x3_484e_cityscapes.py'
+model = dict(
+    pretrained='open-mmlab://msra/hrnetv2_w32',
+    backbone=dict(
+        extra=dict(
+            stage2=dict(num_channels=(32, 64)),
+            stage3=dict(num_channels=(32, 64, 128)),
+            stage4=dict(num_channels=(32, 64, 128, 256)))),
+    decode_head=dict(
+        in_channels=[32, 64, 128, 256], channels=sum([32, 64, 128, 256])))
diff --git a/configs_unify/hrnet/fcn_hr40_4x3_484e_cityscapes.py b/configs_unify/hrnet/fcn_hr40_4x3_484e_cityscapes.py
new file mode 100644
index 000000000..d96c95923
--- /dev/null
+++ b/configs_unify/hrnet/fcn_hr40_4x3_484e_cityscapes.py
@@ -0,0 +1,10 @@
+_base_ = './fcn_hr18_4x3_484e_cityscapes.py'
+model = dict(
+    pretrained='open-mmlab://msra/hrnetv2_w40',
+    backbone=dict(
+        extra=dict(
+            stage2=dict(num_channels=(40, 80)),
+            stage3=dict(num_channels=(40, 80, 160)),
+            stage4=dict(num_channels=(40, 80, 160, 320)))),
+    decode_head=dict(
+        in_channels=[40, 80, 160, 320], channels=sum([40, 80, 160, 320])))
diff --git a/configs_unify/hrnet/fcn_hr48_484e_pascal_context.py b/configs_unify/hrnet/fcn_hr48_484e_pascal_context.py
new file mode 100644
index 000000000..f38ca7ed2
--- /dev/null
+++ b/configs_unify/hrnet/fcn_hr48_484e_pascal_context.py
@@ -0,0 +1,69 @@
+_base_ = [
+    '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_context.py',
+    '../_base_/default_runtime.py'
+]
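+# HRNetV2-W48 with an FCN head for PASCAL-Context: branch channels widened to 48/96/192/384, num_classes set to 60.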
+model = dict(
+    pretrained='pretrain_model/hrnetv2_w48-d2186c55.pth',
+    backbone=dict(
+        extra=dict(
+            stage2=dict(num_channels=(48, 96)),
+            stage3=dict(num_channels=(48, 96, 192)),
+            stage4=dict(num_channels=(48, 96, 192, 384)))),
+    decode_head=dict(
+        num_classes=60,
+        in_channels=[48, 96, 192, 384],
+        channels=sum([48, 96, 192, 384])))
+crop_size = (480, 480)
+cudnn_benchmark = True
+# model training and testing settings
+train_cfg = dict()
+test_cfg = dict(mode='whole')
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_seg=True),
+    dict(type='Resize', img_scale=(520, 520), ratio_range=(0.5, 2.0)),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='RandomCrop', crop_size=crop_size),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(480, 480),
+        img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
+        flip=True,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
+data = dict(
+    samples_per_gpu=4,
+    workers_per_gpu=4,
+    train=dict(pipeline=train_pipeline),
+    val=dict(pipeline=test_pipeline),
+    test=dict(pipeline=test_pipeline))
+
+# optimizer
+optimizer = dict(type='SGD', lr=0.04, momentum=0.9, weight_decay=0.0001)
+optimizer_config = dict()
+# learning policy
+lr_config = dict(
+    policy='poly',
+    power=0.9,
+    by_epoch=False,
+)
+# runtime settings
+total_epochs = 484
+evaluation = dict(interval=11, metric='mIoU')
+checkpoint_config = dict(interval=11)
diff --git a/configs_unify/hrnet/fcn_hr48_4x3_484e_cityscapes.py b/configs_unify/hrnet/fcn_hr48_4x3_484e_cityscapes.py
new file mode 100644
index 000000000..a16bcd60d
--- /dev/null
+++ b/configs_unify/hrnet/fcn_hr48_4x3_484e_cityscapes.py
@@ -0,0 +1,10 @@
+_base_ = './fcn_hr18_4x3_484e_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/hrnetv2_w48-d2186c55.pth',
+    backbone=dict(
+        extra=dict(
+            stage2=dict(num_channels=(48, 96)),
+            stage3=dict(num_channels=(48, 96, 192)),
+            stage4=dict(num_channels=(48, 96, 192, 384)))),
+    decode_head=dict(
+        in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384])))
diff --git a/configs_unify/hrnet/fcn_hr48_ohem_4x3_484e_cityscapes.py b/configs_unify/hrnet/fcn_hr48_ohem_4x3_484e_cityscapes.py
new file mode 100644
index 000000000..c5efa5d38
--- /dev/null
+++ b/configs_unify/hrnet/fcn_hr48_ohem_4x3_484e_cityscapes.py
@@ -0,0 +1,13 @@
+_base_ = './fcn_hr18_4x3_484e_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/hrnetv2_w48-d2186c55.pth',
+    backbone=dict(
+        extra=dict(
+            stage2=dict(num_channels=(48, 96)),
+            stage3=dict(num_channels=(48, 96, 192)),
+            stage4=dict(num_channels=(48, 96, 192, 384)))),
+    decode_head=dict(
+        in_channels=[48, 96, 192, 384],
+        channels=sum([48, 96, 192, 384]),
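+        # OHEM sampling: the loss is computed only on hard pixels (confidence below thresh), keeping at least min_kept pixels.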
+        sampler=dict(type='OHEMSegSampler', thresh=0.9, min_kept=131072)))
diff --git a/configs_unify/nlnet/nl_r101_40ki_cityscapes.py b/configs_unify/nlnet/nl_r101_40ki_cityscapes.py
new file mode 100644
index 000000000..378978472
--- /dev/null
+++ b/configs_unify/nlnet/nl_r101_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './nl_r50_40ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/nlnet/nl_r101_60ki_cityscapes.py b/configs_unify/nlnet/nl_r101_60ki_cityscapes.py
new file mode 100644
index 000000000..cacf74a66
--- /dev/null
+++ b/configs_unify/nlnet/nl_r101_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './nl_r50_60ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/nlnet/nl_r50_40ki_cityscapes.py b/configs_unify/nlnet/nl_r50_40ki_cityscapes.py
new file mode 100644
index 000000000..fea99f151
--- /dev/null
+++ b/configs_unify/nlnet/nl_r50_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/nl_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py'
+]
diff --git a/configs_unify/nlnet/nl_r50_60ki_cityscapes.py b/configs_unify/nlnet/nl_r50_60ki_cityscapes.py
new file mode 100644
index 000000000..528532d6a
--- /dev/null
+++ b/configs_unify/nlnet/nl_r50_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/nl_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py'
+]
diff --git a/configs_unify/ocrnet/ocr_hr18_4x3_484e_cityscapes.py b/configs_unify/ocrnet/ocr_hr18_4x3_484e_cityscapes.py
new file mode 100644
index 000000000..73b57f48b
--- /dev/null
+++ b/configs_unify/ocrnet/ocr_hr18_4x3_484e_cityscapes.py
@@ -0,0 +1,57 @@
+_base_ = [
+    '../_base_/models/ocr_hr18.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py'
+]
+crop_size = (512, 1024)
+cudnn_benchmark = True
+# model training and testing settings
+train_cfg = dict()
+test_cfg = dict(mode='whole')
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_seg=True),
+    dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='RandomCrop', crop_size=crop_size),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(2048, 1024),
+        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
+data = dict(
+    samples_per_gpu=3,
+    workers_per_gpu=3,
+    train=dict(pipeline=train_pipeline),
+    val=dict(pipeline=test_pipeline),
+    test=dict(pipeline=test_pipeline))
+
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
+optimizer_config = dict()
+# learning policy
+lr_config = dict(
+    policy='poly',
+    power=0.9,
+    by_epoch=False,
+)
+# runtime settings
+total_epochs = 484
+evaluation = dict(interval=11, metric='mIoU')
+checkpoint_config = dict(interval=11)
diff --git a/configs_unify/ocrnet/ocr_hr18s_4x3_484e_cityscapes.py b/configs_unify/ocrnet/ocr_hr18s_4x3_484e_cityscapes.py
new file mode 100644
index 000000000..e23bd27fb
--- /dev/null
+++ b/configs_unify/ocrnet/ocr_hr18s_4x3_484e_cityscapes.py
@@ -0,0 +1,10 @@
+_base_ = './ocr_hr18_4x3_484e_cityscapes.py'
+norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01)
+model = dict(
+    pretrained='pretrain_model/hrnetv2_w18_small-b5a04e21.pth',
+    backbone=dict(
+        extra=dict(
+            stage1=dict(num_blocks=(2, )),
+            stage2=dict(num_blocks=(2, 2)),
+            stage3=dict(num_modules=3, num_blocks=(2, 2, 2)),
+            stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2)))))
diff --git a/configs_unify/ocrnet/ocr_hr48_4x3_484e_cityscapes.py b/configs_unify/ocrnet/ocr_hr48_4x3_484e_cityscapes.py
new file mode 100644
index 000000000..6230679b5
--- /dev/null
+++ b/configs_unify/ocrnet/ocr_hr48_4x3_484e_cityscapes.py
@@ -0,0 +1,40 @@
+_base_ = './ocr_hr18_4x3_484e_cityscapes.py'
+norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01)
+model = dict(
+    pretrained='pretrain_model/hrnetv2_w48-d2186c55.pth',
+    backbone=dict(
+        extra=dict(
+            stage2=dict(num_channels=(48, 96)),
+            stage3=dict(num_channels=(48, 96, 192)),
+            stage4=dict(num_channels=(48, 96, 192, 384)))),
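+    # Two decode heads: an FCN head with auxiliary loss (weight 0.4), followed by the OCR head (weight 1.0).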
+    decode_head=[
+        dict(
+            type='FCNHead',
+            in_channels=[48, 96, 192, 384],
+            channels=sum([48, 96, 192, 384]),
+            input_transform='resize_concat',
+            in_index=(0, 1, 2, 3),
+            kernel_size=1,
+            num_convs=1,
+            norm_cfg=norm_cfg,
+            concat_input=False,
+            drop_out_ratio=-1,
+            num_classes=19,
+            align_corners=False,
+            loss_decode=dict(
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+        dict(
+            type='OCRHead',
+            in_channels=[48, 96, 192, 384],
+            channels=512,
+            ocr_channels=256,
+            input_transform='resize_concat',
+            in_index=(0, 1, 2, 3),
+            norm_cfg=norm_cfg,
+            drop_out_ratio=-1,
+            num_classes=19,
+            align_corners=False,
+            loss_decode=dict(
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
+    ])
diff --git a/configs_unify/ocrnet/ocr_hr48_ohem_4x3_484e_cityscapes.py b/configs_unify/ocrnet/ocr_hr48_ohem_4x3_484e_cityscapes.py
new file mode 100644
index 000000000..477fbf70b
--- /dev/null
+++ b/configs_unify/ocrnet/ocr_hr48_ohem_4x3_484e_cityscapes.py
@@ -0,0 +1,40 @@
+_base_ = './ocr_hr18_4x3_484e_cityscapes.py'
+norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01)
+model = dict(
+    pretrained='pretrain_model/hrnetv2_w48-d2186c55.pth',
+    backbone=dict(
+        extra=dict(
+            stage2=dict(num_channels=(48, 96)),
+            stage3=dict(num_channels=(48, 96, 192)),
+            stage4=dict(num_channels=(48, 96, 192, 384)))),
+    decode_head=[
+        dict(
+            type='FCNHead',
+            in_channels=[48, 96, 192, 384],
+            channels=sum([48, 96, 192, 384]),
+            input_transform='resize_concat',
+            in_index=(0, 1, 2, 3),
+            kernel_size=1,
+            num_convs=1,
+            norm_cfg=norm_cfg,
+            concat_input=False,
+            drop_out_ratio=-1,
+            num_classes=19,
+            align_corners=False,
+            loss_decode=dict(
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+        dict(
+            type='OCRHead',
+            in_channels=[48, 96, 192, 384],
+            channels=512,
+            ocr_channels=256,
+            input_transform='resize_concat',
+            in_index=(0, 1, 2, 3),
+            norm_cfg=norm_cfg,
+            drop_out_ratio=-1,
+            num_classes=19,
+            align_corners=False,
+            loss_decode=dict(
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            sampler=dict(type='OHEMSegSampler', thresh=0.9, min_kept=131072))
+    ])
diff --git a/configs_unify/psanet/psa_r101_40ki_cityscapes.py b/configs_unify/psanet/psa_r101_40ki_cityscapes.py
new file mode 100644
index 000000000..b146cbe1d
--- /dev/null
+++ b/configs_unify/psanet/psa_r101_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './psa_r50_40ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/psanet/psa_r101_60ki_cityscapes.py b/configs_unify/psanet/psa_r101_60ki_cityscapes.py
new file mode 100644
index 000000000..66b2ced0c
--- /dev/null
+++ b/configs_unify/psanet/psa_r101_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './psa_r50_60ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/psanet/psa_r50_40ki_cityscapes.py b/configs_unify/psanet/psa_r50_40ki_cityscapes.py
new file mode 100644
index 000000000..6a4acc627
--- /dev/null
+++ b/configs_unify/psanet/psa_r50_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/psa_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py'
+]
diff --git a/configs_unify/psanet/psa_r50_60ki_cityscapes.py b/configs_unify/psanet/psa_r50_60ki_cityscapes.py
new file mode 100644
index 000000000..786554fab
--- /dev/null
+++ b/configs_unify/psanet/psa_r50_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/psa_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py'
+]
diff --git a/configs_unify/pspnet/psp_r101_40ki_cityscapes.py b/configs_unify/pspnet/psp_r101_40ki_cityscapes.py
new file mode 100644
index 000000000..d37a44173
--- /dev/null
+++ b/configs_unify/pspnet/psp_r101_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './psp_r50_40ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/pspnet/psp_r101_60ki_cityscapes.py b/configs_unify/pspnet/psp_r101_60ki_cityscapes.py
new file mode 100644
index 000000000..0f9fcaad3
--- /dev/null
+++ b/configs_unify/pspnet/psp_r101_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './psp_r50_60ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/pspnet/psp_r50_40ki_cityscapes.py b/configs_unify/pspnet/psp_r50_40ki_cityscapes.py
new file mode 100644
index 000000000..df5241e32
--- /dev/null
+++ b/configs_unify/pspnet/psp_r50_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/psp_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py'
+]
diff --git a/configs_unify/pspnet/psp_r50_60ki_cityscapes.py b/configs_unify/pspnet/psp_r50_60ki_cityscapes.py
new file mode 100644
index 000000000..688c12fb9
--- /dev/null
+++ b/configs_unify/pspnet/psp_r50_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/psp_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py'
+]
diff --git a/configs_unify/upernet/uper_r101_40ki_cityscapes.py b/configs_unify/upernet/uper_r101_40ki_cityscapes.py
new file mode 100644
index 000000000..3ee073c59
--- /dev/null
+++ b/configs_unify/upernet/uper_r101_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './uper_r50_40ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/upernet/uper_r101_60ki_cityscapes.py b/configs_unify/upernet/uper_r101_60ki_cityscapes.py
new file mode 100644
index 000000000..fdc2987b4
--- /dev/null
+++ b/configs_unify/upernet/uper_r101_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = './uper_r50_60ki_cityscapes.py'
+model = dict(
+    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
+    backbone=dict(depth=101))
diff --git a/configs_unify/upernet/uper_r50_40ki_cityscapes.py b/configs_unify/upernet/uper_r50_40ki_cityscapes.py
new file mode 100644
index 000000000..c955aeb72
--- /dev/null
+++ b/configs_unify/upernet/uper_r50_40ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/uper_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py'
+]
diff --git a/configs_unify/upernet/uper_r50_60ki_cityscapes.py b/configs_unify/upernet/uper_r50_60ki_cityscapes.py
new file mode 100644
index 000000000..fb1fde1f9
--- /dev/null
+++ b/configs_unify/upernet/uper_r50_60ki_cityscapes.py
@@ -0,0 +1,4 @@
+_base_ = [
+    '../_base_/models/uper_r50.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py'
+]