diff --git a/configs_unify/_base_/datasets/ade.py b/configs_unify/_base_/datasets/ade.py deleted file mode 100644 index 5ea2aac05..000000000 --- a/configs_unify/_base_/datasets/ade.py +++ /dev/null @@ -1,52 +0,0 @@ -# dataset settings -dataset_type = 'ADEDataset' -data_root = 'data/ade/ADEChallengeData2016' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -crop_size = (521, 521) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_seg=True), - dict(type='Resize', img_scale=(2049, 521), ratio_range=(0.5, 2.0)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomCrop', crop_size=crop_size), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2049, 521), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - data_root=data_root, - img_dir='images/training', - ann_dir='annotations/training', - train_pipeline=train_pipeline), - val=dict( - type=dataset_type, - data_root=data_root, - img_dir='images/validation', - ann_dir='annotations/validation', - test_pipeline=test_pipeline), - test=dict( - type=dataset_type, - data_root=data_root, - img_dir='images/validation', - ann_dir='annotations/validation', - test_pipeline=test_pipeline)) diff --git a/configs_unify/_base_/datasets/cityscapes.py b/configs_unify/_base_/datasets/cityscapes.py deleted file mode 100644 index f598a8b44..000000000 --- a/configs_unify/_base_/datasets/cityscapes.py +++ /dev/null @@ -1,53 +0,0 @@ -# dataset settings -dataset_type = 'CityscapesDataset' -data_root = 'data/cityscapes/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -crop_size = (769, 769) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2049, 1025), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - data_root=data_root, - img_dir='leftImg8bit/train', - ann_dir='gtFine/train', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - data_root=data_root, - img_dir='leftImg8bit/val', - ann_dir='gtFine/val', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - data_root=data_root, - img_dir='leftImg8bit/val', - ann_dir='gtFine/val', - pipeline=test_pipeline)) diff --git 
a/configs_unify/_base_/datasets/pascal_context.py b/configs_unify/_base_/datasets/pascal_context.py deleted file mode 100644 index 1c1870012..000000000 --- a/configs_unify/_base_/datasets/pascal_context.py +++ /dev/null @@ -1,24 +0,0 @@ -# dataset settings -dataset_type = 'PascalContextDataset' -data_root = 'data/VOCdevkit/VOC2010' -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClassContext', - split='ImageSets/SegmentationContext/train.txt'), - val=dict( - type=dataset_type, - data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClassContext', - split='ImageSets/SegmentationContext/val.txt'), - test=dict( - type=dataset_type, - data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClassContext', - split='ImageSets/SegmentationContext/val.txt')) diff --git a/configs_unify/_base_/datasets/pascal_voc12.py b/configs_unify/_base_/datasets/pascal_voc12.py deleted file mode 100644 index 8c2442330..000000000 --- a/configs_unify/_base_/datasets/pascal_voc12.py +++ /dev/null @@ -1,55 +0,0 @@ -# dataset settings -dataset_type = 'PascalVOCDataset' -data_root = 'data/VOCdevkit/VOC2012' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -crop_size = (521, 521) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_seg=True), - dict(type='Resize', img_scale=(2049, 521), ratio_range=(0.5, 2.0)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomCrop', crop_size=crop_size), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2049, 521), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClass', - split='ImageSets/Segmentation/train.txt', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClass', - split='ImageSets/Segmentation/val.txt', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClass', - split='ImageSets/Segmentation/val.txt', - pipeline=test_pipeline)) diff --git a/configs_unify/_base_/datasets/pascal_voc12_aug.py b/configs_unify/_base_/datasets/pascal_voc12_aug.py deleted file mode 100644 index 3f23b6717..000000000 --- a/configs_unify/_base_/datasets/pascal_voc12_aug.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './pascal_voc12.py' -# dataset settings -data = dict( - train=dict( - ann_dir=['SegmentationClass', 'SegmentationClassAug'], - split=[ - 'ImageSets/Segmentation/train.txt', - 'ImageSets/Segmentation/aug.txt' - ])) diff --git a/configs_unify/_base_/default_runtime.py b/configs_unify/_base_/default_runtime.py deleted file mode 100644 index a8434e3f4..000000000 --- a/configs_unify/_base_/default_runtime.py +++ /dev/null @@ -1,14 +0,0 @@ -# yapf:disable -log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - # 
dict(type='TensorboardLoggerHook') - ]) -# yapf:enable -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] -cudnn_benchmark = True diff --git a/configs_unify/_base_/models/ann_r50.py b/configs_unify/_base_/models/ann_r50.py deleted file mode 100644 index 812c29880..000000000 --- a/configs_unify/_base_/models/ann_r50.py +++ /dev/null @@ -1,44 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='pretrain_model/resnet50_v1c-66047269.pth', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='ANNHead', - in_channels=[1024, 2048], - in_index=[2, 3], - channels=512, - project_channels=256, - query_scales=(1, ), - key_pool_scales=(1, 3, 6, 8), - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs_unify/_base_/models/cc_r50.py b/configs_unify/_base_/models/cc_r50.py deleted file mode 100644 index 5335bf8f4..000000000 --- a/configs_unify/_base_/models/cc_r50.py +++ /dev/null @@ -1,42 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='pretrain_model/resnet50_v1c-66047269.pth', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='CCHead', - in_channels=2048, - in_index=3, - channels=512, - recurrence=2, - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs_unify/_base_/models/da_r50.py b/configs_unify/_base_/models/da_r50.py deleted file mode 100644 index 4d5cc7fe1..000000000 --- a/configs_unify/_base_/models/da_r50.py +++ /dev/null @@ -1,42 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='pretrain_model/resnet50_v1c-66047269.pth', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='DAHead', - in_channels=2048, - in_index=3, - channels=512, - pam_channels=64, - 
drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs_unify/_base_/models/deeplabv3_r50.py b/configs_unify/_base_/models/deeplabv3_r50.py deleted file mode 100644 index 7941824a8..000000000 --- a/configs_unify/_base_/models/deeplabv3_r50.py +++ /dev/null @@ -1,42 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='pretrain_model/resnet50_v1c-66047269.pth', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='ASPPHead', - in_channels=2048, - in_index=3, - channels=512, - dilations=(1, 12, 24, 36), - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs_unify/_base_/models/deeplabv3plus_r50.py b/configs_unify/_base_/models/deeplabv3plus_r50.py deleted file mode 100644 index 8dc04ba9c..000000000 --- a/configs_unify/_base_/models/deeplabv3plus_r50.py +++ /dev/null @@ -1,44 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='pretrain_model/resnet50_v1c-66047269.pth', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='SepASPPHead', - in_channels=2048, - in_index=3, - channels=512, - dilations=(1, 12, 24, 36), - c1_in_channels=256, - c1_channels=48, - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs_unify/_base_/models/fast_scnn.py b/configs_unify/_base_/models/fast_scnn.py deleted file mode 100644 index 6001c18fe..000000000 --- a/configs_unify/_base_/models/fast_scnn.py +++ /dev/null @@ -1,57 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01) -model = dict( - 
type='EncoderDecoder', - backbone=dict( - type='FastSCNN', - downsample_dw_channels1=32, - downsample_dw_channels2=48, - global_in_channels=64, - global_block_channels=(64, 96, 128), - global_out_channels=128, - higher_in_channels=64, - lower_in_channels=128, - fusion_out_channels=128, - scale_factor=4, - out_indices=(0, 1, 2), - norm_cfg=norm_cfg, - align_corners=False), - decode_head=dict( - type='SepFCNHead', - in_channels=128, - channels=128, - concat_input=False, - num_classes=19, - in_index=-1, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.)), - auxiliary_head=[ - dict( - type='FCNHead', - in_channels=128, - channels=32, - num_convs=1, - num_classes=19, - in_index=-2, - norm_cfg=norm_cfg, - concat_input=False, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - dict( - type='FCNHead', - in_channels=64, - channels=32, - num_convs=1, - num_classes=19, - in_index=-3, - norm_cfg=norm_cfg, - concat_input=False, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - ]) - -total_iters = 10000 \ No newline at end of file diff --git a/configs_unify/_base_/models/fcn_hr18.py b/configs_unify/_base_/models/fcn_hr18.py deleted file mode 100644 index 40fa92ea9..000000000 --- a/configs_unify/_base_/models/fcn_hr18.py +++ /dev/null @@ -1,49 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01) -model = dict( - type='EncoderDecoder', - pretrained='open-mmlab://msra/hrnetv2_w18', - backbone=dict( - type='HRNet', - norm_cfg=norm_cfg, - norm_eval=False, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144)))), - decode_head=dict( - type='FCNHead', - in_channels=[18, 36, 72, 144], - in_index=(0, 1, 2, 3), - channels=sum([18, 36, 72, 144]), - input_transform='resize_concat', - kernel_size=1, - num_convs=1, - concat_input=False, - drop_out_ratio=-1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))) diff --git a/configs_unify/_base_/models/fcn_r50.py b/configs_unify/_base_/models/fcn_r50.py deleted file mode 100644 index 3db773a1f..000000000 --- a/configs_unify/_base_/models/fcn_r50.py +++ /dev/null @@ -1,43 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='pretrain_model/resnet50_v1c-66047269.pth', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='FCNHead', - in_channels=2048, - in_index=3, - channels=512, - num_convs=2, - concat_input=True, - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - 
channels=256, - num_convs=1, - concat_input=False, - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs_unify/_base_/models/gc_r50.py b/configs_unify/_base_/models/gc_r50.py deleted file mode 100644 index 0388f77b6..000000000 --- a/configs_unify/_base_/models/gc_r50.py +++ /dev/null @@ -1,44 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='pretrain_model/resnet50_v1c-66047269.pth', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='GCHead', - in_channels=2048, - in_index=3, - channels=512, - ratio=1 / 4., - pooling_type='att', - fusion_types=('channel_add', ), - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs_unify/_base_/models/nl_r50.py b/configs_unify/_base_/models/nl_r50.py deleted file mode 100644 index 26ad02f20..000000000 --- a/configs_unify/_base_/models/nl_r50.py +++ /dev/null @@ -1,44 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='pretrain_model/resnet50_v1c-66047269.pth', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='NLHead', - in_channels=2048, - in_index=3, - channels=512, - drop_out_ratio=0.1, - reduction=2, - use_scale=True, - mode='embedded_gaussian', - norm_cfg=norm_cfg, - num_classes=19, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs_unify/_base_/models/ocr_hr18.py b/configs_unify/_base_/models/ocr_hr18.py deleted file mode 100644 index b3885a78d..000000000 --- a/configs_unify/_base_/models/ocr_hr18.py +++ /dev/null @@ -1,65 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01) -model = dict( - type='CascadeEncoderDecoder', - num_stages=2, - pretrained='open-mmlab://msra/hrnetv2_w18', - backbone=dict( - type='HRNet', - norm_cfg=norm_cfg, - norm_eval=False, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - 
num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144)))), - decode_head=[ - dict( - type='FCNHead', - in_channels=[18, 36, 72, 144], - channels=sum([18, 36, 72, 144]), - in_index=(0, 1, 2, 3), - input_transform='resize_concat', - kernel_size=1, - num_convs=1, - concat_input=False, - drop_out_ratio=-1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - dict( - type='OCRHead', - in_channels=[18, 36, 72, 144], - in_index=(0, 1, 2, 3), - input_transform='resize_concat', - channels=512, - ocr_channels=256, - drop_out_ratio=-1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - ]) diff --git a/configs_unify/_base_/models/psa_r50.py b/configs_unify/_base_/models/psa_r50.py deleted file mode 100644 index 4393b8d37..000000000 --- a/configs_unify/_base_/models/psa_r50.py +++ /dev/null @@ -1,47 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='pretrain_model/resnet50_v1c-66047269.pth', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='PSAHead', - in_channels=2048, - in_index=3, - channels=512, - mask_size=(97, 97), - psa_type='bi-direction', - compact=False, - shrink_factor=2, - normalization_factor=1.0, - psa_softmax=True, - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs_unify/_base_/models/psp_r50.py b/configs_unify/_base_/models/psp_r50.py deleted file mode 100644 index 7a91e91bf..000000000 --- a/configs_unify/_base_/models/psp_r50.py +++ /dev/null @@ -1,42 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='pretrain_model/resnet50_v1c-66047269.pth', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='PSPHead', - in_channels=2048, - in_index=3, - channels=512, - pool_scales=(1, 2, 3, 6), - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - drop_out_ratio=0.1, - 
num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs_unify/_base_/models/uper_r50.py b/configs_unify/_base_/models/uper_r50.py deleted file mode 100644 index 006ccf7b7..000000000 --- a/configs_unify/_base_/models/uper_r50.py +++ /dev/null @@ -1,42 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='pretrain_model/resnet50_v1c-66047269.pth', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 1, 1), - strides=(1, 2, 2, 2), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='UPerHead', - in_channels=[256, 512, 1024, 2048], - in_index=[0, 1, 2, 3], - pool_scales=(1, 2, 3, 6), - channels=512, - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs_unify/_base_/schedules/schedule_220e.py b/configs_unify/_base_/schedules/schedule_220e.py deleted file mode 100644 index e4db9a0f6..000000000 --- a/configs_unify/_base_/schedules/schedule_220e.py +++ /dev/null @@ -1,14 +0,0 @@ -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict() -# learning policy -lr_config = dict( - policy='poly', - power=0.9, - by_epoch=False, -) -# runtime settings -total_epochs = 220 -checkpoint_config = dict(interval=20) -evaluation = dict(interval=20, metric='mIoU') -runner_type = 'epoch' diff --git a/configs_unify/_base_/schedules/schedule_40ki.py b/configs_unify/_base_/schedules/schedule_40ki.py deleted file mode 100644 index 0a79250f5..000000000 --- a/configs_unify/_base_/schedules/schedule_40ki.py +++ /dev/null @@ -1,14 +0,0 @@ -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict() -# learning policy -lr_config = dict( - policy='poly', - power=0.9, - by_epoch=False, -) -# runtime settings -total_iters = 40000 -checkpoint_config = dict(by_epoch=False, interval=4000) -evaluation = dict(interval=4000, metric='mIoU') -runner_type = 'iter' diff --git a/configs_unify/_base_/schedules/schedule_60ki.py b/configs_unify/_base_/schedules/schedule_60ki.py deleted file mode 100644 index a3c29e304..000000000 --- a/configs_unify/_base_/schedules/schedule_60ki.py +++ /dev/null @@ -1,14 +0,0 @@ -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict() -# learning policy -lr_config = dict( - policy='poly', - power=0.9, - by_epoch=False, -) -# runtime settings -total_iters = 60000 -checkpoint_config = dict(by_epoch=False, interval=4000) -evaluation = dict(interval=4000, metric='mIoU') -runner_type = 'iter' diff --git a/configs_unify/annnet/ann_r101_40ki_cityscapes.py b/configs_unify/annnet/ann_r101_40ki_cityscapes.py deleted file mode 100644 index 
14b26f5a6..000000000 --- a/configs_unify/annnet/ann_r101_40ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './ann_r50_40ki_cityscapes.py' -model = dict( - pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth', - backbone=dict(depth=101)) diff --git a/configs_unify/annnet/ann_r101_60ki_cityscapes.py b/configs_unify/annnet/ann_r101_60ki_cityscapes.py deleted file mode 100644 index 069e4d4d1..000000000 --- a/configs_unify/annnet/ann_r101_60ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './ann_r50_60ki_cityscapes.py' -model = dict( - pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth', - backbone=dict(depth=101)) diff --git a/configs_unify/annnet/ann_r50_40ki_cityscapes.py b/configs_unify/annnet/ann_r50_40ki_cityscapes.py deleted file mode 100644 index a29fdecb1..000000000 --- a/configs_unify/annnet/ann_r50_40ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/ann_r50.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py' -] diff --git a/configs_unify/annnet/ann_r50_60ki_cityscapes.py b/configs_unify/annnet/ann_r50_60ki_cityscapes.py deleted file mode 100644 index 101b16829..000000000 --- a/configs_unify/annnet/ann_r50_60ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/ann_r50.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py' -] diff --git a/configs_unify/ccnet/cc_r101_40ki_cityscapes.py b/configs_unify/ccnet/cc_r101_40ki_cityscapes.py deleted file mode 100644 index 8b32314bb..000000000 --- a/configs_unify/ccnet/cc_r101_40ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './cc_r50_40ki_cityscapes.py' -model = dict( - pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth', - backbone=dict(depth=101)) diff --git a/configs_unify/ccnet/cc_r101_60ki_cityscapes.py b/configs_unify/ccnet/cc_r101_60ki_cityscapes.py deleted file mode 100644 index 78d544d19..000000000 --- a/configs_unify/ccnet/cc_r101_60ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './cc_r50_60ki_cityscapes.py' -model = dict( - pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth', - backbone=dict(depth=101)) diff --git a/configs_unify/ccnet/cc_r50_40ki_cityscapes.py b/configs_unify/ccnet/cc_r50_40ki_cityscapes.py deleted file mode 100644 index 19569f68c..000000000 --- a/configs_unify/ccnet/cc_r50_40ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/cc_r50.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py' -] diff --git a/configs_unify/ccnet/cc_r50_60ki_cityscapes.py b/configs_unify/ccnet/cc_r50_60ki_cityscapes.py deleted file mode 100644 index e15e19b9d..000000000 --- a/configs_unify/ccnet/cc_r50_60ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/cc_r50.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py' -] diff --git a/configs_unify/danet/da_r101_40ki_cityscapes.py b/configs_unify/danet/da_r101_40ki_cityscapes.py deleted file mode 100644 index 63c00d08f..000000000 --- a/configs_unify/danet/da_r101_40ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './da_r50_40ki_cityscapes.py' -model = dict( - pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth', - backbone=dict(depth=101)) diff --git a/configs_unify/danet/da_r101_60ki_cityscapes.py b/configs_unify/danet/da_r101_60ki_cityscapes.py deleted file mode 100644 index 
ccca5262e..000000000 --- a/configs_unify/danet/da_r101_60ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './da_r50_60ki_cityscapes.py' -model = dict( - pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth', - backbone=dict(depth=101)) diff --git a/configs_unify/danet/da_r50_40ki_cityscapes.py b/configs_unify/danet/da_r50_40ki_cityscapes.py deleted file mode 100644 index 945df58e1..000000000 --- a/configs_unify/danet/da_r50_40ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/da_r50.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py' -] diff --git a/configs_unify/danet/da_r50_60ki_cityscapes.py b/configs_unify/danet/da_r50_60ki_cityscapes.py deleted file mode 100644 index 4f5b9258d..000000000 --- a/configs_unify/danet/da_r50_60ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/da_r50.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py' -] diff --git a/configs_unify/deeplabv3/deeplabv3_r101_40ki_cityscapes.py b/configs_unify/deeplabv3/deeplabv3_r101_40ki_cityscapes.py deleted file mode 100644 index 5bc38eb91..000000000 --- a/configs_unify/deeplabv3/deeplabv3_r101_40ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './deeplabv3_r50_40ki_cityscapes.py' -model = dict( - pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth', - backbone=dict(depth=101)) diff --git a/configs_unify/deeplabv3/deeplabv3_r101_60ki_cityscapes.py b/configs_unify/deeplabv3/deeplabv3_r101_60ki_cityscapes.py deleted file mode 100644 index 1adef8f1b..000000000 --- a/configs_unify/deeplabv3/deeplabv3_r101_60ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './deeplabv3_r50_60ki_cityscapes.py' -model = dict( - pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth', - backbone=dict(depth=101)) diff --git a/configs_unify/deeplabv3/deeplabv3_r50_40ki_cityscapes.py b/configs_unify/deeplabv3/deeplabv3_r50_40ki_cityscapes.py deleted file mode 100644 index 851af9ee8..000000000 --- a/configs_unify/deeplabv3/deeplabv3_r50_40ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3_r50.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py' -] diff --git a/configs_unify/deeplabv3/deeplabv3_r50_60ki_cityscapes.py b/configs_unify/deeplabv3/deeplabv3_r50_60ki_cityscapes.py deleted file mode 100644 index 23b398522..000000000 --- a/configs_unify/deeplabv3/deeplabv3_r50_60ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3_r50.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py' -] diff --git a/configs_unify/deeplabv3plus/deeplabv3plus_r101_40ki_cityscapes.py b/configs_unify/deeplabv3plus/deeplabv3plus_r101_40ki_cityscapes.py deleted file mode 100644 index 0ae1c3490..000000000 --- a/configs_unify/deeplabv3plus/deeplabv3plus_r101_40ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './deeplabv3plus_r50_40ki_cityscapes.py' -model = dict( - pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth', - backbone=dict(depth=101)) diff --git a/configs_unify/deeplabv3plus/deeplabv3plus_r101_60ki_cityscapes.py b/configs_unify/deeplabv3plus/deeplabv3plus_r101_60ki_cityscapes.py deleted file mode 100644 index 268742b14..000000000 --- a/configs_unify/deeplabv3plus/deeplabv3plus_r101_60ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = 
'./deeplabv3plus_r50_60ki_cityscapes.py' -model = dict( - pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth', - backbone=dict(depth=101)) diff --git a/configs_unify/deeplabv3plus/deeplabv3plus_r50_40ki_cityscapes.py b/configs_unify/deeplabv3plus/deeplabv3plus_r50_40ki_cityscapes.py deleted file mode 100644 index eaec7a827..000000000 --- a/configs_unify/deeplabv3plus/deeplabv3plus_r50_40ki_cityscapes.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3plus_r50.py', - '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40ki.py' -] diff --git a/configs_unify/deeplabv3plus/deeplabv3plus_r50_60ki_cityscapes.py b/configs_unify/deeplabv3plus/deeplabv3plus_r50_60ki_cityscapes.py deleted file mode 100644 index 3e297ff03..000000000 --- a/configs_unify/deeplabv3plus/deeplabv3plus_r50_60ki_cityscapes.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3plus_r50.py', - '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_60ki.py' -] diff --git a/configs_unify/fastscnn/fast_scnn_4x3_1000e_cityscapes.py b/configs_unify/fastscnn/fast_scnn_4x3_1000e_cityscapes.py deleted file mode 100644 index 7583cb6f2..000000000 --- a/configs_unify/fastscnn/fast_scnn_4x3_1000e_cityscapes.py +++ /dev/null @@ -1,62 +0,0 @@ -_base_ = [ - '../_base_/models/fast_scnn.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py' -] -crop_size = (512, 1024) -cudnn_benchmark = True -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') - -# Here: What is parameter 'with_seg'? -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations'), # with_seg=True - dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomCrop', crop_size=crop_size), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 1024), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=3, - workers_per_gpu=3, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -# optimizer -optimizer = dict(type='SGD', lr=0.045, momentum=0.9, weight_decay=4e-5) -optimizer_config = dict() -# learning policy -lr_config = dict( - policy='poly', - power=0.9, - by_epoch=False, -) -# runtime settings -total_epochs = 247000 -evaluation = dict(interval=1000, metric='mIoU') -checkpoint_config = dict(interval=1000) - -# log config: log by iter. 
-log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)]) diff --git a/configs_unify/fastscnn/fast_scnn_4x3_450k_lr0.08_cityscapes.py b/configs_unify/fastscnn/fast_scnn_4x3_450k_lr0.08_cityscapes.py deleted file mode 100644 index f562f0d76..000000000 --- a/configs_unify/fastscnn/fast_scnn_4x3_450k_lr0.08_cityscapes.py +++ /dev/null @@ -1,64 +0,0 @@ -_base_ = [ - '../_base_/models/fast_scnn.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py' -] -crop_size = (512, 1024) -cudnn_benchmark = True -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') - -# Here: What is parameter 'with_seg'? -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations'), # with_seg=True - dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomCrop', crop_size=crop_size), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 1024), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=3, - workers_per_gpu=3, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -# optimizer -optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=4e-5) -optimizer_config = dict() -# learning policy -lr_config = dict( - policy='poly', - power=0.9, - by_epoch=False, -) -# runtime settings -# total_epochs = 1000 -total_iters = 450000 -evaluation = dict(interval=2000, metric='mIoU') -checkpoint_config = dict(interval=2000) - -# log config: log by iter. -log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)]) - diff --git a/configs_unify/fastscnn/fast_scnn_4x8_100k_lr0.045_power1.2_cityscapes.py b/configs_unify/fastscnn/fast_scnn_4x8_100k_lr0.045_power1.2_cityscapes.py deleted file mode 100644 index 078fa94c5..000000000 --- a/configs_unify/fastscnn/fast_scnn_4x8_100k_lr0.045_power1.2_cityscapes.py +++ /dev/null @@ -1,64 +0,0 @@ -_base_ = [ - '../_base_/models/fast_scnn.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py' -] -crop_size = (512, 1024) -cudnn_benchmark = True -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') - -# Here: What is parameter 'with_seg'? 
-img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations'), # with_seg=True - dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomCrop', crop_size=crop_size), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 1024), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=4, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -# optimizer -optimizer = dict(type='SGD', lr=0.045, momentum=0.9, weight_decay=4e-5) -optimizer_config = dict() -# learning policy -lr_config = dict( - policy='poly', - power=1.2, - by_epoch=False, -) -# runtime settings -# total_epochs = 1000 -total_iters = 100000 -evaluation = dict(interval=2000, metric='mIoU') -checkpoint_config = dict(interval=2000) - -# log config: log by iter. -log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)]) - diff --git a/configs_unify/fastscnn/fast_scnn_4x8_100k_lr0.08_cityscapes.py b/configs_unify/fastscnn/fast_scnn_4x8_100k_lr0.08_cityscapes.py deleted file mode 100644 index 10f56e093..000000000 --- a/configs_unify/fastscnn/fast_scnn_4x8_100k_lr0.08_cityscapes.py +++ /dev/null @@ -1,64 +0,0 @@ -_base_ = [ - '../_base_/models/fast_scnn.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py' -] -crop_size = (512, 1024) -cudnn_benchmark = True -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') - -# Here: What is parameter 'with_seg'? 
-img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations'), # with_seg=True - dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomCrop', crop_size=crop_size), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 1024), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=4, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -# optimizer -optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=4e-5) -optimizer_config = dict() -# learning policy -lr_config = dict( - policy='poly', - power=0.9, - by_epoch=False, -) -# runtime settings -# total_epochs = 1000 -total_iters = 100000 -evaluation = dict(interval=2000, metric='mIoU') -checkpoint_config = dict(interval=2000) - -# log config: log by iter. -log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)]) - diff --git a/configs_unify/fastscnn/fast_scnn_4x8_100k_lr0.12_cityscapes.py b/configs_unify/fastscnn/fast_scnn_4x8_100k_lr0.12_cityscapes.py deleted file mode 100644 index 9ff49a9db..000000000 --- a/configs_unify/fastscnn/fast_scnn_4x8_100k_lr0.12_cityscapes.py +++ /dev/null @@ -1,64 +0,0 @@ -_base_ = [ - '../_base_/models/fast_scnn.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py' -] -crop_size = (512, 1024) -cudnn_benchmark = True -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') - -# Here: What is parameter 'with_seg'? 
-img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations'), # with_seg=True - dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomCrop', crop_size=crop_size), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 1024), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=4, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -# optimizer -optimizer = dict(type='SGD', lr=0.12, momentum=0.9, weight_decay=4e-5) -optimizer_config = dict() -# learning policy -lr_config = dict( - policy='poly', - power=0.9, - by_epoch=False, -) -# runtime settings -# total_epochs = 1000 -total_iters = 100000 -evaluation = dict(interval=2000, metric='mIoU') -checkpoint_config = dict(interval=2000) - -# log config: log by iter. -log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)]) - diff --git a/configs_unify/fastscnn/fast_scnn_4x8_80k_cityscapes.py b/configs_unify/fastscnn/fast_scnn_4x8_80k_cityscapes.py deleted file mode 100644 index dd6ac7f15..000000000 --- a/configs_unify/fastscnn/fast_scnn_4x8_80k_cityscapes.py +++ /dev/null @@ -1,61 +0,0 @@ -_base_ = [ - '../_base_/models/fast_scnn.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py' -] -crop_size = (512, 1024) -cudnn_benchmark = True -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') - -# Here: What is parameter 'with_seg'? 
-img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations'), # with_seg=True - dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomCrop', crop_size=crop_size), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 1024), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=4, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=4e-5) -optimizer_config = dict() -# learning policy -lr_config = dict( - policy='poly', - power=0.9, - by_epoch=False, -) -# runtime settings -# total_epochs = 1000 -total_iters = 100000 -evaluation = dict(interval=2000, metric='mIoU') -checkpoint_config = dict(interval=2000) - diff --git a/configs_unify/fastscnn/fast_scnn_4x8_80k_lr0.045_cityscapes.py b/configs_unify/fastscnn/fast_scnn_4x8_80k_lr0.045_cityscapes.py deleted file mode 100644 index f6fd1729e..000000000 --- a/configs_unify/fastscnn/fast_scnn_4x8_80k_lr0.045_cityscapes.py +++ /dev/null @@ -1,64 +0,0 @@ -_base_ = [ - '../_base_/models/fast_scnn.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py' -] -crop_size = (512, 1024) -cudnn_benchmark = True -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') - -# Here: What is parameter 'with_seg'? 
-img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations'), # with_seg=True - dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomCrop', crop_size=crop_size), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 1024), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=4, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -# optimizer -optimizer = dict(type='SGD', lr=0.045, momentum=0.9, weight_decay=4e-5) -optimizer_config = dict() -# learning policy -lr_config = dict( - policy='poly', - power=0.9, - by_epoch=False, -) -# runtime settings -# total_epochs = 1000 -total_iters = 100000 -evaluation = dict(interval=2000, metric='mIoU') -checkpoint_config = dict(interval=2000) - -# log config: log by iter. -log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)]) - diff --git a/configs_unify/fastscnn/fast_scnn_4x8_80k_lr0.08_cityscapes.py b/configs_unify/fastscnn/fast_scnn_4x8_80k_lr0.08_cityscapes.py deleted file mode 100644 index aec2ea0a2..000000000 --- a/configs_unify/fastscnn/fast_scnn_4x8_80k_lr0.08_cityscapes.py +++ /dev/null @@ -1,64 +0,0 @@ -_base_ = [ - '../_base_/models/fast_scnn.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py' -] -crop_size = (512, 1024) -cudnn_benchmark = True -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') - -# Here: What is parameter 'with_seg'? 
-img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations'), # with_seg=True - dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomCrop', crop_size=crop_size), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 1024), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=4, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -# optimizer -optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=4e-5) -optimizer_config = dict() -# learning policy -lr_config = dict( - policy='poly', - power=0.9, - by_epoch=False, -) -# runtime settings -# total_epochs = 1000 -total_iters = 80000 -evaluation = dict(interval=2000, metric='mIoU') -checkpoint_config = dict(interval=2000) - -# log config: log by iter. -log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)]) - diff --git a/configs_unify/fastscnn/fast_scnn_4x8_80k_lr0.12_cityscapes.py b/configs_unify/fastscnn/fast_scnn_4x8_80k_lr0.12_cityscapes.py deleted file mode 100644 index cf6075f12..000000000 --- a/configs_unify/fastscnn/fast_scnn_4x8_80k_lr0.12_cityscapes.py +++ /dev/null @@ -1,64 +0,0 @@ -_base_ = [ - '../_base_/models/fast_scnn.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py' -] -crop_size = (512, 1024) -cudnn_benchmark = True -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') - -# Here: What is parameter 'with_seg'? 
-img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations'), # with_seg=True - dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomCrop', crop_size=crop_size), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 1024), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=4, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -# optimizer -optimizer = dict(type='SGD', lr=0.12, momentum=0.9, weight_decay=4e-5) -optimizer_config = dict() -# learning policy -lr_config = dict( - policy='poly', - power=0.9, - by_epoch=False, -) -# runtime settings -# total_epochs = 1000 -total_iters = 80000 -evaluation = dict(interval=2000, metric='mIoU') -checkpoint_config = dict(interval=2000) - -# log config: log by iter. -log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)]) - diff --git a/configs_unify/fcnnet/fcn_r101_40ki_cityscapes.py b/configs_unify/fcnnet/fcn_r101_40ki_cityscapes.py deleted file mode 100644 index e0694eb81..000000000 --- a/configs_unify/fcnnet/fcn_r101_40ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './fcn_r50_40ki_cityscapes.py' -model = dict( - pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth', - backbone=dict(depth=101)) diff --git a/configs_unify/fcnnet/fcn_r101_60ki_cityscapes.py b/configs_unify/fcnnet/fcn_r101_60ki_cityscapes.py deleted file mode 100644 index bb92bb56e..000000000 --- a/configs_unify/fcnnet/fcn_r101_60ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './fcn_r50_60ki_cityscapes.py' -model = dict( - pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth', - backbone=dict(depth=101)) diff --git a/configs_unify/fcnnet/fcn_r50_40ki_cityscapes.py b/configs_unify/fcnnet/fcn_r50_40ki_cityscapes.py deleted file mode 100644 index 3db17d350..000000000 --- a/configs_unify/fcnnet/fcn_r50_40ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_r50.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py' -] diff --git a/configs_unify/fcnnet/fcn_r50_60ki_cityscapes.py b/configs_unify/fcnnet/fcn_r50_60ki_cityscapes.py deleted file mode 100644 index ac04a423c..000000000 --- a/configs_unify/fcnnet/fcn_r50_60ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_r50.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py' -] diff --git a/configs_unify/gcnet/gc_r101_40ki_cityscapes.py b/configs_unify/gcnet/gc_r101_40ki_cityscapes.py deleted file mode 100644 index 28c01cce7..000000000 --- a/configs_unify/gcnet/gc_r101_40ki_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './gc_r50_40ki_cityscapes.py' -model = dict( - pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth', - 
diff --git a/configs_unify/gcnet/gc_r101_40ki_cityscapes.py b/configs_unify/gcnet/gc_r101_40ki_cityscapes.py
deleted file mode 100644
index 28c01cce7..000000000
--- a/configs_unify/gcnet/gc_r101_40ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './gc_r50_40ki_cityscapes.py'
-model = dict(
-    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
-    backbone=dict(depth=101))
diff --git a/configs_unify/gcnet/gc_r101_60ki_cityscapes.py b/configs_unify/gcnet/gc_r101_60ki_cityscapes.py
deleted file mode 100644
index b1ebffa6a..000000000
--- a/configs_unify/gcnet/gc_r101_60ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './gc_r50_60ki_cityscapes.py'
-model = dict(
-    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
-    backbone=dict(depth=101))
diff --git a/configs_unify/gcnet/gc_r50_40ki_cityscapes.py b/configs_unify/gcnet/gc_r50_40ki_cityscapes.py
deleted file mode 100644
index 4ad82b30d..000000000
--- a/configs_unify/gcnet/gc_r50_40ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = [
-    '../_base_/models/gc_r50.py', '../_base_/datasets/cityscapes.py',
-    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py'
-]
diff --git a/configs_unify/gcnet/gc_r50_60ki_cityscapes.py b/configs_unify/gcnet/gc_r50_60ki_cityscapes.py
deleted file mode 100644
index dea24b0a0..000000000
--- a/configs_unify/gcnet/gc_r50_60ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = [
-    '../_base_/models/gc_r50.py', '../_base_/datasets/cityscapes.py',
-    '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py'
-]
diff --git a/configs_unify/hrnet/fcn_hr18_4x3_484e_cityscapes.py b/configs_unify/hrnet/fcn_hr18_4x3_484e_cityscapes.py
deleted file mode 100644
index 5742c52ff..000000000
--- a/configs_unify/hrnet/fcn_hr18_4x3_484e_cityscapes.py
+++ /dev/null
@@ -1,63 +0,0 @@
-_base_ = [
-    '../_base_/models/fcn_hr18.py', '../_base_/datasets/cityscapes.py',
-    '../_base_/default_runtime.py'
-]
-model = dict(
-    decode_head=dict(classes_weight=[
-        0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489, 0.8786,
-        1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955, 1.0865, 1.1529,
-        1.0507
-    ]))
-crop_size = (512, 1024)
-cudnn_benchmark = True
-# model training and testing settings
-train_cfg = dict()
-test_cfg = dict(mode='whole')
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(type='LoadAnnotations', with_seg=True),
-    dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
-    dict(type='RandomFlip', flip_ratio=0.5),
-    dict(type='Normalize', **img_norm_cfg),
-    dict(type='RandomCrop', crop_size=crop_size),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
-]
-test_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(
-        type='MultiScaleFlipAug',
-        img_scale=(2048, 1024),
-        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
-        flip=False,
-        transforms=[
-            dict(type='Resize', keep_ratio=True),
-            dict(type='RandomFlip'),
-            dict(type='Normalize', **img_norm_cfg),
-            dict(type='ImageToTensor', keys=['img']),
-            dict(type='Collect', keys=['img']),
-        ])
-]
-data = dict(
-    samples_per_gpu=3,
-    workers_per_gpu=3,
-    train=dict(pipeline=train_pipeline),
-    val=dict(pipeline=test_pipeline),
-    test=dict(pipeline=test_pipeline))
-
-# optimizer
-optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
-optimizer_config = dict()
-# learning policy
-lr_config = dict(
-    policy='poly',
-    power=0.9,
-    by_epoch=False,
-)
-# runtime settings
-total_epochs = 484
-evaluation = dict(interval=11, metric='mIoU')
-checkpoint_config = dict(interval=11)
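
The gc_* files above show the override pattern used throughout this tree: an R101 config names its R50 sibling in `_base_` and re-declares only the keys that change, and the config loader deep-merges the child dict over the base. Roughly (plain dicts only; the real mmcv `Config` also resolves `_base_` lists and `_delete_` keys, and the r50 pretrained path below is made up for illustration):

def merge_cfg(base, override):
    """Recursively overlay `override` onto `base`, mmcv-style."""
    merged = dict(base)
    for key, val in override.items():
        if isinstance(val, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_cfg(merged[key], val)
        else:
            merged[key] = val
    return merged


base = dict(model=dict(
    pretrained='pretrain_model/resnet50_v1c.pth',  # hypothetical base value
    backbone=dict(type='ResNet', depth=50)))
override = dict(model=dict(
    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
    backbone=dict(depth=101)))
cfg = merge_cfg(base, override)
# backbone keeps type='ResNet' but now has depth=101
assert cfg['model']['backbone'] == dict(type='ResNet', depth=101)
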
diff --git a/configs_unify/hrnet/fcn_hr18s_4x3_484e_cityscapes.py b/configs_unify/hrnet/fcn_hr18s_4x3_484e_cityscapes.py
deleted file mode 100644
index 6dd0fbde1..000000000
--- a/configs_unify/hrnet/fcn_hr18s_4x3_484e_cityscapes.py
+++ /dev/null
@@ -1,9 +0,0 @@
-_base_ = './fcn_hr18_4x3_484e_cityscapes.py'
-model = dict(
-    pretrained='pretrain_model/hrnetv2_w18_small-b5a04e21.pth',
-    backbone=dict(
-        extra=dict(
-            stage1=dict(num_blocks=(2, )),
-            stage2=dict(num_blocks=(2, 2)),
-            stage3=dict(num_modules=3, num_blocks=(2, 2, 2)),
-            stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2)))))
diff --git a/configs_unify/hrnet/fcn_hr32_4x3_484e_cityscapes.py b/configs_unify/hrnet/fcn_hr32_4x3_484e_cityscapes.py
deleted file mode 100644
index 3779039c2..000000000
--- a/configs_unify/hrnet/fcn_hr32_4x3_484e_cityscapes.py
+++ /dev/null
@@ -1,10 +0,0 @@
-_base_ = './fcn_hr18_4x3_484e_cityscapes.py'
-model = dict(
-    pretrained='open-mmlab://msra/hrnetv2_w32',
-    backbone=dict(
-        extra=dict(
-            stage2=dict(num_channels=(32, 64)),
-            stage3=dict(num_channels=(32, 64, 128)),
-            stage4=dict(num_channels=(32, 64, 128, 256)))),
-    decode_head=dict(
-        in_channels=[32, 64, 128, 256], channels=sum([32, 64, 128, 256])))
diff --git a/configs_unify/hrnet/fcn_hr40_4x3_484e_cityscapes.py b/configs_unify/hrnet/fcn_hr40_4x3_484e_cityscapes.py
deleted file mode 100644
index d96c95923..000000000
--- a/configs_unify/hrnet/fcn_hr40_4x3_484e_cityscapes.py
+++ /dev/null
@@ -1,10 +0,0 @@
-_base_ = './fcn_hr18_4x3_484e_cityscapes.py'
-model = dict(
-    pretrained='open-mmlab://msra/hrnetv2_w40',
-    backbone=dict(
-        extra=dict(
-            stage2=dict(num_channels=(40, 80)),
-            stage3=dict(num_channels=(40, 80, 160)),
-            stage4=dict(num_channels=(40, 80, 160, 320)))),
-    decode_head=dict(
-        in_channels=[40, 80, 160, 320], channels=sum([40, 80, 160, 320])))
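
The HRNet width variants all follow one recipe: the head consumes all four branch outputs via `input_transform='resize_concat'`, so `in_channels` lists the per-branch widths and `channels=sum([...])` is simply their total: 480 for HR32, 600 for HR40, 720 for HR48 below (and presumably 18 + 36 + 72 + 144 = 270 in the HR18 base, whose model file is not shown here). A one-liner to confirm:

widths = dict(hr18=(18, 36, 72, 144), hr32=(32, 64, 128, 256),
              hr40=(40, 80, 160, 320), hr48=(48, 96, 192, 384))
for name, chans in widths.items():
    # 'resize_concat' stacks the four resized branch maps channel-wise
    print(name, sum(chans))  # 270, 480, 600, 720
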
diff --git a/configs_unify/hrnet/fcn_hr48_484e_pascal_context.py b/configs_unify/hrnet/fcn_hr48_484e_pascal_context.py
deleted file mode 100644
index f38ca7ed2..000000000
--- a/configs_unify/hrnet/fcn_hr48_484e_pascal_context.py
+++ /dev/null
@@ -1,68 +0,0 @@
-_base_ = [
-    '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_context.py',
-    '../_base_/default_runtime.py'
-]
-model = dict(
-    pretrained='pretrain_model/hrnetv2_w48-d2186c55.pth',
-    backbone=dict(
-        extra=dict(
-            stage2=dict(num_channels=(48, 96)),
-            stage3=dict(num_channels=(48, 96, 192)),
-            stage4=dict(num_channels=(48, 96, 192, 384)))),
-    decode_head=dict(
-        num_classes=60,
-        in_channels=[48, 96, 192, 384],
-        channels=sum([48, 96, 192, 384])))
-crop_size = (480, 480)
-cudnn_benchmark = True
-# model training and testing settings
-train_cfg = dict()
-test_cfg = dict(mode='whole')
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(type='LoadAnnotations', with_seg=True),
-    dict(type='Resize', img_scale=(520, 520), ratio_range=(0.5, 2.0)),
-    dict(type='RandomFlip', flip_ratio=0.5),
-    dict(type='Normalize', **img_norm_cfg),
-    dict(type='RandomCrop', crop_size=crop_size),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
-]
-test_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(
-        type='MultiScaleFlipAug',
-        img_scale=(480, 480),
-        img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
-        flip=True,
-        transforms=[
-            dict(type='Resize', keep_ratio=True),
-            dict(type='RandomFlip'),
-            dict(type='Normalize', **img_norm_cfg),
-            dict(type='ImageToTensor', keys=['img']),
-            dict(type='Collect', keys=['img']),
-        ])
-]
-data = dict(
-    samples_per_gpu=4,
-    workers_per_gpu=4,
-    train=dict(pipeline=train_pipeline),
-    val=dict(pipeline=test_pipeline),
-    test=dict(pipeline=test_pipeline))
-
-# optimizer
-optimizer = dict(type='SGD', lr=0.04, momentum=0.9, weight_decay=0.0001)
-optimizer_config = dict()
-# learning policy
-lr_config = dict(
-    policy='poly',
-    power=0.9,
-    by_epoch=False,
-)
-# runtime settings
-total_epochs = 200
-evaluation = dict(interval=11, metric='mIoU')
-checkpoint_config = dict(interval=10)
diff --git a/configs_unify/hrnet/fcn_hr48_4x3_484e_cityscapes.py b/configs_unify/hrnet/fcn_hr48_4x3_484e_cityscapes.py
deleted file mode 100644
index a16bcd60d..000000000
--- a/configs_unify/hrnet/fcn_hr48_4x3_484e_cityscapes.py
+++ /dev/null
@@ -1,10 +0,0 @@
-_base_ = './fcn_hr18_4x3_484e_cityscapes.py'
-model = dict(
-    pretrained='pretrain_model/hrnetv2_w48-d2186c55.pth',
-    backbone=dict(
-        extra=dict(
-            stage2=dict(num_channels=(48, 96)),
-            stage3=dict(num_channels=(48, 96, 192)),
-            stage4=dict(num_channels=(48, 96, 192, 384)))),
-    decode_head=dict(
-        in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384])))
diff --git a/configs_unify/hrnet/fcn_hr48_ohem_4x3_484e_cityscapes.py b/configs_unify/hrnet/fcn_hr48_ohem_4x3_484e_cityscapes.py
deleted file mode 100644
index c5efa5d38..000000000
--- a/configs_unify/hrnet/fcn_hr48_ohem_4x3_484e_cityscapes.py
+++ /dev/null
@@ -1,12 +0,0 @@
-_base_ = './fcn_hr18_4x3_484e_cityscapes.py'
-model = dict(
-    pretrained='pretrain_model/hrnetv2_w48-d2186c55.pth',
-    backbone=dict(
-        extra=dict(
-            stage2=dict(num_channels=(48, 96)),
-            stage3=dict(num_channels=(48, 96, 192)),
-            stage4=dict(num_channels=(48, 96, 192, 384)))),
-    decode_head=dict(
-        in_channels=[48, 96, 192, 384],
-        channels=sum([48, 96, 192, 384]),
-        sampler=dict(type='OHEMSegSampler', thresh=0.9, min_kept=131072)))
diff --git a/configs_unify/nlnet/nl_r101_40ki_cityscapes.py b/configs_unify/nlnet/nl_r101_40ki_cityscapes.py
deleted file mode 100644
index 378978472..000000000
--- a/configs_unify/nlnet/nl_r101_40ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './nl_r50_40ki_cityscapes.py'
-model = dict(
-    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
-    backbone=dict(depth=101))
diff --git a/configs_unify/nlnet/nl_r101_60ki_cityscapes.py b/configs_unify/nlnet/nl_r101_60ki_cityscapes.py
deleted file mode 100644
index cacf74a66..000000000
--- a/configs_unify/nlnet/nl_r101_60ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './nl_r50_60ki_cityscapes.py'
-model = dict(
-    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
-    backbone=dict(depth=101))
diff --git a/configs_unify/nlnet/nl_r50_40ki_cityscapes.py b/configs_unify/nlnet/nl_r50_40ki_cityscapes.py
deleted file mode 100644
index fea99f151..000000000
--- a/configs_unify/nlnet/nl_r50_40ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = [
-    '../_base_/models/nl_r50.py', '../_base_/datasets/cityscapes.py',
-    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py'
-]
diff --git a/configs_unify/nlnet/nl_r50_60ki_cityscapes.py b/configs_unify/nlnet/nl_r50_60ki_cityscapes.py
deleted file mode 100644
index 528532d6a..000000000
--- a/configs_unify/nlnet/nl_r50_60ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = [
-    '../_base_/models/nl_r50.py', '../_base_/datasets/cityscapes.py',
-    '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py'
-]
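
The `*_ohem_*` config above differs from the plain HR48 one only in `sampler=dict(type='OHEMSegSampler', thresh=0.9, min_kept=131072)`. Note 131072 = 2**17, a quarter of the 512 x 1024 crop's pixels. Under the usual reading of these two parameters (as in mmseg's OHEM pixel sampler, which this custom class presumably mirrors), the loss keeps pixels whose predicted probability for the ground-truth class falls below `thresh`, topped up to at least `min_kept` of the hardest pixels. A hedged PyTorch sketch:

import torch
import torch.nn.functional as F


def ohem_pixel_mask(logits, labels, thresh=0.9, min_kept=131072,
                    ignore_index=255):
    """Boolean mask of pixels kept for the loss; logits (N,C,H,W), labels (N,H,W)."""
    with torch.no_grad():
        probs = F.softmax(logits, dim=1)
        valid = labels != ignore_index
        safe = labels.masked_fill(~valid, 0)  # avoid out-of-range gather
        gt_prob = probs.gather(1, safe.unsqueeze(1)).squeeze(1)
        gt_prob = gt_prob.masked_fill(~valid, 1.0)  # never pick ignored pixels
        sorted_prob, _ = gt_prob[valid].flatten().sort()
        if sorted_prob.numel() == 0:
            return valid
        kth = sorted_prob[min(min_kept, sorted_prob.numel()) - 1]
        threshold = max(float(kth), thresh)  # top up to min_kept hard pixels
        return (gt_prob < threshold) & valid
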
diff --git a/configs_unify/ocrnet/ocr_hr18_4x3_484e_cityscapes.py b/configs_unify/ocrnet/ocr_hr18_4x3_484e_cityscapes.py
deleted file mode 100644
index 73b57f48b..000000000
--- a/configs_unify/ocrnet/ocr_hr18_4x3_484e_cityscapes.py
+++ /dev/null
@@ -1,57 +0,0 @@
-_base_ = [
-    '../_base_/models/ocr_hr18.py', '../_base_/datasets/cityscapes.py',
-    '../_base_/default_runtime.py'
-]
-crop_size = (512, 1024)
-cudnn_benchmark = True
-# model training and testing settings
-train_cfg = dict()
-test_cfg = dict(mode='whole')
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(type='LoadAnnotations', with_seg=True),
-    dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
-    dict(type='RandomFlip', flip_ratio=0.5),
-    dict(type='Normalize', **img_norm_cfg),
-    dict(type='RandomCrop', crop_size=crop_size),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
-]
-test_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(
-        type='MultiScaleFlipAug',
-        img_scale=(2048, 1024),
-        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
-        flip=False,
-        transforms=[
-            dict(type='Resize', keep_ratio=True),
-            dict(type='RandomFlip'),
-            dict(type='Normalize', **img_norm_cfg),
-            dict(type='ImageToTensor', keys=['img']),
-            dict(type='Collect', keys=['img']),
-        ])
-]
-data = dict(
-    samples_per_gpu=3,
-    workers_per_gpu=3,
-    train=dict(pipeline=train_pipeline),
-    val=dict(pipeline=test_pipeline),
-    test=dict(pipeline=test_pipeline))
-
-# optimizer
-optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
-optimizer_config = dict()
-# learning policy
-lr_config = dict(
-    policy='poly',
-    power=0.9,
-    by_epoch=False,
-)
-# runtime settings
-total_epochs = 484
-evaluation = dict(interval=11, metric='mIoU')
-checkpoint_config = dict(interval=11)
diff --git a/configs_unify/ocrnet/ocr_hr18s_4x3_484e_cityscapes.py b/configs_unify/ocrnet/ocr_hr18s_4x3_484e_cityscapes.py
deleted file mode 100644
index e23bd27fb..000000000
--- a/configs_unify/ocrnet/ocr_hr18s_4x3_484e_cityscapes.py
+++ /dev/null
@@ -1,10 +0,0 @@
-_base_ = './ocr_hr18_4x3_484e_cityscapes.py'
-norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01)
-model = dict(
-    pretrained='pretrain_model/hrnetv2_w18_small-b5a04e21.pth',
-    backbone=dict(
-        extra=dict(
-            stage1=dict(num_blocks=(2, )),
-            stage2=dict(num_blocks=(2, 2)),
-            stage3=dict(num_modules=3, num_blocks=(2, 2, 2)),
-            stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2)))))
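
The `4x3_484e` naming reads as 4 GPUs x 3 samples per GPU, an effective batch of 12, trained for 484 epochs; with the 2,975 fine-annotated Cityscapes training images that is about 248 iterations per epoch and so roughly 120k iterations total, the same ballpark as the 40ki/60ki/80k iteration schedules elsewhere in this tree. Evaluating every 11 epochs yields 44 evaluation points. A quick check:

import math

gpus, samples_per_gpu, epochs = 4, 3, 484
train_imgs = 2975  # Cityscapes leftImg8bit/train
iters_per_epoch = math.ceil(train_imgs / (gpus * samples_per_gpu))
print(iters_per_epoch, iters_per_epoch * epochs)  # 248 120032
print(epochs // 11)  # 44 evaluations at interval=11
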
diff --git a/configs_unify/ocrnet/ocr_hr48_4x3_484e_cityscapes.py b/configs_unify/ocrnet/ocr_hr48_4x3_484e_cityscapes.py
deleted file mode 100644
index 6230679b5..000000000
--- a/configs_unify/ocrnet/ocr_hr48_4x3_484e_cityscapes.py
+++ /dev/null
@@ -1,39 +0,0 @@
-_base_ = './ocr_hr18_4x3_484e_cityscapes.py'
-norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01)
-model = dict(
-    pretrained='pretrain_model/hrnetv2_w48-d2186c55.pth',
-    backbone=dict(
-        extra=dict(
-            stage2=dict(num_channels=(48, 96)),
-            stage3=dict(num_channels=(48, 96, 192)),
-            stage4=dict(num_channels=(48, 96, 192, 384)))),
-    decode_head=[
-        dict(
-            type='FCNHead',
-            in_channels=[48, 96, 192, 384],
-            channels=sum([48, 96, 192, 384]),
-            input_transform='resize_concat',
-            in_index=(0, 1, 2, 3),
-            kernel_size=1,
-            num_convs=1,
-            norm_cfg=norm_cfg,
-            concat_input=False,
-            drop_out_ratio=-1,
-            num_classes=19,
-            align_corners=False,
-            loss_decode=dict(
-                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
-        dict(
-            type='OCRHead',
-            in_channels=[48, 96, 192, 384],
-            channels=512,
-            ocr_channels=256,
-            input_transform='resize_concat',
-            in_index=(0, 1, 2, 3),
-            norm_cfg=norm_cfg,
-            drop_out_ratio=-1,
-            num_classes=19,
-            align_corners=False,
-            loss_decode=dict(
-                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
-    ])
diff --git a/configs_unify/ocrnet/ocr_hr48_ohem_4x3_484e_cityscapes.py b/configs_unify/ocrnet/ocr_hr48_ohem_4x3_484e_cityscapes.py
deleted file mode 100644
index 477fbf70b..000000000
--- a/configs_unify/ocrnet/ocr_hr48_ohem_4x3_484e_cityscapes.py
+++ /dev/null
@@ -1,40 +0,0 @@
-_base_ = './ocr_hr18_4x3_484e_cityscapes.py'
-norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01)
-model = dict(
-    pretrained='pretrain_model/hrnetv2_w48-d2186c55.pth',
-    backbone=dict(
-        extra=dict(
-            stage2=dict(num_channels=(48, 96)),
-            stage3=dict(num_channels=(48, 96, 192)),
-            stage4=dict(num_channels=(48, 96, 192, 384)))),
-    decode_head=[
-        dict(
-            type='FCNHead',
-            in_channels=[48, 96, 192, 384],
-            channels=sum([48, 96, 192, 384]),
-            input_transform='resize_concat',
-            in_index=(0, 1, 2, 3),
-            kernel_size=1,
-            num_convs=1,
-            norm_cfg=norm_cfg,
-            concat_input=False,
-            drop_out_ratio=-1,
-            num_classes=19,
-            align_corners=False,
-            loss_decode=dict(
-                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
-        dict(
-            type='OCRHead',
-            in_channels=[48, 96, 192, 384],
-            channels=512,
-            ocr_channels=256,
-            input_transform='resize_concat',
-            in_index=(0, 1, 2, 3),
-            norm_cfg=norm_cfg,
-            drop_out_ratio=-1,
-            num_classes=19,
-            align_corners=False,
-            loss_decode=dict(
-                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
-            sampler=dict(type='OHEMSegSampler', thresh=0.9, min_kept=131072))
-    ])
diff --git a/configs_unify/psanet/psa_r101_40ki_cityscapes.py b/configs_unify/psanet/psa_r101_40ki_cityscapes.py
deleted file mode 100644
index b146cbe1d..000000000
--- a/configs_unify/psanet/psa_r101_40ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './psa_r50_40ki_cityscapes.py'
-model = dict(
-    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
-    backbone=dict(depth=101))
diff --git a/configs_unify/psanet/psa_r101_60ki_cityscapes.py b/configs_unify/psanet/psa_r101_60ki_cityscapes.py
deleted file mode 100644
index 66b2ced0c..000000000
--- a/configs_unify/psanet/psa_r101_60ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './psa_r50_60ki_cityscapes.py'
-model = dict(
-    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
-    backbone=dict(depth=101))
diff --git a/configs_unify/psanet/psa_r50_40ki_cityscapes.py b/configs_unify/psanet/psa_r50_40ki_cityscapes.py
deleted file mode 100644
index 6a4acc627..000000000
--- a/configs_unify/psanet/psa_r50_40ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = [
-    '../_base_/models/psa_r50.py', '../_base_/datasets/cityscapes.py',
-    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py'
-]
diff --git a/configs_unify/psanet/psa_r50_60ki_cityscapes.py b/configs_unify/psanet/psa_r50_60ki_cityscapes.py
deleted file mode 100644
index 786554fab..000000000
--- a/configs_unify/psanet/psa_r50_60ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = [
-    '../_base_/models/psa_r50.py', '../_base_/datasets/cityscapes.py',
-    '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py'
-]
diff --git a/configs_unify/pspnet/psp_r101_40ki_cityscapes.py b/configs_unify/pspnet/psp_r101_40ki_cityscapes.py
deleted file mode 100644
index d37a44173..000000000
--- a/configs_unify/pspnet/psp_r101_40ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './psp_r50_40ki_cityscapes.py'
-model = dict(
-    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
-    backbone=dict(depth=101))
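
In the two OCR configs above, `decode_head` is a two-element list: an auxiliary FCNHead with `loss_weight=0.4` and the main OCRHead with `loss_weight=1.0`, both supervised by the same labels. The training objective is presumably the weighted sum of the two cross-entropies, with the auxiliary branch dropped at inference. A minimal sketch of just the loss combination (head modules themselves are out of scope; both logits are assumed already resized to the label map):

import torch.nn.functional as F


def cascade_decode_loss(fcn_logits, ocr_logits, labels, ignore_index=255):
    """0.4 * auxiliary FCN loss + 1.0 * main OCR loss, per the config."""
    loss_aux = F.cross_entropy(fcn_logits, labels, ignore_index=ignore_index)
    loss_main = F.cross_entropy(ocr_logits, labels, ignore_index=ignore_index)
    return 1.0 * loss_main + 0.4 * loss_aux
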
diff --git a/configs_unify/pspnet/psp_r101_60ki_cityscapes.py b/configs_unify/pspnet/psp_r101_60ki_cityscapes.py
deleted file mode 100644
index 0f9fcaad3..000000000
--- a/configs_unify/pspnet/psp_r101_60ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './psp_r50_60ki_cityscapes.py'
-model = dict(
-    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
-    backbone=dict(depth=101))
diff --git a/configs_unify/pspnet/psp_r50_40ki_cityscapes.py b/configs_unify/pspnet/psp_r50_40ki_cityscapes.py
deleted file mode 100644
index df5241e32..000000000
--- a/configs_unify/pspnet/psp_r50_40ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = [
-    '../_base_/models/psp_r50.py', '../_base_/datasets/cityscapes.py',
-    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py'
-]
diff --git a/configs_unify/pspnet/psp_r50_60ki_cityscapes.py b/configs_unify/pspnet/psp_r50_60ki_cityscapes.py
deleted file mode 100644
index 688c12fb9..000000000
--- a/configs_unify/pspnet/psp_r50_60ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = [
-    '../_base_/models/psp_r50.py', '../_base_/datasets/cityscapes.py',
-    '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py'
-]
diff --git a/configs_unify/upernet/uper_r101_40ki_cityscapes.py b/configs_unify/upernet/uper_r101_40ki_cityscapes.py
deleted file mode 100644
index 3ee073c59..000000000
--- a/configs_unify/upernet/uper_r101_40ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './uper_r50_40ki_cityscapes.py'
-model = dict(
-    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
-    backbone=dict(depth=101))
diff --git a/configs_unify/upernet/uper_r101_60ki_cityscapes.py b/configs_unify/upernet/uper_r101_60ki_cityscapes.py
deleted file mode 100644
index fdc2987b4..000000000
--- a/configs_unify/upernet/uper_r101_60ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './uper_r50_60ki_cityscapes.py'
-model = dict(
-    pretrained='pretrain_model/resnet101_v1c-5fe8ded3.pth',
-    backbone=dict(depth=101))
diff --git a/configs_unify/upernet/uper_r50_40ki_cityscapes.py b/configs_unify/upernet/uper_r50_40ki_cityscapes.py
deleted file mode 100644
index c955aeb72..000000000
--- a/configs_unify/upernet/uper_r50_40ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = [
-    '../_base_/models/uper_r50.py', '../_base_/datasets/cityscapes.py',
-    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40ki.py'
-]
diff --git a/configs_unify/upernet/uper_r50_60ki_cityscapes.py b/configs_unify/upernet/uper_r50_60ki_cityscapes.py
deleted file mode 100644
index fb1fde1f9..000000000
--- a/configs_unify/upernet/uper_r50_60ki_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = [
-    '../_base_/models/uper_r50.py', '../_base_/datasets/cityscapes.py',
-    '../_base_/default_runtime.py', '../_base_/schedules/schedule_60ki.py'
-]
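
The psp_* and uper_* files above are pure composition: a `_base_` list pulling in model, dataset, runtime and schedule, plus a two-key override for the R101 variants. With mmcv installed, the merged result can be inspected without launching training; a sketch (this assumes a checkout where these since-deleted configs still exist):

from mmcv import Config

cfg = Config.fromfile('configs_unify/upernet/uper_r101_60ki_cityscapes.py')
print(cfg.model.backbone.depth)  # 101, overriding the r50 base
print(cfg.pretty_text)           # the flat config after _base_ resolution
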