diff --git a/demo/MMSegmentation_Tutorial.ipynb b/demo/MMSegmentation_Tutorial.ipynb
index 4a1dbfc58..4bcbfcba6 100644
--- a/demo/MMSegmentation_Tutorial.ipynb
+++ b/demo/MMSegmentation_Tutorial.ipynb
@@ -381,10 +381,10 @@
    "outputs": [],
    "source": [
     "from mmseg.datasets.builder import DATASETS\n",
-    "from mmseg.datasets.custom import CustomDataset\n",
+    "from mmseg.datasets.custom import BaseSegDataset\n",
     "\n",
     "@DATASETS.register_module()\n",
-    "class StanfordBackgroundDataset(CustomDataset):\n",
+    "class StanfordBackgroundDataset(BaseSegDataset):\n",
     "  CLASSES = classes\n",
     "  PALETTE = palette\n",
     "  def __init__(self, split, **kwargs):\n",
diff --git a/mmseg/datasets/__init__.py b/mmseg/datasets/__init__.py
index cc4d89215..873b566ab 100644
--- a/mmseg/datasets/__init__.py
+++ b/mmseg/datasets/__init__.py
@@ -3,7 +3,7 @@ from .ade import ADE20KDataset
 from .chase_db1 import ChaseDB1Dataset
 from .cityscapes import CityscapesDataset
 from .coco_stuff import COCOStuffDataset
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 from .dark_zurich import DarkZurichDataset
 from .dataset_wrappers import MultiImageMixDataset
 from .drive import DRIVEDataset
@@ -23,7 +23,7 @@ from .transforms import (CLAHE, AdjustGamma, LoadAnnotations,
 from .voc import PascalVOCDataset
 
 __all__ = [
-    'CustomDataset', 'CityscapesDataset', 'PascalVOCDataset', 'ADE20KDataset',
+    'BaseSegDataset', 'CityscapesDataset', 'PascalVOCDataset', 'ADE20KDataset',
     'PascalContextDataset', 'PascalContextDataset59', 'ChaseDB1Dataset',
     'DRIVEDataset', 'HRFDataset', 'STAREDataset', 'DarkZurichDataset',
     'NightDrivingDataset', 'COCOStuffDataset', 'LoveDADataset',
diff --git a/mmseg/datasets/ade.py b/mmseg/datasets/ade.py
index 740bd1dd2..2153ff2d7 100644
--- a/mmseg/datasets/ade.py
+++ b/mmseg/datasets/ade.py
@@ -1,10 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class ADE20KDataset(CustomDataset):
+class ADE20KDataset(BaseSegDataset):
     """ADE20K dataset.
 
     In segmentation map annotation for ADE20K, 0 stands for background, which
diff --git a/mmseg/datasets/chase_db1.py b/mmseg/datasets/chase_db1.py
index 5ef2c3cb6..7c697e295 100644
--- a/mmseg/datasets/chase_db1.py
+++ b/mmseg/datasets/chase_db1.py
@@ -1,11 +1,11 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class ChaseDB1Dataset(CustomDataset):
+class ChaseDB1Dataset(BaseSegDataset):
     """Chase_db1 dataset.
 
     In segmentation map annotation for Chase_db1, 0 stands for background,
diff --git a/mmseg/datasets/cityscapes.py b/mmseg/datasets/cityscapes.py
index c2caa8ccc..a72dcbe1b 100644
--- a/mmseg/datasets/cityscapes.py
+++ b/mmseg/datasets/cityscapes.py
@@ -1,10 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class CityscapesDataset(CustomDataset):
+class CityscapesDataset(BaseSegDataset):
     """Cityscapes dataset.
 
     The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix`` is
diff --git a/mmseg/datasets/coco_stuff.py b/mmseg/datasets/coco_stuff.py
index b53408bd4..72fd0e4b2 100644
--- a/mmseg/datasets/coco_stuff.py
+++ b/mmseg/datasets/coco_stuff.py
@@ -1,10 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class COCOStuffDataset(CustomDataset):
+class COCOStuffDataset(BaseSegDataset):
     """COCO-Stuff dataset.
 
     In segmentation map annotation for COCO-Stuff, Train-IDs of the 10k version
diff --git a/mmseg/datasets/custom.py b/mmseg/datasets/custom.py
index e526b450b..edb393be0 100644
--- a/mmseg/datasets/custom.py
+++ b/mmseg/datasets/custom.py
@@ -11,7 +11,7 @@ from mmseg.registry import DATASETS
 
 
 @DATASETS.register_module()
-class CustomDataset(BaseDataset):
+class BaseSegDataset(BaseDataset):
     """Custom dataset for semantic segmentation. An example of file structure
     is as followed.
 
@@ -32,7 +32,7 @@ class CustomDataset(BaseDataset):
         │   │   │   │   ├── zzz{seg_map_suffix}
         │   │   │   ├── val
 
-    The img/gt_semantic_seg pair of CustomDataset should be of the same
+    The img/gt_semantic_seg pair of BaseSegDataset should be of the same
     except suffix. A valid img/gt_semantic_seg filename pair should be like
     ``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included
     in the suffix). If split is given, then ``xxx`` is specified in txt file.
diff --git a/mmseg/datasets/drive.py b/mmseg/datasets/drive.py
index 4d78f2dfa..2fa78d985 100644
--- a/mmseg/datasets/drive.py
+++ b/mmseg/datasets/drive.py
@@ -1,11 +1,11 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class DRIVEDataset(CustomDataset):
+class DRIVEDataset(BaseSegDataset):
     """DRIVE dataset.
 
     In segmentation map annotation for DRIVE, 0 stands for background, which is
diff --git a/mmseg/datasets/hrf.py b/mmseg/datasets/hrf.py
index 996e3c2b9..91ed9c1e3 100644
--- a/mmseg/datasets/hrf.py
+++ b/mmseg/datasets/hrf.py
@@ -1,11 +1,11 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class HRFDataset(CustomDataset):
+class HRFDataset(BaseSegDataset):
     """HRF dataset.
 
     In segmentation map annotation for HRF, 0 stands for background, which is
diff --git a/mmseg/datasets/isaid.py b/mmseg/datasets/isaid.py
index 02a418492..c5a435779 100644
--- a/mmseg/datasets/isaid.py
+++ b/mmseg/datasets/isaid.py
@@ -1,10 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class iSAIDDataset(CustomDataset):
+class iSAIDDataset(BaseSegDataset):
     """ iSAID: A Large-scale Dataset for Instance Segmentation in Aerial Images
     In segmentation map annotation for iSAID dataset, which is included
     in 16 categories. ``reduce_zero_label`` is fixed to False. The
diff --git a/mmseg/datasets/isprs.py b/mmseg/datasets/isprs.py
index 888ea4762..67bccc32d 100644
--- a/mmseg/datasets/isprs.py
+++ b/mmseg/datasets/isprs.py
@@ -1,10 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class ISPRSDataset(CustomDataset):
+class ISPRSDataset(BaseSegDataset):
     """ISPRS dataset.
 
     In segmentation map annotation for ISPRS, 0 is the ignore index.
diff --git a/mmseg/datasets/loveda.py b/mmseg/datasets/loveda.py
index 00f7881cd..2dae213ad 100644
--- a/mmseg/datasets/loveda.py
+++ b/mmseg/datasets/loveda.py
@@ -1,10 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class LoveDADataset(CustomDataset):
+class LoveDADataset(BaseSegDataset):
     """LoveDA dataset.
 
     In segmentation map annotation for LoveDA, 0 is the ignore index.
diff --git a/mmseg/datasets/pascal_context.py b/mmseg/datasets/pascal_context.py
index 2c0fae457..47ec544ea 100644
--- a/mmseg/datasets/pascal_context.py
+++ b/mmseg/datasets/pascal_context.py
@@ -2,11 +2,11 @@
 import os.path as osp
 
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class PascalContextDataset(CustomDataset):
+class PascalContextDataset(BaseSegDataset):
     """PascalContext dataset.
 
     In segmentation map annotation for PascalContext, 0 stands for background,
@@ -57,7 +57,7 @@ class PascalContextDataset(CustomDataset):
 
 
 @DATASETS.register_module()
-class PascalContextDataset59(CustomDataset):
+class PascalContextDataset59(BaseSegDataset):
     """PascalContext dataset.
 
     In segmentation map annotation for PascalContext, 0 stands for background,
diff --git a/mmseg/datasets/potsdam.py b/mmseg/datasets/potsdam.py
index 65e23ecb4..e8801c4f3 100644
--- a/mmseg/datasets/potsdam.py
+++ b/mmseg/datasets/potsdam.py
@@ -1,10 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class PotsdamDataset(CustomDataset):
+class PotsdamDataset(BaseSegDataset):
     """ISPRS Potsdam dataset.
 
     In segmentation map annotation for Potsdam dataset, 0 is the ignore index.
diff --git a/mmseg/datasets/stare.py b/mmseg/datasets/stare.py
index 53defc433..73086dd3b 100644
--- a/mmseg/datasets/stare.py
+++ b/mmseg/datasets/stare.py
@@ -1,10 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class STAREDataset(CustomDataset):
+class STAREDataset(BaseSegDataset):
     """STARE dataset.
 
     In segmentation map annotation for STARE, 0 stands for background, which is
diff --git a/mmseg/datasets/voc.py b/mmseg/datasets/voc.py
index 4848d17b2..fe4070205 100644
--- a/mmseg/datasets/voc.py
+++ b/mmseg/datasets/voc.py
@@ -2,11 +2,11 @@
 import os.path as osp
 
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class PascalVOCDataset(CustomDataset):
+class PascalVOCDataset(BaseSegDataset):
     """Pascal VOC dataset.
 
     Args:
diff --git a/tests/test_datasets/test_dataset.py b/tests/test_datasets/test_dataset.py
index 4b4750fca..2ef51455e 100644
--- a/tests/test_datasets/test_dataset.py
+++ b/tests/test_datasets/test_dataset.py
@@ -6,10 +6,10 @@ from unittest.mock import MagicMock
 
 import pytest
 
-from mmseg.datasets import (ADE20KDataset, CityscapesDataset, COCOStuffDataset,
-                            CustomDataset, ISPRSDataset, LoveDADataset,
-                            PascalVOCDataset, PotsdamDataset, iSAIDDataset)
-from mmseg.registry import DATASETS
+from mmseg.datasets import (ADE20KDataset, BaseSegDataset,
+                            CityscapesDataset, COCOStuffDataset, ISPRSDataset,
+                            LoveDADataset, PascalVOCDataset, PotsdamDataset,
+                            iSAIDDataset)
 from mmseg.utils import get_classes, get_palette
 
 
@@ -87,7 +87,7 @@ def test_palette():
 
 def test_custom_dataset():
     # with 'img_path' and 'seg_map_path' in data_prefix
-    train_dataset = CustomDataset(
+    train_dataset = BaseSegDataset(
         data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),
         data_prefix=dict(
             img_path='imgs/',
@@ -98,7 +98,7 @@
     assert len(train_dataset) == 5
 
     # with 'img_path' and 'seg_map_path' in data_prefix and ann_file
-    train_dataset = CustomDataset(
+    train_dataset = BaseSegDataset(
         data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),
         data_prefix=dict(
             img_path='imgs/',
@@ -110,7 +110,7 @@
     assert len(train_dataset) == 4
 
     # no data_root
-    train_dataset = CustomDataset(
+    train_dataset = BaseSegDataset(
         data_prefix=dict(
             img_path=osp.join(
                 osp.dirname(__file__), '../data/pseudo_dataset/imgs'),
@@ -122,7 +122,7 @@
 
     # with data_root but 'img_path' and 'seg_map_path' in data_prefix are
     # abs path
-    train_dataset = CustomDataset(
+    train_dataset = BaseSegDataset(
         data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),
         data_prefix=dict(
             img_path=osp.join(
@@ -134,7 +134,7 @@
     assert len(train_dataset) == 5
 
     # test_mode=True
-    test_dataset = CustomDataset(
+    test_dataset = BaseSegDataset(
         data_prefix=dict(
             img_path=osp.join(
                 osp.dirname(__file__), '../data/pseudo_dataset/imgs')),
@@ -245,7 +245,7 @@ def test_isaid():
 @pytest.mark.parametrize('dataset, classes', [
     ('ADE20KDataset', ('wall', 'building')),
     ('CityscapesDataset', ('road', 'sidewalk')),
-    ('CustomDataset', ('bus', 'car')),
+    ('BaseSegDataset', ('bus', 'car')),
     ('PascalVOCDataset', ('aeroplane', 'bicycle')),
 ])
 def test_custom_classes_override_default(dataset, classes):
@@ -269,7 +269,7 @@
     assert custom_dataset.metainfo['classes'] != original_classes
     assert custom_dataset.metainfo['classes'] == classes
 
-    if not isinstance(custom_dataset, CustomDataset):
+    if not isinstance(custom_dataset, BaseSegDataset):
         assert isinstance(custom_dataset.label_map, dict)
 
     # Test setting classes as a list
@@ -282,7 +282,7 @@
     assert custom_dataset.metainfo['classes'] != original_classes
     assert custom_dataset.metainfo['classes'] == list(classes)
 
-    if not isinstance(custom_dataset, CustomDataset):
+    if not isinstance(custom_dataset, BaseSegDataset):
         assert isinstance(custom_dataset.label_map, dict)
 
     # Test overriding not a subset
@@ -295,11 +295,11 @@
     assert custom_dataset.metainfo['classes'] != original_classes
     assert custom_dataset.metainfo['classes'] == [classes[0]]
 
-    if not isinstance(custom_dataset, CustomDataset):
+    if not isinstance(custom_dataset, BaseSegDataset):
         assert isinstance(custom_dataset.label_map, dict)
 
     # Test default behavior
-    if dataset_class is CustomDataset:
+    if dataset_class is BaseSegDataset:
         with pytest.raises(AssertionError):
             custom_dataset = dataset_class(
                 ann_file=ann_file,
@@ -320,7 +320,7 @@
 
 
 def test_custom_dataset_random_palette_is_generated():
-    dataset = CustomDataset(
+    dataset = BaseSegDataset(
         pipeline=[],
         data_prefix=dict(img_path=MagicMock()),
         ann_file=MagicMock(),
@@ -334,7 +334,7 @@
 
 
 def test_custom_dataset_custom_palette():
-    dataset = CustomDataset(
+    dataset = BaseSegDataset(
         data_prefix=dict(img_path=MagicMock()),
         ann_file=MagicMock(),
         metainfo=dict(
@@ -346,7 +346,7 @@
                                                   [200, 200, 200]])
     # test custom class and palette don't match
     with pytest.raises(ValueError):
-        dataset = CustomDataset(
+        dataset = BaseSegDataset(
             data_prefix=dict(img_path=MagicMock()),
             ann_file=MagicMock(),
             metainfo=dict(classes=('bus', 'car'), palette=[[200, 200, 200]]),
diff --git a/tests/test_datasets/test_dataset_builder.py b/tests/test_datasets/test_dataset_builder.py
index d48801aea..5e70726aa 100644
--- a/tests/test_datasets/test_dataset_builder.py
+++ b/tests/test_datasets/test_dataset_builder.py
@@ -37,7 +37,7 @@ def test_build_dataset():
 
     # test RepeatDataset
     cfg = dict(
-        type='CustomDataset',
+        type='BaseSegDataset',
         pipeline=[],
         data_root=data_root,
         data_prefix=data_prefix,
@@ -51,13 +51,13 @@
     # We use same dir twice for simplicity
     # with data_prefix.seg_map_path
     cfg1 = dict(
-        type='CustomDataset',
+        type='BaseSegDataset',
         pipeline=[],
         data_root=data_root,
         data_prefix=data_prefix,
         serialize_data=False)
     cfg2 = dict(
-        type='CustomDataset',
+        type='BaseSegDataset',
         pipeline=[],
         data_root=data_root,
         data_prefix=data_prefix,
@@ -81,14 +81,14 @@
 
     # with data_prefix.seg_map_path, ann_file
     cfg1 = dict(
-        type='CustomDataset',
+        type='BaseSegDataset',
         pipeline=[],
         data_root=data_root,
         data_prefix=data_prefix,
         ann_file='splits/train.txt',
         serialize_data=False)
     cfg2 = dict(
-        type='CustomDataset',
+        type='BaseSegDataset',
         pipeline=[],
         data_root=data_root,
         data_prefix=data_prefix,
@@ -103,7 +103,7 @@
     # test mode
     cfg1 = dict(
-        type='CustomDataset',
+        type='BaseSegDataset',
         pipeline=[],
         data_root=data_root,
         data_prefix=dict(img_path='imgs/'),
         test_mode=True,
@@ -111,7 +111,7 @@
         metainfo=dict(classes=('pseudo_class', )),
         serialize_data=False)
     cfg2 = dict(
-        type='CustomDataset',
+        type='BaseSegDataset',
         pipeline=[],
         data_root=data_root,
         data_prefix=dict(img_path='imgs/'),
@@ -127,7 +127,7 @@
     # test mode with ann_files
     cfg1 = dict(
-        type='CustomDataset',
+        type='BaseSegDataset',
         pipeline=[],
         data_root=data_root,
         data_prefix=dict(img_path='imgs/'),
         ann_file='splits/val.txt',
@@ -136,7 +136,7 @@
         metainfo=dict(classes=('pseudo_class', )),
         serialize_data=False)
     cfg2 = dict(
-        type='CustomDataset',
+        type='BaseSegDataset',
         pipeline=[],
         data_root=data_root,
         data_prefix=dict(img_path='imgs/'),
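
Usage note (illustrative, not part of the patch above): for downstream code the rename is mechanical; subclasses and config entries that referred to CustomDataset now refer to BaseSegDataset, as the tutorial and test hunks show. The Python sketch below assumes the import path used throughout this diff (mmseg.datasets.custom), the DATASETS registry from mmseg.registry, and an mmengine-style Registry.build(); the dataset name, class/palette values, and data paths are placeholders only.

    # Post-rename usage sketch; names, labels and paths below are placeholders.
    from mmseg.registry import DATASETS
    from mmseg.datasets.custom import BaseSegDataset  # was: CustomDataset


    @DATASETS.register_module()
    class StanfordBackgroundDataset(BaseSegDataset):
        # Class attributes mirror the tutorial cell touched in this diff; the
        # real label names and colors depend on the dataset being registered.
        CLASSES = ('sky', 'tree', 'road')
        PALETTE = [[128, 128, 128], [129, 127, 38], [128, 64, 128]]


    # Configs that previously used type='CustomDataset' now reference the new
    # name, mirroring tests/test_datasets/test_dataset_builder.py above.
    cfg = dict(
        type='BaseSegDataset',
        pipeline=[],
        data_root='data/my_dataset',  # placeholder path
        data_prefix=dict(
            img_path='img_dir/train', seg_map_path='ann_dir/train'))
    dataset = DATASETS.build(cfg)  # assumes mmengine Registry.build

Building the config only succeeds against an on-disk layout like the one in the custom.py docstring above (img_dir/ann_dir pairs whose filenames share a stem and differ only in suffix).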