[Fix] Change CustomDataset to BaseSegDataset (#1791)

Authored by MengzhangLI on 2022-07-26 12:01:40 +08:00; committed by GitHub
parent ba4d1d62aa
commit 3cf2bbda2a
18 changed files with 59 additions and 59 deletions

View File

@ -381,10 +381,10 @@
"outputs": [],
"source": [
"from mmseg.datasets.builder import DATASETS\n",
"from mmseg.datasets.custom import CustomDataset\n",
"from mmseg.datasets.custom import BaseSegDataset\n",
"\n",
"@DATASETS.register_module()\n",
"class StanfordBackgroundDataset(CustomDataset):\n",
"class StanfordBackgroundDataset(BaseSegDataset):\n",
" CLASSES = classes\n",
" PALETTE = palette\n",
" def __init__(self, split, **kwargs):\n",

View File

@ -3,7 +3,7 @@ from .ade import ADE20KDataset
from .chase_db1 import ChaseDB1Dataset
from .cityscapes import CityscapesDataset
from .coco_stuff import COCOStuffDataset
-from .custom import CustomDataset
+from .custom import BaseSegDataset
from .dark_zurich import DarkZurichDataset
from .dataset_wrappers import MultiImageMixDataset
from .drive import DRIVEDataset
@ -23,7 +23,7 @@ from .transforms import (CLAHE, AdjustGamma, LoadAnnotations,
from .voc import PascalVOCDataset
__all__ = [
-'CustomDataset', 'CityscapesDataset', 'PascalVOCDataset', 'ADE20KDataset',
+'BaseSegDataset', 'CityscapesDataset', 'PascalVOCDataset', 'ADE20KDataset',
'PascalContextDataset', 'PascalContextDataset59', 'ChaseDB1Dataset',
'DRIVEDataset', 'HRFDataset', 'STAREDataset', 'DarkZurichDataset',
'NightDrivingDataset', 'COCOStuffDataset', 'LoveDADataset',
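
Since the public export in __all__ changes together with the import, downstream code has to follow the rename. A small sketch of what breaks and what replaces it after this commit; the config line mirrors the builder tests further down:

    # After this commit the base class is exported only under its new name.
    from mmseg.datasets import BaseSegDataset   # works on the updated branch

    # The old import now fails: custom.py no longer defines CustomDataset and
    # __init__.py no longer exports it.
    #   from mmseg.datasets import CustomDataset   # ImportError

    # Configs that referred to the registered name need the same rename:
    cfg = dict(type='BaseSegDataset')            # was: dict(type='CustomDataset')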

View File

@ -1,10 +1,10 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
@DATASETS.register_module()
-class ADE20KDataset(CustomDataset):
+class ADE20KDataset(BaseSegDataset):
"""ADE20K dataset.
In segmentation map annotation for ADE20K, 0 stands for background, which

View File

@ -1,11 +1,11 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
@DATASETS.register_module()
-class ChaseDB1Dataset(CustomDataset):
+class ChaseDB1Dataset(BaseSegDataset):
"""Chase_db1 dataset.
In segmentation map annotation for Chase_db1, 0 stands for background,

View File

@ -1,10 +1,10 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
@DATASETS.register_module()
-class CityscapesDataset(CustomDataset):
+class CityscapesDataset(BaseSegDataset):
"""Cityscapes dataset.
The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix`` is
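
Each renamed subclass in this commit only pins dataset-specific defaults on top of the new base class; the docstring line above already names the fixed img_suffix. A rough, abridged sketch of that shared pattern (the seg_map_suffix default and the omitted METAINFO are assumptions, not shown in this hunk):

    # Abridged sketch of the subclass pattern: fix the suffixes and defer the
    # rest to the renamed base class. METAINFO (classes/palette) is omitted.
    from mmseg.registry import DATASETS
    from .custom import BaseSegDataset


    @DATASETS.register_module()
    class CityscapesDataset(BaseSegDataset):

        def __init__(self,
                     img_suffix='_leftImg8bit.png',
                     seg_map_suffix='_gtFine_labelTrainIds.png',
                     **kwargs):
            super().__init__(
                img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs)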

View File

@ -1,10 +1,10 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
@DATASETS.register_module()
-class COCOStuffDataset(CustomDataset):
+class COCOStuffDataset(BaseSegDataset):
"""COCO-Stuff dataset.
In segmentation map annotation for COCO-Stuff, Train-IDs of the 10k version

View File

@ -11,7 +11,7 @@ from mmseg.registry import DATASETS
@DATASETS.register_module()
-class CustomDataset(BaseDataset):
+class BaseSegDataset(BaseDataset):
"""Custom dataset for semantic segmentation. An example of file structure
is as followed.
@ -32,7 +32,7 @@ class CustomDataset(BaseDataset):
zzz{seg_map_suffix}
val
-The img/gt_semantic_seg pair of CustomDataset should be of the same
+The img/gt_semantic_seg pair of BaseSegDataset should be of the same
except suffix. A valid img/gt_semantic_seg filename pair should be like
``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included
in the suffix). If split is given, then ``xxx`` is specified in txt file.
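
The docstring above spells out the pairing rule: every xxx{img_suffix} under img_path is matched with xxx{seg_map_suffix} under seg_map_path. A minimal sketch of using the renamed class directly, mirroring the constructor arguments exercised by the updated tests below; the paths, suffixes and class names here are placeholders:

    # Hedged sketch: instantiate the renamed base class directly. Images named
    # 'xxx.jpg' under img_path pair with masks named 'xxx.png' under seg_map_path.
    from mmseg.datasets import BaseSegDataset

    dataset = BaseSegDataset(
        data_root='data/my_dataset',                      # placeholder
        data_prefix=dict(img_path='img_dir/train',
                         seg_map_path='ann_dir/train'),
        img_suffix='.jpg',
        seg_map_suffix='.png',
        metainfo=dict(classes=('background', 'foreground')),
        pipeline=[])
    print(len(dataset))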

View File

@ -1,11 +1,11 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
@DATASETS.register_module()
-class DRIVEDataset(CustomDataset):
+class DRIVEDataset(BaseSegDataset):
"""DRIVE dataset.
In segmentation map annotation for DRIVE, 0 stands for background, which is

View File

@ -1,11 +1,11 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
@DATASETS.register_module()
-class HRFDataset(CustomDataset):
+class HRFDataset(BaseSegDataset):
"""HRF dataset.
In segmentation map annotation for HRF, 0 stands for background, which is

View File

@ -1,10 +1,10 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
@DATASETS.register_module()
-class iSAIDDataset(CustomDataset):
+class iSAIDDataset(BaseSegDataset):
""" iSAID: A Large-scale Dataset for Instance Segmentation in Aerial Images
In segmentation map annotation for iSAID dataset, which is included
in 16 categories. ``reduce_zero_label`` is fixed to False. The

View File

@ -1,10 +1,10 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
@DATASETS.register_module()
-class ISPRSDataset(CustomDataset):
+class ISPRSDataset(BaseSegDataset):
"""ISPRS dataset.
In segmentation map annotation for ISPRS, 0 is the ignore index.

View File

@ -1,10 +1,10 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
@DATASETS.register_module()
-class LoveDADataset(CustomDataset):
+class LoveDADataset(BaseSegDataset):
"""LoveDA dataset.
In segmentation map annotation for LoveDA, 0 is the ignore index.

View File

@ -2,11 +2,11 @@
import os.path as osp
from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
@DATASETS.register_module()
-class PascalContextDataset(CustomDataset):
+class PascalContextDataset(BaseSegDataset):
"""PascalContext dataset.
In segmentation map annotation for PascalContext, 0 stands for background,
@ -57,7 +57,7 @@ class PascalContextDataset(CustomDataset):
@DATASETS.register_module()
-class PascalContextDataset59(CustomDataset):
+class PascalContextDataset59(BaseSegDataset):
"""PascalContext dataset.
In segmentation map annotation for PascalContext, 0 stands for background,

View File

@ -1,10 +1,10 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
@DATASETS.register_module()
-class PotsdamDataset(CustomDataset):
+class PotsdamDataset(BaseSegDataset):
"""ISPRS Potsdam dataset.
In segmentation map annotation for Potsdam dataset, 0 is the ignore index.

View File

@ -1,10 +1,10 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
@DATASETS.register_module()
-class STAREDataset(CustomDataset):
+class STAREDataset(BaseSegDataset):
"""STARE dataset.
In segmentation map annotation for STARE, 0 stands for background, which is

View File

@ -2,11 +2,11 @@
import os.path as osp
from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
@DATASETS.register_module()
-class PascalVOCDataset(CustomDataset):
+class PascalVOCDataset(BaseSegDataset):
"""Pascal VOC dataset.
Args:

View File

@ -6,10 +6,10 @@ from unittest.mock import MagicMock
import pytest
-from mmseg.datasets import (ADE20KDataset, CityscapesDataset, COCOStuffDataset,
-CustomDataset, ISPRSDataset, LoveDADataset,
-PascalVOCDataset, PotsdamDataset, iSAIDDataset)
from mmseg.registry import DATASETS
+from mmseg.datasets import (ADE20KDataset, BaseSegDataset,
+CityscapesDataset, COCOStuffDataset, ISPRSDataset,
+LoveDADataset, PascalVOCDataset, PotsdamDataset,
+iSAIDDataset)
from mmseg.utils import get_classes, get_palette
@ -87,7 +87,7 @@ def test_palette():
def test_custom_dataset():
# with 'img_path' and 'seg_map_path' in data_prefix
-train_dataset = CustomDataset(
+train_dataset = BaseSegDataset(
data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),
data_prefix=dict(
img_path='imgs/',
@ -98,7 +98,7 @@ def test_custom_dataset():
assert len(train_dataset) == 5
# with 'img_path' and 'seg_map_path' in data_prefix and ann_file
-train_dataset = CustomDataset(
+train_dataset = BaseSegDataset(
data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),
data_prefix=dict(
img_path='imgs/',
@ -110,7 +110,7 @@ def test_custom_dataset():
assert len(train_dataset) == 4
# no data_root
-train_dataset = CustomDataset(
+train_dataset = BaseSegDataset(
data_prefix=dict(
img_path=osp.join(
osp.dirname(__file__), '../data/pseudo_dataset/imgs'),
@ -122,7 +122,7 @@ def test_custom_dataset():
# with data_root but 'img_path' and 'seg_map_path' in data_prefix are
# abs path
-train_dataset = CustomDataset(
+train_dataset = BaseSegDataset(
data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),
data_prefix=dict(
img_path=osp.join(
@ -134,7 +134,7 @@ def test_custom_dataset():
assert len(train_dataset) == 5
# test_mode=True
-test_dataset = CustomDataset(
+test_dataset = BaseSegDataset(
data_prefix=dict(
img_path=osp.join(
osp.dirname(__file__), '../data/pseudo_dataset/imgs')),
@ -245,7 +245,7 @@ def test_isaid():
@pytest.mark.parametrize('dataset, classes', [
('ADE20KDataset', ('wall', 'building')),
('CityscapesDataset', ('road', 'sidewalk')),
-('CustomDataset', ('bus', 'car')),
+('BaseSegDataset', ('bus', 'car')),
('PascalVOCDataset', ('aeroplane', 'bicycle')),
])
def test_custom_classes_override_default(dataset, classes):
@ -269,7 +269,7 @@ def test_custom_classes_override_default(dataset, classes):
assert custom_dataset.metainfo['classes'] != original_classes
assert custom_dataset.metainfo['classes'] == classes
-if not isinstance(custom_dataset, CustomDataset):
+if not isinstance(custom_dataset, BaseSegDataset):
assert isinstance(custom_dataset.label_map, dict)
# Test setting classes as a list
@ -282,7 +282,7 @@ def test_custom_classes_override_default(dataset, classes):
assert custom_dataset.metainfo['classes'] != original_classes
assert custom_dataset.metainfo['classes'] == list(classes)
-if not isinstance(custom_dataset, CustomDataset):
+if not isinstance(custom_dataset, BaseSegDataset):
assert isinstance(custom_dataset.label_map, dict)
# Test overriding not a subset
@ -295,11 +295,11 @@ def test_custom_classes_override_default(dataset, classes):
assert custom_dataset.metainfo['classes'] != original_classes
assert custom_dataset.metainfo['classes'] == [classes[0]]
-if not isinstance(custom_dataset, CustomDataset):
+if not isinstance(custom_dataset, BaseSegDataset):
assert isinstance(custom_dataset.label_map, dict)
# Test default behavior
-if dataset_class is CustomDataset:
+if dataset_class is BaseSegDataset:
with pytest.raises(AssertionError):
custom_dataset = dataset_class(
ann_file=ann_file,
@ -320,7 +320,7 @@ def test_custom_classes_override_default(dataset, classes):
def test_custom_dataset_random_palette_is_generated():
-dataset = CustomDataset(
+dataset = BaseSegDataset(
pipeline=[],
data_prefix=dict(img_path=MagicMock()),
ann_file=MagicMock(),
@ -334,7 +334,7 @@ def test_custom_dataset_random_palette_is_generated():
def test_custom_dataset_custom_palette():
-dataset = CustomDataset(
+dataset = BaseSegDataset(
data_prefix=dict(img_path=MagicMock()),
ann_file=MagicMock(),
metainfo=dict(
@ -346,7 +346,7 @@ def test_custom_dataset_custom_palette():
[200, 200, 200]])
# test custom class and palette don't match
with pytest.raises(ValueError):
-dataset = CustomDataset(
+dataset = BaseSegDataset(
data_prefix=dict(img_path=MagicMock()),
ann_file=MagicMock(),
metainfo=dict(classes=('bus', 'car'), palette=[[200, 200, 200]]),
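
The override tests above exercise the metainfo mechanism: passing a subset of a concrete dataset's default classes produces a label_map dict used for relabeling, while the bare BaseSegDataset, which has no default classes, raises an AssertionError instead. A compact sketch of the first case in the same constructor style as the tests; the data_prefix is a placeholder and lazy_init is an assumed MMEngine kwarg used here to skip loading annotations:

    # Hedged sketch of the behaviour asserted above: overriding 'classes' via
    # metainfo on a concrete dataset builds a label_map for the kept classes.
    from mmseg.datasets import CityscapesDataset

    dataset = CityscapesDataset(
        data_prefix=dict(img_path='imgs/'),   # placeholder, as in the tests
        metainfo=dict(classes=('road', 'sidewalk')),
        lazy_init=True,                       # assumption: defer full_init
        pipeline=[])
    assert isinstance(dataset.label_map, dict)
    print(dataset.metainfo['classes'])        # the two kept classes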

View File

@ -37,7 +37,7 @@ def test_build_dataset():
# test RepeatDataset
cfg = dict(
-type='CustomDataset',
+type='BaseSegDataset',
pipeline=[],
data_root=data_root,
data_prefix=data_prefix,
@ -51,13 +51,13 @@ def test_build_dataset():
# We use same dir twice for simplicity
# with data_prefix.seg_map_path
cfg1 = dict(
-type='CustomDataset',
+type='BaseSegDataset',
pipeline=[],
data_root=data_root,
data_prefix=data_prefix,
serialize_data=False)
cfg2 = dict(
-type='CustomDataset',
+type='BaseSegDataset',
pipeline=[],
data_root=data_root,
data_prefix=data_prefix,
@ -81,14 +81,14 @@ def test_build_dataset():
# with data_prefix.seg_map_path, ann_file
cfg1 = dict(
-type='CustomDataset',
+type='BaseSegDataset',
pipeline=[],
data_root=data_root,
data_prefix=data_prefix,
ann_file='splits/train.txt',
serialize_data=False)
cfg2 = dict(
-type='CustomDataset',
+type='BaseSegDataset',
pipeline=[],
data_root=data_root,
data_prefix=data_prefix,
@ -103,7 +103,7 @@ def test_build_dataset():
# test mode
cfg1 = dict(
-type='CustomDataset',
+type='BaseSegDataset',
pipeline=[],
data_root=data_root,
data_prefix=dict(img_path='imgs/'),
@ -111,7 +111,7 @@ def test_build_dataset():
metainfo=dict(classes=('pseudo_class', )),
serialize_data=False)
cfg2 = dict(
-type='CustomDataset',
+type='BaseSegDataset',
pipeline=[],
data_root=data_root,
data_prefix=dict(img_path='imgs/'),
@ -127,7 +127,7 @@ def test_build_dataset():
# test mode with ann_files
cfg1 = dict(
-type='CustomDataset',
+type='BaseSegDataset',
pipeline=[],
data_root=data_root,
data_prefix=dict(img_path='imgs/'),
@ -136,7 +136,7 @@ def test_build_dataset():
metainfo=dict(classes=('pseudo_class', )),
serialize_data=False)
cfg2 = dict(
-type='CustomDataset',
+type='BaseSegDataset',
pipeline=[],
data_root=data_root,
data_prefix=dict(img_path='imgs/'),
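
The builder tests above only swap the type string inside otherwise unchanged config dicts. A hedged sketch of the pattern they exercise, building a wrapped dataset through the registry; the paths and prefixes are placeholders, and RepeatDataset and DATASETS.build come from MMEngine:

    # Hedged sketch of the config-driven build exercised by the tests above.
    from mmseg.registry import DATASETS

    cfg = dict(
        type='RepeatDataset',
        times=2,
        dataset=dict(
            type='BaseSegDataset',   # was 'CustomDataset' before this commit
            pipeline=[],
            data_root='data/pseudo_dataset',   # placeholder
            data_prefix=dict(img_path='imgs/', seg_map_path='imgs/')))
    repeat_dataset = DATASETS.build(cfg)
    print(len(repeat_dataset))       # 2 x the wrapped dataset's length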