Mirror of https://github.com/open-mmlab/mmsegmentation.git
[Fix] Change CustomDataset to BaseSegDataset (#1791)
commit 3cf2bbda2a
parent ba4d1d62aa
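The diff below renames `CustomDataset` to `BaseSegDataset` across the dataset modules, the tests, and the tutorial notebook. As orientation, here is a minimal sketch of the subclassing pattern the commit migrates to; it mirrors the StanfordBackgroundDataset snippet in the notebook hunk, but the subclass name, label names, and colors are illustrative placeholders, not part of the repository.

```python
# Minimal sketch of registering a dataset on top of the renamed base class.
# Only the import locations and the decorator come from this commit; the
# subclass name, CLASSES and PALETTE values are hypothetical placeholders.
from mmseg.registry import DATASETS
from mmseg.datasets import BaseSegDataset  # formerly CustomDataset


@DATASETS.register_module()
class ExampleBackgroundDataset(BaseSegDataset):
    # Placeholder label names and colors, following the CLASSES/PALETTE
    # convention shown in the tutorial notebook hunk below.
    CLASSES = ('background', 'foreground')
    PALETTE = [[0, 0, 0], [255, 255, 255]]
```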
@@ -381,10 +381,10 @@
 "outputs": [],
 "source": [
 "from mmseg.datasets.builder import DATASETS\n",
-"from mmseg.datasets.custom import CustomDataset\n",
+"from mmseg.datasets.custom import BaseSegDataset\n",
 "\n",
 "@DATASETS.register_module()\n",
-"class StanfordBackgroundDataset(CustomDataset):\n",
+"class StanfordBackgroundDataset(BaseSegDataset):\n",
 " CLASSES = classes\n",
 " PALETTE = palette\n",
 " def __init__(self, split, **kwargs):\n",
@@ -3,7 +3,7 @@ from .ade import ADE20KDataset
 from .chase_db1 import ChaseDB1Dataset
 from .cityscapes import CityscapesDataset
 from .coco_stuff import COCOStuffDataset
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 from .dark_zurich import DarkZurichDataset
 from .dataset_wrappers import MultiImageMixDataset
 from .drive import DRIVEDataset
@@ -23,7 +23,7 @@ from .transforms import (CLAHE, AdjustGamma, LoadAnnotations,
 from .voc import PascalVOCDataset
 
 __all__ = [
-    'CustomDataset', 'CityscapesDataset', 'PascalVOCDataset', 'ADE20KDataset',
+    'BaseSegDataset', 'CityscapesDataset', 'PascalVOCDataset', 'ADE20KDataset',
     'PascalContextDataset', 'PascalContextDataset59', 'ChaseDB1Dataset',
     'DRIVEDataset', 'HRFDataset', 'STAREDataset', 'DarkZurichDataset',
     'NightDrivingDataset', 'COCOStuffDataset', 'LoveDADataset',
@@ -1,10 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class ADE20KDataset(CustomDataset):
+class ADE20KDataset(BaseSegDataset):
     """ADE20K dataset.
 
     In segmentation map annotation for ADE20K, 0 stands for background, which
@@ -1,11 +1,11 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class ChaseDB1Dataset(CustomDataset):
+class ChaseDB1Dataset(BaseSegDataset):
     """Chase_db1 dataset.
 
     In segmentation map annotation for Chase_db1, 0 stands for background,
@@ -1,10 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class CityscapesDataset(CustomDataset):
+class CityscapesDataset(BaseSegDataset):
     """Cityscapes dataset.
 
     The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix`` is
@@ -1,10 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class COCOStuffDataset(CustomDataset):
+class COCOStuffDataset(BaseSegDataset):
     """COCO-Stuff dataset.
 
     In segmentation map annotation for COCO-Stuff, Train-IDs of the 10k version
@@ -11,7 +11,7 @@ from mmseg.registry import DATASETS
 
 
 @DATASETS.register_module()
-class CustomDataset(BaseDataset):
+class BaseSegDataset(BaseDataset):
     """Custom dataset for semantic segmentation. An example of file structure
     is as followed.
 
@@ -32,7 +32,7 @@ class CustomDataset(BaseDataset):
         │   │   │   │   ├── zzz{seg_map_suffix}
         │   │   │   ├── val
 
-    The img/gt_semantic_seg pair of CustomDataset should be of the same
+    The img/gt_semantic_seg pair of BaseSegDataset should be of the same
     except suffix. A valid img/gt_semantic_seg filename pair should be like
     ``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included
     in the suffix). If split is given, then ``xxx`` is specified in txt file.
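The docstring above spells out the image/annotation pairing rule: an image and its segmentation map share the same stem and differ only in suffix. A hedged sketch of pointing the renamed class at such a layout follows; the directory names, suffixes, and classes are placeholder assumptions, while the `data_prefix` keys follow the tests later in this diff, and the data must actually exist for the call to succeed.

```python
# Hypothetical layout obeying the pairing rule from the docstring:
#   data/my_dataset/img_dir/train/0001.jpg  <->  data/my_dataset/ann_dir/train/0001.png
from mmseg.datasets import BaseSegDataset

dataset = BaseSegDataset(
    data_root='data/my_dataset',  # placeholder path
    data_prefix=dict(img_path='img_dir/train', seg_map_path='ann_dir/train'),
    img_suffix='.jpg',            # pairs 0001.jpg ...
    seg_map_suffix='.png',        # ... with 0001.png
    metainfo=dict(classes=('background', 'foreground')),  # placeholder classes
    pipeline=[])
```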
@@ -1,11 +1,11 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class DRIVEDataset(CustomDataset):
+class DRIVEDataset(BaseSegDataset):
     """DRIVE dataset.
 
     In segmentation map annotation for DRIVE, 0 stands for background, which is
@@ -1,11 +1,11 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class HRFDataset(CustomDataset):
+class HRFDataset(BaseSegDataset):
     """HRF dataset.
 
     In segmentation map annotation for HRF, 0 stands for background, which is
@@ -1,10 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class iSAIDDataset(CustomDataset):
+class iSAIDDataset(BaseSegDataset):
     """ iSAID: A Large-scale Dataset for Instance Segmentation in Aerial Images
     In segmentation map annotation for iSAID dataset, which is included
     in 16 categories. ``reduce_zero_label`` is fixed to False. The
@@ -1,10 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class ISPRSDataset(CustomDataset):
+class ISPRSDataset(BaseSegDataset):
     """ISPRS dataset.
 
     In segmentation map annotation for ISPRS, 0 is the ignore index.
@@ -1,10 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class LoveDADataset(CustomDataset):
+class LoveDADataset(BaseSegDataset):
     """LoveDA dataset.
 
     In segmentation map annotation for LoveDA, 0 is the ignore index.
@@ -2,11 +2,11 @@
 import os.path as osp
 
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class PascalContextDataset(CustomDataset):
+class PascalContextDataset(BaseSegDataset):
     """PascalContext dataset.
 
     In segmentation map annotation for PascalContext, 0 stands for background,
@@ -57,7 +57,7 @@ class PascalContextDataset(CustomDataset):
 
 
 @DATASETS.register_module()
-class PascalContextDataset59(CustomDataset):
+class PascalContextDataset59(BaseSegDataset):
     """PascalContext dataset.
 
     In segmentation map annotation for PascalContext, 0 stands for background,
@@ -1,10 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class PotsdamDataset(CustomDataset):
+class PotsdamDataset(BaseSegDataset):
     """ISPRS Potsdam dataset.
 
     In segmentation map annotation for Potsdam dataset, 0 is the ignore index.
@@ -1,10 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class STAREDataset(CustomDataset):
+class STAREDataset(BaseSegDataset):
     """STARE dataset.
 
     In segmentation map annotation for STARE, 0 stands for background, which is
@@ -2,11 +2,11 @@
 import os.path as osp
 
 from mmseg.registry import DATASETS
-from .custom import CustomDataset
+from .custom import BaseSegDataset
 
 
 @DATASETS.register_module()
-class PascalVOCDataset(CustomDataset):
+class PascalVOCDataset(BaseSegDataset):
     """Pascal VOC dataset.
 
     Args:
@@ -6,10 +6,10 @@ from unittest.mock import MagicMock
 
 import pytest
 
-from mmseg.datasets import (ADE20KDataset, CityscapesDataset, COCOStuffDataset,
-                            CustomDataset, ISPRSDataset, LoveDADataset,
-                            PascalVOCDataset, PotsdamDataset, iSAIDDataset)
-from mmseg.registry import DATASETS
+from mmseg.datasets import (ADE20KDataset, BaseSegDataset,
+                            CityscapesDataset, COCOStuffDataset, ISPRSDataset,
+                            LoveDADataset, PascalVOCDataset, PotsdamDataset,
+                            iSAIDDataset)
 from mmseg.utils import get_classes, get_palette
 
 
@@ -87,7 +87,7 @@ def test_palette():
 def test_custom_dataset():
 
     # with 'img_path' and 'seg_map_path' in data_prefix
-    train_dataset = CustomDataset(
+    train_dataset = BaseSegDataset(
         data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),
         data_prefix=dict(
             img_path='imgs/',
@@ -98,7 +98,7 @@ def test_custom_dataset():
     assert len(train_dataset) == 5
 
     # with 'img_path' and 'seg_map_path' in data_prefix and ann_file
-    train_dataset = CustomDataset(
+    train_dataset = BaseSegDataset(
         data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),
         data_prefix=dict(
             img_path='imgs/',
@@ -110,7 +110,7 @@ def test_custom_dataset():
     assert len(train_dataset) == 4
 
     # no data_root
-    train_dataset = CustomDataset(
+    train_dataset = BaseSegDataset(
         data_prefix=dict(
             img_path=osp.join(
                 osp.dirname(__file__), '../data/pseudo_dataset/imgs'),
@@ -122,7 +122,7 @@ def test_custom_dataset():
 
     # with data_root but 'img_path' and 'seg_map_path' in data_prefix are
     # abs path
-    train_dataset = CustomDataset(
+    train_dataset = BaseSegDataset(
         data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),
         data_prefix=dict(
             img_path=osp.join(
@@ -134,7 +134,7 @@ def test_custom_dataset():
     assert len(train_dataset) == 5
 
     # test_mode=True
-    test_dataset = CustomDataset(
+    test_dataset = BaseSegDataset(
         data_prefix=dict(
             img_path=osp.join(
                 osp.dirname(__file__), '../data/pseudo_dataset/imgs')),
@@ -245,7 +245,7 @@ def test_isaid():
 @pytest.mark.parametrize('dataset, classes', [
     ('ADE20KDataset', ('wall', 'building')),
     ('CityscapesDataset', ('road', 'sidewalk')),
-    ('CustomDataset', ('bus', 'car')),
+    ('BaseSegDataset', ('bus', 'car')),
     ('PascalVOCDataset', ('aeroplane', 'bicycle')),
 ])
 def test_custom_classes_override_default(dataset, classes):
@@ -269,7 +269,7 @@ def test_custom_classes_override_default(dataset, classes):
 
     assert custom_dataset.metainfo['classes'] != original_classes
     assert custom_dataset.metainfo['classes'] == classes
-    if not isinstance(custom_dataset, CustomDataset):
+    if not isinstance(custom_dataset, BaseSegDataset):
         assert isinstance(custom_dataset.label_map, dict)
 
     # Test setting classes as a list
@@ -282,7 +282,7 @@ def test_custom_classes_override_default(dataset, classes):
 
     assert custom_dataset.metainfo['classes'] != original_classes
     assert custom_dataset.metainfo['classes'] == list(classes)
-    if not isinstance(custom_dataset, CustomDataset):
+    if not isinstance(custom_dataset, BaseSegDataset):
         assert isinstance(custom_dataset.label_map, dict)
 
     # Test overriding not a subset
@@ -295,11 +295,11 @@ def test_custom_classes_override_default(dataset, classes):
 
     assert custom_dataset.metainfo['classes'] != original_classes
     assert custom_dataset.metainfo['classes'] == [classes[0]]
-    if not isinstance(custom_dataset, CustomDataset):
+    if not isinstance(custom_dataset, BaseSegDataset):
         assert isinstance(custom_dataset.label_map, dict)
 
     # Test default behavior
-    if dataset_class is CustomDataset:
+    if dataset_class is BaseSegDataset:
         with pytest.raises(AssertionError):
             custom_dataset = dataset_class(
                 ann_file=ann_file,
@@ -320,7 +320,7 @@ def test_custom_classes_override_default(dataset, classes):
 
 
 def test_custom_dataset_random_palette_is_generated():
-    dataset = CustomDataset(
+    dataset = BaseSegDataset(
         pipeline=[],
         data_prefix=dict(img_path=MagicMock()),
         ann_file=MagicMock(),
@@ -334,7 +334,7 @@ def test_custom_dataset_random_palette_is_generated():
 
 
 def test_custom_dataset_custom_palette():
-    dataset = CustomDataset(
+    dataset = BaseSegDataset(
         data_prefix=dict(img_path=MagicMock()),
         ann_file=MagicMock(),
         metainfo=dict(
@@ -346,7 +346,7 @@ def test_custom_dataset_custom_palette():
                                                         [200, 200, 200]])
     # test custom class and palette don't match
     with pytest.raises(ValueError):
-        dataset = CustomDataset(
+        dataset = BaseSegDataset(
            data_prefix=dict(img_path=MagicMock()),
            ann_file=MagicMock(),
            metainfo=dict(classes=('bus', 'car'), palette=[[200, 200, 200]]),
@@ -37,7 +37,7 @@ def test_build_dataset():
 
     # test RepeatDataset
     cfg = dict(
-        type='CustomDataset',
+        type='BaseSegDataset',
         pipeline=[],
         data_root=data_root,
         data_prefix=data_prefix,
@@ -51,13 +51,13 @@
     # We use same dir twice for simplicity
     # with data_prefix.seg_map_path
     cfg1 = dict(
-        type='CustomDataset',
+        type='BaseSegDataset',
         pipeline=[],
         data_root=data_root,
         data_prefix=data_prefix,
         serialize_data=False)
     cfg2 = dict(
-        type='CustomDataset',
+        type='BaseSegDataset',
         pipeline=[],
         data_root=data_root,
         data_prefix=data_prefix,
@@ -81,14 +81,14 @@
 
     # with data_prefix.seg_map_path, ann_file
     cfg1 = dict(
-        type='CustomDataset',
+        type='BaseSegDataset',
         pipeline=[],
         data_root=data_root,
         data_prefix=data_prefix,
         ann_file='splits/train.txt',
         serialize_data=False)
     cfg2 = dict(
-        type='CustomDataset',
+        type='BaseSegDataset',
         pipeline=[],
         data_root=data_root,
         data_prefix=data_prefix,
@@ -103,7 +103,7 @@
 
     # test mode
     cfg1 = dict(
-        type='CustomDataset',
+        type='BaseSegDataset',
         pipeline=[],
         data_root=data_root,
         data_prefix=dict(img_path='imgs/'),
@@ -111,7 +111,7 @@
         metainfo=dict(classes=('pseudo_class', )),
         serialize_data=False)
     cfg2 = dict(
-        type='CustomDataset',
+        type='BaseSegDataset',
         pipeline=[],
         data_root=data_root,
         data_prefix=dict(img_path='imgs/'),
@@ -127,7 +127,7 @@
 
     # test mode with ann_files
     cfg1 = dict(
-        type='CustomDataset',
+        type='BaseSegDataset',
        pipeline=[],
        data_root=data_root,
        data_prefix=dict(img_path='imgs/'),
@@ -136,7 +136,7 @@
         metainfo=dict(classes=('pseudo_class', )),
         serialize_data=False)
     cfg2 = dict(
-        type='CustomDataset',
+        type='BaseSegDataset',
         pipeline=[],
         data_root=data_root,
         data_prefix=dict(img_path='imgs/'),
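The build-related test hunks above switch the config `type` to the new registered name. For context, a hedged sketch of instantiating such a config through the registry; the paths are placeholders, and `DATASETS.build` is the mmengine-style registry call, assumed from the surrounding workflow rather than shown in this diff.

```python
# Sketch only: build a dataset from a config dict that uses the renamed type.
import mmseg.datasets  # noqa: F401  (importing the package registers BaseSegDataset)
from mmseg.registry import DATASETS

cfg = dict(
    type='BaseSegDataset',        # was 'CustomDataset' before this commit
    data_root='data/my_dataset',  # placeholder path
    data_prefix=dict(img_path='imgs/', seg_map_path='annotations/'),
    pipeline=[])
dataset = DATASETS.build(cfg)
print(len(dataset))
```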