[Feature] LIP dataset (#2187)

* [WIP] LIP dataset

* wip

* keep473

* lip dataset prepare

* add ut and test data
This commit is contained in:
Miao Zheng 2022-10-31 20:47:52 +08:00 committed by GitHub
parent a1f011dc0b
commit b21df463d4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 127 additions and 5 deletions

View File

@ -351,7 +351,8 @@ The dataset is a Large-scale Dataset for Instance Segmentation (also have segman
After downloading the iSAID dataset, organize it in the following directory structure.
```
```none
├── data
│ ├── iSAID
│ │ ├── train
│ │ │ ├── images
@ -376,3 +377,40 @@ python tools/dataset_converters/isaid.py /path/to/iSAID
```
In our default setting (`patch_width`=896, `patch_height`=896, `overlap_area`=384), it will generate 33978 images for training and 11644 images for validation.
## LIP(Look Into Person) dataset
This dataset can be downloaded from [this page](https://lip.sysuhcp.com/overview.php).
Please run the following commands to unzip dataset.
```shell
unzip LIP.zip
cd LIP
unzip TrainVal_images.zip
unzip TrainVal_parsing_annotations.zip
cd TrainVal_parsing_annotations
unzip TrainVal_parsing_annotations.zip
mv train_segmentations ../
mv val_segmentations ../
cd ..
```
The directory structure of the LIP dataset is:
```none
├── data
│ ├── LIP
│ │ ├── train_images
│   │ │ ├── 1000_1234574.jpg
│   │ │ ├── ...
│ │ ├── train_segmentations
│   │ │ ├── 1000_1234574.png
│   │ │ ├── ...
│ │ ├── val_images
│   │ │ ├── 100034_483681.jpg
│   │ │ ├── ...
│ │ ├── val_segmentations
│   │ │ ├── 100034_483681.png
│   │ │ ├── ...
```

View File

@ -11,6 +11,7 @@ from .drive import DRIVEDataset
from .hrf import HRFDataset
from .isaid import iSAIDDataset
from .isprs import ISPRSDataset
from .lip import LIPDataset
from .loveda import LoveDADataset
from .night_driving import NightDrivingDataset
from .pascal_context import PascalContextDataset, PascalContextDataset59
@ -35,5 +36,5 @@ __all__ = [
'RandomCutOut', 'RandomMosaic', 'PackSegInputs', 'ResizeToMultiple',
'LoadImageFromNDArray', 'LoadBiomedicalImageFromFile',
'LoadBiomedicalAnnotation', 'LoadBiomedicalData', 'GenerateEdge',
'DecathlonDataset'
'DecathlonDataset', 'LIPDataset'
]

43
mmseg/datasets/lip.py Normal file
View File

@ -0,0 +1,43 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
from .basesegdataset import BaseSegDataset
@DATASETS.register_module()
class LIPDataset(BaseSegDataset):
    """Look Into Person (LIP) human-parsing dataset.

    In LIP, images are ``.jpg`` files and segmentation maps are ``.png``
    files, so ``img_suffix`` and ``seg_map_suffix`` are fixed accordingly.
    """

    # 20 semantic classes (background + 19 body/clothing parts) paired with
    # their fixed RGB palette colors, index-aligned with ``classes``.
    METAINFO = dict(
        classes=('Background', 'Hat', 'Hair', 'Glove', 'Sunglasses',
                 'UpperClothes', 'Dress', 'Coat', 'Socks', 'Pants',
                 'Jumpsuits', 'Scarf', 'Skirt', 'Face', 'Left-arm',
                 'Right-arm', 'Left-leg', 'Right-leg', 'Left-shoe',
                 'Right-shoe'),
        palette=([0, 0, 0], [128, 0, 0], [255, 0, 0], [0, 85, 0],
                 [170, 0, 51], [255, 85, 0], [0, 0, 85], [0, 119, 221],
                 [85, 85, 0], [0, 85, 85], [85, 51, 0], [52, 86, 128],
                 [0, 128, 0], [0, 0, 255], [51, 170, 221], [0, 255, 255],
                 [85, 255, 170], [170, 255, 85], [255, 255, 0],
                 [255, 170, 0]))

    def __init__(self, **kwargs) -> None:
        """Forward all keyword arguments to ``BaseSegDataset`` with the
        LIP-specific file suffixes pinned."""
        super().__init__(img_suffix='.jpg', seg_map_suffix='.png', **kwargs)

View File

@ -265,6 +265,26 @@ def stare_palette():
return [[120, 120, 120], [6, 230, 230]]
def lip_classes():
    """Return the list of LIP class names for external use.

    Order matches the label indices used by the LIP annotations
    (index 0 is background, indices 1-19 are body/clothing parts).
    """
    names = ('background', 'hat', 'hair', 'glove', 'sunglasses',
             'upperclothes', 'dress', 'coat', 'socks', 'pants', 'jumpsuits',
             'scarf', 'skirt', 'face', 'leftArm', 'rightArm', 'leftLeg',
             'rightLeg', 'leftShoe', 'rightShoe')
    return list(names)
def lip_palette():
    """Return the LIP color palette for external use.

    Returns one ``[R, G, B]`` color per class, index-aligned with the 20
    entries of ``lip_classes()``. The colors match the palette declared in
    ``LIPDataset.METAINFO``.

    Note: the previous implementation mistakenly returned the class display
    names (strings) instead of RGB colors, which breaks any caller that
    colorizes segmentation maps via ``get_palette``.
    """
    return [
        [0, 0, 0], [128, 0, 0], [255, 0, 0], [0, 85, 0], [170, 0, 51],
        [255, 85, 0], [0, 0, 85], [0, 119, 221], [85, 85, 0], [0, 85, 85],
        [85, 51, 0], [52, 86, 128], [0, 128, 0], [0, 0, 255],
        [51, 170, 221], [0, 255, 255], [85, 255, 170], [170, 255, 85],
        [255, 255, 0], [255, 170, 0]
    ]
dataset_aliases = {
'cityscapes': ['cityscapes'],
'ade': ['ade', 'ade20k'],
@ -278,7 +298,8 @@ dataset_aliases = {
'coco_stuff164k'
],
'isaid': ['isaid', 'iSAID'],
'stare': ['stare', 'STARE']
'stare': ['stare', 'STARE'],
'lip': ['LIP', 'lip']
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 11 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 13 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.4 KiB

View File

@ -8,8 +8,8 @@ import pytest
from mmseg.datasets import (ADE20KDataset, BaseSegDataset, CityscapesDataset,
COCOStuffDataset, DecathlonDataset, ISPRSDataset,
LoveDADataset, PascalVOCDataset, PotsdamDataset,
iSAIDDataset)
LIPDataset, LoveDADataset, PascalVOCDataset,
PotsdamDataset, iSAIDDataset)
from mmseg.registry import DATASETS
from mmseg.utils import get_classes, get_palette
@ -259,6 +259,25 @@ def test_decathlon():
assert len(test_dataset) == 3
def test_lip():
    """Smoke-test ``LIPDataset`` loading on the pseudo LIP fixture."""
    root = osp.join(osp.dirname(__file__), '../data/pseudo_lip_dataset')
    # The training split fixture holds exactly one image/annotation pair.
    train_ds = LIPDataset(
        pipeline=[],
        data_root=root,
        data_prefix=dict(
            img_path='train_images', seg_map_path='train_segmentations'))
    assert len(train_ds) == 1
    # The validation split fixture likewise holds exactly one pair.
    val_ds = LIPDataset(
        pipeline=[],
        data_root=root,
        data_prefix=dict(
            img_path='val_images', seg_map_path='val_segmentations'))
    assert len(val_ds) == 1
@pytest.mark.parametrize('dataset, classes', [
('ADE20KDataset', ('wall', 'building')),
('CityscapesDataset', ('road', 'sidewalk')),