55 lines
1.7 KiB
Python
# dataset settings
dataset_type = 'LoveDADataset'
data_root = 'data/loveDA'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1024, 1024),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/train',
        ann_dir='ann_dir/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/val',
        ann_dir='ann_dir/val',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/val',
        ann_dir='ann_dir/val',
        pipeline=test_pipeline))
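For context, this file is a base dataset config: it is not run on its own, but pulled into a full model config through MMSegmentation's _base_ inheritance. The sketch below shows one plausible composition; the model, schedule, and runtime file names, and the num_classes=7 override for LoveDA, are illustrative assumptions and are not part of this file.

# Minimal sketch (assumed configs/ layout and base-config file names):
# compose this LoveDA dataset config with model, runtime, and schedule bases.
_base_ = [
    '../_base_/models/fcn_hr18.py',  # assumed model base config
    '../_base_/datasets/loveda.py',  # this dataset config
    '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
# Assumption: LoveDA has 7 semantic classes, so the decode head is overridden.
model = dict(decode_head=dict(num_classes=7))

The composed config can then be loaded with mmcv's Config.fromfile and the training split instantiated with build_dataset from mmseg.datasets, which consume cfg.data.train as defined above.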