From fcb948295c8a402db7d713eddca56d0f53d1aa0f Mon Sep 17 00:00:00 2001
From: MeowZheng
Date: Fri, 27 May 2022 21:52:49 +0800
Subject: [PATCH] [Enhancement] Revise RandomCrop

---
 configs/_base_/datasets/ade20k.py               | 15 +--------------
 configs/_base_/datasets/ade20k_640x640.py       | 15 +--------------
 configs/_base_/datasets/chase_db1.py            | 15 +--------------
 configs/_base_/datasets/cityscapes.py           | 15 +--------------
 configs/_base_/datasets/cityscapes_1024x1024.py | 15 +--------------
 configs/_base_/datasets/cityscapes_768x768.py   | 15 +--------------
 configs/_base_/datasets/cityscapes_769x769.py   | 15 +--------------
 configs/_base_/datasets/cityscapes_832x832.py   | 15 +--------------
 configs/_base_/datasets/coco-stuff10k.py        | 15 +--------------
 configs/_base_/datasets/coco-stuff164k.py       | 15 +--------------
 configs/_base_/datasets/drive.py                | 15 +--------------
 configs/_base_/datasets/hrf.py                  | 15 +--------------
 configs/_base_/datasets/isaid.py                | 15 +--------------
 configs/_base_/datasets/loveda.py               | 15 +--------------
 configs/_base_/datasets/pascal_context.py       | 15 +--------------
 configs/_base_/datasets/pascal_context_59.py    | 15 +--------------
 configs/_base_/datasets/pascal_voc12.py         | 15 +--------------
 configs/_base_/datasets/potsdam.py              | 15 +--------------
 configs/_base_/datasets/stare.py                | 15 +--------------
 configs/_base_/datasets/vaihingen.py            | 15 +--------------
 mmseg/datasets/pipelines/transforms.py          |  6 +++++-
 .../test_pipelines/test_transforms.py           | 11 ++---------
 22 files changed, 27 insertions(+), 290 deletions(-)

diff --git a/configs/_base_/datasets/ade20k.py b/configs/_base_/datasets/ade20k.py
index c7742285f..efc8b4bb2 100644
--- a/configs/_base_/datasets/ade20k.py
+++ b/configs/_base_/datasets/ade20k.py
@@ -8,20 +8,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations', reduce_zero_label=True),
     dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/configs/_base_/datasets/ade20k_640x640.py b/configs/_base_/datasets/ade20k_640x640.py
index 3907f6fec..14a4bb092 100644
--- a/configs/_base_/datasets/ade20k_640x640.py
+++ b/configs/_base_/datasets/ade20k_640x640.py
@@ -8,20 +8,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations', reduce_zero_label=True),
     dict(type='Resize', img_scale=(2560, 640), ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/configs/_base_/datasets/chase_db1.py b/configs/_base_/datasets/chase_db1.py
index f8eb4fdb9..298594ea9 100644
--- a/configs/_base_/datasets/chase_db1.py
+++ b/configs/_base_/datasets/chase_db1.py
@@ -9,20 +9,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations'),
     dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/configs/_base_/datasets/cityscapes.py b/configs/_base_/datasets/cityscapes.py
index 4a645e1bf..f21867c63 100644
--- a/configs/_base_/datasets/cityscapes.py
+++ b/configs/_base_/datasets/cityscapes.py
@@ -8,20 +8,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations'),
     dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/configs/_base_/datasets/cityscapes_1024x1024.py b/configs/_base_/datasets/cityscapes_1024x1024.py
index 57d09289c..f98d92972 100644
--- a/configs/_base_/datasets/cityscapes_1024x1024.py
+++ b/configs/_base_/datasets/cityscapes_1024x1024.py
@@ -6,20 +6,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations'),
     dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/configs/_base_/datasets/cityscapes_768x768.py b/configs/_base_/datasets/cityscapes_768x768.py
index 8735ef557..fde9d7c7d 100644
--- a/configs/_base_/datasets/cityscapes_768x768.py
+++ b/configs/_base_/datasets/cityscapes_768x768.py
@@ -6,20 +6,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations'),
     dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/configs/_base_/datasets/cityscapes_769x769.py b/configs/_base_/datasets/cityscapes_769x769.py
index d04ac0a8d..336c7b254 100644
--- a/configs/_base_/datasets/cityscapes_769x769.py
+++ b/configs/_base_/datasets/cityscapes_769x769.py
@@ -6,20 +6,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations'),
     dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/configs/_base_/datasets/cityscapes_832x832.py b/configs/_base_/datasets/cityscapes_832x832.py
index d65c16f5f..b9325cc00 100644
--- a/configs/_base_/datasets/cityscapes_832x832.py
+++ b/configs/_base_/datasets/cityscapes_832x832.py
@@ -6,20 +6,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/configs/_base_/datasets/coco-stuff10k.py b/configs/_base_/datasets/coco-stuff10k.py
index ceec06dc8..ec0496928 100644
--- a/configs/_base_/datasets/coco-stuff10k.py
+++ b/configs/_base_/datasets/coco-stuff10k.py
@@ -8,20 +8,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations', reduce_zero_label=True),
     dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/configs/_base_/datasets/coco-stuff164k.py b/configs/_base_/datasets/coco-stuff164k.py
index 29a33894b..a6a38f2ac 100644
--- a/configs/_base_/datasets/coco-stuff164k.py
+++ b/configs/_base_/datasets/coco-stuff164k.py
@@ -8,20 +8,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations'),
     dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/configs/_base_/datasets/drive.py b/configs/_base_/datasets/drive.py
index 6b00bc75b..06e8ff606 100644
--- a/configs/_base_/datasets/drive.py
+++ b/configs/_base_/datasets/drive.py
@@ -9,20 +9,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations'),
     dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/configs/_base_/datasets/hrf.py b/configs/_base_/datasets/hrf.py
index 2c1ad741e..242d790eb 100644
--- a/configs/_base_/datasets/hrf.py
+++ b/configs/_base_/datasets/hrf.py
@@ -9,20 +9,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations'),
     dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/configs/_base_/datasets/isaid.py b/configs/_base_/datasets/isaid.py
index 29e731fb2..8e4c26abb 100644
--- a/configs/_base_/datasets/isaid.py
+++ b/configs/_base_/datasets/isaid.py
@@ -16,20 +16,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations'),
     dict(type='Resize', img_scale=(896, 896), ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/configs/_base_/datasets/loveda.py b/configs/_base_/datasets/loveda.py
index bcdc4f158..e55335695 100644
--- a/configs/_base_/datasets/loveda.py
+++ b/configs/_base_/datasets/loveda.py
@@ -8,20 +8,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations', reduce_zero_label=True),
     dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/configs/_base_/datasets/pascal_context.py b/configs/_base_/datasets/pascal_context.py
index 0f803a6b8..ff65bad1b 100644
--- a/configs/_base_/datasets/pascal_context.py
+++ b/configs/_base_/datasets/pascal_context.py
@@ -11,20 +11,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations'),
     dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/configs/_base_/datasets/pascal_context_59.py b/configs/_base_/datasets/pascal_context_59.py
index 4e1865f09..37585abab 100644
--- a/configs/_base_/datasets/pascal_context_59.py
+++ b/configs/_base_/datasets/pascal_context_59.py
@@ -11,20 +11,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations', reduce_zero_label=True),
     dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/configs/_base_/datasets/pascal_voc12.py b/configs/_base_/datasets/pascal_voc12.py
index aa864390c..ba1d42d0c 100644
--- a/configs/_base_/datasets/pascal_voc12.py
+++ b/configs/_base_/datasets/pascal_voc12.py
@@ -8,20 +8,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations'),
     dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/configs/_base_/datasets/potsdam.py b/configs/_base_/datasets/potsdam.py
index 0690578cb..f74c4a56c 100644
--- a/configs/_base_/datasets/potsdam.py
+++ b/configs/_base_/datasets/potsdam.py
@@ -8,20 +8,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations', reduce_zero_label=True),
     dict(type='Resize', img_scale=(512, 512), ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/configs/_base_/datasets/stare.py b/configs/_base_/datasets/stare.py
index afba5127c..3f71b2548 100644
--- a/configs/_base_/datasets/stare.py
+++ b/configs/_base_/datasets/stare.py
@@ -9,20 +9,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations'),
     dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/configs/_base_/datasets/vaihingen.py b/configs/_base_/datasets/vaihingen.py
index 03987c001..c0df282c4 100644
--- a/configs/_base_/datasets/vaihingen.py
+++ b/configs/_base_/datasets/vaihingen.py
@@ -8,20 +8,7 @@ train_pipeline = [
     dict(type='LoadImageFromFile'),
     dict(type='LoadAnnotations', reduce_zero_label=True),
     dict(type='Resize', img_scale=(512, 512), ratio_range=(0.5, 2.0)),
-    dict(
-        type='TransformBroadcaster',
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True,
-        transforms=[
-            dict(
-                type='mmseg.RandomCrop',
-                crop_size=crop_size,
-                cat_max_ratio=0.75),
-        ]),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
     dict(type='Normalize', **img_norm_cfg),
diff --git a/mmseg/datasets/pipelines/transforms.py b/mmseg/datasets/pipelines/transforms.py
index 3d3c7e2ae..52c61953b 100644
--- a/mmseg/datasets/pipelines/transforms.py
+++ b/mmseg/datasets/pipelines/transforms.py
@@ -237,7 +237,7 @@ class RandomCrop(BaseTransform):
         if self.cat_max_ratio < 1.:
             # Repeat 10 times
             for _ in range(10):
-                seg_temp = self.crop(results['gt_semantic_seg'], crop_bbox)
+                seg_temp = self.crop(results['gt_seg_map'], crop_bbox)
                 labels, cnt = np.unique(seg_temp, return_counts=True)
                 cnt = cnt[labels != self.ignore_index]
                 if len(cnt) > 1 and np.max(cnt) / np.sum(
@@ -279,6 +279,10 @@ class RandomCrop(BaseTransform):

         # crop the image
         img = self.crop(img, crop_bbox)
+
+        # crop semantic seg
+        for key in results.get('seg_fields', []):
+            results[key] = self.crop(results[key], crop_bbox)
         img_shape = img.shape
         results['img'] = img
         results['img_shape'] = img_shape
diff --git a/tests/test_datasets/test_pipelines/test_transforms.py b/tests/test_datasets/test_pipelines/test_transforms.py
index 03da1266f..0321b0169 100644
--- a/tests/test_datasets/test_pipelines/test_transforms.py
+++ b/tests/test_datasets/test_pipelines/test_transforms.py
@@ -4,7 +4,6 @@ import os.path as osp
 import mmcv
 import numpy as np
 import pytest
-from mmcv.transforms.wrappers import TransformBroadcaster
 from PIL import Image

 from mmseg.datasets.pipelines import PhotoMetricDistortion, RandomCrop
@@ -28,14 +27,8 @@ def test_random_crop():
     results['scale_factor'] = 1.0
     h, w, _ = img.shape

-    pipeline = TransformBroadcaster(
-        transforms=[RandomCrop(crop_size=(h - 20, w - 20))],
-        mapping={
-            'img': ['img', 'gt_semantic_seg'],
-            'img_shape': [..., 'img_shape']
-        },
-        auto_remap=True,
-        share_random_params=True)
+    pipeline = RandomCrop(crop_size=(h - 20, w - 20))
+    results = pipeline(results)

     assert results['img'].shape[:2] == (h - 20, w - 20)
     assert results['img_shape'][:2] == (h - 20, w - 20)
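
Usage sketch (illustrative, not part of the diff): a minimal example of how the revised RandomCrop is expected to be called after this change, mirroring the updated test above. The keys 'img', 'gt_seg_map' and 'seg_fields' come from the transform code in this patch; the array shapes, class count and crop size below are assumptions for illustration only.

    import numpy as np

    from mmseg.datasets.pipelines import RandomCrop

    # Minimal results dict: 'seg_fields' lists every segmentation map that
    # RandomCrop should crop together with the image.
    img = np.random.randint(0, 255, size=(288, 512, 3), dtype=np.uint8)
    gt_seg_map = np.random.randint(0, 19, size=(288, 512), dtype=np.uint8)
    results = dict(
        img=img,
        img_shape=img.shape,
        ori_shape=img.shape,
        gt_seg_map=gt_seg_map,
        seg_fields=['gt_seg_map'])

    # No TransformBroadcaster wrapper is needed any more: RandomCrop picks one
    # crop_bbox and applies it to the image and to every entry in 'seg_fields'.
    pipeline = RandomCrop(crop_size=(256, 256), cat_max_ratio=0.75)
    results = pipeline(results)

    assert results['img'].shape[:2] == (256, 256)
    assert results['gt_seg_map'].shape == (256, 256)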