201 lines
6.4 KiB
Python
201 lines
6.4 KiB
Python
# Copyright (c) OpenMMLab. All rights reserved.
|
|
import numpy as np
|
|
from mmcv.parallel import DataContainer as DC
|
|
from mmcv.transforms import to_tensor
|
|
from mmcv.transforms.base import BaseTransform
|
|
from mmengine.data import PixelData
|
|
|
|
from mmseg.registry import TRANSFORMS
|
|
from mmseg.structures import SegDataSample
|
|
|
|
|
|
@TRANSFORMS.register_module()
class PackSegInputs(BaseTransform):
    """Pack the inputs data for the semantic segmentation.

    The ``img_meta`` item is always populated. The contents of the
    ``img_meta`` dictionary depends on ``meta_keys``. By default this
    includes:

    - ``img_path``: filename of the image

    - ``seg_map_path``: filename of the segmentation map

    - ``ori_shape``: original shape of the image as a tuple (h, w, c)

    - ``img_shape``: shape of the image input to the network as a tuple \
        (h, w, c). Note that images may be zero padded on the \
        bottom/right if the batch tensor is larger than this shape.

    - ``pad_shape``: shape of padded images

    - ``scale_factor``: a float indicating the preprocessing scale

    - ``flip``: a boolean indicating if image flip transform was used

    - ``flip_direction``: the flipping direction

    Args:
        meta_keys (Sequence[str], optional): Meta keys to be packed from
            ``SegDataSample`` and collected in ``data[img_metas]``.
            Default: ``('img_path', 'seg_map_path', 'ori_shape',
            'img_shape', 'pad_shape', 'scale_factor', 'flip',
            'flip_direction')``
    """

    def __init__(self,
                 meta_keys=('img_path', 'seg_map_path', 'ori_shape',
                            'img_shape', 'pad_shape', 'scale_factor', 'flip',
                            'flip_direction')):
        self.meta_keys = meta_keys

    def transform(self, results: dict) -> dict:
        """Method to pack the input data.

        Args:
            results (dict): Result dict from the data pipeline.

        Returns:
            dict:

            - 'inputs' (obj:`torch.Tensor`): The forward data of models.
            - 'data_sample' (obj:`SegDataSample`): The annotation info of the
                sample.
        """
        packed_results = dict()
        if 'img' in results:
            img = results['img']
            # Grayscale (H, W) inputs get a trailing channel axis so the
            # transpose below always sees (H, W, C).
            if len(img.shape) < 3:
                img = np.expand_dims(img, -1)
            # HWC -> CHW; ascontiguousarray removes negative-stride views
            # (e.g. produced by flipping) before tensor conversion.
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            packed_results['inputs'] = to_tensor(img)

        data_sample = SegDataSample()
        if 'gt_seg_map' in results:
            # Prepend a channel dim and cast labels to int64, the dtype
            # expected by loss computation.
            gt_sem_seg_data = dict(
                data=to_tensor(results['gt_seg_map'][None,
                                                     ...].astype(np.int64)))
            data_sample.gt_sem_seg = PixelData(**gt_sem_seg_data)

        # Collect only the meta keys actually present in the pipeline output.
        img_meta = {}
        for key in self.meta_keys:
            if key in results:
                img_meta[key] = results[key]
        data_sample.set_metainfo(img_meta)
        packed_results['data_sample'] = data_sample

        return packed_results

    def __repr__(self) -> str:
        repr_str = self.__class__.__name__
        repr_str += f'(meta_keys={self.meta_keys})'
        return repr_str
|
|
|
|
|
|
@TRANSFORMS.register_module()
class ImageToTensor(object):
    """Convert images stored under the given keys to :obj:`torch.Tensor`.

    Input images are expected in (H, W, C) order and are emitted as
    (C, H, W). A 2-dimensional (H, W) input is promoted to a
    single-channel (1, H, W) tensor.

    Args:
        keys (Sequence[str]): Key of images to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Convert each image under ``self.keys`` to a channel-first tensor.

        Args:
            results (dict): Result dict contains the image data to convert.

        Returns:
            dict: The result dict with every keyed image replaced by a
                :obj:`torch.Tensor` transposed to (C, H, W) order.
        """
        for key in self.keys:
            image = results[key]
            if image.ndim < 3:
                # Promote (H, W) to (H, W, 1) before moving channels first.
                image = image[..., None]
            results[key] = to_tensor(image.transpose(2, 0, 1))
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys})'
|
|
|
|
|
|
@TRANSFORMS.register_module()
class Transpose(object):
    """Transpose some results by given keys.

    Args:
        keys (Sequence[str]): Keys of results to be transposed.
        order (Sequence[int]): Order of transpose.
    """

    def __init__(self, keys, order):
        self.keys = keys
        self.order = order

    def __call__(self, results):
        """Call function to transpose the axes of the data under each key.

        Args:
            results (dict): Result dict containing the data to transpose.

        Returns:
            dict: The result dict with each entry under ``self.keys``
                transposed to ``self.order``.
        """
        for key in self.keys:
            results[key] = results[key].transpose(self.order)
        return results

    def __repr__(self):
        return self.__class__.__name__ + \
            f'(keys={self.keys}, order={self.order})'
|
|
|
|
|
|
@TRANSFORMS.register_module()
class ToDataContainer(object):
    """Convert results to :obj:`mmcv.DataContainer` by given fields.

    Args:
        fields (Sequence[dict]): Each field is a dict like
            ``dict(key='xxx', **kwargs)``. The ``key`` in result will
            be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.
            Default: ``(dict(key='img', stack=True),
            dict(key='gt_semantic_seg'))``.
    """

    def __init__(self,
                 fields=(dict(key='img',
                              stack=True), dict(key='gt_semantic_seg'))):
        self.fields = fields

    def __call__(self, results):
        """Wrap the configured entries of ``results`` in DataContainers.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            dict: The result dict with each configured entry wrapped in a
                :obj:`mmcv.DataContainer`.
        """
        for spec in self.fields:
            # Work on a copy so the shared default/field dicts are never
            # mutated across calls.
            kwargs = dict(spec)
            name = kwargs.pop('key')
            results[name] = DC(results[name], **kwargs)
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(fields={self.fields})'