# Copyright (c) OpenMMLab. All rights reserved.
from collections.abc import Sequence

import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC

from ..builder import PIPELINES


def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.

    Args:
        data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
            be converted.
    """

    if isinstance(data, torch.Tensor):
        return data
    elif isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    elif isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    elif isinstance(data, int):
        return torch.LongTensor([data])
    elif isinstance(data, float):
        return torch.FloatTensor([data])
    else:
        raise TypeError(f'type {type(data)} cannot be converted to tensor.')


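# A minimal usage sketch for ``to_tensor`` above (illustrative only, not part
# of the original module; the sample values are arbitrary):
#
#     to_tensor(np.ones((2, 3)))   # ndarray  -> tensor via torch.from_numpy
#     to_tensor([1, 2, 3])         # sequence -> torch.tensor([1, 2, 3])
#     to_tensor(1)                 # int      -> torch.LongTensor([1])
#     to_tensor(0.5)               # float    -> torch.FloatTensor([0.5])

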
@PIPELINES.register_module()
class ToTensor(object):
    """Convert some results to :obj:`torch.Tensor` by given keys.

    Args:
        keys (Sequence[str]): Keys that need to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Call function to convert data in results to :obj:`torch.Tensor`.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            dict: The result dict contains the data converted
                to :obj:`torch.Tensor`.
        """

        for key in self.keys:
            results[key] = to_tensor(results[key])
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys})'


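# A minimal usage sketch for the ``ToTensor`` transform above (illustrative
# only; the key 'gt_semantic_seg' and the array shape are arbitrary examples):
#
#     transform = ToTensor(keys=['gt_semantic_seg'])
#     results = transform(dict(gt_semantic_seg=np.zeros((4, 4), np.uint8)))
#     # results['gt_semantic_seg'] is now a torch.Tensor of shape (4, 4)

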
@PIPELINES.register_module()
class ImageToTensor(object):
    """Convert image to :obj:`torch.Tensor` by given keys.

    The dimension order of input image is (H, W, C). The pipeline will convert
    it to (C, H, W). If only 2 dimensions (H, W) are given, the output would
    be (1, H, W).

    Args:
        keys (Sequence[str]): Keys of images to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Call function to convert image in results to :obj:`torch.Tensor`
        and transpose the channel order.

        Args:
            results (dict): Result dict contains the image data to convert.

        Returns:
            dict: The result dict contains the image converted
                to :obj:`torch.Tensor` and transposed to (C, H, W) order.
        """

        for key in self.keys:
            img = results[key]
            if len(img.shape) < 3:
                img = np.expand_dims(img, -1)
            results[key] = to_tensor(img.transpose(2, 0, 1))
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys})'


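# A minimal usage sketch for ``ImageToTensor`` above (illustrative only; the
# image size is arbitrary):
#
#     transform = ImageToTensor(keys=['img'])
#     results = transform(dict(img=np.zeros((256, 256, 3), np.float32)))
#     # results['img'] is a torch.Tensor of shape (3, 256, 256); a 2-D
#     # (H, W) input would come out with shape (1, H, W)

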
@PIPELINES.register_module()
class Transpose(object):
    """Transpose some results by given keys.

    Args:
        keys (Sequence[str]): Keys of results to be transposed.
        order (Sequence[int]): Order of transpose.
    """

    def __init__(self, keys, order):
        self.keys = keys
        self.order = order

    def __call__(self, results):
        """Call function to transpose the data in results by the given order.

        Args:
            results (dict): Result dict contains the data to transpose.

        Returns:
            dict: The result dict contains the data transposed to the
                given order.
        """

        for key in self.keys:
            results[key] = results[key].transpose(self.order)
        return results

    def __repr__(self):
        return self.__class__.__name__ + \
               f'(keys={self.keys}, order={self.order})'


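# A minimal usage sketch for ``Transpose`` above (illustrative only; it
# performs the same (H, W, C) -> (C, H, W) move as ImageToTensor but leaves
# the value as a numpy array rather than converting it to a tensor):
#
#     transform = Transpose(keys=['img'], order=(2, 0, 1))
#     results = transform(dict(img=np.zeros((256, 256, 3))))
#     # results['img'].shape == (3, 256, 256)

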
@PIPELINES.register_module()
class ToDataContainer(object):
    """Convert results to :obj:`mmcv.DataContainer` by given fields.

    Args:
        fields (Sequence[dict]): Each field is a dict like
            ``dict(key='xxx', **kwargs)``. The ``key`` in result will
            be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.
            Default: ``(dict(key='img', stack=True),
            dict(key='gt_semantic_seg'))``.
    """

    def __init__(self,
                 fields=(dict(key='img', stack=True),
                         dict(key='gt_semantic_seg'))):
        self.fields = fields

    def __call__(self, results):
        """Call function to convert data in results to
        :obj:`mmcv.DataContainer`.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            dict: The result dict contains the data converted to
                :obj:`mmcv.DataContainer`.
        """

        for field in self.fields:
            field = field.copy()
            key = field.pop('key')
            results[key] = DC(results[key], **field)
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(fields={self.fields})'


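# A minimal usage sketch for ``ToDataContainer`` above (illustrative only; in
# a pipeline config it would typically follow ToTensor / ImageToTensor so the
# wrapped values are already tensors):
#
#     transform = ToDataContainer(fields=(dict(key='img', stack=True),))
#     results = transform(dict(img=torch.zeros(3, 256, 256)))
#     # results['img'] is now a DataContainer wrapping the (3, 256, 256)
#     # tensor, created with stack=True

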
@PIPELINES.register_module()
class DefaultFormatBundle(object):
    """Default formatting bundle.

    It simplifies the pipeline of formatting common fields, including "img"
    and "gt_semantic_seg". These fields are formatted as follows.

    - img: (1) transpose, (2) to tensor, (3) to DataContainer (stack=True)
    - gt_semantic_seg: (1) unsqueeze dim-0, (2) to tensor,
      (3) to DataContainer (stack=True)
    """

    def __call__(self, results):
        """Call function to transform and format common fields in results.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            dict: The result dict contains the data that is formatted with
                default bundle.
        """

        if 'img' in results:
            img = results['img']
            if len(img.shape) < 3:
                img = np.expand_dims(img, -1)
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results['img'] = DC(to_tensor(img), stack=True)
        if 'gt_semantic_seg' in results:
            # convert to long
            results['gt_semantic_seg'] = DC(
                to_tensor(results['gt_semantic_seg'][None,
                                                     ...].astype(np.int64)),
                stack=True)
        return results

    def __repr__(self):
        return self.__class__.__name__


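# A minimal usage sketch for ``DefaultFormatBundle`` above (illustrative only;
# shapes are arbitrary):
#
#     bundle = DefaultFormatBundle()
#     results = bundle(dict(
#         img=np.zeros((256, 256, 3), np.float32),
#         gt_semantic_seg=np.zeros((256, 256), np.uint8)))
#     # results['img']             -> DataContainer of a (3, 256, 256) tensor
#     # results['gt_semantic_seg'] -> DataContainer of a (1, 256, 256) int64
#     #                               tensor, both created with stack=True

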
@PIPELINES.register_module()
class Collect(object):
    """Collect data from the loader relevant to the specific task.

    This is usually the last stage of the data loader pipeline. Typically
    ``keys`` is set to some subset of "img" and "gt_semantic_seg".

    The "img_meta" item is always populated. The contents of the "img_meta"
    dictionary depend on "meta_keys". By default this includes:

    - "img_shape": shape of the image input to the network as a tuple
      (h, w, c). Note that images may be zero padded on the bottom/right
      if the batch tensor is larger than this shape.
    - "scale_factor": a float indicating the preprocessing scale
    - "flip": a boolean indicating if image flip transform was used
    - "filename": path to the image file
    - "ori_shape": original shape of the image as a tuple (h, w, c)
    - "pad_shape": image shape after padding
    - "img_norm_cfg": a dict of normalization information:

      - mean - per channel mean subtraction
      - std - per channel std divisor
      - to_rgb - bool indicating if bgr was converted to rgb

    Args:
        keys (Sequence[str]): Keys of results to be collected in ``data``.
        meta_keys (Sequence[str], optional): Meta keys to be converted to
            ``mmcv.DataContainer`` and collected in ``data[img_metas]``.
            Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',
            'pad_shape', 'scale_factor', 'flip', 'flip_direction',
            'img_norm_cfg')``.
    """

    def __init__(self,
                 keys,
                 meta_keys=('filename', 'ori_filename', 'ori_shape',
                            'img_shape', 'pad_shape', 'scale_factor', 'flip',
                            'flip_direction', 'img_norm_cfg')):
        self.keys = keys
        self.meta_keys = meta_keys

    def __call__(self, results):
        """Call function to collect keys in results. The keys in ``meta_keys``
        will be converted to :obj:`mmcv.DataContainer`.

        Args:
            results (dict): Result dict contains the data to collect.

        Returns:
            dict: The result dict contains the following keys

                - keys in ``self.keys``
                - ``img_metas``
        """

        data = {}
        img_meta = {}
        for key in self.meta_keys:
            img_meta[key] = results[key]
        data['img_metas'] = DC(img_meta, cpu_only=True)
        for key in self.keys:
            data[key] = results[key]
        return data

    def __repr__(self):
        return self.__class__.__name__ + \
               f'(keys={self.keys}, meta_keys={self.meta_keys})'
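

# A minimal usage sketch for ``Collect`` above (illustrative only; only two of
# the default meta keys are supplied here, and the values are arbitrary):
#
#     collect = Collect(keys=['img'], meta_keys=('ori_shape', 'img_shape'))
#     data = collect(dict(
#         img=torch.zeros(3, 256, 256),
#         ori_shape=(256, 256, 3),
#         img_shape=(256, 256, 3)))
#     # data has exactly two entries: data['img'] (the tensor) and
#     # data['img_metas'] (a cpu_only DataContainer holding the meta dict)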