282 lines
8.5 KiB
Python
282 lines
8.5 KiB
Python
# Copyright (c) OpenMMLab. All rights reserved.
|
|
import warnings
|
|
from collections.abc import Sequence
|
|
|
|
import mmcv
|
|
import numpy as np
|
|
import torch
|
|
from mmcv.parallel import DataContainer as DC
|
|
from mmcv.transforms.base import BaseTransform
|
|
from PIL import Image
|
|
|
|
from mmcls.registry import TRANSFORMS
|
|
from mmcls.structures import ClsDataSample
|
|
|
|
|
|
def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.

    Args:
        data: The object to be converted.

    Returns:
        torch.Tensor: The converted tensor. An existing tensor is returned
        as-is; an ``int`` becomes a 1-element ``LongTensor`` and a ``float``
        a 1-element ``FloatTensor``.

    Raises:
        TypeError: If ``data`` is of an unsupported type (including ``str``).
    """
    if isinstance(data, torch.Tensor):
        return data
    elif isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    elif isinstance(data, Sequence) and not isinstance(data, str):
        # Strings are Sequences too, but cannot be turned into tensors;
        # they fall through to the TypeError below.
        return torch.tensor(data)
    elif isinstance(data, int):
        return torch.LongTensor([data])
    elif isinstance(data, float):
        return torch.FloatTensor([data])
    else:
        # Note the trailing space: the message is built from adjacent
        # string literals.
        raise TypeError(
            f'Type {type(data)} cannot be converted to tensor. '
            'Supported types are: `numpy.ndarray`, `torch.Tensor`, '
            '`Sequence`, `int` and `float`')
|
|
|
|
|
|
@TRANSFORMS.register_module()
class PackClsInputs(BaseTransform):
    """Pack the inputs data for the classification.

    The ``img_meta`` item is always populated. Which entries it holds is
    controlled by ``meta_keys``; with the defaults these are:

    - ``sample_idx``: id of the image sample
    - ``img_path``: path to the image file
    - ``ori_shape``: original shape of the image as a tuple (H, W).
    - ``img_shape``: shape of the image input to the network as a tuple
      (H, W). Note that images may be zero padded on the bottom/right
      if the batch tensor is larger than this shape.
    - ``scale_factor``: a float indicating the preprocessing scale
    - ``flip``: a boolean indicating if image flip transform was used
    - ``flip_direction``: the flipping direction

    Args:
        meta_keys (Sequence[str], optional): The meta keys to saved in the
            ``metainfo`` of the packed ``data_sample``.
            Default: ``('sample_idx', 'img_path', 'ori_shape', 'img_shape',
            'scale_factor', 'flip', 'flip_direction')``
    """

    def __init__(self,
                 meta_keys=('sample_idx', 'img_path', 'ori_shape', 'img_shape',
                            'scale_factor', 'flip', 'flip_direction')):
        self.meta_keys = meta_keys

    def transform(self, results: dict) -> dict:
        """Method to pack the input data.

        Args:
            results (dict): Result dict from the data pipeline.

        Returns:
            dict:

            - 'inputs' (obj:`torch.Tensor`): The forward data of models.
            - 'data_sample' (obj:`ClsDataSample`): The annotation info of the
              sample.
        """
        packed_results = dict()
        if 'img' not in results:
            # Best-effort: packing proceeds without 'inputs' so that the
            # metainfo-only path still works.
            warnings.warn(
                'Cannot get "img" in the input dict of `PackClsInputs`,'
                'please make sure `LoadImageFromFile` has been added '
                'in the data pipeline or images have been loaded in '
                'the dataset.')
        else:
            img = results['img']
            if img.ndim < 3:
                # Promote grayscale (H, W) to (H, W, 1).
                img = np.expand_dims(img, -1)
            # HWC -> CHW; contiguity is required before tensor conversion.
            chw = np.ascontiguousarray(img.transpose(2, 0, 1))
            packed_results['inputs'] = to_tensor(chw)

        data_sample = ClsDataSample()
        if 'gt_label' in results:
            data_sample.set_gt_label(results['gt_label'])

        # Only keys actually present in `results` are copied into metainfo.
        meta = {key: results[key] for key in self.meta_keys if key in results}
        data_sample.set_metainfo(meta)
        packed_results['data_sample'] = data_sample

        return packed_results

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}(meta_keys={self.meta_keys})'
|
|
|
|
|
|
@TRANSFORMS.register_module()
class ToTensor(object):
    """Convert the specified fields of the results dict to :obj:`torch.Tensor`.

    Args:
        keys (Sequence[str]): Keys of results whose values are converted
            with :func:`to_tensor`.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        for field in self.keys:
            results[field] = to_tensor(results[field])
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys})'
|
|
|
|
|
|
@TRANSFORMS.register_module()
class ImageToTensor(object):
    """Convert image arrays to :obj:`torch.Tensor` in (C, H, W) order.

    Each ``results[key]`` is expected to be a :class:`numpy.ndarray` in
    (H, W, C) layout (a 2-D grayscale array gets a trailing channel axis
    added first), then transposed to (C, H, W) and converted with
    :func:`to_tensor`.

    Args:
        keys (Sequence[str]): Keys of results whose image values are
            converted.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        for key in self.keys:
            img = results[key]
            if len(img.shape) < 3:
                # Promote grayscale (H, W) to (H, W, 1).
                img = np.expand_dims(img, -1)
            # Make the transposed view contiguous before tensor conversion,
            # consistent with `PackClsInputs.transform`.
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results[key] = to_tensor(img)
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys})'
|
|
|
|
|
|
@TRANSFORMS.register_module()
class Transpose(object):
    """Transpose the axes of the specified array fields.

    Args:
        keys (Sequence[str]): Keys of results whose values are transposed.
        order (Sequence[int]): The target axis order, passed to
            ``ndarray.transpose``.
    """

    def __init__(self, keys, order):
        self.keys = keys
        self.order = order

    def __call__(self, results):
        for field in self.keys:
            results[field] = results[field].transpose(self.order)
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys}, order={self.order})'
|
|
|
|
|
|
@TRANSFORMS.register_module()
class ToPIL(object):
    """Convert the image array in ``results['img']`` to :obj:`PIL.Image`."""

    def __call__(self, results):
        results['img'] = Image.fromarray(results['img'])
        return results
|
|
|
|
|
|
@TRANSFORMS.register_module()
class ToNumpy(object):
    """Convert ``results['img']`` to a float32 :obj:`np.ndarray`."""

    def __call__(self, results):
        # np.array copies and casts regardless of the input type.
        results['img'] = np.array(results['img'], dtype=np.float32)
        return results
|
|
|
|
|
|
@TRANSFORMS.register_module()
class Collect(object):
    """Collect data from the loader relevant to the specific task.

    This is usually the last stage of the data loader pipeline. Typically keys
    is set to some subset of "img" and "gt_label".

    Args:
        keys (Sequence[str]): Keys of results to be collected in ``data``.
        meta_keys (Sequence[str], optional): Meta keys to be converted to
            ``mmcv.DataContainer`` and collected in ``data[img_metas]``.
            Default: ('filename', 'ori_shape', 'img_shape', 'flip',
            'flip_direction', 'img_norm_cfg')

    Returns:
        dict: The result dict contains the following keys

        - keys in ``self.keys``
        - ``img_metas`` if available
    """

    def __init__(self,
                 keys,
                 meta_keys=('filename', 'ori_filename', 'ori_shape',
                            'img_shape', 'flip', 'flip_direction',
                            'img_norm_cfg')):
        self.keys = keys
        self.meta_keys = meta_keys

    def __call__(self, results):
        # Meta entries missing from `results` are silently skipped.
        img_meta = {k: results[k] for k in self.meta_keys if k in results}
        data = {'img_metas': DC(img_meta, cpu_only=True)}
        # Data keys are mandatory: a missing key raises KeyError.
        data.update((k, results[k]) for k in self.keys)
        return data

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(keys={self.keys}, meta_keys={self.meta_keys})')
|
|
|
|
|
|
@TRANSFORMS.register_module()
class WrapFieldsToLists(object):
    """Wrap fields of the data dictionary into lists for evaluation.

    This class can be used as a last step of a test or validation
    pipeline for single image evaluation or inference.

    Example:
        >>> test_pipeline = [
        >>>     dict(type='LoadImageFromFile'),
        >>>     dict(type='Normalize',
                     mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375],
                     to_rgb=True),
        >>>     dict(type='ImageToTensor', keys=['img']),
        >>>     dict(type='Collect', keys=['img']),
        >>>     dict(type='WrapFieldsToLists')
        >>> ]
    """

    def __call__(self, results):
        # Wrap every field in place so the dict object identity is kept.
        for field in list(results):
            results[field] = [results[field]]
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}()'
|
|
|
|
|
|
@TRANSFORMS.register_module()
class ToHalf(object):
    """Cast the specified fields to half precision (float16).

    Tensors are converted with ``Tensor.to(torch.half)``; any other value is
    assumed to support ``astype`` (i.e. a numpy array) and is cast to
    ``np.float16``.

    Args:
        keys (Sequence[str]): Keys of results whose values are cast.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        for field in self.keys:
            value = results[field]
            if isinstance(value, torch.Tensor):
                results[field] = value.to(torch.half)
            else:
                results[field] = value.astype(np.float16)
        return results
|