mmsegmentation/mmseg/models/segmentors/encoder_decoder.py

# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmseg.core import add_prefix
from mmseg.ops import resize
from mmseg.registry import MODELS
from .base import BaseSegmentor


@MODELS.register_module()
class EncoderDecoder(BaseSegmentor):
"""Encoder Decoder segmentors.
EncoderDecoder typically consists of backbone, decode_head, auxiliary_head.
Note that auxiliary_head is only used for deep supervision during training,
which could be dumped during inference.
"""
def __init__(self,
backbone,
decode_head,
neck=None,
auxiliary_head=None,
train_cfg=None,
test_cfg=None,
preprocess_cfg=None,
pretrained=None,
init_cfg=None):
super(EncoderDecoder, self).__init__(
preprocess_cfg=preprocess_cfg, init_cfg=init_cfg)
if pretrained is not None:
assert backbone.get('pretrained') is None, \
'both backbone and segmentor set pretrained weight'
backbone.pretrained = pretrained
self.backbone = MODELS.build(backbone)
if neck is not None:
self.neck = MODELS.build(neck)
self._init_decode_head(decode_head)
self._init_auxiliary_head(auxiliary_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
assert self.with_decode_head

    def _init_decode_head(self, decode_head):
"""Initialize ``decode_head``"""
self.decode_head = MODELS.build(decode_head)
self.align_corners = self.decode_head.align_corners
self.num_classes = self.decode_head.num_classes

    def _init_auxiliary_head(self, auxiliary_head):
"""Initialize ``auxiliary_head``"""
if auxiliary_head is not None:
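            # A list config builds several auxiliary heads; each one is
            # supervised independently in _auxiliary_head_forward_train.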
if isinstance(auxiliary_head, list):
self.auxiliary_head = nn.ModuleList()
for head_cfg in auxiliary_head:
self.auxiliary_head.append(MODELS.build(head_cfg))
else:
self.auxiliary_head = MODELS.build(auxiliary_head)

    def extract_feat(self, batch_inputs):
"""Extract features from images."""
x = self.backbone(batch_inputs)
if self.with_neck:
x = self.neck(x)
return x

    def encode_decode(self, batch_inputs, batch_img_metas):
"""Encode images with backbone and decode into a semantic segmentation
map of the same size as input."""
x = self.extract_feat(batch_inputs)
out = self._decode_head_forward_test(x, batch_img_metas)
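        # Upsample the head's logits back to the input resolution;
        # `align_corners` is taken from the decode head's configuration.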
out = resize(
input=out,
size=batch_inputs.shape[2:],
mode='bilinear',
align_corners=self.align_corners)
return out

    def _decode_head_forward_train(self, batch_inputs, batch_data_samples):
"""Run forward function and calculate loss for decode head in
training."""
losses = dict()
loss_decode = self.decode_head.forward_train(batch_inputs,
batch_data_samples,
self.train_cfg)
losses.update(add_prefix(loss_decode, 'decode'))
return losses

    def _decode_head_forward_test(self, batch_inputs, batch_img_metas):
        """Run forward function of the decode head and return its seg logits
        for inference."""
seg_logits = self.decode_head.forward_test(batch_inputs,
batch_img_metas,
self.test_cfg)
return seg_logits

    def _auxiliary_head_forward_train(self, batch_inputs, batch_data_samples):
"""Run forward function and calculate loss for auxiliary head in
training."""
losses = dict()
if isinstance(self.auxiliary_head, nn.ModuleList):
for idx, aux_head in enumerate(self.auxiliary_head):
loss_aux = aux_head.forward_train(batch_inputs,
batch_data_samples,
self.train_cfg)
losses.update(add_prefix(loss_aux, f'aux_{idx}'))
else:
loss_aux = self.auxiliary_head.forward_train(
batch_inputs, batch_data_samples, self.train_cfg)
losses.update(add_prefix(loss_aux, 'aux'))
return losses

    def forward_dummy(self, batch_inputs, batch_img_metas):
"""Dummy forward function."""
seg_logit = self.encode_decode(batch_inputs, batch_img_metas)
return seg_logit

    def forward_train(self, batch_inputs, batch_data_samples):
        """Forward function for training.

        Args:
            batch_inputs (Tensor): Input images of shape (N, C, H, W).
            batch_data_samples (list[:obj:`SegDataSample`]): The seg data
                samples. It usually includes information such as `img_metas`
                or `gt_semantic_seg`.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
x = self.extract_feat(batch_inputs)
losses = dict()
loss_decode = self._decode_head_forward_train(x, batch_data_samples)
losses.update(loss_decode)
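        # Auxiliary losses are prefixed ('aux' or 'aux_<idx>') by add_prefix,
        # so they are logged and weighted separately from the decode head
        # loss.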
if self.with_auxiliary_head:
loss_aux = self._auxiliary_head_forward_train(
x, batch_data_samples)
losses.update(loss_aux)
return losses

    # TODO refactor
    def slide_inference(self, batch_inputs, batch_img_metas, rescale):
        """Inference by sliding window with overlap.

        If h_crop > h_img or w_crop > w_img, the small patch will be used to
        decode without padding.

        Args:
            batch_inputs (Tensor): The tensor should have shape (N, C, H, W),
                which contains all images in the batch.
            batch_img_metas (list[dict]): Meta information of each image,
                e.g., image size, scaling factor, etc.
            rescale (bool): Whether to rescale the output back to the
                original shape of the image.

        Returns:
            Tensor: The segmentation logits.
        """
h_stride, w_stride = self.test_cfg.stride
h_crop, w_crop = self.test_cfg.crop_size
batch_size, _, h_img, w_img = batch_inputs.size()
num_classes = self.num_classes
h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1
w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1
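        # Number of windows per axis: ceil((img - crop) / stride) + 1. For
        # example, h_img=512, h_crop=256 and h_stride=170 give 3 vertical
        # rows of windows.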
preds = batch_inputs.new_zeros((batch_size, num_classes, h_img, w_img))
count_mat = batch_inputs.new_zeros((batch_size, 1, h_img, w_img))
for h_idx in range(h_grids):
for w_idx in range(w_grids):
y1 = h_idx * h_stride
x1 = w_idx * w_stride
y2 = min(y1 + h_crop, h_img)
x2 = min(x1 + w_crop, w_img)
y1 = max(y2 - h_crop, 0)
x1 = max(x2 - w_crop, 0)
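                # Clamp the window to the image border, then shift its origin
                # back so every window keeps the full crop size when possible.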
crop_img = batch_inputs[:, :, y1:y2, x1:x2]
crop_seg_logit = self.encode_decode(crop_img, batch_img_metas)
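                # Paste the window logits onto a full-size canvas and count
                # how many windows cover each pixel, so that overlapping
                # predictions can be averaged afterwards.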
preds += F.pad(crop_seg_logit,
(int(x1), int(preds.shape[3] - x2), int(y1),
int(preds.shape[2] - y2)))
count_mat[:, :, y1:y2, x1:x2] += 1
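        # Every pixel must be covered by at least one window before the
        # accumulated logits are averaged over the overlap counts.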
assert (count_mat == 0).sum() == 0
preds = preds / count_mat
if rescale:
preds = resize(
preds,
size=batch_img_metas[0]['ori_shape'][:2],
mode='bilinear',
align_corners=self.align_corners,
warning=False)
return preds

    def whole_inference(self, batch_inputs, batch_img_metas, rescale):
        """Inference with the full image."""
seg_logit = self.encode_decode(batch_inputs, batch_img_metas)
if rescale:
size = batch_img_metas[0]['ori_shape'][:2]
seg_logit = resize(
seg_logit,
size=size,
mode='bilinear',
align_corners=self.align_corners,
warning=False)
return seg_logit

    def inference(self, batch_inputs, batch_img_metas, rescale):
        """Inference with slide/whole style.

        Args:
            batch_inputs (Tensor): The input image of shape (N, 3, H, W).
            batch_img_metas (list[dict]): Image info list where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also
                contain 'filename', 'ori_shape', 'pad_shape', and
                'img_norm_cfg'. For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            rescale (bool): Whether to rescale back to the original shape.

        Returns:
            Tensor: The output segmentation map.
        """
assert self.test_cfg.mode in ['slide', 'whole']
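        # 'slide' crops overlapping windows sized by test_cfg.crop_size with
        # step test_cfg.stride; 'whole' feeds the full image in one pass.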
ori_shape = batch_img_metas[0]['ori_shape']
assert all(_['ori_shape'] == ori_shape for _ in batch_img_metas)
if self.test_cfg.mode == 'slide':
seg_logit = self.slide_inference(batch_inputs, batch_img_metas,
rescale)
else:
seg_logit = self.whole_inference(batch_inputs, batch_img_metas,
rescale)
output = F.softmax(seg_logit, dim=1)
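        # Undo test-time flip augmentation so the output is aligned with the
        # original image orientation.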
flip = batch_img_metas[0].get('flip', None)
if flip:
flip_direction = batch_img_metas[0]['flip_direction']
assert flip_direction in ['horizontal', 'vertical']
if flip_direction == 'horizontal':
output = output.flip(dims=(3, ))
elif flip_direction == 'vertical':
output = output.flip(dims=(2, ))
return output

    def simple_test(self, batch_inputs, batch_img_metas, rescale=True):
"""Simple test with single image."""
results_dict = dict()
seg_logit = self.inference(batch_inputs, batch_img_metas, rescale)
results_dict['seg_logits'] = seg_logit
seg_pred = seg_logit.argmax(dim=1)
seg_pred = seg_pred.cpu().numpy()
results_dict['pred_sem_seg'] = seg_pred
results_list = self.postprocess_result(results_dict)
return results_list

    def aug_test(self, batch_inputs, batch_img_metas, rescale=True):
        """Test with augmentations.

        Only rescale=True is supported.
        """
# aug_test rescale all imgs back to ori_shape for now
assert rescale
# to save memory, we get augmented seg logit inplace
seg_logit = self.inference(batch_inputs[0], batch_img_metas[0],
rescale)
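        # Accumulate softmax outputs from the remaining augmented views, then
        # average them before taking the argmax.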
for i in range(1, len(batch_inputs)):
cur_seg_logit = self.inference(batch_inputs[i], batch_img_metas[i],
rescale)
seg_logit += cur_seg_logit
seg_logit /= len(batch_inputs)
seg_pred = seg_logit.argmax(dim=1)
# unravel batch dim
seg_pred = list(seg_pred)
return seg_pred