# Copyright (c) Alibaba, Inc. and its affiliates.
import torch
import torch.nn as nn
import torch.nn.functional as F

from easycv.models import builder
from easycv.models.base import BaseModel
from easycv.models.builder import MODELS
from easycv.models.utils.ops import resize_tensor
from easycv.utils.checkpoint import load_checkpoint
from easycv.utils.logger import get_root_logger, print_log
from easycv.utils.misc import add_prefix


# Modified from https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/models/segmentors/encoder_decoder.py
@MODELS.register_module()
class EncoderDecoder(BaseModel):
    """Encoder-Decoder segmentor.

    EncoderDecoder typically consists of a backbone, a decode_head and,
    optionally, a neck and an auxiliary_head. Note that the auxiliary_head
    is only used for deep supervision during training and can be discarded
    at inference time.
    """

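    # Minimal usage sketch (the 'type' values below are hypothetical
    # placeholders; the actual backbone/head types and their options come
    # from the EasyCV configs):
    #
    #   model = EncoderDecoder(
    #       backbone=dict(type='ResNet', depth=50),
    #       decode_head=dict(type='FCNHead', num_classes=19))
    #   losses = model.forward_train(img, img_metas, gt_semantic_seg)
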
    def __init__(
        self,
        backbone,
        decode_head,
        neck=None,
        auxiliary_head=None,
        train_cfg=None,
        test_cfg=None,
        pretrained=None,
    ):
        super(EncoderDecoder, self).__init__()
        self.backbone = builder.build_backbone(backbone)

        self.neck = neck
        self.auxiliary_head = auxiliary_head
        self.pretrained = pretrained
        if self.neck is not None:
            self.neck = builder.build_neck(self.neck)

        self.decode_head = builder.build_head(decode_head)
        self.align_corners = self.decode_head.align_corners
        self.num_classes = self.decode_head.num_classes

        if auxiliary_head is not None:
            auxiliary_head = [
                auxiliary_head
            ] if not isinstance(auxiliary_head, list) else auxiliary_head
            self.auxiliary_head = nn.ModuleList()
            for head_cfg in auxiliary_head:
                self.auxiliary_head.append(builder.build_head(head_cfg))

        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

        self.init_weights()

    def init_weights(self):
        logger = get_root_logger()
        if isinstance(self.pretrained, str):
            load_checkpoint(
                self.backbone, self.pretrained, strict=False, logger=logger)
        elif self.pretrained:
            if self.backbone.__class__.__name__ == 'PytorchImageModelWrapper':
                self.backbone.init_weights(pretrained=self.pretrained)
            elif hasattr(self.backbone, 'default_pretrained_model_path'
                         ) and self.backbone.default_pretrained_model_path:
                print_log(
                    'load model from default path: {}'.format(
                        self.backbone.default_pretrained_model_path), logger)
                load_checkpoint(
                    self.backbone,
                    self.backbone.default_pretrained_model_path,
                    strict=False,
                    logger=logger)
            else:
                print_log('load model from init weights')
                self.backbone.init_weights()
        else:
            print_log('load model from init weights')
            self.backbone.init_weights()

        if hasattr(self.decode_head, 'init_weights'):
            self.decode_head.init_weights()

        if self.auxiliary_head is not None:
            for idx in range(len(self.auxiliary_head)):
                if hasattr(self.auxiliary_head[idx], 'init_weights'):
                    self.auxiliary_head[idx].init_weights()

        if self.neck is not None and hasattr(self.neck, 'init_weights'):
            self.neck.init_weights()

    def extract_feat(self, img):
        """Extract features from images."""
        x = self.backbone(img)
        if self.neck is not None:
            x = self.neck(x)
        return x

    def encode_decode(self, img, img_metas):
        """Encode images with backbone and decode into a semantic segmentation
        map of the same size as input."""
        x = self.extract_feat(img)
        out = self._decode_head_forward_test(x, img_metas)
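        # Upsample the head's logits back to the input resolution so the
        # prediction aligns pixel-to-pixel with the image.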
        out = resize_tensor(
            input=out,
            size=img.shape[2:],
            mode='bilinear',
            align_corners=self.align_corners)
        return out

    def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg):
        """Run forward function and calculate loss for decode head in
        training."""
        losses = dict()
        loss_decode = self.decode_head.forward_train(x, img_metas,
                                                     gt_semantic_seg,
                                                     self.train_cfg)

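        # Prefix the keys so decode-head losses are distinguishable in the
        # logs, e.g. a 'loss_seg' entry becomes 'decode.loss_seg' (the exact
        # key name depends on the head).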
        losses.update(add_prefix(loss_decode, 'decode'))
        return losses

    def _decode_head_forward_test(self, x, img_metas):
        """Run forward function of the decode head in inference and return
        the segmentation logits."""
        seg_logits = self.decode_head.forward_test(x, img_metas, self.test_cfg)
        return seg_logits

    def _auxiliary_head_forward_train(self, x, img_metas, gt_semantic_seg):
        """Run forward function and calculate loss for auxiliary head in
        training."""
        losses = dict()
        for idx, aux_head in enumerate(self.auxiliary_head):
            loss_aux = aux_head.forward_train(x, img_metas, gt_semantic_seg,
                                              self.train_cfg)
            losses.update(add_prefix(loss_aux, f'aux_{idx}'))
        return losses

    def forward_train(self, img, img_metas, gt_semantic_seg):
        """Forward function for training.

        Args:
            img (Tensor): Input images.
            img_metas (list[dict]): List of image info dicts where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            gt_semantic_seg (Tensor): Semantic segmentation masks, used if
                the architecture supports the semantic segmentation task.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        x = self.extract_feat(img)
        losses = dict()
        loss_decode = self._decode_head_forward_train(x, img_metas,
                                                      gt_semantic_seg)
        losses.update(loss_decode)

        if self.auxiliary_head is not None:
            loss_aux = self._auxiliary_head_forward_train(
                x, img_metas, gt_semantic_seg)
            losses.update(loss_aux)

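        # The returned dict aggregates decode- and auxiliary-head terms,
        # e.g. {'decode.loss_seg': ..., 'aux_0.loss_seg': ...} (key names
        # depend on the configured heads).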
        return losses

    def forward_test(self, imgs, img_metas, **kwargs):
        """
        Args:
            imgs (List[Tensor]): The outer list indicates test-time
                augmentations; each inner Tensor has shape NxCxHxW and
                contains all images in the batch.
            img_metas (List[List[dict]]): The outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch.
        """
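        # For example, with two TTA scales and a batch of N images:
        #   imgs      = [Tensor(N, C, H1, W1), Tensor(N, C, H2, W2)]
        #   img_metas = [[meta_1, ..., meta_N], [meta_1, ..., meta_N]]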
        for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError(f'{name} must be a list, but got '
                                f'{type(var)}')

        num_augs = len(imgs)
        if num_augs != len(img_metas):
            raise ValueError(f'num of augmentations ({len(imgs)}) != '
                             f'num of image meta ({len(img_metas)})')
        # all images in the same aug batch should have the same ori_shape,
        # img_shape and pad_shape
        for img_meta in img_metas:
            ori_shapes = [_['ori_shape'] for _ in img_meta]
            assert all(shape == ori_shapes[0] for shape in ori_shapes)
            img_shapes = [_['img_shape'] for _ in img_meta]
            assert all(shape == img_shapes[0] for shape in img_shapes)
            pad_shapes = [_['pad_shape'] for _ in img_meta]
            assert all(shape == pad_shapes[0] for shape in pad_shapes)

        if num_augs == 1:
            return self.simple_test(imgs[0], img_metas[0], **kwargs)
        else:
            return self.aug_test(imgs, img_metas, **kwargs)

    def slide_inference(self, img, img_meta, rescale):
        """Inference by sliding-window with overlap.

        If h_crop > h_img or w_crop > w_img, the small patch will be used to
        decode without padding.
        """
        h_stride, w_stride = self.test_cfg.stride
        h_crop, w_crop = self.test_cfg.crop_size
        batch_size, _, h_img, w_img = img.size()
        num_classes = self.num_classes
        h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1
        w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1
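        # Worked example of the grid arithmetic: with h_img=512, h_crop=256,
        # h_stride=170 this gives max(512 - 256 + 169, 0) // 170 + 1 = 3
        # windows starting at y = 0, 170 and 256 (the last window is clamped
        # below so it ends exactly at the image border).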
        preds = img.new_zeros((batch_size, num_classes, h_img, w_img))
        count_mat = img.new_zeros((batch_size, 1, h_img, w_img))
        for h_idx in range(h_grids):
            for w_idx in range(w_grids):
                y1 = h_idx * h_stride
                x1 = w_idx * w_stride
                y2 = min(y1 + h_crop, h_img)
                x2 = min(x1 + w_crop, w_img)
                y1 = max(y2 - h_crop, 0)
                x1 = max(x2 - w_crop, 0)
                crop_img = img[:, :, y1:y2, x1:x2]
                crop_seg_logit = self.encode_decode(crop_img, img_meta)
                preds += F.pad(crop_seg_logit,
                               (int(x1), int(preds.shape[3] - x2), int(y1),
                                int(preds.shape[2] - y2)))

                count_mat[:, :, y1:y2, x1:x2] += 1
        assert (count_mat == 0).sum() == 0
        if torch.onnx.is_in_onnx_export():
            # cast count_mat to constant while exporting to ONNX
            count_mat = torch.from_numpy(
                count_mat.cpu().detach().numpy()).to(device=img.device)
        preds = preds / count_mat
        if rescale:
            preds = resize_tensor(
                preds,
                size=img_meta[0]['ori_shape'][:2],
                mode='bilinear',
                align_corners=self.align_corners,
                warning=False)
        return preds

    def whole_inference(self, img, img_meta, rescale):
        """Inference with the full image."""
        seg_logit = self.encode_decode(img, img_meta)
        if rescale:
            # support dynamic shape for onnx
            if torch.onnx.is_in_onnx_export():
                size = img.shape[2:]
            else:
                size = img_meta[0]['ori_shape'][:2]
            seg_logit = resize_tensor(
                seg_logit,
                size=size,
                mode='bilinear',
                align_corners=self.align_corners,
                warning=False)

        return seg_logit

    def inference(self, img, img_meta, rescale):
        """Inference with slide/whole style.

        Args:
            img (Tensor): The input image of shape (N, 3, H, W).
            img_meta (list[dict]): List of image info dicts where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also
                contain 'filename', 'ori_shape', 'pad_shape', and
                'img_norm_cfg'. For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            rescale (bool): Whether to rescale back to the original shape.

        Returns:
            Tensor: The output segmentation map.
        """
        assert self.test_cfg.mode in ['slide', 'whole']
        ori_shape = img_meta[0]['ori_shape']
        assert all(_['ori_shape'] == ori_shape for _ in img_meta)
        if self.test_cfg.mode == 'slide':
            seg_logit = self.slide_inference(img, img_meta, rescale)
        else:
            seg_logit = self.whole_inference(img, img_meta, rescale)
        output = F.softmax(seg_logit, dim=1)
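        # Undo any test-time flip: in NCHW layout dim 3 is the width axis
        # (horizontal flip) and dim 2 is the height axis (vertical flip).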
        flip = img_meta[0]['flip']
        if flip:
            flip_direction = img_meta[0]['flip_direction']
            assert flip_direction in ['horizontal', 'vertical']
            if flip_direction == 'horizontal':
                output = output.flip(dims=(3, ))
            elif flip_direction == 'vertical':
                output = output.flip(dims=(2, ))

        return output

    def simple_test(self, img, img_meta, rescale=True):
        """Simple test with a single image."""
        seg_logit = self.inference(img, img_meta, rescale)
        seg_pred = seg_logit.argmax(dim=1)
        if torch.onnx.is_in_onnx_export():
            # our inference backend only supports 4D output
            seg_pred = seg_pred.unsqueeze(0)
            return seg_pred
        seg_pred = seg_pred.cpu().numpy()
        # unravel batch dim
        seg_pred = list(seg_pred)
        return {'seg_pred': seg_pred}

    def aug_test(self, imgs, img_metas, rescale=True):
        """Test with augmentations.

        Only rescale=True is supported.
        """
        # aug_test rescales all imgs back to ori_shape for now
        assert rescale
        # to save memory, we accumulate the augmented seg logits in place
        seg_logit = self.inference(imgs[0], img_metas[0], rescale)
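        # Note: inference() returns softmax output, so the loop below
        # averages class probabilities across augmentations.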
        for i in range(1, len(imgs)):
            cur_seg_logit = self.inference(imgs[i], img_metas[i], rescale)
            seg_logit += cur_seg_logit
        seg_logit /= len(imgs)
        seg_pred = seg_logit.argmax(dim=1)
        seg_pred = seg_pred.cpu().numpy()
        # unravel batch dim
        seg_pred = list(seg_pred)
        return {'seg_pred': seg_pred}