# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F

from ..builder import HEADS
from .cls_head import ClsHead


@HEADS.register_module()
class LinearClsHead(ClsHead):
    """Linear classifier head.

    A single fully-connected layer that maps backbone features to
    per-class scores.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        init_cfg (dict | optional): The extra init config of layers.
            Defaults to use dict(type='Normal', layer='Linear', std=0.01).
    """

    def __init__(self,
                 num_classes,
                 in_channels,
                 init_cfg=dict(type='Normal', layer='Linear', std=0.01),
                 *args,
                 **kwargs):
        super(LinearClsHead, self).__init__(init_cfg=init_cfg, *args, **kwargs)

        self.in_channels = in_channels
        self.num_classes = num_classes

        # A Linear layer with a non-positive output width is meaningless;
        # fail loudly at construction time instead of later in forward().
        if num_classes <= 0:
            raise ValueError(
                f'num_classes={num_classes} must be a positive integer')

        self.fc = nn.Linear(in_channels, num_classes)

    def pre_logits(self, x):
        """Select the tensor to classify.

        Multi-stage backbones hand over a tuple of feature maps; only the
        last stage is fed to the classifier. A bare tensor passes through
        unchanged.
        """
        return x[-1] if isinstance(x, tuple) else x

    def simple_test(self, x, softmax=True, post_process=True):
        """Inference without augmentation.

        Args:
            x (tuple[Tensor]): The input features.
                Multi-stage inputs are acceptable but only the last stage will
                be used to classify. The shape of every item should be
                ``(num_samples, in_channels)``.
            softmax (bool): Whether to softmax the classification score.
            post_process (bool): Whether to do post processing the
                inference results. It will convert the output to a list.

        Returns:
            Tensor | list: The inference results.

            - If no post processing, the output is a tensor with shape
              ``(num_samples, num_classes)``.
            - If post processing, the output is a multi-dimentional list of
              float and the dimensions are ``(num_samples, num_classes)``.
        """
        cls_score = self.fc(self.pre_logits(x))

        if softmax:
            # Keep the None pass-through for API symmetry with other heads.
            pred = (
                None if cls_score is None else F.softmax(cls_score, dim=1))
        else:
            pred = cls_score

        return self.post_process(pred) if post_process else pred

    def forward_train(self, x, gt_label, **kwargs):
        """Forward the head and compute losses against ground-truth labels."""
        logits = self.fc(self.pre_logits(x))
        return self.loss(logits, gt_label, **kwargs)