# mmclassification/mmcls/models/losses/accuracy.py

import numpy as np
import torch
import torch.nn as nn


def accuracy_numpy(pred, target, topk=1, thrs=None):
    if thrs is None:
        thrs = 0.0
    if isinstance(thrs, float):
        thrs = (thrs, )
        res_single = True
    elif isinstance(thrs, tuple):
        res_single = False
    else:
        raise TypeError(
            f'thrs should be float or tuple, but got {type(thrs)}.')

    res = []
    maxk = max(topk)
    num = pred.shape[0]
    # Top-maxk predicted labels and their scores, both in descending
    # score order.
    pred_label = pred.argsort(axis=1)[:, -maxk:][:, ::-1]
    pred_score = np.sort(pred, axis=1)[:, -maxk:][:, ::-1]
    for k in topk:
        correct_k = pred_label[:, :k] == target.reshape(-1, 1)
        res_thr = []
        for thr in thrs:
            # Only prediction scores larger than thr are counted as correct.
            _correct_k = correct_k & (pred_score[:, :k] > thr)
            # A sample counts if any of its top-k predictions hits the target.
            _correct_k = np.logical_or.reduce(_correct_k, axis=1)
            res_thr.append(_correct_k.sum() * 100. / num)
        if res_single:
            res.append(res_thr[0])
        else:
            res.append(res_thr)
    return res
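
# A minimal usage sketch for ``accuracy_numpy`` (the inputs below are
# hypothetical, not part of the original file). Note that ``topk`` must be
# an iterable here, since the function calls ``max(topk)``:
#
#   pred = np.array([[0.1, 0.7, 0.2],
#                    [0.5, 0.3, 0.2]])
#   target = np.array([1, 0])
#   accuracy_numpy(pred, target, topk=(1, ))  # -> [100.0], in percent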


def accuracy_torch(pred, target, topk=1, thrs=None):
    if thrs is None:
        thrs = 0.0
    if isinstance(thrs, float):
        thrs = (thrs, )
        res_single = True
    elif isinstance(thrs, tuple):
        res_single = False
    else:
        raise TypeError(
            f'thrs should be float or tuple, but got {type(thrs)}.')

    res = []
    maxk = max(topk)
    num = pred.size(0)
    # After the transpose, pred_label has shape (maxk, N): row k holds
    # every sample's (k+1)-th best predicted label.
    pred_score, pred_label = pred.topk(maxk, dim=1)
    pred_label = pred_label.t()
    correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
    for k in topk:
        res_thr = []
        for thr in thrs:
            # Only prediction scores larger than thr are counted as correct.
            _correct = correct & (pred_score.t() > thr)
            correct_k = _correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res_thr.append(correct_k.mul_(100. / num))
        if res_single:
            res.append(res_thr[0])
        else:
            res.append(res_thr)
    return res
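
# An equivalent sketch for the torch path (hypothetical inputs; as above,
# ``topk`` is expected to be a tuple). Each per-threshold result is a
# 1-element tensor because of ``sum(0, keepdim=True)``:
#
#   pred = torch.tensor([[0.1, 0.7, 0.2],
#                        [0.5, 0.3, 0.2]])
#   target = torch.tensor([1, 0])
#   accuracy_torch(pred, target, topk=(1, ))  # -> [tensor([100.])]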


def accuracy(pred, target, topk=1, thrs=None):
    """Calculate accuracy according to the prediction and target.

    Args:
        pred (torch.Tensor | np.ndarray): The model prediction.
        target (torch.Tensor | np.ndarray): The target of each prediction.
        topk (int | tuple[int]): If a prediction in the top ``k``
            predictions matches the target, the prediction is regarded as
            correct. Defaults to 1.
        thrs (float | tuple[float], optional): Predictions with scores
            under the thresholds are considered negative. Defaults to None.

    Returns:
        float | list[float] | list[list[float]]: Accuracy
            - float: If both ``topk`` and ``thrs`` are single values.
            - list[float]: If one of ``topk`` or ``thrs`` is a tuple.
            - list[list[float]]: If both ``topk`` and ``thrs`` are tuples.
              The first dim is ``topk``, the second dim is ``thrs``.
    """
    assert isinstance(topk, (int, tuple))
    if isinstance(topk, int):
        topk = (topk, )
        return_single = True
    else:
        return_single = False

    if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):
        res = accuracy_torch(pred, target, topk, thrs)
    elif isinstance(pred, np.ndarray) and isinstance(target, np.ndarray):
        res = accuracy_numpy(pred, target, topk, thrs)
    else:
        raise TypeError(
            f'pred and target should both be torch.Tensor or np.ndarray, '
            f'but got {type(pred)} and {type(target)}.')

    return res[0] if return_single else res
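
# A short sketch of the public ``accuracy`` wrapper (hypothetical values).
# A scalar ``topk`` unwraps the outer list; tuples for both ``topk`` and
# ``thrs`` yield a nested list indexed as ``res[topk_idx][thr_idx]``:
#
#   pred = torch.tensor([[0.1, 0.7, 0.2],
#                        [0.5, 0.3, 0.2]])
#   target = torch.tensor([1, 0])
#   accuracy(pred, target, topk=1)
#   # -> tensor([100.])
#   accuracy(pred, target, topk=(1, 2), thrs=(0.0, 0.6))
#   # -> [[tensor([100.]), tensor([50.])], [tensor([100.]), tensor([50.])]]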


class Accuracy(nn.Module):

    def __init__(self, topk=(1, )):
        """Module to calculate the accuracy.

        Args:
            topk (tuple): The criterion used to calculate the
                accuracy. Defaults to (1,).
        """
        super().__init__()
        self.topk = topk

    def forward(self, pred, target):
        """Forward function to calculate accuracy.

        Args:
            pred (torch.Tensor): Prediction of models.
            target (torch.Tensor): Target for each prediction.

        Returns:
            list[float]: The accuracies under different topk criteria.
        """
        return accuracy(pred, target, self.topk)
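

# A minimal smoke test for the ``Accuracy`` module (hypothetical inputs, not
# part of the original file); ``forward`` returns one value per entry in
# ``self.topk``. A sketch assuming the file is run directly:

if __name__ == '__main__':
    pred = torch.tensor([[0.1, 0.7, 0.2],
                         [0.5, 0.3, 0.2]])
    target = torch.tensor([1, 0])
    acc = Accuracy(topk=(1, 2))
    # Both the top-1 and top-2 predictions contain the targets here.
    print(acc(pred, target))  # -> [tensor([100.]), tensor([100.])]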