From 48a781cd6aca7be40821b55679bebb30295d4336 Mon Sep 17 00:00:00 2001
From: lixinran
Date: Thu, 31 Dec 2020 10:02:23 +0800
Subject: [PATCH] Revised according to comments

---
 mmcls/core/evaluation/mean_ap.py | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

diff --git a/mmcls/core/evaluation/mean_ap.py b/mmcls/core/evaluation/mean_ap.py
index 41fd649a0..c482e1b1b 100644
--- a/mmcls/core/evaluation/mean_ap.py
+++ b/mmcls/core/evaluation/mean_ap.py
@@ -5,6 +5,15 @@ import torch
 def average_precision(pred, target):
     """ Calculate the average precision for a single class
 
+    AP summarizes a precision-recall curve as the weighted mean of maximum
+    precisions obtained for any r'>r, where r is the recall:
+
+    .. math::
+        \\text{AP} = \\sum_n (R_n - R_{n-1}) P_n
+
+    Note that no approximation is involved since the curve is piecewise
+    constant.
+
     Args:
         pred (np.ndarray): The model prediction with shape (N, ).
         target (np.ndarray): The target of each prediction with shape (N, ).
@@ -19,17 +28,17 @@ def average_precision(pred, target):
     sort_target = target[sort_inds]
 
     # count true positive examples
-    p_inds = sort_target == 1
-    tp = np.cumsum(p_inds)
-    total_p = tp[-1]
+    pos_inds = sort_target == 1
+    tp = np.cumsum(pos_inds)
+    total_pos = tp[-1]
 
     # count not difficult examples
     pn_inds = sort_target != -1
     pn = np.cumsum(pn_inds)
 
-    tp[np.logical_not(p_inds)] = 0
+    tp[np.logical_not(pos_inds)] = 0
     precision = tp / np.maximum(pn, eps)
-    ap = np.sum(precision) / np.maximum(total_p, eps)
+    ap = np.sum(precision) / np.maximum(total_pos, eps)
     return ap
 
 