mirror of https://github.com/alibaba/EasyCV.git
add NuScenesEvaluator, support dist eval for list model results
parent e7b69a8f63
commit c350bc8b02
@@ -8,7 +8,7 @@ voxel_size = [0.2, 0.2, 8]
 img_norm_cfg = dict(
     mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
 # For nuScenes we usually do 10-class detection
-class_names = [
+CLASSES = [
     'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
     'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
 ]
@@ -168,10 +168,10 @@ train_pipeline = [
         with_label_3d=True,
         with_attr_label=False),
     dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
-    dict(type='ObjectNameFilter', classes=class_names),
+    dict(type='ObjectNameFilter', classes=CLASSES),
     dict(type='NormalizeMultiviewImage', **img_norm_cfg),
     dict(type='PadMultiViewImage', size_divisor=32),
-    dict(type='DefaultFormatBundle3D', class_names=class_names),
+    dict(type='DefaultFormatBundle3D', class_names=CLASSES),
     dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img'])
 ]
 
@@ -187,7 +187,7 @@ test_pipeline = [
         transforms=[
             dict(
                 type='DefaultFormatBundle3D',
-                class_names=class_names,
+                class_names=CLASSES,
                 with_label=False),
             dict(type='CustomCollect3D', keys=['img'])
         ])
@@ -204,7 +204,7 @@ data = dict(
         data_root=data_root,
         ann_file=data_root + 'nuscenes_infos_temporal_train.pkl',
         pipeline=train_pipeline,
-        classes=class_names,
+        classes=CLASSES,
         modality=input_modality,
         test_mode=False,
         use_valid_flag=True,
@@ -214,22 +214,24 @@ data = dict(
         # and box_type_3d='Depth' in sunrgbd and scannet dataset.
         box_type_3d='LiDAR'),
     val=dict(
+        imgs_per_gpu=1,
         type=dataset_type,
         data_root=data_root,
         ann_file=data_root + 'nuscenes_infos_temporal_val.pkl',
         pipeline=test_pipeline,
         bev_size=(bev_h_, bev_w_),
-        classes=class_names,
+        classes=CLASSES,
         modality=input_modality,
-        samples_per_gpu=1),
+        test_mode=True),
     test=dict(
         type=dataset_type,
         data_root=data_root,
        ann_file=data_root + 'nuscenes_infos_temporal_val.pkl',
         pipeline=test_pipeline,
         bev_size=(bev_h_, bev_w_),
-        classes=class_names,
-        modality=input_modality))
+        classes=CLASSES,
+        modality=input_modality,
+        test_mode=True))
 
 paramwise_cfg = dict(custom_keys={
     'img_backbone': dict(lr_mult=0.1),
@@ -246,9 +248,22 @@ lr_config = dict(
     warmup_ratio=1.0 / 3,
     min_lr_ratio=1e-3)
 total_epochs = 24
-evaluation = dict(interval=1, pipeline=test_pipeline)
 
 runner = dict(type='EpochBasedRunner', max_epochs=total_epochs)
+eval_config = dict(initial=False, interval=1, gpu_collect=False)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        dist_eval=True,
+        evaluators=[
+            dict(
+                type='NuScenesEvaluator',
+                classes=CLASSES,
+                result_names=['pts_bbox'])
+        ],
+    )
+]
 load_from = '/root/workspace/data/nuScenes/r101_dcn_fcos3d_pretrain.pth'
 log_config = dict(
     interval=1,
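
The stock `evaluation = dict(...)` hook is replaced here by EasyCV's `eval_config`/`eval_pipelines` convention, where each evaluator entry is a plain config dict. A minimal sketch of how such a dict could be instantiated through the `EVALUATORS` registry, assuming mmcv's generic `build_from_cfg` helper (EasyCV's own builder wrapper may differ):

# Sketch only: build the evaluator declared in eval_pipelines[0] above.
# `build_from_cfg` is mmcv's generic registry constructor; this commit
# registers NuScenesEvaluator into the EVALUATORS registry.
from mmcv.utils import build_from_cfg

from easycv.core.evaluation.builder import EVALUATORS

evaluator_cfgs = eval_pipelines[0]['evaluators']  # from the config above
evaluators = [build_from_cfg(cfg, EVALUATORS) for cfg in evaluator_cfgs]
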
@@ -101,6 +101,8 @@ def single_gpu_test(model, data_loader, mode='test', use_fp16=False, **kwargs):
 
     prog_bar = mmcv.ProgressBar(data_len)
     results = {}
+    results_list = []
+    is_list_result = False
     for i, data in enumerate(data_loader):
         # use scatter_kwargs to unpack DataContainer data for raw torch.nn.module
         if not isinstance(model,
@@ -114,28 +116,37 @@
         with torch.no_grad():
             result = model(**data, mode=mode)
 
-        for k, v in result.items():
-            if k not in results:
-                results[k] = []
-            results[k].append(v)
-
-        if 'img_metas' in data:
-            if isinstance(data['img_metas'], list):
-                batch_size = len(data['img_metas'][0].data[0])
-            else:
-                batch_size = len(data['img_metas'].data[0])
-
+        if isinstance(result, list):
+            batch_size = len(result)
+            results_list.extend(result)
+            is_list_result = True
         else:
-            if isinstance(data['img'], list):
-                batch_size = data['img'][0].size(0)
+            for k, v in result.items():
+                if k not in results:
+                    results[k] = []
+                results[k].append(v)
+
+            if 'img_metas' in data:
+                if isinstance(data['img_metas'], list):
+                    batch_size = len(data['img_metas'][0].data[0])
+                else:
+                    batch_size = len(data['img_metas'].data[0])
+
+            else:
-                batch_size = data['img'].size(0)
+                if isinstance(data['img'], list):
+                    batch_size = data['img'][0].size(0)
+                else:
+                    batch_size = data['img'].size(0)
 
         for _ in range(batch_size):
             prog_bar.update()
 
         # new line for prog_bar
         print()
 
+    if is_list_result:
+        return results_list
+
     for k, v in results.items():
         if len(v) == 0:
             raise ValueError(f'empty result for {k}')
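
With this change `single_gpu_test` accepts two result layouts. A standalone illustration of the accumulation logic (sketch only, not EasyCV code):

# Dict-style results are merged key-by-key; list-style results (one
# prediction entry per sample) are concatenated and returned as a flat list.
def accumulate(result, results, results_list):
    if isinstance(result, list):
        results_list.extend(result)  # one entry per sample
        return True                  # caller records is_list_result
    for k, v in result.items():
        results.setdefault(k, []).append(v)
    return False
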
@@ -186,6 +197,9 @@ def multi_gpu_test(model,
 
     model.eval()
     results = {}
+    results_list = []
+    is_list_result = False
+
     rank, world_size = get_dist_info()
 
     if hasattr(data_loader, 'dataset'):  # normal dataloader
@@ -206,14 +220,20 @@
             # encoded_mask_results = encode_mask_results(mask_results)
             # result = bbox_results, encoded_mask_results
 
-            for k, v in result.items():
-                if k not in results:
-                    results[k] = []
+            if isinstance(result, (tuple, list)):
+                results_list.extend(result)
+                is_list_result = True
+            else:
+                for k, v in result.items():
+                    if k not in results:
+                        results[k] = []
 
-                results[k].append(v)
+                    results[k].append(v)
 
         if rank == 0:
-            if 'img_metas' in data:
+            if is_list_result:
+                batch_size = len(result)
+            elif 'img_metas' in data:
                 if isinstance(data['img_metas'], list):
                     batch_size = len(data['img_metas'][0].data[0])
                 else:
@@ -224,8 +244,8 @@
             for _ in range(batch_size * world_size):
                 prog_bar.update()
 
-    # new line for prog_bar
-    # print("gpu_collect", gpu_collect)
+    if is_list_result:
+        results = results_list
 
     # collect results from all ranks
     if gpu_collect:
@@ -234,6 +254,9 @@
         results = collect_results_cpu(results, data_len, tmpdir)
 
     if rank == 0:
+        if is_list_result:
+            return results
+
         for k, v in results.items():
             if len(v) == 0:
                 raise ValueError(f'empty result for {k}')
@@ -278,18 +301,31 @@ def collect_results_cpu(result_part, size, tmpdir=None):
     else:
         # load results of all parts from tmp dir
         part_dict = {}
+        part_list = []
+        is_list_result = False
         for i in range(world_size):
             part_file = io.open(osp.join(tmpdir, f'part_{i}.pkl'), 'rb')
-            for k, v in mmcv.load(part_file, file_format='pkl').items():
-                if k not in part_dict:
-                    part_dict[k] = []
-                part_dict[k].extend(v)
-        # # sort the results
-        # ordered_results = []
-        # for res in zip(*part_list):
-        #     ordered_results.extend(list(res))
-        # the dataloader may pad some samples
-        ordered_results = {k: v[:size] for k, v in part_dict.items()}
+            load_result = mmcv.load(part_file, file_format='pkl')
+            if isinstance(load_result, (list, tuple)):
+                is_list_result = True
+                part_list.append(load_result)
+            else:
+                for k, v in load_result.items():
+                    if k not in part_dict:
+                        part_dict[k] = []
+                    part_dict[k].extend(v)
+
+        # sort the results
+        if is_list_result:
+            ordered_results = []
+            for res in part_list:
+                ordered_results.extend(list(res))
+            # the dataloader may pad some samples
+            ordered_results = ordered_results[:size]
+        else:
+            # the dataloader may pad some samples
+            ordered_results = {k: v[:size] for k, v in part_dict.items()}
+
         # remove tmp dir
        io.rmtree(tmpdir)
         return ordered_results
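
For list results, the rank-0 merge in `collect_results_cpu` (and in `collect_results_gpu` below) reduces to concatenate-then-truncate, since the distributed sampler may pad the last batch. A condensed sketch of that contract:

# Sketch only: list parts are concatenated and cut to the true dataset size;
# dict parts are merged per key and each value list is cut to size.
def merge_parts(parts, size):
    if parts and isinstance(parts[0], (list, tuple)):
        merged = []
        for part in parts:
            merged.extend(list(part))
        return merged[:size]  # drop padded samples
    merged = {}
    for part in parts:
        for k, v in part.items():
            merged.setdefault(k, []).extend(v)
    return {k: v[:size] for k, v in merged.items()}
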
@@ -330,18 +366,27 @@ def collect_results_gpu(result_part, size):
 
     if rank == 0:
         part_dict = {}
+        part_list = []
+        is_list_result = False
         for recv, shape in zip(part_recv_list, shape_list):
             result_part = pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())
-            for k, v in result_part.items():
-                if k not in part_dict:
-                    part_dict[k] = []
-                part_dict[k].extend(v)
+            if isinstance(result_part, (list, tuple)):
+                is_list_result = True
+                part_list.append(result_part)
+            else:
+                for k, v in result_part.items():
+                    if k not in part_dict:
+                        part_dict[k] = []
+                    part_dict[k].extend(v)
 
-        # # sort the results
-        # ordered_results = []
-        # for res in zip(*part_list):
-        #     ordered_results.extend(list(res))
-        # the dataloader may pad some samples
-        # ordered_results = ordered_results[:size]
-        ordered_results = {k: v[:size] for k, v in part_dict.items()}
+        # sort the results
+        if is_list_result:
+            ordered_results = []
+            for res in part_list:
+                ordered_results.extend(list(res))
+            # the dataloader may pad some samples
+            ordered_results = ordered_results[:size]
+        else:
+            # the dataloader may pad some samples
+            ordered_results = {k: v[:size] for k, v in part_dict.items()}
         return ordered_results

@@ -7,6 +7,7 @@ from .face_eval import FaceKeypointEvaluator
 from .faceid_pair_eval import FaceIDPairEvaluator
 from .keypoint_eval import KeyPointEvaluator
 from .mse_eval import MSEEvaluator
+from .nuscenes_eval import NuScenesEvaluator
 from .ocr_eval import OCRDetEvaluator, OCRRecEvaluator
 from .retrival_topk_eval import RetrivalTopKEvaluator
 from .segmentation_eval import SegmentationEvaluator

@@ -1,17 +1,16 @@
-# Copyright (c) OpenMMLab. All rights reserved.
+# Refer to https://github.com/fundamentalvision/BEVFormer/blob/master/projects/mmdet3d_plugin/datasets/nuscnes_eval.py
+# Copyright (c) Alibaba, Inc. and its affiliates.
-import argparse
 import copy
 import json
-import os
+import os.path as osp
-import time
 from typing import Any, Tuple
 
 import mmcv
 import numpy as np
 import tqdm
 from matplotlib import pyplot as plt
 from nuscenes import NuScenes
 from nuscenes.eval.common.config import config_factory
 from nuscenes.eval.common.data_classes import EvalBoxes
 from nuscenes.eval.common.loaders import (add_center_dist, filter_eval_boxes,
                                           load_prediction)
@@ -34,6 +33,10 @@ from nuscenes.utils.data_classes import Box
 from nuscenes.utils.geometry_utils import BoxVisibility, view_points
 from nuscenes.utils.splits import create_splits_scenes
 
+from easycv.core.evaluation.base_evaluator import Evaluator
+from easycv.core.evaluation.builder import EVALUATORS
+from .metric_registry import METRICS
+
 Axis = Any
 
 
@@ -112,7 +115,7 @@ def class_tp_curve(md_list: DetectionMetricDataList,
     plt.close()
 
 
-class DetectionBox_modified(DetectionBox):
+class CustomDetectionBox(DetectionBox):
 
     def __init__(self,
                  *args,
@@ -247,7 +250,7 @@ def load_gt(nusc: NuScenes, eval_split: str, box_cls, verbose: bool = False):
     """
 
     # Init.
-    if box_cls == DetectionBox_modified:
+    if box_cls == CustomDetectionBox:
         attribute_map = {a['token']: a['name'] for a in nusc.attribute}
 
     if verbose:
@@ -312,7 +315,7 @@
 
             sample_annotation = nusc.get('sample_annotation',
                                          sample_annotation_token)
-            if box_cls == DetectionBox_modified:
+            if box_cls == CustomDetectionBox:
                 # Get label name in detection task and filter unused labels.
                 detection_name = category_to_detection_name(
                     sample_annotation['category_name'])
@@ -515,7 +518,7 @@ def filter_eval_boxes_by_overlap(nusc: NuScenes,
     return eval_boxes
 
 
-class NuScenesEval_custom(NuScenesEval):
+class CustomNuScenesEval(NuScenesEval):
     """
     Dummy class for backward-compatibility. Same as DetectionEval.
     """
@@ -569,7 +572,7 @@ class NuScenesEval_custom(NuScenesEval):
             DetectionBox,
             verbose=verbose)
         self.gt_boxes = load_gt(
-            self.nusc, self.eval_set, DetectionBox_modified, verbose=verbose)
+            self.nusc, self.eval_set, CustomDetectionBox, verbose=verbose)
 
         assert set(self.pred_boxes.sample_tokens) == set(self.gt_boxes.sample_tokens), \
             "Samples in split doesn't match samples in predictions."
@@ -749,83 +752,106 @@ class NuScenesEval_custom(NuScenesEval):
                 savepath=savepath('dist_pr_' + str(dist_th)))
 
 
-if __name__ == '__main__':
+@EVALUATORS.register_module()
+class NuScenesEvaluator(Evaluator):
+    """NuScenes evaluator.
+    Args:
+        classes (list): List of class names.
+        result_names (list[str], optional): Result names in the metric prefix.
+            Default: ['pts_bbox'].
+        eval_version (str, optional): Name of desired configuration in eval_detection_configs.
+            Default: 'detection_cvpr_2019'.
+        dataset_name (str, optional): Dataset name to be evaluated.
+        metric_names (List[str]): Metric names this evaluator will return.
+    """
+    ErrNameMapping = {
+        'trans_err': 'mATE',
+        'scale_err': 'mASE',
+        'orient_err': 'mAOE',
+        'vel_err': 'mAVE',
+        'attr_err': 'mAAE'
+    }
 
-    # Settings.
-    parser = argparse.ArgumentParser(
-        description='Evaluate nuScenes detection results.',
-        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-    parser.add_argument(
-        'result_path', type=str, help='The submission as a JSON file.')
-    parser.add_argument(
-        '--output_dir',
-        type=str,
-        default='~/nuscenes-metrics',
-        help=
-        'Folder to store result metrics, graphs and example visualizations.')
-    parser.add_argument(
-        '--eval_set',
-        type=str,
-        default='val',
-        help='Which dataset split to evaluate on, train, val or test.')
-    parser.add_argument(
-        '--dataroot',
-        type=str,
-        default='data/nuscenes',
-        help='Default nuScenes data directory.')
-    parser.add_argument(
-        '--version',
-        type=str,
-        default='v1.0-trainval',
-        help=
-        'Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.'
-    )
-    parser.add_argument(
-        '--config_path',
-        type=str,
-        default='',
-        help='Path to the configuration file.'
-        'If no path given, the CVPR 2019 configuration will be used.')
-    parser.add_argument(
-        '--plot_examples',
-        type=int,
-        default=0,
-        help='How many example visualizations to write to disk.')
-    parser.add_argument(
-        '--render_curves',
-        type=int,
-        default=1,
-        help='Whether to render PR and TP curves to disk.')
-    parser.add_argument(
-        '--verbose', type=int, default=1, help='Whether to print to stdout.')
-    args = parser.parse_args()
+    def __init__(self,
+                 classes,
+                 result_names=['pts_bbox'],
+                 eval_version='detection_cvpr_2019',
+                 dataset_name=None,
+                 metric_names=['mAP']):
+        super().__init__(dataset_name=dataset_name, metric_names=metric_names)
+        self.classes = classes
+        self.result_names = result_names
+        self.eval_version = eval_version
+        from nuscenes.eval.detection.config import config_factory
+        self.eval_detection_configs = config_factory(self.eval_version)
 
-    result_path_ = os.path.expanduser(args.result_path)
-    output_dir_ = os.path.expanduser(args.output_dir)
-    eval_set_ = args.eval_set
-    dataroot_ = args.dataroot
-    version_ = args.version
-    config_path = args.config_path
-    plot_examples_ = args.plot_examples
-    render_curves_ = bool(args.render_curves)
-    verbose_ = bool(args.verbose)
+    def _evaluate_single(self, result_path, nusc, result_name='pts_bbox'):
+        """Evaluation for a single model in nuScenes protocol.
 
-    if config_path == '':
-        cfg_ = config_factory('detection_cvpr_2019')
-    else:
-        with open(config_path, 'r') as _f:
-            cfg_ = DetectionConfig.deserialize(json.load(_f))
+        Args:
+            result_path (str): Path of the result file.
+            nusc: Instance of nuscenes.NuScenes.
+            result_name (str, optional): Result name in the metric prefix.
+                Default: 'pts_bbox'.
 
-    nusc_ = NuScenes(version=version_, verbose=verbose_, dataroot=dataroot_)
-    nusc_eval = NuScenesEval_custom(
-        nusc_,
-        config=cfg_,
-        result_path=result_path_,
-        eval_set=eval_set_,
-        output_dir=output_dir_,
-        verbose=verbose_)
-    for vis in ['1', '2', '3', '4']:
-        nusc_eval.update_gt(type_='vis', visibility=vis)
-        print(f'================ {vis} ===============')
-        nusc_eval.main(
-            plot_examples=plot_examples_, render_curves=render_curves_)
+        Returns:
+            dict: Dictionary of evaluation details.
+        """
+        from nuscenes.eval.detection.evaluate import NuScenesEval
+
+        output_dir = osp.join(*osp.split(result_path)[:-1])
+        eval_set_map = {
+            'v1.0-mini': 'mini_val',
+            'v1.0-trainval': 'val',
+        }
+        nusc_eval = NuScenesEval(
+            nusc,
+            config=self.eval_detection_configs,
+            result_path=result_path,
+            eval_set=eval_set_map[nusc.version],
+            output_dir=output_dir,
+            verbose=False)
+        nusc_eval.main(render_curves=False)
+
+        # record metrics
+        metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json'))
+        detail = dict()
+        metric_prefix = f'{result_name}_NuScenes'
+        for name in self.classes:
+            for k, v in metrics['label_aps'][name].items():
+                val = float('{:.4f}'.format(v))
+                detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val
+            for k, v in metrics['label_tp_errors'][name].items():
+                val = float('{:.4f}'.format(v))
+                detail['{}/{}_{}'.format(metric_prefix, name, k)] = val
+        for k, v in metrics['tp_errors'].items():
+            val = float('{:.4f}'.format(v))
+            detail['{}/{}'.format(metric_prefix,
+                                  self.ErrNameMapping[k])] = val
+
+        detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']
+        detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']
+        return detail
+
+    def _evaluate_impl(self, prediction, groundtruth, **kwargs):
+        """
+        Args:
+            prediction (str | Dict[str]): Path of the result file or dict of path of the result file.
+            groundtruth: Instance of nuscenes.NuScenes.
+        """
+        result_files = prediction
+        nusc = groundtruth
+
+        if isinstance(result_files, dict):
+            results_dict = dict()
+            for name in self.result_names:
+                print('Evaluating bboxes of {}'.format(name))
+                ret_dict = self._evaluate_single(result_files[name], nusc)
+                results_dict.update(ret_dict)
+        elif isinstance(result_files, str):
+            results_dict = self._evaluate_single(result_files, nusc)
+
+        return results_dict
+
+
+METRICS.register_default_best_metric(NuScenesEvaluator, 'mAP', 'max')
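
A hypothetical standalone use of the new evaluator, mirroring how `NuScenesDataset.evaluate` calls it below; the result-file path and data root are placeholders:

# `evaluate` comes from the Evaluator base class and dispatches to
# _evaluate_impl above; the metric key layout follows _evaluate_single.
from nuscenes import NuScenes

from easycv.core.evaluation import NuScenesEvaluator

CLASSES = [
    'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
    'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
nusc = NuScenes(version='v1.0-mini', dataroot='data/nuscenes', verbose=False)
evaluator = NuScenesEvaluator(classes=CLASSES, result_names=['pts_bbox'])
metrics = evaluator.evaluate('work_dirs/results_nusc.json', nusc)
print(metrics['pts_bbox_NuScenes/mAP'], metrics['pts_bbox_NuScenes/NDS'])
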
@@ -17,7 +17,6 @@ from easycv.core.bbox import Box3DMode, Coord3DMode, LiDARInstance3DBoxes
 from easycv.datasets.registry import DATASETS
 from easycv.datasets.shared.pipelines import Compose
 from .base import Custom3DDataset
-from .nuscenes_eval import NuScenesEval_custom
 
 
 @DATASETS.register_module()
@@ -373,64 +372,6 @@ class NuScenesDataset(Custom3DDataset):
         mmcv.dump(nusc_submissions, res_path)
         return res_path
 
-    def _evaluate_single(self,
-                         result_path,
-                         logger=None,
-                         metric='bbox',
-                         result_name='pts_bbox'):
-        """Evaluation for a single model in nuScenes protocol.
-
-        Args:
-            result_path (str): Path of the result file.
-            logger (logging.Logger | str, optional): Logger used for printing
-                related information during evaluation. Default: None.
-            metric (str, optional): Metric name used for evaluation.
-                Default: 'bbox'.
-            result_name (str, optional): Result name in the metric prefix.
-                Default: 'pts_bbox'.
-
-        Returns:
-            dict: Dictionary of evaluation details.
-        """
-        from nuscenes import NuScenes
-        from nuscenes.eval.detection.evaluate import NuScenesEval
-
-        output_dir = osp.join(*osp.split(result_path)[:-1])
-        nusc = NuScenes(
-            version=self.version, dataroot=self.data_root, verbose=False)
-        eval_set_map = {
-            'v1.0-mini': 'mini_val',
-            'v1.0-trainval': 'val',
-        }
-        nusc_eval = NuScenesEval(
-            nusc,
-            config=self.eval_detection_configs,
-            result_path=result_path,
-            eval_set=eval_set_map[self.version],
-            output_dir=output_dir,
-            verbose=False)
-        nusc_eval.main(render_curves=False)
-
-        # record metrics
-        metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json'))
-        detail = dict()
-        metric_prefix = f'{result_name}_NuScenes'
-        for name in self.CLASSES:
-            for k, v in metrics['label_aps'][name].items():
-                val = float('{:.4f}'.format(v))
-                detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val
-            for k, v in metrics['label_tp_errors'][name].items():
-                val = float('{:.4f}'.format(v))
-                detail['{}/{}_{}'.format(metric_prefix, name, k)] = val
-        for k, v in metrics['tp_errors'].items():
-            val = float('{:.4f}'.format(v))
-            detail['{}/{}'.format(metric_prefix,
-                                  self.ErrNameMapping[k])] = val
-
-        detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']
-        detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']
-        return detail
-
     def format_results(self, results, jsonfile_prefix=None):
         """Format the results to json (standard format for COCO evaluation).
 
@@ -478,19 +419,17 @@ class NuScenesDataset(Custom3DDataset):
 
     def evaluate(self,
                  results,
                  metric='bbox',
                  logger=None,
                  jsonfile_prefix=None,
-                 result_names=['pts_bbox'],
                  show=False,
                  out_dir=None,
-                 pipeline=None):
+                 pipeline=None,
+                 evaluators=[],
+                 **kwargs):
         """Evaluation in nuScenes protocol.
 
         Args:
             results (list[dict]): Testing results of the dataset.
             metric (str | list[str], optional): Metrics to be evaluated.
                 Default: 'bbox'.
             logger (logging.Logger | str, optional): Logger used for printing
                 related information during evaluation. Default: None.
             jsonfile_prefix (str, optional): The prefix of json files including
@@ -507,15 +446,14 @@ class NuScenesDataset(Custom3DDataset):
             dict[str, float]: Results of each evaluation metric.
         """
         result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
+        from nuscenes import NuScenes
 
-        if isinstance(result_files, dict):
-            results_dict = dict()
-            for name in result_names:
-                print('Evaluating bboxes of {}'.format(name))
-                ret_dict = self._evaluate_single(result_files[name])
-                results_dict.update(ret_dict)
-        elif isinstance(result_files, str):
-            results_dict = self._evaluate_single(result_files)
+        nusc = NuScenes(
+            version=self.version, dataroot=self.data_root, verbose=False)
+
+        results_dict = {}
+        for evaluator in evaluators:
+            results_dict.update(evaluator.evaluate(result_files, nusc))
 
         if tmp_dir is not None:
             tmp_dir.cleanup()
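
After this refactor the dataset no longer hardwires its own `_evaluate_single`; it formats results, builds the NuScenes handle, and delegates to whatever evaluators the eval pipeline injects. A hypothetical direct call, assuming `dataset` and `results` from a test loop:

# Sketch only: evaluate dist-collected `results` with an injected evaluator.
from easycv.core.evaluation import NuScenesEvaluator

results_dict = dataset.evaluate(
    results,  # per-sample predictions from single_gpu_test/multi_gpu_test
    evaluators=[NuScenesEvaluator(classes=dataset.CLASSES)])
print(results_dict)
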
@@ -828,66 +766,8 @@ class CustomNuScenesDataset(NuScenesDataset):
         if self.test_mode:
             return self.prepare_test_data(idx)
         while True:
 
             data = self.prepare_train_data(idx)
             if data is None:
                 idx = self._rand_another(idx)
                 continue
             return data
-
-    def _evaluate_single(self,
-                         result_path,
-                         logger=None,
-                         metric='bbox',
-                         result_name='pts_bbox'):
-        """Evaluation for a single model in nuScenes protocol.
-
-        Args:
-            result_path (str): Path of the result file.
-            logger (logging.Logger | str | None): Logger used for printing
-                related information during evaluation. Default: None.
-            metric (str): Metric name used for evaluation. Default: 'bbox'.
-            result_name (str): Result name in the metric prefix.
-                Default: 'pts_bbox'.
-
-        Returns:
-            dict: Dictionary of evaluation details.
-        """
-        from nuscenes import NuScenes
-        self.nusc = NuScenes(
-            version=self.version, dataroot=self.data_root, verbose=True)
-
-        output_dir = osp.join(*osp.split(result_path)[:-1])
-
-        eval_set_map = {
-            'v1.0-mini': 'mini_val',
-            'v1.0-trainval': 'val',
-        }
-        self.nusc_eval = NuScenesEval_custom(
-            self.nusc,
-            config=self.eval_detection_configs,
-            result_path=result_path,
-            eval_set=eval_set_map[self.version],
-            output_dir=output_dir,
-            verbose=True,
-            overlap_test=self.overlap_test,
-            data_infos=self.data_infos)
-        self.nusc_eval.main(plot_examples=0, render_curves=False)
-        # record metrics
-        metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json'))
-        detail = dict()
-        metric_prefix = f'{result_name}_NuScenes'
-        for name in self.CLASSES:
-            for k, v in metrics['label_aps'][name].items():
-                val = float('{:.4f}'.format(v))
-                detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val
-            for k, v in metrics['label_tp_errors'][name].items():
-                val = float('{:.4f}'.format(v))
-                detail['{}/{}_{}'.format(metric_prefix, name, k)] = val
-        for k, v in metrics['tp_errors'].items():
-            val = float('{:.4f}'.format(v))
-            detail['{}/{}'.format(metric_prefix,
-                                  self.ErrNameMapping[k])] = val
-        detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']
-        detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']
-        return detail

@@ -4,9 +4,9 @@ import warnings
 from copy import deepcopy
 
 import mmcv
-from mmdet3d.datasets.pipelines.compose import Compose
 
 from easycv.datasets.registry import PIPELINES
+from easycv.datasets.shared.pipelines import Compose
 
 
 @PIPELINES.register_module()

@@ -204,7 +204,7 @@ class BEVFormer(MVXTwoStageDetector):
         losses.update(losses_pts)
         return losses
 
-    def forward_test(self, img_metas, img=None, **kwargs):
+    def forward_test(self, img_metas, img=None, rescale=True, **kwargs):
         for var, name in [(img_metas, 'img_metas')]:
             if not isinstance(var, list):
                 raise TypeError('{} must be a list, but got {}'.format(
@@ -237,6 +237,7 @@
             img_metas[0],
             img[0],
             prev_bev=self.prev_frame_info['prev_bev'],
+            rescale=rescale,
             **kwargs)
         # During inference, we save the BEV features and ego motion of each timestamp.
         self.prev_frame_info['prev_pos'] = tmp_pos