# Copyright (c) OpenMMLab. All rights reserved.
# If the point cloud range is changed, the models should also change their
# point cloud range accordingly.
point_cloud_range = [-50, -50, -5, 50, 50, 3]
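# The range is given as [x_min, y_min, z_min, x_max, y_max, z_max] in meters,
# in the LiDAR coordinate frame.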
# For nuScenes we usually do 10-class detection.
class_names = [
    'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle',
    'motorcycle', 'pedestrian', 'traffic_cone', 'barrier'
]
metainfo = dict(CLASSES=class_names)
dataset_type = 'NuScenesDataset'
data_root = 'data/nuscenes/'
# Input modality for the nuScenes dataset. This is consistent with the
# submission format, which requires the information in input_modality.
input_modality = dict(use_lidar=True, use_camera=False)
data_prefix = dict(pts='samples/LIDAR_TOP', img='', sweeps='sweeps/LIDAR_TOP')
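# data_prefix maps each modality to its sub-directory under data_root: the
# key-frame LiDAR scans live in 'samples/LIDAR_TOP', while the intermediate
# (non key-frame) scans used by LoadPointsFromMultiSweeps live in
# 'sweeps/LIDAR_TOP'.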

file_client_args = dict(backend='disk')
# Uncomment the following if you use ceph or another file client.
# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
# for more details.
# file_client_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/nuscenes/': 's3://nuscenes/nuscenes/',
#         'data/nuscenes/': 's3://nuscenes/nuscenes/'
#     }))

train_pipeline = [
    dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=5, use_dim=5),
    dict(type='LoadPointsFromMultiSweeps', sweeps_num=10),
    dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
    dict(
        type='GlobalRotScaleTrans',
        rot_range=[-0.3925, 0.3925],
        scale_ratio_range=[0.95, 1.05],
        translation_std=[0, 0, 0]),
    dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
    dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
    dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
    dict(type='ObjectNameFilter', classes=class_names),
    dict(type='PointShuffle'),
    dict(
        type='Pack3DDetInputs',
        keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
]
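# Note on the point loaders above: nuScenes LiDAR points typically carry five
# channels per point (x, y, z, intensity and, depending on the data converter,
# ring index or sweep timestamp), hence load_dim=5/use_dim=5.
# LoadPointsFromMultiSweeps additionally aggregates up to sweeps_num previous
# sweeps into each sample.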
test_pipeline = [
    dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=5, use_dim=5),
    dict(type='LoadPointsFromMultiSweeps', sweeps_num=10, test_mode=True),
    dict(
        type='MultiScaleFlipAug3D',
        img_scale=(1333, 800),
        pts_scale_ratio=1,
        flip=False,
        transforms=[
            dict(
                type='GlobalRotScaleTrans',
                rot_range=[0, 0],
                scale_ratio_range=[1., 1.],
                translation_std=[0, 0, 0]),
            dict(type='RandomFlip3D'),
            dict(
                type='PointsRangeFilter', point_cloud_range=point_cloud_range)
        ]),
    dict(type='Pack3DDetInputs', keys=['points'])
]
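# With flip=False, rot_range=[0, 0] and scale_ratio_range=[1., 1.],
# MultiScaleFlipAug3D performs a single identity pass here; it effectively
# only wraps the point range filtering applied at test time.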
# Construct a pipeline for data and ground-truth loading in the show function.
# Please keep its loading transforms consistent with test_pipeline
# (e.g. the file client).
eval_pipeline = [
    dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=5, use_dim=5),
    dict(type='LoadPointsFromMultiSweeps', sweeps_num=10, test_mode=True),
    dict(type='Pack3DDetInputs', keys=['points'])
]
train_dataloader = dict(
    batch_size=4,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='nuscenes_infos_train.pkl',
        pipeline=train_pipeline,
        metainfo=metainfo,
        modality=input_modality,
        test_mode=False,
        data_prefix=data_prefix,
        # We use box_type_3d='LiDAR' in the KITTI and nuScenes datasets
        # and box_type_3d='Depth' in the SUN RGB-D and ScanNet datasets.
        box_type_3d='LiDAR'))
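# batch_size and num_workers are per GPU/process; the effective global batch
# size scales with the number of training processes.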
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='nuscenes_infos_val.pkl',
        pipeline=test_pipeline,
        metainfo=metainfo,
        modality=input_modality,
        data_prefix=data_prefix,
        test_mode=True,
        box_type_3d='LiDAR'))
val_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='nuscenes_infos_val.pkl',
        pipeline=test_pipeline,
        metainfo=metainfo,
        modality=input_modality,
        test_mode=True,
        data_prefix=data_prefix,
        box_type_3d='LiDAR'))

val_evaluator = dict(
    type='NuScenesMetric',
    data_root=data_root,
    ann_file=data_root + 'nuscenes_infos_val.pkl',
    metric='bbox')
test_evaluator = val_evaluator
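# NuScenesMetric evaluates against the official nuScenes detection protocol
# (per-class AP and the aggregated NDS score) using the val annotation file.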

vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
    type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer')
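
# Minimal usage sketch (not part of the config itself; assumes mmengine and
# mmdet3d are installed, their registries initialized, e.g. via
# mmdet3d.utils.register_all_modules(), and the nuScenes info files generated):
#
#   from mmengine.config import Config
#   from mmengine.runner import Runner
#
#   cfg = Config.fromfile('configs/_base_/datasets/nus-3d.py')  # hypothetical path
#   train_loader = Runner.build_dataloader(cfg.train_dataloader)
#   batch = next(iter(train_loader))  # dict with 'inputs' and 'data_samples'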