Mirror of https://github.com/open-mmlab/mmdeploy.git
69 lines, 2.3 KiB, Python
# Copyright (c) OpenMMLab. All rights reserved.
# The schedule is usually used by models trained on the KITTI dataset.
# The learning rate set in the cyclic schedule is the initial learning rate
# rather than the max learning rate. Since the target_ratio is (10, 1e-4),
# the learning rate will change from 0.0018 to 0.018, then go to 0.0018*1e-4.
lr = 0.0018
# The optimizer follows the setting in SECOND.Pytorch, but here we use
# the official AdamW optimizer implemented by PyTorch.
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='AdamW', lr=lr, betas=(0.95, 0.99), weight_decay=0.01),
    clip_grad=dict(max_norm=10, norm_type=2))
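# For reference, a rough plain-PyTorch equivalent of the wrapper above
# (a sketch only, assuming a built `model`; not part of this config):
#   optimizer = torch.optim.AdamW(
#       model.parameters(), lr=lr, betas=(0.95, 0.99), weight_decay=0.01)
#   # gradient clipping applied before each optimizer step
#   torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10, norm_type=2)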
# learning rate
param_scheduler = [
    # learning rate scheduler
    # During the first 16 epochs, the learning rate increases from lr to lr * 10;
    # during the next 24 epochs, the learning rate decreases from lr * 10 to
    # lr * 1e-4
    dict(
        type='CosineAnnealingLR',
        T_max=16,
        eta_min=lr * 10,
        begin=0,
        end=16,
        by_epoch=True,
        convert_to_iter_based=True),
    dict(
        type='CosineAnnealingLR',
        T_max=24,
        eta_min=lr * 1e-4,
        begin=16,
        end=40,
        by_epoch=True,
        convert_to_iter_based=True),
    # momentum scheduler
    # During the first 16 epochs, momentum decreases from its initial value
    # towards 0.85 / 0.95; during the next 24 epochs, it increases from
    # 0.85 / 0.95 towards 1
    dict(
        type='CosineAnnealingMomentum',
        T_max=16,
        eta_min=0.85 / 0.95,
        begin=0,
        end=16,
        by_epoch=True,
        convert_to_iter_based=True),
    dict(
        type='CosineAnnealingMomentum',
        T_max=24,
        eta_min=1,
        begin=16,
        end=40,
        by_epoch=True,
        convert_to_iter_based=True)
]

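# For intuition, each phase above follows the standard cosine annealing curve
# (a rough sketch; the actual schedule is converted to per-iteration steps):
#   eta_t = eta_min + (eta_0 - eta_min) * (1 + cos(pi * t / T_max)) / 2
# Phase 1 (epochs 0-16):  lr rises from 0.0018 (eta_0 = lr) to 0.018 (eta_min = lr * 10)
# Phase 2 (epochs 16-40): lr decays from 0.018 to 1.8e-7 (eta_min = lr * 1e-4)
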
# Runtime settings: training schedule for 40e.
# Although max_epochs is 40, this schedule is usually used with
# RepeatDataset with repeat ratio N, thus the actual max epoch
# number could be N x 40.
train_cfg = dict(by_epoch=True, max_epochs=40, val_interval=1)
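# A hypothetical illustration of the RepeatDataset wrapper mentioned above,
# as it typically appears in the KITTI dataset configs (sketch only; the real
# dataset settings live in the dataset config, not in this schedule file):
#   train_dataloader = dict(
#       dataset=dict(
#           type='RepeatDataset',
#           times=N,  # effective epochs become N x 40
#           dataset=dict(type='KittiDataset', ...)))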
val_cfg = dict()
test_cfg = dict()

# Default setting for scaling LR automatically
#   - `enable` means whether to enable automatic LR scaling by default.
#   - `base_batch_size` = (8 GPUs) x (6 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=48)
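# When `enable=True`, the runner rescales the learning rate by the linear
# scaling rule (a sketch; the exact behavior is up to the mmengine runner):
#   scaled_lr = lr * (num_gpus * samples_per_gpu) / base_batch_size
# e.g. 4 GPUs x 6 samples per GPU -> 0.0018 * 24 / 48 = 0.0009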