Migrate configs to new styles

pull/1178/head
gaotongxiao 2022-05-13 15:55:06 +08:00
parent cb85f857aa
commit 98d9d39505
14 changed files with 118 additions and 106 deletions

View File

@@ -1,17 +1,21 @@
# yapf:disable
log_config = dict(
interval=5,
hooks=[
dict(type='TextLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
# custom_imports = dict(imports=['mmcv.transforms'], allow_failed_imports=False) # noqa
default_scope = 'mmocr'
default_hooks = dict(
optimizer=dict(type='OptimizerHook', grad_clip=None),
timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=5),
param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(type='CheckpointHook', interval=1),
sampler_seed=dict(type='DistSamplerSeedHook'),
)
env_cfg = dict(
cudnn_benchmark=True,
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
dist_cfg=dict(backend='nccl'),
)
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = 0
# set multi-process start method as `fork` to speed up the training
mp_start_method = 'fork'
resume = False

View File

@@ -1,8 +0,0 @@
# optimizer
optimizer = dict(type='Adadelta', lr=0.5)
optimizer_config = dict(grad_clip=dict(max_norm=0.5))
# learning policy
lr_config = dict(policy='step', step=[8, 14, 16])
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=18)
checkpoint_config = dict(interval=1)

View File

@@ -1,8 +1,10 @@
# optimizer
optimizer = dict(type='Adadelta', lr=1.0)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step', step=[])
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=5)
checkpoint_config = dict(interval=1)
train_cfg = dict(by_epoch=True, max_epochs=5)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning rate
param_scheduler = [
dict(type='ConstantLR'),
]

View File

@@ -1,8 +1,11 @@
# optimizer
optimizer = dict(type='Adam', lr=1e-3)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='poly', power=0.9)
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=600)
checkpoint_config = dict(interval=100)
train_cfg = dict(by_epoch=True, max_epochs=600)
val_cfg = dict(interval=20)
test_cfg = dict()
# learning rate
param_scheduler = [
dict(type='PolyLR', power=0.9, end=600),
]

View File

@@ -1,12 +1,12 @@
# optimizer
optimizer = dict(type='Adam', lr=4e-4)
optimizer_config = dict(grad_clip=None)
train_cfg = dict(by_epoch=True, max_epochs=12)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=100,
warmup_ratio=1.0 / 3,
step=[11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
checkpoint_config = dict(interval=1)
param_scheduler = [
dict(type='LinearLR', end=100, by_epoch=False),
dict(type='MultiStepLR', milestones=[11], end=12),
]

View File

@@ -1,14 +1,12 @@
# optimizer
optimizer = dict(type='Adam', lr=1e-4)
optimizer_config = dict(grad_clip=None)
train_cfg = dict(by_epoch=True, max_epochs=20)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning policy
lr_config = dict(
policy='step',
step=[16, 18],
warmup='linear',
warmup_iters=1,
warmup_ratio=0.001,
warmup_by_epoch=True)
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=20)
checkpoint_config = dict(interval=1)
param_scheduler = [
dict(type='LinearLR', end=1, start_factor=0.001),
dict(type='MultiStepLR', milestones=[16, 18], end=20),
]

View File

@@ -1,8 +1,11 @@
# optimizer
optimizer = dict(type='Adam', lr=1e-3)
optimizer_config = dict(grad_clip=None)
train_cfg = dict(by_epoch=True, max_epochs=5)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning policy
lr_config = dict(policy='step', step=[3, 4])
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=5)
checkpoint_config = dict(interval=1)
param_scheduler = [
dict(type='MultiStepLR', milestones=[3, 4], end=5),
]

View File

@@ -1,8 +1,11 @@
# optimizer
optimizer = dict(type='Adam', lr=1e-4)
optimizer_config = dict(grad_clip=None)
train_cfg = dict(by_epoch=True, max_epochs=600)
val_cfg = dict(interval=40)
test_cfg = dict()
# learning policy
lr_config = dict(policy='step', step=[200, 400])
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=600)
checkpoint_config = dict(interval=100)
param_scheduler = [
dict(type='MultiStepLR', milestones=[200, 400], end=600),
]

View File

@@ -1,8 +1,8 @@
# optimizer
optimizer = dict(type='Adam', lr=1e-3)
optimizer_config = dict(grad_clip=None)
_base_ = 'schedule_adam_step_5e.py'
train_cfg = dict(by_epoch=True, max_epochs=6)
# learning policy
lr_config = dict(policy='step', step=[3, 4])
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=6)
checkpoint_config = dict(interval=1)
param_scheduler = [
dict(type='MultiStepLR', milestones=[3, 4], end=6),
]

View File

@@ -1,8 +1,11 @@
# optimizer
optimizer = dict(type='SGD', lr=0.007, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
train_cfg = dict(by_epoch=False, max_iters=100000)
val_cfg = dict(interval=100001) # Never evaluate
test_cfg = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-7, by_epoch=False)
# running settings
runner = dict(type='IterBasedRunner', max_iters=100000)
checkpoint_config = dict(interval=10000)
param_scheduler = [
dict(type='PolyLR', power=0.9, eta_min=1e-7, by_epoch=False, end=100000),
]

View File

@@ -1,8 +1,11 @@
# optimizer
optimizer = dict(type='SGD', lr=0.007, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
train_cfg = dict(by_epoch=True, max_epochs=1200)
val_cfg = dict(interval=20) # Evaluate every 20 epochs
test_cfg = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-7, by_epoch=True)
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=1200)
checkpoint_config = dict(interval=100)
param_scheduler = [
dict(type='PolyLR', power=0.9, eta_min=1e-7, end=1200),
]

View File

@@ -1,8 +1,8 @@
# optimizer
optimizer = dict(type='SGD', lr=1e-3, momentum=0.90, weight_decay=5e-4)
optimizer_config = dict(grad_clip=None)
train_cfg = dict(by_epoch=True, max_epochs=1500)
val_cfg = dict(interval=20) # Evaluate every 20 epochs
test_cfg = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-7, by_epoch=True)
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=1500)
checkpoint_config = dict(interval=100)
param_scheduler = [
dict(type='PolyLR', power=0.9, eta_min=1e-7, end=1500),
]

View File

@@ -1,13 +1,12 @@
# optimizer
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
train_cfg = dict(by_epoch=True, max_epochs=160)
val_cfg = dict(interval=20)
test_cfg = dict()
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[80, 128])
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=160)
checkpoint_config = dict(interval=10)
param_scheduler = [
dict(type='LinearLR', end=500, start_factor=0.001, by_epoch=False),
dict(type='MultiStepLR', milestones=[80, 128], end=160),
]

View File

@@ -1,8 +1,10 @@
# optimizer
optimizer = dict(type='SGD', lr=1e-3, momentum=0.99, weight_decay=5e-4)
optimizer_config = dict(grad_clip=None)
train_cfg = dict(by_epoch=True, max_epochs=600)
val_cfg = dict(interval=50)
test_cfg = dict()
# learning policy
lr_config = dict(policy='step', step=[200, 400])
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=600)
checkpoint_config = dict(interval=100)
param_scheduler = [
dict(type='MultiStepLR', milestones=[200, 400], end=600),
]