mirror of https://github.com/open-mmlab/mmocr.git
Migrate configs to new styles
parent cb85f857aa
commit 98d9d39505
@@ -1,17 +1,21 @@
# yapf:disable
log_config = dict(
    interval=5,
    hooks=[
        dict(type='TextLoggerHook')
    ])
# yapf:enable
dist_params = dict(backend='nccl')
# custom_imports = dict(imports=['mmcv.transforms'], allow_failed_imports=False) # noqa
default_scope = 'mmocr'

default_hooks = dict(
    optimizer=dict(type='OptimizerHook', grad_clip=None),
    timer=dict(type='IterTimerHook'),
    logger=dict(type='LoggerHook', interval=5),
    param_scheduler=dict(type='ParamSchedulerHook'),
    checkpoint=dict(type='CheckpointHook', interval=1),
    sampler_seed=dict(type='DistSamplerSeedHook'),
)

env_cfg = dict(
    cudnn_benchmark=True,
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    dist_cfg=dict(backend='nccl'),
)

log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]

# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = 0
# set multi-process start method as `fork` to speed up the training
mp_start_method = 'fork'
resume = False
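For orientation, the legacy runtime keys and the MMEngine-style keys in this hunk carry the same information under different names; a minimal side-by-side sketch, using only values that appear above:

# legacy (MMCV 0.x) runtime keys
log_config = dict(interval=5, hooks=[dict(type='TextLoggerHook')])
dist_params = dict(backend='nccl')
opencv_num_threads = 0
mp_start_method = 'fork'

# new (MMEngine) runtime keys: logging moves into default_hooks,
# distributed / multiprocessing settings move into env_cfg
default_hooks = dict(logger=dict(type='LoggerHook', interval=5))
env_cfg = dict(
    dist_cfg=dict(backend='nccl'),
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
)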
@@ -1,8 +0,0 @@
# optimizer
optimizer = dict(type='Adadelta', lr=0.5)
optimizer_config = dict(grad_clip=dict(max_norm=0.5))
# learning policy
lr_config = dict(policy='step', step=[8, 14, 16])
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=18)
checkpoint_config = dict(interval=1)
@@ -1,8 +1,10 @@
# optimizer
optimizer = dict(type='Adadelta', lr=1.0)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step', step=[])
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=5)
checkpoint_config = dict(interval=1)

train_cfg = dict(by_epoch=True, max_epochs=5)
val_cfg = dict(interval=1)
test_cfg = dict()

# learning rate
param_scheduler = [
    dict(type='ConstantLR'),
]
@@ -1,8 +1,11 @@
# optimizer
optimizer = dict(type='Adam', lr=1e-3)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='poly', power=0.9)
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=600)
checkpoint_config = dict(interval=100)

train_cfg = dict(by_epoch=True, max_epochs=600)
val_cfg = dict(interval=20)
test_cfg = dict()

# learning rate
param_scheduler = [
    dict(type='PolyLR', power=0.9, end=600),
]
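The poly-decay schedules follow the same translation pattern: the old lr_config policy becomes an entry in param_scheduler, with the decay horizon spelled out via end (epoch-based unless by_epoch=False is given). A minimal sketch restating the mapping from the hunk above:

# old style: poly decay over the whole run
lr_config = dict(policy='poly', power=0.9)
runner = dict(type='EpochBasedRunner', max_epochs=600)

# new style: the same decay as a scheduler entry ending at epoch 600
param_scheduler = [
    dict(type='PolyLR', power=0.9, end=600),
]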
@@ -1,12 +1,12 @@
# optimizer
optimizer = dict(type='Adam', lr=4e-4)
optimizer_config = dict(grad_clip=None)

train_cfg = dict(by_epoch=True, max_epochs=12)
val_cfg = dict(interval=1)
test_cfg = dict()

# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=100,
    warmup_ratio=1.0 / 3,
    step=[11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
checkpoint_config = dict(interval=1)
param_scheduler = [
    dict(type='LinearLR', end=100, by_epoch=False),
    dict(type='MultiStepLR', milestones=[11], end=12),
]
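Warmup is no longer a field of lr_config; it becomes its own scheduler placed before the step decay. A sketch of the translation this hunk shows, where the 100-iteration linear warmup turns into an iteration-based LinearLR followed by an epoch-based MultiStepLR (the 1/3 warmup_ratio is presumably left to LinearLR's default start factor):

# old style: warmup folded into lr_config
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=100,
    warmup_ratio=1.0 / 3,
    step=[11])

# new style: two chained schedulers
param_scheduler = [
    dict(type='LinearLR', end=100, by_epoch=False),       # warmup for 100 iterations
    dict(type='MultiStepLR', milestones=[11], end=12),    # then step decay by epoch
]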
@@ -1,14 +1,12 @@
# optimizer
optimizer = dict(type='Adam', lr=1e-4)
optimizer_config = dict(grad_clip=None)

train_cfg = dict(by_epoch=True, max_epochs=20)
val_cfg = dict(interval=1)
test_cfg = dict()

# learning policy
lr_config = dict(
    policy='step',
    step=[16, 18],
    warmup='linear',
    warmup_iters=1,
    warmup_ratio=0.001,
    warmup_by_epoch=True)
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=20)
checkpoint_config = dict(interval=1)
param_scheduler = [
    dict(type='LinearLR', end=1, start_factor=0.001),
    dict(type='MultiStepLR', milestones=[16, 18], end=20),
]
@@ -1,8 +1,11 @@
# optimizer
optimizer = dict(type='Adam', lr=1e-3)
optimizer_config = dict(grad_clip=None)

train_cfg = dict(by_epoch=True, max_epochs=5)
val_cfg = dict(interval=1)
test_cfg = dict()

# learning policy
lr_config = dict(policy='step', step=[3, 4])
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=5)
checkpoint_config = dict(interval=1)
param_scheduler = [
    dict(type='MultiStepLR', milestones=[3, 4], end=5),
]
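This is the simplest case of the recurring pattern in these schedule files: the step list of the old lr_config becomes the milestones of a MultiStepLR entry, and the runner's max_epochs reappears as both train_cfg.max_epochs and the scheduler's end. Restated compactly from the hunk above:

# old style
lr_config = dict(policy='step', step=[3, 4])
runner = dict(type='EpochBasedRunner', max_epochs=5)

# new style
train_cfg = dict(by_epoch=True, max_epochs=5)
param_scheduler = [
    dict(type='MultiStepLR', milestones=[3, 4], end=5),
]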
@@ -1,8 +1,11 @@
# optimizer
optimizer = dict(type='Adam', lr=1e-4)
optimizer_config = dict(grad_clip=None)

train_cfg = dict(by_epoch=True, max_epochs=600)
val_cfg = dict(interval=40)
test_cfg = dict()

# learning policy
lr_config = dict(policy='step', step=[200, 400])
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=600)
checkpoint_config = dict(interval=100)
param_scheduler = [
    dict(type='MultiStepLR', milestones=[200, 400], end=600),
]
@@ -1,8 +1,8 @@
# optimizer
optimizer = dict(type='Adam', lr=1e-3)
optimizer_config = dict(grad_clip=None)
_base_ = 'schedule_adam_step_5e.py'

train_cfg = dict(by_epoch=True, max_epochs=6)

# learning policy
lr_config = dict(policy='step', step=[3, 4])
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=6)
checkpoint_config = dict(interval=1)
param_scheduler = [
    dict(type='MultiStepLR', milestones=[3, 4], end=6),
]
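Note that the new version of this file inherits from another schedule via _base_ and only overrides what differs. Assuming standard MMEngine config inheritance, and assuming the base file is the 5-epoch Adam step schedule shown earlier in this diff, the merged result would look roughly like this sketch (lists such as param_scheduler are replaced wholesale, not merged):

# rough effective config after merging _base_ = 'schedule_adam_step_5e.py'
optimizer = dict(type='Adam', lr=1e-3)
optimizer_config = dict(grad_clip=None)
train_cfg = dict(by_epoch=True, max_epochs=6)   # overridden in this file
val_cfg = dict(interval=1)                      # inherited from the base
test_cfg = dict()
param_scheduler = [
    dict(type='MultiStepLR', milestones=[3, 4], end=6),  # overridden in this file
]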
@@ -1,8 +1,11 @@
# optimizer
optimizer = dict(type='SGD', lr=0.007, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)

train_cfg = dict(by_epoch=False, max_iters=100000)
val_cfg = dict(interval=100001)  # Never evaluate
test_cfg = dict()

# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-7, by_epoch=False)
# running settings
runner = dict(type='IterBasedRunner', max_iters=100000)
checkpoint_config = dict(interval=10000)
param_scheduler = [
    dict(type='PolyLR', power=0.9, eta_min=1e-7, by_epoch=False, end=100000),
]
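This schedule is iteration-based rather than epoch-based, so by_epoch=False carries through to both train_cfg and the scheduler, and end is counted in iterations. The interval=100001 value is a deliberate trick: it exceeds max_iters, so validation never fires. Restated from the hunk:

# iteration-based training loop: 100k iters, checkpoints every 10k
train_cfg = dict(by_epoch=False, max_iters=100000)
val_cfg = dict(interval=100001)  # larger than max_iters, so validation never runs

# poly decay counted in iterations, not epochs
param_scheduler = [
    dict(type='PolyLR', power=0.9, eta_min=1e-7, by_epoch=False, end=100000),
]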
@@ -1,8 +1,11 @@
# optimizer
optimizer = dict(type='SGD', lr=0.007, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)

train_cfg = dict(by_epoch=True, max_epochs=1200)
val_cfg = dict(interval=20)
test_cfg = dict()

# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-7, by_epoch=True)
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=1200)
checkpoint_config = dict(interval=100)
param_scheduler = [
    dict(type='PolyLR', power=0.9, eta_min=1e-7, end=1200),
]
@@ -1,8 +1,8 @@
# optimizer
optimizer = dict(type='SGD', lr=1e-3, momentum=0.90, weight_decay=5e-4)
optimizer_config = dict(grad_clip=None)
train_cfg = dict(by_epoch=True, max_epochs=1500)
val_cfg = dict(interval=20)
test_cfg = dict()

# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-7, by_epoch=True)
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=1500)
checkpoint_config = dict(interval=100)
param_scheduler = [
    dict(type='PolyLR', power=0.9, eta_min=1e-7, end=1500),
]
@@ -1,13 +1,12 @@
# optimizer
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)

train_cfg = dict(by_epoch=True, max_epochs=160)
val_cfg = dict(interval=20)
test_cfg = dict()

# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[80, 128])
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=160)
checkpoint_config = dict(interval=10)
param_scheduler = [
    dict(type='LinearLR', end=500, start_factor=0.001, by_epoch=False),
    dict(type='MultiStepLR', milestones=[80, 128], end=160),
]
@@ -1,8 +1,10 @@
# optimizer
optimizer = dict(type='SGD', lr=1e-3, momentum=0.99, weight_decay=5e-4)
optimizer_config = dict(grad_clip=None)

train_cfg = dict(by_epoch=True, max_epochs=600)
val_cfg = dict(interval=50)
test_cfg = dict()

# learning policy
lr_config = dict(policy='step', step=[200, 400])
# running settings
runner = dict(type='EpochBasedRunner', max_epochs=600)
checkpoint_config = dict(interval=100)
param_scheduler = [
    dict(type='MultiStepLR', milestones=[200, 400], end=600),
]