[Update] update config

pull/1178/head
liukuikun 2022-06-21 06:00:54 +00:00 committed by gaotongxiao
parent 301eb7b783
commit 1212ae89cc
12 changed files with 37 additions and 44 deletions

View File

@@ -1,9 +1,8 @@
optim_wrapper = dict(
type='OptimWrapper', optimizer=dict(type='Adadelta', lr=1.0))
train_cfg = dict(by_epoch=True, max_epochs=5)
val_cfg = dict(interval=1)
test_cfg = dict()
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=5, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0),

View File

@@ -1,9 +1,8 @@
# optimizer
optim_wrapper = dict(type='OptimWrapper', optimizer=dict(type='Adam', lr=1e-3))
train_cfg = dict(by_epoch=True, max_epochs=600)
val_cfg = dict(interval=20)
test_cfg = dict()
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=600, val_interval=20)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(type='PolyLR', power=0.9, end=600),

View File

@@ -1,10 +1,9 @@
# optimizer
optim_wrapper = dict(type='OptimWrapper', optimizer=dict(type='Adam', lr=4e-4))
train_cfg = dict(by_epoch=True, max_epochs=12)
val_cfg = dict(interval=1)
test_cfg = dict()
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning policy
param_scheduler = [
dict(type='LinearLR', end=100, by_epoch=False),

View File

@@ -1,9 +1,8 @@
# optimizer
optim_wrapper = dict(type='OptimWrapper', optimizer=dict(type='Adam', lr=1e-4))
train_cfg = dict(by_epoch=True, max_epochs=20)
val_cfg = dict(interval=1)
test_cfg = dict()
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=20, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning policy
param_scheduler = [
dict(type='LinearLR', end=1, start_factor=0.001),

View File

@@ -1,9 +1,8 @@
# optimizer
optim_wrapper = dict(type='OptimWrapper', optimizer=dict(type='Adam', lr=1e-3))
train_cfg = dict(by_epoch=True, max_epochs=5)
val_cfg = dict(interval=1)
test_cfg = dict()
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=5, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning policy
param_scheduler = [
dict(type='MultiStepLR', milestones=[3, 4], end=5),

View File

@@ -1,9 +1,8 @@
# optimizer
optim_wrapper = dict(type='OptimWrapper', optimizer=dict(type='Adam', lr=1e-4))
train_cfg = dict(by_epoch=True, max_epochs=600)
val_cfg = dict(interval=40)
test_cfg = dict()
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=600, val_interval=40)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning policy
param_scheduler = [
dict(type='MultiStepLR', milestones=[200, 400], end=600),

View File

@@ -1,6 +1,6 @@
_base_ = 'schedule_adam_step_5e.py'
train_cfg = dict(by_epoch=True, max_epochs=6)
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=6, val_interval=1)
# learning policy
param_scheduler = [

View File

@@ -2,10 +2,10 @@
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.007, momentum=0.9, weight_decay=0.0001))
train_cfg = dict(by_epoch=False, max_iters=100000)
val_cfg = dict(interval=100001) # Never evaluate
test_cfg = dict()
train_cfg = dict(type='IterBasedTrainLoop', max_iters=100000)
test_cfg = dict(type='TestLoop')
val_cfg = None
# learning policy
param_scheduler = [
dict(type='PolyLR', power=0.9, eta_min=1e-7, by_epoch=False, end=100000),

View File

@@ -2,10 +2,9 @@
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.007, momentum=0.9, weight_decay=0.0001))
train_cfg = dict(by_epoch=True, max_epochs=1200)
val_cfg = dict(interval=20) # Never evaluate
test_cfg = dict()
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=1200, val_interval=20)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning policy
param_scheduler = [
dict(type='PolyLR', power=0.9, eta_min=1e-7, end=1200),

View File

@@ -1,7 +1,9 @@
train_cfg = dict(by_epoch=True, max_epochs=1500)
val_cfg = dict(interval=20) # Never evaluate
test_cfg = dict()
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4))
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=1500, val_interval=20)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning policy
param_scheduler = [
dict(type='PolyLR', power=0.9, eta_min=1e-7, end=1500),

View File

@@ -2,10 +2,9 @@
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001))
train_cfg = dict(by_epoch=True, max_epochs=160)
val_cfg = dict(interval=20)
test_cfg = dict()
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=160, val_interval=20)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning policy
param_scheduler = [
dict(type='LinearLR', end=500, start_factor=0.001, by_epoch=False),

View File

@@ -1,10 +1,9 @@
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=1e-3, momentum=0.99, weight_decay=5e-4))
train_cfg = dict(by_epoch=True, max_epochs=600)
val_cfg = dict(interval=50)
test_cfg = dict()
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=600, val_interval=50)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning policy
param_scheduler = [
dict(type='MultiStepLR', milestones=[200, 400], end=600),