From 1212ae89cc851e14d3615e66c08f93e26688ca12 Mon Sep 17 00:00:00 2001
From: liukuikun <641417025@qq.com>
Date: Tue, 21 Jun 2022 06:00:54 +0000
Subject: [PATCH] [Update] update config

---
 configs/_base_/schedules/schedule_adadelta_5e.py    |  7 +++----
 configs/_base_/schedules/schedule_adam_600e.py      |  7 +++----
 configs/_base_/schedules/schedule_adam_step_12e.py  |  7 +++----
 configs/_base_/schedules/schedule_adam_step_20e.py  |  7 +++----
 configs/_base_/schedules/schedule_adam_step_5e.py   |  7 +++----
 configs/_base_/schedules/schedule_adam_step_600e.py |  7 +++----
 configs/_base_/schedules/schedule_adam_step_6e.py   |  2 +-
 configs/_base_/schedules/schedule_sgd_100k_iters.py |  6 +++---
 configs/_base_/schedules/schedule_sgd_1200e.py      |  7 +++----
 configs/_base_/schedules/schedule_sgd_1500e.py      | 10 ++++++----
 configs/_base_/schedules/schedule_sgd_160e.py       |  7 +++----
 configs/_base_/schedules/schedule_sgd_600e.py       |  7 +++----
 12 files changed, 37 insertions(+), 44 deletions(-)

diff --git a/configs/_base_/schedules/schedule_adadelta_5e.py b/configs/_base_/schedules/schedule_adadelta_5e.py
index 1e7ab9d6..465072eb 100644
--- a/configs/_base_/schedules/schedule_adadelta_5e.py
+++ b/configs/_base_/schedules/schedule_adadelta_5e.py
@@ -1,9 +1,8 @@
 optim_wrapper = dict(
     type='OptimWrapper', optimizer=dict(type='Adadelta', lr=1.0))
-train_cfg = dict(by_epoch=True, max_epochs=5)
-val_cfg = dict(interval=1)
-test_cfg = dict()
-
+train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=5, val_interval=1)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
 # learning rate
 param_scheduler = [
     dict(type='ConstantLR', factor=1.0),
diff --git a/configs/_base_/schedules/schedule_adam_600e.py b/configs/_base_/schedules/schedule_adam_600e.py
index b78643ef..eb61f7b9 100644
--- a/configs/_base_/schedules/schedule_adam_600e.py
+++ b/configs/_base_/schedules/schedule_adam_600e.py
@@ -1,9 +1,8 @@
 # optimizer
 optim_wrapper = dict(type='OptimWrapper', optimizer=dict(type='Adam', lr=1e-3))
-train_cfg = dict(by_epoch=True, max_epochs=600)
-val_cfg = dict(interval=20)
-test_cfg = dict()
-
+train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=600, val_interval=20)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
 # learning rate
 param_scheduler = [
     dict(type='PolyLR', power=0.9, end=600),
diff --git a/configs/_base_/schedules/schedule_adam_step_12e.py b/configs/_base_/schedules/schedule_adam_step_12e.py
index 8328c888..1fe536e4 100644
--- a/configs/_base_/schedules/schedule_adam_step_12e.py
+++ b/configs/_base_/schedules/schedule_adam_step_12e.py
@@ -1,10 +1,9 @@
 # optimizer
 optim_wrapper = dict(type='OptimWrapper', optimizer=dict(type='Adam', lr=4e-4))
 
-train_cfg = dict(by_epoch=True, max_epochs=12)
-val_cfg = dict(interval=1)
-test_cfg = dict()
-
+train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
 # learning policy
 param_scheduler = [
     dict(type='LinearLR', end=100, by_epoch=False),
diff --git a/configs/_base_/schedules/schedule_adam_step_20e.py b/configs/_base_/schedules/schedule_adam_step_20e.py
index 8a37a8ea..487836ce 100644
--- a/configs/_base_/schedules/schedule_adam_step_20e.py
+++ b/configs/_base_/schedules/schedule_adam_step_20e.py
@@ -1,9 +1,8 @@
 # optimizer
 optim_wrapper = dict(type='OptimWrapper', optimizer=dict(type='Adam', lr=1e-4))
-train_cfg = dict(by_epoch=True, max_epochs=20)
-val_cfg = dict(interval=1)
-test_cfg = dict()
-
+train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=20, val_interval=1)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
 # learning policy
 param_scheduler = [
     dict(type='LinearLR', end=1, start_factor=0.001),
diff --git a/configs/_base_/schedules/schedule_adam_step_5e.py b/configs/_base_/schedules/schedule_adam_step_5e.py
index 6fc12741..73aad763 100644
--- a/configs/_base_/schedules/schedule_adam_step_5e.py
+++ b/configs/_base_/schedules/schedule_adam_step_5e.py
@@ -1,9 +1,8 @@
 # optimizer
 optim_wrapper = dict(type='OptimWrapper', optimizer=dict(type='Adam', lr=1e-3))
-train_cfg = dict(by_epoch=True, max_epochs=5)
-val_cfg = dict(interval=1)
-test_cfg = dict()
-
+train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=5, val_interval=1)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
 # learning policy
 param_scheduler = [
     dict(type='MultiStepLR', milestones=[3, 4], end=5),
diff --git a/configs/_base_/schedules/schedule_adam_step_600e.py b/configs/_base_/schedules/schedule_adam_step_600e.py
index 43fd3338..b127b22a 100644
--- a/configs/_base_/schedules/schedule_adam_step_600e.py
+++ b/configs/_base_/schedules/schedule_adam_step_600e.py
@@ -1,9 +1,8 @@
 # optimizer
 optim_wrapper = dict(type='OptimWrapper', optimizer=dict(type='Adam', lr=1e-4))
-train_cfg = dict(by_epoch=True, max_epochs=600)
-val_cfg = dict(interval=40)
-test_cfg = dict()
-
+train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=600, val_interval=40)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
 # learning policy
 param_scheduler = [
     dict(type='MultiStepLR', milestones=[200, 400], end=600),
diff --git a/configs/_base_/schedules/schedule_adam_step_6e.py b/configs/_base_/schedules/schedule_adam_step_6e.py
index 12da6cc8..598c16dc 100644
--- a/configs/_base_/schedules/schedule_adam_step_6e.py
+++ b/configs/_base_/schedules/schedule_adam_step_6e.py
@@ -1,6 +1,6 @@
 _base_ = 'schedule_adam_step_5e.py'
 
-train_cfg = dict(by_epoch=True, max_epochs=6)
+train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=6, val_interval=1)
 
 # learning policy
 param_scheduler = [
diff --git a/configs/_base_/schedules/schedule_sgd_100k_iters.py b/configs/_base_/schedules/schedule_sgd_100k_iters.py
index af804a9a..61286916 100644
--- a/configs/_base_/schedules/schedule_sgd_100k_iters.py
+++ b/configs/_base_/schedules/schedule_sgd_100k_iters.py
@@ -2,10 +2,10 @@ optim_wrapper = dict(
     type='OptimWrapper',
     optimizer=dict(type='SGD', lr=0.007, momentum=0.9, weight_decay=0.0001))
 
-train_cfg = dict(by_epoch=False, max_iters=100000)
-val_cfg = dict(interval=100001)  # Never evaluate
-test_cfg = dict()
+train_cfg = dict(type='IterBasedTrainLoop', max_iters=100000)
+test_cfg = dict(type='TestLoop')
+val_cfg = None
 
 # learning policy
 param_scheduler = [
     dict(type='PolyLR', power=0.9, eta_min=1e-7, by_epoch=False, end=100000),
diff --git a/configs/_base_/schedules/schedule_sgd_1200e.py b/configs/_base_/schedules/schedule_sgd_1200e.py
index ae425a38..f8555e46 100644
--- a/configs/_base_/schedules/schedule_sgd_1200e.py
+++ b/configs/_base_/schedules/schedule_sgd_1200e.py
@@ -2,10 +2,9 @@ optim_wrapper = dict(
     type='OptimWrapper',
     optimizer=dict(type='SGD', lr=0.007, momentum=0.9, weight_decay=0.0001))
 
-train_cfg = dict(by_epoch=True, max_epochs=1200)
-val_cfg = dict(interval=20)  # Never evaluate
-test_cfg = dict()
-
+train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=1200, val_interval=20)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
 # learning policy
 param_scheduler = [
     dict(type='PolyLR', power=0.9, eta_min=1e-7, end=1200),
diff --git a/configs/_base_/schedules/schedule_sgd_1500e.py b/configs/_base_/schedules/schedule_sgd_1500e.py
index 18128bcb..d5a600c6 100644
--- a/configs/_base_/schedules/schedule_sgd_1500e.py
+++ b/configs/_base_/schedules/schedule_sgd_1500e.py
@@ -1,7 +1,9 @@
-train_cfg = dict(by_epoch=True, max_epochs=1500)
-val_cfg = dict(interval=20)  # Never evaluate
-test_cfg = dict()
-
+optim_wrapper = dict(
+    type='OptimWrapper',
+    optimizer=dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4))
+train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=1500, val_interval=20)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
 # learning policy
 param_scheduler = [
     dict(type='PolyLR', power=0.9, eta_min=1e-7, end=1500),
diff --git a/configs/_base_/schedules/schedule_sgd_160e.py b/configs/_base_/schedules/schedule_sgd_160e.py
index 650b7f65..67e9bf46 100644
--- a/configs/_base_/schedules/schedule_sgd_160e.py
+++ b/configs/_base_/schedules/schedule_sgd_160e.py
@@ -2,10 +2,9 @@ optim_wrapper = dict(
     type='OptimWrapper',
     optimizer=dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001))
 
-train_cfg = dict(by_epoch=True, max_epochs=160)
-val_cfg = dict(interval=20)
-test_cfg = dict()
-
+train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=160, val_interval=20)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
 # learning policy
 param_scheduler = [
     dict(type='LinearLR', end=500, start_factor=0.001, by_epoch=False),
diff --git a/configs/_base_/schedules/schedule_sgd_600e.py b/configs/_base_/schedules/schedule_sgd_600e.py
index 39c75a81..a7f8848a 100644
--- a/configs/_base_/schedules/schedule_sgd_600e.py
+++ b/configs/_base_/schedules/schedule_sgd_600e.py
@@ -1,10 +1,9 @@
 optim_wrapper = dict(
     type='OptimWrapper',
     optimizer=dict(type='SGD', lr=1e-3, momentum=0.99, weight_decay=5e-4))
-train_cfg = dict(by_epoch=True, max_epochs=600)
-val_cfg = dict(interval=50)
-test_cfg = dict()
-
+train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=600, val_interval=50)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
 # learning policy
 param_scheduler = [
     dict(type='MultiStepLR', milestones=[200, 400], end=600),
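
Reviewer note: every schedule above applies the same migration. The loose
train_cfg = dict(by_epoch=...) / val_cfg = dict(interval=...) / test_cfg = dict()
keys become MMEngine loop configs: the train loop type ('EpochBasedTrainLoop'
or 'IterBasedTrainLoop') now owns max_epochs / max_iters plus the validation
cadence (val_interval), while val_cfg and test_cfg reduce to bare 'ValLoop' /
'TestLoop' types. In schedule_sgd_100k_iters.py, val_cfg = None disables
validation outright, replacing the old interval=100001 "Never evaluate"
workaround, and schedule_sgd_1500e.py additionally gains an explicit SGD
optim_wrapper. The snippet below is a minimal local sanity check of the merged
result, not part of the patch; it assumes mmengine is installed and is run
from the repository root:

    from mmengine.config import Config

    # Load one of the updated schedules; `_base_` inheritance merges
    # schedule_adam_step_5e.py underneath, with the child's train_cfg
    # taking precedence key by key.
    cfg = Config.fromfile('configs/_base_/schedules/schedule_adam_step_6e.py')

    print(cfg.train_cfg)  # type='EpochBasedTrainLoop', max_epochs=6, val_interval=1
    print(cfg.val_cfg)    # type='ValLoop', inherited from the base schedule
    print(cfg.test_cfg)   # type='TestLoop', inherited from the base schedule

Moving the interval onto the train loop matches MMEngine's division of labor:
the training loop decides when validation runs, while ValLoop and TestLoop
themselves stay stateless.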