[Refactor] Refactor lr_config

limengzhang.vendor 2022-06-08 09:25:00 +00:00 committed by zhengmiao
parent 6b34566eba
commit fcb9cb4571
58 changed files with 522 additions and 258 deletions
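Summary of the migration pattern: every config's MMCV-style `lr_config` dict is replaced with MMEngine's `param_scheduler` list, and standalone `evaluation` dicts are dropped. A minimal sketch of the core mapping for the plain poly policy, using the 160k schedule's values from the first file below (`min_lr` becomes `eta_min`; `end` mirrors the schedule's `max_iters`):

# Old style (removed):
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)

# New style (added); end matches max_iters in train_cfg:
param_scheduler = [
    dict(
        type='PolyLR',
        eta_min=1e-4,   # was min_lr
        power=0.9,
        begin=0,
        end=160000,     # matches max_iters
        by_epoch=False)
]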

View File

@@ -2,13 +2,20 @@
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
 optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
 # learning policy
-lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
+param_scheduler = [
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        begin=0,
+        end=160000,
+        by_epoch=False)
+]
 # training schedule for 160k
 train_cfg = dict(
     type='IterBasedTrainLoop', max_iters=160000, val_interval=16000)
 val_cfg = dict(type='ValLoop')
 test_cfg = dict(type='TestLoop')
-evaluation = dict(interval=16000, metric='mIoU', pre_eval=True)
 default_hooks = dict(
     optimizer=dict(type='OptimizerHook', grad_clip=None),
     timer=dict(type='IterTimerHook'),
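Note that the removed `evaluation = dict(interval=16000, metric='mIoU', pre_eval=True)` gets no inline replacement: its interval is already carried by `val_interval=16000` in `train_cfg`, and under MMEngine the metric moves to the evaluator config. A hypothetical new-style equivalent, assumed and not part of this diff:

val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU'])  # assumed location of the metric
test_evaluator = val_evaluator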

View File

@@ -2,12 +2,19 @@
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
 optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
 # learning policy
-lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
+param_scheduler = [
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        begin=0,
+        end=20000,
+        by_epoch=False)
+]
 # training schedule for 20k
 train_cfg = dict(type='IterBasedTrainLoop', max_iters=20000, val_interval=2000)
 val_cfg = dict(type='ValLoop')
 test_cfg = dict(type='TestLoop')
-evaluation = dict(interval=2000, metric='mIoU', pre_eval=True)
 default_hooks = dict(
     optimizer=dict(type='OptimizerHook', grad_clip=None),
     timer=dict(type='IterTimerHook'),

View File

@@ -2,13 +2,20 @@
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
 optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
 # learning policy
-lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
+param_scheduler = [
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        begin=0,
+        end=320000,
+        by_epoch=False)
+]
 # training schedule for 320k
 train_cfg = dict(
     type='IterBasedTrainLoop', max_iters=320000, val_interval=32000)
 val_cfg = dict(type='ValLoop')
 test_cfg = dict(type='TestLoop')
-evaluation = dict(interval=32000, metric='mIoU')
 default_hooks = dict(
     optimizer=dict(type='OptimizerHook', grad_clip=None),
     timer=dict(type='IterTimerHook'),

View File

@@ -2,12 +2,19 @@
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
 optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
 # learning policy
-lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
+param_scheduler = [
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        begin=0,
+        end=40000,
+        by_epoch=False)
+]
 # training schedule for 40k
 train_cfg = dict(type='IterBasedTrainLoop', max_iters=40000, val_interval=4000)
 val_cfg = dict(type='ValLoop')
 test_cfg = dict(type='TestLoop')
-evaluation = dict(interval=4000, metric='mIoU', pre_eval=True)
 default_hooks = dict(
     optimizer=dict(type='OptimizerHook', grad_clip=None),
     timer=dict(type='IterTimerHook'),

View File

@@ -2,12 +2,19 @@
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
 optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
 # learning policy
-lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
+param_scheduler = [
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        begin=0,
+        end=80000,
+        by_epoch=False)
+]
 # training schedule for 80k
 train_cfg = dict(type='IterBasedTrainLoop', max_iters=80000, val_interval=8000)
 val_cfg = dict(type='ValLoop')
 test_cfg = dict(type='TestLoop')
-evaluation = dict(interval=8000, metric='mIoU', pre_eval=True)
 default_hooks = dict(
     optimizer=dict(type='OptimizerHook', grad_clip=None),
     timer=dict(type='IterTimerHook'),

View File

@@ -20,15 +20,18 @@ optim_wrapper = dict(
     constructor='LayerDecayOptimizerConstructor',
     paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.9))
 
-lr_config = dict(
-    _delete_=True,
-    policy='poly',
-    warmup='linear',
-    warmup_iters=1500,
-    warmup_ratio=1e-6,
-    power=1.0,
-    min_lr=0.0,
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
+    dict(
+        type='PolyLR',
+        power=1.0,
+        begin=1500,
+        end=160000,
+        eta_min=0.0,
+        by_epoch=False,
+    )
+]
 
 # By default, models are trained on 8 GPUs with 2 images per GPU
 train_dataloader = dict(batch_size=2)
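For configs that used warmup, the single `lr_config` dict is split into a chained two-stage `param_scheduler`, as in the hunk above: the `warmup='linear'`/`warmup_iters`/`warmup_ratio` fields become an explicit `LinearLR` stage whose `end` equals the old `warmup_iters`, and the poly stage picks up at that iteration. A sketch of the field-by-field correspondence, using this hunk's values:

# Old: warmup folded into one dict (removed)
lr_config = dict(
    _delete_=True,
    policy='poly',
    warmup='linear',    # -> a separate LinearLR stage
    warmup_iters=1500,  # -> LinearLR end / PolyLR begin
    warmup_ratio=1e-6,  # -> LinearLR start_factor
    power=1.0,
    min_lr=0.0,         # -> PolyLR eta_min
    by_epoch=False)

# New: two schedulers over disjoint iteration ranges (added)
param_scheduler = [
    dict(type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
    dict(type='PolyLR', power=1.0, begin=1500, end=160000, eta_min=0.0,
         by_epoch=False)
]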

View File

@@ -33,15 +33,19 @@
     optimizer=optimizer,
     constructor='LayerDecayOptimizerConstructor',
     paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.95))
-lr_config = dict(
-    _delete_=True,
-    policy='poly',
-    warmup='linear',
-    warmup_iters=3000,
-    warmup_ratio=1e-6,
-    power=1.0,
-    min_lr=0.0,
-    by_epoch=False)
+
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=3000),
+    dict(
+        type='PolyLR',
+        power=1.0,
+        begin=3000,
+        end=160000,
+        eta_min=0.0,
+        by_epoch=False,
+    )
+]
 
 train_dataloader = dict(batch_size=1)
 val_dataloader = dict(batch_size=1)

View File

@@ -14,7 +14,17 @@ model = dict(
         dict(in_channels=512, channels=256, num_classes=171),
         dict(in_channels=512, channels=256, num_classes=171),
     ])
-lr_config = dict(warmup='linear', warmup_iters=1000)
+param_scheduler = [
+    dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000),
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        begin=1000,
+        end=160000,
+        by_epoch=False,
+    )
+]
 optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0005)
 optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
 train_dataloader = dict(batch_size=4, num_workers=4)

View File

@@ -3,7 +3,17 @@ _base_ = [
     '../_base_/datasets/cityscapes_1024x1024.py',
     '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
 ]
-lr_config = dict(warmup='linear', warmup_iters=1000)
+param_scheduler = [
+    dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000),
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        begin=1000,
+        end=160000,
+        by_epoch=False,
+    )
+]
 optimizer = dict(type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0005)
 optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
 train_dataloader = dict(batch_size=4, num_workers=4)

View File

@@ -8,7 +8,17 @@ model = dict(
     backbone_cfg=dict(
         init_cfg=dict(
             type='Pretrained', checkpoint='open-mmlab://resnet18_v1c'))))
-lr_config = dict(warmup='linear', warmup_iters=1000)
+param_scheduler = [
+    dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000),
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        begin=1000,
+        end=160000,
+        by_epoch=False,
+    )
+]
 optimizer = dict(type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0005)
 optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
 train_dataloader = dict(batch_size=4, num_workers=4)

View File

@@ -9,6 +9,19 @@ model = dict(
         dict(num_classes=171),
         dict(num_classes=171),
     ])
-lr_config = dict(warmup='linear', warmup_iters=1000)
+param_scheduler = [
+    dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000),
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        begin=1000,
+        end=160000,
+        by_epoch=False,
+    )
+]
 optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0005)
 optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
+train_dataloader = dict(batch_size=4, num_workers=4)
+val_dataloader = dict(batch_size=4, num_workers=4)
+test_dataloader = val_dataloader

View File

@@ -34,7 +34,17 @@ model = dict(
             norm_cfg=norm_cfg,
             concat_input=False),
     ])
-lr_config = dict(warmup='linear', warmup_iters=1000)
+param_scheduler = [
+    dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000),
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        begin=1000,
+        end=160000,
+        by_epoch=False,
+    )
+]
 optimizer = dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0005)
 optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
 train_dataloader = dict(batch_size=4, num_workers=4)

View File

@@ -14,6 +14,19 @@ model = dict(
         dict(in_channels=512, channels=256, num_classes=171),
         dict(in_channels=512, channels=256, num_classes=171),
     ])
-lr_config = dict(warmup='linear', warmup_iters=1000)
+param_scheduler = [
+    dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000),
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        begin=1000,
+        end=160000,
+        by_epoch=False,
+    )
+]
 optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0005)
 optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
+train_dataloader = dict(batch_size=4, num_workers=4)
+val_dataloader = dict(batch_size=4, num_workers=4)
+test_dataloader = val_dataloader

View File

@@ -3,7 +3,17 @@ _base_ = [
     '../_base_/datasets/cityscapes_1024x1024.py',
     '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
 ]
-lr_config = dict(warmup='linear', warmup_iters=1000)
+param_scheduler = [
+    dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000),
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        begin=1000,
+        end=160000,
+        by_epoch=False,
+    )
+]
 optimizer = dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0005)
 optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
 train_dataloader = dict(batch_size=4, num_workers=4)

View File

@@ -3,7 +3,17 @@ _base_ = [
     '../_base_/datasets/cityscapes_1024x1024.py',
     '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
 ]
-lr_config = dict(warmup='linear', warmup_iters=1000)
+param_scheduler = [
+    dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000),
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        begin=1000,
+        end=160000,
+        by_epoch=False,
+    )
+]
 optimizer = dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0005)
 optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
 train_dataloader = dict(batch_size=8, num_workers=4)

View File

@@ -62,7 +62,17 @@ models = dict(
                 type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
     ],
 )
-lr_config = dict(warmup='linear', warmup_iters=1000)
+param_scheduler = [
+    dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000),
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        begin=1000,
+        end=160000,
+        by_epoch=False,
+    )
+]
 optimizer = dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0005)
 optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
 train_dataloader = dict(batch_size=4, num_workers=4)

View File

@@ -4,7 +4,15 @@ _base_ = ['../_base_/models/cgnet.py', '../_base_/default_runtime.py']
 optimizer = dict(type='Adam', lr=0.001, eps=1e-08, weight_decay=0.0005)
 optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
 # learning policy
-lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
+param_scheduler = [
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        by_epoch=False,
+        begin=0,
+        end=60000)
+]
 # runtime settings
 total_iters = 60000
 train_cfg = dict(
@@ -12,7 +20,6 @@ train_cfg = dict(
 val_cfg = dict(type='ValLoop')
 test_cfg = dict(type='TestLoop')
 default_hooks = dict(checkpoint=dict(by_epoch=False, interval=4000))
-evaluation = dict(interval=4000, metric='mIoU')
 
 # dataset settings
 dataset_type = 'CityscapesDataset'

View File

@@ -7,7 +7,15 @@ _base_ = [
 optimizer = dict(type='Adam', lr=0.001, eps=1e-08, weight_decay=0.0005)
 optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
 # learning policy
-lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
+param_scheduler = [
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        by_epoch=False,
+        begin=0,
+        end=60000)
+]
 # runtime settings
 total_iters = 60000
 train_cfg = dict(
@@ -15,7 +23,6 @@ train_cfg = dict(
 val_cfg = dict(type='ValLoop')
 test_cfg = dict(type='TestLoop')
 default_hooks = dict(checkpoint=dict(by_epoch=False, interval=4000))
-evaluation = dict(interval=4000, metric='mIoU')
 
 img_norm_cfg = dict(
     mean=[72.39239876, 82.90891754, 73.15835921], std=[1, 1, 1], to_rgb=True)

View File

@@ -26,15 +26,18 @@ optim_wrapper = dict(
     },
     constructor='LearningRateDecayOptimizerConstructor')
 
-lr_config = dict(
-    _delete_=True,
-    policy='poly',
-    warmup='linear',
-    warmup_iters=1500,
-    warmup_ratio=1e-6,
-    power=1.0,
-    min_lr=0.0,
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
+    dict(
+        type='PolyLR',
+        power=1.0,
+        begin=1500,
+        end=160000,
+        eta_min=0.0,
+        by_epoch=False,
+    )
+]
 
 # By default, models are trained on 8 GPUs with 2 images per GPU
 train_dataloader = dict(batch_size=2)

View File

@@ -41,15 +41,18 @@ optim_wrapper = dict(
     },
     constructor='LearningRateDecayOptimizerConstructor')
 
-lr_config = dict(
-    _delete_=True,
-    policy='poly',
-    warmup='linear',
-    warmup_iters=1500,
-    warmup_ratio=1e-6,
-    power=1.0,
-    min_lr=0.0,
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
+    dict(
+        type='PolyLR',
+        power=1.0,
+        begin=1500,
+        end=160000,
+        eta_min=0.0,
+        by_epoch=False,
+    )
+]
 
 # By default, models are trained on 8 GPUs with 2 images per GPU
 train_dataloader = dict(batch_size=2)

View File

@@ -41,15 +41,18 @@ optim_wrapper = dict(
     },
     constructor='LearningRateDecayOptimizerConstructor')
 
-lr_config = dict(
-    _delete_=True,
-    policy='poly',
-    warmup='linear',
-    warmup_iters=1500,
-    warmup_ratio=1e-6,
-    power=1.0,
-    min_lr=0.0,
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
+    dict(
+        type='PolyLR',
+        power=1.0,
+        begin=1500,
+        end=160000,
+        eta_min=0.0,
+        by_epoch=False,
+    )
+]
 
 # By default, models are trained on 8 GPUs with 2 images per GPU
 train_dataloader = dict(batch_size=2)

View File

@@ -40,15 +40,18 @@ optim_wrapper = dict(
     },
     constructor='LearningRateDecayOptimizerConstructor')
 
-lr_config = dict(
-    _delete_=True,
-    policy='poly',
-    warmup='linear',
-    warmup_iters=1500,
-    warmup_ratio=1e-6,
-    power=1.0,
-    min_lr=0.0,
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
+    dict(
+        type='PolyLR',
+        power=1.0,
+        begin=1500,
+        end=160000,
+        eta_min=0.0,
+        by_epoch=False,
+    )
+]
 
 # By default, models are trained on 8 GPUs with 2 images per GPU
 train_dataloader = dict(batch_size=2)

View File

@@ -40,15 +40,18 @@ optim_wrapper = dict(
     },
     constructor='LearningRateDecayOptimizerConstructor')
 
-lr_config = dict(
-    _delete_=True,
-    policy='poly',
-    warmup='linear',
-    warmup_iters=1500,
-    warmup_ratio=1e-6,
-    power=1.0,
-    min_lr=0.0,
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
+    dict(
+        type='PolyLR',
+        power=1.0,
+        begin=1500,
+        end=160000,
+        eta_min=0.0,
+        by_epoch=False,
+    )
+]
 
 # By default, models are trained on 8 GPUs with 2 images per GPU
 train_dataloader = dict(batch_size=2)

View File

@@ -41,15 +41,18 @@ optim_wrapper = dict(
     },
     constructor='LearningRateDecayOptimizerConstructor')
 
-lr_config = dict(
-    _delete_=True,
-    policy='poly',
-    warmup='linear',
-    warmup_iters=1500,
-    warmup_ratio=1e-6,
-    power=1.0,
-    min_lr=0.0,
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
+    dict(
+        type='PolyLR',
+        power=1.0,
+        begin=1500,
+        end=160000,
+        eta_min=0.0,
+        by_epoch=False,
+    )
+]
 
 # By default, models are trained on 8 GPUs with 2 images per GPU
 train_dataloader = dict(batch_size=2)

View File

@@ -22,15 +22,18 @@ optim_wrapper = dict(
         'norm': dict(decay_mult=0.)
     }))
 
-lr_config = dict(
-    _delete_=True,
-    policy='poly',
-    warmup='linear',
-    warmup_iters=1500,
-    warmup_ratio=1e-6,
-    power=1.0,
-    min_lr=0.0,
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
+    dict(
+        type='PolyLR',
+        eta_min=0.0,
+        power=1.0,
+        begin=1500,
+        end=160000,
+        by_epoch=False,
+    )
+]
 
 # By default, models are trained on 8 GPUs with 2 images per GPU
 train_dataloader = dict(batch_size=2, num_workers=2)

View File

@@ -86,14 +86,18 @@ default_hooks = dict(
         type='OptimizerHook',
         grad_clip=dict(max_norm=1, norm_type=2)))
 # learning policy
-lr_config = dict(
-    _delete_=True,
-    policy='step',
-    warmup='linear',
-    warmup_iters=1000,
-    warmup_ratio=0.001,
-    step=[60000, 72000],
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
+        end=1000),
+    dict(
+        type='MultiStepLR',
+        begin=1000,
+        end=80000,
+        milestones=[60000, 72000],
+        by_epoch=False,
+    )
+]
 # In K-Net implementation we use batch size 2 per GPU as default
 train_dataloader = dict(batch_size=2, num_workers=2)
 val_dataloader = dict(batch_size=2, num_workers=2)
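The K-Net configs use the step policy instead; the mapping has the same shape, with `step=[60000, 72000]` becoming `MultiStepLR`'s `milestones` (all values taken from the hunk above):

# Old (removed)
lr_config = dict(
    _delete_=True,
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.001,
    step=[60000, 72000],   # -> milestones
    by_epoch=False)

# New (added)
param_scheduler = [
    dict(type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
         end=1000),
    dict(type='MultiStepLR', begin=1000, end=80000, milestones=[60000, 72000],
         by_epoch=False)
]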

View File

@@ -86,14 +86,18 @@ default_hooks = dict(
         type='OptimizerHook',
         grad_clip=dict(max_norm=1, norm_type=2)))
 # learning policy
-lr_config = dict(
-    _delete_=True,
-    policy='step',
-    warmup='linear',
-    warmup_iters=1000,
-    warmup_ratio=0.001,
-    step=[60000, 72000],
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
+        end=1000),
+    dict(
+        type='MultiStepLR',
+        begin=1000,
+        end=80000,
+        milestones=[60000, 72000],
+        by_epoch=False,
+    )
+]
 # In K-Net implementation we use batch size 2 per GPU as default
 train_dataloader = dict(batch_size=2, num_workers=2)
 val_dataloader = dict(batch_size=2, num_workers=2)

View File

@@ -85,14 +85,18 @@ default_hooks = dict(
         type='OptimizerHook',
         grad_clip=dict(max_norm=1, norm_type=2)))
 # learning policy
-lr_config = dict(
-    _delete_=True,
-    policy='step',
-    warmup='linear',
-    warmup_iters=1000,
-    warmup_ratio=0.001,
-    step=[60000, 72000],
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
+        end=1000),
+    dict(
+        type='MultiStepLR',
+        begin=1000,
+        end=80000,
+        milestones=[60000, 72000],
+        by_epoch=False,
+    )
+]
 # In K-Net implementation we use batch size 2 per GPU as default
 train_dataloader = dict(batch_size=2, num_workers=2)
 val_dataloader = dict(batch_size=2, num_workers=2)

View File

@@ -86,14 +86,18 @@ default_hooks = dict(
         type='OptimizerHook',
         grad_clip=dict(max_norm=1, norm_type=2)))
 # learning policy
-lr_config = dict(
-    _delete_=True,
-    policy='step',
-    warmup='linear',
-    warmup_iters=1000,
-    warmup_ratio=0.001,
-    step=[60000, 72000],
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
+        end=1000),
+    dict(
+        type='MultiStepLR',
+        begin=1000,
+        end=80000,
+        milestones=[60000, 72000],
+        by_epoch=False,
+    )
+]
 # In K-Net implementation we use batch size 2 per GPU as default
 train_dataloader = dict(batch_size=2, num_workers=2)
 val_dataloader = dict(batch_size=2, num_workers=2)

View File

@@ -54,14 +54,18 @@ default_hooks = dict(
         type='OptimizerHook',
         grad_clip=dict(max_norm=1, norm_type=2)))
 # learning policy
-lr_config = dict(
-    _delete_=True,
-    policy='step',
-    warmup='linear',
-    warmup_iters=1000,
-    warmup_ratio=0.001,
-    step=[60000, 72000],
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
+        end=1000),
+    dict(
+        type='MultiStepLR',
+        begin=1000,
+        end=80000,
+        milestones=[60000, 72000],
+        by_epoch=False,
+    )
+]
 # In K-Net implementation we use batch size 2 per GPU as default
 train_dataloader = dict(batch_size=2, num_workers=2)
 val_dataloader = dict(batch_size=2, num_workers=2)

View File

@@ -35,15 +35,18 @@ optim_wrapper = dict(
     paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.65),
     constructor='LayerDecayOptimizerConstructor')
 
-lr_config = dict(
-    _delete_=True,
-    policy='poly',
-    warmup='linear',
-    warmup_iters=1500,
-    warmup_ratio=1e-6,
-    power=1.0,
-    min_lr=0.0,
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
+    dict(
+        type='PolyLR',
+        eta_min=0.0,
+        power=1.0,
+        begin=1500,
+        end=160000,
+        by_epoch=False,
+    )
+]
 
 # mixed precision
 fp16 = dict(loss_scale='dynamic')

View File

@@ -5,4 +5,12 @@ _base_ = [
 model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0005)
 optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
-lr_config = dict(min_lr=2e-4)
+param_scheduler = [
+    dict(
+        type='PolyLR',
+        eta_min=2e-4,
+        power=0.9,
+        begin=0,
+        end=40000,
+        by_epoch=False)
+]

View File

@@ -5,4 +5,12 @@ _base_ = [
 model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0005)
 optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
-lr_config = dict(min_lr=2e-4)
+param_scheduler = [
+    dict(
+        type='PolyLR',
+        eta_min=2e-4,
+        power=0.9,
+        begin=0,
+        end=40000,
+        by_epoch=False)
+]

View File

@@ -2,4 +2,14 @@ _base_ = [
     '../_base_/models/pointrend_r50.py', '../_base_/datasets/cityscapes.py',
     '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
 ]
-lr_config = dict(warmup='linear', warmup_iters=200)
+param_scheduler = [
+    dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=200),
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        begin=200,
+        end=80000,
+        by_epoch=False,
+    )
+]

View File

@@ -29,4 +29,14 @@ model = dict(decode_head=[
         loss_decode=dict(
             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
 ])
-lr_config = dict(warmup='linear', warmup_iters=200)
+param_scheduler = [
+    dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=200),
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        begin=200,
+        end=160000,
+        by_epoch=False,
+    )
+]

View File

@@ -20,11 +20,15 @@ default_hooks = dict(
         type='OptimizerHook',
         grad_clip=dict(max_norm=1, norm_type=2)))
 # learning policy
-lr_config = dict(
-    _delete_=True,
-    policy='step',
-    warmup='linear',
-    warmup_iters=1000,
-    warmup_ratio=0.001,
-    step=[60000, 72000],
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
+        end=1000),
+    dict(
+        type='MultiStepLR',
+        begin=1000,
+        end=80000,
+        by_epoch=False,
+        milestones=[60000, 72000],
+    )
+]

View File

@@ -18,11 +18,15 @@ default_hooks = dict(
         type='OptimizerHook',
         grad_clip=dict(max_norm=1, norm_type=2)))
 # learning policy
-lr_config = dict(
-    _delete_=True,
-    policy='step',
-    warmup='linear',
-    warmup_iters=1000,
-    warmup_ratio=0.001,
-    step=[60000, 72000],
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
+        end=1000),
+    dict(
+        type='MultiStepLR',
+        begin=1000,
+        end=80000,
+        by_epoch=False,
+        milestones=[60000, 72000],
+    )
+]

View File

@@ -24,15 +24,18 @@ optim_wrapper = dict(
         'head': dict(lr_mult=10.)
     }))
-lr_config = dict(
-    _delete_=True,
-    policy='poly',
-    warmup='linear',
-    warmup_iters=1500,
-    warmup_ratio=1e-6,
-    power=1.0,
-    min_lr=0.0,
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
+    dict(
+        type='PolyLR',
+        eta_min=0.0,
+        power=1.0,
+        begin=1500,
+        end=160000,
+        by_epoch=False,
+    )
+]
 
 train_dataloader = dict(batch_size=2, num_workers=2)
 val_dataloader = dict(batch_size=2, num_workers=2)
 test_dataloader = val_dataloader

View File

@@ -26,15 +26,20 @@ optim_wrapper = dict(
         'norm': dict(decay_mult=0.),
         'head': dict(lr_mult=10.)
     }))
-lr_config = dict(
-    _delete_=True,
-    policy='poly',
-    warmup='linear',
-    warmup_iters=1500,
-    warmup_ratio=1e-6,
-    power=1.0,
-    min_lr=0.0,
-    by_epoch=False)
+
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
+    dict(
+        type='PolyLR',
+        eta_min=0.0,
+        power=1.0,
+        begin=1500,
+        end=160000,
+        by_epoch=False,
+    )
+]
+
 train_dataloader = dict(batch_size=1, num_workers=1)
 val_dataloader = dict(batch_size=1, num_workers=1)
 test_dataloader = val_dataloader

View File

@@ -2,7 +2,17 @@ _base_ = [
     '../_base_/models/stdc.py', '../_base_/datasets/cityscapes.py',
     '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
 ]
-lr_config = dict(warmup='linear', warmup_iters=1000)
+param_scheduler = [
+    dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000),
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        begin=1000,
+        end=80000,
+        by_epoch=False,
+    )
+]
 train_dataloader = dict(batch_size=12, num_workers=4)
 val_dataloader = dict(batch_size=12, num_workers=4)
 test_dataloader = val_dataloader

View File

@@ -35,15 +35,18 @@ optim_wrapper = dict(
         'norm': dict(decay_mult=0.)
     }))
 
-lr_config = dict(
-    _delete_=True,
-    policy='poly',
-    warmup='linear',
-    warmup_iters=1500,
-    warmup_ratio=1e-6,
-    power=1.0,
-    min_lr=0.0,
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
+    dict(
+        type='PolyLR',
+        eta_min=0.0,
+        power=1.0,
+        begin=1500,
+        end=160000,
+        by_epoch=False,
+    )
+]
 
 # By default, models are trained on 8 GPUs with 2 images per GPU
 train_dataloader = dict(batch_size=2)

View File

@@ -19,12 +19,15 @@ optim_wrapper = dict(
         'norm': dict(decay_mult=0.)
     }))
 
-lr_config = dict(
-    _delete_=True,
-    policy='poly',
-    warmup='linear',
-    warmup_iters=1500,
-    warmup_ratio=1e-6,
-    power=1.0,
-    min_lr=0.0,
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
+    dict(
+        type='PolyLR',
+        eta_min=0.0,
+        power=1.0,
+        begin=1500,
+        end=160000,
+        by_epoch=False,
+    )
+]

View File

@@ -34,15 +34,18 @@ optim_wrapper = dict(
         'norm': dict(decay_mult=0.)
     }))
 
-lr_config = dict(
-    _delete_=True,
-    policy='poly',
-    warmup='linear',
-    warmup_iters=1500,
-    warmup_ratio=1e-6,
-    power=1.0,
-    min_lr=0.0,
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
+    dict(
+        type='PolyLR',
+        eta_min=0.0,
+        power=1.0,
+        begin=1500,
+        end=160000,
+        by_epoch=False,
+    )
+]
 
 train_dataloader = dict(batch_size=2, num_workers=2)
 val_dataloader = dict(batch_size=2, num_workers=2)

View File

@@ -4,4 +4,3 @@ _base_ = [
     '../_base_/schedules/schedule_40k.py'
 ]
 model = dict(test_cfg=dict(crop_size=(128, 128), stride=(85, 85)))
-evaluation = dict(metric='mDice')
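As with the schedule files above, the dropped `evaluation = dict(metric='mDice')` in this and the following files is not replaced inline; the metric presumably moves to an evaluator config along these lines (assumed, not shown in this diff):

val_evaluator = dict(type='IoUMetric', iou_metrics=['mDice'])  # assumed
test_evaluator = val_evaluator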

View File

@@ -3,4 +3,3 @@ _base_ = [
     '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
 ]
 model = dict(test_cfg=dict(crop_size=(128, 128), stride=(85, 85)))
-evaluation = dict(metric='mDice')

View File

@@ -3,4 +3,3 @@ _base_ = [
     '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
 ]
 model = dict(test_cfg=dict(crop_size=(256, 256), stride=(170, 170)))
-evaluation = dict(metric='mDice')

View File

@@ -3,4 +3,3 @@ _base_ = [
     '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
 ]
 model = dict(test_cfg=dict(crop_size=(64, 64), stride=(42, 42)))
-evaluation = dict(metric='mDice')

View File

@@ -3,4 +3,3 @@ _base_ = [
     '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
 ]
 model = dict(test_cfg=dict(crop_size=(128, 128), stride=(85, 85)))
-evaluation = dict(metric='mDice')

View File

@@ -3,4 +3,3 @@ _base_ = [
     '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
 ]
 model = dict(test_cfg=dict(crop_size=(128, 128), stride=(85, 85)))
-evaluation = dict(metric='mDice')

View File

@@ -3,4 +3,3 @@ _base_ = [
     '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
 ]
 model = dict(test_cfg=dict(crop_size=(256, 256), stride=(170, 170)))
-evaluation = dict(metric='mDice')

View File

@@ -3,4 +3,3 @@ _base_ = [
     '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
 ]
 model = dict(test_cfg=dict(crop_size=(64, 64), stride=(42, 42)))
-evaluation = dict(metric='mDice')

View File

@@ -4,4 +4,3 @@ _base_ = [
     '../_base_/schedules/schedule_40k.py'
 ]
 model = dict(test_cfg=dict(crop_size=(128, 128), stride=(85, 85)))
-evaluation = dict(metric='mDice')

View File

@@ -3,4 +3,3 @@ _base_ = [
     '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
 ]
 model = dict(test_cfg=dict(crop_size=(128, 128), stride=(85, 85)))
-evaluation = dict(metric='mDice')

View File

@@ -3,4 +3,3 @@ _base_ = [
     '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
 ]
 model = dict(test_cfg=dict(crop_size=(256, 256), stride=(170, 170)))
-evaluation = dict(metric='mDice')

View File

@@ -3,4 +3,3 @@ _base_ = [
     '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
 ]
 model = dict(test_cfg=dict(crop_size=(64, 64), stride=(42, 42)))
-evaluation = dict(metric='mDice')

View File

@@ -29,15 +29,18 @@ optim_wrapper = dict(
         'norm': dict(decay_mult=0.)
     }))
 
-lr_config = dict(
-    _delete_=True,
-    policy='poly',
-    warmup='linear',
-    warmup_iters=1500,
-    warmup_ratio=1e-6,
-    power=1.0,
-    min_lr=0.0,
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
+    dict(
+        type='PolyLR',
+        eta_min=0.0,
+        power=1.0,
+        begin=1500,
+        end=160000,
+        by_epoch=False,
+    )
+]
 
 # By default, models are trained on 8 GPUs with 2 images per GPU
 train_dataloader = dict(batch_size=2)

View File

@@ -28,15 +28,18 @@ optim_wrapper = dict(
         'norm': dict(decay_mult=0.)
     }))
 
-lr_config = dict(
-    _delete_=True,
-    policy='poly',
-    warmup='linear',
-    warmup_iters=1500,
-    warmup_ratio=1e-6,
-    power=1.0,
-    min_lr=0.0,
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
+    dict(
+        type='PolyLR',
+        eta_min=0.0,
+        power=1.0,
+        begin=1500,
+        end=160000,
+        by_epoch=False,
+    )
+]
 
 # By default, models are trained on 8 GPUs with 2 images per GPU
 train_dataloader = dict(batch_size=2)

View File

@@ -28,15 +28,18 @@ optim_wrapper = dict(
         'norm': dict(decay_mult=0.)
     }))
 
-lr_config = dict(
-    _delete_=True,
-    policy='poly',
-    warmup='linear',
-    warmup_iters=1500,
-    warmup_ratio=1e-6,
-    power=1.0,
-    min_lr=0.0,
-    by_epoch=False)
+param_scheduler = [
+    dict(
+        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
+    dict(
+        type='PolyLR',
+        eta_min=0.0,
+        power=1.0,
+        begin=1500,
+        end=80000,
+        by_epoch=False,
+    )
+]
 
 # By default, models are trained on 8 GPUs with 2 images per GPU
 train_dataloader = dict(batch_size=2)