From cc12db099c91432b08d5858c88b5dc065333257b Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Tue, 20 Sep 2022 12:01:52 +0800
Subject: [PATCH 1/2] fix Constant learning rate bug

---
 ppcls/optimizer/learning_rate.py | 29 ++++++++++++++++++++---------
 1 file changed, 20 insertions(+), 9 deletions(-)

diff --git a/ppcls/optimizer/learning_rate.py b/ppcls/optimizer/learning_rate.py
index 437ffe243..75a0c07ed 100644
--- a/ppcls/optimizer/learning_rate.py
+++ b/ppcls/optimizer/learning_rate.py
@@ -93,6 +93,25 @@ class LRBase(object):
         return warmup_lr
 
 
+class ConstantImpl(lr.LRScheduler):
+    """Constant learning rate Class implementation
+
+    Args:
+        learning_rate (float): The initial learning rate
+        last_epoch (int, optional): The index of last epoch. Default: -1.
+    """
+
+    def __init__(self, learning_rate, last_epoch=-1, **kwargs):
+        self.learning_rate = learning_rate
+        self.last_epoch = last_epoch
+        super(ConstantImpl, self).__init__()
+
+    def get_lr(self) -> float:
+        """always return the same learning rate
+        """
+        return self.learning_rate
+
+
 class Constant(LRBase):
     """Constant learning rate
 
@@ -120,17 +139,9 @@ class Constant(LRBase):
                                        last_epoch, by_epoch)
 
     def __call__(self):
-        learning_rate = lr.LRScheduler(
+        learning_rate = ConstantImpl(
             learning_rate=self.learning_rate, last_epoch=self.last_epoch)
 
-        def make_get_lr():
-            def get_lr(self):
-                return self.learning_rate
-
-            return get_lr
-
-        setattr(learning_rate, "get_lr", make_get_lr())
-
         if self.warmup_steps > 0:
             learning_rate = self.linear_warmup(learning_rate)
 

From 6125fa941be2cdb0661fe908389345e8b42e72e1 Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Tue, 20 Sep 2022 14:45:02 +0800
Subject: [PATCH 2/2] change Constant to ConstLR

---
 .../softmax_triplet_with_center.yaml |  2 +-
 ppcls/optimizer/learning_rate.py     | 14 +++++++-------
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml b/ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml
index b500fb203..70c70a99b 100644
--- a/ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml
+++ b/ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml
@@ -87,7 +87,7 @@ Optimizer:
   - SGD:
       scope: CenterLoss
       lr:
-        name: Constant
+        name: ConstLR
         learning_rate: 1000.0 # NOTE: set to ori_lr*(1/centerloss_weight) to avoid manually scaling centers' gradidents.
 
 # data loader for train and eval
diff --git a/ppcls/optimizer/learning_rate.py b/ppcls/optimizer/learning_rate.py
index 75a0c07ed..c8d87517e 100644
--- a/ppcls/optimizer/learning_rate.py
+++ b/ppcls/optimizer/learning_rate.py
@@ -93,7 +93,7 @@ class LRBase(object):
         return warmup_lr
 
 
-class ConstantImpl(lr.LRScheduler):
+class Constant(lr.LRScheduler):
     """Constant learning rate Class implementation
 
     Args:
@@ -104,7 +104,7 @@ class ConstantImpl(lr.LRScheduler):
     def __init__(self, learning_rate, last_epoch=-1, **kwargs):
         self.learning_rate = learning_rate
         self.last_epoch = last_epoch
-        super(ConstantImpl, self).__init__()
+        super(Constant, self).__init__()
 
     def get_lr(self) -> float:
         """always return the same learning rate
@@ -112,7 +112,7 @@ class ConstantImpl(lr.LRScheduler):
         return self.learning_rate
 
 
-class Constant(LRBase):
+class ConstLR(LRBase):
     """Constant learning rate
 
     Args:
@@ -134,12 +134,12 @@ class Constant(LRBase):
                  last_epoch=-1,
                  by_epoch=False,
                  **kwargs):
-        super(Constant, self).__init__(epochs, step_each_epoch, learning_rate,
-                                       warmup_epoch, warmup_start_lr,
-                                       last_epoch, by_epoch)
+        super(ConstLR, self).__init__(epochs, step_each_epoch, learning_rate,
+                                      warmup_epoch, warmup_start_lr,
+                                      last_epoch, by_epoch)
 
     def __call__(self):
-        learning_rate = ConstantImpl(
+        learning_rate = Constant(
             learning_rate=self.learning_rate, last_epoch=self.last_epoch)
 
         if self.warmup_steps > 0:
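
For readers less familiar with Paddle's scheduler API, the sketch below illustrates the pattern these two patches converge on: a constant schedule expressed as a paddle.optimizer.lr.LRScheduler subclass that overrides get_lr(), instead of monkey-patching a get_lr function onto a bare lr.LRScheduler instance as the old Constant.__call__ did. This is a minimal standalone sketch, not the PaddleClas code itself; the class name ConstantSketch and the demo at the bottom are hypothetical, and a PaddlePaddle 2.x installation is assumed.

# Minimal sketch (not PaddleClas code): a constant schedule as a proper
# LRScheduler subclass, mirroring the approach introduced by these patches.
# "ConstantSketch" is a hypothetical name chosen to avoid clashing with the
# Constant / ConstLR classes in the diff above.
from paddle.optimizer import lr


class ConstantSketch(lr.LRScheduler):
    """Always return the initial learning rate."""

    def __init__(self, learning_rate, last_epoch=-1, verbose=False):
        # The base class stores learning_rate as self.base_lr and consults
        # get_lr() whenever the schedule advances, so overriding get_lr()
        # is enough to keep the value fixed.
        super(ConstantSketch, self).__init__(learning_rate, last_epoch,
                                             verbose)

    def get_lr(self):
        # Same value regardless of how many times step() has been called.
        return self.base_lr


if __name__ == "__main__":
    # e.g. the CenterLoss learning rate from the yaml hunk above
    sched = ConstantSketch(learning_rate=1000.0)
    for _ in range(3):
        sched.step()
    print(sched.get_lr())  # still 1000.0

Note that after patch 2/2, configs that previously selected this scheduler with "name: Constant" should reference "name: ConstLR" instead, as the yaml hunk above shows.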