# mirror of https://github.com/RE-OWOD/RE-OWOD
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import math
from typing import List
import torch

from detectron2.solver.lr_scheduler import _get_warmup_factor_at_iter

# NOTE: PyTorch's LR scheduler interface uses names that assume the LR changes
# only on epoch boundaries. We typically use iteration-based schedules instead.
# As a result, "epoch" (e.g., as in self.last_epoch) should be understood to
# mean "iteration" instead.

# FIXME: ideally this would be achieved with a CombinedLRScheduler that
# separates MultiStepLR from WarmupLR, but the current LRScheduler design
# doesn't allow it.


class WarmupPolyLR(torch.optim.lr_scheduler._LRScheduler):
    """
    Poly learning rate schedule used to train DeepLab.
    Paper: DeepLab: Semantic Image Segmentation with Deep Convolutional Nets,
        Atrous Convolution, and Fully Connected CRFs.
    Reference: https://github.com/tensorflow/models/blob/21b73d22f3ed05b650e85ac50849408dd36de32e/research/deeplab/utils/train_utils.py#L337  # noqa
    """

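    # Summary of the schedule computed in get_lr() below (derived from the
    # code, not from upstream documentation): at iteration t,
    #   lr(t) = base_lr * warmup(t) * (1 - t / max_iters) ** power
    # where warmup(t) ramps from warmup_factor to 1.0 over the first
    # warmup_iters iterations (linearly, for the default warmup_method), and,
    # if constant_ending > 0, the decay factor is held at constant_ending once
    # it falls below that value.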
    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        max_iters: int,
        warmup_factor: float = 0.001,
        warmup_iters: int = 1000,
        warmup_method: str = "linear",
        last_epoch: int = -1,
        power: float = 0.9,
        constant_ending: float = 0.0,
    ):
        self.max_iters = max_iters
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        self.power = power
        self.constant_ending = constant_ending
        super().__init__(optimizer, last_epoch)

    def get_lr(self) -> List[float]:
        warmup_factor = _get_warmup_factor_at_iter(
            self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
        )
        if self.constant_ending > 0 and warmup_factor == 1.0:
            # Constant ending lr: once the poly decay factor drops below
            # constant_ending, hold the LR at base_lr * constant_ending.
            if (
                math.pow((1.0 - self.last_epoch / self.max_iters), self.power)
                < self.constant_ending
            ):
                return [base_lr * self.constant_ending for base_lr in self.base_lrs]
        return [
            base_lr * warmup_factor * math.pow((1.0 - self.last_epoch / self.max_iters), self.power)
            for base_lr in self.base_lrs
        ]

    def _compute_values(self) -> List[float]:
        # The new interface; delegate to get_lr().
        return self.get_lr()
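

# A minimal usage sketch (not part of the original file; the model, optimizer,
# and iteration counts below are illustrative assumptions). Note that the
# scheduler is stepped once per *iteration*, matching the NOTE at the top of
# this file, and get_last_lr() requires a reasonably recent PyTorch.
if __name__ == "__main__":
    import torch.nn as nn

    model = nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    scheduler = WarmupPolyLR(optimizer, max_iters=90000, warmup_iters=1000)
    for it in range(3):
        optimizer.step()  # one training iteration (loss/backward omitted)
        scheduler.step()  # advances last_epoch, i.e. the iteration counter
        print(it, scheduler.get_last_lr())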