mmselfsup/openselfsup/utils/optimizers.py
""" Layer-wise adaptive rate scaling for SGD in PyTorch! """
import torch
from torch.optim.optimizer import Optimizer, required
from torch.optim import *  # noqa: F401,F403 -- re-export torch's built-in optimizers


class LARS(Optimizer):
    r"""Implements layer-wise adaptive rate scaling (LARS) for SGD.

    Based on Algorithm 1 of `Large Batch Training of Convolutional Networks
    <https://arxiv.org/abs/1708.03888>`_ by You, Gitman, and Ginsburg.

    Args:
        params (iterable): iterable of parameters to optimize or dicts
            defining parameter groups.
        lr (float): base learning rate (\gamma_0).
        momentum (float, optional): momentum factor (default: 0.9) ("m").
        weight_decay (float, optional): weight decay (L2 penalty)
            (default: 0.0005) ("\beta").
        eta (float, optional): LARS coefficient (default: 0.001).

    Example:
        >>> optimizer = LARS(model.parameters(), lr=0.1, eta=1e-3)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()
    """

    def __init__(self,
                 params,
                 lr=required,
                 momentum=0.9,
                 weight_decay=0.0005,
                 eta=0.001):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError(
                "Invalid weight_decay value: {}".format(weight_decay))
        if eta < 0.0:
            raise ValueError("Invalid LARS coefficient value: {}".format(eta))
        defaults = dict(
            lr=lr, momentum=momentum, weight_decay=weight_decay, eta=eta)
        super(LARS, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            eta = group['eta']
            lr = group['lr']

            for p in group['params']:
                if p.grad is None:
                    continue
                param_state = self.state[p]
                d_p = p.grad.data
                weight_norm = torch.norm(p.data)
                grad_norm = torch.norm(d_p)
                # Compute the layer-wise (local) learning rate:
                # eta * ||w|| / (||grad|| + weight_decay * ||w||)
                local_lr = eta * weight_norm / \
                    (grad_norm + weight_decay * weight_norm)
                # Scale by the base learning rate, then update the momentum
                # buffer with the weight-decayed gradient.
                actual_lr = local_lr * lr
                if 'momentum_buffer' not in param_state:
                    buf = param_state['momentum_buffer'] = \
                        torch.zeros_like(p.data)
                else:
                    buf = param_state['momentum_buffer']
                buf.mul_(momentum).add_(
                    d_p + weight_decay * p.data, alpha=actual_lr)
                p.data.add_(-buf)
        return loss
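

# A minimal usage sketch (not part of the original module): it runs a couple
# of LARS steps on a toy linear model with synthetic data to show the expected
# call pattern. The model, batch size, and hyperparameter values below are
# illustrative assumptions, not settings taken from OpenSelfSup configs.
if __name__ == '__main__':
    model = torch.nn.Linear(10, 2)
    optimizer = LARS(
        model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0005,
        eta=0.001)
    criterion = torch.nn.MSELoss()
    for _ in range(2):
        inputs = torch.randn(4, 10)
        targets = torch.randn(4, 2)
        optimizer.zero_grad()
        loss = criterion(model(inputs), targets)
        loss.backward()
        optimizer.step()
        print(loss.item())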