Mirror of https://github.com/huggingface/pytorch-image-models.git (synced 2025-06-03 15:01:08 +08:00)
* Add MADGRAD code
* Fix Lamb (non-fused variant) to work w/ PyTorch XLA
* Tweak optimizer factory args (lr/learning_rate and opt/optimizer_name), may break compat (see the sketch after this list)
* Use newer fn signatures for all add, addcdiv, addcmul in optimizers
* Use upcoming PyTorch native Nadam if it's available
* Cleanup lookahead opt
* Add optimizer tests
* Remove novograd.py impl as it was messy, keep nvnovograd
* Make AdamP/SGDP work in channels_last layout
* Add rectified adabelief mode (radabelief)
* Support a few more PyTorch optim: adamax, adagrad
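The factory-arg tweak above changes how the optimizer name and learning rate are passed to the optimizer factory. A minimal usage sketch, assuming the factory accepts the optimizer by string name via `opt=` and the learning rate via `lr=`; the toy model and hyperparameter values are illustrative placeholders, not recommendations:

import torch
from timm.optim import create_optimizer_v2

model = torch.nn.Linear(10, 2)  # toy model standing in for any nn.Module

# 'radabelief' selects the rectified AdaBelief mode mentioned in this commit;
# keyword names follow the renamed factory args (opt=, lr=).
optimizer = create_optimizer_v2(model, opt='radabelief', lr=1e-3, weight_decay=0.01)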
13 lines · 409 B · Python
from .adamp import AdamP
from .adamw import AdamW
from .adafactor import Adafactor
from .adahessian import Adahessian
from .lookahead import Lookahead
from .nadam import Nadam
from .nvnovograd import NvNovoGrad
from .radam import RAdam
from .rmsprop_tf import RMSpropTF
from .sgdp import SGDP
from .adabelief import AdaBelief
from .optim_factory import create_optimizer, create_optimizer_v2, optimizer_kwargs
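The exported classes can also be used directly without the factory. A small sketch assuming Lookahead keeps its usual wrapper interface (a base optimizer plus alpha/k arguments; the values shown are common defaults, not something this commit prescribes):

import torch
from timm.optim import RAdam, Lookahead

model = torch.nn.Linear(10, 2)  # toy model for illustration

# Wrap a base optimizer with Lookahead; alpha/k here are assumed illustrative values.
base = RAdam(model.parameters(), lr=1e-3)
optimizer = Lookahead(base, alpha=0.5, k=6)

loss = model(torch.randn(4, 10)).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()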