refine code and docs

pull/1819/head
HydrogenSulfate 2022-05-05 22:14:07 +08:00
parent 1c31010b14
commit 790815f430
5 changed files with 11 additions and 29 deletions


@@ -33,8 +33,8 @@ def get_architectures():

 def get_blacklist_model_in_static_mode():
-    from ppcls.arch.backbone import (distilled_vision_transformer,
-                                     vision_transformer)
+    from ppcls.arch.backbone import distilled_vision_transformer
+    from ppcls.arch.backbone import vision_transformer
     blacklist = distilled_vision_transformer.__all__ + vision_transformer.__all__
     return blacklist
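
The blacklist above is simply the concatenation of the `__all__` lists exported by the two ViT modules, so every model name they export is excluded from static mode. A minimal, self-contained sketch of the same pattern; the model names below are illustrative placeholders, not the modules' real `__all__` contents:

import types

# Stand-ins for the two backbone modules; names here are hypothetical.
distilled_vision_transformer = types.SimpleNamespace(
    __all__=["DeiT_tiny_patch16_224"])
vision_transformer = types.SimpleNamespace(
    __all__=["ViT_base_patch16_224"])

blacklist = distilled_vision_transformer.__all__ + vision_transformer.__all__
print(blacklist)  # ['DeiT_tiny_patch16_224', 'ViT_base_patch16_224']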
@@ -60,10 +60,10 @@ def get_param_attr_dict(ParamAttr_config: Union[None, bool, Dict[str, Dict]]):
     """parse ParamAttr from an dict
     Args:
-        ParamAttr_config (Union[bool, Dict[str, Dict]]): ParamAttr_config
+        ParamAttr_config (Union[None, bool, Dict[str, Dict]]): ParamAttr configure
     Returns:
-        Union[bool, paddle.ParamAttr]: Generated ParamAttr
+        Union[None, bool, paddle.ParamAttr]: Generated ParamAttr
     """
     if ParamAttr_config is None:
         return None
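
Per the corrected docstring, the helper passes None and bool through unchanged and only builds a paddle.ParamAttr when given a dict. A minimal sketch of that contract, assuming the dict holds paddle.ParamAttr keyword arguments plus an optional nested regularizer config; the real PaddleClas key handling may differ:

from typing import Dict, Union

import paddle


def get_param_attr_sketch(cfg: Union[None, bool, Dict]
                          ) -> Union[None, bool, paddle.ParamAttr]:
    # None / bool pass through, matching the docstring above.
    if cfg is None or isinstance(cfg, bool):
        return cfg
    cfg = dict(cfg)
    # Assumed convention: {'name': 'L2', 'coeff': 1e-4} -> paddle.regularizer.L2Decay(1e-4)
    reg_cfg = cfg.get("regularizer")
    if isinstance(reg_cfg, dict):
        reg_cfg = dict(reg_cfg)
        reg_name = reg_cfg.pop("name") + "Decay"
        cfg["regularizer"] = getattr(paddle.regularizer, reg_name)(**reg_cfg)
    return paddle.ParamAttr(**cfg)


attr = get_param_attr_sketch({"learning_rate": 0.1,
                              "regularizer": {"name": "L2", "coeff": 1e-4}})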


@@ -22,7 +22,6 @@ import six
 import math
 import random
 import cv2
-from typing import Sequence
 import numpy as np
 from PIL import Image, ImageOps, __version__ as PILLOW_VERSION
 from paddle.vision.transforms import ColorJitter as RawColorJitter


@@ -23,8 +23,9 @@ import paddle.nn as nn

 class CenterLoss(nn.Layer):
-    """Center loss class
+    """Center loss
+    paper : [A Discriminative Feature Learning Approach for Deep Face Recognition](https://link.springer.com/content/pdf/10.1007%2F978-3-319-46478-7_31.pdf)
+    code reference: https://github.com/michuanhaohao/reid-strong-baseline/blob/master/layers/center_loss.py#L7
     Args:
         num_classes (int): number of classes.
         feat_dim (int): number of feature dimensions.
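
The new paper link points at the center-loss formulation L = 1/2 * sum_i ||x_i - c_{y_i}||^2: each feature is pulled toward a learnable center for its class. A minimal paddle sketch of that formula, as a simplified stand-in rather than the PaddleClas implementation being patched here:

import paddle
import paddle.nn as nn


class CenterLossSketch(nn.Layer):
    """Simplified center loss: halved squared distance to each sample's class center."""

    def __init__(self, num_classes: int, feat_dim: int):
        super().__init__()
        # Learnable class centers, one row per class: [num_classes, feat_dim].
        self.centers = self.create_parameter(shape=[num_classes, feat_dim])

    def forward(self, feats, labels):
        # Select each sample's class center: [batch, feat_dim].
        batch_centers = paddle.gather(self.centers, labels, axis=0)
        # 0.5 * squared Euclidean distance, averaged over the batch.
        return 0.5 * paddle.sum((feats - batch_centers) ** 2, axis=1).mean()


loss_fn = CenterLossSketch(num_classes=10, feat_dim=128)
loss = loss_fn(paddle.randn([4, 128]), paddle.to_tensor([1, 3, 3, 7]))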


@@ -71,7 +71,7 @@ def build_optimizer(config, epochs, step_each_epoch, model_list=None):
         optim_cfg = optim_item[optim_name]  # get optim_cfg
         lr = build_lr_scheduler(optim_cfg.pop('lr'), epochs, step_each_epoch)
-        logger.info("build lr ({}) for scope ({}) success..".format(
+        logger.debug("build lr ({}) for scope ({}) success..".format(
             lr, optim_scope))
         # step2 build regularization
         if 'regularizer' in optim_cfg and optim_cfg['regularizer'] is not None:
@@ -83,7 +83,7 @@ def build_optimizer(config, epochs, step_each_epoch, model_list=None):
             reg_name = reg_config.pop('name') + 'Decay'
             reg = getattr(paddle.regularizer, reg_name)(**reg_config)
             optim_cfg["weight_decay"] = reg
-            logger.info("build regularizer ({}) for scope ({}) success..".
+            logger.debug("build regularizer ({}) for scope ({}) success..".
                 format(reg, optim_scope))
         # step3 build optimizer
         if 'clip_norm' in optim_cfg:
@@ -123,7 +123,7 @@ def build_optimizer(config, epochs, step_each_epoch, model_list=None):
         optim = getattr(optimizer, optim_name)(
             learning_rate=lr, grad_clip=grad_clip,
             **optim_cfg)(model_list=optim_model)
-        logger.info("build optimizer ({}) for scope ({}) success..".format(
+        logger.debug("build optimizer ({}) for scope ({}) success..".format(
             optim, optim_scope))
         optim_list.append(optim)
         lr_list.append(lr)
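
These three log lines fire once per optimizer scope, which is why they drop from info to debug. For orientation, build_optimizer walks a list with one entry per scope, each keyed by the optimizer name; below is a hypothetical config in the shape the code reads, with keys inferred from the pops and lookups above rather than copied from a shipped YAML:

# Hypothetical per-scope optimizer config matching the access pattern above.
optim_config = [
    {
        "Momentum": {
            "scope": "model",                                 # which sub-model to optimize
            "lr": {"name": "Cosine", "learning_rate": 0.04},  # consumed by build_lr_scheduler
            "momentum": 0.9,
            "regularizer": {"name": "L2", "coeff": 1e-4},     # -> paddle.regularizer.L2Decay
            "clip_norm": 10.0,                                # -> gradient clipping by norm
        }
    },
]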


@@ -262,24 +262,6 @@ class Piecewise(object):
         return learning_rate


-class Constant(LRScheduler):
-    """
-    Constant learning rate
-    Args:
-        lr (float): The initial learning rate. It is a python float number.
-        last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
-    """
-
-    def __init__(self, learning_rate, last_epoch=-1, by_epoch=False, **kwargs):
-        self.learning_rate = learning_rate
-        self.last_epoch = last_epoch
-        self.by_epoch = by_epoch
-        super().__init__()
-
-    def get_lr(self):
-        return self.learning_rate
-
-
 class MultiStepDecay(LRScheduler):
     """
     Update the learning rate by ``gamma`` once ``epoch`` reaches one of the milestones.
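
The deleted Constant class only ever returned its initial rate, a behavior a plain float learning rate passed to an optimizer already provides. For the surviving MultiStepDecay, paddle ships a built-in scheduler with the same milestone/gamma semantics; a short usage sketch:

import paddle

# Built-in equivalent of the MultiStepDecay semantics documented above:
# multiply the lr by gamma each time the epoch count reaches a milestone.
sched = paddle.optimizer.lr.MultiStepDecay(
    learning_rate=0.1, milestones=[30, 60], gamma=0.1)

for epoch in range(90):
    # ... one epoch of training with an optimizer that was given `sched` ...
    sched.step()  # lr: 0.1 for epochs 0-29, 0.01 for 30-59, 0.001 from 60 on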