mirror of https://github.com/JDAI-CV/fast-reid.git
parent 15e1729a27
commit 7e83d3175f

README.md
@@ -4,13 +4,14 @@ FastReID is a research platform that implements state-of-the-art re-identification

 ## What's New

-- [Oct 2020] Added the [Hyper-Parameter Optimization](https://github.com/JDAI-CV/fast-reid/tree/master/projects/HPOReID) based on fastreid. See `projects/HPOReID`.
-- [Sep 2020] Added the [person attribute recognition](https://github.com/JDAI-CV/fast-reid/tree/master/projects/attribute_recognition) based on fastreid. See `projects/attribute_recognition`.
-- [Sep 2020] Automatic Mixed Precision training is supported with pytorch1.6 built-in `torch.cuda.amp`. Set `cfg.SOLVER.AMP_ENABLED=True` to switch it on.
-- [Aug 2020] [Model Distillation](https://github.com/JDAI-CV/fast-reid/tree/master/projects/DistillReID) is supported, thanks for [guan'an wang](https://github.com/wangguanan)'s contribution.
+- [Jan 2021] FastReID V1.0 has been released!🎉
+  Supports many tasks beyond reid, such as image retrieval and face recognition. See [projects](https://github.com/JDAI-CV/fast-reid/tree/master/projects).
+- [Oct 2020] Added the [Hyper-Parameter Optimization](https://github.com/JDAI-CV/fast-reid/tree/master/projects/FastTune) project based on fastreid. See `projects/FastTune`.
+- [Sep 2020] Added the [person attribute recognition](https://github.com/JDAI-CV/fast-reid/tree/master/projects/FastAttr) project based on fastreid. See `projects/FastAttr`.
+- [Sep 2020] Automatic Mixed Precision training is supported with `apex`. Set `cfg.SOLVER.FP16_ENABLED=True` to switch it on.
+- [Aug 2020] [Model Distillation](https://github.com/JDAI-CV/fast-reid/tree/master/projects/FastDistill) is supported, thanks to [guan'an wang](https://github.com/wangguanan)'s contribution.
 - [Aug 2020] An ONNX/TensorRT converter is supported.
 - [Jul 2020] Distributed training with multiple GPUs is supported; it trains much faster.
 - [Jul 2020] `MAX_ITER` in the config means `epochs`; it is auto-scaled to the maximum number of iterations.
 - Includes more features, such as circle loss, abundant visualization methods and evaluation metrics, SoTA results on conventional, cross-domain, partial and vehicle re-id, and testing on multiple datasets simultaneously.
 - Can be used as a library to support [different projects](https://github.com/JDAI-CV/fast-reid/tree/master/projects) on top of it. We'll open source more research projects in this way.
 - Removes the [ignite](https://github.com/pytorch/ignite) (a high-level library) dependency; powered directly by [PyTorch](https://pytorch.org/).
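
Note that the AMP entry changed between the two versions: the old bullet used PyTorch 1.6's built-in `torch.cuda.amp` behind `cfg.SOLVER.AMP_ENABLED`, while V1.0 switches to `apex` behind `cfg.SOLVER.FP16_ENABLED`. For context, a minimal sketch of the built-in `torch.cuda.amp` training step the old bullet refers to; the model, optimizer, and data names are placeholders, not fastreid's trainer API:

```python
import torch
from torch.cuda.amp import GradScaler, autocast

# Placeholder model/optimizer; fastreid builds these from its config.
model = torch.nn.Linear(2048, 751).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
scaler = GradScaler()  # rescales the loss so fp16 gradients do not underflow

def train_step(images, targets):
    optimizer.zero_grad()
    with autocast():                 # forward pass runs in mixed precision
        logits = model(images)
        loss = torch.nn.functional.cross_entropy(logits, targets)
    scaler.scale(loss).backward()    # backward on the scaled loss
    scaler.step(optimizer)           # unscales gradients, then steps
    scaler.update()                  # adapts the loss scale for the next step
    return loss.item()
```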
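The `MAX_ITER` bullet above describes an epoch-to-iteration conversion. A back-of-the-envelope sketch of that scaling, with illustrative numbers rather than fastreid's actual variables:

```python
# MAX_ITER in the config is interpreted as epochs and auto-scaled
# to an iteration count from the dataset and batch size.
num_images = 12936          # e.g. the Market-1501 training set
batch_size = 64
iters_per_epoch = (num_images + batch_size - 1) // batch_size  # ceil division
max_epochs = 120            # the value written as MAX_ITER
max_iters = max_epochs * iters_per_epoch
print(max_iters)            # 24360
```
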
@@ -13,7 +13,6 @@ import sys
 from yacs.config import CfgNode as CfgNode

-
 # Global config object
 _C = CfgNode()


@@ -21,7 +20,6 @@ _C = CfgNode()
 # from core.config import cfg
 cfg = _C

-
 # ------------------------------------------------------------------------------------ #
 # Model options
 # ------------------------------------------------------------------------------------ #
@@ -39,7 +37,6 @@ _C.MODEL.NUM_CLASSES = 10
 # Loss function (see pycls/models/loss.py for options)
 _C.MODEL.LOSS_FUN = "cross_entropy"

-
 # ------------------------------------------------------------------------------------ #
 # ResNet options
 # ------------------------------------------------------------------------------------ #


@@ -57,7 +54,6 @@ _C.RESNET.WIDTH_PER_GROUP = 64
 # Apply stride to 1x1 conv (True -> MSRA; False -> fb.torch)
 _C.RESNET.STRIDE_1X1 = True

-
 # ------------------------------------------------------------------------------------ #
 # AnyNet options
 # ------------------------------------------------------------------------------------ #
@@ -93,7 +89,6 @@ _C.ANYNET.SE_ON = False
 # SE ratio
 _C.ANYNET.SE_R = 0.25

-
 # ------------------------------------------------------------------------------------ #
 # RegNet options
 # ------------------------------------------------------------------------------------ #


@@ -133,7 +128,6 @@ _C.REGNET.GROUP_W = 16
 # Bottleneck multiplier (bm = 1 / b from the paper)
 _C.REGNET.BOT_MUL = 1.0

-
 # ------------------------------------------------------------------------------------ #
 # EfficientNet options
 # ------------------------------------------------------------------------------------ #
@@ -169,7 +163,6 @@ _C.EN.DC_RATIO = 0.0
 # Dropout ratio
 _C.EN.DROPOUT_RATIO = 0.0

-
 # ------------------------------------------------------------------------------------ #
 # Batch norm options
 # ------------------------------------------------------------------------------------ #


@@ -192,7 +185,6 @@ _C.BN.ZERO_INIT_FINAL_GAMMA = False
 _C.BN.USE_CUSTOM_WEIGHT_DECAY = False
 _C.BN.CUSTOM_WEIGHT_DECAY = 0.0

-
 # ------------------------------------------------------------------------------------ #
 # Optimizer options
 # ------------------------------------------------------------------------------------ #
@@ -234,7 +226,6 @@ _C.OPTIM.WARMUP_FACTOR = 0.1
 # Gradually warm up the OPTIM.BASE_LR over this number of epochs
 _C.OPTIM.WARMUP_EPOCHS = 0

-
 # ------------------------------------------------------------------------------------ #
 # Training options
 # ------------------------------------------------------------------------------------ #


@@ -262,7 +253,6 @@ _C.TRAIN.AUTO_RESUME = True
 # Weights to start training from
 _C.TRAIN.WEIGHTS = ""

-
 # ------------------------------------------------------------------------------------ #
 # Testing options
 # ------------------------------------------------------------------------------------ #
@@ -281,7 +271,6 @@ _C.TEST.IM_SIZE = 256
 # Weights to use for testing
 _C.TEST.WEIGHTS = ""

-
 # ------------------------------------------------------------------------------------ #
 # Common train/test data loader options
 # ------------------------------------------------------------------------------------ #


@@ -293,7 +282,6 @@ _C.DATA_LOADER.NUM_WORKERS = 8
 # Load data to pinned host memory
 _C.DATA_LOADER.PIN_MEMORY = True

-
 # ------------------------------------------------------------------------------------ #
 # Memory options
 # ------------------------------------------------------------------------------------ #
@@ -302,7 +290,6 @@ _C.MEM = CfgNode()
 # Perform ReLU inplace
 _C.MEM.RELU_INPLACE = True

-
 # ------------------------------------------------------------------------------------ #
 # CUDNN options
 # ------------------------------------------------------------------------------------ #


@@ -313,7 +300,6 @@ _C.CUDNN = CfgNode()
 # in overall speedups when variable size inputs are used (e.g. COCO training)
 _C.CUDNN.BENCHMARK = True

-
 # ------------------------------------------------------------------------------------ #
 # Precise timing options
 # ------------------------------------------------------------------------------------ #
@@ -325,7 +311,6 @@ _C.PREC_TIME.WARMUP_ITER = 3
 # Number of iterations to compute avg time
 _C.PREC_TIME.NUM_ITER = 30

-
 # ------------------------------------------------------------------------------------ #
 # Misc options
 # ------------------------------------------------------------------------------------ #


@@ -359,7 +344,6 @@ _C.PORT_RANGE = [10000, 65000]
 # Models weights referred to by URL are downloaded to this local cache
 _C.DOWNLOAD_CACHE = "/tmp/pycls-download-cache"

-
 # ------------------------------------------------------------------------------------ #
 # Deprecated keys
 # ------------------------------------------------------------------------------------ #
@@ -369,7 +353,7 @@ _C.register_deprecated_key("PREC_TIME.ENABLED")
 _C.register_deprecated_key("PORT")


-def assert_and_infer_cfg(cache_urls=True):
+def assert_and_infer_cfg():
     """Checks config values invariants."""
     err_str = "The first lr step must start at 0"
     assert not _C.OPTIM.STEPS or _C.OPTIM.STEPS[0] == 0, err_str
@@ -382,14 +366,6 @@ def assert_and_infer_cfg(cache_urls=True):
     assert _C.TEST.BATCH_SIZE % _C.NUM_GPUS == 0, err_str
     err_str = "Log destination '{}' not supported"
     assert _C.LOG_DEST in ["stdout", "file"], err_str.format(_C.LOG_DEST)
-    if cache_urls:
-        cache_cfg_urls()
-
-
-def cache_cfg_urls():
-    """Download URLs in config, cache them, and rewrite cfg to use cached file."""
-    _C.TRAIN.WEIGHTS = cache_url(_C.TRAIN.WEIGHTS, _C.DOWNLOAD_CACHE)
-    _C.TEST.WEIGHTS = cache_url(_C.TEST.WEIGHTS, _C.DOWNLOAD_CACHE)


 def dump_cfg():
@@ -417,4 +393,4 @@ def load_cfg_fom_args(description="Config file options."):
         sys.exit(1)
     args = parser.parse_args()
     _C.merge_from_file(args.cfg_file)
-    _C.merge_from_list(args.opts)
+    _C.merge_from_list(args.opts)
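
For reference, a minimal sketch of how a yacs `CfgNode` config like the one above is consumed; the keys mirror the diff, while the override values are invented for the example:

```python
from yacs.config import CfgNode as CfgNode

# A tiny stand-in for the full config defined above.
_C = CfgNode()
_C.NUM_GPUS = 1
_C.OPTIM = CfgNode()
_C.OPTIM.BASE_LR = 0.1
_C.OPTIM.STEPS = []

cfg = _C.clone()
# load_cfg_fom_args performs the same two merges with argparse results:
# cfg.merge_from_file(args.cfg_file) reads a YAML file of overrides, and
# merge_from_list applies KEY/value pairs from the command line.
cfg.merge_from_list(["OPTIM.BASE_LR", 0.01, "NUM_GPUS", 2])

# The invariant checked by assert_and_infer_cfg:
assert not cfg.OPTIM.STEPS or cfg.OPTIM.STEPS[0] == 0
print(cfg.OPTIM.BASE_LR, cfg.NUM_GPUS)  # 0.01 2
```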
@@ -1,11 +1,12 @@
 from __future__ import absolute_import
-from . import caffe_pb2 as pb
 import numpy as np
-
-def pair_process(item,strict_one=True):
-    if hasattr(item,'__iter__'):
+from . import caffe_pb2 as pb
+
+
+def pair_process(item, strict_one=True):
+    if hasattr(item, '__iter__'):
         for i in item:
-            if i!=item[0]:
+            if i != item[0]:
                 if strict_one:
                     raise ValueError("number in item {} must be the same".format(item))
                 else:
@@ -13,26 +14,28 @@ def pair_process(item,strict_one=True):
             return item[0]
     return item

+
 def pair_reduce(item):
-    if hasattr(item,'__iter__'):
+    if hasattr(item, '__iter__'):
         for i in item:
-            if i!=item[0]:
+            if i != item[0]:
                 return item
         return [item[0]]
     return [item]

-class Layer_param():
-    def __init__(self,name='',type='',top=(),bottom=()):
-        self.param=pb.LayerParameter()
-        self.name=self.param.name=name
-        self.type=self.param.type=type
-
-        self.top=self.param.top
+
+class Layer_param():
+    def __init__(self, name='', type='', top=(), bottom=()):
+        self.param = pb.LayerParameter()
+        self.name = self.param.name = name
+        self.type = self.param.type = type
+
+        self.top = self.param.top
         self.top.extend(top)
-        self.bottom=self.param.bottom
+        self.bottom = self.param.bottom
         self.bottom.extend(bottom)

-    def fc_param(self, num_output, weight_filler='xavier', bias_filler='constant',has_bias=True):
+    def fc_param(self, num_output, weight_filler='xavier', bias_filler='constant', has_bias=True):
         if self.type != 'InnerProduct':
             raise TypeError('the layer type must be InnerProduct if you want set fc param')
         fc_param = pb.InnerProductParameter()
@@ -45,7 +48,7 @@ class Layer_param():

     def conv_param(self, num_output, kernel_size, stride=(1), pad=(0,),
                    weight_filler_type='xavier', bias_filler_type='constant',
-                   bias_term=True, dilation=None,groups=None):
+                   bias_term=True, dilation=None, groups=None):
         """
         add a conv_param layer if you spec the layer type "Convolution"
         Args:
@@ -56,80 +59,69 @@ class Layer_param():
             bias_filler_type: the bias filler type
         Returns:
         """
-        if self.type not in ['Convolution','Deconvolution']:
+        if self.type not in ['Convolution', 'Deconvolution']:
             raise TypeError('the layer type must be Convolution or Deconvolution if you want set conv param')
-        conv_param=pb.ConvolutionParameter()
-        conv_param.num_output=num_output
+        conv_param = pb.ConvolutionParameter()
+        conv_param.num_output = num_output
         conv_param.kernel_size.extend(pair_reduce(kernel_size))
         conv_param.stride.extend(pair_reduce(stride))
         conv_param.pad.extend(pair_reduce(pad))
-        conv_param.bias_term=bias_term
-        conv_param.weight_filler.type=weight_filler_type
+        conv_param.bias_term = bias_term
+        conv_param.weight_filler.type = weight_filler_type
         if bias_term:
             conv_param.bias_filler.type = bias_filler_type
         if dilation:
             conv_param.dilation.extend(pair_reduce(dilation))
         if groups:
-            conv_param.group=groups
+            conv_param.group = groups
         self.param.convolution_param.CopyFrom(conv_param)

-    def pool_param(self,type='MAX',kernel_size=2,stride=2,pad=None, ceil_mode = False):
-        pool_param=pb.PoolingParameter()
-        pool_param.pool=pool_param.PoolMethod.Value(type)
-        pool_param.kernel_size=pair_process(kernel_size)
-        pool_param.stride=pair_process(stride)
-        pool_param.ceil_mode=ceil_mode
+    def pool_param(self, type='MAX', kernel_size=2, stride=2, pad=None, ceil_mode=False):
+        pool_param = pb.PoolingParameter()
+        pool_param.pool = pool_param.PoolMethod.Value(type)
+        pool_param.kernel_size = pair_process(kernel_size)
+        pool_param.stride = pair_process(stride)
+        pool_param.ceil_mode = ceil_mode
         if pad:
-            if isinstance(pad,tuple):
+            if isinstance(pad, tuple):
                 pool_param.pad_h = pad[0]
                 pool_param.pad_w = pad[1]
             else:
-                pool_param.pad=pad
+                pool_param.pad = pad
         self.param.pooling_param.CopyFrom(pool_param)

-    def batch_norm_param(self,use_global_stats=0,moving_average_fraction=None,eps=None):
-        bn_param=pb.BatchNormParameter()
-        bn_param.use_global_stats=use_global_stats
+    def batch_norm_param(self, use_global_stats=0, moving_average_fraction=None, eps=None):
+        bn_param = pb.BatchNormParameter()
+        bn_param.use_global_stats = use_global_stats
         if moving_average_fraction:
-            bn_param.moving_average_fraction=moving_average_fraction
+            bn_param.moving_average_fraction = moving_average_fraction
         if eps:
             bn_param.eps = eps
         self.param.batch_norm_param.CopyFrom(bn_param)

     # layer
     # {
     #     name: "upsample_layer"
     #     type: "Upsample"
     #     bottom: "some_input_feature_map"
     #     bottom: "some_input_pool_index"
     #     top: "some_output"
     #     upsample_param {
     #         upsample_h: 224
     #         upsample_w: 224
     #     }
     # }
-    def upsample_param(self,size=None, scale_factor=None):
-        upsample_param=pb.UpsampleParameter()
+    def upsample_param(self, size=None, scale_factor=None):
+        upsample_param = pb.UpsampleParameter()
         if scale_factor:
-            if isinstance(scale_factor,int):
+            if isinstance(scale_factor, int):
                 upsample_param.scale = scale_factor
             else:
                 upsample_param.scale_h = scale_factor[0]
                 upsample_param.scale_w = scale_factor[1]

         if size:
-            if isinstance(size,int):
+            if isinstance(size, int):
                 upsample_param.upsample_h = size
             else:
                 upsample_param.upsample_h = size[0]
                 upsample_param.upsample_w = size[1]
-                #upsample_param.upsample_h = size[0] * scale_factor
-                #upsample_param.upsample_w = size[1] * scale_factor
+                # upsample_param.upsample_h = size[0] * scale_factor
+                # upsample_param.upsample_w = size[1] * scale_factor
         self.param.upsample_param.CopyFrom(upsample_param)
-    def interp_param(self,size=None, scale_factor=None):
-        interp_param=pb.InterpParameter()
+
+    def interp_param(self, size=None, scale_factor=None):
+        interp_param = pb.InterpParameter()
         if scale_factor:
-            if isinstance(scale_factor,int):
+            if isinstance(scale_factor, int):
                 interp_param.zoom_factor = scale_factor

         if size:

@@ -138,7 +130,7 @@ class Layer_param():
             interp_param.width = size[1]
         self.param.interp_param.CopyFrom(interp_param)

-    def add_data(self,*args):
+    def add_data(self, *args):
         """Args are data numpy array
         """
         del self.param.blobs[:]

@@ -148,11 +140,12 @@ class Layer_param():
             new_blob.shape.dim.append(dim)
             new_blob.data.extend(data.flatten().astype(float))

-    def set_params_by_dict(self,dic):
+    def set_params_by_dict(self, dic):
         pass

-    def copy_from(self,layer_param):
+    def copy_from(self, layer_param):
         pass

-def set_enum(param,key,value):
-    setattr(param,key,param.Value(value))
+
+def set_enum(param, key, value):
+    setattr(param, key, param.Value(value))
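
To ground the `Layer_param` builder above, a short usage sketch; the layer names are hypothetical, and it must run inside the package so the relative `caffe_pb2` import resolves:

```python
# Describe a 3x3 Convolution layer followed by 2x2 max pooling.
conv = Layer_param(name='conv1', type='Convolution',
                   top=('conv1',), bottom=('data',))
# pair_reduce collapses symmetric pairs, so kernel_size=(3, 3) is emitted
# as a single repeated-field entry [3] in the prototxt.
conv.conv_param(num_output=64, kernel_size=(3, 3), stride=(1, 1), pad=(1, 1))

pool = Layer_param(name='pool1', type='Pooling',
                   top=('pool1',), bottom=('conv1',))
pool.pool_param(type='MAX', kernel_size=2, stride=2)

print(conv.param)  # text-format pb.LayerParameter, ready for a prototxt
```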
@@ -1 +1,2 @@
-raise ImportError,'the nn_tools.Caffe.net is no longer used, please use nn_tools.Caffe.caffe_net'
+raise ImportError("the nn_tools.Caffe.net is no longer used, please use nn_tools.Caffe.caffe_net")