# yolov5/utils/torch_utils.py

import math
import os
import time
from copy import deepcopy
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models


def init_seeds(seed=0):
    torch.manual_seed(seed)

    # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
    if seed == 0:  # slower, more reproducible
        cudnn.deterministic = True
        cudnn.benchmark = False
    else:  # faster, less reproducible
        cudnn.deterministic = False
        cudnn.benchmark = True
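

# A minimal reproducibility check (illustrative, not part of the original file): with seed=0,
# cuDNN runs deterministically, so two identically seeded draws should produce identical tensors.
def _demo_init_seeds():
    init_seeds(0)
    a = torch.rand(3)
    init_seeds(0)
    b = torch.rand(3)
    assert torch.equal(a, b)  # same seed -> same random stream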


def select_device(device='', apex=False, batch_size=None):
    # device = 'cpu' or '0' or '0,1,2,3'
    cpu_request = device.lower() == 'cpu'
    if device and not cpu_request:  # if device requested other than 'cpu'
        os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
        assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device  # check availability

    cuda = False if cpu_request else torch.cuda.is_available()
    if cuda:
        c = 1024 ** 2  # bytes to MB
        ng = torch.cuda.device_count()
        if ng > 1 and batch_size:  # check that batch_size is compatible with device_count
            assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng)
        x = [torch.cuda.get_device_properties(i) for i in range(ng)]
        s = 'Using CUDA ' + ('Apex ' if apex else '')  # apex for mixed precision https://github.com/NVIDIA/apex
        for i in range(0, ng):
            if i == 1:
                s = ' ' * len(s)
            print("%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)" %
                  (s, i, x[i].name, x[i].total_memory / c))
    else:
        print('Using CPU')

    print('')  # skip a line
    return torch.device('cuda:0' if cuda else 'cpu')
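

# Usage sketch (illustrative, not in the original file): select a device and move a model onto
# it; with multiple GPUs, batch_size must be divisible by the GPU count or the assert fires.
def _demo_select_device():
    device = select_device('cpu')            # or '0', '0,1' when CUDA is available
    model = nn.Conv2d(3, 16, 3).to(device)   # any module can then be moved to the device
    return model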


def time_synchronized():
    torch.cuda.synchronize() if torch.cuda.is_available() else None
    return time.time()
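

# A minimal timing sketch (illustrative; `model` and `x` are placeholder inputs): CUDA kernels
# launch asynchronously, so synchronizing before reading the clock makes the wall time include
# the GPU work.
def _demo_time_synchronized(model, x):
    t0 = time_synchronized()
    y = model(x)
    t1 = time_synchronized()
    return y, (t1 - t0) * 1000  # elapsed milliseconds, GPU-accurate when CUDA is available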


def is_parallel(model):
    # Returns True if the model is wrapped in DataParallel or DistributedDataParallel
    return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)


def initialize_weights(model):
    for m in model.modules():
        t = type(m)
        if t is nn.Conv2d:
            pass  # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif t is nn.BatchNorm2d:
            m.eps = 1e-4
            m.momentum = 0.03
        elif t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
            m.inplace = True


def find_modules(model, mclass=nn.Conv2d):
    # Finds layer indices matching module class 'mclass'
    return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]


def sparsity(model):
    # Return global model sparsity
    a, b = 0., 0.
    for p in model.parameters():
        a += p.numel()
        b += (p == 0).sum()
    return b / a


def prune(model, amount=0.3):
    # Prune model to requested global sparsity
    import torch.nn.utils.prune as prune
    print('Pruning model... ', end='')
    for name, m in model.named_modules():
        if isinstance(m, torch.nn.Conv2d):
            prune.l1_unstructured(m, name='weight', amount=amount)  # prune
            prune.remove(m, 'weight')  # make permanent
    print(' %.3g global sparsity' % sparsity(model))
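

# Usage sketch (illustrative, assumes torchvision is installed, as imported above): zero 30% of
# each Conv2d's weights by L1 magnitude, then read back the resulting global sparsity. The
# global figure lands slightly below 0.3 because non-conv parameters are left untouched.
def _demo_prune():
    model = models.resnet18(pretrained=False)
    prune(model, amount=0.3)
    return sparsity(model)  # fraction of zeroed parameters across the whole model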


def fuse_conv_and_bn(conv, bn):
    # https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    with torch.no_grad():
        # init
        fusedconv = torch.nn.Conv2d(conv.in_channels,
                                    conv.out_channels,
                                    kernel_size=conv.kernel_size,
                                    stride=conv.stride,
                                    padding=conv.padding,
                                    bias=True)

        # prepare filters
        w_conv = conv.weight.clone().view(conv.out_channels, -1)
        w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
        fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))

        # prepare spatial bias
        if conv.bias is not None:
            b_conv = conv.bias
        else:
            b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device)
        b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
        fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)

        return fusedconv
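

# A minimal correctness check (illustrative, not part of the original file): in eval mode, the
# fused convolution should reproduce conv -> bn to within floating-point tolerance.
def _demo_fuse_conv_and_bn():
    conv = nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False).eval()
    bn = nn.BatchNorm2d(16).eval()  # fresh running stats: mean=0, var=1
    fused = fuse_conv_and_bn(conv, bn)
    x = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        assert torch.allclose(bn(conv(x)), fused(x), atol=1e-5)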


def model_info(model, verbose=False):
    # Prints a line-by-line description of a PyTorch model
    n_p = sum(x.numel() for x in model.parameters())  # number parameters
    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
    if verbose:
        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))

    try:  # FLOPS
        from thop import profile
        flops = profile(deepcopy(model), inputs=(torch.zeros(1, 3, 64, 64),), verbose=False)[0] / 1E9 * 2
        fs = ', %.1f GFLOPS' % (flops * 100)  # 64x64 profile scaled to 640x640 (100x the area)
    except:
        fs = ''

    print('Model Summary: %g layers, %g parameters, %g gradients%s' % (len(list(model.parameters())), n_p, n_g, fs))
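

# Usage sketch (illustrative): summarize any nn.Module; thop is optional, and when it is
# missing the GFLOPS field is simply omitted from the summary line.
def _demo_model_info():
    model_info(models.resnet18(), verbose=False)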


def load_classifier(name='resnet101', n=2):
    # Loads a pretrained model reshaped to n-class output
    model = models.__dict__[name](pretrained=True)

    # Display model properties (iterate over variable names so eval() can look up each value)
    input_size = [3, 224, 224]
    input_space = 'RGB'
    input_range = [0, 1]
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    for x in ['input_size', 'input_space', 'input_range', 'mean', 'std']:
        print(x + ' =', eval(x))

    # Reshape output to n classes
    filters = model.fc.weight.shape[1]
    model.fc.bias = torch.nn.Parameter(torch.zeros(n), requires_grad=True)
    model.fc.weight = torch.nn.Parameter(torch.zeros(n, filters), requires_grad=True)
    model.fc.out_features = n
    return model
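

# Usage sketch (illustrative; downloads pretrained ImageNet weights on first call): reshape
# resnet101's final layer for a 2-class second-stage classifier.
def _demo_load_classifier():
    model = load_classifier(name='resnet101', n=2)
    assert model.fc.weight.shape == (2, 2048)  # resnet101's fc input width is 2048
    return model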


def scale_img(img, ratio=1.0, same_shape=False):  # img(16,3,256,416), r=ratio
    # scales img(bs,3,y,x) by ratio
    h, w = img.shape[2:]
    s = (int(h * ratio), int(w * ratio))  # new size
    img = F.interpolate(img, size=s, mode='bilinear', align_corners=False)  # resize
    if not same_shape:  # pad/crop img
        gs = 32  # (pixels) grid size
        h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
    return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447)  # value = imagenet mean
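

# A minimal shape check (illustrative): scaling a 256x416 batch by 0.5 yields 128x208, then
# padding rounds each side up to the gs=32 grid (128x224 here) with the ImageNet-mean fill.
def _demo_scale_img():
    img = torch.rand(16, 3, 256, 416)
    out = scale_img(img, ratio=0.5)
    assert out.shape == (16, 3, 128, 224)
    return out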


class ModelEMA:
    """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
    Keep a moving average of everything in the model state_dict (parameters and buffers).
    This is intended to allow functionality like
    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
    A smoothed version of the weights is necessary for some training schemes to perform well.
    E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use
    RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 require EMA
    smoothing of weights to match results. Pay attention to the decay constant you are using
    relative to your update count per epoch.
    To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
    disable validation of the EMA weights. Validation will have to be done manually in a separate
    process, or after the training stops converging.
    This class is sensitive to where it is initialized in the sequence of model init,
    GPU assignment and distributed training wrappers.
    I've tested with the sequence in my own train.py for torch.DataParallel, apex.DDP, and single-GPU.
    """

    def __init__(self, model, decay=0.9999, device=''):
        # Create EMA
        self.ema = deepcopy(model.module if is_parallel(model) else model)  # FP32 EMA
        self.ema.eval()
        self.updates = 0  # number of EMA updates
        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))  # decay exponential ramp (to help early epochs)
        self.device = device  # perform ema on different device from model if set
        if device:
            self.ema.to(device)
        for p in self.ema.parameters():
            p.requires_grad_(False)

    def update(self, model):
        # Update EMA parameters
        with torch.no_grad():
            self.updates += 1
            d = self.decay(self.updates)

            msd = model.module.state_dict() if is_parallel(model) else model.state_dict()  # model state_dict
            for k, v in self.ema.state_dict().items():
                if v.dtype.is_floating_point:
                    v *= d
                    v += (1. - d) * msd[k].detach()

    def update_attr(self, model):
        # Update EMA attributes
        for k, v in model.__dict__.items():
            if not k.startswith('_') and k not in ["process_group", "reducer"]:
                setattr(self.ema, k, v)
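

# A minimal training-loop sketch (illustrative; `model`, `loader`, `optimizer` and the loss
# convention are placeholders, not part of this file): blend the live weights into the EMA
# after every optimizer step, then validate with the smoothed copy in ema.ema.
def _demo_model_ema(model, loader, optimizer):
    ema = ModelEMA(model, decay=0.9999)
    for imgs, targets in loader:
        loss = model(imgs, targets)  # assumes the model returns a scalar loss here
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        ema.update(model)            # blend current weights into the moving average
    ema.update_attr(model)           # copy non-tensor attributes across once at the end
    return ema.ema                   # evaluate this smoothed model, not the raw one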