Final cleanup of mixup/cutmix. Element/batch modes work with both the collate path (prefetcher active) and the path without the prefetcher.
parent cd23f55397
commit 8c9814e3f5
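The two paths named in the commit message are wired up in train.py below: with the prefetcher active, mixing happens inside the collate function (FastCollateMixup); otherwise a Mixup instance transforms each batch inside the training loop. A minimal sketch of that wiring, with use_prefetcher standing in for args.prefetcher and purely illustrative hyperparameter values:

# Sketch only: mirrors the train.py wiring in this commit, not a drop-in script.
from timm.data import Mixup, FastCollateMixup

use_prefetcher = True  # stand-in for args.prefetcher

mixup_args = dict(
    mixup_alpha=0.2, cutmix_alpha=1.0, cutmix_minmax=None,
    prob=1.0, switch_prob=0.5, elementwise=False,
    label_smoothing=0.1, num_classes=1000)

collate_fn = None
mixup_fn = None
if use_prefetcher:
    # fast collate path: mixes the raw uint8 numpy images while building the batch tensor
    collate_fn = FastCollateMixup(**mixup_args)
else:
    # standard path: mixes float tensors and targets inside the train loop
    mixup_fn = Mixup(**mixup_args)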
timm/data/__init__.py

@@ -4,7 +4,7 @@ from .dataset import Dataset, DatasetTar, AugMixDataset
 from .transforms import *
 from .loader import create_loader
 from .transforms_factory import create_transform
-from .mixup import mix_batch, FastCollateMixup
+from .mixup import Mixup, FastCollateMixup
 from .auto_augment import RandAugment, AutoAugment, rand_augment_ops, auto_augment_policy,\
     rand_augment_transform, auto_augment_transform
 from .real_labels import RealLabelsImagenet
timm/data/mixup.py

@@ -10,11 +8,8 @@ CutMix: https://github.com/clovaai/CutMix-PyTorch
 
 Hacked together by / Copyright 2020 Ross Wightman
 """
 
 import numpy as np
 import torch
-import math
-import numbers
-
 
 def one_hot(x, num_classes, on_value=1., off_value=0., device='cuda'):
@@ -30,20 +27,21 @@ def mixup_target(target, num_classes, lam=1., smoothing=0.0, device='cuda'):
     return y1 * lam + y2 * (1. - lam)
 
 
-def mixup_batch(input, target, alpha=0.2, num_classes=1000, smoothing=0.1, disable=False):
-    lam = 1.
-    if not disable:
-        lam = np.random.beta(alpha, alpha)
-    input = input.mul(lam).add_(1 - lam, input.flip(0))
-    target = mixup_target(target, num_classes, lam, smoothing)
-    return input, target
-
-
-def rand_bbox(size, lam, border=0., count=None):
-    ratio = math.sqrt(1 - lam)
-    img_h, img_w = size[-2:]
+def rand_bbox(img_shape, lam, margin=0., count=None):
+    """ Standard CutMix bounding-box
+    Generates a random square bbox based on lambda value. This impl includes
+    support for enforcing a border margin as percent of bbox dimensions.
+
+    Args:
+        img_shape (tuple): Image shape as tuple
+        lam (float): Cutmix lambda value
+        margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image)
+        count (int): Number of bbox to generate
+    """
+    ratio = np.sqrt(1 - lam)
+    img_h, img_w = img_shape[-2:]
     cut_h, cut_w = int(img_h * ratio), int(img_w * ratio)
-    margin_y, margin_x = int(border * cut_h), int(border * cut_w)
+    margin_y, margin_x = int(margin * cut_h), int(margin * cut_w)
     cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count)
     cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count)
     yl = np.clip(cy - cut_h // 2, 0, img_h)
@@ -53,9 +51,20 @@ def rand_bbox(size, lam, border=0., count=None):
     return yl, yh, xl, xh
 
 
-def rand_bbox_minmax(size, minmax, count=None):
+def rand_bbox_minmax(img_shape, minmax, count=None):
+    """ Min-Max CutMix bounding-box
+    Inspired by Darknet cutmix impl, generates a random rectangular bbox
+    based on min/max percent values applied to each dimension of the input image.
+
+    Typical defaults for minmax are usually in the .2-.3 for min and .8-.9 range for max.
+
+    Args:
+        img_shape (tuple): Image shape as tuple
+        minmax (tuple or list): Min and max bbox ratios (as percent of image size)
+        count (int): Number of bbox to generate
+    """
     assert len(minmax) == 2
-    img_h, img_w = size[-2:]
+    img_h, img_w = img_shape[-2:]
     cut_h = np.random.randint(int(img_h * minmax[0]), int(img_h * minmax[1]), size=count)
     cut_w = np.random.randint(int(img_w * minmax[0]), int(img_w * minmax[1]), size=count)
     yl = np.random.randint(0, img_h - cut_h, size=count)
@@ -66,6 +75,8 @@ def rand_bbox_minmax(size, minmax, count=None):
 
 
 def cutmix_bbox_and_lam(img_shape, lam, ratio_minmax=None, correct_lam=True, count=None):
+    """ Generate bbox and apply lambda correction.
+    """
     if ratio_minmax is not None:
         yl, yu, xl, xu = rand_bbox_minmax(img_shape, ratio_minmax, count=count)
     else:
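A quick standalone check of the bbox math above, in plain numpy with illustrative numbers (it mirrors the displayed logic rather than importing it): for a 224x224 image and lam = 0.6 the cut edge is sqrt(1 - lam) of each side, and the correct_lam option recomputes lambda from the area of the (possibly clipped) box.

import numpy as np

np.random.seed(0)
img_h = img_w = 224
lam = 0.6
ratio = np.sqrt(1 - lam)                                   # ~0.632
cut_h, cut_w = int(img_h * ratio), int(img_w * ratio)      # 141 x 141
cy, cx = np.random.randint(0, img_h), np.random.randint(0, img_w)
yl, yh = np.clip(cy - cut_h // 2, 0, img_h), np.clip(cy + cut_h // 2, 0, img_h)
xl, xh = np.clip(cx - cut_w // 2, 0, img_w), np.clip(cx + cut_w // 2, 0, img_w)
# lambda correction: fraction of the image NOT covered by the pasted box
lam_corrected = 1. - (yh - yl) * (xh - xl) / float(img_h * img_w)
print(lam_corrected)   # ~0.60 if the box fits fully, larger when it is clipped at a border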
@@ -76,52 +87,22 @@ def cutmix_bbox_and_lam(img_shape, lam, ratio_minmax=None, correct_lam=True, count=None):
     return (yl, yu, xl, xu), lam
 
 
-def cutmix_batch(input, target, alpha=0.2, num_classes=1000, smoothing=0.1, disable=False, correct_lam=False):
-    lam = 1.
-    if not disable:
-        lam = np.random.beta(alpha, alpha)
-    if lam != 1:
-        yl, yh, xl, xh = rand_bbox(input.size(), lam)
-        input[:, :, yl:yh, xl:xh] = input.flip(0)[:, :, yl:yh, xl:xh]
-        if correct_lam:
-            lam = 1. - (yh - yl) * (xh - xl) / float(input.shape[-2] * input.shape[-1])
-    target = mixup_target(target, num_classes, lam, smoothing)
-    return input, target
-
-
-def mix_batch(
-        input, target, mixup_alpha=0.2, cutmix_alpha=0., prob=1.0, switch_prob=.5,
-        num_classes=1000, smoothing=0.1, disable=False):
-    # FIXME test this version
-    if np.random.rand() > prob:
-        return input, target
-    use_cutmix = cutmix_alpha > 0. and np.random.rand() <= switch_prob
-    if use_cutmix:
-        return cutmix_batch(input, target, cutmix_alpha, num_classes, smoothing, disable)
-    else:
-        return mixup_batch(input, target, mixup_alpha, num_classes, smoothing, disable)
-
-
-class FastCollateMixup:
-    """Fast Collate Mixup/Cutmix that applies different params to each element or whole batch
-
-    NOTE once experiments are done, one of the three variants will remain with this class name
-    """
+class Mixup:
+    """ Mixup/Cutmix that applies different params to each element or whole batch
+
+    Args:
+        mixup_alpha (float): mixup alpha value, mixup is active if > 0.
+        cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0.
+        cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None.
+        prob (float): probability of applying mixup or cutmix per batch or element
+        switch_prob (float): probability of switching to cutmix instead of mixup when both are active
+        elementwise (bool): apply mixup/cutmix params per batch element instead of per batch
+        correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders
+        label_smoothing (float): apply label smoothing to the mixed target tensor
+        num_classes (int): number of classes for target
+    """
     def __init__(self, mixup_alpha=1., cutmix_alpha=0., cutmix_minmax=None, prob=1.0, switch_prob=0.5,
                  elementwise=False, correct_lam=True, label_smoothing=0.1, num_classes=1000):
-        """
-
-        Args:
-            mixup_alpha (float): mixup alpha value, mixup is active if > 0.
-            cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0.
-            cutmix_minmax (float): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None
-            prob (float): probability of applying mixup or cutmix per batch or element
-            switch_prob (float): probability of using cutmix instead of mixup when both active
-            elementwise (bool): apply mixup/cutmix params per batch element instead of per batch
-            label_smoothing (float):
-            num_classes (int):
-        """
         self.mixup_alpha = mixup_alpha
         self.cutmix_alpha = cutmix_alpha
         self.cutmix_minmax = cutmix_minmax
@@ -129,7 +110,7 @@ class FastCollateMixup:
             assert len(self.cutmix_minmax) == 2
             # force cutmix alpha == 1.0 when minmax active to keep logic simple & safe
             self.cutmix_alpha = 1.0
-        self.prob = prob
+        self.mix_prob = prob
         self.switch_prob = switch_prob
         self.label_smoothing = label_smoothing
         self.num_classes = num_classes
@@ -137,10 +118,9 @@ class FastCollateMixup:
         self.correct_lam = correct_lam  # correct lambda based on clipped area for cutmix
         self.mixup_enabled = True  # set to false to disable mixing (intended tp be set by train loop)
 
-    def _mix_elem(self, output, batch):
-        batch_size = len(batch)
-        lam_out = np.ones(batch_size, dtype=np.float32)
-        use_cutmix = np.zeros(batch_size).astype(np.bool)
+    def _params_per_elem(self, batch_size):
+        lam = np.ones(batch_size, dtype=np.float32)
+        use_cutmix = np.zeros(batch_size, dtype=np.bool)
         if self.mixup_enabled:
             if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
                 use_cutmix = np.random.rand(batch_size) < self.switch_prob
@@ -151,35 +131,17 @@ class FastCollateMixup:
             elif self.mixup_alpha > 0.:
                 lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size)
             elif self.cutmix_alpha > 0.:
-                use_cutmix = np.ones(batch_size).astype(np.bool)
+                use_cutmix = np.ones(batch_size, dtype=np.bool)
                 lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size)
             else:
                 assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
-            lam_out = np.where(np.random.rand(batch_size) < self.prob, lam_mix.astype(np.float32), lam_out)
-
-        for i in range(batch_size):
-            j = batch_size - i - 1
-            lam = lam_out[i]
-            mixed = batch[i][0]
-            if lam != 1.:
-                if use_cutmix[i]:
-                    mixed = mixed.copy()
-                    (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
-                        output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
-                    mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh]
-                    lam_out[i] = lam
-                else:
-                    mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam)
-                    lam_out[i] = lam
-                np.round(mixed, out=mixed)
-            output[i] += torch.from_numpy(mixed.astype(np.uint8))
-        return torch.tensor(lam_out).unsqueeze(1)
+            lam = np.where(np.random.rand(batch_size) < self.mix_prob, lam_mix.astype(np.float32), lam)
+        return lam, use_cutmix
 
-    def _mix_batch(self, output, batch):
-        batch_size = len(batch)
+    def _params_per_batch(self):
         lam = 1.
         use_cutmix = False
-        if self.mixup_enabled and np.random.rand() < self.prob:
+        if self.mixup_enabled and np.random.rand() < self.mix_prob:
             if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
                 use_cutmix = np.random.rand() < self.switch_prob
                 lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \
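The _params_per_elem/_params_per_batch helpers being introduced here (the next hunk completes _params_per_batch) only draw the mixing parameters and never touch the data: per batch or per element they return a lambda sampled from Beta(alpha, alpha) plus a flag selecting cutmix over mixup, gated by mix_prob and switch_prob. A standalone illustration of that sampling with illustrative alpha/prob values, not the class methods themselves:

import numpy as np

mixup_alpha, cutmix_alpha = 0.2, 1.0
mix_prob, switch_prob = 1.0, 0.5

# per-batch draw, in the spirit of _params_per_batch
lam, use_cutmix = 1., False
if np.random.rand() < mix_prob:
    use_cutmix = np.random.rand() < switch_prob
    alpha = cutmix_alpha if use_cutmix else mixup_alpha
    lam = float(np.random.beta(alpha, alpha))
print(lam, use_cutmix)

# per-element draw, in the spirit of _params_per_elem (one lambda/flag per sample)
batch_size = 4
use_cutmix_e = np.random.rand(batch_size) < switch_prob
alphas = np.where(use_cutmix_e, cutmix_alpha, mixup_alpha)
lam_e = np.where(np.random.rand(batch_size) < mix_prob,
                 np.random.beta(alphas, alphas).astype(np.float32),
                 np.ones(batch_size, dtype=np.float32))
print(lam_e, use_cutmix_e)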
@@ -192,17 +154,84 @@ class FastCollateMixup:
             else:
                 assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
             lam = float(lam_mix)
+        return lam, use_cutmix
+
+    def _mix_elem(self, x):
+        batch_size = len(x)
+        lam_batch, use_cutmix = self._params_per_elem(batch_size)
+        x_orig = x.clone()  # need to keep an unmodified original for mixing source
+        for i in range(batch_size):
+            j = batch_size - i - 1
+            lam = lam_batch[i]
+            if lam != 1.:
+                if use_cutmix[i]:
+                    (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
+                        x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
+                    x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
+                    lam_batch[i] = lam
+                else:
+                    x[i] = x[i] * lam + x_orig[j] * (1 - lam)
+        return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)
+
+    def _mix_batch(self, x):
+        lam, use_cutmix = self._params_per_batch()
+        if lam == 1.:
+            return 1.
+        if use_cutmix:
+            (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
+                x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
+            x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh]
+        else:
+            x_flipped = x.flip(0).mul_(1. - lam)
+            x.mul_(lam).add_(x_flipped)
+        return lam
+
+    def __call__(self, x, target):
+        assert len(x) % 2 == 0, 'Batch size should be even when using this'
+        lam = self._mix_elem(x) if self.elementwise else self._mix_batch(x)
+        target = mixup_target(target, self.num_classes, lam, self.label_smoothing)
+        return x, target
+
+
+class FastCollateMixup(Mixup):
+    """ Fast Collate w/ Mixup/Cutmix that applies different params to each element or whole batch
+
+    A Mixup impl that's performed while collating the batches.
+    """
+
+    def _mix_elem_collate(self, output, batch):
+        batch_size = len(batch)
+        lam_batch, use_cutmix = self._params_per_elem(batch_size)
+        for i in range(batch_size):
+            j = batch_size - i - 1
+            lam = lam_batch[i]
+            mixed = batch[i][0]
+            if lam != 1.:
+                if use_cutmix[i]:
+                    mixed = mixed.copy()
+                    (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
+                        output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
+                    mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh]
+                    lam_batch[i] = lam
+                else:
+                    mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam)
+                    lam_batch[i] = lam
+                np.round(mixed, out=mixed)
+            output[i] += torch.from_numpy(mixed.astype(np.uint8))
+        return torch.tensor(lam_batch).unsqueeze(1)
+
+    def _mix_batch_collate(self, output, batch):
+        batch_size = len(batch)
+        lam, use_cutmix = self._params_per_batch()
         if use_cutmix:
             (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
                 output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
 
         for i in range(batch_size):
             j = batch_size - i - 1
             mixed = batch[i][0]
             if lam != 1.:
                 if use_cutmix:
-                    mixed = mixed.copy()
+                    mixed = mixed.copy()  # don't want to modify the original while iterating
                     mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh]
                 else:
                     mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam)
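With __call__ in place, the tensor path can be exercised directly. A minimal usage sketch, assuming a timm build that includes this commit, a CUDA device (one_hot/mixup_target default to device='cuda' on this path), an even batch size as the assert requires, and illustrative hyperparameters:

import torch
from timm.data import Mixup

mixup_fn = Mixup(mixup_alpha=0.2, cutmix_alpha=1.0, prob=1.0,
                 switch_prob=0.5, label_smoothing=0.1, num_classes=1000)

x = torch.rand(8, 3, 224, 224).cuda()   # even batch size required
y = torch.randint(0, 1000, (8,)).cuda()
x_mixed, y_soft = mixup_fn(x, y)        # y_soft: (8, 1000) soft target tensor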
@@ -210,16 +239,15 @@ class FastCollateMixup:
             output[i] += torch.from_numpy(mixed.astype(np.uint8))
         return lam
 
-    def __call__(self, batch):
+    def __call__(self, batch, _=None):
         batch_size = len(batch)
         assert batch_size % 2 == 0, 'Batch size should be even when using this'
         output = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8)
         if self.elementwise:
-            lam = self._mix_elem(output, batch)
+            lam = self._mix_elem_collate(output, batch)
         else:
-            lam = self._mix_batch(output, batch)
+            lam = self._mix_batch_collate(output, batch)
         target = torch.tensor([b[1] for b in batch], dtype=torch.int64)
         target = mixup_target(target, self.num_classes, lam, self.label_smoothing, device='cpu')
 
         return output, target
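The collate variant operates on the raw uint8 CHW numpy arrays the prefetcher pipeline feeds it and assembles the uint8 batch tensor itself, so it can be dropped in as a DataLoader collate_fn (train.py passes it to create_loader instead). A rough standalone sketch with dummy (image, label) samples, assuming a timm build that includes this commit:

import numpy as np
import torch
from timm.data import FastCollateMixup

collate = FastCollateMixup(mixup_alpha=0.2, cutmix_alpha=1.0, prob=1.0,
                           switch_prob=0.5, label_smoothing=0.1, num_classes=1000)

# dummy dataset output: (uint8 CHW image, int label) pairs, even batch size
batch = [(np.random.randint(0, 256, (3, 224, 224), dtype=np.uint8), i % 1000)
         for i in range(8)]
imgs, targets = collate(batch)
print(imgs.shape, imgs.dtype, targets.shape)  # [8, 3, 224, 224] torch.uint8, [8, 1000]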
train.py
@@ -28,7 +28,7 @@ except ImportError:
     from torch.nn.parallel import DistributedDataParallel as DDP
     has_apex = False
 
-from timm.data import Dataset, create_loader, resolve_data_config, FastCollateMixup, mix_batch, AugMixDataset
+from timm.data import Dataset, create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset
 from timm.models import create_model, resume_checkpoint, convert_splitbn_model
 from timm.utils import *
 from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy, JsdCrossEntropy
@@ -398,12 +398,18 @@ def main():
         dataset_train = Dataset(train_dir)
 
     collate_fn = None
-    if args.prefetcher and (args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None):
-        assert not num_aug_splits  # collate conflict (need to support deinterleaving in collate mixup)
-        collate_fn = FastCollateMixup(
+    mixup_fn = None
+    mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
+    if mixup_active:
+        mixup_args = dict(
             mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
             prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, elementwise=args.mixup_elem,
             label_smoothing=args.smoothing, num_classes=args.num_classes)
+        if args.prefetcher:
+            assert not num_aug_splits  # collate conflict (need to support deinterleaving in collate mixup)
+            collate_fn = FastCollateMixup(**mixup_args)
+        else:
+            mixup_fn = Mixup(**mixup_args)
 
     if num_aug_splits > 1:
         dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)
@@ -465,17 +471,14 @@ def main():
     if args.jsd:
         assert num_aug_splits > 1  # JSD only valid with aug splits set
         train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing).cuda()
-        validate_loss_fn = nn.CrossEntropyLoss().cuda()
-    elif args.mixup > 0.:
-        # smoothing is handled with mixup label transform
+    elif mixup_active:
+        # smoothing is handled with mixup target transform
         train_loss_fn = SoftTargetCrossEntropy().cuda()
-        validate_loss_fn = nn.CrossEntropyLoss().cuda()
     elif args.smoothing:
         train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing).cuda()
-        validate_loss_fn = nn.CrossEntropyLoss().cuda()
     else:
         train_loss_fn = nn.CrossEntropyLoss().cuda()
-        validate_loss_fn = train_loss_fn
+    validate_loss_fn = nn.CrossEntropyLoss().cuda()
 
     eval_metric = args.eval_metric
     best_metric = None
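The reason for the loss change above: mixed targets are already soft (with label smoothing folded in), so training uses SoftTargetCrossEntropy whenever mixup/cutmix is active, while validation always sees hard labels and a plain CrossEntropyLoss. A small sketch of the distinction; the hand-built soft_targets here are just a stand-in for what mixup_target produces:

import torch
import torch.nn as nn
from timm.loss import SoftTargetCrossEntropy

logits = torch.randn(8, 1000)
soft_targets = torch.full((8, 1000), 0.1 / 1000)                   # smoothed off-values
soft_targets[torch.arange(8), torch.randint(0, 1000, (8,))] += 0.9

train_loss = SoftTargetCrossEntropy()(logits, soft_targets)        # expects (N, C) probabilities
val_loss = nn.CrossEntropyLoss()(logits, torch.randint(0, 1000, (8,)))  # expects class indices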
@@ -503,7 +506,7 @@ def main():
             train_metrics = train_epoch(
                 epoch, model, loader_train, optimizer, train_loss_fn, args,
                 lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir,
-                use_amp=use_amp, model_ema=model_ema)
+                use_amp=use_amp, model_ema=model_ema, mixup_fn=mixup_fn)
 
             if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
                 if args.local_rank == 0:
@@ -543,11 +546,13 @@ def main():
 
 def train_epoch(
         epoch, model, loader, optimizer, loss_fn, args,
-        lr_scheduler=None, saver=None, output_dir='', use_amp=False, model_ema=None):
+        lr_scheduler=None, saver=None, output_dir='', use_amp=False, model_ema=None, mixup_fn=None):
 
-    if args.prefetcher and args.mixup > 0 and loader.mixup_enabled:
-        if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
+    if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
+        if args.prefetcher and loader.mixup_enabled:
             loader.mixup_enabled = False
+        elif mixup_fn is not None:
+            mixup_fn.mixup_enabled = False
 
     batch_time_m = AverageMeter()
     data_time_m = AverageMeter()
@@ -563,12 +568,8 @@ def train_epoch(
         data_time_m.update(time.time() - end)
         if not args.prefetcher:
             input, target = input.cuda(), target.cuda()
-            if args.mixup > 0.:
-                input, target = mix_batch(
-                    input, target,
-                    mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, prob=args.mixup_prob,
-                    switch_prob=args.mixup_switch_prob, num_classes=args.num_classes, smoothing=args.smoothing,
-                    disable=args.mixup_off_epoch and epoch >= args.mixup_off_epoch)
+            if mixup_fn is not None:
+                input, target = mixup_fn(input, target)
 
         output = model(input)