update torch.cuda.amp to torch.amp

Jacob Brown 2024-08-05 13:47:39 -06:00
parent 6096750fcc
commit aa637bb6f0
4 changed files with 6 additions and 6 deletions
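
Context for the change (not part of the diff): recent PyTorch releases deprecate the torch.cuda.amp namespace in favor of the device-generic torch.amp API, where the device type is passed as the first argument. A minimal sketch of the mapping, with an illustrative amp flag:

import torch

amp = torch.cuda.is_available()  # illustrative flag: enable AMP only when CUDA is present

# Deprecated spellings (emit a deprecation warning on recent PyTorch):
#   ctx = torch.cuda.amp.autocast(enabled=amp)
#   scaler = torch.cuda.amp.GradScaler(enabled=amp)

# Replacements used by this commit: pass the device type explicitly
ctx = torch.amp.autocast("cuda", enabled=amp)
scaler = torch.amp.GradScaler("cuda", enabled=amp)

with ctx:
    pass  # forward pass would go here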


@@ -108,7 +108,7 @@ def run(
     action = "validating" if dataloader.dataset.root.stem == "val" else "testing"
     desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}"
     bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0)
-    with torch.cuda.amp.autocast(enabled=device.type != "cpu"):
+    with torch.amp.autocast("cuda", enabled=device.type != "cpu"):
         for images, labels in bar:
             with dt[0]:
                 images, labels = images.to(device, non_blocking=True), labels.to(device)


@@ -320,7 +320,7 @@ def train(hyp, opt, device, callbacks):
     maps = np.zeros(nc)  # mAP per class
     results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
     scheduler.last_epoch = start_epoch - 1  # do not move
-    scaler = torch.cuda.amp.GradScaler(enabled=amp)
+    scaler = torch.amp.GradScaler("cuda", enabled=amp)
     stopper, stop = EarlyStopping(patience=opt.patience), False
     compute_loss = ComputeLoss(model, overlap=overlap)  # init loss class
     # callbacks.run('on_train_start')
@@ -380,7 +380,7 @@ def train(hyp, opt, device, callbacks):
                     imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)

             # Forward
-            with torch.cuda.amp.autocast(amp):
+            with torch.amp.autocast("cuda", enabled=amp):
                 pred = model(imgs)  # forward
                 loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float())
                 if RANK != -1:
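
For readers unfamiliar with the pairing, the GradScaler created at line 320 is consumed later in the training step together with the autocast block shown above. A minimal sketch of the standard PyTorch AMP step (generic names, not the repository's actual code):

import torch

def amp_train_step(model, imgs, targets, optimizer, scaler, compute_loss):
    # Forward pass and loss computed in mixed precision
    with torch.amp.autocast("cuda", enabled=scaler.is_enabled()):
        pred = model(imgs)
        loss, loss_items = compute_loss(pred, targets)
    # Scale the loss so small fp16 gradients do not underflow, then backprop
    scaler.scale(loss).backward()
    # step() unscales the gradients and skips the update if inf/NaN is found
    scaler.step(optimizer)
    scaler.update()  # adjust the scale factor for the next iteration
    optimizer.zero_grad()
    return loss_items

The scaler itself is created once before the loop, exactly as in the hunk above: scaler = torch.amp.GradScaler("cuda", enabled=amp).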


@@ -352,7 +352,7 @@ def train(hyp, opt, device, callbacks):
     maps = np.zeros(nc)  # mAP per class
     results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
     scheduler.last_epoch = start_epoch - 1  # do not move
-    scaler = torch.cuda.amp.GradScaler(enabled=amp)
+    scaler = torch.amp.GradScaler("cuda", enabled=amp)
     stopper, stop = EarlyStopping(patience=opt.patience), False
     compute_loss = ComputeLoss(model)  # init loss class
     callbacks.run("on_train_start")
@@ -409,7 +409,7 @@ def train(hyp, opt, device, callbacks):
                     imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)

             # Forward
-            with torch.cuda.amp.autocast(amp):
+            with torch.amp.autocast("cuda", enabled=amp):
                 pred = model(imgs)  # forward
                 loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                 if RANK != -1:


@@ -12,7 +12,7 @@ from utils.torch_utils import profile
 def check_train_batch_size(model, imgsz=640, amp=True):
     """Checks and computes optimal training batch size for YOLOv5 model, given image size and AMP setting."""
-    with torch.cuda.amp.autocast(amp):
+    with torch.amp.autocast("cuda", enabled=amp):
         return autobatch(deepcopy(model).train(), imgsz)  # compute optimal batch size
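
A side note on the new signature (an observation, not part of the commit): because the device type is now an explicit argument, the same unified API also covers CPU autocast, while the enabled flag still lets a CUDA call degrade to a no-op on CPU-only machines. A small illustrative sketch:

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
x = torch.randn(8, 16, device=device)
w = torch.randn(16, 4, device=device)

# CUDA autocast; enabled=False makes the context a no-op on CPU-only machines
with torch.amp.autocast("cuda", enabled=device.type != "cpu"):
    y = x @ w  # runs in float16 under autocast, float32 otherwise

# The same unified API accepts "cpu" (bfloat16 autocast on CPU)
with torch.amp.autocast("cpu", dtype=torch.bfloat16):
    y_cpu = x.cpu() @ w.cpu()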