diff --git a/classify/val.py b/classify/val.py
index 8ce48f064..244597a79 100644
--- a/classify/val.py
+++ b/classify/val.py
@@ -108,7 +108,7 @@ def run(
     action = "validating" if dataloader.dataset.root.stem == "val" else "testing"
     desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}"
     bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0)
-    with torch.cuda.amp.autocast(enabled=device.type != "cpu"):
+    with torch.amp.autocast("cuda", enabled=device.type != "cpu"):
         for images, labels in bar:
             with dt[0]:
                 images, labels = images.to(device, non_blocking=True), labels.to(device)
diff --git a/segment/train.py b/segment/train.py
index 379fed0b2..6654e2a9e 100644
--- a/segment/train.py
+++ b/segment/train.py
@@ -320,7 +320,7 @@ def train(hyp, opt, device, callbacks):
     maps = np.zeros(nc)  # mAP per class
     results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
     scheduler.last_epoch = start_epoch - 1  # do not move
-    scaler = torch.cuda.amp.GradScaler(enabled=amp)
+    scaler = torch.amp.GradScaler("cuda", enabled=amp)
     stopper, stop = EarlyStopping(patience=opt.patience), False
     compute_loss = ComputeLoss(model, overlap=overlap)  # init loss class
     # callbacks.run('on_train_start')
@@ -380,7 +380,7 @@ def train(hyp, opt, device, callbacks):
                     imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)
 
             # Forward
-            with torch.cuda.amp.autocast(amp):
+            with torch.amp.autocast("cuda", enabled=amp):
                 pred = model(imgs)  # forward
                 loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float())
                 if RANK != -1:
diff --git a/train.py b/train.py
index b4395d7e8..8ab5256b3 100644
--- a/train.py
+++ b/train.py
@@ -352,7 +352,7 @@ def train(hyp, opt, device, callbacks):
     maps = np.zeros(nc)  # mAP per class
     results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
     scheduler.last_epoch = start_epoch - 1  # do not move
-    scaler = torch.cuda.amp.GradScaler(enabled=amp)
+    scaler = torch.amp.GradScaler("cuda", enabled=amp)
     stopper, stop = EarlyStopping(patience=opt.patience), False
     compute_loss = ComputeLoss(model)  # init loss class
     callbacks.run("on_train_start")
@@ -409,7 +409,7 @@ def train(hyp, opt, device, callbacks):
                     imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)
 
             # Forward
-            with torch.cuda.amp.autocast(amp):
+            with torch.amp.autocast("cuda", enabled=amp):
                 pred = model(imgs)  # forward
                 loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                 if RANK != -1:
diff --git a/utils/autobatch.py b/utils/autobatch.py
index 08a0de841..23e5e6593 100644
--- a/utils/autobatch.py
+++ b/utils/autobatch.py
@@ -12,7 +12,7 @@
 from utils.torch_utils import profile
 
 
 def check_train_batch_size(model, imgsz=640, amp=True):
     """Checks and computes optimal training batch size for YOLOv5 model, given image size and AMP setting."""
-    with torch.cuda.amp.autocast(amp):
+    with torch.amp.autocast("cuda", enabled=amp):
         return autobatch(deepcopy(model).train(), imgsz)  # compute optimal batch size