From 1b82b72ed528df7c3ab370b9d6e958f0cc961e0b Mon Sep 17 00:00:00 2001
From: Bala-Vignesh-Reddy
Date: Tue, 7 Jan 2025 18:57:49 +0530
Subject: [PATCH] fixed scaler

---
 segment/train.py | 2 +-
 train.py         | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/segment/train.py b/segment/train.py
index c5cfb5ca8..6a17dc0be 100644
--- a/segment/train.py
+++ b/segment/train.py
@@ -320,7 +320,7 @@ def train(hyp, opt, device, callbacks):
     maps = np.zeros(nc)  # mAP per class
     results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
     scheduler.last_epoch = start_epoch - 1  # do not move
-    scaler = torch.amp.GradScaler(enabled=amp)
+    scaler = amp.GradScaler(enabled=device.type != "cpu")
     stopper, stop = EarlyStopping(patience=opt.patience), False
     compute_loss = ComputeLoss(model, overlap=overlap)  # init loss class
     # callbacks.run('on_train_start')
diff --git a/train.py b/train.py
index 6c967bc6f..311525bff 100644
--- a/train.py
+++ b/train.py
@@ -352,7 +352,7 @@ def train(hyp, opt, device, callbacks):
     maps = np.zeros(nc)  # mAP per class
     results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
     scheduler.last_epoch = start_epoch - 1  # do not move
-    scaler = amp.GradScaler(enabled=cuda)
+    scaler = amp.GradScaler(enabled=device.type != "cpu")
     stopper, stop = EarlyStopping(patience=opt.patience), False
     compute_loss = ComputeLoss(model)  # init loss class
     callbacks.run("on_train_start")
@@ -409,7 +409,7 @@ def train(hyp, opt, device, callbacks):
                     imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)
 
             # Forward
-            with torch.amp.autocast("cuda", enabled=amp):
+            with torch.amp.autocast("cuda", enabled=device.type != "cpu"):
                 pred = model(imgs)  # forward
                 loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                 if RANK != -1:
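
Context, not part of the patch: the change gates both GradScaler and autocast on device.type != "cpu", so AMP components are active only on accelerator devices. Below is a minimal standalone sketch of that pattern, assuming a PyTorch version where torch.amp.GradScaler is available (>= 2.3); the toy model, data, and optimizer names are illustrative only, not from the repository.

    # Sketch: enable AMP scaling/autocast only off-CPU, mirroring the patch.
    import torch
    from torch import amp, nn

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    use_amp = device.type != "cpu"  # the condition the patch introduces

    model = nn.Linear(10, 2).to(device)          # toy model (illustrative)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    scaler = amp.GradScaler(enabled=use_amp)     # passthrough no-op when disabled

    x = torch.randn(4, 10, device=device)
    y = torch.randn(4, 2, device=device)

    optimizer.zero_grad()
    with torch.amp.autocast("cuda", enabled=use_amp):  # mixed-precision region
        loss = nn.functional.mse_loss(model(x), y)
    scaler.scale(loss).backward()  # scale loss to avoid fp16 gradient underflow
    scaler.step(optimizer)         # unscales grads, then calls optimizer.step()
    scaler.update()                # adjusts the scale factor for the next step

When enabled=False, GradScaler and autocast degrade to plain fp32 passthroughs, so the same training-loop code runs unchanged on CPU.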