updated autocast
commit
1fbde181d4
|
@ -108,7 +108,7 @@ def run(
|
|||
action = "validating" if dataloader.dataset.root.stem == "val" else "testing"
|
||||
desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}"
|
||||
bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0)
|
||||
with torch.amp.autocast("cuda", enabled=amp):
|
||||
with torch.amp.autocast("cuda", enabled=device.type != "cpu"):
|
||||
for images, labels in bar:
|
||||
with dt[0]:
|
||||
images, labels = images.to(device, non_blocking=True), labels.to(device)
|
||||
|
|
8
train.py
8
train.py
|
@ -352,7 +352,15 @@ def train(hyp, opt, device, callbacks):
|
|||
maps = np.zeros(nc) # mAP per class
|
||||
results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
|
||||
scheduler.last_epoch = start_epoch - 1 # do not move
|
||||
# checking if autocast is available
device_amp = torch.is_autocast_available(device_type=device.type)

scaler = torch.amp.GradScaler(enabled=(device_amp and device.type != "cpu"))
|
||||
stopper, stop = EarlyStopping(patience=opt.patience), False
|
||||
compute_loss = ComputeLoss(model) # init loss class
|
||||
callbacks.run("on_train_start")
|
||||
|
|
Loading…
Reference in New Issue