fixed scaler

parent c510954d17
commit 1b82b72ed5
@@ -320,7 +320,7 @@ def train(hyp, opt, device, callbacks):
     maps = np.zeros(nc)  # mAP per class
     results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
     scheduler.last_epoch = start_epoch - 1  # do not move
-    scaler = torch.amp.GradScaler(enabled=amp)
+    scaler = amp.GradScaler(enabled=device.type != "cpu")
     stopper, stop = EarlyStopping(patience=opt.patience), False
     compute_loss = ComputeLoss(model, overlap=overlap)  # init loss class
     # callbacks.run('on_train_start')
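The change above swaps a precomputed AMP flag for a direct device check when constructing the scaler. A minimal standalone sketch of the resulting pattern (not this repo's code; it assumes amp refers to PyTorch's AMP module, e.g. from torch.cuda import amp):

    import torch
    from torch.cuda import amp

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # enabled=False turns the scaler into a no-op, so the same training
    # code runs unchanged on CPU-only machines.
    scaler = amp.GradScaler(enabled=device.type != "cpu")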
train.py (4 changed lines)
@@ -352,7 +352,7 @@ def train(hyp, opt, device, callbacks):
     maps = np.zeros(nc)  # mAP per class
     results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
     scheduler.last_epoch = start_epoch - 1  # do not move
-    scaler = amp.GradScaler(enabled=cuda)
+    scaler = amp.GradScaler(enabled=device.type != "cpu")
     stopper, stop = EarlyStopping(patience=opt.patience), False
     compute_loss = ComputeLoss(model)  # init loss class
     callbacks.run("on_train_start")
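Note that the removed line only enabled scaling when CUDA was available (enabled=cuda), while the replacement enables it for any non-CPU device. This is safe for the rest of the loop because a disabled GradScaler is a pass-through, which a quick standalone check illustrates (hypothetical snippet, not part of this commit):

    import torch
    from torch.cuda import amp

    scaler = amp.GradScaler(enabled=False)  # e.g. on a CPU-only machine
    loss = torch.tensor(1.5, requires_grad=True)
    assert scaler.scale(loss) is loss       # disabled scaler returns the loss unchanged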
@@ -409,7 +409,7 @@ def train(hyp, opt, device, callbacks):
     imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)

     # Forward
-    with torch.amp.autocast("cuda", enabled=amp):
+    with torch.amp.autocast("cuda", enabled=device.type != "cpu"):
         pred = model(imgs)  # forward
         loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
     if RANK != -1:
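For reference, the autocast context and the GradScaler are the two halves of the standard PyTorch AMP recipe: the forward pass runs in mixed precision, and the scaled backward pass guards against fp16 gradient underflow. A self-contained sketch of that pattern with the same enabled gating (toy model and data, not this repo's training loop):

    import torch
    from torch import nn

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    use_amp = device.type != "cpu"

    model = nn.Linear(10, 1).to(device)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    scaler = torch.cuda.amp.GradScaler(enabled=use_amp)

    imgs = torch.randn(4, 10, device=device)
    targets = torch.randn(4, 1, device=device)

    with torch.amp.autocast("cuda", enabled=use_amp):
        pred = model(imgs)                            # forward in mixed precision
        loss = nn.functional.mse_loss(pred, targets)

    scaler.scale(loss).backward()  # backward on the scaled loss
    scaler.step(optimizer)         # unscales grads, then optimizer.step()
    scaler.update()                # adapts the loss scale for the next step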