work with later versions of pytorch
parent dd4f147016
commit 19c93d5af4
models/common.py

@@ -23,7 +23,7 @@ import requests
 import torch
 import torch.nn as nn
 from PIL import Image
-from torch.cuda import amp
+from torch import amp

 from utils import TryExcept
 from utils.dataloaders import exif_transpose, letterbox
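Note: recent PyTorch releases consolidate AMP under the device-generic torch.amp namespace and deprecate the CUDA-only torch.cuda.amp spelling, which is why the import moves. A minimal sketch of the equivalence (the enabled flag becomes a keyword, since the first positional argument of torch.amp.autocast is the device type):

    # Old spelling (CUDA-only module; deprecated in recent PyTorch):
    #     from torch.cuda import amp
    #     with amp.autocast(enabled=True):
    #         ...
    # New spelling used by this commit (torch.amp exists since ~PyTorch 1.10):
    from torch import amp

    with amp.autocast("cuda", enabled=True):
        pass  # ops in this block run under mixed precision on CUDA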
@@ -728,7 +728,7 @@ class AutoShape(nn.Module):
         p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device)  # param
         autocast = self.amp and (p.device.type != 'cpu')  # Automatic Mixed Precision (AMP) inference
         if isinstance(ims, torch.Tensor):  # torch
-            with amp.autocast(autocast):
+            with amp.autocast("cuda", enabled=autocast):
                 return self.model(ims.to(p.device).type_as(p), augment=augment)  # inference

         # Pre-process
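Note: the old call passed the boolean positionally, amp.autocast(autocast); in the device-generic API the first positional argument is the device type, so the flag has to move to the enabled= keyword or the bool would be misread as a device string. The same rewrite is applied at the second autocast site in the next hunk. Pitfall sketch:

    # torch.amp.autocast(autocast)                  # wrong: bool lands in device_type
    # torch.amp.autocast("cuda", enabled=autocast)  # right: as in this commit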
@@ -755,7 +755,7 @@ class AutoShape(nn.Module):
             x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2)))  # stack and BHWC to BCHW
             x = torch.from_numpy(x).to(p.device).type_as(p) / 255  # uint8 to fp16/32

-        with amp.autocast(autocast):
+        with amp.autocast("cuda", enabled=autocast):
             # Inference
             with dt[1]:
                 y = self.model(x, augment=augment)  # forward
requirements.txt

@@ -4,7 +4,7 @@
 # Base ------------------------------------------------------------------------
 gitpython>=3.1.30
 matplotlib>=3.3
-numpy>=1.18.5,<2
+numpy>=1.18.5
 opencv-python>=4.1.1
 Pillow>=7.1.2,<10
 psutil  # system resources
@@ -12,7 +12,7 @@ PyYAML>=5.3.1
 requests>=2.23.0
 scipy>=1.4.1
 thop>=0.1.1  # FLOPs computation
-torch>=1.7.0,<1.14  # see https://pytorch.org/get-started/locally (recommended)
+torch>=1.7.0  # see https://pytorch.org/get-started/locally (recommended)
 torchvision>=0.8.1
 tqdm>=4.64.0
 # protobuf<=3.20.1  # https://github.com/ultralytics/yolov5/issues/8012
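Note: removing the <1.14 upper bound on torch (and the <2 bound on numpy) is what allows installing current 2.x releases; the AMP rewrites elsewhere in this commit keep the code running once they are installed. An illustrative post-install sanity check:

    # Prints versions and confirms the new-style AMP entry points exist
    # (torch.amp.GradScaler takes a device argument from ~PyTorch 2.3 on).
    import numpy
    import torch

    print(numpy.__version__, torch.__version__)
    print(hasattr(torch.amp, "autocast"), hasattr(torch.amp, "GradScaler"))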
train.py
@@ -250,7 +250,7 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
     maps = np.zeros(nc)  # mAP per class
     results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
     scheduler.last_epoch = start_epoch - 1  # do not move
-    scaler = torch.cuda.amp.GradScaler(enabled=amp)
+    scaler = torch.amp.GradScaler("cuda", enabled=amp)
     stopper, stop = EarlyStopping(patience=opt.patience), False
     compute_loss = ComputeLoss(model)  # init loss class
     callbacks.run('on_train_start')
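Note: torch.amp.GradScaler takes the device as its first argument (available from around PyTorch 2.3; older releases only ship torch.cuda.amp.GradScaler). A self-contained sketch of the scaler pattern train.py relies on, with illustrative model and data names, assuming a CUDA device:

    import torch

    model = torch.nn.Linear(10, 2).cuda()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    amp = True  # as in train.py: AMP on when training on GPU
    scaler = torch.amp.GradScaler("cuda", enabled=amp)  # was torch.cuda.amp.GradScaler(enabled=amp)

    x = torch.randn(8, 10, device="cuda")
    y = torch.randint(0, 2, (8,), device="cuda")

    with torch.amp.autocast("cuda", enabled=amp):  # forward pass in reduced precision
        loss = torch.nn.functional.cross_entropy(model(x), y)

    scaler.scale(loss).backward()  # scale the loss so fp16 gradients don't underflow
    scaler.step(optimizer)         # unscales gradients, skips the step on inf/nan
    scaler.update()                # adapts the scale factor for the next iteration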
@@ -305,7 +305,7 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
                     imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

             # Forward
-            with torch.cuda.amp.autocast(amp):
+            with torch.amp.autocast("cuda", enabled=amp):
                 pred = model(imgs)  # forward
                 loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                 if RANK != -1:
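Note: on recent PyTorch (2.4+, as far as I know) the old torch.cuda.amp constructors still work but emit a FutureWarning pointing at the torch.amp replacements, so this commit mainly moves off that deprecation path ahead of eventual removal. An illustrative check:

    import warnings
    import torch

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        torch.cuda.amp.GradScaler(enabled=False)  # old spelling still constructs
    print([str(w.message) for w in caught])       # deprecation notice on new PyTorch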