refactored the code

parent 42486238a9
commit 6695bac0cd
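Every file in this commit applies the same refactor: the PyTorch version string is checked once, at import time, in a module-level CHECK_PYTORCH_18 constant, and each call site branches on that flag to pick between the legacy torch.cuda.amp API (1.8.x) and the newer torch.amp API. A minimal standalone sketch of the pattern; the make_autocast/make_scaler helper names are illustrative only, the diff itself inlines the branch at every call site:

import torch

# Evaluated once at import time instead of re-parsing the version string on every iteration.
CHECK_PYTORCH_18 = torch.__version__.startswith("1.8")


def make_autocast(enabled: bool):
    # Illustrative helper: pick the autocast context manager for the running PyTorch version.
    if CHECK_PYTORCH_18:
        return torch.cuda.amp.autocast(enabled=enabled)  # 1.8.x CUDA-specific API
    return torch.amp.autocast("cuda", enabled=enabled)  # newer device-agnostic API


def make_scaler(enabled: bool):
    # Illustrative helper: pick the matching gradient scaler.
    if CHECK_PYTORCH_18:
        return torch.cuda.amp.GradScaler(enabled=enabled)
    return torch.amp.GradScaler("cuda", enabled=enabled)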
@@ -23,6 +23,7 @@ from datetime import datetime
 from pathlib import Path
 
 import torch
+CHECK_PYTORCH_18 = torch.__version__.startswith("1.8")
 import torch.distributed as dist
 import torch.hub as hub
 import torch.optim.lr_scheduler as lr_scheduler
@@ -200,8 +201,7 @@ def train(opt, device):
     criterion = smartCrossEntropyLoss(label_smoothing=opt.label_smoothing)  # loss function
     best_fitness = 0.0
     # adding a check to torch version
-    scaler = None
-    if torch.__version__.startswith("1.8"):
+    if CHECK_PYTORCH_18:
         scaler = torch.cuda.amp.GradScaler(enabled=cuda)
     else:
         scaler = torch.amp.GradScaler("cuda", enabled=cuda)
@@ -225,8 +225,7 @@ def train(opt, device):
             images, labels = images.to(device, non_blocking=True), labels.to(device)
 
             # Forward
-            amp_autocast = None
-            if torch.__version__.startswith("1.8"):
+            if CHECK_PYTORCH_18:
                 amp_autocast = torch.cuda.amp.autocast(enabled=device.type != "cpu")
             else:
                 amp_autocast = torch.amp.autocast("cuda", enabled=device.type != "cpu")
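The hunks above only construct the scaler and the autocast context object; later in the training loop they are presumably consumed in the usual AMP pattern. A hedged sketch of that consumption, reusing the names from the hunks, with the loop body simplified:

with amp_autocast:  # mixed-precision forward pass (no-op when AMP is disabled)
    loss = criterion(model(images), labels)

scaler.scale(loss).backward()  # backward pass on the scaled loss
scaler.step(optimizer)         # unscale gradients, then optimizer step
scaler.update()                # adapt the loss-scale factor for the next iteration
optimizer.zero_grad()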
@@ -26,6 +26,7 @@ import sys
 from pathlib import Path
 
 import torch
+CHECK_PYTORCH_18 = torch.__version__.startswith("1.8")
 from tqdm import tqdm
 
 FILE = Path(__file__).resolve()
@@ -110,8 +111,7 @@ def run(
     bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0)
 
     # checking the version
-    amp_autocast = None
-    if torch.__version__.startswith("1.8"):
+    if CHECK_PYTORCH_18:
         amp_autocast = torch.cuda.amp.autocast(enabled=device.type != "cpu")
     else:
         amp_autocast = torch.amp.autocast("cuda", enabled=device.type != "cpu")
@@ -20,7 +20,7 @@ import requests
 import torch
 import torch.nn as nn
 from PIL import Image
-
+CHECK_PYTORCH_18 = torch.__version__.startswith("1.8")
 # Import 'ultralytics' package or install if missing
 try:
     import ultralytics
@@ -863,8 +863,7 @@ class AutoShape(nn.Module):
             p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device)  # param
             autocast = self.amp and (p.device.type != "cpu")  # Automatic Mixed Precision (AMP) inference
             if isinstance(ims, torch.Tensor):  # torch
-                amp_autocast = None
-                if torch.__version__.startswith("1.8"):
+                if CHECK_PYTORCH_18:
                     amp_autocast = torch.cuda.amp.autocast(enabled=autocast)
                 else:
                     amp_autocast = torch.amp.autocast("cuda", enabled=autocast)
@@ -895,8 +894,7 @@ class AutoShape(nn.Module):
             x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2)))  # stack and BHWC to BCHW
             x = torch.from_numpy(x).to(p.device).type_as(p) / 255  # uint8 to fp16/32
 
-        amp_autocast = None
-        if torch.__version__.startswith("1.8"):
+        if CHECK_PYTORCH_18:
             amp_autocast = torch.cuda.amp.autocast(enabled=autocast)
         else:
             amp_autocast = torch.amp.autocast("cuda", enabled=autocast)
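In AutoShape.forward the freshly built context object is presumably entered right around the model call; a minimal sketch, with pre- and post-processing omitted and the on/off decision carried by the autocast flag computed above:

with amp_autocast:
    y = self.model(x, augment=augment)  # inference under (possibly disabled) mixed precision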
@@ -39,6 +39,7 @@ ROOT = FILE.parents[1]  # YOLOv5 root directory
 if str(ROOT) not in sys.path:
     sys.path.append(str(ROOT))  # add ROOT to PATH
 ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+CHECK_PYTORCH_18 = torch.__version__.startswith("1.8")
 
 import segment.val as validate  # for end-of-epoch mAP
 from models.experimental import attempt_load
@@ -321,8 +322,7 @@ def train(hyp, opt, device, callbacks):
     maps = np.zeros(nc)  # mAP per class
     results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
     scheduler.last_epoch = start_epoch - 1  # do not move
-    scaler = None
-    if torch.__version__.startswith("1.8"):
+    if CHECK_PYTORCH_18:
         scaler = torch.cuda.amp.GradScaler(enabled=amp)
     else:
         scaler = torch.amp.GradScaler("cuda", enabled=amp)
@@ -383,8 +383,7 @@ def train(hyp, opt, device, callbacks):
                 if sf != 1:
                     ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                     imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)
-            amp_autocast = None
-            if torch.__version__.startswith("1.8"):
+            if CHECK_PYTORCH_18:
                 amp_autocast = torch.cuda.amp.autocast(enabled=amp)
             else:
                 amp_autocast = torch.amp.autocast("cuda", enabled=amp)
train.py (7 changed lines)
@@ -43,6 +43,7 @@ ROOT = FILE.parents[0]  # YOLOv5 root directory
 if str(ROOT) not in sys.path:
     sys.path.append(str(ROOT))  # add ROOT to PATH
 ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+CHECK_PYTORCH_18 = torch.__version__.startswith("1.8")
 
 import val as validate  # for end-of-epoch mAP
 from models.experimental import attempt_load
@@ -353,8 +354,7 @@ def train(hyp, opt, device, callbacks):
     maps = np.zeros(nc)  # mAP per class
     results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
     scheduler.last_epoch = start_epoch - 1  # do not move
-    scaler = None
-    if torch.__version__.startswith("1.8"):
+    if CHECK_PYTORCH_18:
         scaler = torch.cuda.amp.GradScaler(enabled=amp)
     else:
         scaler = torch.amp.GradScaler("cuda", enabled=amp)
@@ -415,8 +415,7 @@ def train(hyp, opt, device, callbacks):
 
             # Forward
             # with Autocast:
-            amp_autocast = None
-            if torch.__version__.startswith("1.8"):
+            if CHECK_PYTORCH_18:
                 amp_autocast = torch.cuda.amp.autocast(enabled=amp)
             else:
                 amp_autocast = torch.amp.autocast("cuda", enabled=amp)