pull/13566/head
UltralyticsAssistant 2025-04-18 11:25:51 +00:00
parent 1be6f8f298
commit 746ce3302f
4 changed files with 198 additions and 156 deletions

View File

@@ -6,17 +6,26 @@
# └── datasets
# └── coco128 ← downloads here
# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: D:/lzy/yolov5/yolov5/data_org/yolo_dataset # dataset root dir
train: train/images # train images (relative to 'path') 128 images
val: valid/images # val images (relative to 'path') 128 images
path: D:/lzy/yolov5/yolov5/data_org/yolo_dataset # dataset root dir
train: train/images # train images (relative to 'path') 128 images
val: valid/images # val images (relative to 'path') 128 images
test: test/images # test images (optional)
# Classes
nc: 11 # number of classes
names: ['animasls', 'cat', 'chicken', 'cow', 'dog', 'fox',
'goat', 'horse', 'person', 'recoon', 'skunk'] # class names
nc: 11 # number of classes
names: [
"animasls",
"cat",
"chicken",
"cow",
"dog",
"fox",
"goat",
"horse",
"person",
"recoon",
"skunk",
] # class names
# Download script/URL (optional)
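
Since nc must equal len(names) (and spellings such as "animasls" and "recoon" are kept as-is here because they must match the dataset's actual label strings), a quick consistency check catches config drift early. A minimal sketch, assuming the YAML above is saved at a hypothetical data.yaml path:

import yaml

with open("data_org/yolo_dataset/data.yaml") as f:  # hypothetical location of the config above
    cfg = yaml.safe_load(f)
assert cfg["nc"] == len(cfg["names"]), f'nc={cfg["nc"]} but {len(cfg["names"])} names'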

View File

@@ -7,9 +7,10 @@
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import os\n",
"import cv2 as cv\n",
"import json"
"\n",
"import cv2 as cv"
]
},
{
@@ -19,7 +20,7 @@
"metadata": {},
"outputs": [],
"source": [
"img = cv.imread(r\"D:\\lzy\\yolov5\\yolov5\\data_org\\yolo_dataset\\test\\images\\9_jpg.rf.eb8e232922688878a850763f2127e8fd.jpg\")\n"
"img = cv.imread(r\"D:\\lzy\\yolov5\\yolov5\\data_org\\yolo_dataset\\test\\images\\9_jpg.rf.eb8e232922688878a850763f2127e8fd.jpg\")"
]
},
{
@@ -67,7 +68,7 @@
"outputs": [],
"source": [
"# 读取文件\n",
"with open(label_path, 'r') as f:\n",
"with open(label_path) as f:\n",
" content = json.load(f)\n",
"pass"
]
@@ -81,8 +82,8 @@
"source": [
"# 创建一个id到图片名称的映射以字典形式存储\n",
"dict_id2img = dict()\n",
"for img_info in content['images']:\n",
" dict_id2img[img_info['id']] = img_info['file_name']\n",
"for img_info in content[\"images\"]:\n",
" dict_id2img[img_info[\"id\"]] = img_info[\"file_name\"]\n",
"pass"
]
},
@@ -93,8 +94,8 @@
"metadata": {},
"outputs": [],
"source": [
"img_height = content['images'][0]['height']\n",
"img_width = content['images'][0]['height']"
"img_height = content[\"images\"][0][\"height\"]\n",
"img_width = content[\"images\"][0][\"height\"]"
]
},
{
@@ -105,15 +106,15 @@
"outputs": [],
"source": [
"# 开始读取标签信息处理后写入txt文件\n",
"for label_info in content['annotations']:\n",
" img_name = dict_id2img[label_info['image_id']]\n",
" class_name = label_info['category_id']\n",
" x_center = (label_info['bbox'][0] + label_info['bbox'][2] / 2) / img_height\n",
" y_center = (label_info['bbox'][1] + label_info['bbox'][3] / 2) / img_width\n",
" w = label_info['bbox'][1] / 640\n",
" h = label_info['bbox'][3] / 640\n",
" with open(os.path.join(img_folder.replace(\"images\", \"labels\"), img_name.replace(\".jpg\", \".txt\")), 'a') as f:\n",
" f.write(\"{} {} {} {} {}\\n\".format(class_name, x_center, y_center, w, h))"
"for label_info in content[\"annotations\"]:\n",
" img_name = dict_id2img[label_info[\"image_id\"]]\n",
" class_name = label_info[\"category_id\"]\n",
" x_center = (label_info[\"bbox\"][0] + label_info[\"bbox\"][2] / 2) / img_height\n",
" y_center = (label_info[\"bbox\"][1] + label_info[\"bbox\"][3] / 2) / img_width\n",
" w = label_info[\"bbox\"][1] / 640\n",
" h = label_info[\"bbox\"][3] / 640\n",
" with open(os.path.join(img_folder.replace(\"images\", \"labels\"), img_name.replace(\".jpg\", \".txt\")), \"a\") as f:\n",
" f.write(f\"{class_name} {x_center} {y_center} {w} {h}\\n\")"
]
}
],
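
For reuse outside the notebook, a self-contained version of the same COCO-to-YOLO conversion; a minimal sketch, where coco_json and labels_dir are hypothetical arguments and per-image sizes are read from each image record rather than from images[0]:

import json
import os


def coco_to_yolo(coco_json, labels_dir):
    # Convert COCO annotations to YOLO txt labels, one file per image
    with open(coco_json) as f:
        content = json.load(f)
    images = {im["id"]: im for im in content["images"]}
    os.makedirs(labels_dir, exist_ok=True)
    for ann in content["annotations"]:
        im = images[ann["image_id"]]
        x, y, w, h = ann["bbox"]  # COCO bbox: [x_min, y_min, width, height] in pixels
        iw, ih = im["width"], im["height"]
        # YOLO label line: class x_center y_center width height, all normalized to [0, 1]
        line = f'{ann["category_id"]} {(x + w / 2) / iw} {(y + h / 2) / ih} {w / iw} {h / ih}\n'
        txt_name = os.path.splitext(im["file_name"])[0] + ".txt"
        with open(os.path.join(labels_dir, txt_name), "a") as f:
            f.write(line)

Note that COCO category ids are commonly 1-based while YOLO expects 0-based contiguous class indices, so a remap may be needed depending on how the dataset was exported.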

View File

@@ -24,88 +24,110 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from models.experimental import attempt_load
from utils.datasets import LoadImages, LoadStreams
from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \
increment_path, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \
strip_optimizer, xyxy2xywh
from utils.general import (
apply_classifier,
check_img_size,
check_imshow,
check_requirements,
check_suffix,
colorstr,
increment_path,
non_max_suppression,
print_args,
save_one_box,
scale_coords,
set_logging,
strip_optimizer,
xyxy2xywh,
)
from utils.plots import Annotator, colors
from utils.torch_utils import load_classifier, select_device, time_sync
@torch.no_grad()
def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s)
source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam
imgsz=640, # inference size (pixels)
conf_thres=0.25, # confidence threshold
iou_thres=0.45, # NMS IOU threshold
max_det=1000, # maximum detections per image
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
view_img=False, # show results
save_txt=False, # save results to *.txt
save_conf=False, # save confidences in --save-txt labels
save_crop=False, # save cropped prediction boxes
nosave=False, # do not save images/videos
classes=None, # filter by class: --class 0, or --class 0 2 3
agnostic_nms=False, # class-agnostic NMS
augment=False, # augmented inference
visualize=False, # visualize features
update=False, # update all models
project=ROOT / 'runs/detect', # save results to project/name
name='exp', # save results to project/name
exist_ok=False, # existing project/name ok, do not increment
line_thickness=3, # bounding box thickness (pixels)
hide_labels=False, # hide labels
hide_conf=False, # hide confidences
half=False, # use FP16 half-precision inference
dnn=False, # use OpenCV DNN for ONNX inference
):
def run(
weights=ROOT / "yolov5s.pt", # model.pt path(s)
source=ROOT / "data/images", # file/dir/URL/glob, 0 for webcam
imgsz=640, # inference size (pixels)
conf_thres=0.25, # confidence threshold
iou_thres=0.45, # NMS IOU threshold
max_det=1000, # maximum detections per image
device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu
view_img=False, # show results
save_txt=False, # save results to *.txt
save_conf=False, # save confidences in --save-txt labels
save_crop=False, # save cropped prediction boxes
nosave=False, # do not save images/videos
classes=None, # filter by class: --class 0, or --class 0 2 3
agnostic_nms=False, # class-agnostic NMS
augment=False, # augmented inference
visualize=False, # visualize features
update=False, # update all models
project=ROOT / "runs/detect", # save results to project/name
name="exp", # save results to project/name
exist_ok=False, # existing project/name ok, do not increment
line_thickness=3, # bounding box thickness (pixels)
hide_labels=False, # hide labels
hide_conf=False, # hide confidences
half=False, # use FP16 half-precision inference
dnn=False, # use OpenCV DNN for ONNX inference
):
source = str(source)
save_img = not nosave and not source.endswith('.txt') # save inference images
webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
('rtsp://', 'rtmp://', 'http://', 'https://'))
save_img = not nosave and not source.endswith(".txt") # save inference images
webcam = (
source.isnumeric()
or source.endswith(".txt")
or source.lower().startswith(("rtsp://", "rtmp://", "http://", "https://"))
)
# Directories
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
(save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Initialize
set_logging()
device = select_device(device)
half &= device.type != 'cpu' # half precision only supported on CUDA
half &= device.type != "cpu" # half precision only supported on CUDA
# Load model
w = str(weights[0] if isinstance(weights, list) else weights)
classify, suffix, suffixes = False, Path(w).suffix.lower(), ['.pt', '.onnx', '.tflite', '.pb', '']
classify, suffix, suffixes = False, Path(w).suffix.lower(), [".pt", ".onnx", ".tflite", ".pb", ""]
check_suffix(w, suffixes) # check weights have acceptable suffix
pt, onnx, tflite, pb, saved_model = (suffix == x for x in suffixes) # backend booleans
stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults
stride, names = 64, [f"class{i}" for i in range(1000)] # assign defaults
if pt:
model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device)
model = torch.jit.load(w) if "torchscript" in w else attempt_load(weights, map_location=device)
stride = int(model.stride.max()) # model stride
names = model.module.names if hasattr(model, 'module') else model.names # get class names
names = model.module.names if hasattr(model, "module") else model.names # get class names
if half:
model.half() # to FP16
if classify: # second-stage classifier
modelc = load_classifier(name='resnet50', n=2) # initialize
modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model']).to(device).eval()
modelc = load_classifier(name="resnet50", n=2) # initialize
modelc.load_state_dict(torch.load("resnet50.pt", map_location=device)["model"]).to(device).eval()
elif onnx:
if dnn:
# check_requirements(('opencv-python>=4.5.4',))
net = cv2.dnn.readNetFromONNX(w)
else:
check_requirements(('onnx', 'onnxruntime'))
check_requirements(("onnx", "onnxruntime"))
import onnxruntime
session = onnxruntime.InferenceSession(w, None)
else: # TensorFlow models
check_requirements(('tensorflow>=2.4.1',))
check_requirements(("tensorflow>=2.4.1",))
import tensorflow as tf
if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
def wrap_frozen_graph(gd, inputs, outputs):
x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped import
return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs),
tf.nest.map_structure(x.graph.as_graph_element, outputs))
return x.prune(
tf.nest.map_structure(x.graph.as_graph_element, inputs),
tf.nest.map_structure(x.graph.as_graph_element, outputs),
)
graph_def = tf.Graph().as_graph_def()
graph_def.ParseFromString(open(w, 'rb').read())
graph_def.ParseFromString(open(w, "rb").read())
frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0")
elif saved_model:
model = tf.keras.models.load_model(w)
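Once pruned, the wrapped frozen graph is callable like a plain function; hypothetical usage, assuming imn holds the preprocessed float input tensor:

pred = frozen_func(x=tf.constant(imn)).numpy()  # feed input "x:0", fetch output "Identity:0"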
@@ -114,7 +136,7 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s)
interpreter.allocate_tensors() # allocate
input_details = interpreter.get_input_details() # inputs
output_details = interpreter.get_output_details() # outputs
int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model
int8 = input_details[0]["dtype"] == np.uint8 # is TFLite quantized uint8 model
imgsz = check_img_size(imgsz, s=stride) # check image size
# Dataloader
@@ -129,13 +151,13 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s)
vid_path, vid_writer = [None] * bs, [None] * bs
# Run inference
if pt and device.type != 'cpu':
if pt and device.type != "cpu":
model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.parameters()))) # run once
dt, seen = [0.0, 0.0, 0.0], 0
for path, img, im0s, vid_cap in dataset:
t1 = time_sync()
if onnx:
img = img.astype('float32')
img = img.astype("float32")
else:
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
@@ -163,13 +185,13 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s)
pred = model(imn, training=False).numpy()
elif tflite:
if int8:
scale, zero_point = input_details[0]['quantization']
scale, zero_point = input_details[0]["quantization"]
imn = (imn / scale + zero_point).astype(np.uint8) # de-scale
interpreter.set_tensor(input_details[0]['index'], imn)
interpreter.set_tensor(input_details[0]["index"], imn)
interpreter.invoke()
pred = interpreter.get_tensor(output_details[0]['index'])
pred = interpreter.get_tensor(output_details[0]["index"])
if int8:
scale, zero_point = output_details[0]['quantization']
scale, zero_point = output_details[0]["quantization"]
pred = (pred.astype(np.float32) - zero_point) * scale # re-scale
pred[..., 0] *= imgsz[1] # x
pred[..., 1] *= imgsz[0] # y
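
The int8 branch applies the standard affine quantization mapping, real = (quantized - zero_point) * scale, in both directions. A minimal round-trip sketch with illustrative values:

import numpy as np

scale, zero_point = 1 / 255, 0  # illustrative values; read from the model's quantization params
x = np.array([0.0, 0.5, 1.0], dtype=np.float32)
q = (x / scale + zero_point).astype(np.uint8)  # de-scale: float input -> quantized uint8
x_back = (q.astype(np.float32) - zero_point) * scale  # re-scale: uint8 output -> float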
@@ -191,14 +213,14 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s)
for i, det in enumerate(pred): # per image
seen += 1
if webcam: # batch_size >= 1
p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count
p, s, im0, frame = path[i], f"{i}: ", im0s[i].copy(), dataset.count
else:
p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0)
p, s, im0, frame = path, "", im0s.copy(), getattr(dataset, "frame", 0)
p = Path(p) # to Path
save_path = str(save_dir / p.name) # img.jpg
txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
s += '%gx%g ' % img.shape[2:] # print string
txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}") # img.txt
s += "{:g}x{:g} ".format(*img.shape[2:]) # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
imc = im0.copy() if save_crop else im0 # for save_crop
annotator = Annotator(im0, line_width=line_thickness, example=str(names))
@@ -216,18 +238,18 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s)
if save_txt: # Write to file
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(txt_path + '.txt', 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
with open(txt_path + ".txt", "a") as f:
f.write(("%g " * len(line)).rstrip() % line + "\n")
if save_img or save_crop or view_img: # Add bbox to image
c = int(cls) # integer class
label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
label = None if hide_labels else (names[c] if hide_conf else f"{names[c]} {conf:.2f}")
annotator.box_label(xyxy, label, color=colors(c, True))
if save_crop:
save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
save_one_box(xyxy, imc, file=save_dir / "crops" / names[c] / f"{p.stem}.jpg", BGR=True)
# Print time (inference-only)
print(f'{s}Done. ({t3 - t2:.3f}s)')
print(f"{s}Done. ({t3 - t2:.3f}s)")
# Stream results
im0 = annotator.result()
@@ -237,7 +259,7 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s)
# Save results (image with detections)
if save_img:
if dataset.mode == 'image':
if dataset.mode == "image":
cv2.imwrite(save_path, im0)
else: # 'video' or 'stream'
if vid_path[i] != save_path: # new video
@@ -250,15 +272,15 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s)
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
else: # stream
fps, w, h = 30, im0.shape[1], im0.shape[0]
save_path += '.mp4'
vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
save_path += ".mp4"
vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
vid_writer[i].write(im0)
# Print results
t = tuple(x / seen * 1E3 for x in dt) # speeds per image
print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
t = tuple(x / seen * 1e3 for x in dt) # speeds per image
print(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}" % t)
if save_txt or save_img:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ""
print(f"Results saved to {colorstr('bold', save_dir)}{s}")
if update:
strip_optimizer(weights) # update model (to fix SourceChangeWarning)
@@ -266,31 +288,42 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s)
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument('--weights', nargs='+', type=str, default=r'D:\lzy\yolov5\yolov5\runs\train\exp15\weights\best.pt', help='model path(s)')
parser.add_argument('--source', type=str, default=r'D:\lzy\yolov5\yolov5\data_org\yolo_dataset\test\images', help='file/dir/URL/glob, 0 for webcam')
parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--view-img', action='store_true', help='show results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--visualize', action='store_true', help='visualize features')
parser.add_argument('--update', action='store_true', help='update all models')
parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name')
parser.add_argument('--name', default='exp', help='save results to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
parser.add_argument(
"--weights",
nargs="+",
type=str,
default=r"D:\lzy\yolov5\yolov5\runs\train\exp15\weights\best.pt",
help="model path(s)",
)
parser.add_argument(
"--source",
type=str,
default=r"D:\lzy\yolov5\yolov5\data_org\yolo_dataset\test\images",
help="file/dir/URL/glob, 0 for webcam",
)
parser.add_argument("--imgsz", "--img", "--img-size", nargs="+", type=int, default=[640], help="inference size h,w")
parser.add_argument("--conf-thres", type=float, default=0.25, help="confidence threshold")
parser.add_argument("--iou-thres", type=float, default=0.45, help="NMS IoU threshold")
parser.add_argument("--max-det", type=int, default=1000, help="maximum detections per image")
parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
parser.add_argument("--view-img", action="store_true", help="show results")
parser.add_argument("--save-txt", action="store_true", help="save results to *.txt")
parser.add_argument("--save-conf", action="store_true", help="save confidences in --save-txt labels")
parser.add_argument("--save-crop", action="store_true", help="save cropped prediction boxes")
parser.add_argument("--nosave", action="store_true", help="do not save images/videos")
parser.add_argument("--classes", nargs="+", type=int, help="filter by class: --classes 0, or --classes 0 2 3")
parser.add_argument("--agnostic-nms", action="store_true", help="class-agnostic NMS")
parser.add_argument("--augment", action="store_true", help="augmented inference")
parser.add_argument("--visualize", action="store_true", help="visualize features")
parser.add_argument("--update", action="store_true", help="update all models")
parser.add_argument("--project", default=ROOT / "runs/detect", help="save results to project/name")
parser.add_argument("--name", default="exp", help="save results to project/name")
parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
parser.add_argument("--line-thickness", default=3, type=int, help="bounding box thickness (pixels)")
parser.add_argument("--hide-labels", default=False, action="store_true", help="hide labels")
parser.add_argument("--hide-conf", default=False, action="store_true", help="hide confidences")
parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference")
parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference")
opt = parser.parse_args()
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
print_args(FILE.stem, opt)
@@ -298,7 +331,7 @@ def parse_opt():
def main(opt):
check_requirements(exclude=('tensorboard', 'thop'))
check_requirements(exclude=("tensorboard", "thop"))
run(**vars(opt))
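
The entry point can also be driven programmatically; a minimal sketch mirroring the parser defaults (assuming this file is detect.py; the weights/source paths are the author's local Windows paths):

from detect import run

run(
    weights=r"D:\lzy\yolov5\yolov5\runs\train\exp15\weights\best.pt",
    source=r"D:\lzy\yolov5\yolov5\data_org\yolo_dataset\test\images",
    imgsz=[640, 640],  # run() expects an expanded (h, w) pair, as parse_opt() produces
    conf_thres=0.25,
)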

View File

@@ -51,7 +51,7 @@ from utils.autoanchor import check_anchors
from utils.autobatch import check_train_batch_size
from utils.callbacks import Callbacks
from utils.dataloaders import create_dataloader
from utils.downloads import attempt_download, is_url
from utils.downloads import attempt_download
from utils.general import (
LOGGER,
TQDM_BAR_FORMAT,
@@ -79,7 +79,6 @@ from utils.general import (
yaml_save,
)
from utils.loggers import LOGGERS, Loggers
from utils.loggers.comet.comet_utils import check_comet_resume
from utils.loss import ComputeLoss
from utils.metrics import fitness
from utils.plots import plot_evolve
@@ -564,38 +563,38 @@ def parse_opt(known=False):
- Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data
"""
parser = argparse.ArgumentParser()
parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path')
parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch.yaml', help='hyperparameters path')
parser.add_argument('--epochs', type=int, default=300)
parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
parser.add_argument('--rect', action='store_true', help='rectangular training')
parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
parser.add_argument('--noval', action='store_true', help='only validate final epoch')
parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--quad', action='store_true', help='quad dataloader')
parser.add_argument('--linear-lr', action='store_true', help='linear LR')
parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
parser.add_argument('--freeze', type=int, default=0, help='Number of layers to freeze. backbone=10, all=24')
parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
parser.add_argument("--weights", type=str, default=ROOT / "yolov5s.pt", help="initial weights path")
parser.add_argument("--cfg", type=str, default="", help="model.yaml path")
parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path")
parser.add_argument("--hyp", type=str, default=ROOT / "data/hyps/hyp.scratch.yaml", help="hyperparameters path")
parser.add_argument("--epochs", type=int, default=300)
parser.add_argument("--batch-size", type=int, default=16, help="total batch size for all GPUs")
parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=640, help="train, val image size (pixels)")
parser.add_argument("--rect", action="store_true", help="rectangular training")
parser.add_argument("--resume", nargs="?", const=True, default=False, help="resume most recent training")
parser.add_argument("--nosave", action="store_true", help="only save final checkpoint")
parser.add_argument("--noval", action="store_true", help="only validate final epoch")
parser.add_argument("--noautoanchor", action="store_true", help="disable autoanchor check")
parser.add_argument("--evolve", type=int, nargs="?", const=300, help="evolve hyperparameters for x generations")
parser.add_argument("--bucket", type=str, default="", help="gsutil bucket")
parser.add_argument("--cache", type=str, nargs="?", const="ram", help='--cache images in "ram" (default) or "disk"')
parser.add_argument("--image-weights", action="store_true", help="use weighted image selection for training")
parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
parser.add_argument("--multi-scale", action="store_true", help="vary img-size +/- 50%%")
parser.add_argument("--single-cls", action="store_true", help="train multi-class data as single-class")
parser.add_argument("--adam", action="store_true", help="use torch.optim.Adam() optimizer")
parser.add_argument("--sync-bn", action="store_true", help="use SyncBatchNorm, only available in DDP mode")
parser.add_argument("--workers", type=int, default=8, help="maximum number of dataloader workers")
parser.add_argument("--project", default=ROOT / "runs/train", help="save to project/name")
parser.add_argument("--name", default="exp", help="save to project/name")
parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
parser.add_argument("--quad", action="store_true", help="quad dataloader")
parser.add_argument("--linear-lr", action="store_true", help="linear LR")
parser.add_argument("--label-smoothing", type=float, default=0.0, help="Label smoothing epsilon")
parser.add_argument("--patience", type=int, default=100, help="EarlyStopping patience (epochs without improvement)")
parser.add_argument("--freeze", type=int, default=0, help="Number of layers to freeze. backbone=10, all=24")
parser.add_argument("--save-period", type=int, default=-1, help="Save checkpoint every x epochs (disabled if < 1)")
parser.add_argument("--local_rank", type=int, default=-1, help="DDP parameter, do not modify")
# Logger arguments
parser.add_argument("--entity", default=None, help="Entity")
@@ -634,11 +633,11 @@ def main(opt, callbacks=Callbacks()):
# Resume
if opt.resume and not check_wandb_resume(opt) and not opt.evolve: # resume an interrupted run
ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
with open(Path(ckpt).parent.parent / 'opt.yaml', errors='ignore') as f:
assert os.path.isfile(ckpt), "ERROR: --resume checkpoint does not exist"
with open(Path(ckpt).parent.parent / "opt.yaml", errors="ignore") as f:
opt = argparse.Namespace(**yaml.safe_load(f)) # replace
opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate
LOGGER.info(f'Resuming training from {ckpt}')
opt.cfg, opt.weights, opt.resume = "", ckpt, True # reinstate
LOGGER.info(f"Resuming training from {ckpt}")
else:
opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = (
check_file(opt.data),