Merge master

Glenn Jocher 2021-12-12 23:52:55 +01:00
commit a6037838a3
3 changed files with 11 additions and 8 deletions


@@ -38,7 +38,7 @@ from utils.torch_utils import select_device, time_sync
 @torch.no_grad()
 def run(weights=ROOT / 'yolov5s.pt',  # model.pt path(s)
         source=ROOT / 'data/images',  # file/dir/URL/glob, 0 for webcam
-        imgsz=640,  # inference size (pixels)
+        imgsz=(640, 640),  # inference size (height, width)
         conf_thres=0.25,  # confidence threshold
         iou_thres=0.45,  # NMS IOU threshold
         max_det=1000,  # maximum detections per image
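
A minimal sketch of calling the updated signature programmatically, assuming the YOLOv5 repository root is the working directory and 'yolov5s.pt' weights are available locally (the parameter names mirror the hunk above):

    # call detect.run() with the new (height, width) inference size
    import detect

    detect.run(weights='yolov5s.pt',   # model.pt path
               source='data/images',   # file/dir/URL/glob
               imgsz=(640, 640),       # inference size is now a (height, width) tuple
               conf_thres=0.25,        # confidence threshold
               iou_thres=0.45,         # NMS IoU threshold
               max_det=1000)           # maximum detections per image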


@@ -139,5 +139,6 @@ if __name__ == '__main__':
             np.zeros((320, 640, 3))]  # numpy

     results = model(imgs)  # batched inference
-    results.print()
+    rl = results.tolist()
     results.save()
+    print(rl[0].pandas().xyxy[0])
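
For context, a short sketch of the same check through the public torch.hub API, assuming network access to the 'ultralytics/yolov5' repo and pretrained 'yolov5s' weights; the example image URL is illustrative:

    import torch

    model = torch.hub.load('ultralytics/yolov5', 'yolov5s')         # pretrained AutoShape model
    results = model(['https://ultralytics.com/images/zidane.jpg'])  # batched inference
    rl = results.tolist()              # list with one Detections object per image
    results.save()                     # save annotated images
    print(rl[0].pandas().xyxy[0])      # first image's detections as a pandas DataFrame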


@@ -525,7 +525,7 @@ class AutoShape(nn.Module):
 class Detections:
     # YOLOv5 detections class for inference results
-    def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
+    def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None):
         super().__init__()
         d = pred[0].device  # device
         gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in imgs]  # normalizations
@@ -533,6 +533,7 @@ class Detections:
         self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
         self.names = names  # class names
         self.files = files  # image filenames
+        self.times = times  # profiling times
         self.xyxy = pred  # xyxy pixels
         self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
         self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
@@ -612,10 +613,11 @@ class Detections:
     def tolist(self):
         # return a list of Detections objects, i.e. 'for result in results.tolist():'
-        x = [Detections([self.imgs[i]], [self.pred[i]], names=self.names, shape=self.s) for i in range(self.n)]
-        for d in x:
-            for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
-                setattr(d, k, getattr(d, k)[0])  # pop out of list
+        r = range(self.n)  # iterable
+        x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r]
+        # for d in x:
+        #    for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
+        #        setattr(d, k, getattr(d, k)[0])  # pop out of list
         return x

     def __len__(self):
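
A usage sketch of the refactored tolist(), assuming 'results' is the Detections object returned by an AutoShape model call. Because the pop-out loop is now commented out, each per-image Detections keeps its attributes wrapped in single-element lists (index with [0]), and it now also receives its filename and the shared profiling times:

    for result in results.tolist():    # one Detections object per input image
        boxes = result.xyxy[0]         # (N, 6) tensor: xyxy, conf, cls for this image
        name = result.files[0]         # filename carried over from the parent object
        print(name, len(boxes), 'detections')
        result.print()                 # per-image summary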