From 67bf9a974e5d4f6bfb851f6e10871f6f16e72ffb Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Tue, 10 Nov 2020 14:15:03 +0100
Subject: [PATCH] Replace 'ground truth' with 'labels' (#1337)

* Replace 'ground truth' with 'labels'

* Using torch... update
---
 README.md        |  2 +-
 test.py          | 10 +++++-----
 tutorial.ipynb   | 10 +++++-----
 utils/general.py | 16 ++++++++--------
 4 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/README.md b/README.md
index 4d1083635..f83529304 100755
--- a/README.md
+++ b/README.md
@@ -87,7 +87,7 @@ To run inference on example images in `data/images`:
 $ python detect.py --source data/images --weights yolov5s.pt --conf 0.25
 
 Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', img_size=640, iou_thres=0.45, output='runs/detect', save_conf=False, save_txt=False, source='data/images', update=False, view_img=False, weights='yolov5s.pt')
-Using CUDA device0 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', total_memory=16160MB)
+Using torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130MB)
 
 Downloading https://github.com/ultralytics/yolov5/releases/download/v3.0/yolov5s.pt to yolov5s.pt...
 100%|██████████████| 14.5M/14.5M [00:00<00:00, 21.3MB/s]
diff --git a/test.py b/test.py
index 49d783aea..644d28604 100644
--- a/test.py
+++ b/test.py
@@ -204,8 +204,8 @@ def test(data,
 
         # Plot images
         if plots and batch_i < 1:
-            f = save_dir / f'test_batch{batch_i}_gt.jpg' # filename
-            plot_images(img, targets, paths, str(f), names) # ground truth
+            f = save_dir / f'test_batch{batch_i}_labels.jpg' # filename
+            plot_images(img, targets, paths, str(f), names) # labels
             f = save_dir / f'test_batch{batch_i}_pred.jpg'
             plot_images(img, output_to_target(output, width, height), paths, str(f), names) # predictions
 
@@ -250,9 +250,9 @@ def test(data,
             from pycocotools.cocoeval import COCOeval
 
             imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files]
-            cocoGt = COCO(glob.glob('../coco/annotations/instances_val*.json')[0]) # initialize COCO ground truth api
-            cocoDt = cocoGt.loadRes(str(file)) # initialize COCO pred api
-            cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
+            cocoAnno = COCO(glob.glob('../coco/annotations/instances_val*.json')[0]) # initialize COCO annotations api
+            cocoPred = cocoAnno.loadRes(str(file)) # initialize COCO pred api
+            cocoEval = COCOeval(cocoAnno, cocoPred, 'bbox')
             cocoEval.params.imgIds = imgIds # image IDs to evaluate
             cocoEval.evaluate()
             cocoEval.accumulate()
diff --git a/tutorial.ipynb b/tutorial.ipynb
index 54c87b0f2..792967592 100644
--- a/tutorial.ipynb
+++ b/tutorial.ipynb
@@ -605,7 +605,7 @@
 "output_type": "stream",
 "text": [
 "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', img_size=640, iou_thres=0.45, save_conf=False, save_dir='runs/detect', save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n",
-"Using CUDA device0 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', total_memory=16130MB)\n",
+"Using torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130MB)\n",
 "\n",
 "Fusing layers... \n",
 "Model Summary: 140 layers, 7.45958e+06 parameters, 0 gradients\n",
@@ -735,7 +735,7 @@
 "output_type": "stream",
 "text": [
 "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', img_size=640, iou_thres=0.65, save_conf=False, save_dir='runs/test', save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n",
-"Using CUDA device0 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', total_memory=16130MB)\n",
+"Using torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130MB)\n",
 "\n",
 "Downloading https://github.com/ultralytics/yolov5/releases/download/v3.1/yolov5x.pt to yolov5x.pt...\n",
 "100% 170M/170M [00:05<00:00, 32.2MB/s]\n",
@@ -921,7 +921,7 @@
 {
 "output_type": "stream",
 "text": [
-"Using CUDA device0 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', total_memory=16130MB)\n",
+"Using torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130MB)\n",
 "\n",
 "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', epochs=3, evolve=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], local_rank=-1, log_imgs=10, logdir='runs/', multi_scale=False, name='', noautoanchor=False, nosave=True, notest=False, rect=False, resume=False, single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n",
 "Start Tensorboard with \"tensorboard --logdir runs/\", view at http://localhost:6006/\n",
@@ -1040,7 +1040,7 @@
 },
 "source": [
 "Image(filename='runs/train/exp0/train_batch0.jpg', width=800) # train batch 0 mosaics and labels\n",
-"Image(filename='runs/train/exp0/test_batch0_gt.jpg', width=800) # test batch 0 ground truth\n",
+"Image(filename='runs/train/exp0/test_batch0_labels.jpg', width=800) # test batch 0 labels\n",
 "Image(filename='runs/train/exp0/test_batch0_pred.jpg', width=800) # test batch 0 predictions"
 ],
 "execution_count": null,
 "outputs": []
@@ -1056,7 +1056,7 @@
 "`train_batch0.jpg` train batch 0 mosaics and labels\n",
 "\n",
 "> \n",
-"`test_batch0_gt.jpg` shows test batch 0 ground truth\n",
+"`test_batch0_labels.jpg` shows test batch 0 labels\n",
 "\n",
 "> \n",
 "`test_batch0_pred.jpg` shows test batch 0 _predictions_\n"
diff --git a/utils/general.py b/utils/general.py
index 2e166c9c1..499d58285 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -275,10 +275,10 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, fname='precision-re
     ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
     for ci, c in enumerate(unique_classes):
         i = pred_cls == c
-        n_gt = (target_cls == c).sum() # Number of ground truth objects
-        n_p = i.sum() # Number of predicted objects
+        n_l = (target_cls == c).sum() # number of labels
+        n_p = i.sum() # number of predictions
 
-        if n_p == 0 or n_gt == 0:
+        if n_p == 0 or n_l == 0:
             continue
         else:
             # Accumulate FPs and TPs
@@ -286,7 +286,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, fname='precision-re
             tpc = tp[i].cumsum(0)
 
             # Recall
-            recall = tpc / (n_gt + 1e-16) # recall curve
+            recall = tpc / (n_l + 1e-16) # recall curve
             r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases
 
             # Precision
@@ -1076,8 +1076,8 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max
             image_targets = targets[targets[:, 0] == i]
             boxes = xywh2xyxy(image_targets[:, 2:6]).T
             classes = image_targets[:, 1].astype('int')
-            gt = image_targets.shape[1] == 6 # ground truth if no conf column
-            conf = None if gt else image_targets[:, 6] # check for confidence presence (gt vs pred)
+            labels = image_targets.shape[1] == 6 # labels if no conf column
+            conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)
 
             boxes[[0, 2]] *= w
             boxes[[0, 2]] += block_x
@@ -1087,8 +1087,8 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max
                 cls = int(classes[j])
                 color = color_lut[cls % len(color_lut)]
                 cls = names[cls] if names else cls
-                if gt or conf[j] > 0.3: # 0.3 conf thresh
-                    label = '%s' % cls if gt else '%s %.1f' % (cls, conf[j])
+                if labels or conf[j] > 0.3: # 0.3 conf thresh
+                    label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])
                     plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)
 
         # Draw image filename labels
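
Note: the test.py hunks above only rename the pycocotools objects (cocoGt/cocoDt -> cocoAnno/cocoPred); the evaluation behaviour is unchanged. For reference, the sketch below shows the minimal pycocotools flow that test.py wraps. It is illustrative only and not part of the patch: 'predictions.json' is a placeholder for the detections file that test.py passes to loadRes (str(file) in the hunk), and the imgIds line is simplified to all annotated images rather than the dataloader's image list.

    import glob

    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval

    anno_json = glob.glob('../coco/annotations/instances_val*.json')[0]  # COCO annotation (labels) file
    cocoAnno = COCO(anno_json)                       # initialize COCO annotations api
    cocoPred = cocoAnno.loadRes('predictions.json')  # placeholder path: COCO-format detections JSON
    cocoEval = COCOeval(cocoAnno, cocoPred, 'bbox')  # bounding-box evaluation

    cocoEval.params.imgIds = sorted(cocoAnno.getImgIds())  # image IDs to evaluate (simplified)
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()                             # prints the standard COCO AP/AR table
    map_all, map50 = cocoEval.stats[:2]              # mAP@0.5:0.95 and mAP@0.5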