From 61ebf5e5edb6f46d56dca24d358a2d91e7a292d7 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Tue, 22 Nov 2022 20:47:54 +0100
Subject: [PATCH] Created using Colaboratory

---
 segment/tutorial.ipynb | 62 +++++++++++++++++++++---------------------
 1 file changed, 31 insertions(+), 31 deletions(-)

diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb
index ad44f31d3..09ca963d4 100644
--- a/segment/tutorial.ipynb
+++ b/segment/tutorial.ipynb
@@ -42,14 +42,14 @@
         "base_uri": "https://localhost:8080/"
       },
       "id": "wbvMlHd_QwMG",
-      "outputId": "664f49fa-554a-4dca-8d0e-5c9dd60f6d28"
+      "outputId": "171b23f0-71b9-4cbf-b666-6fa2ecef70c8"
     },
     "outputs": [
       {
         "output_type": "stream",
         "name": "stderr",
         "text": [
-          "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n"
+          "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n"
         ]
       },
       {
@@ -100,7 +100,7 @@
         "base_uri": "https://localhost:8080/"
       },
       "id": "zR9ZbuQCH7FX",
-      "outputId": "6392c9ff-0863-4665-faf9-b3af9881c305"
+      "outputId": "3f67f1c7-f15e-4fa5-d251-967c3b77eaad"
     },
     "outputs": [
       {
@@ -108,16 +108,16 @@
         "name": "stdout",
         "text": [
          "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n",
-         "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
+         "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
          "\n",
-         "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-seg.pt to yolov5s-seg.pt...\n",
-         "100% 14.9M/14.9M [00:01<00:00, 9.09MB/s]\n",
+         "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt to yolov5s-seg.pt...\n",
+         "100% 14.9M/14.9M [00:01<00:00, 12.0MB/s]\n",
          "\n",
          "Fusing layers... \n",
          "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n",
-         "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.0ms\n",
-         "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.5ms\n",
-         "Speed: 0.5ms pre-process, 15.7ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n",
+         "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.2ms\n",
+         "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.4ms\n",
+         "Speed: 0.5ms pre-process, 15.8ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n",
          "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n"
         ]
       }
@@ -155,7 +155,7 @@
         "base_uri": "https://localhost:8080/"
       },
       "id": "WQPtK1QYVaD_",
-      "outputId": "4707734e-00c7-43da-d642-32c3c3fe3090"
+      "outputId": "9d751d8c-bee8-4339-cf30-9854ca530449"
     },
     "outputs": [
       {
@@ -182,7 +182,7 @@
         "base_uri": "https://localhost:8080/"
       },
       "id": "X58w8JLpMnjH",
-      "outputId": "f96b700d-c779-4a34-930b-e85be4e58974"
+      "outputId": "a140d67a-02da-479e-9ddb-7d54bf9e407a"
     },
     "outputs": [
       {
@@ -190,15 +190,15 @@
         "name": "stdout",
         "text": [
          "\u001b[34m\u001b[1msegment/val: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\n",
-         "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
+         "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
          "\n",
          "Fusing layers... \n",
          "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n",
-         "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1409.04it/s]\n",
+         "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1361.31it/s]\n",
          "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n",
-         " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:53<00:00, 1.38it/s]\n",
+         " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:54<00:00, 1.37it/s]\n",
          " all 5000 36335 0.673 0.517 0.566 0.373 0.672 0.49 0.532 0.319\n",
-         "Speed: 0.8ms pre-process, 4.0ms inference, 2.8ms NMS per image at shape (32, 3, 640, 640)\n",
+         "Speed: 0.6ms pre-process, 4.4ms inference, 2.9ms NMS per image at shape (32, 3, 640, 640)\n",
          "Results saved to \u001b[1mruns/val-seg/exp\u001b[0m\n"
         ]
       }
@@ -270,7 +270,7 @@
         "base_uri": "https://localhost:8080/"
       },
       "id": "1NcFxRcFdJ_O",
-      "outputId": "2cdb19cc-69af-4c90-f8de-af02dfedba91"
+      "outputId": "3a3e0cf7-e79c-47a5-c8e7-2d26eeeab988"
     },
     "outputs": [
      {
@@ -279,15 +279,15 @@
         "text": [
          "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n",
          "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
-         "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
+         "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
          "\n",
          "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
          "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n",
          "\n",
          "Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\n",
          "Downloading https://ultralytics.com/assets/coco128-seg.zip to coco128-seg.zip...\n",
-         "100% 6.79M/6.79M [00:01<00:00, 5.87MB/s]\n",
-         "Dataset download success ✅ (2.1s), saved to \u001b[1m/content/datasets\u001b[0m\n",
+         "100% 6.79M/6.79M [00:01<00:00, 6.73MB/s]\n",
+         "Dataset download success ✅ (1.9s), saved to \u001b[1m/content/datasets\u001b[0m\n",
          "\n",
          " from n params module arguments \n",
          " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n",
@@ -321,11 +321,11 @@
          "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
          "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n",
          "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n",
-         "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1439.54it/s]\n",
+         "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1389.59it/s]\n",
          "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\n",
-         "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 253.53it/s]\n",
+         "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 238.86it/s]\n",
          "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00
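
Note on the three output cells touched above: they come from YOLOv5's segmentation scripts (segment/predict.py, segment/val.py, segment/train.py). The Python sketch below is only a hedged reconstruction of those runs, with flags taken from the argparse echoes in the logged output (yolov5s-seg.pt weights, imgsz 640 and conf_thres 0.25 for predict; data=coco.yaml and half=True for val; 3 epochs, batch size 16, coco128-seg.yaml and cache=ram for train). The run() helper is illustrative and not part of the repository, and the sketch assumes a local yolov5 clone with requirements installed.

# Minimal sketch (assumptions: executed from the root of a local yolov5 clone with
# requirements installed; for the val step, COCO val2017 is already extracted under
# ../datasets/coco, which the notebook handles in a separate cell).
import subprocess


def run(cmd):
    # Illustrative helper: echo the command, then run it and fail loudly on error.
    print(">>", " ".join(cmd))
    subprocess.run(cmd, check=True)


# 1. Predict: yolov5s-seg.pt on the bundled sample images (bus.jpg, zidane.jpg).
run(["python", "segment/predict.py", "--weights", "yolov5s-seg.pt",
     "--img", "640", "--conf-thres", "0.25", "--source", "data/images"])

# 2. Validate: box/mask mAP of yolov5s-seg.pt on COCO val2017 at 640 with FP16 (half=True above).
run(["python", "segment/val.py", "--weights", "yolov5s-seg.pt",
     "--data", "coco.yaml", "--img", "640", "--half"])

# 3. Train: 3 epochs on COCO128-seg, batch size 16, images cached in RAM (cache=ram above).
run(["python", "segment/train.py", "--img", "640", "--batch-size", "16", "--epochs", "3",
     "--data", "coco128-seg.yaml", "--weights", "yolov5s-seg.pt", "--cache", "ram"])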