yolov5/tutorial.ipynb

{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "YOLOv5 Tutorial",
"provenance": [],
"collapsed_sections": [],
"toc_visible": true,
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"accelerator": "GPU",
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"1f8e9b8ebded4175b2eaa9f75c3ceb00": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_0a1246a73077468ab80e979cc0576cd2",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_d327cde5a85a4a51bb8b1b3e9cf06c97",
"IPY_MODEL_d5ef1cb2cbed4b87b3c5d292ff2b0da6"
]
}
},
"0a1246a73077468ab80e979cc0576cd2": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"d327cde5a85a4a51bb8b1b3e9cf06c97": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_8d5dff8bca14435a88fa1814533acd85",
"_dom_classes": [],
"description": "100%",
"_model_name": "FloatProgressModel",
"bar_style": "success",
"max": 819257867,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 819257867,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_3d5136c19e7645ca9bc8f51ceffb2be1"
}
},
"d5ef1cb2cbed4b87b3c5d292ff2b0da6": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_2919396dbd4b4c8e821d12bd28665d8a",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 781M/781M [00:12<00:00, 65.5MB/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_6feb16f2b2fa4021b1a271e1dd442d04"
}
},
"8d5dff8bca14435a88fa1814533acd85": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "initial",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"3d5136c19e7645ca9bc8f51ceffb2be1": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"2919396dbd4b4c8e821d12bd28665d8a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"6feb16f2b2fa4021b1a271e1dd442d04": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"e6459e0bcee449b090fc9807672725bc": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_c341e1d3bf3b40d1821ce392eb966c68",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_660afee173694231a6dce3cd94df6cae",
"IPY_MODEL_261218485cef48df961519dde5edfcbe"
]
}
},
"c341e1d3bf3b40d1821ce392eb966c68": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"660afee173694231a6dce3cd94df6cae": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_32736d503c06497abfae8c0421918255",
"_dom_classes": [],
"description": "100%",
"_model_name": "FloatProgressModel",
"bar_style": "success",
"max": 22091032,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 22091032,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_e257738711f54d5280c8393d9d3dce1c"
}
},
"261218485cef48df961519dde5edfcbe": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_beb7a6fe34b840899bb79c062681696f",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 21.1M/21.1M [00:00<00:00, 33.5MB/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_e639132395d64d70b99d8b72c32f8fbb"
}
},
"32736d503c06497abfae8c0421918255": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "initial",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"e257738711f54d5280c8393d9d3dce1c": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"beb7a6fe34b840899bb79c062681696f": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"e639132395d64d70b99d8b72c32f8fbb": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
}
}
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "HvhYZrIZCEyo"
},
"source": [
"<img src=\"https://user-images.githubusercontent.com/26833433/98702494-b71c4e80-237a-11eb-87ed-17fcd6b3f066.jpg\">\n",
"\n",
"This notebook was written by Ultralytics LLC, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n",
"For more information please visit https://github.com/ultralytics/yolov5 and https://www.ultralytics.com."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "7mGmQbAO5pQb"
},
"source": [
"# Setup\n",
"\n",
"Clone repo, install dependencies and check PyTorch and GPU."
]
},
{
"cell_type": "code",
"metadata": {
"id": "wbvMlHd_QwMG",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "ae8805a9-ce15-4e1c-f6b4-baa1c1033f56"
},
"source": [
"!git clone https://github.com/ultralytics/yolov5 # clone repo\n",
"%cd yolov5\n",
"%pip install -qr requirements.txt # install dependencies\n",
"\n",
"import torch\n",
"from IPython.display import Image, clear_output # to display images\n",
"\n",
"clear_output()\n",
"print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"text": [
"Setup complete. Using torch 1.7.0+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16160MB, multi_processor_count=80)\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "4JnkELT0cIJg"
},
"source": [
"# 1. Inference\n",
"\n",
"`detect.py` runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)."
]
},
{
"cell_type": "code",
"metadata": {
"id": "zR9ZbuQCH7FX",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 534
},
"outputId": "c9a308f7-2216-4805-8003-eca8dd0dc30d"
},
"source": [
"!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/\n",
"Image(filename='runs/detect/exp/zidane.jpg', width=600)"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"text": [
"Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n",
"YOLOv5 v4.0-132-gf813f6d torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"\n",
"Fusing layers... \n",
"Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n",
"image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.008s)\n",
"image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done. (0.008s)\n",
"Results saved to runs/detect/exp\n",
"Done. (0.087)\n"
],
"name": "stdout"
},
{
"output_type": "execute_result",
"data": {
"image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAIBAQEBAQIBAQECAgICAgQDAgICAgUEBAMEBgUGBgYFBgYGBwkIBgcJBwYGCAsICQoKCgoKBggLDAsKDAkKCgr/2wBDAQICAgICAgUDAwUKBwYHCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgr/wAARCALQBQADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD8347F5pkSP5t38P3ttaFjZzR2rzOMjfs+/wDNVi10+5kh877Gqv8AwfP96tOz0+2b99sw0e1drfxV87HY+wjHm94z4bOZ2WZ4dgV9vzN81Tx6a8jHvu+bd/DV+HT51uHd0Up95Pl21bhtfIkH2ncqfN8q/e21NS0dUbU4/ZMf7Oi52OzMu1UVU+an/wBjlW3w7l2t8y/3q3pNPRl2I+1tn/AqZZ280cXk3Nrub+7v+6tefKtLl5onZGm48qMqbQ3k/wBJeb5lb5PMf5l/2aZcaW6tshhyzffZn3ba3biHzI5USFfmX7tQyWc3zTXltuWPb+8jT+LbXJWxVWO534XDxkchrmm/KZt+d3yvurBm0maHLvu2su1G/vV3OsWsMe5xyWTd5bVh3VikkLJ5Pyqu7b/easaNacX7x6nsYyicrJYws3nom1m/vf3qWC3uYW32zr8v95v/AEGtK6s5I9iJuDMu51aq62827502Nt3Jur6zAylKUTlqREj+0wsiI7OzNuRW/wBr+7ViSPy4/wBzud9+1vm+Wq0aurIJtxdf4qtLayeX8nyusu5mb+KvqMPSlKJ58qnvco65uHaNpvlTdt2fJ8y0kjSbER3Vtq7tzJtqbyPtDLDNtx96nTKjR/Ii7t38X3a9D2fKebUkoy5SHyXjnP75l/i/3amSSVm+0v5joqbfv/Ky/wB6i3/fRrv+9911j+6rUsMMuxvJufu/fXZXPKXLE4OaUuaxPBv3b9n+r/hjl3LVqH9zJ/qV2t823/eqtbwpHGkP+qVn+dY/l/4FVuzZLqRI5plV13b12fdX+GvLxHvF04825p2cm1Ucopdvl+V9taVvDcSSK6fd+ZXrN0+GGS637F+V1aXd/d/hq7b75mX51Db9zMr/AC/7Py14WIqSNadHuaVjNLJCsP2pmTfuddvzNU8jO3yQ7X2/e/iaq8IeGNPLRW+bbu2fdq95n2OZXhhV2b5V3V4dap7+h6VOnHqWob792yI6o6orfLVCZJpPnudrBf4v97+KpmuIWmDzTKsrfdXft+7VCS5dpmR5o3/vq392uJSjztQOlx928hzbIZXSFFLs7fMqf6yopmubzY63jIVb7qrU32OGSP8AhRPveXHSyKluy/J975VXf/FWkqnNqLk5fdEntdy/3vl2eZs/76pU3yQyJsYeX8if3lqwsE0iy2zzfuvl/d/7VVr6O6WTf8yfe/d7/u1n71TRSMK0R8d1cxwrvRQv3dzfdWoprp75hNc3cjtHtSLzG+61OaGaS3RJnV1+88bVVkkRlKWtthlf+GspRhKRjH3Y8rKuoXtvHteN8qy7X/vVga9cXisrpcthkVfm/u1pXk00zAu+R/d/utWDq14+5n342/6rav3a78PFRj8JyVqhj6lM/wC8+8f/AB3dXManN82/fjd/CtdBqW+4bM0/Gzc1Yd48Pls/Vm+Xb/FXsUYy5NDxsVLmiYF9avt+07F21QVXmuNmzb/utW9cWbyR56hVqnHp7rMJvJ8xK9CnKMeU82T5hljlWZE3fN9//ZrodI3x7ntn+Rk2srfM1V9N03bGOdu7/wAdrVhs4I5BGiMk0f8ADJ8tEqhrToz+I1NLtUinR9+fLf5F/wDsa7bQZnjwibU2/N+7X5VrjdH/AHKxBE3f367TRZE+x7E2/wB1dv3mqo1PfOj2fuWOu0W4k+ziF5sOzfxfw11ui6uNyu6Mrqu1/Mfb8v8As1wWk3KOuy28xVVvnb+7W/puqQxsU3/eiVmj+9XZGpzmMoyj8R3Wn6kQN8Myh1f/AEfb93/eatXT9am8ve+1vvbmrgrHWd0iXOcFfl3L/F/wGtCHxB5K+d8wSR9qKq/M3/Aa6OYw9+J2q69C3zpZttX5Ub+9/vUybV4IYd+//WbtzL/CtcqutbYf3fmHc+1/mqvcawk3ybJCu/b9/wC9U/DAfunT/wBtusCv0/2d/wDDWbqGuosbO8jEt91tvystYN9q226ldH2xtt8qNX3f8B3VVvtUm2l3TLsnzLu/i/hqJRjI25vslPxRNDdZm85iv3fLb+GuMvJ3dXR/uK23/erW1PVHuomQXLFpJfkZvur/ALNZGqQ/aFb5G+V/3sa1x1I8x0UeaOjOa1SG2ml85Pv/AMO5vlWqtvbupYOmPLf5d3yturcbTkjdt6Mxb/lm38NQXWnpJcM8iSO38Un8K1nKn7p2RqQ5tTPWFJpD5czIn97726mTWVzIHfez+Z/yz/vVZa1eSTZDCqqqNu+fbSLYwzRuXhxufd9/71cNSnI0lUM2SN1CwpMuyT5tv/stJbxurI/nL+8ba0cn92tXybaOSHyYfuxbtrN8v3qq3Eltu+0+T86tt+VK5q1P3tCoVOXWRbtWdcoltv2tu2t8u6uj01na3TZuAVt27+61YNu7s0jzbWlb5U/hrQ0+aGObzo3bzl+X7/y7q+Ox1GXNKTPewtT4ZI7LT2T/AFM03mt8q7v4a0WuvLUI+6H5v9Wvzbv+BVzVnfTeSH/55q25d/3m/wBmp/7UdpI+Nqt8rbWr5DEYeUqp9DRrfDzG5cXySsN9zuVot6qybvu1m3mpRrD5iO0KSRbvlf5aqSal8zbNuPm2/J8q1Uk1QSM73KKrrF8nlr8u6tKOHUZe8dvtOhPeahD5yc7v3X975t1Zs0zrsfo2/wCZW/h/4FS3F4jKkEyMXX5X3fdaqzLBNJscrsZN
qqv8NexhcPGPuozqVOWHKJe+c0hf7Tv3fL8tVri3DSPD9pUyr/F91d1aEljH/wAvMylG+4yp91aktdPeRc+Tv+f5fk3V9XluH5dTwcdiIx+0YLK6tvfcKry6bN5ezZ+7b/lpG+35q7BfDiNa+XNC37xtq7m27qdY+DXuN0m/hX/1f8NfY4ej7lz5XGYjm+E5C10e/Ece+2+fdtXb81XF8P7bqPztwkVGV9vyrt/2a7ux8KzRyJCkLM6/Nt3/ACtU7eDXkmj811Ty2+f91ub5q1lTjGZwRrcp5wuihpJIPmZGf/v2tQDwrMzHyXbZ93aqV6ovg/y5FT7zL99VT7y0kngvM3nfZmQbWZFWuKpR5vdN6dbl+0eUyeG7mO4Dp0Zf/Hqfp+jzQtLNczZK/wAP92vS28HmaOL/AEXa21n/AOA1m3HhWaxmm32fySIv+1uX/drxsVR+yejh63N7xysmnwxqrwp5rtztV/4f/iqJLRLVVT7HIo2bd27+Kuqj8Nos29BiKRdySN/d/u1U
"text/plain": [
"<IPython.core.display.Image object>"
]
},
"metadata": {
"tags": [],
"image/jpeg": {
"width": 600
}
},
"execution_count": 38
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "4qbaa3iEcrcE"
},
"source": [
"Results are saved to `runs/detect`. A full list of available inference sources:\n",
"<img src=\"https://user-images.githubusercontent.com/26833433/98274798-2b7a7a80-1f94-11eb-91a4-70c73593e26b.jpg\" width=\"900\"> "
]
},
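{
"cell_type": "markdown",
"metadata": {},
"source": [
"The next cell is an optional sketch added alongside the tutorial (not an original Ultralytics cell): it re-runs `detect.py` on the bundled `bus.jpg` image and lists, as comments, the other `--source` types summarized in the image above. The file names and stream URLs in the comments are placeholders."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Sketch: inference on a single bundled image\n",
"!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/bus.jpg\n",
"\n",
"# Other example --source values (placeholders, uncomment to try):\n",
"# !python detect.py --weights yolov5s.pt --source 0 # webcam\n",
"# !python detect.py --weights yolov5s.pt --source movie.mp4 # video file\n",
"# !python detect.py --weights yolov5s.pt --source path/*.jpg # glob of images\n",
"# !python detect.py --weights yolov5s.pt --source 'rtsp://example.com/stream' # RTSP stream\n",
"# !python detect.py --weights yolov5s.pt --source 'https://example.com/video.mp4' # HTTP video"
],
"execution_count": null,
"outputs": []
},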
{
"cell_type": "markdown",
"metadata": {
"id": "0eq1SMWl6Sfn"
},
"source": [
"# 2. Test\n",
"Test a model on [COCO](https://cocodataset.org/#home) val or test-dev dataset to evaluate trained accuracy. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. Note that `pycocotools` metrics may be 1-2% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "eyTZYGgRjnMc"
},
"source": [
"## COCO val2017\n",
"Download [COCO val 2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L14) dataset (1GB - 5000 images), and test model accuracy."
]
},
{
"cell_type": "code",
"metadata": {
"id": "WQPtK1QYVaD_",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 65,
"referenced_widgets": [
"1f8e9b8ebded4175b2eaa9f75c3ceb00",
"0a1246a73077468ab80e979cc0576cd2",
"d327cde5a85a4a51bb8b1b3e9cf06c97",
"d5ef1cb2cbed4b87b3c5d292ff2b0da6",
"8d5dff8bca14435a88fa1814533acd85",
"3d5136c19e7645ca9bc8f51ceffb2be1",
"2919396dbd4b4c8e821d12bd28665d8a",
"6feb16f2b2fa4021b1a271e1dd442d04"
]
},
"outputId": "d6ace7c6-1be5-41ff-d607-1c716b88d298"
},
"source": [
"# Download COCO val2017\n",
"torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n",
"!unzip -q tmp.zip -d ../ && rm tmp.zip"
],
"execution_count": null,
"outputs": [
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "1f8e9b8ebded4175b2eaa9f75c3ceb00",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, max=819257867.0), HTML(value='')))"
]
},
"metadata": {
"tags": []
}
},
{
"output_type": "stream",
"text": [
"\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "X58w8JLpMnjH",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "cc25f70c-0a11-44f6-cc44-e92c5083488c"
},
"source": [
"# Run YOLOv5x on COCO val2017\n",
"!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"text": [
"Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n",
"YOLOv5 v4.0-75-gbdd88e1 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"\n",
"Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5x.pt to yolov5x.pt...\n",
"100% 168M/168M [00:04<00:00, 39.7MB/s]\n",
"\n",
"Fusing layers... \n",
"Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' for images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2824.78it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n",
" Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:33<00:00, 1.68it/s]\n",
" all 5e+03 3.63e+04 0.749 0.619 0.68 0.486\n",
"Speed: 5.2/2.0/7.3 ms inference/NMS/total per 640x640 image at batch-size 32\n",
"\n",
"Evaluating pycocotools mAP... saving runs/test/exp/yolov5x_predictions.json...\n",
"loading annotations into memory...\n",
"Done (t=0.44s)\n",
"creating index...\n",
"index created!\n",
"Loading and preparing results...\n",
"DONE (t=4.47s)\n",
"creating index...\n",
"index created!\n",
"Running per image evaluation...\n",
"Evaluate annotation type *bbox*\n",
"DONE (t=94.87s).\n",
"Accumulating evaluation results...\n",
"DONE (t=15.96s).\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.501\n",
" Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.687\n",
" Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.544\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.338\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.548\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.637\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.378\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.628\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.680\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.520\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.729\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.826\n",
"Results saved to runs/test/exp\n"
],
"name": "stdout"
}
]
},
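{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optional sketch (not an original tutorial cell): the `--verbose` flag mentioned above prints per-class precision, recall and mAP. The smaller `yolov5s.pt` checkpoint is used here to keep the run short."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Sketch: per-class results on COCO val2017 with --verbose\n",
"!python test.py --weights yolov5s.pt --data coco.yaml --img 640 --iou 0.65 --verbose"
],
"execution_count": null,
"outputs": []
},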
{
"cell_type": "markdown",
"metadata": {
"id": "rc_KbFk0juX2"
2020-05-30 08:04:54 +08:00
},
"source": [
"## COCO test-dev2017\n",
"Download [COCO test2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L15) dataset (7GB - 40,000 images), to test model accuracy on test-dev set (20,000 images). Results are saved to a `*.json` file which can be submitted to the evaluation server at https://competitions.codalab.org/competitions/20794."
]
},
{
"cell_type": "code",
"metadata": {
"id": "V0AJnSeCIHyJ"
},
"source": [
"# Download COCO test-dev2017\n",
"torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels.zip', 'tmp.zip')\n",
"!unzip -q tmp.zip -d ../ && rm tmp.zip # unzip labels\n",
"!f=\"test2017.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f # 7GB, 41k images\n",
"%mv ./test2017 ./coco/images && mv ./coco ../ # move images to /coco and move /coco next to /yolov5"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "29GJXAP_lPrt"
},
"source": [
"# Run YOLOv5s on COCO test-dev2017 using --task test\n",
"!python test.py --weights yolov5s.pt --data coco.yaml --task test"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "VUOiNLtMP5aG"
},
"source": [
"# 3. Train\n",
"\n",
"Download [COCO128](https://www.kaggle.com/ultralytics/coco128), a small 128-image tutorial dataset, start tensorboard and train YOLOv5s from a pretrained checkpoint for 3 epochs (note actual training is typically much longer, around **300-1000 epochs**, depending on your dataset)."
]
},
{
"cell_type": "code",
"metadata": {
"id": "Knxi2ncxWffW",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 65,
"referenced_widgets": [
"e6459e0bcee449b090fc9807672725bc",
"c341e1d3bf3b40d1821ce392eb966c68",
"660afee173694231a6dce3cd94df6cae",
"261218485cef48df961519dde5edfcbe",
"32736d503c06497abfae8c0421918255",
"e257738711f54d5280c8393d9d3dce1c",
"beb7a6fe34b840899bb79c062681696f",
"e639132395d64d70b99d8b72c32f8fbb"
]
},
"outputId": "e8b7d5b3-a71e-4446-eec2-ad13419cf700"
},
"source": [
"# Download COCO128\n",
"torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n",
"!unzip -q tmp.zip -d ../ && rm tmp.zip"
],
"execution_count": null,
"outputs": [
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "e6459e0bcee449b090fc9807672725bc",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, max=22091032.0), HTML(value='')))"
]
},
"metadata": {
"tags": []
}
},
{
"output_type": "stream",
"text": [
"\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "_pOkGLv1dMqh"
},
"source": [
"Train a YOLOv5s model on [COCO128](https://www.kaggle.com/ultralytics/coco128) with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and **COCO, COCO128, and VOC datasets are downloaded automatically** on first use.\n",
"\n",
"All training results are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc.\n"
]
},
{
"cell_type": "code",
"metadata": {
"id": "bOy5KI2ncnWd"
},
"source": [
"# Tensorboard (optional)\n",
"%load_ext tensorboard\n",
"%tensorboard --logdir runs/train"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "2fLAV42oNb7M"
},
"source": [
"# Weights & Biases (optional)\n",
"%pip install -q wandb \n",
"!wandb login # use 'wandb disabled' or 'wandb enabled' to disable or enable"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "1NcFxRcFdJ_O",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "38e51b29-2df4-4f00-cde8-5f6e4a34da9e"
},
"source": [
"# Train YOLOv5s on COCO128 for 3 epochs\n",
"!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"text": [
"\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
"YOLOv5 v4.0-75-gbdd88e1 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"\n",
"Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], linear_lr=False, local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n",
"\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n",
"Start Tensorboard with \"tensorboard --logdir runs/train\", view at http://localhost:6006/\n",
"2021-02-12 06:38:28.027271: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n",
"\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n",
"Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5s.pt to yolov5s.pt...\n",
"100% 14.1M/14.1M [00:01<00:00, 13.2MB/s]\n",
"\n",
"\n",
" from n params module arguments \n",
" 0 -1 1 3520 models.common.Focus [3, 32, 3] \n",
" 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n",
" 2 -1 1 18816 models.common.C3 [64, 64, 1] \n",
" 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n",
" 4 -1 1 156928 models.common.C3 [128, 128, 3] \n",
" 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n",
" 6 -1 1 625152 models.common.C3 [256, 256, 3] \n",
" 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n",
" 8 -1 1 656896 models.common.SPP [512, 512, [5, 9, 13]] \n",
" 9 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n",
" 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n",
" 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 12 [-1, 6] 1 0 models.common.Concat [1] \n",
" 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n",
" 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n",
" 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 16 [-1, 4] 1 0 models.common.Concat [1] \n",
" 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n",
" 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n",
" 19 [-1, 14] 1 0 models.common.Concat [1] \n",
" 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n",
" 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n",
" 22 [-1, 10] 1 0 models.common.Concat [1] \n",
" 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n",
" 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
"Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPS\n",
"\n",
"Transferred 362/362 items from yolov5s.pt\n",
"Scaled weight_decay = 0.0005\n",
"Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2566.00it/s]\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../coco128/labels/train2017.cache\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 175.07it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 764773.38it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 128.17it/s]\n",
"Plotting labels... \n",
"\n",
"\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n",
"Image sizes 640 train, 640 test\n",
"Using 2 dataloader workers\n",
"Logging results to runs/train/exp\n",
"Starting training for 3 epochs...\n",
"\n",
" Epoch gpu_mem box obj cls total targets img_size\n",
" 0/2 3.27G 0.04357 0.06781 0.01869 0.1301 207 640: 100% 8/8 [00:03<00:00, 2.03it/s]\n",
" Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.14s/it]\n",
" all 128 929 0.646 0.627 0.659 0.431\n",
"\n",
" Epoch gpu_mem box obj cls total targets img_size\n",
" 1/2 7.75G 0.04308 0.06654 0.02083 0.1304 227 640: 100% 8/8 [00:01<00:00, 4.11it/s]\n",
" Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 2.94it/s]\n",
" all 128 929 0.681 0.607 0.663 0.434\n",
"\n",
" Epoch gpu_mem box obj cls total targets img_size\n",
" 2/2 7.75G 0.04461 0.06896 0.01866 0.1322 191 640: 100% 8/8 [00:02<00:00, 3.94it/s]\n",
" Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:03<00:00, 1.22it/s]\n",
" all 128 929 0.642 0.632 0.662 0.432\n",
"Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n",
"3 epochs completed in 0.007 hours.\n",
"\n"
],
"name": "stdout"
}
]
},
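{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optional sketch (not an original tutorial cell): the same training run started from randomly initialized weights, as described above, by passing an empty `--weights` argument together with a model `--cfg`. Expect training from scratch to need far more than the 3 epochs used in this tutorial."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Sketch: train YOLOv5s from scratch on COCO128 for 3 epochs\n",
"!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights '' --cfg yolov5s.yaml --nosave --cache"
],
"execution_count": null,
"outputs": []
},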
{
"cell_type": "markdown",
"metadata": {
"id": "15glLzbQx5u0"
},
"source": [
"# 4. Visualize"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "DLI1JmHU7B0l"
},
"source": [
"## Weights & Biases Logging 🌟 NEW\n",
"\n",
"[Weights & Biases](https://www.wandb.com/) (W&B) is now integrated with YOLOv5 for real-time visualization and cloud logging of training runs. This allows for better run comparison and introspection, as well improved visibility and collaboration for teams. To enable W&B `pip install wandb`, and then train normally (you will be guided through setup on first use). \n",
"\n",
"During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). \n",
"\n",
"<img src=\"https://user-images.githubusercontent.com/26833433/98184457-bd3da580-1f0a-11eb-8461-95d908a71893.jpg\" width=\"800\">"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "-WPvRbS5Swl6"
},
"source": [
"## Local Logging\n",
"\n",
"All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and test jpgs to see mosaics, labels, predictions and augmentation effects. Note a **Mosaic Dataloader** is used for training (shown below), a new concept developed by Ultralytics and first featured in [YOLOv4](https://arxiv.org/abs/2004.10934)."
]
},
{
"cell_type": "code",
"metadata": {
"id": "riPdhraOTCO0"
},
"source": [
"Image(filename='runs/train/exp/train_batch0.jpg', width=800) # train batch 0 mosaics and labels\n",
"Image(filename='runs/train/exp/test_batch0_labels.jpg', width=800) # test batch 0 labels\n",
"Image(filename='runs/train/exp/test_batch0_pred.jpg', width=800) # test batch 0 predictions"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "OYG4WFEnTVrI"
},
"source": [
"> <img src=\"https://user-images.githubusercontent.com/26833433/83667642-90fcb200-a583-11ea-8fa3-338bbf7da194.jpeg\" width=\"750\"> \n",
"`train_batch0.jpg` shows train batch 0 mosaics and labels\n",
"\n",
"> <img src=\"https://user-images.githubusercontent.com/26833433/83667626-8c37fe00-a583-11ea-997b-0923fe59b29b.jpeg\" width=\"750\"> \n",
"`test_batch0_labels.jpg` shows test batch 0 labels\n",
"\n",
"> <img src=\"https://user-images.githubusercontent.com/26833433/83667635-90641b80-a583-11ea-8075-606316cebb9c.jpeg\" width=\"750\"> \n",
"`test_batch0_pred.jpg` shows test batch 0 _predictions_\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "7KN5ghjE6ZWh"
},
"source": [
"Training losses and performance metrics are also logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and a custom `results.txt` logfile which is plotted as `results.png` (below) after training completes. Here we show YOLOv5s trained on COCO128 to 300 epochs, starting from scratch (blue), and from pretrained `--weights yolov5s.pt` (orange)."
]
},
{
"cell_type": "code",
"metadata": {
"id": "MDznIqPF7nk3"
},
"source": [
"from utils.plots import plot_results \n",
"plot_results(save_dir='runs/train/exp') # plot all results*.txt as results.png\n",
"Image(filename='runs/train/exp/results.png', width=800)"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "lfrEegCSW3fK"
},
"source": [
"<img src=\"https://user-images.githubusercontent.com/26833433/97808309-8182b180-1c66-11eb-8461-bffe1a79511d.png\" width=\"800\">\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Zelyeqbyt3GD"
},
"source": [
"# Environments\n",
"\n",
"YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
"\n",
"- **Google Colab and Kaggle** notebooks with free GPU: <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a> <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
"- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n",
"- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n",
"- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) <a href=\"https://hub.docker.com/r/ultralytics/yolov5\"><img src=\"https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker\" alt=\"Docker Pulls\"></a>\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "6Qu7Iesl0p54"
},
"source": [
"# Status\n",
"\n",
"![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)\n",
"\n",
"If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/models/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "IEijrePND_2I"
},
"source": [
"# Appendix\n",
"\n",
"Optional extras below. Unit tests validate repo functionality and should be run on any PRs submitted.\n"
]
},
{
"cell_type": "code",
"metadata": {
"id": "gI6NoBev8Ib1"
},
"source": [
"# Re-clone repo\n",
"%cd ..\n",
"%rm -rf yolov5 && git clone https://github.com/ultralytics/yolov5\n",
"%cd yolov5"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "mcKoSIK2WSzj"
},
"source": [
"# Reproduce\n",
"for x in 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x':\n",
" !python test.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45 # speed\n",
" !python test.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "FGH0ZjkGjejy"
},
"source": [
"# Unit tests\n",
"%%shell\n",
"export PYTHONPATH=\"$PWD\" # to run *.py. files in subdirectories\n",
"\n",
"rm -rf runs # remove runs/\n",
"for m in yolov5s; do # models\n",
" python train.py --weights $m.pt --epochs 3 --img 320 --device 0 # train pretrained\n",
" python train.py --weights '' --cfg $m.yaml --epochs 3 --img 320 --device 0 # train scratch\n",
" for d in 0 cpu; do # devices\n",
" python detect.py --weights $m.pt --device $d # detect official\n",
" python detect.py --weights runs/train/exp/weights/best.pt --device $d # detect custom\n",
" python test.py --weights $m.pt --device $d # test official\n",
" python test.py --weights runs/train/exp/weights/best.pt --device $d # test custom\n",
" done\n",
" python hubconf.py # hub\n",
" python models/yolo.py --cfg $m.yaml # inspect\n",
" python models/export.py --weights $m.pt --img 640 --batch 1 # export\n",
"done"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "gogI-kwi3Tye"
},
"source": [
"# Profile\n",
"from utils.torch_utils import profile \n",
"\n",
"m1 = lambda x: x * torch.sigmoid(x)\n",
"m2 = torch.nn.SiLU()\n",
"profile(x=torch.randn(16, 3, 640, 640), ops=[m1, m2], n=100)"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "RVRSOhEvUdb5"
},
"source": [
"# Evolve\n",
"!python train.py --img 640 --batch 64 --epochs 100 --data coco128.yaml --weights yolov5s.pt --cache --noautoanchor --evolve\n",
"!d=runs/train/evolve && cp evolve.* $d && zip -r evolve.zip $d && gsutil mv evolve.zip gs://bucket # upload results (optional)"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "BSgFCAcMbk1R"
},
"source": [
"# VOC\n",
"for b, m in zip([64, 48, 32, 16], ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']): # zip(batch_size, model)\n",
" !python train.py --batch {b} --weights {m}.pt --data voc.yaml --epochs 50 --cache --img 512 --nosave --hyp hyp.finetune.yaml --project VOC --name {m}"
],
"execution_count": null,
"outputs": []
}
]
}