Merge branch 'master' into trt-infer-bug

Signed-off-by: 张一极 <3099463052@qq.com>
张一极 2025-03-19 23:48:19 +08:00 committed by GitHub
commit 7b18c95726
125 changed files with 659 additions and 630 deletions


@@ -1,4 +1,4 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
name: 🐛 Bug Report
# title: " "


@@ -1,4 +1,4 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
blank_issues_enabled: true
contact_links:


@@ -1,4 +1,4 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
name: 🚀 Feature Request
description: Suggest a YOLOv5 idea


@@ -1,4 +1,4 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
name: ❓ Question
description: Ask a YOLOv5 question


@@ -1,4 +1,5 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Dependabot for package version updates
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates


@@ -1,4 +1,5 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# YOLOv5 Continuous Integration (CI) GitHub Actions tests
name: YOLOv5 CI
@@ -137,14 +138,14 @@ jobs:
   Summary:
     runs-on: ubuntu-latest
-    needs: [Benchmarks, Tests] # Add job names that you want to check for failure
-    if: always() # This ensures the job runs even if previous jobs fail
+    needs: [Benchmarks, Tests]
+    if: always()
     steps:
       - name: Check for failure and notify
         if: (needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure' || needs.Benchmarks.result == 'cancelled' || needs.Tests.result == 'cancelled') && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push')
-        uses: slackapi/slack-github-action@v1.26.0
+        uses: slackapi/slack-github-action@v2.0.0
         with:
+          webhook-type: incoming-webhook
+          webhook: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }}
           payload: |
-            {"text": "<!channel> GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n"}
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }}
+            text: "<!channel> GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n"


@@ -1,4 +1,5 @@
-# Ultralytics YOLO 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Ultralytics Contributor License Agreement (CLA) action https://docs.ultralytics.com/help/CLA
# This workflow automatically requests Pull Requests (PR) authors to sign the Ultralytics CLA before PRs can be merged
@@ -26,11 +27,11 @@ jobs:
     steps:
       - name: CLA Assistant
         if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I sign the CLA') || github.event_name == 'pull_request_target'
-        uses: contributor-assistant/github-action@v2.5.1
+        uses: contributor-assistant/github-action@v2.6.1
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           # Must be repository secret PAT
-          PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
+          PERSONAL_ACCESS_TOKEN: ${{ secrets._GITHUB_TOKEN }}
         with:
           path-to-signatures: "signatures/version1/cla.json"
           path-to-document: "https://docs.ultralytics.com/help/CLA" # CLA document


@@ -1,56 +0,0 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
-# This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities.
-# https://github.com/github/codeql-action
-
-name: "CodeQL"
-
-on:
-  schedule:
-    - cron: "0 0 1 * *" # Runs at 00:00 UTC on the 1st of every month
-  workflow_dispatch:
-
-jobs:
-  analyze:
-    name: Analyze
-    runs-on: ubuntu-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        language: ["python"]
-        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
-        # Learn more:
-        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-      # Initializes the CodeQL tools for scanning.
-      - name: Initialize CodeQL
-        uses: github/codeql-action/init@v3
-        with:
-          languages: ${{ matrix.language }}
-          # If you wish to specify custom queries, you can do so here or in a config file.
-          # By default, queries listed here will override any specified in a config file.
-          # Prefix the list here with "+" to use these queries and those in the config file.
-          # queries: ./path/to/local/query, your-org/your-repo/queries@main
-      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
-      # If this step fails, then you should remove it and run the build manually (see below)
-      - name: Autobuild
-        uses: github/codeql-action/autobuild@v3
-      # Command-line programs to run using the OS shell.
-      # 📚 https://git.io/JvXDl
-      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
-      #    and modify them (or add more) to build your code if your project
-      #    uses a compiled language
-      #- run: |
-      #    make bootstrap
-      #    make release
-      - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v3


@@ -1,4 +1,5 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Builds ultralytics/yolov5:latest images on DockerHub https://hub.docker.com/r/ultralytics/yolov5
name: Publish Docker Images


@@ -1,4 +1,5 @@
-# Ultralytics 🚀 - AGPL-3.0 License https://ultralytics.com/license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Ultralytics Actions https://github.com/ultralytics/actions
# This workflow automatically formats code and documentation in PRs to official Ultralytics standards
@@ -7,7 +8,7 @@ name: Ultralytics Actions
 on:
   issues:
     types: [opened]
-  pull_request_target:
+  pull_request:
     branches: [main, master]
     types: [opened, closed, synchronize, review_requested]
@@ -18,12 +19,41 @@ jobs:
       - name: Run Ultralytics Formatting
         uses: ultralytics/actions@main
         with:
-          token: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} # note GITHUB_TOKEN automatically generated
+          token: ${{ secrets._GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
           labels: true # autolabel issues and PRs
           python: true # format Python code and docstrings
           prettier: true # format YAML, JSON, Markdown and CSS
           spelling: true # check spelling
           links: false # check broken links
           summary: true # print PR summary with GPT4o (requires 'openai_api_key')
+          openai_azure_api_key: ${{ secrets.OPENAI_AZURE_API_KEY }}
+          openai_azure_endpoint: ${{ secrets.OPENAI_AZURE_ENDPOINT }}
+          openai_api_key: ${{ secrets.OPENAI_API_KEY }}
+          first_issue_response: |
+            👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://docs.ultralytics.com/yolov5/) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/) all the way to advanced concepts like [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/).
+            If this is a 🐛 Bug Report, please provide a **minimum reproducible example** to help us debug it.
+            If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips/).
+            ## Requirements
+            [**Python>=3.8.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/). To get started:
+            ```bash
+            git clone https://github.com/ultralytics/yolov5 # clone
+            cd yolov5
+            pip install -r requirements.txt # install
+            ```
+            ## Environments
+            YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):
+            - **Notebooks** with free GPU: <a href="https://bit.ly/yolov5-paperspace-notebook"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run on Gradient"></a> <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> <a href="https://www.kaggle.com/models/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
+            - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)
+            - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)
+            - **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) <a href="https://hub.docker.com/r/ultralytics/yolov5"><img src="https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker" alt="Docker Pulls"></a>
+            ## Status
+            <a href="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml"><img src="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg" alt="YOLOv5 CI"></a>
+            If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit.


@@ -1,65 +0,0 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
-
-name: Greetings
-
-on:
-  pull_request_target:
-    types: [opened]
-  issues:
-    types: [opened]
-
-jobs:
-  greeting:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/first-interaction@v1
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-          pr-message: |
-            👋 Hello @${{ github.actor }}, thank you for submitting a YOLOv5 🚀 PR! To allow your work to be integrated as seamlessly as possible, we advise you to:
-            - ✅ Verify your PR is **up-to-date** with `ultralytics/yolov5` `master` branch. If your PR is behind you can update your code by clicking the 'Update branch' button or by running `git pull` and `git merge master` locally.
-            - ✅ Verify all YOLOv5 Continuous Integration (CI) **checks are passing**.
-            - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee
-          issue-message: |
-            👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://docs.ultralytics.com/yolov5/) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/) all the way to advanced concepts like [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/).
-            If this is a 🐛 Bug Report, please provide a **minimum reproducible example** to help us debug it.
-            If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips//).
-            ## Requirements
-            [**Python>=3.8.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/). To get started:
-            ```bash
-            git clone https://github.com/ultralytics/yolov5 # clone
-            cd yolov5
-            pip install -r requirements.txt # install
-            ```
-            ## Environments
-            YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):
-            - **Notebooks** with free GPU: <a href="https://bit.ly/yolov5-paperspace-notebook"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run on Gradient"></a> <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> <a href="https://www.kaggle.com/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
-            - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)
-            - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)
-            - **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) <a href="https://hub.docker.com/r/ultralytics/yolov5"><img src="https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker" alt="Docker Pulls"></a>
-            ## Status
-            <a href="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml"><img src="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg" alt="YOLOv5 CI"></a>
-            If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit.
-            ## Introducing YOLOv8 🚀
-            We're excited to announce the launch of our latest state-of-the-art (SOTA) object detection model for 2023 - [YOLOv8](https://github.com/ultralytics/ultralytics) 🚀!
-            Designed to be fast, accurate, and easy to use, YOLOv8 is an ideal choice for a wide range of object detection, image segmentation and image classification tasks. With YOLOv8, you'll be able to quickly and accurately detect objects in real-time, streamline your workflows, and achieve new levels of accuracy in your projects.
-            Check out our [YOLOv8 Docs](https://docs.ultralytics.com/) for details and get started with:
-            ```bash
-            pip install ultralytics
-            ```


@@ -1,4 +1,5 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Continuous Integration (CI) GitHub Actions tests broken link checker using https://github.com/lycheeverse/lychee
# Ignores the following status codes to reduce false positives:
# - 403(OpenVINO, 'forbidden')
@@ -23,17 +24,15 @@ jobs:
       - name: Download and install lychee
         run: |
           LYCHEE_URL=$(curl -s https://api.github.com/repos/lycheeverse/lychee/releases/latest | grep "browser_download_url" | grep "x86_64-unknown-linux-gnu.tar.gz" | cut -d '"' -f 4)
-          curl -L $LYCHEE_URL -o lychee.tar.gz
-          tar xzf lychee.tar.gz
-          sudo mv lychee /usr/local/bin
+          curl -L $LYCHEE_URL | tar xz -C /usr/local/bin
       - name: Test Markdown and HTML links with retry
-        uses: nick-invision/retry@v3
+        uses: ultralytics/actions/retry@main
         with:
           timeout_minutes: 5
-          retry_wait_seconds: 60
-          max_attempts: 3
-          command: |
+          retry_delay_seconds: 60
+          retries: 2
+          run: |
            lychee \
            --scheme 'https' \
            --timeout 60 \
@@ -45,16 +44,16 @@ jobs:
            --github-token ${{ secrets.GITHUB_TOKEN }} \
            --header "User-Agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.183 Safari/537.36" \
            './**/*.md' \
-            './**/*.html'
+            './**/*.html' | tee -a $GITHUB_STEP_SUMMARY
       - name: Test Markdown, HTML, YAML, Python and Notebook links with retry
         if: github.event_name == 'workflow_dispatch'
-        uses: nick-invision/retry@v3
+        uses: ultralytics/actions/retry@main
         with:
           timeout_minutes: 5
-          retry_wait_seconds: 60
-          max_attempts: 3
-          command: |
+          retry_delay_seconds: 60
+          retries: 2
+          run: |
            lychee \
            --scheme 'https' \
            --timeout 60 \
@@ -70,4 +69,4 @@ jobs:
            './**/*.yml' \
            './**/*.yaml' \
            './**/*.py' \
-            './**/*.ipynb'
+            './**/*.ipynb' | tee -a $GITHUB_STEP_SUMMARY


@@ -1,4 +1,5 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Automatically merges repository 'main' branch into all open PRs to keep them up-to-date
# Action runs on updates to main branch so when one PR merges to main all others update
@@ -6,10 +7,9 @@ name: Merge main into PRs
 on:
   workflow_dispatch:
-  push:
-    branches:
-      - main
-      - master
+  # push:
+  #   branches:
+  #     - ${{ github.event.repository.default_branch }}
 jobs:
   Merge:
@@ -22,35 +22,51 @@ jobs:
           fetch-depth: 0
       - uses: actions/setup-python@v5
         with:
-          python-version: "3.11"
-          cache: "pip" # caching pip dependencies
+          python-version: "3.x"
+          cache: "pip"
       - name: Install requirements
        run: |
          pip install pygithub
-      - name: Merge main into PRs
+      - name: Merge default branch into PRs
        shell: python
        run: |
          from github import Github
          import os
          # Authenticate with the GitHub Token
          g = Github(os.getenv('GITHUB_TOKEN'))
          # Get the repository dynamically
          repo = g.get_repo(os.getenv('GITHUB_REPOSITORY'))
-          # List all open pull requests
-          open_pulls = repo.get_pulls(state='open', sort='created')
+          # Fetch the default branch name
+          default_branch_name = repo.default_branch
+          default_branch = repo.get_branch(default_branch_name)
-          for pr in open_pulls:
-              # Compare PR head with main to see if it's behind
+          for pr in repo.get_pulls(state='open', sort='created'):
              try:
-                  # Merge main into the PR branch
-                  success = pr.update_branch()
-                  assert success, "Branch update failed"
-                  print(f"Merged 'master' into PR #{pr.number} ({pr.head.ref}) successfully.")
+                  # Get full names for repositories and branches
+                  base_repo_name = repo.full_name
+                  head_repo_name = pr.head.repo.full_name
+                  base_branch_name = pr.base.ref
+                  head_branch_name = pr.head.ref
+                  # Check if PR is behind the default branch
+                  comparison = repo.compare(default_branch.commit.sha, pr.head.sha)
+                  if comparison.behind_by > 0:
+                      print(f"⚠️ PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}) is behind {default_branch_name} by {comparison.behind_by} commit(s).")
+                      # Attempt to update the branch
+                      try:
+                          success = pr.update_branch()
+                          assert success, "Branch update failed"
+                          print(f"✅ Successfully merged '{default_branch_name}' into PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}).")
+                      except Exception as update_error:
+                          print(f"❌ Could not update PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}): {update_error}")
+                          print("    This might be due to branch protection rules or insufficient permissions.")
+                  else:
+                      print(f"✅ PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}) is up to date with {default_branch_name}.")
              except Exception as e:
-                  print(f"Could not merge 'master' into PR #{pr.number} ({pr.head.ref}): {e}")
+                  print(f"❌ Could not process PR #{pr.number}: {e}")
        env:
-          GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
+          GITHUB_TOKEN: ${{ secrets._GITHUB_TOKEN }}
          GITHUB_REPOSITORY: ${{ github.repository }}
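The updated script keys its behavior off PyGithub's branch comparison. A minimal local sketch of the same check, assuming a `GITHUB_TOKEN` environment variable with repo scope (the repository name below is only an example):

```python
import os

from github import Github  # pip install pygithub

g = Github(os.environ["GITHUB_TOKEN"])
repo = g.get_repo("ultralytics/yolov5")  # example repository
default_branch = repo.get_branch(repo.default_branch)

for pr in repo.get_pulls(state="open", sort="created"):
    # behind_by counts commits on the default branch missing from the PR head
    comparison = repo.compare(default_branch.commit.sha, pr.head.sha)
    if comparison.behind_by > 0:
        print(f"PR #{pr.number} is behind by {comparison.behind_by} commit(s)")
```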


@@ -1,4 +1,4 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
name: Close stale issues
on:


@@ -64,10 +64,10 @@ When asking a question, people will be better able to provide help if you provide:
 - ✅ **Complete** Provide **all** parts someone else needs to reproduce your problem in the question itself
 - ✅ **Reproducible** Test the code you're about to provide to make sure it reproduces the problem
-In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code should be:
+In addition to the above requirements, for [Ultralytics](https://www.ultralytics.com/) to provide assistance your code should be:
 - ✅ **Current** Verify that your code is up-to-date with the current GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new copy to ensure your problem has not already been resolved by previous commits.
-- ✅ **Unmodified** Your problem must be reproducible without any modifications to the codebase in this repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️.
+- ✅ **Unmodified** Your problem must be reproducible without any modifications to the codebase in this repository. [Ultralytics](https://www.ultralytics.com/) does not provide support for custom code ⚠️.
If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛 **Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and provide a [minimum reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/) to help us better understand and diagnose your problem.

README.md

@@ -1,28 +1,28 @@
 <div align="center">
   <p>
-    <a href="https://ultralytics.com/events/yolovision" target="_blank">
+    <a href="https://www.ultralytics.com/events/yolovision" target="_blank">
     <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png"></a>
   </p>
-[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/)
+[中文](https://docs.ultralytics.com/zh) | [한국어](https://docs.ultralytics.com/ko) | [日本語](https://docs.ultralytics.com/ja) | [Русский](https://docs.ultralytics.com/ru) | [Deutsch](https://docs.ultralytics.com/de) | [Français](https://docs.ultralytics.com/fr) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt) | [Türkçe](https://docs.ultralytics.com/tr) | [Tiếng Việt](https://docs.ultralytics.com/vi) | [العربية](https://docs.ultralytics.com/ar)
 <div>
   <a href="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml"><img src="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg" alt="YOLOv5 CI"></a>
   <a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="YOLOv5 Citation"></a>
   <a href="https://hub.docker.com/r/ultralytics/yolov5"><img src="https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker" alt="Docker Pulls"></a>
-  <a href="https://ultralytics.com/discord"><img alt="Discord" src="https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue"></a> <a href="https://community.ultralytics.com"><img alt="Ultralytics Forums" src="https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue"></a>
+  <a href="https://discord.com/invite/ultralytics"><img alt="Discord" src="https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue"></a> <a href="https://community.ultralytics.com/"><img alt="Ultralytics Forums" src="https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue"></a> <a href="https://reddit.com/r/ultralytics"><img alt="Ultralytics Reddit" src="https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue"></a>
   <br>
   <a href="https://bit.ly/yolov5-paperspace-notebook"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run on Gradient"></a>
   <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
-  <a href="https://www.kaggle.com/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
+  <a href="https://www.kaggle.com/models/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
 </div>
 <br>
-YOLOv5 🚀 is the world's most loved vision AI, representing <a href="https://ultralytics.com">Ultralytics</a> open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.
+YOLOv5 🚀 is the world's most loved vision AI, representing <a href="https://www.ultralytics.com/">Ultralytics</a> open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.
-We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 <a href="https://docs.ultralytics.com/yolov5">Docs</a> for details, raise an issue on <a href="https://github.com/ultralytics/yolov5/issues/new/choose">GitHub</a> for support, and join our <a href="https://ultralytics.com/discord">Discord</a> community for questions and discussions!
+We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 <a href="https://docs.ultralytics.com/yolov5/">Docs</a> for details, raise an issue on <a href="https://github.com/ultralytics/yolov5/issues/new/choose">GitHub</a> for support, and join our <a href="https://discord.com/invite/ultralytics">Discord</a> community for questions and discussions!
-To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license).
+To request an Enterprise License please complete the form at [Ultralytics Licensing](https://www.ultralytics.com/license).
<div align="center">
<a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="Ultralytics GitHub"></a>
@@ -37,32 +37,32 @@ To request an Enterprise License please complete the form at [Ultralytics Licens
 <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
 <a href="https://ultralytics.com/bilibili"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-bilibili.png" width="2%" alt="Ultralytics BiliBili"></a>
 <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
-<a href="https://ultralytics.com/discord"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="2%" alt="Ultralytics Discord"></a>
+<a href="https://discord.com/invite/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="2%" alt="Ultralytics Discord"></a>
</div>
</div>
<br>
-## <div align="center">YOLOv8 🚀 NEW</div>
+## <div align="center">YOLO11 🚀 NEW</div>
-We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection, image segmentation and image classification tasks.
+We are excited to unveil the launch of Ultralytics YOLO11 🚀, the latest advancement in our state-of-the-art (SOTA) vision models! Available now at **[GitHub](https://github.com/ultralytics/ultralytics)**, YOLO11 builds on our legacy of speed, precision, and ease of use. Whether you're tackling object detection, image segmentation, or image classification, YOLO11 delivers the performance and versatility needed to excel in diverse applications.
-See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with:
+Get started today and unlock the full potential of YOLO11! Visit the [Ultralytics Docs](https://docs.ultralytics.com/) for comprehensive guides and resources:
-[![PyPI version](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics)
+[![PyPI version](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://www.pepy.tech/projects/ultralytics)
```bash
pip install ultralytics
```
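As a quick sanity check on the package the new section introduces, a minimal sketch of loading a YOLO11 model via the `ultralytics` Python API (the checkpoint name and image URL are illustrative examples):

```python
from ultralytics import YOLO  # pip install ultralytics

model = YOLO("yolo11n.pt")  # example nano checkpoint, downloads automatically
results = model("https://ultralytics.com/images/bus.jpg")  # example image
results[0].show()  # display detections
```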
<div align="center">
<a href="https://ultralytics.com/yolov8" target="_blank">
<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/yolo-comparison-plots.png"></a>
<a href="https://www.ultralytics.com/yolo" target="_blank">
<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/refs/heads/main/yolo/performance-comparison.png"></a>
</div>
## <div align="center">Documentation</div>
-See the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5) for full documentation on training, testing and deployment. See below for quickstart examples.
+See the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5/) for full documentation on training, testing and deployment. See below for quickstart examples.
<details open>
<summary>Install</summary>
@@ -80,7 +80,7 @@ pip install -r requirements.txt # install
<details>
<summary>Inference</summary>
-YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).
+YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).
```python
import torch
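# Minimal continuation sketch, assuming the standard YOLOv5 PyTorch Hub usage
# (model variant and image URL are illustrative examples):
model = torch.hub.load("ultralytics/yolov5", "yolov5s")  # load pretrained model
results = model("https://ultralytics.com/images/zidane.jpg")  # run inference
results.print()  # print results to console
```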
@@ -123,7 +123,7 @@ python detect.py --weights yolov5s.pt --source 0 #
<details>
<summary>Training</summary>
-The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are 1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) times faster). Use the largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB.
+The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are 1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training/) times faster). Use the largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB.
```bash
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128
@@ -140,56 +140,58 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
<details open>
<summary>Tutorials</summary>
-- [Train Custom Data](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data) 🚀 RECOMMENDED
+- [Train Custom Data](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/) 🚀 RECOMMENDED
 - [Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips/) ☘️
-- [Multi-GPU Training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training)
-- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 🌟 NEW
-- [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/tutorials/model_export) 🚀
-- [NVIDIA Jetson platform Deployment](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano) 🌟 NEW
-- [Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation)
-- [Model Ensembling](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling)
-- [Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity)
-- [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution)
-- [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers)
-- [Architecture Summary](https://docs.ultralytics.com/yolov5/tutorials/architecture_description) 🌟 NEW
-- [Roboflow for Datasets, Labeling, and Active Learning](https://docs.ultralytics.com/yolov5/tutorials/roboflow_datasets_integration)
-- [ClearML Logging](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) 🌟 NEW
-- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization) 🌟 NEW
-- [Comet Logging](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration) 🌟 NEW
+- [Multi-GPU Training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training/)
+- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/) 🌟 NEW
+- [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/tutorials/model_export/) 🚀
+- [NVIDIA Jetson platform Deployment](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano/) 🌟 NEW
+- [Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/)
+- [Model Ensembling](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling/)
+- [Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity/)
+- [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/)
+- [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers/)
+- [Architecture Summary](https://docs.ultralytics.com/yolov5/tutorials/architecture_description/) 🌟 NEW
+- [Ultralytics HUB to train and deploy YOLO](https://www.ultralytics.com/hub) 🚀 RECOMMENDED
+- [ClearML Logging](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration/)
+- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization/)
+- [Comet Logging](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration/) 🌟 NEW
</details>
## <div align="center">Integrations</div>
Our key integrations with leading AI platforms extend the functionality of Ultralytics' offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics, in collaboration with [W&B](https://docs.wandb.ai/guides/integrations/ultralytics/), [Comet](https://bit.ly/yolov8-readme-comet), [Roboflow](https://roboflow.com/?ref=ultralytics) and [OpenVINO](https://docs.ultralytics.com/integrations/openvino/), can optimize your AI workflow.
<br>
-<a align="center" href="https://ultralytics.com/hub" target="_blank">
-  <img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/integrations-loop.png"></a>
+<a href="https://www.ultralytics.com/hub" target="_blank">
+  <img width="100%" src="https://github.com/ultralytics/assets/raw/main/yolov8/banner-integrations.png" alt="Ultralytics active learning integrations"></a>
<br>
<br>
<div align="center">
<a href="https://roboflow.com/?ref=ultralytics">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-roboflow.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="" />
<a href="https://cutt.ly/yolov5-readme-clearml">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-clearml.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="" />
<a href="https://bit.ly/yolov5-readme-comet2">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-comet.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="" />
<a href="https://www.ultralytics.com/hub">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-ultralytics-hub.png" width="10%" alt="Ultralytics HUB logo"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
<a href="https://docs.wandb.ai/guides/integrations/ultralytics/">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-wb.png" width="10%" alt="ClearML logo"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
<a href="https://bit.ly/yolov8-readme-comet">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-comet.png" width="10%" alt="Comet ML logo"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
<a href="https://bit.ly/yolov5-neuralmagic">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-neuralmagic.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-neuralmagic.png" width="10%" alt="NeuralMagic logo"></a>
</div>
-| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW |
-| :--------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: |
-| Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions | Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |
+| Ultralytics HUB 🚀 | W&B | Comet ⭐ NEW | Neural Magic |
+| :--------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: |
+| Streamline YOLO workflows: Label, train, and deploy effortlessly with [Ultralytics HUB](https://www.ultralytics.com/hub). Try now! | Track experiments, hyperparameters, and results with [Weights & Biases](https://docs.wandb.ai/guides/integrations/ultralytics/) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualize and debug predictions | Run YOLO11 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |
## <div align="center">Ultralytics HUB</div>
-Experience seamless AI with [Ultralytics HUB](https://ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now!
+Experience seamless AI with [Ultralytics HUB](https://www.ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://www.ultralytics.com/app-install). Start your journey for **Free** now!
-<a align="center" href="https://ultralytics.com/hub" target="_blank">
+<a align="center" href="https://www.ultralytics.com/hub" target="_blank">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png"></a>
## <div align="center">Why YOLOv5</div>
@@ -206,7 +208,7 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We
<summary>Figure Notes</summary>
- **COCO AP val** denotes mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536.
-- **GPU Speed** measures average inference time per image on [COCO val2017](http://cocodataset.org) dataset using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch-size 32.
+- **GPU Speed** measures average inference time per image on [COCO val2017](http://cocodataset.org) dataset using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p4/) V100 instance at batch-size 32.
- **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 8.
- **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
@@ -233,8 +235,8 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We
- All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).
- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.<br>Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
-- **Speed** averaged over COCO val images using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.<br>Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`
-- **TTA** [Test Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) includes reflection and scale augmentations.<br>Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
+- **Speed** averaged over COCO val images using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p4/) instance. NMS times (~1 ms/img) not included.<br>Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`
+- **TTA** [Test Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/) includes reflection and scale augmentations.<br>Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
</details>
@@ -246,7 +248,7 @@ Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.
<summary>Segmentation Checkpoints</summary>
<div align="center">
-  <a align="center" href="https://ultralytics.com/yolov5" target="_blank">
+  <a align="center" href="https://www.ultralytics.com/yolo" target="_blank">
<img width="800" src="https://user-images.githubusercontent.com/61612323/204180385-84f3aca9-a5e9-43d8-a617-dda7ca12e54a.png"></a>
</div>
@@ -415,7 +417,7 @@ Get started in seconds with our verified environments. Click each icon below for
<a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb">
<img src="https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-colab-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://www.kaggle.com/ultralytics/yolov5">
<a href="https://www.kaggle.com/models/ultralytics/yolov5">
<img src="https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-kaggle-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://hub.docker.com/r/ultralytics/yolov5">
@@ -430,7 +432,7 @@ Get started in seconds with our verified environments. Click each icon below for
## <div align="center">Contribute</div>
-We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors!
+We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out the [YOLOv5 Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors!
<!-- SVG image from https://opencollective.com/ultralytics/contributors.svg?width=990 -->
@@ -441,12 +443,12 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare
Ultralytics offers two licensing options to accommodate diverse use cases:
-- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/licenses/) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for more details.
-- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://ultralytics.com/license).
+- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/license) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for more details.
+- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://www.ultralytics.com/license).
## <div align="center">Contact</div>
-For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://ultralytics.com/discord) community for questions and discussions!
+For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://discord.com/invite/ultralytics) community for questions and discussions!
<br>
<div align="center">
@@ -462,7 +464,7 @@ For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https:/
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
<a href="https://ultralytics.com/bilibili"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-bilibili.png" width="3%" alt="Ultralytics BiliBili"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
<a href="https://ultralytics.com/discord"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="3%" alt="Ultralytics Discord"></a>
<a href="https://discord.com/invite/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="3%" alt="Ultralytics Discord"></a>
</div>
[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation


@@ -1,27 +1,28 @@
<div align="center">
<p>
<a href="https://ultralytics.com/events/yolovision" target="_blank">
<a href="https://www.ultralytics.com/events/yolovision" target="_blank">
<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png"></a>
</p>
-[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/)
+[中文](https://docs.ultralytics.com/zh) | [한국어](https://docs.ultralytics.com/ko) | [日本語](https://docs.ultralytics.com/ja) | [Русский](https://docs.ultralytics.com/ru) | [Deutsch](https://docs.ultralytics.com/de) | [Français](https://docs.ultralytics.com/fr) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt) | [Türkçe](https://docs.ultralytics.com/tr) | [Tiếng Việt](https://docs.ultralytics.com/vi) | [العربية](https://docs.ultralytics.com/ar)
<div>
<a href="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml"><img src="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg" alt="YOLOv5 CI"></a>
<a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="YOLOv5 Citation"></a>
<a href="https://hub.docker.com/r/ultralytics/yolov5"><img src="https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker" alt="Docker Pulls"></a>
<a href="https://discord.com/invite/ultralytics"><img alt="Discord" src="https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue"></a> <a href="https://community.ultralytics.com/"><img alt="Ultralytics Forums" src="https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue"></a> <a href="https://reddit.com/r/ultralytics"><img alt="Ultralytics Reddit" src="https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue"></a>
<br>
<a href="https://bit.ly/yolov5-paperspace-notebook"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run on Gradient"></a>
<a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
<a href="https://www.kaggle.com/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
<a href="https://www.kaggle.com/models/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
</div>
<br>
-YOLOv5 🚀 is the world's most loved vision AI, representing <a href="https://ultralytics.com"> Ultralytics </a> open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.
+YOLOv5 🚀 is the world's most loved vision AI, representing <a href="https://www.ultralytics.com/"> Ultralytics </a> open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.
-We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 <a href="https://docs.ultralytics.com/yolov5/">Docs</a> for details, raise an issue on <a href="https://github.com/ultralytics/yolov5/issues/new/choose">GitHub</a> for support, and join our <a href="https://ultralytics.com/discord">Discord</a> community for questions and discussions!
+We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 <a href="https://docs.ultralytics.com/yolov5/">Docs</a> for details, raise an issue on <a href="https://github.com/ultralytics/yolov5/issues/new/choose">GitHub</a> for support, and join our <a href="https://discord.com/invite/ultralytics">Discord</a> community for questions and discussions!
-To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license).
+To request an Enterprise License please complete the form at [Ultralytics Licensing](https://www.ultralytics.com/license).
<div align="center">
<a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="Ultralytics GitHub"></a>
@@ -36,25 +37,25 @@ YOLOv5 🚀 is the world's most loved vision AI, representing <a href="https://ultral
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
<a href="https://ultralytics.com/bilibili"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-bilibili.png" width="2%" alt="Ultralytics BiliBili"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
<a href="https://ultralytics.com/discord"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="2%" alt="Ultralytics Discord"></a>
<a href="https://discord.com/invite/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="2%" alt="Ultralytics Discord"></a>
</div>
</div>
-## <div align="center">YOLOv8 🚀 NEW</div>
+## <div align="center">YOLO11 🚀 NEW</div>
-We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our new cutting-edge, state-of-the-art (SOTA) model, released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection, image segmentation and image classification tasks.
+We are excited to announce the launch of Ultralytics YOLO11 🚀, the latest advancement in our state-of-the-art (SOTA) vision models! Now available on **[GitHub](https://github.com/ultralytics/ultralytics)**, YOLO11 builds on our legacy of speed, precision, and ease of use. Whether you're tackling object detection, image segmentation, or image classification, YOLO11 delivers the performance and versatility to help you excel in diverse applications.
-See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with:
+Get started today and unlock the full potential of YOLO11! Visit the [Ultralytics Docs](https://docs.ultralytics.com/) for comprehensive guides and resources:
-[![PyPI version](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics)
+[![PyPI version](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://www.pepy.tech/projects/ultralytics)
-```commandline
+```bash
pip install ultralytics
```
<div align="center">
<a href="https://ultralytics.com/yolov8" target="_blank">
<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/yolo-comparison-plots.png"></a>
<a href="https://www.ultralytics.com/yolo" target="_blank">
<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/refs/heads/main/yolo/performance-comparison.png"></a>
</div>
## <div align="center">Documentation</div>
@@ -77,7 +78,7 @@ pip install -r requirements.txt # install
<details>
<summary>Inference</summary>
Run YOLOv5 inference using [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading). The latest [models](https://github.com/ultralytics/yolov5/tree/master/models) are downloaded automatically from the YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).
Run YOLOv5 inference using [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/). The latest [models](https://github.com/ultralytics/yolov5/tree/master/models) are downloaded automatically from the YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).
```python
import torch
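# The remainder of this snippet is elided by the diff; the canonical PyTorch Hub
# workflow it documents looks like this (a sketch using the standard defaults):
model = torch.hub.load("ultralytics/yolov5", "yolov5s")  # load pretrained YOLOv5s
results = model("https://ultralytics.com/images/zidane.jpg")  # inference on an image
results.print()  # summarize detections; results.show() and results.save() also work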
@ -121,7 +122,7 @@ python detect.py --weights yolov5s.pt --source 0 #
<summary>Training</summary>
The commands below reproduce YOLOv5 results on the [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) dataset. The latest [models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data)
are downloaded automatically from the YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are 1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) training is faster). Use the largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown are for V100-16GB.
are downloaded automatically from the YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are 1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training/) training is faster). Use the largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown are for V100-16GB.
```bash
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128
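# AutoBatch, referenced above: pass --batch-size -1 and YOLOv5 selects the largest
# batch size that fits in GPU memory (a usage sketch of the documented flag)
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5s.yaml --batch-size -1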
@ -138,56 +139,58 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
<details open>
<summary>Tutorials</summary>
- [Train Custom Data](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data) 🚀 RECOMMENDED
- [Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips/) ☘️
- [Multi-GPU Training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training)
- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 🌟 NEW
- [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/tutorials/model_export) 🚀
- [NVIDIA Jetson Platform Deployment](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano) 🌟 NEW
- [Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation)
- [Model Ensembling](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling)
- [Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity)
- [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution)
- [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers)
- [Architecture Summary](https://docs.ultralytics.com/yolov5/tutorials/architecture_description) 🌟 NEW
- [Roboflow for Datasets, Labeling, and Active Learning](https://docs.ultralytics.com/yolov5/tutorials/roboflow_datasets_integration)
- [ClearML Logging](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) 🌟 NEW
- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization) 🌟 NEW
- [Comet Logging](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration) 🌟 NEW
- [Train Custom Data](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/) 🚀 **RECOMMENDED**
- [Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips/) ☘️
- [Multi-GPU Training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training/)
- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/) 🌟 **NEW**
- [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/tutorials/model_export/) 🚀
- [NVIDIA Jetson Platform Deployment](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano/) 🌟 **NEW**
- [Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/)
- [Model Ensembling](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling/)
- [Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity/)
- [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/)
- [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers/)
- [Architecture Summary](https://docs.ultralytics.com/yolov5/tutorials/architecture_description/) 🌟 **NEW**
- [YOLO Training and Deployment with Ultralytics HUB](https://www.ultralytics.com/hub) 🚀 **RECOMMENDED**
- [ClearML Logging](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration/)
- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization/)
- [Comet Logging](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration/) 🌟 **NEW**
</details>
## <div align="center">模块集成</div>
## <div align="center">集成</div>
我们与领先的 AI 平台的关键集成扩展了 Ultralytics 产品的功能,提升了数据集标注、训练、可视化和模型管理等任务。探索 Ultralytics 如何通过与 [W&B](https://docs.wandb.ai/guides/integrations/ultralytics/)、[Comet](https://bit.ly/yolov8-readme-comet)、[Roboflow](https://roboflow.com/?ref=ultralytics) 和 [OpenVINO](https://docs.ultralytics.com/integrations/openvino/) 的合作,优化您的 AI 工作流程。
<br>
<a align="center" href="https://ultralytics.com/hub" target="_blank">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/integrations-loop.png"></a>
<a href="https://www.ultralytics.com/hub" target="_blank">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/yolov8/banner-integrations.png" alt="Ultralytics active learning integrations"></a>
<br>
<br>
<div align="center">
<a href="https://roboflow.com/?ref=ultralytics">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-roboflow.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="" />
<a href="https://cutt.ly/yolov5-readme-clearml">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-clearml.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="" />
<a href="https://bit.ly/yolov5-readme-comet2">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-comet.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="" />
<a href="https://www.ultralytics.com/hub">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-ultralytics-hub.png" width="10%" alt="Ultralytics HUB logo"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
<a href="https://docs.wandb.ai/guides/integrations/ultralytics/">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-wb.png" width="10%" alt="W&B logo"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
<a href="https://bit.ly/yolov8-readme-comet">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-comet.png" width="10%" alt="Comet ML logo"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
<a href="https://bit.ly/yolov5-neuralmagic">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-neuralmagic.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-neuralmagic.png" width="10%" alt="NeuralMagic logo"></a>
</div>
| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic |
| :---: | :---: | :---: | :---: |
| Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize, and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualize and debug predictions | Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |
| Ultralytics HUB 🚀 | W&B | Comet ⭐ | Neural Magic |
| :---: | :---: | :---: | :---: |
| Streamline YOLO workflows: label, train, and deploy effortlessly with [Ultralytics HUB](https://www.ultralytics.com/hub). Try it now! | Track experiments, hyperparameters, and results with [Weights & Biases](https://docs.wandb.ai/guides/integrations/ultralytics/) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLO11 models, resume training, and interactively visualize and debug predictions | Run YOLO11 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |
## <div align="center">Ultralytics HUB</div>
[Ultralytics HUB](https://ultralytics.com/hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **free** now!
[Ultralytics HUB](https://www.ultralytics.com/hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **free** now!
<a align="center" href="https://ultralytics.com/hub" target="_blank">
<a align="center" href="https://www.ultralytics.com/hub" target="_blank">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png"></a>
## <div align="center">为什么选择 YOLOv5</div>
@ -204,7 +207,7 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结
<summary>Figure Notes</summary>
- **COCO AP val** denotes the mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536.
- **GPU Speed** measures average inference time per image on the [COCO val2017](http://cocodataset.org) dataset using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch size 32.
- **GPU Speed** measures average inference time per image on the [COCO val2017](http://cocodataset.org) dataset using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p4/) V100 instance at batch size 32.
- **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 32.
- **Reproduce** with `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
@ -231,8 +234,8 @@ YOLOv5 is super easy to get started with and simple to learn. We prioritize real-world results
- All checkpoints were trained for 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyperparameters; all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).
- \*\*mAP<sup>val</sup>\*\* values are for single-model single-scale on the [COCO val2017](http://cocodataset.org) dataset.<br>Reproduce with `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
- **Speed** averaged over COCO val images using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS time (~1 ms/img) is not included.<br>Reproduce with `python val.py --data coco.yaml --img 640 --task speed --batch 1`
- **TTA** [Test-Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) includes reflection and scale augmentations.<br>Reproduce with `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
- **Speed** averaged over COCO val images using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p4/) instance. NMS time (~1 ms/img) is not included.<br>Reproduce with `python val.py --data coco.yaml --img 640 --task speed --batch 1`
- **TTA** [Test-Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/) includes reflection and scale augmentations.<br>Reproduce with `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
</details>
@ -246,7 +249,7 @@ YOLOv5 is super easy to get started with and simple to learn. We prioritize real-world results
<br>
<div align="center">
<a align="center" href="https://ultralytics.com/yolov5" target="_blank">
<a align="center" href="https://www.ultralytics.com/yolo" target="_blank">
<img width="800" src="https://user-images.githubusercontent.com/61612323/204180385-84f3aca9-a5e9-43d8-a617-dda7ca12e54a.png"></a>
</div>
@ -414,7 +417,7 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu
<a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb">
<img src="https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-colab-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://www.kaggle.com/ultralytics/yolov5">
<a href="https://www.kaggle.com/models/ultralytics/yolov5">
<img src="https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-kaggle-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://hub.docker.com/r/ultralytics/yolov5">
@ -429,7 +432,7 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu
## <div align="center">贡献</div>
我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的 [投稿指南](https://docs.ultralytics.com/help/contributing/),并填写 [YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 向我们发送您的体验反馈。感谢我们所有的贡献者!
我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的 [投稿指南](https://docs.ultralytics.com/help/contributing/),并填写 [YOLOv5调查](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 向我们发送您的体验反馈。感谢我们所有的贡献者!
<!-- SVG image from https://opencollective.com/ultralytics/contributors.svg?width=990 -->
@ -440,12 +443,12 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu
Ultralytics offers two licensing options to accommodate diverse use cases:
- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/licenses/) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details.
- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://ultralytics.com/license).
- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/license) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details.
- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://www.ultralytics.com/license).
## <div align="center">联系方式</div>
对于 Ultralytics 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues),并加入我们的 [Discord](https://ultralytics.com/discord) 社区进行问题和讨论!
对于 Ultralytics 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues),并加入我们的 [Discord](https://discord.com/invite/ultralytics) 社区进行问题和讨论!
<br>
<div align="center">
@ -461,7 +464,7 @@ Ultralytics 提供两种许可证选项以适应各种使用场景:
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
<a href="https://ultralytics.com/bilibili"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-bilibili.png" width="3%" alt="Ultralytics BiliBili"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
<a href="https://ultralytics.com/discord"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="3%" alt="Ultralytics Discord"></a>
<a href="https://discord.com/invite/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="3%" alt="Ultralytics Discord"></a>
</div>
[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
Run YOLOv5 benchmarks on all supported export formats.
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
@ -147,7 +147,7 @@ def run(
save_path = str(save_dir / p.name) # im.jpg
txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}") # im.txt
s += "%gx%g " % im.shape[2:] # print string
s += "{:g}x{:g} ".format(*im.shape[2:]) # print string
annotator = Annotator(im0, example=str(names), pil=True)
# Print results
@ -192,7 +192,7 @@ def run(
vid_writer[i].write(im0)
# Print time (inference-only)
LOGGER.info(f"{s}{dt[1].dt * 1E3:.1f}ms")
LOGGER.info(f"{s}{dt[1].dt * 1e3:.1f}ms")
# Print results
t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
Train a YOLOv5 classifier model on a classification dataset.
@ -201,10 +201,10 @@ def train(opt, device):
scaler = amp.GradScaler(enabled=cuda)
val = test_dir.stem # 'val' or 'test'
LOGGER.info(
f'Image sizes {imgsz} train, {imgsz} test\n'
f'Using {nw * WORLD_SIZE} dataloader workers\n'
f"Image sizes {imgsz} train, {imgsz} test\n"
f"Using {nw * WORLD_SIZE} dataloader workers\n"
f"Logging results to {colorstr('bold', save_dir)}\n"
f'Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n'
f"Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n"
f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}"
)
for epoch in range(epochs): # loop over the dataset multiple times
@ -290,13 +290,13 @@ def train(opt, device):
# Train complete
if RANK in {-1, 0} and final_epoch:
LOGGER.info(
f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)'
f"\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)"
f"\nResults saved to {colorstr('bold', save_dir)}"
f'\nPredict: python classify/predict.py --weights {best} --source im.jpg'
f'\nValidate: python classify/val.py --weights {best} --data {data_dir}'
f'\nExport: python export.py --weights {best} --include onnx'
f"\nPredict: python classify/predict.py --weights {best} --source im.jpg"
f"\nValidate: python classify/val.py --weights {best} --data {data_dir}"
f"\nExport: python export.py --weights {best} --include onnx"
f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')"
f'\nVisualize: https://netron.app\n'
f"\nVisualize: https://netron.app\n"
)
# Plot examples
@ -15,7 +15,7 @@
"<br>\n",
" <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a>\n",
" <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/classify/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
" <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
" <a href=\"https://www.kaggle.com/models/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
"<br>\n",
"\n",
"This <a href=\"https://github.com/ultralytics/yolov5\">YOLOv5</a> 🚀 notebook by <a href=\"https://ultralytics.com\">Ultralytics</a> presents simple train, validate and predict examples to help start your AI adventure.<br>See <a href=\"https://github.com/ultralytics/yolov5/issues/new/choose\">GitHub</a> for community support or <a href=\"https://ultralytics.com/contact\">contact us</a> for professional support.\n",
@ -1410,7 +1410,7 @@
"\n",
"YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
"\n",
"- **Notebooks** with free GPU: <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a> <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a> <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
"- **Notebooks** with free GPU: <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a> <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a> <a href=\"https://www.kaggle.com/models/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
"- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n",
"- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n",
"- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) <a href=\"https://hub.docker.com/r/ultralytics/yolov5\"><img src=\"https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker\" alt=\"Docker Pulls\"></a>\n"
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
Validate a trained YOLOv5 classification model on a classification dataset.
@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
# Example usage: python train.py --data Argoverse.yaml
# parent
@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan
# Example usage: python train.py --data GlobalWheat2020.yaml
# parent
@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University
# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels
# Example usage: python classify/train.py --data imagenet
@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University
# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels
# Example usage: python classify/train.py --data imagenet
@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University
# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels
# Example usage: python classify/train.py --data imagenet
@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University
# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels
# Example usage: python classify/train.py --data imagenet
@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Objects365 dataset https://www.objects365.org/ by Megvii
# Example usage: python train.py --data Objects365.yaml
# parent
@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail
# Example usage: python train.py --data SKU-110K.yaml
# parent
@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford
# Example usage: python train.py --data VOC.yaml
# parent
@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University
# Example usage: python train.py --data VisDrone.yaml
# parent
@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# COCO 2017 dataset http://cocodataset.org by Microsoft
# Example usage: python train.py --data coco.yaml
# parent
@ -1,5 +1,6 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# COCO128-seg dataset https://www.kaggle.com/datasets/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
# Example usage: python train.py --data coco128.yaml
# parent
# ├── yolov5
@ -1,5 +1,6 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# COCO128 dataset https://www.kaggle.com/datasets/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
# Example usage: python train.py --data coco128.yaml
# parent
# ├── yolov5
@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Hyperparameters for Objects365 training
# python train.py --weights yolov5m.pt --data Objects365.yaml --evolve
# See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials
@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Hyperparameters for VOC training
# python train.py --batch 128 --weights yolov5m6.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve
# See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials
@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Hyperparameters when using Albumentations frameworks
# python train.py --hyp hyp.no-augmentation.yaml
# See https://github.com/ultralytics/yolov5/pull/3882 for YOLOv5 + Albumentations Usage examples
@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Hyperparameters for high-augmentation COCO training from scratch
# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Hyperparameters for low-augmentation COCO training from scratch
# python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear
# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Hyperparameters for medium-augmentation COCO training from scratch
# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA)
# -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! --------
# Example usage: python train.py --data xView.yaml
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
@ -219,9 +219,10 @@ def run(
def write_to_csv(image_name, prediction, confidence):
"""Writes prediction data for an image to a CSV file, appending if the file exists."""
data = {"Image Name": image_name, "Prediction": prediction, "Confidence": confidence}
file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a", newline="") as f:
writer = csv.DictWriter(f, fieldnames=data.keys())
if not csv_path.is_file():
if not file_exists:
writer.writeheader()
writer.writerow(data)
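For clarity, the patched helper reads as follows once both hunks apply (a sketch; `csv_path` is a `Path` defined in the enclosing `run()` scope, and `os` and `csv` are module-level imports):

```python
def write_to_csv(image_name, prediction, confidence):
    """Writes prediction data for an image to a CSV file, appending if the file exists."""
    data = {"Image Name": image_name, "Prediction": prediction, "Confidence": confidence}
    file_exists = os.path.isfile(csv_path)  # checked before open(), since append mode creates the file
    with open(csv_path, mode="a", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=data.keys())
        if not file_exists:
            writer.writeheader()  # emit the header only when the file is first created
        writer.writerow(data)
```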
@ -237,7 +238,7 @@ def run(
p = Path(p) # to Path
save_path = str(save_dir / p.name) # im.jpg
txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}") # im.txt
s += "%gx%g " % im.shape[2:] # print string
s += "{:g}x{:g} ".format(*im.shape[2:]) # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
imc = im0.copy() if save_crop else im0 # for save_crop
annotator = Annotator(im0, line_width=line_thickness, example=str(names))
@ -308,7 +309,7 @@ def run(
vid_writer[i].write(im0)
# Print time (inference-only)
LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1e3:.1f}ms")
# Print results
t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image
@ -1,6 +1,6 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit.
Format | `export.py --include` | Model
--- | --- | ---
@ -91,6 +91,8 @@ MACOS = platform.system() == "Darwin" # macOS environment
class iOSModel(torch.nn.Module):
"""An iOS-compatible wrapper for YOLOv5 models that normalizes input images based on their dimensions."""
def __init__(self, model, im):
"""
Initializes an iOS compatible model with normalization based on image dimensions.
@ -141,7 +143,7 @@ class iOSModel(torch.nn.Module):
def export_formats():
"""
r"""
Returns a DataFrame of supported YOLOv5 model export formats and their properties.
Returns:
@ -450,8 +452,9 @@ def export_openvino(file, metadata, half, int8, data, prefix=colorstr("OpenVINO:
Extracts and preprocesses input data from a dataloader item for quantization.
Parameters:
data_item: Tuple with data item produced by DataLoader during iteration
Returns:
input_tensor: Input data for quantization
"""
@ -563,11 +566,7 @@ def export_coreml(model, im, file, int8, half, nms, mlmodel, prefix=colorstr("Co
else:
f = file.with_suffix(".mlpackage")
convert_to = "mlprogram"
if half:
precision = ct.precision.FLOAT16
else:
precision = ct.precision.FLOAT32
precision = ct.precision.FLOAT16 if half else ct.precision.FLOAT32
if nms:
model = iOSModel(model, im)
ts = torch.jit.trace(model, im, strict=False) # TorchScript model
@ -594,7 +593,9 @@ def export_coreml(model, im, file, int8, half, nms, mlmodel, prefix=colorstr("Co
@try_export
def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr("TensorRT:")):
def export_engine(
model, im, file, half, dynamic, simplify, workspace=4, verbose=False, cache="", prefix=colorstr("TensorRT:")
):
"""
Export a YOLOv5 model to TensorRT engine format, requiring GPU and TensorRT>=7.0.0.
@ -607,6 +608,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose
simplify (bool): Set to True to simplify the model during export.
workspace (int): Workspace size in GB (default is 4).
verbose (bool): Set to True for verbose logging output.
cache (str): Path to save the TensorRT timing cache.
prefix (str): Log message prefix.
Returns:
@ -661,6 +663,11 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose
config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30)
else: # TensorRT versions 7, 8
config.max_workspace_size = workspace * 1 << 30
if cache: # enable timing cache
Path(cache).parent.mkdir(parents=True, exist_ok=True)
buf = Path(cache).read_bytes() if Path(cache).exists() else b""
timing_cache = config.create_timing_cache(buf)
config.set_timing_cache(timing_cache, ignore_mismatch=True)
flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
network = builder.create_network(flag)
parser = trt.OnnxParser(network, logger)
@ -689,6 +696,9 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose
build = builder.build_serialized_network if is_trt10 else builder.build_engine
with build(network, config) as engine, open(f, "wb") as t:
t.write(engine if is_trt10 else engine.serialize())
if cache: # save timing cache
with open(cache, "wb") as c:
c.write(config.get_timing_cache().serialize())
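Together with the `--cache` flag registered in `parse_opt` below, the timing cache can be driven from the CLI. A usage sketch (the cache file name is arbitrary; it is created on the first engine build and reused afterwards to skip kernel auto-tuning):

```bash
# first export: builds the engine and serializes the TensorRT timing cache
python export.py --weights yolov5s.pt --include engine --device 0 --cache yolov5.timing.cache
# later exports: reuse the cache for a faster build
python export.py --weights yolov5m.pt --include engine --device 0 --cache yolov5.timing.cache
```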
return f, None
@ -1135,11 +1145,7 @@ def pipeline_coreml(model, im, file, names, y, mlmodel, prefix=colorstr("CoreML
import coremltools as ct
from PIL import Image
if mlmodel:
f = file.with_suffix(".mlmodel") # filename
else:
f = file.with_suffix(".mlpackage") # filename
f = file.with_suffix(".mlmodel") if mlmodel else file.with_suffix(".mlpackage")
print(f"{prefix} starting pipeline with coremltools {ct.__version__}...")
batch_size, ch, h, w = list(im.shape) # BCHW
t = time.time()
@ -1183,10 +1189,7 @@ def pipeline_coreml(model, im, file, names, y, mlmodel, prefix=colorstr("CoreML
# Model from spec
weights_dir = None
if mlmodel:
weights_dir = None
else:
weights_dir = str(f / "Data/com.apple.CoreML/weights")
weights_dir = None if mlmodel else str(f / "Data/com.apple.CoreML/weights")
model = ct.models.MLModel(spec, weights_dir=weights_dir)
# 3. Create NMS protobuf
@ -1285,6 +1288,7 @@ def run(
int8=False, # CoreML/TF INT8 quantization
per_tensor=False, # TF per tensor quantization
dynamic=False, # ONNX/TF/TensorRT: dynamic axes
cache="", # TensorRT: timing cache path
simplify=False, # ONNX: simplify model
mlmodel=False, # CoreML: Export in *.mlmodel format
opset=12, # ONNX: opset version
@ -1314,6 +1318,7 @@ def run(
int8 (bool): Apply INT8 quantization for CoreML or TensorFlow models. Default is False.
per_tensor (bool): Apply per tensor quantization for TensorFlow models. Default is False.
dynamic (bool): Enable dynamic axes for ONNX, TensorFlow, or TensorRT exports. Default is False.
cache (str): TensorRT timing cache path. Default is an empty string.
simplify (bool): Simplify the ONNX model during export. Default is False.
opset (int): ONNX opset version. Default is 12.
verbose (bool): Enable verbose logging for TensorRT export. Default is False.
@ -1349,6 +1354,7 @@ def run(
int8=False,
per_tensor=False,
dynamic=False,
cache="",
simplify=False,
opset=12,
verbose=False,
@ -1386,7 +1392,8 @@ def run(
# Input
gs = int(max(model.stride)) # grid size (max stride)
imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples
im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection
ch = next(model.parameters()).size(1)  # required input image channels
im = torch.zeros(batch_size, ch, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection
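The dummy export input now takes its channel count from the model's first parameter instead of hardcoding RGB, so non-3-channel models (e.g. grayscale) trace correctly. A standalone sketch of the idea (toy model, hypothetical shapes):

```python
import torch

model = torch.nn.Sequential(torch.nn.Conv2d(1, 16, 3, 2))  # first conv expects 1-channel input
ch = next(model.parameters()).size(1)  # conv weight is (out, in, kH, kW) -> in = 1
im = torch.zeros(1, ch, 640, 640)  # dummy input now matches the model's expected channels
```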
# Update model
model.eval()
@ -1410,7 +1417,7 @@ def run(
if jit: # TorchScript
f[0], _ = export_torchscript(model, im, file, optimize)
if engine: # TensorRT required before ONNX
f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose)
f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose, cache)
if onnx or xml: # OpenVINO requires ONNX
f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify)
if xml: # OpenVINO
@ -1464,12 +1471,12 @@ def run(
else ""
)
LOGGER.info(
f'\nExport complete ({time.time() - t:.1f}s)'
f"\nExport complete ({time.time() - t:.1f}s)"
f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}"
f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}"
f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}"
f'\nVisualize: https://netron.app'
f"\nVisualize: https://netron.app"
)
return f # return list of exported files/dirs
@ -1505,6 +1512,7 @@ def parse_opt(known=False):
parser.add_argument("--int8", action="store_true", help="CoreML/TF/OpenVINO INT8 quantization")
parser.add_argument("--per-tensor", action="store_true", help="TF per-tensor quantization")
parser.add_argument("--dynamic", action="store_true", help="ONNX/TF/TensorRT: dynamic axes")
parser.add_argument("--cache", type=str, default="", help="TensorRT: timing cache file path")
parser.add_argument("--simplify", action="store_true", help="ONNX: simplify model")
parser.add_argument("--mlmodel", action="store_true", help="CoreML: Export in *.mlmodel format")
parser.add_argument("--opset", type=int, default=17, help="ONNX: opset version")
@ -1,6 +1,6 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5
PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5.
Usage:
import torch
@ -0,0 +1 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""Common modules."""
import ast
@ -71,7 +71,8 @@ def autopad(k, p=None, d=1):
class Conv(nn.Module):
# Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation)
"""Applies a convolution, batch normalization, and activation function to an input tensor in a neural network."""
default_act = nn.SiLU() # default activation
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
@ -91,7 +92,8 @@ class Conv(nn.Module):
class DWConv(Conv):
# Depth-wise convolution
"""Implements a depth-wise convolution layer with optional activation for efficient spatial filtering."""
def __init__(self, c1, c2, k=1, s=1, d=1, act=True):
"""Initializes a depth-wise convolution layer with optional activation; args: input channels (c1), output
channels (c2), kernel size (k), stride (s), dilation (d), and activation flag (act).
@ -100,7 +102,8 @@ class DWConv(Conv):
class DWConvTranspose2d(nn.ConvTranspose2d):
# Depth-wise transpose convolution
"""A depth-wise transpose convolutional layer for upsampling in neural networks, particularly in YOLOv5 models."""
def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0):
"""Initializes a depth-wise transpose convolutional layer for YOLOv5; args: input channels (c1), output channels
(c2), kernel size (k), stride (s), input padding (p1), output padding (p2).
@ -109,7 +112,8 @@ class DWConvTranspose2d(nn.ConvTranspose2d):
class TransformerLayer(nn.Module):
# Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
"""Transformer layer with multihead attention and linear layers, optimized by removing LayerNorm."""
def __init__(self, c, num_heads):
"""
Initializes a transformer layer, sans LayerNorm for performance, with multihead attention and linear layers.
@ -132,7 +136,8 @@ class TransformerLayer(nn.Module):
class TransformerBlock(nn.Module):
# Vision Transformer https://arxiv.org/abs/2010.11929
"""A Transformer block for vision tasks with convolution, position embeddings, and Transformer layers."""
def __init__(self, c1, c2, num_heads, num_layers):
"""Initializes a Transformer block for vision tasks, adapting dimensions if necessary and stacking specified
layers.
@ -157,7 +162,8 @@ class TransformerBlock(nn.Module):
class Bottleneck(nn.Module):
# Standard bottleneck
"""A bottleneck layer with optional shortcut and group convolution for efficient feature extraction."""
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):
"""Initializes a standard bottleneck layer with optional shortcut and group convolution, supporting channel
expansion.
@ -176,7 +182,8 @@ class Bottleneck(nn.Module):
class BottleneckCSP(nn.Module):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
"""CSP bottleneck layer for feature extraction with cross-stage partial connections and optional shortcuts."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
"""Initializes CSP bottleneck with optional shortcuts; args: ch_in, ch_out, number of repeats, shortcut bool,
groups, expansion.
@ -201,7 +208,8 @@ class BottleneckCSP(nn.Module):
class CrossConv(nn.Module):
# Cross Convolution Downsample
"""Implements a cross convolution layer with downsampling, expansion, and optional shortcut."""
def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
"""
Initializes CrossConv with downsampling, expanding, and optionally shortcutting; `c1` input, `c2` output
@ -221,7 +229,8 @@ class CrossConv(nn.Module):
class C3(nn.Module):
# CSP Bottleneck with 3 convolutions
"""Implements a CSP Bottleneck module with three convolutions for enhanced feature extraction in neural networks."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
"""Initializes C3 module with options for channel count, bottleneck repetition, shortcut usage, group
convolutions, and expansion.
@ -239,7 +248,8 @@ class C3(nn.Module):
class C3x(C3):
# C3 module with cross-convolutions
"""Extends the C3 module with cross-convolutions for enhanced feature extraction in neural networks."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
"""Initializes C3x module with cross-convolutions, extending C3 with customizable channel dimensions, groups,
and expansion.
@ -250,7 +260,8 @@ class C3x(C3):
class C3TR(C3):
# C3 module with TransformerBlock()
"""C3 module with TransformerBlock for enhanced feature extraction in object detection models."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
"""Initializes C3 module with TransformerBlock for enhanced feature extraction, accepts channel sizes, shortcut
config, group, and expansion.
@ -261,7 +272,8 @@ class C3TR(C3):
class C3SPP(C3):
# C3 module with SPP()
"""Extends the C3 module with an SPP layer for enhanced spatial feature extraction and customizable channels."""
def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
"""Initializes a C3 module with SPP layer for advanced spatial feature extraction, given channel sizes, kernel
sizes, shortcut, group, and expansion ratio.
@ -272,7 +284,8 @@ class C3SPP(C3):
class C3Ghost(C3):
# C3 module with GhostBottleneck()
"""Implements a C3 module with Ghost Bottlenecks for efficient feature extraction in YOLOv5."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
"""Initializes YOLOv5's C3 module with Ghost Bottlenecks for efficient feature extraction."""
super().__init__(c1, c2, n, shortcut, g, e)
@ -281,7 +294,8 @@ class C3Ghost(C3):
class SPP(nn.Module):
# Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729
"""Implements Spatial Pyramid Pooling (SPP) for feature extraction, ref: https://arxiv.org/abs/1406.4729."""
def __init__(self, c1, c2, k=(5, 9, 13)):
"""Initializes SPP layer with Spatial Pyramid Pooling, ref: https://arxiv.org/abs/1406.4729, args: c1 (input channels), c2 (output channels), k (kernel sizes)."""
super().__init__()
@ -301,7 +315,8 @@ class SPP(nn.Module):
class SPPF(nn.Module):
# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
"""Implements a fast Spatial Pyramid Pooling (SPPF) layer for efficient feature extraction in YOLOv5 models."""
def __init__(self, c1, c2, k=5):
"""
Initializes YOLOv5 SPPF layer with given channels and kernel size for YOLOv5 model, combining convolution and
@ -326,7 +341,8 @@ class SPPF(nn.Module):
class Focus(nn.Module):
# Focus wh information into c-space
"""Focuses spatial information into channel space using slicing and convolution for efficient feature extraction."""
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
"""Initializes Focus module to concentrate width-height info into channel space with configurable convolution
parameters.
@ -342,7 +358,8 @@ class Focus(nn.Module):
class GhostConv(nn.Module):
# Ghost Convolution https://github.com/huawei-noah/ghostnet
"""Implements Ghost Convolution for efficient feature extraction, see https://github.com/huawei-noah/ghostnet."""
def __init__(self, c1, c2, k=1, s=1, g=1, act=True):
"""Initializes GhostConv with in/out channels, kernel size, stride, groups, and activation; halves out channels
for efficiency.
@ -359,7 +376,8 @@ class GhostConv(nn.Module):
class GhostBottleneck(nn.Module):
# Ghost Bottleneck https://github.com/huawei-noah/ghostnet
"""Efficient bottleneck layer using Ghost Convolutions, see https://github.com/huawei-noah/ghostnet."""
def __init__(self, c1, c2, k=3, s=1):
"""Initializes GhostBottleneck with ch_in `c1`, ch_out `c2`, kernel size `k`, stride `s`; see https://github.com/huawei-noah/ghostnet."""
super().__init__()
@ -379,7 +397,8 @@ class GhostBottleneck(nn.Module):
class Contract(nn.Module):
# Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
"""Contracts spatial dimensions into channel dimensions for efficient processing in neural networks."""
def __init__(self, gain=2):
"""Initializes a layer to contract spatial dimensions (width-height) into channels, e.g., input shape
(1,64,80,80) to (1,256,40,40).
@ -399,7 +418,8 @@ class Contract(nn.Module):
class Expand(nn.Module):
# Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
"""Expands spatial dimensions by redistributing channels, e.g., from (1,64,80,80) to (1,16,160,160)."""
def __init__(self, gain=2):
"""
Initializes the Expand module to increase spatial dimensions by redistributing channels, with an optional gain
@ -422,7 +442,8 @@ class Expand(nn.Module):
class Concat(nn.Module):
# Concatenate a list of tensors along dimension
"""Concatenates tensors along a specified dimension for efficient tensor manipulation in neural networks."""
def __init__(self, dimension=1):
"""Initializes a Concat module to concatenate tensors along a specified dimension."""
super().__init__()
@ -436,7 +457,8 @@ class Concat(nn.Module):
class DetectMultiBackend(nn.Module):
# YOLOv5 MultiBackend class for python inference on various backends
"""YOLOv5 MultiBackend class for inference on various backends including PyTorch, ONNX, TensorRT, and more."""
def __init__(self, weights="yolov5s.pt", device=torch.device("cpu"), dnn=False, data=None, fp16=False, fuse=True):
"""Initializes DetectMultiBackend with support for various inference backends, including PyTorch and ONNX."""
# PyTorch: weights = *.pt
@ -728,6 +750,8 @@ class DetectMultiBackend(nn.Module):
scale, zero_point = output["quantization"]
x = (x.astype(np.float32) - zero_point) * scale # re-scale
y.append(x)
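# Assumed intent of the following check: TFLite segmentation exports may emit
# (protos, detections) in either order; reverse when needed so detections come
# first and the 4-D proto tensor second.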
if len(y) == 2 and len(y[1].shape) != 4:
y = list(reversed(y))
y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]
y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels
@ -778,7 +802,8 @@ class DetectMultiBackend(nn.Module):
class AutoShape(nn.Module):
# YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
"""AutoShape class for robust YOLOv5 inference with preprocessing, NMS, and support for various input formats."""
conf = 0.25 # NMS confidence threshold
iou = 0.45 # NMS IoU threshold
agnostic = False # NMS class-agnostic
@ -889,7 +914,8 @@ class AutoShape(nn.Module):
class Detections:
# YOLOv5 detections class for inference results
"""Manages YOLOv5 detection results with methods for visualization, saving, cropping, and exporting detections."""
def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None):
"""Initializes the YOLOv5 Detections class with image info, predictions, filenames, timing and normalization."""
super().__init__()
@ -1047,7 +1073,8 @@ class Detections:
class Proto(nn.Module):
# YOLOv5 mask Proto module for segmentation models
"""YOLOv5 mask Proto module for segmentation models, performing convolutions and upsampling on input tensors."""
def __init__(self, c1, c_=256, c2=32):
"""Initializes YOLOv5 Proto module for segmentation with input, proto, and mask channels configuration."""
super().__init__()
@ -1062,7 +1089,8 @@ class Proto(nn.Module):
class Classify(nn.Module):
# YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2)
"""YOLOv5 classification head with convolution, pooling, and dropout layers for channel transformation."""
def __init__(
self, c1, c2, k=1, s=1, p=None, g=1, dropout_p=0.0
): # ch_in, ch_out, kernel, stride, padding, groups, dropout probability
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""Experimental modules."""
import math
@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Default anchors for COCO data
# P5 -------------------------------------------------------------------------------------------------------------------
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes
@ -1,7 +1,7 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
TensorFlow, Keras and TFLite versions of YOLOv5
Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127
Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127.
Usage:
$ python models/tf.py --weights yolov5s.pt
@ -49,7 +49,8 @@ from utils.general import LOGGER, make_divisible, print_args
class TFBN(keras.layers.Layer):
# TensorFlow BatchNormalization wrapper
"""TensorFlow BatchNormalization wrapper for initializing with optional pretrained weights."""
def __init__(self, w=None):
"""Initializes a TensorFlow BatchNormalization layer with optional pretrained weights."""
super().__init__()
@ -67,7 +68,8 @@ class TFBN(keras.layers.Layer):
class TFPad(keras.layers.Layer):
# Pad inputs in spatial dimensions 1 and 2
"""Pads input tensors in spatial dimensions 1 and 2 with specified integer or tuple padding values."""
def __init__(self, pad):
"""
Initializes a padding layer for spatial dimensions 1 and 2 with specified padding, supporting both int and tuple
@ -87,7 +89,8 @@ class TFPad(keras.layers.Layer):
class TFConv(keras.layers.Layer):
# Standard convolution
"""Implements a standard convolutional layer with optional batch normalization and activation for TensorFlow."""
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
"""
Initializes a standard convolution layer with optional batch normalization and activation; supports only
@ -118,7 +121,8 @@ class TFConv(keras.layers.Layer):
class TFDWConv(keras.layers.Layer):
# Depthwise convolution
"""Initializes a depthwise convolution layer with optional batch normalization and activation for TensorFlow."""
def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None):
"""
Initializes a depthwise convolution layer with optional batch normalization and activation for TensorFlow
@ -147,7 +151,8 @@ class TFDWConv(keras.layers.Layer):
class TFDWConvTranspose2d(keras.layers.Layer):
# Depthwise ConvTranspose2d
"""Implements a depthwise ConvTranspose2D layer for TensorFlow with specific settings."""
def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None):
"""
Initializes depthwise ConvTranspose2D layer with specific channel, kernel, stride, and padding settings.
@ -179,7 +184,8 @@ class TFDWConvTranspose2d(keras.layers.Layer):
class TFFocus(keras.layers.Layer):
# Focus wh information into c-space
"""Focuses spatial information into channel space using pixel shuffling and convolution for TensorFlow models."""
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
"""
Initializes TFFocus layer to focus width and height information into channel space with custom convolution
@ -201,7 +207,8 @@ class TFFocus(keras.layers.Layer):
class TFBottleneck(keras.layers.Layer):
# Standard bottleneck
"""Implements a TensorFlow bottleneck layer with optional shortcut connections for efficient feature extraction."""
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None):
"""
Initializes a standard bottleneck layer for TensorFlow models, expanding and contracting channels with optional
@ -223,7 +230,8 @@ class TFBottleneck(keras.layers.Layer):
class TFCrossConv(keras.layers.Layer):
# Cross Convolution
"""Implements a cross convolutional layer with optional expansion, grouping, and shortcut for TensorFlow."""
def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None):
"""Initializes cross convolution layer with optional expansion, grouping, and shortcut addition capabilities."""
super().__init__()
@ -238,7 +246,8 @@ class TFCrossConv(keras.layers.Layer):
class TFConv2d(keras.layers.Layer):
# Substitution for PyTorch nn.Conv2D
"""Implements a TensorFlow 2D convolution layer, mimicking PyTorch's nn.Conv2D for specified filters and stride."""
def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
"""Initializes a TensorFlow 2D convolution layer, mimicking PyTorch's nn.Conv2D functionality for given filter
sizes and stride.
@ -261,7 +270,8 @@ class TFConv2d(keras.layers.Layer):
class TFBottleneckCSP(keras.layers.Layer):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
"""Implements a CSP bottleneck layer for TensorFlow models to enhance gradient flow and efficiency."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
"""
Initializes CSP bottleneck layer with specified channel sizes, count, shortcut option, groups, and expansion
@ -289,7 +299,8 @@ class TFBottleneckCSP(keras.layers.Layer):
class TFC3(keras.layers.Layer):
# CSP Bottleneck with 3 convolutions
"""CSP bottleneck layer with 3 convolutions for TensorFlow, supporting optional shortcuts and group convolutions."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
"""
Initializes CSP Bottleneck with 3 convolutions, supporting optional shortcuts and group convolutions.
@ -313,7 +324,8 @@ class TFC3(keras.layers.Layer):
class TFC3x(keras.layers.Layer):
# 3 module with cross-convolutions
"""A TensorFlow layer for enhanced feature extraction using cross-convolutions in object detection models."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
"""
Initializes layer with cross-convolutions for enhanced feature extraction in object detection models.
@ -335,7 +347,8 @@ class TFC3x(keras.layers.Layer):
class TFSPP(keras.layers.Layer):
# Spatial pyramid pooling layer used in YOLOv3-SPP
"""Implements spatial pyramid pooling for YOLOv3-SPP with specific channels and kernel sizes."""
def __init__(self, c1, c2, k=(5, 9, 13), w=None):
"""Initializes a YOLOv3-SPP layer with specific input/output channels and kernel sizes for pooling."""
super().__init__()
@ -351,7 +364,8 @@ class TFSPP(keras.layers.Layer):
class TFSPPF(keras.layers.Layer):
# Spatial pyramid pooling-Fast layer
"""Implements a fast spatial pyramid pooling layer for TensorFlow with optimized feature extraction."""
def __init__(self, c1, c2, k=5, w=None):
"""Initializes a fast spatial pyramid pooling layer with customizable in/out channels, kernel size, and
weights.
@ -373,7 +387,8 @@ class TFSPPF(keras.layers.Layer):
class TFDetect(keras.layers.Layer):
# TF YOLOv5 Detect layer
"""Implements YOLOv5 object detection layer in TensorFlow for predicting bounding boxes and class probabilities."""
def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None):
"""Initializes YOLOv5 detection layer for TensorFlow with configurable classes, anchors, channels, and image
size.
@ -427,7 +442,8 @@ class TFDetect(keras.layers.Layer):
class TFSegment(TFDetect):
# YOLOv5 Segment head for segmentation models
"""YOLOv5 segmentation head for TensorFlow, combining detection and segmentation."""
def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w=None):
"""Initializes YOLOv5 Segment head with specified channel depths, anchors, and input size for segmentation
models.
@ -450,6 +466,8 @@ class TFSegment(TFDetect):
class TFProto(keras.layers.Layer):
"""Implements convolutional and upsampling layers for feature extraction in YOLOv5 segmentation."""
def __init__(self, c1, c_=256, c2=32, w=None):
"""Initializes TFProto layer with convolutional and upsampling layers for feature extraction and
transformation.
@ -466,7 +484,8 @@ class TFProto(keras.layers.Layer):
class TFUpsample(keras.layers.Layer):
# TF version of torch.nn.Upsample()
"""Implements a TensorFlow upsampling layer with specified size, scale factor, and interpolation mode."""
def __init__(self, size, scale_factor, mode, w=None):
"""
Initializes a TensorFlow upsampling layer with specified size, scale_factor, and mode, ensuring scale_factor is
@ -488,7 +507,8 @@ class TFUpsample(keras.layers.Layer):
class TFConcat(keras.layers.Layer):
# TF version of torch.concat()
"""Implements TensorFlow's version of torch.concat() for concatenating tensors along the last dimension."""
def __init__(self, dimension=1, w=None):
"""Initializes a TensorFlow layer for NCHW to NHWC concatenation, requiring dimension=1."""
super().__init__()
@ -581,7 +601,8 @@ def parse_model(d, ch, model, imgsz):
class TFModel:
# TF YOLOv5 model
"""Implements YOLOv5 model in TensorFlow, supporting TensorFlow, Keras, and TFLite formats for object detection."""
def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None, model=None, imgsz=(640, 640)):
"""Initializes TF YOLOv5 model with specified configuration, channels, classes, model instance, and input
size.
@ -653,7 +674,8 @@ class TFModel:
class AgnosticNMS(keras.layers.Layer):
# TF Agnostic NMS
"""Performs agnostic non-maximum suppression (NMS) on detected objects using IoU and confidence thresholds."""
def call(self, input, topk_all, iou_thres, conf_thres):
"""Performs agnostic NMS on input tensors using given thresholds and top-K selection."""
return tf.map_fn(
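Class-agnostic NMS of this kind can be sketched per image with `tf.image.non_max_suppression`; the helper below is an illustration under an assumed box layout, not the layer's actual body:

```python
import tensorflow as tf

def agnostic_nms_single(boxes, scores, topk=100, iou_thres=0.45, conf_thres=0.25):
    """Sketch: class-agnostic NMS for one image; `boxes` is [N, 4] in (y1, x1, y2, x2) order."""
    keep = tf.image.non_max_suppression(
        boxes, scores, max_output_size=topk, iou_threshold=iou_thres, score_threshold=conf_thres
    )
    return tf.gather(boxes, keep), tf.gather(scores, keep)
```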

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
YOLO-specific modules.
@ -70,7 +70,8 @@ except ImportError:
class Detect(nn.Module):
# YOLOv5 Detect head for detection models
"""YOLOv5 Detect head for processing input tensors and generating detection outputs in object detection models."""
stride = None # strides computed during build
dynamic = False # force grid reconstruction
export = False # export mode
@ -127,7 +128,8 @@ class Detect(nn.Module):
class Segment(Detect):
# YOLOv5 Segment head for segmentation models
"""YOLOv5 Segment head for segmentation models, extending Detect with mask and prototype layers."""
def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True):
"""Initializes YOLOv5 Segment head with options for mask count, protos, and channel adjustments."""
super().__init__(nc, anchors, ch, inplace)
@ -214,7 +216,8 @@ class BaseModel(nn.Module):
class DetectionModel(BaseModel):
# YOLOv5 detection model
"""YOLOv5 detection model class for object detection tasks, supporting custom configurations and anchors."""
def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None, anchors=None):
"""Initializes YOLOv5 model with configuration file, input channels, number of classes, and custom anchors."""
super().__init__()
@ -332,14 +335,16 @@ Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibilit
class SegmentationModel(DetectionModel):
# YOLOv5 segmentation model
"""YOLOv5 segmentation model for object detection and segmentation tasks with configurable parameters."""
def __init__(self, cfg="yolov5s-seg.yaml", ch=3, nc=None, anchors=None):
"""Initializes a YOLOv5 segmentation model with configurable params: cfg (str) for configuration, ch (int) for channels, nc (int) for num classes, anchors (list)."""
super().__init__(cfg, ch, nc, anchors)
class ClassificationModel(BaseModel):
# YOLOv5 classification model
"""YOLOv5 classification model for image classification tasks, initialized with a config file or detection model."""
def __init__(self, cfg=None, model=None, nc=1000, cutoff=10):
"""Initializes YOLOv5 model with config file `cfg`, input channels `ch`, number of classes `nc`, and `cuttoff`
index.

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Parameters
nc: 80 # number of classes

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Overview:
# This pyproject.toml file manages the build, packaging, and distribution of the Ultralytics library.

View File

@ -9,12 +9,12 @@ opencv-python>=4.1.1
pillow>=10.3.0
psutil # system resources
PyYAML>=5.3.1
requests>=2.32.0
requests>=2.32.2
scipy>=1.4.1
thop>=0.1.1 # FLOPs computation
torch>=1.8.0 # see https://pytorch.org/get-started/locally (recommended)
torchvision>=0.9.0
tqdm>=4.64.0
tqdm>=4.66.3
ultralytics>=8.2.34 # https://ultralytics.com
# protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
Run YOLOv5 segmentation inference on images, videos, directories, streams, etc.
@ -164,7 +164,7 @@ def run(
p = Path(p) # to Path
save_path = str(save_dir / p.name) # im.jpg
txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}") # im.txt
s += "%gx%g " % im.shape[2:] # print string
s += "{:g}x{:g} ".format(*im.shape[2:]) # print string
imc = im0.copy() if save_crop else im0 # for save_crop
annotator = Annotator(im0, line_width=line_thickness, example=str(names))
if len(det):
@ -245,7 +245,7 @@ def run(
vid_writer[i].write(im0)
# Print time (inference-only)
LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1e3:.1f}ms")
# Print results
t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
Train a YOLOv5 segment model on a segment dataset. Models and datasets download automatically from the latest YOLOv5
release.
@ -325,10 +325,10 @@ def train(hyp, opt, device, callbacks):
compute_loss = ComputeLoss(model, overlap=overlap) # init loss class
# callbacks.run('on_train_start')
LOGGER.info(
f'Image sizes {imgsz} train, {imgsz} val\n'
f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
f"Image sizes {imgsz} train, {imgsz} val\n"
f"Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n"
f"Logging results to {colorstr('bold', save_dir)}\n"
f'Starting training for {epochs} epochs...'
f"Starting training for {epochs} epochs..."
)
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
# callbacks.run('on_train_epoch_start')
@ -405,7 +405,7 @@ def train(hyp, opt, device, callbacks):
# Log
if RANK in {-1, 0}:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = f"{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G" # (GB)
mem = f"{torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0:.3g}G" # (GB)
pbar.set_description(
("%11s" * 2 + "%11.4g" * 6)
% (f"{epoch}/{epochs - 1}", mem, *mloss, targets.shape[0], imgs.shape[-1])
@ -740,9 +740,9 @@ def main(opt, callbacks=Callbacks()):
# Plot results
plot_evolve(evolve_csv)
LOGGER.info(
f'Hyperparameter evolution finished {opt.evolve} generations\n'
f"Hyperparameter evolution finished {opt.evolve} generations\n"
f"Results saved to {colorstr('bold', save_dir)}\n"
f'Usage example: $ python train.py --hyp {evolve_yaml}'
f"Usage example: $ python train.py --hyp {evolve_yaml}"
)

View File

@ -15,7 +15,7 @@
"<br>\n",
" <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a>\n",
" <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/segment/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
" <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
" <a href=\"https://www.kaggle.com/models/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
"<br>\n",
"\n",
"This <a href=\"https://github.com/ultralytics/yolov5\">YOLOv5</a> 🚀 notebook by <a href=\"https://ultralytics.com\">Ultralytics</a> presents simple train, validate and predict examples to help start your AI adventure.<br>See <a href=\"https://github.com/ultralytics/yolov5/issues/new/choose\">GitHub</a> for community support or <a href=\"https://ultralytics.com/contact\">contact us</a> for professional support.\n",
@ -222,7 +222,7 @@
"Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n",
"<br><br>\n",
"\n",
"Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n",
"Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/datasets/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n",
"\n",
"- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n",
"automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n",
@ -523,7 +523,7 @@
"\n",
"YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
"\n",
"- **Notebooks** with free GPU: <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a> <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a> <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
"- **Notebooks** with free GPU: <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a> <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a> <a href=\"https://www.kaggle.com/models/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
"- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n",
"- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n",
"- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) <a href=\"https://hub.docker.com/r/ultralytics/yolov5\"><img src=\"https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker\" alt=\"Docker Pulls\"></a>\n"

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
Validate a trained YOLOv5 segment model on a segment dataset.
@ -121,7 +121,7 @@ def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, over
detections (array[N, 6]), x1, y1, x2, y2, conf, class
labels (array[M, 5]), class, x1, y1, x2, y2
Returns:
correct (array[N, 10]), for 10 IoU levels
correct (array[N, 10]), for 10 IoU levels.
"""
if masks:
if overlap:

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
Train a YOLOv5 model on a custom dataset. Models and datasets download automatically from the latest YOLOv5 release.
@ -357,10 +357,10 @@ def train(hyp, opt, device, callbacks):
compute_loss = ComputeLoss(model) # init loss class
callbacks.run("on_train_start")
LOGGER.info(
f'Image sizes {imgsz} train, {imgsz} val\n'
f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
f"Image sizes {imgsz} train, {imgsz} val\n"
f"Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n"
f"Logging results to {colorstr('bold', save_dir)}\n"
f'Starting training for {epochs} epochs...'
f"Starting training for {epochs} epochs..."
)
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
callbacks.run("on_train_epoch_start")
@ -434,7 +434,7 @@ def train(hyp, opt, device, callbacks):
# Log
if RANK in {-1, 0}:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = f"{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G" # (GB)
mem = f"{torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0:.3g}G" # (GB)
pbar.set_description(
("%11s" * 2 + "%11.4g" * 5)
% (f"{epoch}/{epochs - 1}", mem, *mloss, targets.shape[0], imgs.shape[-1])
@ -717,10 +717,10 @@ def main(opt, callbacks=Callbacks()):
"perspective": (True, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
"flipud": (True, 0.0, 1.0), # image flip up-down (probability)
"fliplr": (True, 0.0, 1.0), # image flip left-right (probability)
"mosaic": (True, 0.0, 1.0), # image mixup (probability)
"mosaic": (True, 0.0, 1.0), # image mosaic (probability)
"mixup": (True, 0.0, 1.0), # image mixup (probability)
"copy_paste": (True, 0.0, 1.0),
} # segment copy-paste (probability)
"copy_paste": (True, 0.0, 1.0), # segment copy-paste (probability)
}
# GA configs
pop_size = 50
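Each entry in this space is a `(mutable, lower, upper)` triple; a bounded Gaussian mutation over such a space could look like this sketch (function name and sigma are illustrative):

```python
import random

def mutate_hyp(hyp, meta, sigma=0.2):
    """Sketch: Gaussian-mutate each mutable hyperparameter, clipped to its (lower, upper) bounds."""
    out = {}
    for k, v in hyp.items():
        mutable, lo, hi = meta[k]
        out[k] = min(max(v * (1 + random.gauss(0, sigma)), lo), hi) if mutable else v
    return out
```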
@ -880,9 +880,9 @@ def main(opt, callbacks=Callbacks()):
# Plot results
plot_evolve(evolve_csv)
LOGGER.info(
f'Hyperparameter evolution finished {opt.evolve} generations\n'
f"Hyperparameter evolution finished {opt.evolve} generations\n"
f"Results saved to {colorstr('bold', save_dir)}\n"
f'Usage example: $ python train.py --hyp {evolve_yaml}'
f"Usage example: $ python train.py --hyp {evolve_yaml}"
)

tutorial.ipynb vendored
View File

@ -28,7 +28,7 @@
"\n",
" <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a>\n",
" <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
" <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
" <a href=\"https://www.kaggle.com/models/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
"\n",
"This <a href=\"https://github.com/ultralytics/yolov5\">YOLOv5</a> 🚀 notebook by <a href=\"https://ultralytics.com\">Ultralytics</a> presents simple train, validate and predict examples to help start your AI adventure.<br>We hope that the resources in this notebook will help you get the most out of YOLOv5. Please browse the YOLOv5 <a href=\"https://docs.ultralytics.com/yolov5\">Docs</a> for details, raise an issue on <a href=\"https://github.com/ultralytics/yolov5\">GitHub</a> for support, and join our <a href=\"https://ultralytics.com/discord\">Discord</a> community for questions and discussions!\n",
"\n",
@ -257,7 +257,7 @@
"Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n",
"<br><br>\n",
"\n",
"Train a YOLOv5s model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`.\n",
"Train a YOLOv5s model on the [COCO128](https://www.kaggle.com/datasets/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`.\n",
"\n",
"- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n",
"automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n",
@ -553,7 +553,7 @@
"\n",
"YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
"\n",
"- **Notebooks** with free GPU: <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a> <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a> <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
"- **Notebooks** with free GPU: <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a> <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a> <a href=\"https://www.kaggle.com/models/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
"- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n",
"- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n",
"- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) <a href=\"https://hub.docker.com/r/ultralytics/yolov5\"><img src=\"https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker\" alt=\"Docker Pulls\"></a>\n"

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""utils/initialization."""
import contextlib
@ -12,7 +12,8 @@ def emojis(str=""):
class TryExcept(contextlib.ContextDecorator):
# YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager
"""A context manager and decorator for error handling that prints an optional message with emojis on exception."""
def __init__(self, msg=""):
"""Initializes TryExcept with an optional message, used as a decorator or context manager for error handling."""
self.msg = msg
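Both supported forms, for reference (a sketch assuming the manager suppresses the exception and prints the message, per the class description):

```python
from utils import TryExcept  # import path assumed from the file above

@TryExcept("inference failed")
def risky():
    raise ValueError("boom")  # reported with the message instead of propagating

risky()

with TryExcept("cleanup failed"):
    1 / 0  # likewise suppressed and reported; execution continues
```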

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""Activation functions."""
import torch
@ -7,6 +7,8 @@ import torch.nn.functional as F
class SiLU(nn.Module):
"""Applies the Sigmoid-weighted Linear Unit (SiLU) activation function, also known as Swish."""
@staticmethod
def forward(x):
"""
@ -18,6 +20,8 @@ class SiLU(nn.Module):
class Hardswish(nn.Module):
"""Applies the Hardswish activation function, which is efficient for mobile and embedded devices."""
@staticmethod
def forward(x):
"""
@ -38,7 +42,11 @@ class Mish(nn.Module):
class MemoryEfficientMish(nn.Module):
"""Efficiently applies the Mish activation function using custom autograd for reduced memory usage."""
class F(torch.autograd.Function):
"""Implements a custom autograd function for memory-efficient Mish activation."""
@staticmethod
def forward(ctx, x):
"""Applies the Mish activation function, a smooth ReLU alternative, to the input tensor `x`."""

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""Image augmentation functions."""
import math
@ -18,7 +18,8 @@ IMAGENET_STD = 0.229, 0.224, 0.225 # RGB standard deviation
class Albumentations:
# YOLOv5 Albumentations class (optional, only used if package is installed)
"""Provides optional data augmentation for YOLOv5 using Albumentations library if installed."""
def __init__(self, size=640):
"""Initializes Albumentations class for optional data augmentation in YOLOv5 with specified input size."""
self.transform = None
@ -196,15 +197,7 @@ def random_perspective(
else: # affine
im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(im[:, :, ::-1]) # base
# ax[1].imshow(im2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
if n := len(targets):
use_segments = any(x.any() for x in segments) and len(segments) == n
new = np.zeros((n, 4))
if use_segments: # warp segments
@ -378,7 +371,8 @@ def classify_transforms(size=224):
class LetterBox:
# YOLOv5 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
"""Resizes and pads images to specified dimensions while maintaining aspect ratio for YOLOv5 preprocessing."""
def __init__(self, size=(640, 640), auto=False, stride=32):
"""Initializes a LetterBox object for YOLOv5 image preprocessing with optional auto sizing and stride
adjustment.
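The core resize-then-pad step behind such a class can be sketched as follows (function name and defaults illustrative; YOLOv5 conventionally pads with gray 114):

```python
import cv2
import numpy as np

def letterbox(im, new_shape=(640, 640), color=(114, 114, 114)):
    """Sketch: scale to fit inside new_shape preserving aspect ratio, then pad symmetrically."""
    h0, w0 = im.shape[:2]
    r = min(new_shape[0] / h0, new_shape[1] / w0)
    h, w = round(h0 * r), round(w0 * r)
    im = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
    out = np.full((new_shape[0], new_shape[1], 3), color, dtype=im.dtype)
    top, left = (new_shape[0] - h) // 2, (new_shape[1] - w) // 2
    out[top : top + h, left : left + w] = im
    return out
```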
@ -405,7 +399,8 @@ class LetterBox:
class CenterCrop:
# YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()])
"""Applies center crop to an image, resizing it to the specified size while maintaining aspect ratio."""
def __init__(self, size=640):
"""Initializes CenterCrop for image preprocessing, accepting single int or tuple for size, defaults to 640."""
super().__init__()
@ -424,7 +419,8 @@ class CenterCrop:
class ToTensor:
# YOLOv5 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
"""Converts BGR np.array image from HWC to RGB CHW format, normalizes to [0, 1], and supports FP16 if half=True."""
def __init__(self, half=False):
"""Initializes ToTensor for YOLOv5 image preprocessing, with optional half precision (half=True for FP16)."""
super().__init__()

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""AutoAnchor utils."""
import random

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""Auto-batch utils."""
from copy import deepcopy

View File

@ -0,0 +1 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

View File

@ -1,4 +1,5 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Resume all interrupted training runs in the yolov5/ dir, including DDP runs
# Usage: $ python utils/aws/resume.py

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""Callback utils."""
import threading

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""Dataloaders and dataset utils."""
import contextlib
@ -93,7 +93,7 @@ def exif_size(img):
def exif_transpose(image):
"""
Transpose a PIL image accordingly if it has an EXIF Orientation tag.
Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()
Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose().
:param image: The image to transpose.
:return: An image.
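A sketch of the in-place approach: read EXIF tag 0x0112 and map it to a PIL transpose (mapping assumed from the Pillow original):

```python
from PIL import Image

def exif_transpose_sketch(image):
    """Sketch: rotate/flip a PIL image per its EXIF Orientation tag, then drop the tag."""
    exif = image.getexif()
    orientation = exif.get(0x0112, 1)  # 0x0112 is the EXIF Orientation tag
    method = {
        2: Image.Transpose.FLIP_LEFT_RIGHT, 3: Image.Transpose.ROTATE_180,
        4: Image.Transpose.FLIP_TOP_BOTTOM, 5: Image.Transpose.TRANSPOSE,
        6: Image.Transpose.ROTATE_270, 7: Image.Transpose.TRANSVERSE,
        8: Image.Transpose.ROTATE_90,
    }.get(orientation)
    if method is not None:
        image = image.transpose(method)
        del exif[0x0112]
        image.info["exif"] = exif.tobytes()
    return image
```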
@ -131,6 +131,8 @@ def seed_worker(worker_id):
# Inherit from DistributedSampler and override iterator
# https://github.com/pytorch/pytorch/blob/master/torch/utils/data/distributed.py
class SmartDistributedSampler(distributed.DistributedSampler):
"""A distributed sampler ensuring deterministic shuffling and balanced data distribution across GPUs."""
def __iter__(self):
"""Yields indices for distributed data sampling, shuffled deterministically based on epoch and seed."""
g = torch.Generator()
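Deterministic, epoch-dependent shuffling of this kind typically seeds a generator from both the fixed seed and the current epoch so every rank draws the same permutation; a self-contained sketch (not the full `__iter__`):

```python
import torch

def rank_indices(n, rank, world_size, seed, epoch):
    """Sketch: every rank builds the same epoch-seeded permutation, then takes an interleaved slice."""
    g = torch.Generator()
    g.manual_seed(seed + epoch)  # identical permutation across ranks for a given epoch
    perm = torch.randperm(n, generator=g).tolist()
    return perm[rank::world_size]
```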
@ -208,6 +210,7 @@ def create_dataloader(
shuffle=shuffle and sampler is None,
num_workers=nw,
sampler=sampler,
drop_last=quad,
pin_memory=PIN_MEMORY,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn,
worker_init_fn=seed_worker,
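The `seed_worker` hook here presumably follows the standard PyTorch reproducibility recipe, deriving NumPy and `random` seeds from the worker's torch seed; a sketch:

```python
import random
import numpy as np
import torch

def seed_worker(worker_id):
    """Sketch: per-worker seeding from torch's initial seed (PyTorch reproducibility recipe)."""
    worker_seed = torch.initial_seed() % 2**32
    np.random.seed(worker_seed)
    random.seed(worker_seed)
```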
@ -259,7 +262,8 @@ class _RepeatSampler:
class LoadScreenshots:
# YOLOv5 screenshot dataloader, i.e. `python detect.py --source "screen 0 100 100 512 256"`
"""Loads and processes screenshots for YOLOv5 detection from specified screen regions using mss."""
def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None):
"""
Initializes a screenshot dataloader for YOLOv5 with specified source region, image size, stride, auto, and
@ -316,7 +320,7 @@ class LoadScreenshots:
class LoadImages:
"""YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`"""
"""YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`."""
def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
"""Initializes YOLOv5 loader for images/videos, supporting glob patterns, directories, and lists of paths."""
@ -352,8 +356,7 @@ class LoadImages:
else:
self.cap = None
assert self.nf > 0, (
f"No images or videos found in {p}. "
f"Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}"
f"No images or videos found in {p}. Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}"
)
def __iter__(self):
@ -428,7 +431,8 @@ class LoadImages:
class LoadStreams:
# YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
"""Loads and processes video streams for YOLOv5, supporting various sources including YouTube and IP cameras."""
def __init__(self, sources="file.streams", img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
"""Initializes a stream loader for processing video streams with YOLOv5, supporting various sources including
YouTube.
@ -531,7 +535,8 @@ def img2label_paths(img_paths):
class LoadImagesAndLabels(Dataset):
# YOLOv5 train_loader/val_loader, loads images and labels for training and validation
"""Loads images and their corresponding labels for training and validation in YOLOv5."""
cache_version = 0.6 # dataset labels *.cache version
rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4]
@ -683,16 +688,17 @@ class LoadImagesAndLabels(Dataset):
b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabyte
self.im_hw0, self.im_hw = [None] * n, [None] * n
fcn = self.cache_images_to_disk if cache_images == "disk" else self.load_image
results = ThreadPool(NUM_THREADS).imap(lambda i: (i, fcn(i)), self.indices)
pbar = tqdm(results, total=len(self.indices), bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0)
for i, x in pbar:
if cache_images == "disk":
b += self.npy_files[i].stat().st_size
else: # 'ram'
self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i)
b += self.ims[i].nbytes * WORLD_SIZE
pbar.desc = f"{prefix}Caching images ({b / gb:.1f}GB {cache_images})"
pbar.close()
with ThreadPool(NUM_THREADS) as pool:
results = pool.imap(lambda i: (i, fcn(i)), self.indices)
pbar = tqdm(results, total=len(self.indices), bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0)
for i, x in pbar:
if cache_images == "disk":
b += self.npy_files[i].stat().st_size
else: # 'ram'
self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i)
b += self.ims[i].nbytes * WORLD_SIZE
pbar.desc = f"{prefix}Caching images ({b / gb:.1f}GB {cache_images})"
pbar.close()
def check_cache_ram(self, safety_margin=0.1, prefix=""):
"""Checks if available RAM is sufficient for caching images, adjusting for a safety margin."""
@ -707,8 +713,8 @@ class LoadImagesAndLabels(Dataset):
cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question
if not cache:
LOGGER.info(
f'{prefix}{mem_required / gb:.1f}GB RAM required, '
f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, '
f"{prefix}{mem_required / gb:.1f}GB RAM required, "
f"{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, "
f"{'caching images ✅' if cache else 'not caching images ⚠️'}"
)
return cache
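The decision itself reduces to comparing estimated bytes against available memory; a minimal sketch using `psutil` (function name and inputs are illustrative):

```python
import psutil  # listed in the requirements shown above

def can_cache_in_ram(bytes_per_image, n_images, safety_margin=0.1):
    """Sketch: True if the estimated dataset size, plus a safety margin, fits in available RAM."""
    mem = psutil.virtual_memory()
    return bytes_per_image * n_images * (1 + safety_margin) < mem.available
```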
@ -768,8 +774,7 @@ class LoadImagesAndLabels(Dataset):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp["mosaic"]
if mosaic:
if mosaic := self.mosaic and random.random() < hyp["mosaic"]:
# Load mosaic
img, labels = self.load_mosaic(index)
shapes = None
@ -1103,7 +1108,6 @@ def extract_boxes(path=DATASETS_DIR / "coco128"):
def autosplit(path=DATASETS_DIR / "coco128/images", weights=(0.9, 0.1, 0.0), annotated_only=False):
"""Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.dataloaders import *; autosplit()
Arguments:
path: Path to images directory
@ -1156,8 +1160,7 @@ def verify_image_label(args):
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...)
lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
lb = np.array(lb, dtype=np.float32)
nl = len(lb)
if nl:
if nl := len(lb):
assert lb.shape[1] == 5, f"labels require 5 columns, {lb.shape[1]} columns detected"
assert (lb >= 0).all(), f"negative label values {lb[lb < 0]}"
assert (lb[:, 1:] <= 1).all(), f"non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}"

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""Download utils."""
import logging

View File

@ -4,7 +4,7 @@
## Requirements
[Flask](https://palletsprojects.com/p/flask/) is required. Install with:
[Flask](https://palletsprojects.com/projects/flask/) is required. Install with:
```shell
$ pip install Flask

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""Perform test request."""
import pprint

View File

@ -1,4 +1,4 @@
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""Run a Flask REST API exposing one or more YOLOv5s models."""
import argparse

Some files were not shown because too many files have changed in this diff.