diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 76b426a19..3bd941761 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license name: 🐛 Bug Report # title: " " diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index e23dfcf9c..d05d057dc 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license blank_issues_enabled: true contact_links: diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml index 9282e62d2..bf201809b 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.yml +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license name: 🚀 Feature Request description: Suggest a YOLOv5 idea diff --git a/.github/ISSUE_TEMPLATE/question.yml b/.github/ISSUE_TEMPLATE/question.yml index 5b62af675..0787bbea9 100644 --- a/.github/ISSUE_TEMPLATE/question.yml +++ b/.github/ISSUE_TEMPLATE/question.yml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license name: ❓ Question description: Ask a YOLOv5 question diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 061d61f1b..233db72b0 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Dependabot for package version updates # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 639d0449c..ae24eb11c 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # YOLOv5 Continuous Integration (CI) GitHub Actions tests name: YOLOv5 CI @@ -137,14 +138,14 @@ jobs: Summary: runs-on: ubuntu-latest - needs: [Benchmarks, Tests] # Add job names that you want to check for failure - if: always() # This ensures the job runs even if previous jobs fail + needs: [Benchmarks, Tests] + if: always() steps: - name: Check for failure and notify if: (needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure' || needs.Benchmarks.result == 'cancelled' || needs.Tests.result == 'cancelled') && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') - uses: slackapi/slack-github-action@v1.26.0 + uses: slackapi/slack-github-action@v2.0.0 with: + webhook-type: incoming-webhook + webhook: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} payload: | - {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n"} - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} + text: " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* 
https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n" diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index 2fb5bfb8f..61b944167 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -1,4 +1,5 @@ -# Ultralytics YOLO 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Ultralytics Contributor License Agreement (CLA) action https://docs.ultralytics.com/help/CLA # This workflow automatically requests Pull Requests (PR) authors to sign the Ultralytics CLA before PRs can be merged @@ -26,11 +27,11 @@ jobs: steps: - name: CLA Assistant if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I sign the CLA') || github.event_name == 'pull_request_target' - uses: contributor-assistant/github-action@v2.5.1 + uses: contributor-assistant/github-action@v2.6.1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Must be repository secret PAT - PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} + PERSONAL_ACCESS_TOKEN: ${{ secrets._GITHUB_TOKEN }} with: path-to-signatures: "signatures/version1/cla.json" path-to-document: "https://docs.ultralytics.com/help/CLA" # CLA document diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml deleted file mode 100644 index 77054fd0d..000000000 --- a/.github/workflows/codeql-analysis.yml +++ /dev/null @@ -1,56 +0,0 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license -# This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities. -# https://github.com/github/codeql-action - -name: "CodeQL" - -on: - schedule: - - cron: "0 0 1 * *" # Runs at 00:00 UTC on the 1st of every month - workflow_dispatch: - -jobs: - analyze: - name: Analyze - runs-on: ubuntu-latest - - strategy: - fail-fast: false - matrix: - language: ["python"] - # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] - # Learn more: - # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v3 - with: - languages: ${{ matrix.language }} - # If you wish to specify custom queries, you can do so here or in a config file. - # By default, queries listed here will override any specified in a config file. - # Prefix the list here with "+" to use these queries and those in the config file. - # queries: ./path/to/local/query, your-org/your-repo/queries@main - - # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v3 - - # ℹ️ Command-line programs to run using the OS shell. 
- # 📚 https://git.io/JvXDl - - # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines - # and modify them (or add more) to build your code if your project - # uses a compiled language - - #- run: | - # make bootstrap - # make release - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 2cd1c13f7..b43c2bc28 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Builds ultralytics/yolov5:latest images on DockerHub https://hub.docker.com/r/ultralytics/yolov5 name: Publish Docker Images diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index b326e778d..10891c878 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -1,4 +1,5 @@ -# Ultralytics 🚀 - AGPL-3.0 License https://ultralytics.com/license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Ultralytics Actions https://github.com/ultralytics/actions # This workflow automatically formats code and documentation in PRs to official Ultralytics standards @@ -7,7 +8,7 @@ name: Ultralytics Actions on: issues: types: [opened] - pull_request_target: + pull_request: branches: [main, master] types: [opened, closed, synchronize, review_requested] @@ -18,12 +19,41 @@ jobs: - name: Run Ultralytics Formatting uses: ultralytics/actions@main with: - token: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} # note GITHUB_TOKEN automatically generated + token: ${{ secrets._GITHUB_TOKEN || secrets.GITHUB_TOKEN }} labels: true # autolabel issues and PRs python: true # format Python code and docstrings prettier: true # format YAML, JSON, Markdown and CSS spelling: true # check spelling links: false # check broken links summary: true # print PR summary with GPT4o (requires 'openai_api_key') - openai_azure_api_key: ${{ secrets.OPENAI_AZURE_API_KEY }} - openai_azure_endpoint: ${{ secrets.OPENAI_AZURE_ENDPOINT }} + openai_api_key: ${{ secrets.OPENAI_API_KEY }} + first_issue_response: | + 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://docs.ultralytics.com/yolov5/) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/) all the way to advanced concepts like [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/). + + If this is a 🐛 Bug Report, please provide a **minimum reproducible example** to help us debug it. + + If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips/). + + ## Requirements + + [**Python>=3.8.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/). 
To get started: + ```bash + git clone https://github.com/ultralytics/yolov5 # clone + cd yolov5 + pip install -r requirements.txt # install + ``` + + ## Environments + + YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): + + - **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle + - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/) + - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/) + - **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) Docker Pulls + + ## Status + + YOLOv5 CI + + If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml deleted file mode 100644 index 212211d24..000000000 --- a/.github/workflows/greetings.yml +++ /dev/null @@ -1,65 +0,0 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license - -name: Greetings - -on: - pull_request_target: - types: [opened] - issues: - types: [opened] - -jobs: - greeting: - runs-on: ubuntu-latest - steps: - - uses: actions/first-interaction@v1 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - pr-message: | - 👋 Hello @${{ github.actor }}, thank you for submitting a YOLOv5 🚀 PR! To allow your work to be integrated as seamlessly as possible, we advise you to: - - - ✅ Verify your PR is **up-to-date** with `ultralytics/yolov5` `master` branch. If your PR is behind you can update your code by clicking the 'Update branch' button or by running `git pull` and `git merge master` locally. - - ✅ Verify all YOLOv5 Continuous Integration (CI) **checks are passing**. - - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee - - issue-message: | - 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://docs.ultralytics.com/yolov5/) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/) all the way to advanced concepts like [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/). - - If this is a 🐛 Bug Report, please provide a **minimum reproducible example** to help us debug it. 
- - If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips//). - - ## Requirements - - [**Python>=3.8.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/). To get started: - ```bash - git clone https://github.com/ultralytics/yolov5 # clone - cd yolov5 - pip install -r requirements.txt # install - ``` - - ## Environments - - YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - - - **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle - - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/) - - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/) - - **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) Docker Pulls - - ## Status - - YOLOv5 CI - - If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. - - ## Introducing YOLOv8 🚀 - - We're excited to announce the launch of our latest state-of-the-art (SOTA) object detection model for 2023 - [YOLOv8](https://github.com/ultralytics/ultralytics) 🚀! - - Designed to be fast, accurate, and easy to use, YOLOv8 is an ideal choice for a wide range of object detection, image segmentation and image classification tasks. With YOLOv8, you'll be able to quickly and accurately detect objects in real-time, streamline your workflows, and achieve new levels of accuracy in your projects. 
- - Check out our [YOLOv8 Docs](https://docs.ultralytics.com/) for details and get started with: - ```bash - pip install ultralytics - ``` diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 82c1f3f23..2da3c066f 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Continuous Integration (CI) GitHub Actions tests broken link checker using https://github.com/lycheeverse/lychee # Ignores the following status codes to reduce false positives: # - 403(OpenVINO, 'forbidden') @@ -23,17 +24,15 @@ jobs: - name: Download and install lychee run: | LYCHEE_URL=$(curl -s https://api.github.com/repos/lycheeverse/lychee/releases/latest | grep "browser_download_url" | grep "x86_64-unknown-linux-gnu.tar.gz" | cut -d '"' -f 4) - curl -L $LYCHEE_URL -o lychee.tar.gz - tar xzf lychee.tar.gz - sudo mv lychee /usr/local/bin + curl -L $LYCHEE_URL | tar xz -C /usr/local/bin - name: Test Markdown and HTML links with retry - uses: nick-invision/retry@v3 + uses: ultralytics/actions/retry@main with: timeout_minutes: 5 - retry_wait_seconds: 60 - max_attempts: 3 - command: | + retry_delay_seconds: 60 + retries: 2 + run: | lychee \ --scheme 'https' \ --timeout 60 \ @@ -45,16 +44,16 @@ jobs: --github-token ${{ secrets.GITHUB_TOKEN }} \ --header "User-Agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.183 Safari/537.36" \ './**/*.md' \ - './**/*.html' + './**/*.html' | tee -a $GITHUB_STEP_SUMMARY - name: Test Markdown, HTML, YAML, Python and Notebook links with retry if: github.event_name == 'workflow_dispatch' - uses: nick-invision/retry@v3 + uses: ultralytics/actions/retry@main with: timeout_minutes: 5 - retry_wait_seconds: 60 - max_attempts: 3 - command: | + retry_delay_seconds: 60 + retries: 2 + run: | lychee \ --scheme 'https' \ --timeout 60 \ @@ -70,4 +69,4 @@ jobs: './**/*.yml' \ './**/*.yaml' \ './**/*.py' \ - './**/*.ipynb' + './**/*.ipynb' | tee -a $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/merge-main-into-prs.yml b/.github/workflows/merge-main-into-prs.yml index 2cd4b028c..034c6c143 100644 --- a/.github/workflows/merge-main-into-prs.yml +++ b/.github/workflows/merge-main-into-prs.yml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Automatically merges repository 'main' branch into all open PRs to keep them up-to-date # Action runs on updates to main branch so when one PR merges to main all others update @@ -6,10 +7,9 @@ name: Merge main into PRs on: workflow_dispatch: - push: - branches: - - main - - master + # push: + # branches: + # - ${{ github.event.repository.default_branch }} jobs: Merge: @@ -22,35 +22,51 @@ jobs: fetch-depth: 0 - uses: actions/setup-python@v5 with: - python-version: "3.11" - cache: "pip" # caching pip dependencies + python-version: "3.x" + cache: "pip" - name: Install requirements run: | pip install pygithub - - name: Merge main into PRs + - name: Merge default branch into PRs shell: python run: | from github import Github import os - # Authenticate with the GitHub Token g = Github(os.getenv('GITHUB_TOKEN')) - - # Get the repository dynamically repo = g.get_repo(os.getenv('GITHUB_REPOSITORY')) - # List all open pull requests - open_pulls = repo.get_pulls(state='open', sort='created') + # Fetch the default branch name + default_branch_name = repo.default_branch + 
default_branch = repo.get_branch(default_branch_name) - for pr in open_pulls: - # Compare PR head with main to see if it's behind + for pr in repo.get_pulls(state='open', sort='created'): try: - # Merge main into the PR branch - success = pr.update_branch() - assert success, "Branch update failed" - print(f"Merged 'master' into PR #{pr.number} ({pr.head.ref}) successfully.") + # Get full names for repositories and branches + base_repo_name = repo.full_name + head_repo_name = pr.head.repo.full_name + base_branch_name = pr.base.ref + head_branch_name = pr.head.ref + + # Check if PR is behind the default branch + comparison = repo.compare(default_branch.commit.sha, pr.head.sha) + + if comparison.behind_by > 0: + print(f"⚠️ PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}) is behind {default_branch_name} by {comparison.behind_by} commit(s).") + + # Attempt to update the branch + try: + success = pr.update_branch() + assert success, "Branch update failed" + print(f"✅ Successfully merged '{default_branch_name}' into PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}).") + except Exception as update_error: + print(f"❌ Could not update PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}): {update_error}") + print(" This might be due to branch protection rules or insufficient permissions.") + else: + print(f"✅ PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}) is up to date with {default_branch_name}.") except Exception as e: - print(f"Could not merge 'master' into PR #{pr.number} ({pr.head.ref}): {e}") + print(f"❌ Could not process PR #{pr.number}: {e}") + env: - GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} + GITHUB_TOKEN: ${{ secrets._GITHUB_TOKEN }} GITHUB_REPOSITORY: ${{ github.repository }} diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 374bc01ab..fda092a44 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license name: Close stale issues on: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 556c554a2..7b9c1cd64 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -64,10 +64,10 @@ When asking a question, people will be better able to provide help if you provid - ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself - ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem -In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code should be: +In addition to the above requirements, for [Ultralytics](https://www.ultralytics.com/) to provide assistance your code should be: - ✅ **Current** – Verify that your code is up-to-date with the current GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new copy to ensure your problem has not already been resolved by previous commits. -- ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️. +- ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this repository. 
[Ultralytics](https://www.ultralytics.com/) does not provide support for custom code ⚠️. If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛 **Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and provide a [minimum reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/) to help us better understand and diagnose your problem. diff --git a/README.md b/README.md index ac8b63af8..46b2a833e 100644 --- a/README.md +++ b/README.md @@ -1,28 +1,28 @@


-[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/) +[中文](https://docs.ultralytics.com/zh) | [한국어](https://docs.ultralytics.com/ko) | [日本語](https://docs.ultralytics.com/ja) | [Русский](https://docs.ultralytics.com/ru) | [Deutsch](https://docs.ultralytics.com/de) | [Français](https://docs.ultralytics.com/fr) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt) | [Türkçe](https://docs.ultralytics.com/tr) | [Tiếng Việt](https://docs.ultralytics.com/vi) | [العربية](https://docs.ultralytics.com/ar)
YOLOv5 CI YOLOv5 Citation Docker Pulls - Discord Ultralytics Forums + Discord Ultralytics Forums Ultralytics Reddit
Run on Gradient Open In Colab - Open In Kaggle + Open In Kaggle

-YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. +YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. -We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions! +We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions! -To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license). +To request an Enterprise License please complete the form at [Ultralytics Licensing](https://www.ultralytics.com/license).
Ultralytics GitHub @@ -37,32 +37,32 @@ To request an Enterprise License please complete the form at [Ultralytics Licens Ultralytics BiliBili - Ultralytics Discord + Ultralytics Discord

-## YOLOv8 🚀 NEW
+## YOLO11 🚀 NEW
-We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection, image segmentation and image classification tasks. +We are excited to unveil the launch of Ultralytics YOLO11 🚀, the latest advancement in our state-of-the-art (SOTA) vision models! Available now at **[GitHub](https://github.com/ultralytics/ultralytics)**, YOLO11 builds on our legacy of speed, precision, and ease of use. Whether you're tackling object detection, image segmentation, or image classification, YOLO11 delivers the performance and versatility needed to excel in diverse applications. -See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with: +Get started today and unlock the full potential of YOLO11! Visit the [Ultralytics Docs](https://docs.ultralytics.com/) for comprehensive guides and resources: -[![PyPI version](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics) +[![PyPI version](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://www.pepy.tech/projects/ultralytics) ```bash pip install ultralytics ```
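For readers of this diff, the new section's `pip install ultralytics` pairs with a few lines of Python; a minimal sketch of the package's documented `YOLO` entry point, where the `yolo11n.pt` weight name and sample image URL are illustrative assumptions rather than part of this change:

```python
# Minimal YOLO11 inference sketch; the weight name and image URL are illustrative.
from ultralytics import YOLO

model = YOLO("yolo11n.pt")  # downloads and loads a pretrained nano model
results = model("https://ultralytics.com/images/zidane.jpg")  # run inference on one image
results[0].show()  # display the annotated prediction
```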
## Documentation
-See the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5) for full documentation on training, testing and deployment. See below for quickstart examples. +See the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5/) for full documentation on training, testing and deployment. See below for quickstart examples.
Install @@ -80,7 +80,7 @@ pip install -r requirements.txt # install
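After the install block above, a quick sanity check against the stated Python>=3.8.0 and PyTorch>=1.8 floors can save a debugging round trip; this sketch uses only the standard library and PyTorch:

```python
# Verify the environment meets the README's stated minimums.
import platform

import torch

print(f"Python {platform.python_version()}")  # expected >= 3.8.0
print(f"PyTorch {torch.__version__}")  # expected >= 1.8
print(f"CUDA available: {torch.cuda.is_available()}")  # optional GPU check
```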
Inference -YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). +YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). ```python import torch @@ -123,7 +123,7 @@ python detect.py --weights yolov5s.pt --source 0 #
Training -The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are 1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) times faster). Use the largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. +The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are 1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training/) times faster). Use the largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. ```bash python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 @@ -140,56 +140,58 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
Tutorials -- [Train Custom Data](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data) 🚀 RECOMMENDED +- [Train Custom Data](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/) 🚀 RECOMMENDED - [Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips/) ☘️ -- [Multi-GPU Training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) -- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 🌟 NEW -- [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/tutorials/model_export) 🚀 -- [NVIDIA Jetson platform Deployment](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano) 🌟 NEW -- [Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) -- [Model Ensembling](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling) -- [Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity) -- [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution) -- [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers) -- [Architecture Summary](https://docs.ultralytics.com/yolov5/tutorials/architecture_description) 🌟 NEW -- [Roboflow for Datasets, Labeling, and Active Learning](https://docs.ultralytics.com/yolov5/tutorials/roboflow_datasets_integration) -- [ClearML Logging](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) 🌟 NEW -- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization) 🌟 NEW -- [Comet Logging](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration) 🌟 NEW +- [Multi-GPU Training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training/) +- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/) 🌟 NEW +- [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/tutorials/model_export/) 🚀 +- [NVIDIA Jetson platform Deployment](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano/) 🌟 NEW +- [Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/) +- [Model Ensembling](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling/) +- [Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity/) +- [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/) +- [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers/) +- [Architecture Summary](https://docs.ultralytics.com/yolov5/tutorials/architecture_description/) 🌟 NEW +- [Ultralytics HUB to train and deploy YOLO](https://www.ultralytics.com/hub) 🚀 RECOMMENDED +- [ClearML Logging](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration/) +- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization/) +- [Comet Logging](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration/) 🌟 NEW
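Several of the tutorials above, PyTorch Hub loading in particular, reduce to a few lines; a sketch using the documented `torch.hub` entry point (model name and image URL are illustrative):

```python
# PyTorch Hub loading as covered by the linked tutorial; inputs are illustrative.
import torch

model = torch.hub.load("ultralytics/yolov5", "yolov5s")  # pretrained YOLOv5s
results = model("https://ultralytics.com/images/zidane.jpg")  # accepts paths, URLs, PIL, OpenCV, numpy
results.print()  # per-image detection summary
```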
## Integrations
+Our key integrations with leading AI platforms extend the functionality of Ultralytics' offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics, in collaboration with [W&B](https://docs.wandb.ai/guides/integrations/ultralytics/), [Comet](https://bit.ly/yolov8-readme-comet), [Roboflow](https://roboflow.com/?ref=ultralytics) and [OpenVINO](https://docs.ultralytics.com/integrations/openvino/), can optimize your AI workflow. +
+ [banner: Ultralytics active learning integrations]

+ [logos: Ultralytics HUB, ClearML, Comet ML, Neural Magic]
-| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW |
-| :---: | :---: | :---: | :---: |
-| Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions | Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |
+| Ultralytics HUB 🚀 | W&B | Comet ⭐ NEW | Neural Magic |
+| :---: | :---: | :---: | :---: |
+| Streamline YOLO workflows: Label, train, and deploy effortlessly with [Ultralytics HUB](https://www.ultralytics.com/hub). Try now! | Track experiments, hyperparameters, and results with [Weights & Biases](https://docs.wandb.ai/guides/integrations/ultralytics/) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualize and debug predictions | Run YOLO11 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |

## Ultralytics HUB
-Experience seamless AI with [Ultralytics HUB](https://ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now!
+Experience seamless AI with [Ultralytics HUB](https://www.ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://www.ultralytics.com/app-install). Start your journey for **Free** now!

## Why YOLOv5
@@ -206,7 +208,7 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We Figure Notes - **COCO AP val** denotes mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536. -- **GPU Speed** measures average inference time per image on [COCO val2017](http://cocodataset.org) dataset using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch-size 32. +- **GPU Speed** measures average inference time per image on [COCO val2017](http://cocodataset.org) dataset using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p4/) V100 instance at batch-size 32. - **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 8. - **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` @@ -233,8 +235,8 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We - All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). - **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
-- **Speed** averaged over COCO val images using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included. Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`
-- **TTA** [Test Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) includes reflection and scale augmentations. Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
+- **Speed** averaged over COCO val images using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p4/) instance. NMS times (~1 ms/img) not included. Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`
+- **TTA** [Test Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/) includes reflection and scale augmentations. Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
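The reproduce commands above also have a programmatic form; a hedged sketch that assumes `val.run()` accepts these keyword names (check `val.py` in this repository for the exact signature):

```python
# Programmatic equivalent of the mAP reproduce command; keyword names assumed from val.py.
import val

val.run(data="coco.yaml", weights="yolov5s.pt", imgsz=640, conf_thres=0.001, iou_thres=0.65)
```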
@@ -246,7 +248,7 @@ Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7. Segmentation Checkpoints
@@ -415,7 +417,7 @@ Get started in seconds with our verified environments. Click each icon below for - + @@ -430,7 +432,7 @@ Get started in seconds with our verified environments. Click each icon below for ##
Contribute
-We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! +We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out the [YOLOv5 Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! @@ -441,12 +443,12 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare Ultralytics offers two licensing options to accommodate diverse use cases: -- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/licenses/) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for more details. -- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://ultralytics.com/license). +- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/license) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for more details. +- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://www.ultralytics.com/license). ##
Contact
-For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://ultralytics.com/discord) community for questions and discussions! +For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://discord.com/invite/ultralytics) community for questions and discussions!
@@ -462,7 +464,7 @@ For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https:/ Ultralytics BiliBili - Ultralytics Discord + Ultralytics Discord
[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation diff --git a/README.zh-CN.md b/README.zh-CN.md index bb45872a0..b76c66d76 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -1,27 +1,28 @@


-[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/) +[中文](https://docs.ultralytics.com/zh) | [한국어](https://docs.ultralytics.com/ko) | [日本語](https://docs.ultralytics.com/ja) | [Русский](https://docs.ultralytics.com/ru) | [Deutsch](https://docs.ultralytics.com/de) | [Français](https://docs.ultralytics.com/fr) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt) | [Türkçe](https://docs.ultralytics.com/tr) | [Tiếng Việt](https://docs.ultralytics.com/vi) | [العربية](https://docs.ultralytics.com/ar)
YOLOv5 CI YOLOv5 Citation Docker Pulls + Discord Ultralytics Forums Ultralytics Reddit
Run on Gradient Open In Colab - Open In Kaggle + Open In Kaggle

-YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics 对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 +YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics 对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 -我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 文档 了解详细信息,在 GitHub 上提交问题以获得支持,并加入我们的 Discord 社区进行问题和讨论! +我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 文档 了解详细信息,在 GitHub 上提交问题以获得支持,并加入我们的 Discord 社区进行问题和讨论! -如需申请企业许可,请在 [Ultralytics Licensing](https://ultralytics.com/license) 处填写表格 +如需申请企业许可,请在 [Ultralytics Licensing](https://www.ultralytics.com/license) 处填写表格
Ultralytics GitHub @@ -36,25 +37,25 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics BiliBili - Ultralytics Discord + Ultralytics Discord
-## YOLOv8 🚀 新品
+## YOLO11 🚀 全新发布
-我们很高兴宣布 Ultralytics YOLOv8 🚀 的发布,这是我们新推出的领先水平、最先进的(SOTA)模型,发布于 **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**。 YOLOv8 旨在快速、准确且易于使用,使其成为广泛的物体检测、图像分割和图像分类任务的极佳选择。 +我们很高兴宣布推出 Ultralytics YOLO11 🚀,这是我们最先进视觉模型的最新进展!现已在 **[GitHub](https://github.com/ultralytics/ultralytics)** 上发布。YOLO11 在速度、精度和易用性方面进一步提升,无论是处理目标检测、图像分割还是图像分类任务,YOLO11 都具备出色的性能和多功能性,助您在各种应用中脱颖而出。 -请查看 [YOLOv8 文档](https://docs.ultralytics.com)了解详细信息,并开始使用: +立即开始,解锁 YOLO11 的全部潜力!访问 [Ultralytics 文档](https://docs.ultralytics.com/) 获取全面的指南和资源: -[![PyPI 版本](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![下载量](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics) +[![PyPI 版本](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![下载量](https://static.pepy.tech/badge/ultralytics)](https://www.pepy.tech/projects/ultralytics) -```commandline +```bash pip install ultralytics ```
## 文档
@@ -77,7 +78,7 @@ pip install -r requirements.txt # install
推理 -使用 YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 推理。最新 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 +使用 YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/) 推理。最新 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 ```python import torch @@ -121,7 +122,7 @@ python detect.py --weights yolov5s.pt --source 0 # 训练 下面的命令重现 YOLOv5 在 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) 数据集上的结果。 最新的 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 和 [数据集](https://github.com/ultralytics/yolov5/tree/master/data) -将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 YOLOv5n/s/m/l/x 在 V100 GPU 的训练时间为 1/2/4/6/8 天( [多GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) 训练速度更快)。 尽可能使用更大的 `--batch-size` ,或通过 `--batch-size -1` 实现 YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092) 。下方显示的 batchsize 适用于 V100-16GB。 +将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 YOLOv5n/s/m/l/x 在 V100 GPU 的训练时间为 1/2/4/6/8 天( [多GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training/) 训练速度更快)。 尽可能使用更大的 `--batch-size` ,或通过 `--batch-size -1` 实现 YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092) 。下方显示的 batchsize 适用于 V100-16GB。 ```bash python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 @@ -138,56 +139,58 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
教程 -- [训练自定义数据](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data) 🚀 推荐 -- [获得最佳训练结果的技巧](https://docs.ultralytics.com/guides/model-training-tips/) ☘️ -- [多GPU训练](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) -- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 🌟 新 -- [TFLite,ONNX,CoreML,TensorRT导出](https://docs.ultralytics.com/yolov5/tutorials/model_export) 🚀 -- [NVIDIA Jetson平台部署](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano) 🌟 新 -- [测试时增强 (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) -- [模型集成](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling) -- [模型剪枝/稀疏](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity) -- [超参数进化](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution) -- [冻结层的迁移学习](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers) -- [架构概述](https://docs.ultralytics.com/yolov5/tutorials/architecture_description) 🌟 新 -- [Roboflow用于数据集、标注和主动学习](https://docs.ultralytics.com/yolov5/tutorials/roboflow_datasets_integration) -- [ClearML日志记录](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) 🌟 新 -- [使用Neural Magic的Deepsparse的YOLOv5](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization) 🌟 新 -- [Comet日志记录](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration) 🌟 新 +- [自定义数据训练](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/) 🚀 **推荐** +- [最佳训练效果的提示](https://docs.ultralytics.com/guides/model-training-tips/) ☘️ +- [多GPU训练](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training/) +- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/) 🌟 **全新** +- [TFLite, ONNX, CoreML, TensorRT 导出](https://docs.ultralytics.com/yolov5/tutorials/model_export/) 🚀 +- [NVIDIA Jetson 平台部署](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano/) 🌟 **全新** +- [测试时增强 (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/) +- [模型集成](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling/) +- [模型剪枝/稀疏化](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity/) +- [超参数进化](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/) +- [冻结层的迁移学习](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers/) +- [架构概述](https://docs.ultralytics.com/yolov5/tutorials/architecture_description/) 🌟 **全新** +- [使用 Ultralytics HUB 进行 YOLO 训练和部署](https://www.ultralytics.com/hub) 🚀 **推荐** +- [ClearML 日志记录](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration/) +- [与 Neural Magic 的 Deepsparse 集成的 YOLOv5](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization/) +- [Comet 日志记录](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration/) 🌟 **全新**
-## 模块集成
+## 集成
+ +我们与领先的 AI 平台的关键集成扩展了 Ultralytics 产品的功能,提升了数据集标注、训练、可视化和模型管理等任务。探索 Ultralytics 如何通过与 [W&B](https://docs.wandb.ai/guides/integrations/ultralytics/)、[Comet](https://bit.ly/yolov8-readme-comet)、[Roboflow](https://roboflow.com/?ref=ultralytics) 和 [OpenVINO](https://docs.ultralytics.com/integrations/openvino/) 的合作,优化您的 AI 工作流程。
+ [banner: Ultralytics active learning integrations]

- - - - - - - - - + + Ultralytics HUB logo + space + + W&B logo + space + + Comet ML logo + space - + NeuralMagic logo
-| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 |
-| :---: | :---: | :---: | :---: |
-| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet2)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 |
+| Ultralytics HUB 🚀 | W&B | Comet ⭐ 全新 | Neural Magic |
+| :---: | :---: | :---: | :---: |
+| 简化 YOLO 工作流程:通过 [Ultralytics HUB](https://www.ultralytics.com/hub) 轻松标注、训练和部署。立即试用! | 使用 [Weights & Biases](https://docs.wandb.ai/guides/integrations/ultralytics/) 跟踪实验、超参数和结果 | 永久免费,[Comet](https://bit.ly/yolov5-readme-comet) 允许您保存 YOLO11 模型、恢复训练,并交互式地可视化和调试预测结果 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) 运行 YOLO11 推理,速度提升至 6 倍 |

## Ultralytics HUB
-[Ultralytics HUB](https://ultralytics.com/hub) 是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。现在开始 **免费** 使用他!
+[Ultralytics HUB](https://www.ultralytics.com/hub) 是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。现在开始 **免费** 使用它!

## 为什么选择 YOLOv5
@@ -204,7 +207,7 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结 图表笔记 - **COCO AP val** 表示 mAP@0.5:0.95 指标,在 [COCO val2017](http://cocodataset.org) 数据集的 5000 张图像上测得, 图像包含 256 到 1536 各种推理大小。 -- **显卡推理速度** 为在 [COCO val2017](http://cocodataset.org) 数据集上的平均推理时间,使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100实例,batchsize 为 32 。 +- **显卡推理速度** 为在 [COCO val2017](http://cocodataset.org) 数据集上的平均推理时间,使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p4/) V100实例,batchsize 为 32 。 - **EfficientDet** 数据来自 [google/automl](https://github.com/google/automl) , batchsize 为32。 - **复现命令** 为 `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` @@ -231,8 +234,8 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结 - 所有模型都使用默认配置,训练 300 epochs。n和s模型使用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) ,其他模型都使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml) 。 - \*\*mAPval\*\*在单模型单尺度上计算,数据集使用 [COCO val2017](http://cocodataset.org) 。
复现命令 `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
-- **推理速度**在 COCO val 图像总体时间上进行平均得到,测试环境使用[AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (大约 1 ms/img) 不包括在内。复现命令 `python val.py --data coco.yaml --img 640 --task speed --batch 1`
-- **TTA** [测试时数据增强](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) 包括反射和尺度变换。复现命令 `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
+- **推理速度**在 COCO val 图像总体时间上进行平均得到,测试环境使用[AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p4/)实例。 NMS 时间 (大约 1 ms/img) 不包括在内。复现命令 `python val.py --data coco.yaml --img 640 --task speed --batch 1`
+- **TTA** [测试时数据增强](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/) 包括反射和尺度变换。复现命令 `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
@@ -246,7 +249,7 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结
@@ -414,7 +417,7 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu - + @@ -429,7 +432,7 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu ##
贡献
-我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的 [投稿指南](https://docs.ultralytics.com/help/contributing/),并填写 [YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 向我们发送您的体验反馈。感谢我们所有的贡献者! +我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的 [投稿指南](https://docs.ultralytics.com/help/contributing/),并填写 [YOLOv5调查](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 向我们发送您的体验反馈。感谢我们所有的贡献者! @@ -440,12 +443,12 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu Ultralytics 提供两种许可证选项以适应各种使用场景: -- **AGPL-3.0 许可证**:这个[OSI 批准](https://opensource.org/licenses/)的开源许可证非常适合学生和爱好者,可以推动开放的协作和知识分享。请查看[LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件以了解更多细节。 -- **企业许可证**:专为商业用途设计,该许可证允许将 Ultralytics 的软件和 AI 模型无缝集成到商业产品和服务中,从而绕过 AGPL-3.0 的开源要求。如果您的场景涉及将我们的解决方案嵌入到商业产品中,请通过 [Ultralytics Licensing](https://ultralytics.com/license)与我们联系。 +- **AGPL-3.0 许可证**:这个[OSI 批准](https://opensource.org/license)的开源许可证非常适合学生和爱好者,可以推动开放的协作和知识分享。请查看[LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件以了解更多细节。 +- **企业许可证**:专为商业用途设计,该许可证允许将 Ultralytics 的软件和 AI 模型无缝集成到商业产品和服务中,从而绕过 AGPL-3.0 的开源要求。如果您的场景涉及将我们的解决方案嵌入到商业产品中,请通过 [Ultralytics Licensing](https://www.ultralytics.com/license)与我们联系。 ##
Contact
-For Ultralytics bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://ultralytics.com/discord) community for questions and discussions! +For Ultralytics bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://discord.com/invite/ultralytics) community for questions and discussions!
@@ -461,7 +464,7 @@ Ultralytics offers two licensing options to fit a variety of use cases: Ultralytics BiliBili - Ultralytics Discord + Ultralytics Discord
[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation diff --git a/benchmarks.py b/benchmarks.py index 996b8d438..45ae55b36 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ Run YOLOv5 benchmarks on all supported export formats. diff --git a/classify/predict.py b/classify/predict.py index 33140e9b5..59db1330a 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc. @@ -147,7 +147,7 @@ def run( save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}") # im.txt - s += "%gx%g " % im.shape[2:] # print string + s += "{:g}x{:g} ".format(*im.shape[2:]) # print string annotator = Annotator(im0, example=str(names), pil=True) # Print results @@ -192,7 +192,7 @@ def run( vid_writer[i].write(im0) # Print time (inference-only) - LOGGER.info(f"{s}{dt[1].dt * 1E3:.1f}ms") + LOGGER.info(f"{s}{dt[1].dt * 1e3:.1f}ms") # Print results t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image diff --git a/classify/train.py b/classify/train.py index 9c12a66c3..d454c7187 100644 --- a/classify/train.py +++ b/classify/train.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ Train a YOLOv5 classifier model on a classification dataset. @@ -201,10 +201,10 @@ def train(opt, device): scaler = amp.GradScaler(enabled=cuda) val = test_dir.stem # 'val' or 'test' LOGGER.info( - f'Image sizes {imgsz} train, {imgsz} test\n' - f'Using {nw * WORLD_SIZE} dataloader workers\n' + f"Image sizes {imgsz} train, {imgsz} test\n" + f"Using {nw * WORLD_SIZE} dataloader workers\n" f"Logging results to {colorstr('bold', save_dir)}\n" - f'Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n' + f"Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n" f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}" ) for epoch in range(epochs): # loop over the dataset multiple times @@ -290,13 +290,13 @@ def train(opt, device): # Train complete if RANK in {-1, 0} and final_epoch: LOGGER.info( - f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)' + f"\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)" f"\nResults saved to {colorstr('bold', save_dir)}" - f'\nPredict: python classify/predict.py --weights {best} --source im.jpg' - f'\nValidate: python classify/val.py --weights {best} --data {data_dir}' - f'\nExport: python export.py --weights {best} --include onnx' + f"\nPredict: python classify/predict.py --weights {best} --source im.jpg" + f"\nValidate: python classify/val.py --weights {best} --data {data_dir}" + f"\nExport: python export.py --weights {best} --include onnx" f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')" - f'\nVisualize: https://netron.app\n' + f"\nVisualize: https://netron.app\n" ) # Plot examples diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index e3bfbf674..c547a29a9 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -15,7 +15,7 @@ "
\n", " \"Run\n", " \"Open\n", - " \"Open\n", + " \"Open\n", "
\n", "\n", "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", @@ -1410,7 +1410,7 @@ "\n", "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", - "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n", "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n", "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n" diff --git a/classify/val.py b/classify/val.py index 8ce48f064..72bd0e14e 100644 --- a/classify/val.py +++ b/classify/val.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ Validate a trained YOLOv5 classification model on a classification dataset. diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml index 366552ea4..651b6431b 100644 --- a/data/Argoverse.yaml +++ b/data/Argoverse.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI # Example usage: python train.py --data Argoverse.yaml # parent diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index acb88290f..eb25871c6 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan # Example usage: python train.py --data GlobalWheat2020.yaml # parent diff --git a/data/ImageNet.yaml b/data/ImageNet.yaml index 979a0e4de..a3cf694bc 100644 --- a/data/ImageNet.yaml +++ b/data/ImageNet.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University # Simplified class names from https://github.com/anishathalye/imagenet-simple-labels # Example usage: python classify/train.py --data imagenet diff --git a/data/ImageNet10.yaml b/data/ImageNet10.yaml index 2189def7d..e50e58888 100644 --- a/data/ImageNet10.yaml +++ b/data/ImageNet10.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University # Simplified class names from https://github.com/anishathalye/imagenet-simple-labels # Example usage: python classify/train.py --data imagenet diff --git a/data/ImageNet100.yaml b/data/ImageNet100.yaml index 560cdecdb..e3891bcb4 100644 --- a/data/ImageNet100.yaml +++ b/data/ImageNet100.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University # Simplified class names from 
https://github.com/anishathalye/imagenet-simple-labels # Example usage: python classify/train.py --data imagenet diff --git a/data/ImageNet1000.yaml b/data/ImageNet1000.yaml index aa17e9e05..8943d3312 100644 --- a/data/ImageNet1000.yaml +++ b/data/ImageNet1000.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University # Simplified class names from https://github.com/anishathalye/imagenet-simple-labels # Example usage: python classify/train.py --data imagenet diff --git a/data/Objects365.yaml b/data/Objects365.yaml index f1f0a1ae4..248b6c775 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Objects365 dataset https://www.objects365.org/ by Megvii # Example usage: python train.py --data Objects365.yaml # parent diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index b012bec31..695b89cd4 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail # Example usage: python train.py --data SKU-110K.yaml # parent diff --git a/data/VOC.yaml b/data/VOC.yaml index 227d91d76..9dad47777 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford # Example usage: python train.py --data VOC.yaml # parent diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index 20ff1d39c..637433b50 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University # Example usage: python train.py --data VisDrone.yaml # parent diff --git a/data/coco.yaml b/data/coco.yaml index 816efa5cf..7f872e8ca 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # COCO 2017 dataset http://cocodataset.org by Microsoft # Example usage: python train.py --data coco.yaml # parent diff --git a/data/coco128-seg.yaml b/data/coco128-seg.yaml index aea711c98..fa618d87e 100644 --- a/data/coco128-seg.yaml +++ b/data/coco128-seg.yaml @@ -1,5 +1,6 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license -# COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + +# COCO128-seg dataset https://www.kaggle.com/datasets/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics # Example usage: python train.py --data coco128.yaml # parent # ├── yolov5 diff --git a/data/coco128.yaml b/data/coco128.yaml index 2ed35c06e..e81fb1ff4 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -1,5 +1,6 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license -# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics +# Ultralytics 🚀 AGPL-3.0 License - 
https://ultralytics.com/license + +# COCO128 dataset https://www.kaggle.com/datasets/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics # Example usage: python train.py --data coco128.yaml # parent # ├── yolov5 diff --git a/data/hyps/hyp.Objects365.yaml b/data/hyps/hyp.Objects365.yaml index 7a6c507c7..7b26a053b 100644 --- a/data/hyps/hyp.Objects365.yaml +++ b/data/hyps/hyp.Objects365.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Hyperparameters for Objects365 training # python train.py --weights yolov5m.pt --data Objects365.yaml --evolve # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.VOC.yaml b/data/hyps/hyp.VOC.yaml index c04c63e21..378bc4030 100644 --- a/data/hyps/hyp.VOC.yaml +++ b/data/hyps/hyp.VOC.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Hyperparameters for VOC training # python train.py --batch 128 --weights yolov5m6.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.no-augmentation.yaml b/data/hyps/hyp.no-augmentation.yaml index adc360bb8..08378961f 100644 --- a/data/hyps/hyp.no-augmentation.yaml +++ b/data/hyps/hyp.no-augmentation.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Hyperparameters when using Albumentations frameworks # python train.py --hyp hyp.no-augmentation.yaml # See https://github.com/ultralytics/yolov5/pull/3882 for YOLOv5 + Albumentations Usage examples diff --git a/data/hyps/hyp.scratch-high.yaml b/data/hyps/hyp.scratch-high.yaml index 3e913e36d..74536c297 100644 --- a/data/hyps/hyp.scratch-high.yaml +++ b/data/hyps/hyp.scratch-high.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Hyperparameters for high-augmentation COCO training from scratch # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.scratch-low.yaml b/data/hyps/hyp.scratch-low.yaml index ff0d1e7ff..e89b3ba4e 100644 --- a/data/hyps/hyp.scratch-low.yaml +++ b/data/hyps/hyp.scratch-low.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Hyperparameters for low-augmentation COCO training from scratch # python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.scratch-med.yaml b/data/hyps/hyp.scratch-med.yaml index c2fba1fc2..7dfd2f306 100644 --- a/data/hyps/hyp.scratch-med.yaml +++ b/data/hyps/hyp.scratch-med.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Hyperparameters for medium-augmentation COCO training from scratch # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 # See tutorials for hyperparameter evolution 
https://github.com/ultralytics/yolov5#tutorials diff --git a/data/xView.yaml b/data/xView.yaml index 407159831..6bea7637e 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA) # -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! -------- # Example usage: python train.py --data xView.yaml diff --git a/detect.py b/detect.py index 57d778740..24724a35f 100644 --- a/detect.py +++ b/detect.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc. @@ -219,9 +219,10 @@ def run( def write_to_csv(image_name, prediction, confidence): """Writes prediction data for an image to a CSV file, appending if the file exists.""" data = {"Image Name": image_name, "Prediction": prediction, "Confidence": confidence} + file_exists = os.path.isfile(csv_path) with open(csv_path, mode="a", newline="") as f: writer = csv.DictWriter(f, fieldnames=data.keys()) - if not csv_path.is_file(): + if not file_exists: writer.writeheader() writer.writerow(data) @@ -237,7 +238,7 @@ def run( p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}") # im.txt - s += "%gx%g " % im.shape[2:] # print string + s += "{:g}x{:g} ".format(*im.shape[2:]) # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) @@ -308,7 +309,7 @@ def run( vid_writer[i].write(im0) # Print time (inference-only) - LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") + LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1e3:.1f}ms") # Print results t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image diff --git a/export.py b/export.py index f3216a564..e6d18eaa0 100644 --- a/export.py +++ b/export.py @@ -1,6 +1,6 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ -Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit +Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit. Format | `export.py --include` | Model --- | --- | --- @@ -91,6 +91,8 @@ MACOS = platform.system() == "Darwin" # macOS environment class iOSModel(torch.nn.Module): + """An iOS-compatible wrapper for YOLOv5 models that normalizes input images based on their dimensions.""" + def __init__(self, model, im): """ Initializes an iOS compatible model with normalization based on image dimensions. @@ -141,7 +143,7 @@ class iOSModel(torch.nn.Module): def export_formats(): - """ + r""" Returns a DataFrame of supported YOLOv5 model export formats and their properties. Returns: @@ -450,8 +452,9 @@ def export_openvino(file, metadata, half, int8, data, prefix=colorstr("OpenVINO: Extracts and preprocess input data from dataloader item for quantization. 
- Parameters: + data_item: Tuple with data item produced by DataLoader during iteration + Returns: input_tensor: Input data for quantization """ @@ -563,11 +566,7 @@ def export_coreml(model, im, file, int8, half, nms, mlmodel, prefix=colorstr("Co else: f = file.with_suffix(".mlpackage") convert_to = "mlprogram" - if half: - precision = ct.precision.FLOAT16 - else: - precision = ct.precision.FLOAT32 - + precision = ct.precision.FLOAT16 if half else ct.precision.FLOAT32 if nms: model = iOSModel(model, im) ts = torch.jit.trace(model, im, strict=False) # TorchScript model @@ -594,7 +593,9 @@ def export_coreml(model, im, file, int8, half, nms, mlmodel, prefix=colorstr("Co @try_export -def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr("TensorRT:")): +def export_engine( + model, im, file, half, dynamic, simplify, workspace=4, verbose=False, cache="", prefix=colorstr("TensorRT:") +): """ Export a YOLOv5 model to TensorRT engine format, requiring GPU and TensorRT>=7.0.0. @@ -607,6 +608,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose simplify (bool): Set to True to simplify the model during export. workspace (int): Workspace size in GB (default is 4). verbose (bool): Set to True for verbose logging output. + cache (str): Path to save the TensorRT timing cache. prefix (str): Log message prefix. Returns: @@ -661,6 +663,11 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) else: # TensorRT versions 7, 8 config.max_workspace_size = workspace * 1 << 30 + if cache: # enable timing cache + Path(cache).parent.mkdir(parents=True, exist_ok=True) + buf = Path(cache).read_bytes() if Path(cache).exists() else b"" + timing_cache = config.create_timing_cache(buf) + config.set_timing_cache(timing_cache, ignore_mismatch=True) flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) network = builder.create_network(flag) parser = trt.OnnxParser(network, logger) @@ -689,6 +696,9 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose build = builder.build_serialized_network if is_trt10 else builder.build_engine with build(network, config) as engine, open(f, "wb") as t: t.write(engine if is_trt10 else engine.serialize()) + if cache: # save timing cache + with open(cache, "wb") as c: + c.write(config.get_timing_cache().serialize()) return f, None @@ -1135,11 +1145,7 @@ def pipeline_coreml(model, im, file, names, y, mlmodel, prefix=colorstr("CoreML import coremltools as ct from PIL import Image - if mlmodel: - f = file.with_suffix(".mlmodel") # filename - else: - f = file.with_suffix(".mlpackage") # filename - + f = file.with_suffix(".mlmodel") if mlmodel else file.with_suffix(".mlpackage") print(f"{prefix} starting pipeline with coremltools {ct.__version__}...") batch_size, ch, h, w = list(im.shape) # BCHW t = time.time() @@ -1183,10 +1189,7 @@ def pipeline_coreml(model, im, file, names, y, mlmodel, prefix=colorstr("CoreML # Model from spec weights_dir = None - if mlmodel: - weights_dir = None - else: - weights_dir = str(f / "Data/com.apple.CoreML/weights") + weights_dir = None if mlmodel else str(f / "Data/com.apple.CoreML/weights") model = ct.models.MLModel(spec, weights_dir=weights_dir) # 3. 
Create NMS protobuf @@ -1285,6 +1288,7 @@ def run( int8=False, # CoreML/TF INT8 quantization per_tensor=False, # TF per tensor quantization dynamic=False, # ONNX/TF/TensorRT: dynamic axes + cache="", # TensorRT: timing cache path simplify=False, # ONNX: simplify model mlmodel=False, # CoreML: Export in *.mlmodel format opset=12, # ONNX: opset version @@ -1314,6 +1318,7 @@ def run( int8 (bool): Apply INT8 quantization for CoreML or TensorFlow models. Default is False. per_tensor (bool): Apply per tensor quantization for TensorFlow models. Default is False. dynamic (bool): Enable dynamic axes for ONNX, TensorFlow, or TensorRT exports. Default is False. + cache (str): TensorRT timing cache path. Default is an empty string. simplify (bool): Simplify the ONNX model during export. Default is False. opset (int): ONNX opset version. Default is 12. verbose (bool): Enable verbose logging for TensorRT export. Default is False. @@ -1349,6 +1354,7 @@ def run( int8=False, per_tensor=False, dynamic=False, + cache="", simplify=False, opset=12, verbose=False, @@ -1386,7 +1392,8 @@ def run( # Input gs = int(max(model.stride)) # grid size (max stride) imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples - im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection + ch = next(model.parameters()).size(1) # require input image channels + im = torch.zeros(batch_size, ch, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection # Update model model.eval() @@ -1410,7 +1417,7 @@ def run( if jit: # TorchScript f[0], _ = export_torchscript(model, im, file, optimize) if engine: # TensorRT required before ONNX - f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose) + f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose, cache) if onnx or xml: # OpenVINO requires ONNX f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify) if xml: # OpenVINO @@ -1464,12 +1471,12 @@ def run( else "" ) LOGGER.info( - f'\nExport complete ({time.time() - t:.1f}s)' + f"\nExport complete ({time.time() - t:.1f}s)" f"\nResults saved to {colorstr('bold', file.parent.resolve())}" f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}" f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}" f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}" - f'\nVisualize: https://netron.app' + f"\nVisualize: https://netron.app" ) return f # return list of exported files/dirs @@ -1505,6 +1512,7 @@ def parse_opt(known=False): parser.add_argument("--int8", action="store_true", help="CoreML/TF/OpenVINO INT8 quantization") parser.add_argument("--per-tensor", action="store_true", help="TF per-tensor quantization") parser.add_argument("--dynamic", action="store_true", help="ONNX/TF/TensorRT: dynamic axes") + parser.add_argument("--cache", type=str, default="", help="TensorRT: timing cache file path") parser.add_argument("--simplify", action="store_true", help="ONNX: simplify model") parser.add_argument("--mlmodel", action="store_true", help="CoreML: Export in *.mlmodel format") parser.add_argument("--opset", type=int, default=17, help="ONNX: opset version") diff --git a/hubconf.py b/hubconf.py index 98e399421..2eeac4edd 100644 --- a/hubconf.py +++ b/hubconf.py @@ -1,6 +1,6 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ -PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5 +PyTorch 
Hub models https://pytorch.org/hub/ultralytics_yolov5. Usage: import torch diff --git a/models/__init__.py b/models/__init__.py index e69de29bb..77a19dcf0 100644 --- a/models/__init__.py +++ b/models/__init__.py @@ -0,0 +1 @@ +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license diff --git a/models/common.py b/models/common.py index a497627a7..29e4413e3 100644 --- a/models/common.py +++ b/models/common.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Common modules.""" import ast @@ -71,7 +71,8 @@ def autopad(k, p=None, d=1): class Conv(nn.Module): - # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation) + """Applies a convolution, batch normalization, and activation function to an input tensor in a neural network.""" + default_act = nn.SiLU() # default activation def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True): @@ -91,7 +92,8 @@ class Conv(nn.Module): class DWConv(Conv): - # Depth-wise convolution + """Implements a depth-wise convolution layer with optional activation for efficient spatial filtering.""" + def __init__(self, c1, c2, k=1, s=1, d=1, act=True): """Initializes a depth-wise convolution layer with optional activation; args: input channels (c1), output channels (c2), kernel size (k), stride (s), dilation (d), and activation flag (act). @@ -100,7 +102,8 @@ class DWConv(Conv): class DWConvTranspose2d(nn.ConvTranspose2d): - # Depth-wise transpose convolution + """A depth-wise transpose convolutional layer for upsampling in neural networks, particularly in YOLOv5 models.""" + def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): """Initializes a depth-wise transpose convolutional layer for YOLOv5; args: input channels (c1), output channels (c2), kernel size (k), stride (s), input padding (p1), output padding (p2). @@ -109,7 +112,8 @@ class DWConvTranspose2d(nn.ConvTranspose2d): class TransformerLayer(nn.Module): - # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) + """Transformer layer with multihead attention and linear layers, optimized by removing LayerNorm.""" + def __init__(self, c, num_heads): """ Initializes a transformer layer, sans LayerNorm for performance, with multihead attention and linear layers. @@ -132,7 +136,8 @@ class TransformerLayer(nn.Module): class TransformerBlock(nn.Module): - # Vision Transformer https://arxiv.org/abs/2010.11929 + """A Transformer block for vision tasks with convolution, position embeddings, and Transformer layers.""" + def __init__(self, c1, c2, num_heads, num_layers): """Initializes a Transformer block for vision tasks, adapting dimensions if necessary and stacking specified layers. @@ -157,7 +162,8 @@ class TransformerBlock(nn.Module): class Bottleneck(nn.Module): - # Standard bottleneck + """A bottleneck layer with optional shortcut and group convolution for efficient feature extraction.""" + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): """Initializes a standard bottleneck layer with optional shortcut and group convolution, supporting channel expansion. 
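Several hunks back, detect.py's write_to_csv gained a file_exists flag computed before the file is opened: open(..., mode="a") creates the file, so the previous post-open csv_path.is_file() test was always true and the header row was never written. A self-contained sketch of the corrected pattern (file name and field names are illustrative):

```python
import csv
import os

def append_row(csv_path, row):
    """Append one prediction row, writing the header only when the file is first created."""
    file_exists = os.path.isfile(csv_path)  # must check BEFORE open("a") creates the file
    with open(csv_path, mode="a", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=row.keys())
        if not file_exists:
            writer.writeheader()
        writer.writerow(row)

append_row("predictions.csv", {"Image Name": "im.jpg", "Prediction": "person", "Confidence": "0.87"})
```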
@@ -176,7 +182,8 @@ class Bottleneck(nn.Module): class BottleneckCSP(nn.Module): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + """CSP bottleneck layer for feature extraction with cross-stage partial connections and optional shortcuts.""" + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): """Initializes CSP bottleneck with optional shortcuts; args: ch_in, ch_out, number of repeats, shortcut bool, groups, expansion. @@ -201,7 +208,8 @@ class BottleneckCSP(nn.Module): class CrossConv(nn.Module): - # Cross Convolution Downsample + """Implements a cross convolution layer with downsampling, expansion, and optional shortcut.""" + def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): """ Initializes CrossConv with downsampling, expanding, and optionally shortcutting; `c1` input, `c2` output @@ -221,7 +229,8 @@ class CrossConv(nn.Module): class C3(nn.Module): - # CSP Bottleneck with 3 convolutions + """Implements a CSP Bottleneck module with three convolutions for enhanced feature extraction in neural networks.""" + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): """Initializes C3 module with options for channel count, bottleneck repetition, shortcut usage, group convolutions, and expansion. @@ -239,7 +248,8 @@ class C3(nn.Module): class C3x(C3): - # C3 module with cross-convolutions + """Extends the C3 module with cross-convolutions for enhanced feature extraction in neural networks.""" + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): """Initializes C3x module with cross-convolutions, extending C3 with customizable channel dimensions, groups, and expansion. @@ -250,7 +260,8 @@ class C3x(C3): class C3TR(C3): - # C3 module with TransformerBlock() + """C3 module with TransformerBlock for enhanced feature extraction in object detection models.""" + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): """Initializes C3 module with TransformerBlock for enhanced feature extraction, accepts channel sizes, shortcut config, group, and expansion. @@ -261,7 +272,8 @@ class C3TR(C3): class C3SPP(C3): - # C3 module with SPP() + """Extends the C3 module with an SPP layer for enhanced spatial feature extraction and customizable channels.""" + def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5): """Initializes a C3 module with SPP layer for advanced spatial feature extraction, given channel sizes, kernel sizes, shortcut, group, and expansion ratio. 
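The export.py hunks above thread a new `--cache` option into export_engine() so TensorRT's tactic timing cache persists between builds, which can cut repeat engine-build times substantially. A minimal standalone sketch of the same pattern, assuming a TensorRT 8.4+ style API and an illustrative ONNX path:

```python
from pathlib import Path

import tensorrt as trt

def build_engine(onnx_file="yolov5s.onnx", cache="timing.cache", workspace=4):
    """Builds a serialized TensorRT engine, reusing a timing cache when one exists."""
    logger = trt.Logger(trt.Logger.INFO)
    builder = trt.Builder(logger)
    config = builder.create_builder_config()
    config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30)

    if cache:  # reuse tactic timings from earlier builds to speed this one up
        Path(cache).parent.mkdir(parents=True, exist_ok=True)
        buf = Path(cache).read_bytes() if Path(cache).exists() else b""
        timing_cache = config.create_timing_cache(buf)
        config.set_timing_cache(timing_cache, ignore_mismatch=True)

    network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    parser = trt.OnnxParser(network, logger)
    if not parser.parse_from_file(onnx_file):
        raise RuntimeError(f"failed to load ONNX file: {onnx_file}")

    engine = builder.build_serialized_network(network, config)
    if cache:  # persist the (possibly updated) cache for the next build
        with open(cache, "wb") as c:
            c.write(config.get_timing_cache().serialize())
    return engine
```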
@@ -272,7 +284,8 @@ class C3SPP(C3): class C3Ghost(C3): - # C3 module with GhostBottleneck() + """Implements a C3 module with Ghost Bottlenecks for efficient feature extraction in YOLOv5.""" + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): """Initializes YOLOv5's C3 module with Ghost Bottlenecks for efficient feature extraction.""" super().__init__(c1, c2, n, shortcut, g, e) @@ -281,7 +294,8 @@ class C3Ghost(C3): class SPP(nn.Module): - # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729 + """Implements Spatial Pyramid Pooling (SPP) for feature extraction, ref: https://arxiv.org/abs/1406.4729.""" + def __init__(self, c1, c2, k=(5, 9, 13)): """Initializes SPP layer with Spatial Pyramid Pooling, ref: https://arxiv.org/abs/1406.4729, args: c1 (input channels), c2 (output channels), k (kernel sizes).""" super().__init__() @@ -301,7 +315,8 @@ class SPP(nn.Module): class SPPF(nn.Module): - # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher + """Implements a fast Spatial Pyramid Pooling (SPPF) layer for efficient feature extraction in YOLOv5 models.""" + def __init__(self, c1, c2, k=5): """ Initializes YOLOv5 SPPF layer with given channels and kernel size for YOLOv5 model, combining convolution and @@ -326,7 +341,8 @@ class SPPF(nn.Module): class Focus(nn.Module): - # Focus wh information into c-space + """Focuses spatial information into channel space using slicing and convolution for efficient feature extraction.""" + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): """Initializes Focus module to concentrate width-height info into channel space with configurable convolution parameters. @@ -342,7 +358,8 @@ class Focus(nn.Module): class GhostConv(nn.Module): - # Ghost Convolution https://github.com/huawei-noah/ghostnet + """Implements Ghost Convolution for efficient feature extraction, see https://github.com/huawei-noah/ghostnet.""" + def __init__(self, c1, c2, k=1, s=1, g=1, act=True): """Initializes GhostConv with in/out channels, kernel size, stride, groups, and activation; halves out channels for efficiency. @@ -359,7 +376,8 @@ class GhostConv(nn.Module): class GhostBottleneck(nn.Module): - # Ghost Bottleneck https://github.com/huawei-noah/ghostnet + """Efficient bottleneck layer using Ghost Convolutions, see https://github.com/huawei-noah/ghostnet.""" + def __init__(self, c1, c2, k=3, s=1): """Initializes GhostBottleneck with ch_in `c1`, ch_out `c2`, kernel size `k`, stride `s`; see https://github.com/huawei-noah/ghostnet.""" super().__init__() @@ -379,7 +397,8 @@ class GhostBottleneck(nn.Module): class Contract(nn.Module): - # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40) + """Contracts spatial dimensions into channel dimensions for efficient processing in neural networks.""" + def __init__(self, gain=2): """Initializes a layer to contract spatial dimensions (width-height) into channels, e.g., input shape (1,64,80,80) to (1,256,40,40). @@ -399,7 +418,8 @@ class Contract(nn.Module): class Expand(nn.Module): - # Expand channels into width-height, i.e. 
x(1,64,80,80) to x(1,16,160,160) + """Expands spatial dimensions by redistributing channels, e.g., from (1,64,80,80) to (1,16,160,160).""" + def __init__(self, gain=2): """ Initializes the Expand module to increase spatial dimensions by redistributing channels, with an optional gain @@ -422,7 +442,8 @@ class Expand(nn.Module): class Concat(nn.Module): - # Concatenate a list of tensors along dimension + """Concatenates tensors along a specified dimension for efficient tensor manipulation in neural networks.""" + def __init__(self, dimension=1): """Initializes a Concat module to concatenate tensors along a specified dimension.""" super().__init__() @@ -436,7 +457,8 @@ class Concat(nn.Module): class DetectMultiBackend(nn.Module): - # YOLOv5 MultiBackend class for python inference on various backends + """YOLOv5 MultiBackend class for inference on various backends including PyTorch, ONNX, TensorRT, and more.""" + def __init__(self, weights="yolov5s.pt", device=torch.device("cpu"), dnn=False, data=None, fp16=False, fuse=True): """Initializes DetectMultiBackend with support for various inference backends, including PyTorch and ONNX.""" # PyTorch: weights = *.pt @@ -728,6 +750,8 @@ class DetectMultiBackend(nn.Module): scale, zero_point = output["quantization"] x = (x.astype(np.float32) - zero_point) * scale # re-scale y.append(x) + if len(y) == 2 and len(y[1].shape) != 4: + y = list(reversed(y)) y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y] y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels @@ -778,7 +802,8 @@ class DetectMultiBackend(nn.Module): class AutoShape(nn.Module): - # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS + """AutoShape class for robust YOLOv5 inference with preprocessing, NMS, and support for various input formats.""" + conf = 0.25 # NMS confidence threshold iou = 0.45 # NMS IoU threshold agnostic = False # NMS class-agnostic @@ -889,7 +914,8 @@ class AutoShape(nn.Module): class Detections: - # YOLOv5 detections class for inference results + """Manages YOLOv5 detection results with methods for visualization, saving, cropping, and exporting detections.""" + def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None): """Initializes the YOLOv5 Detections class with image info, predictions, filenames, timing and normalization.""" super().__init__() @@ -1047,7 +1073,8 @@ class Detections: class Proto(nn.Module): - # YOLOv5 mask Proto module for segmentation models + """YOLOv5 mask Proto module for segmentation models, performing convolutions and upsampling on input tensors.""" + def __init__(self, c1, c_=256, c2=32): """Initializes YOLOv5 Proto module for segmentation with input, proto, and mask channels configuration.""" super().__init__() @@ -1062,7 +1089,8 @@ class Proto(nn.Module): class Classify(nn.Module): - # YOLOv5 classification head, i.e. 
x(b,c1,20,20) to x(b,c2) + """YOLOv5 classification head with convolution, pooling, and dropout layers for channel transformation.""" + def __init__( self, c1, c2, k=1, s=1, p=None, g=1, dropout_p=0.0 ): # ch_in, ch_out, kernel, stride, padding, groups, dropout probability diff --git a/models/experimental.py b/models/experimental.py index ab9b0ed23..63d9c461a 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Experimental modules.""" import math diff --git a/models/hub/anchors.yaml b/models/hub/anchors.yaml index c8089311b..0f3e288e1 100644 --- a/models/hub/anchors.yaml +++ b/models/hub/anchors.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Default anchors for COCO data # P5 ------------------------------------------------------------------------------------------------------------------- diff --git a/models/hub/yolov3-spp.yaml b/models/hub/yolov3-spp.yaml index 0e073667b..34c2d517c 100644 --- a/models/hub/yolov3-spp.yaml +++ b/models/hub/yolov3-spp.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov3-tiny.yaml b/models/hub/yolov3-tiny.yaml index 0a74fff71..f1861012e 100644 --- a/models/hub/yolov3-tiny.yaml +++ b/models/hub/yolov3-tiny.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov3.yaml b/models/hub/yolov3.yaml index ce4a980c8..15cb68a83 100644 --- a/models/hub/yolov3.yaml +++ b/models/hub/yolov3.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml index bf05e434c..fba3fe5f7 100644 --- a/models/hub/yolov5-bifpn.yaml +++ b/models/hub/yolov5-bifpn.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-fpn.yaml b/models/hub/yolov5-fpn.yaml index dcfdd14a7..4411d1cc0 100644 --- a/models/hub/yolov5-fpn.yaml +++ b/models/hub/yolov5-fpn.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml index 2626e7348..e47d39e4e 100644 --- a/models/hub/yolov5-p2.yaml +++ b/models/hub/yolov5-p2.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p34.yaml b/models/hub/yolov5-p34.yaml index fba35ec10..17e46f7bd 100644 --- a/models/hub/yolov5-p34.yaml +++ b/models/hub/yolov5-p34.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml index c997df2db..dbc1ae4d0 100644 --- a/models/hub/yolov5-p6.yaml +++ b/models/hub/yolov5-p6.yaml @@ -1,4 +1,4 @@ -# 
Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml index 14e6ce05d..2c1706992 100644 --- a/models/hub/yolov5-p7.yaml +++ b/models/hub/yolov5-p7.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-panet.yaml b/models/hub/yolov5-panet.yaml index f0857f92d..68a717566 100644 --- a/models/hub/yolov5-panet.yaml +++ b/models/hub/yolov5-panet.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5l6.yaml b/models/hub/yolov5l6.yaml index 05501a9d1..223f681bf 100644 --- a/models/hub/yolov5l6.yaml +++ b/models/hub/yolov5l6.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5m6.yaml b/models/hub/yolov5m6.yaml index 1512e2b6e..6878d8996 100644 --- a/models/hub/yolov5m6.yaml +++ b/models/hub/yolov5m6.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5n6.yaml b/models/hub/yolov5n6.yaml index 11350413e..0d454c9ca 100644 --- a/models/hub/yolov5n6.yaml +++ b/models/hub/yolov5n6.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s-LeakyReLU.yaml b/models/hub/yolov5s-LeakyReLU.yaml index 6e9d4a882..61d6d3317 100644 --- a/models/hub/yolov5s-LeakyReLU.yaml +++ b/models/hub/yolov5s-LeakyReLU.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s-ghost.yaml b/models/hub/yolov5s-ghost.yaml index cc4336948..53695ae48 100644 --- a/models/hub/yolov5s-ghost.yaml +++ b/models/hub/yolov5s-ghost.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml index 1b2d62c5a..213e4dac1 100644 --- a/models/hub/yolov5s-transformer.yaml +++ b/models/hub/yolov5s-transformer.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s6.yaml b/models/hub/yolov5s6.yaml index 2a4c11625..6e69964a9 100644 --- a/models/hub/yolov5s6.yaml +++ b/models/hub/yolov5s6.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5x6.yaml b/models/hub/yolov5x6.yaml index 0c8f29e60..33a8525f1 100644 --- a/models/hub/yolov5x6.yaml +++ b/models/hub/yolov5x6.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git 
a/models/segment/yolov5l-seg.yaml b/models/segment/yolov5l-seg.yaml index de430f4fb..824e8aec2 100644 --- a/models/segment/yolov5l-seg.yaml +++ b/models/segment/yolov5l-seg.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5m-seg.yaml b/models/segment/yolov5m-seg.yaml index 288577778..c3c1e668a 100644 --- a/models/segment/yolov5m-seg.yaml +++ b/models/segment/yolov5m-seg.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5n-seg.yaml b/models/segment/yolov5n-seg.yaml index faf5228fd..2461e4160 100644 --- a/models/segment/yolov5n-seg.yaml +++ b/models/segment/yolov5n-seg.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5s-seg.yaml b/models/segment/yolov5s-seg.yaml index a199f1d82..fac7664a3 100644 --- a/models/segment/yolov5s-seg.yaml +++ b/models/segment/yolov5s-seg.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5x-seg.yaml b/models/segment/yolov5x-seg.yaml index 75f426386..d3c457a6d 100644 --- a/models/segment/yolov5x-seg.yaml +++ b/models/segment/yolov5x-seg.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/tf.py b/models/tf.py index 9884ec3db..c2cad393e 100644 --- a/models/tf.py +++ b/models/tf.py @@ -1,7 +1,7 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ TensorFlow, Keras and TFLite versions of YOLOv5 -Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127 +Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127. 
Usage: $ python models/tf.py --weights yolov5s.pt @@ -49,7 +49,8 @@ from utils.general import LOGGER, make_divisible, print_args class TFBN(keras.layers.Layer): - # TensorFlow BatchNormalization wrapper + """TensorFlow BatchNormalization wrapper for initializing with optional pretrained weights.""" + def __init__(self, w=None): """Initializes a TensorFlow BatchNormalization layer with optional pretrained weights.""" super().__init__() @@ -67,7 +68,8 @@ class TFBN(keras.layers.Layer): class TFPad(keras.layers.Layer): - # Pad inputs in spatial dimensions 1 and 2 + """Pads input tensors in spatial dimensions 1 and 2 with specified integer or tuple padding values.""" + def __init__(self, pad): """ Initializes a padding layer for spatial dimensions 1 and 2 with specified padding, supporting both int and tuple @@ -87,7 +89,8 @@ class TFPad(keras.layers.Layer): class TFConv(keras.layers.Layer): - # Standard convolution + """Implements a standard convolutional layer with optional batch normalization and activation for TensorFlow.""" + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): """ Initializes a standard convolution layer with optional batch normalization and activation; supports only @@ -118,7 +121,8 @@ class TFConv(keras.layers.Layer): class TFDWConv(keras.layers.Layer): - # Depthwise convolution + """Initializes a depthwise convolution layer with optional batch normalization and activation for TensorFlow.""" + def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None): """ Initializes a depthwise convolution layer with optional batch normalization and activation for TensorFlow @@ -147,7 +151,8 @@ class TFDWConv(keras.layers.Layer): class TFDWConvTranspose2d(keras.layers.Layer): - # Depthwise ConvTranspose2d + """Implements a depthwise ConvTranspose2D layer for TensorFlow with specific settings.""" + def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None): """ Initializes depthwise ConvTranspose2D layer with specific channel, kernel, stride, and padding settings. 
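Back in models/common.py, DetectMultiBackend gained a guard that reorders the two outputs of segmentation TFLite models when they come back flipped; the prototype masks are the only 4-D output, so a rank check suffices. A sketch with assumed shapes, assuming the expected order is detections first, protos second:

```python
import numpy as np

# Illustrative shapes: protos (1, mh, mw, nm) are 4-D, detections are 3-D
y = [np.zeros((1, 160, 160, 32)), np.zeros((1, 25200, 117))]  # flipped order

if len(y) == 2 and len(y[1].shape) != 4:  # protos should come last
    y = list(reversed(y))

assert y[1].ndim == 4  # (detections, protos) order restored
```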
@@ -179,7 +184,8 @@ class TFDWConvTranspose2d(keras.layers.Layer): class TFFocus(keras.layers.Layer): - # Focus wh information into c-space + """Focuses spatial information into channel space using pixel shuffling and convolution for TensorFlow models.""" + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): """ Initializes TFFocus layer to focus width and height information into channel space with custom convolution @@ -201,7 +207,8 @@ class TFFocus(keras.layers.Layer): class TFBottleneck(keras.layers.Layer): - # Standard bottleneck + """Implements a TensorFlow bottleneck layer with optional shortcut connections for efficient feature extraction.""" + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): """ Initializes a standard bottleneck layer for TensorFlow models, expanding and contracting channels with optional @@ -223,7 +230,8 @@ class TFBottleneck(keras.layers.Layer): class TFCrossConv(keras.layers.Layer): - # Cross Convolution + """Implements a cross convolutional layer with optional expansion, grouping, and shortcut for TensorFlow.""" + def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None): """Initializes cross convolution layer with optional expansion, grouping, and shortcut addition capabilities.""" super().__init__() @@ -238,7 +246,8 @@ class TFCrossConv(keras.layers.Layer): class TFConv2d(keras.layers.Layer): - # Substitution for PyTorch nn.Conv2D + """Implements a TensorFlow 2D convolution layer, mimicking PyTorch's nn.Conv2D for specified filters and stride.""" + def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None): """Initializes a TensorFlow 2D convolution layer, mimicking PyTorch's nn.Conv2D functionality for given filter sizes and stride. @@ -261,7 +270,8 @@ class TFConv2d(keras.layers.Layer): class TFBottleneckCSP(keras.layers.Layer): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + """Implements a CSP bottleneck layer for TensorFlow models to enhance gradient flow and efficiency.""" + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): """ Initializes CSP bottleneck layer with specified channel sizes, count, shortcut option, groups, and expansion @@ -289,7 +299,8 @@ class TFBottleneckCSP(keras.layers.Layer): class TFC3(keras.layers.Layer): - # CSP Bottleneck with 3 convolutions + """CSP bottleneck layer with 3 convolutions for TensorFlow, supporting optional shortcuts and group convolutions.""" + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): """ Initializes CSP Bottleneck with 3 convolutions, supporting optional shortcuts and group convolutions. @@ -313,7 +324,8 @@ class TFC3(keras.layers.Layer): class TFC3x(keras.layers.Layer): - # 3 module with cross-convolutions + """A TensorFlow layer for enhanced feature extraction using cross-convolutions in object detection models.""" + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): """ Initializes layer with cross-convolutions for enhanced feature extraction in object detection models. 
@@ -335,7 +347,8 @@ class TFC3x(keras.layers.Layer): class TFSPP(keras.layers.Layer): - # Spatial pyramid pooling layer used in YOLOv3-SPP + """Implements spatial pyramid pooling for YOLOv3-SPP with specific channels and kernel sizes.""" + def __init__(self, c1, c2, k=(5, 9, 13), w=None): """Initializes a YOLOv3-SPP layer with specific input/output channels and kernel sizes for pooling.""" super().__init__() @@ -351,7 +364,8 @@ class TFSPP(keras.layers.Layer): class TFSPPF(keras.layers.Layer): - # Spatial pyramid pooling-Fast layer + """Implements a fast spatial pyramid pooling layer for TensorFlow with optimized feature extraction.""" + def __init__(self, c1, c2, k=5, w=None): """Initializes a fast spatial pyramid pooling layer with customizable in/out channels, kernel size, and weights. @@ -373,7 +387,8 @@ class TFSPPF(keras.layers.Layer): class TFDetect(keras.layers.Layer): - # TF YOLOv5 Detect layer + """Implements YOLOv5 object detection layer in TensorFlow for predicting bounding boxes and class probabilities.""" + def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): """Initializes YOLOv5 detection layer for TensorFlow with configurable classes, anchors, channels, and image size. @@ -427,7 +442,8 @@ class TFDetect(keras.layers.Layer): class TFSegment(TFDetect): - # YOLOv5 Segment head for segmentation models + """YOLOv5 segmentation head for TensorFlow, combining detection and segmentation.""" + def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w=None): """Initializes YOLOv5 Segment head with specified channel depths, anchors, and input size for segmentation models. @@ -450,6 +466,8 @@ class TFSegment(TFDetect): class TFProto(keras.layers.Layer): + """Implements convolutional and upsampling layers for feature extraction in YOLOv5 segmentation.""" + def __init__(self, c1, c_=256, c2=32, w=None): """Initializes TFProto layer with convolutional and upsampling layers for feature extraction and transformation. @@ -466,7 +484,8 @@ class TFProto(keras.layers.Layer): class TFUpsample(keras.layers.Layer): - # TF version of torch.nn.Upsample() + """Implements a TensorFlow upsampling layer with specified size, scale factor, and interpolation mode.""" + def __init__(self, size, scale_factor, mode, w=None): """ Initializes a TensorFlow upsampling layer with specified size, scale_factor, and mode, ensuring scale_factor is @@ -488,7 +507,8 @@ class TFUpsample(keras.layers.Layer): class TFConcat(keras.layers.Layer): - # TF version of torch.concat() + """Implements TensorFlow's version of torch.concat() for concatenating tensors along the last dimension.""" + def __init__(self, dimension=1, w=None): """Initializes a TensorFlow layer for NCHW to NHWC concatenation, requiring dimension=1.""" super().__init__() @@ -581,7 +601,8 @@ def parse_model(d, ch, model, imgsz): class TFModel: - # TF YOLOv5 model + """Implements YOLOv5 model in TensorFlow, supporting TensorFlow, Keras, and TFLite formats for object detection.""" + def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None, model=None, imgsz=(640, 640)): """Initializes TF YOLOv5 model with specified configuration, channels, classes, model instance, and input size. 
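TFConcat above insists on dimension=1 because YOLOv5 model configs express concatenation over the PyTorch NCHW channel axis, which maps to axis 3 in TensorFlow's NHWC layout. A small sketch of that mapping with illustrative shapes:

```python
import numpy as np
import tensorflow as tf
import torch

a = np.random.rand(1, 32, 40, 40).astype(np.float32)  # NCHW feature map
b = np.random.rand(1, 64, 40, 40).astype(np.float32)

pt = torch.cat([torch.from_numpy(a), torch.from_numpy(b)], dim=1)  # (1, 96, 40, 40)

tf_a = tf.transpose(a, (0, 2, 3, 1))  # NCHW -> NHWC
tf_b = tf.transpose(b, (0, 2, 3, 1))
tf_out = tf.concat([tf_a, tf_b], axis=3)  # channels are last in NHWC

assert pt.shape[1] == tf_out.shape[3] == 96  # same channels, different axis
```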
@@ -653,7 +674,8 @@ class TFModel: class AgnosticNMS(keras.layers.Layer): - # TF Agnostic NMS + """Performs agnostic non-maximum suppression (NMS) on detected objects using IoU and confidence thresholds.""" + def call(self, input, topk_all, iou_thres, conf_thres): """Performs agnostic NMS on input tensors using given thresholds and top-K selection.""" return tf.map_fn( diff --git a/models/yolo.py b/models/yolo.py index d89c5da01..13498aced 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ YOLO-specific modules. @@ -70,7 +70,8 @@ except ImportError: class Detect(nn.Module): - # YOLOv5 Detect head for detection models + """YOLOv5 Detect head for processing input tensors and generating detection outputs in object detection models.""" + stride = None # strides computed during build dynamic = False # force grid reconstruction export = False # export mode @@ -127,7 +128,8 @@ class Detect(nn.Module): class Segment(Detect): - # YOLOv5 Segment head for segmentation models + """YOLOv5 Segment head for segmentation models, extending Detect with mask and prototype layers.""" + def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True): """Initializes YOLOv5 Segment head with options for mask count, protos, and channel adjustments.""" super().__init__(nc, anchors, ch, inplace) @@ -214,7 +216,8 @@ class BaseModel(nn.Module): class DetectionModel(BaseModel): - # YOLOv5 detection model + """YOLOv5 detection model class for object detection tasks, supporting custom configurations and anchors.""" + def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None, anchors=None): """Initializes YOLOv5 model with configuration file, input channels, number of classes, and custom anchors.""" super().__init__() @@ -332,14 +335,16 @@ Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibilit class SegmentationModel(DetectionModel): - # YOLOv5 segmentation model + """YOLOv5 segmentation model for object detection and segmentation tasks with configurable parameters.""" + def __init__(self, cfg="yolov5s-seg.yaml", ch=3, nc=None, anchors=None): """Initializes a YOLOv5 segmentation model with configurable params: cfg (str) for configuration, ch (int) for channels, nc (int) for num classes, anchors (list).""" super().__init__(cfg, ch, nc, anchors) class ClassificationModel(BaseModel): - # YOLOv5 classification model + """YOLOv5 classification model for image classification tasks, initialized with a config file or detection model.""" + def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): """Initializes YOLOv5 model with config file `cfg`, input channels `ch`, number of classes `nc`, and `cutoff` index.
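A detail connecting these model classes to the export changes earlier: DetectionModel accepts ch != 3, and the export.py hunk above stopped hard-coding three input channels, instead reading the count off the model's first parameter tensor. A sketch of that trick with a toy model for illustration:

```python
import torch
import torch.nn as nn

# Toy single-channel model standing in for a DetectionModel built with ch=1
model = nn.Sequential(nn.Conv2d(1, 16, 3, padding=1), nn.SiLU())

ch = next(model.parameters()).size(1)  # Conv2d weight is (out, in/groups, k, k) -> 1
im = torch.zeros(1, ch, 640, 640)  # dummy export input now matches the model
print(im.shape)  # torch.Size([1, 1, 640, 640])
```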
diff --git a/models/yolov5l.yaml b/models/yolov5l.yaml index 7cac7ead2..c6c878a10 100644 --- a/models/yolov5l.yaml +++ b/models/yolov5l.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/yolov5m.yaml b/models/yolov5m.yaml index 820e60704..41d9c223a 100644 --- a/models/yolov5m.yaml +++ b/models/yolov5m.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/yolov5n.yaml b/models/yolov5n.yaml index d3b84ace2..588674923 100644 --- a/models/yolov5n.yaml +++ b/models/yolov5n.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/yolov5s.yaml b/models/yolov5s.yaml index 090cb67c2..11ff79001 100644 --- a/models/yolov5s.yaml +++ b/models/yolov5s.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/yolov5x.yaml b/models/yolov5x.yaml index 8c1a6be1b..817b4f911 100644 --- a/models/yolov5x.yaml +++ b/models/yolov5x.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/pyproject.toml b/pyproject.toml index 2bcf65929..9680857af 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Overview: # This pyproject.toml file manages the build, packaging, and distribution of the Ultralytics library. diff --git a/requirements.txt b/requirements.txt index e10fd0cfd..dcd23bf53 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,12 +9,12 @@ opencv-python>=4.1.1 pillow>=10.3.0 psutil # system resources PyYAML>=5.3.1 -requests>=2.32.0 +requests>=2.32.2 scipy>=1.4.1 thop>=0.1.1 # FLOPs computation torch>=1.8.0 # see https://pytorch.org/get-started/locally (recommended) torchvision>=0.9.0 -tqdm>=4.64.0 +tqdm>=4.66.3 ultralytics>=8.2.34 # https://ultralytics.com # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 diff --git a/segment/predict.py b/segment/predict.py index 0bccaaaae..e0e4336c1 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ Run YOLOv5 segmentation inference on images, videos, directories, streams, etc. 
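The first hunk below swaps printf-style interpolation for `str.format`; the two render identical strings, so the change is purely stylistic. A quick check under a hypothetical NCHW batch shape:

```python
# Equivalence check for the "%gx%g" -> "{:g}x{:g}" swap below.
shape = (1, 3, 640, 480)  # hypothetical NCHW inference batch
old = "%gx%g " % shape[2:]  # printf-style
new = "{:g}x{:g} ".format(*shape[2:])  # str.format
assert old == new == "640x480 "
```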
@@ -164,7 +164,7 @@ def run( p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}") # im.txt - s += "%gx%g " % im.shape[2:] # print string + s += "{:g}x{:g} ".format(*im.shape[2:]) # print string imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): @@ -245,7 +245,7 @@ def run( vid_writer[i].write(im0) # Print time (inference-only) - LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") + LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1e3:.1f}ms") # Print results t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image diff --git a/segment/train.py b/segment/train.py index 379fed0b2..815c97ce1 100644 --- a/segment/train.py +++ b/segment/train.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ Train a YOLOv5 segment model on a segment dataset Models and datasets download automatically from the latest YOLOv5 release. @@ -325,10 +325,10 @@ def train(hyp, opt, device, callbacks): compute_loss = ComputeLoss(model, overlap=overlap) # init loss class # callbacks.run('on_train_start') LOGGER.info( - f'Image sizes {imgsz} train, {imgsz} val\n' - f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' + f"Image sizes {imgsz} train, {imgsz} val\n" + f"Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n" f"Logging results to {colorstr('bold', save_dir)}\n" - f'Starting training for {epochs} epochs...' + f"Starting training for {epochs} epochs..." ) for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ # callbacks.run('on_train_epoch_start') @@ -405,7 +405,7 @@ def train(hyp, opt, device, callbacks): # Log if RANK in {-1, 0}: mloss = (mloss * i + loss_items) / (i + 1) # update mean losses - mem = f"{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G" # (GB) + mem = f"{torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0:.3g}G" # (GB) pbar.set_description( ("%11s" * 2 + "%11.4g" * 6) % (f"{epoch}/{epochs - 1}", mem, *mloss, targets.shape[0], imgs.shape[-1]) @@ -740,9 +740,9 @@ def main(opt, callbacks=Callbacks()): # Plot results plot_evolve(evolve_csv) LOGGER.info( - f'Hyperparameter evolution finished {opt.evolve} generations\n' + f"Hyperparameter evolution finished {opt.evolve} generations\n" f"Results saved to {colorstr('bold', save_dir)}\n" - f'Usage example: $ python train.py --hyp {evolve_yaml}' + f"Usage example: $ python train.py --hyp {evolve_yaml}" ) diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index 56ea50500..bb5c1f996 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -15,7 +15,7 @@ "
\n", " \"Run\n", " \"Open\n", - " \"Open\n", + " \"Open\n", "
\n", "\n", "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", @@ -222,7 +222,7 @@ "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "
\n", "\n", - "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n", + "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/datasets/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n", "\n", "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", @@ -523,7 +523,7 @@ "\n", "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", - "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n", "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n", "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n" diff --git a/segment/val.py b/segment/val.py index ab8a66a90..edd6a08fa 100644 --- a/segment/val.py +++ b/segment/val.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ Validate a trained YOLOv5 segment model on a segment dataset. @@ -121,7 +121,7 @@ def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, over detections (array[N, 6]), x1, y1, x2, y2, conf, class labels (array[M, 5]), class, x1, y1, x2, y2 Returns: - correct (array[N, 10]), for 10 IoU levels + correct (array[N, 10]), for 10 IoU levels. """ if masks: if overlap: diff --git a/train.py b/train.py index b4395d7e8..1401ccb96 100644 --- a/train.py +++ b/train.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ Train a YOLOv5 model on a custom dataset. Models and datasets download automatically from the latest YOLOv5 release. @@ -357,10 +357,10 @@ def train(hyp, opt, device, callbacks): compute_loss = ComputeLoss(model) # init loss class callbacks.run("on_train_start") LOGGER.info( - f'Image sizes {imgsz} train, {imgsz} val\n' - f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' + f"Image sizes {imgsz} train, {imgsz} val\n" + f"Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n" f"Logging results to {colorstr('bold', save_dir)}\n" - f'Starting training for {epochs} epochs...' + f"Starting training for {epochs} epochs..." 
) for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ callbacks.run("on_train_epoch_start") @@ -434,7 +434,7 @@ def train(hyp, opt, device, callbacks): # Log if RANK in {-1, 0}: mloss = (mloss * i + loss_items) / (i + 1) # update mean losses - mem = f"{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G" # (GB) + mem = f"{torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0:.3g}G" # (GB) pbar.set_description( ("%11s" * 2 + "%11.4g" * 5) % (f"{epoch}/{epochs - 1}", mem, *mloss, targets.shape[0], imgs.shape[-1]) @@ -717,10 +717,10 @@ def main(opt, callbacks=Callbacks()): "perspective": (True, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 "flipud": (True, 0.0, 1.0), # image flip up-down (probability) "fliplr": (True, 0.0, 1.0), # image flip left-right (probability) - "mosaic": (True, 0.0, 1.0), # image mixup (probability) + "mosaic": (True, 0.0, 1.0), # image mosaic (probability) "mixup": (True, 0.0, 1.0), # image mixup (probability) - "copy_paste": (True, 0.0, 1.0), - } # segment copy-paste (probability) + "copy_paste": (True, 0.0, 1.0), # segment copy-paste (probability) + } # GA configs pop_size = 50 @@ -880,9 +880,9 @@ def main(opt, callbacks=Callbacks()): # Plot results plot_evolve(evolve_csv) LOGGER.info( - f'Hyperparameter evolution finished {opt.evolve} generations\n' + f"Hyperparameter evolution finished {opt.evolve} generations\n" f"Results saved to {colorstr('bold', save_dir)}\n" - f'Usage example: $ python train.py --hyp {evolve_yaml}' + f"Usage example: $ python train.py --hyp {evolve_yaml}" ) diff --git a/tutorial.ipynb b/tutorial.ipynb index ebc6c0b22..b383deb7e 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -28,7 +28,7 @@ "\n", " \"Run\n", " \"Open\n", - " \"Open\n", + " \"Open\n", "\n", "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
We hope that the resources in this notebook will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", "\n", @@ -257,7 +257,7 @@ "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "
\n", "\n", - "Train a YOLOv5s model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`.\n", + "Train a YOLOv5s model on the [COCO128](https://www.kaggle.com/datasets/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`.\n", "\n", "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", @@ -553,7 +553,7 @@ "\n", "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", - "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n", "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n", "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n" diff --git a/utils/__init__.py b/utils/__init__.py index c7ece49fa..3c43c9b68 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """utils/initialization.""" import contextlib @@ -12,7 +12,8 @@ def emojis(str=""): class TryExcept(contextlib.ContextDecorator): - # YOLOv5 TryExcept class. 
Usage: @TryExcept() decorator or 'with TryExcept():' context manager + """A context manager and decorator for error handling that prints an optional message with emojis on exception.""" + def __init__(self, msg=""): """Initializes TryExcept with an optional message, used as a decorator or context manager for error handling.""" self.msg = msg diff --git a/utils/activations.py b/utils/activations.py index 47f0a9980..4652540db 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Activation functions.""" import torch @@ -7,6 +7,8 @@ import torch.nn.functional as F class SiLU(nn.Module): + """Applies the Sigmoid-weighted Linear Unit (SiLU) activation function, also known as Swish.""" + @staticmethod def forward(x): """ @@ -18,6 +20,8 @@ class SiLU(nn.Module): class Hardswish(nn.Module): + """Applies the Hardswish activation function, which is efficient for mobile and embedded devices.""" + @staticmethod def forward(x): """ @@ -38,7 +42,11 @@ class Mish(nn.Module): class MemoryEfficientMish(nn.Module): + """Efficiently applies the Mish activation function using custom autograd for reduced memory usage.""" + class F(torch.autograd.Function): + """Implements a custom autograd function for memory-efficient Mish activation.""" + @staticmethod def forward(ctx, x): """Applies the Mish activation function, a smooth ReLU alternative, to the input tensor `x`.""" diff --git a/utils/augmentations.py b/utils/augmentations.py index bdbe07712..79e7afc8d 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Image augmentation functions.""" import math @@ -18,7 +18,8 @@ IMAGENET_STD = 0.229, 0.224, 0.225 # RGB standard deviation class Albumentations: - # YOLOv5 Albumentations class (optional, only used if package is installed) + """Provides optional data augmentation for YOLOv5 using Albumentations library if installed.""" + def __init__(self, size=640): """Initializes Albumentations class for optional data augmentation in YOLOv5 with specified input size.""" self.transform = None @@ -196,15 +197,7 @@ def random_perspective( else: # affine im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) - # Visualize - # import matplotlib.pyplot as plt - # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() - # ax[0].imshow(im[:, :, ::-1]) # base - # ax[1].imshow(im2[:, :, ::-1]) # warped - - # Transform label coordinates - n = len(targets) - if n: + if n := len(targets): use_segments = any(x.any() for x in segments) and len(segments) == n new = np.zeros((n, 4)) if use_segments: # warp segments @@ -378,7 +371,8 @@ def classify_transforms(size=224): class LetterBox: - # YOLOv5 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()]) + """Resizes and pads images to specified dimensions while maintaining aspect ratio for YOLOv5 preprocessing.""" + def __init__(self, size=(640, 640), auto=False, stride=32): """Initializes a LetterBox object for YOLOv5 image preprocessing with optional auto sizing and stride adjustment. @@ -405,7 +399,8 @@ class LetterBox: class CenterCrop: - # YOLOv5 CenterCrop class for image preprocessing, i.e. 
T.Compose([CenterCrop(size), ToTensor()]) + """Applies center crop to an image, resizing it to the specified size while maintaining aspect ratio.""" + def __init__(self, size=640): """Initializes CenterCrop for image preprocessing, accepting single int or tuple for size, defaults to 640.""" super().__init__() @@ -424,7 +419,8 @@ class CenterCrop: class ToTensor: - # YOLOv5 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()]) + """Converts BGR np.array image from HWC to RGB CHW format, normalizes to [0, 1], and supports FP16 if half=True.""" + def __init__(self, half=False): """Initializes ToTensor for YOLOv5 image preprocessing, with optional half precision (half=True for FP16).""" super().__init__() diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 00eee2eb7..1fc420759 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """AutoAnchor utils.""" import random diff --git a/utils/autobatch.py b/utils/autobatch.py index 08a0de841..9d5ea0a94 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Auto-batch utils.""" from copy import deepcopy diff --git a/utils/aws/__init__.py b/utils/aws/__init__.py index e69de29bb..77a19dcf0 100644 --- a/utils/aws/__init__.py +++ b/utils/aws/__init__.py @@ -0,0 +1 @@ +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license diff --git a/utils/aws/resume.py b/utils/aws/resume.py index ea432a161..5b80fd41f 100644 --- a/utils/aws/resume.py +++ b/utils/aws/resume.py @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Resume all interrupted trainings in yolov5/ dir including DDP trainings # Usage: $ python utils/aws/resume.py diff --git a/utils/callbacks.py b/utils/callbacks.py index 21c587bd7..1a6092840 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Callback utils.""" import threading diff --git a/utils/dataloaders.py b/utils/dataloaders.py index bdeffec46..d86c9afda 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Dataloaders and dataset utils.""" import contextlib @@ -93,7 +93,7 @@ def exif_size(img): def exif_transpose(image): """ Transpose a PIL image accordingly if it has an EXIF Orientation tag. - Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose() + Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose(). :param image: The image to transpose. :return: An image. 
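The LetterBox, CenterCrop, and ToTensor rewrites above keep the `T.Compose` usage from the original comments. A hedged usage sketch, assuming `utils.augmentations` is importable and a `bus.jpg` exists on disk:

```python
# Usage sketch for the preprocessing classes documented above: OpenCV reads
# HWC BGR uint8; ToTensor returns CHW RGB float in [0, 1] (FP16 if half=True).
import cv2
from torchvision import transforms as T

from utils.augmentations import CenterCrop, LetterBox, ToTensor

im = cv2.imread("bus.jpg")  # HWC BGR uint8
classify = T.Compose([CenterCrop(640), ToTensor()])  # classification preprocessing
detect = T.Compose([LetterBox((640, 640), auto=False), ToTensor()])  # detection
print(classify(im).shape, detect(im).shape)  # both CHW, e.g. [3, 640, 640]
```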
@@ -131,6 +131,8 @@ def seed_worker(worker_id): # Inherit from DistributedSampler and override iterator # https://github.com/pytorch/pytorch/blob/master/torch/utils/data/distributed.py class SmartDistributedSampler(distributed.DistributedSampler): + """A distributed sampler ensuring deterministic shuffling and balanced data distribution across GPUs.""" + def __iter__(self): """Yields indices for distributed data sampling, shuffled deterministically based on epoch and seed.""" g = torch.Generator() @@ -208,6 +210,7 @@ def create_dataloader( shuffle=shuffle and sampler is None, num_workers=nw, sampler=sampler, + drop_last=quad, pin_memory=PIN_MEMORY, collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn, worker_init_fn=seed_worker, @@ -259,7 +262,8 @@ class _RepeatSampler: class LoadScreenshots: - # YOLOv5 screenshot dataloader, i.e. `python detect.py --source "screen 0 100 100 512 256"` + """Loads and processes screenshots for YOLOv5 detection from specified screen regions using mss.""" + def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): """ Initializes a screenshot dataloader for YOLOv5 with specified source region, image size, stride, auto, and @@ -316,7 +320,7 @@ class LoadScreenshots: class LoadImages: - """YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`""" + """YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`.""" def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): """Initializes YOLOv5 loader for images/videos, supporting glob patterns, directories, and lists of paths.""" @@ -352,8 +356,7 @@ class LoadImages: else: self.cap = None assert self.nf > 0, ( - f"No images or videos found in {p}. " - f"Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}" + f"No images or videos found in {p}. Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}" ) def __iter__(self): @@ -428,7 +431,8 @@ class LoadImages: class LoadStreams: - # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` + """Loads and processes video streams for YOLOv5, supporting various sources including YouTube and IP cameras.""" + def __init__(self, sources="file.streams", img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): """Initializes a stream loader for processing video streams with YOLOv5, supporting various sources including YouTube. 
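The new `drop_last=quad` argument above matters because `collate_fn4` merges samples in groups of four, so a ragged final batch would break the grouping. A toy sketch of the effect with a stand-in dataset (not the YOLOv5 loader):

```python
# With drop_last=True the trailing partial batch is discarded, guaranteeing
# every batch divides evenly into the groups of four that collate_fn4 expects.
import torch
from torch.utils.data import DataLoader, TensorDataset

ds = TensorDataset(torch.arange(10.0))
loader = DataLoader(ds, batch_size=4, drop_last=True)  # mirrors drop_last=quad
print([b[0].tolist() for b in loader])
# [[0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0]] -- the tail of 2 is dropped
```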
@@ -531,7 +535,8 @@ def img2label_paths(img_paths): class LoadImagesAndLabels(Dataset): - # YOLOv5 train_loader/val_loader, loads images and labels for training and validation + """Loads images and their corresponding labels for training and validation in YOLOv5.""" + cache_version = 0.6 # dataset labels *.cache version rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4] @@ -683,16 +688,17 @@ class LoadImagesAndLabels(Dataset): b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes self.im_hw0, self.im_hw = [None] * n, [None] * n fcn = self.cache_images_to_disk if cache_images == "disk" else self.load_image - results = ThreadPool(NUM_THREADS).imap(lambda i: (i, fcn(i)), self.indices) - pbar = tqdm(results, total=len(self.indices), bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0) - for i, x in pbar: - if cache_images == "disk": - b += self.npy_files[i].stat().st_size - else: # 'ram' - self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) - b += self.ims[i].nbytes * WORLD_SIZE - pbar.desc = f"{prefix}Caching images ({b / gb:.1f}GB {cache_images})" - pbar.close() + with ThreadPool(NUM_THREADS) as pool: + results = pool.imap(lambda i: (i, fcn(i)), self.indices) + pbar = tqdm(results, total=len(self.indices), bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0) + for i, x in pbar: + if cache_images == "disk": + b += self.npy_files[i].stat().st_size + else: # 'ram' + self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) + b += self.ims[i].nbytes * WORLD_SIZE + pbar.desc = f"{prefix}Caching images ({b / gb:.1f}GB {cache_images})" + pbar.close() def check_cache_ram(self, safety_margin=0.1, prefix=""): """Checks if available RAM is sufficient for caching images, adjusting for a safety margin.""" @@ -707,8 +713,8 @@ class LoadImagesAndLabels(Dataset): cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question if not cache: LOGGER.info( - f'{prefix}{mem_required / gb:.1f}GB RAM required, ' - f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, ' + f"{prefix}{mem_required / gb:.1f}GB RAM required, " + f"{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, " f"{'caching images ✅' if cache else 'not caching images ⚠️'}" ) return cache @@ -768,8 +774,7 @@ class LoadImagesAndLabels(Dataset): index = self.indices[index] # linear, shuffled, or image_weights hyp = self.hyp - mosaic = self.mosaic and random.random() < hyp["mosaic"] - if mosaic: + if mosaic := self.mosaic and random.random() < hyp["mosaic"]: # Load mosaic img, labels = self.load_mosaic(index) shapes = None @@ -1103,7 +1108,6 @@ def extract_boxes(path=DATASETS_DIR / "coco128"): def autosplit(path=DATASETS_DIR / "coco128/images", weights=(0.9, 0.1, 0.0), annotated_only=False): """Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files - Usage: from utils.dataloaders import *; autosplit() Arguments: path: Path to images directory @@ -1156,8 +1160,7 @@ def verify_image_label(args): segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...) 
lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) lb = np.array(lb, dtype=np.float32) - nl = len(lb) - if nl: + if nl := len(lb): assert lb.shape[1] == 5, f"labels require 5 columns, {lb.shape[1]} columns detected" assert (lb >= 0).all(), f"negative label values {lb[lb < 0]}" assert (lb[:, 1:] <= 1).all(), f"non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}" diff --git a/utils/downloads.py b/utils/downloads.py index c7e2273c7..f51d67aa9 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Download utils.""" import logging diff --git a/utils/flask_rest_api/README.md b/utils/flask_rest_api/README.md index b18a3011c..d3ffaa206 100644 --- a/utils/flask_rest_api/README.md +++ b/utils/flask_rest_api/README.md @@ -4,7 +4,7 @@ ## Requirements -[Flask](https://palletsprojects.com/p/flask/) is required. Install with: +[Flask](https://palletsprojects.com/projects/flask/) is required. Install with: ```shell $ pip install Flask diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py index 104249002..db88e8040 100644 --- a/utils/flask_rest_api/example_request.py +++ b/utils/flask_rest_api/example_request.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Perform test request.""" import pprint diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py index 7e03d3a66..410ae26c5 100644 --- a/utils/flask_rest_api/restapi.py +++ b/utils/flask_rest_api/restapi.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Run a Flask REST API exposing one or more YOLOv5s models.""" import argparse diff --git a/utils/general.py b/utils/general.py index 57db68a7a..89bbc61f4 100644 --- a/utils/general.py +++ b/utils/general.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """General utils.""" import contextlib @@ -173,8 +173,7 @@ def user_config_dir(dir="Ultralytics", env_var="YOLOV5_CONFIG_DIR"): """Returns user configuration directory path, preferring environment variable `YOLOV5_CONFIG_DIR` if set, else OS- specific. """ - env = os.getenv(env_var) - if env: + if env := os.getenv(env_var): path = Path(env) # use environment variable else: cfg = {"Windows": "AppData/Roaming", "Linux": ".config", "Darwin": "Library/Application Support"} # 3 OS dirs @@ -188,7 +187,8 @@ CONFIG_DIR = user_config_dir() # Ultralytics settings dir class Profile(contextlib.ContextDecorator): - # YOLOv5 Profile class. Usage: @Profile() decorator or 'with Profile():' context manager + """Context manager and decorator for profiling code execution time, with optional CUDA synchronization.""" + def __init__(self, t=0.0, device: torch.device = None): """Initializes a profiling context for YOLOv5 with optional timing threshold and device specification.""" self.t = t @@ -213,7 +213,8 @@ class Profile(contextlib.ContextDecorator): class Timeout(contextlib.ContextDecorator): - # YOLOv5 Timeout class. 
Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager + """Enforces a timeout on code execution, raising TimeoutError if the specified duration is exceeded.""" + def __init__(self, seconds, *, timeout_msg="", suppress_timeout_errors=True): """Initializes a timeout context/decorator with defined seconds, optional message, and error suppression.""" self.seconds = int(seconds) @@ -239,7 +240,8 @@ class Timeout(contextlib.ContextDecorator): class WorkingDirectory(contextlib.ContextDecorator): - # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager + """Context manager/decorator to temporarily change the working directory within a 'with' statement or decorator.""" + def __init__(self, new_dir): """Initializes a context manager/decorator to temporarily change the working directory.""" self.dir = new_dir # new dir @@ -493,9 +495,9 @@ def check_file(file, suffix=""): assert Path(file).exists() and Path(file).stat().st_size > 0, f"File download failed: {url}" # check return file elif file.startswith("clearml://"): # ClearML Dataset ID - assert ( - "clearml" in sys.modules - ), "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'." + assert "clearml" in sys.modules, ( + "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'." + ) return file else: # search files = [] diff --git a/utils/google_app_engine/app.yaml b/utils/google_app_engine/app.yaml index 4c1751f55..6fb9d5f9d 100644 --- a/utils/google_app_engine/app.yaml +++ b/utils/google_app_engine/app.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license runtime: custom env: flex diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 7051e8da0..be9ccfbb1 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Logging utils.""" import json @@ -76,7 +76,8 @@ def _json_default(value): class Loggers: - # YOLOv5 Loggers class + """Initializes and manages various logging utilities for tracking YOLOv5 training and validation metrics.""" + def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS): """Initializes loggers for YOLOv5 training and validation metrics, paths, and options.""" self.save_dir = save_dir @@ -349,7 +350,7 @@ class Loggers: class GenericLogger: """ YOLOv5 General purpose logger for non-task specific logging - Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...) + Arguments: opt: Run arguments diff --git a/utils/loggers/clearml/README.md b/utils/loggers/clearml/README.md index a61e30254..374765dfb 100644 --- a/utils/loggers/clearml/README.md +++ b/utils/loggers/clearml/README.md @@ -4,7 +4,7 @@ ## About ClearML -[ClearML](https://cutt.ly/yolov5-tutorial-clearml) is an [open-source](https://github.com/allegroai/clearml) toolbox designed to save you time ⏱️. +[ClearML](https://clear.ml/) is an [open-source](https://github.com/clearml/clearml) toolbox designed to save you time ⏱️. 🔨 Track every YOLOv5 training run in the experiment manager @@ -18,13 +18,13 @@ And so much more. It's up to you how many of these tools you want to use, you can stick to the experiment manager, or chain them all together into an impressive pipeline! 
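A usage note on the Profile, Timeout, and WorkingDirectory docstrings a few hunks above: all three are `contextlib.ContextDecorator`s, so each works both as a decorator and in a `with` block. A hedged sketch, assuming `utils.general` is importable (Timeout relies on SIGALRM, so it is a no-op on Windows):

```python
# Sketch of the three ContextDecorator helpers documented above.
from utils.general import Profile, Timeout, WorkingDirectory

with Profile() as dt:  # accumulates elapsed seconds in dt.t
    sum(range(10**6))
print(f"{dt.t * 1e3:.1f}ms")

@Timeout(5, timeout_msg="too slow")  # bounds the call at 5 seconds
def quick():
    return "done"

with WorkingDirectory("/tmp"):  # temporary chdir, restored on exit
    pass
```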
-![ClearML scalars dashboard](https://github.com/thepycoder/clearml_screenshots/raw/main/experiment_manager_with_compare.gif) +![ClearML scalars dashboard](https://raw.githubusercontent.com/thepycoder/clearml_screenshots/main/experiment_manager_with_compare.gif) ## 🦾 Setting Things Up To keep track of your experiments and/or data, ClearML needs to communicate to a server. You have 2 options to get one: -Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-tutorial-clearml) or you can set up your own server, see [here](https://clear.ml/docs/latest/docs/deploying_clearml/clearml_server). Even the server is open-source, so even if you're dealing with sensitive data, you should be good to go! +Either sign up for free to the [ClearML Hosted Service](https://clear.ml/) or you can set up your own server, see [here](https://clear.ml/docs/latest/docs/deploying_clearml/clearml_server). Even the server is open-source, so even if you're dealing with sensitive data, you should be good to go! 1. Install the `clearml` python package: @@ -85,7 +85,7 @@ There even more we can do with all of this information, like hyperparameter opti Versioning your data separately from your code is generally a good idea and makes it easy to acquire the latest version too. This repository supports supplying a dataset version ID, and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment! -![ClearML Dataset Interface](https://github.com/thepycoder/clearml_screenshots/raw/main/clearml_data.gif) +![ClearML Dataset Interface](https://raw.githubusercontent.com/thepycoder/clearml_screenshots/main/clearml_data.gif) ### Prepare Your Dataset @@ -163,13 +163,13 @@ pip install optuna python utils/loggers/clearml/hpo.py ``` -![HPO](https://github.com/thepycoder/clearml_screenshots/raw/main/hpo.png) +![HPO](https://raw.githubusercontent.com/thepycoder/clearml_screenshots/main/hpo.png) ## 🤯 Remote Execution (advanced) Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site, or you have some budget to use cloud GPUs. This is where the ClearML Agent comes into play. Check out what the agent can do here: -- [YouTube video](https://youtu.be/MX3BrXnaULs) +- [YouTube video](https://www.youtube.com/watch?v=MX3BrXnaULs&feature=youtu.be) - [Documentation](https://clear.ml/docs/latest/docs/clearml_agent) In short: every experiment tracked by the experiment manager contains enough information to reproduce it on a different machine (installed packages, uncommitted changes etc.). So a ClearML agent does just that: it listens to a queue for incoming tasks and when it finds one, it recreates the environment and runs it while still reporting scalars, plots etc. to the experiment manager. @@ -190,7 +190,7 @@ With our agent running, we can give it some work. 
Remember from the HPO section ⏳ Enqueue the task to any of the queues by right-clicking it -![Enqueue a task from the UI](https://github.com/thepycoder/clearml_screenshots/raw/main/enqueue.gif) +![Enqueue a task from the UI](https://raw.githubusercontent.com/thepycoder/clearml_screenshots/main/enqueue.gif) ### Executing A Task Remotely diff --git a/utils/loggers/clearml/__init__.py b/utils/loggers/clearml/__init__.py index e69de29bb..77a19dcf0 100644 --- a/utils/loggers/clearml/__init__.py +++ b/utils/loggers/clearml/__init__.py @@ -0,0 +1 @@ +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index de4129e08..67553bdb4 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Main Logger class for ClearML experiment tracking.""" import glob @@ -41,11 +41,9 @@ def construct_dataset(clearml_info_string): with open(yaml_filenames[0]) as f: dataset_definition = yaml.safe_load(f) - assert set( - dataset_definition.keys() - ).issuperset( - {"train", "test", "val", "nc", "names"} - ), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')" + assert set(dataset_definition.keys()).issuperset({"train", "test", "val", "nc", "names"}), ( + "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')" + ) data_dict = { "train": ( @@ -78,7 +76,7 @@ class ClearmlLogger: def __init__(self, opt, hyp): """ - Initialize ClearML Task, this object will capture the experiment - - Upload dataset version to ClearML Data if opt.upload_dataset is True + - Upload dataset version to ClearML Data if opt.upload_dataset is True. 
Arguments: opt (namespace) -- Commandline arguments for this run diff --git a/utils/loggers/clearml/hpo.py b/utils/loggers/clearml/hpo.py index 5a9be757a..099a87fca 100644 --- a/utils/loggers/clearml/hpo.py +++ b/utils/loggers/clearml/hpo.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license from clearml import Task diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index 846dcb42a..1ad44b9a3 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license import glob import json @@ -88,14 +88,12 @@ class CometLogger: self.upload_dataset = self.opt.upload_dataset or COMET_UPLOAD_DATASET self.resume = self.opt.resume - # Default parameters to pass to Experiment objects self.default_experiment_kwargs = { "log_code": False, "log_env_gpu": True, "log_env_cpu": True, "project_name": COMET_PROJECT_NAME, - } - self.default_experiment_kwargs.update(experiment_kwargs) + } | experiment_kwargs self.experiment = self._get_experiment(self.comet_mode, run_id) self.experiment.set_name(self.opt.name) diff --git a/utils/loggers/comet/comet_utils.py b/utils/loggers/comet/comet_utils.py index cf936ab48..1dc572cca 100644 --- a/utils/loggers/comet/comet_utils.py +++ b/utils/loggers/comet/comet_utils.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license import logging import os diff --git a/utils/loggers/comet/hpo.py b/utils/loggers/comet/hpo.py index c225ebbd0..dc171e261 100644 --- a/utils/loggers/comet/hpo.py +++ b/utils/loggers/comet/hpo.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license import argparse import json diff --git a/utils/loggers/wandb/__init__.py b/utils/loggers/wandb/__init__.py index e69de29bb..77a19dcf0 100644 --- a/utils/loggers/wandb/__init__.py +++ b/utils/loggers/wandb/__init__.py @@ -0,0 +1 @@ +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 6a32c8cc7..83c5ee130 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # WARNING ⚠️ wandb is deprecated and will be removed in future release. # See supported integrations at https://github.com/ultralytics/yolov5#integrations @@ -18,7 +18,7 @@ if str(ROOT) not in sys.path: RANK = int(os.getenv("RANK", -1)) DEPRECATION_WARNING = ( f"{colorstr('wandb')}: WARNING ⚠️ wandb is deprecated and will be removed in a future release. " - f'See supported integrations at https://github.com/ultralytics/yolov5#integrations.' + f"See supported integrations at https://github.com/ultralytics/yolov5#integrations." ) try: @@ -47,7 +47,7 @@ class WandbLogger: """ - Initialize WandbLogger instance - Upload dataset if opt.upload_dataset is True - - Setup training processes if job_type is 'Training' + - Setup training processes if job_type is 'Training'. 
Arguments: opt (namespace) -- Commandline arguments for this run @@ -88,7 +88,7 @@ class WandbLogger: Setup the necessary processes for training YOLO models: - Attempt to download model checkpoint and dataset artifacts if opt.resume stats with WANDB_ARTIFACT_PREFIX - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded - - Setup log_dict, initialize bbox_interval + - Setup log_dict, initialize bbox_interval. Arguments: opt (namespace) -- commandline arguments for this run diff --git a/utils/loss.py b/utils/loss.py index e8f148e77..35ce6d792 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Loss functions.""" import torch @@ -9,12 +9,13 @@ from utils.torch_utils import de_parallel def smooth_BCE(eps=0.1): - """Returns label smoothing BCE targets for reducing overfitting; pos: `1.0 - 0.5*eps`, neg: `0.5*eps`. For details see https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441""" + """Returns label smoothing BCE targets for reducing overfitting; pos: `1.0 - 0.5*eps`, neg: `0.5*eps`. For details see https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441.""" return 1.0 - 0.5 * eps, 0.5 * eps class BCEBlurWithLogitsLoss(nn.Module): - # BCEwithLogitLoss() with reduced missing label effects. + """Modified BCEWithLogitsLoss to reduce missing label effects in YOLOv5 training with optional alpha smoothing.""" + def __init__(self, alpha=0.05): """Initializes a modified BCEWithLogitsLoss with reduced missing label effects, taking optional alpha smoothing parameter. @@ -37,7 +38,8 @@ class BCEBlurWithLogitsLoss(nn.Module): class FocalLoss(nn.Module): - # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + """Applies focal loss to address class imbalance by modifying BCEWithLogitsLoss with gamma and alpha parameters.""" + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): """Initializes FocalLoss with specified loss function, gamma, and alpha values; modifies loss reduction to 'none'. @@ -71,7 +73,8 @@ class FocalLoss(nn.Module): class QFocalLoss(nn.Module): - # Wraps Quality focal loss around existing loss_fcn(), i.e. 
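On the smooth_BCE hunk above: the function returns the label-smoothed BCE targets, and a two-line check makes the `pos = 1.0 - 0.5*eps`, `neg = 0.5*eps` contract concrete:

```python
# Label smoothing pulls the hard 1/0 targets toward each other by eps,
# regularizing the classification BCE (function body copied from loss.py).
def smooth_BCE(eps=0.1):
    return 1.0 - 0.5 * eps, 0.5 * eps

cp, cn = smooth_BCE(0.1)
print(cp, cn)  # 0.95 0.05
```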
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + """Implements Quality Focal Loss to address class imbalance by modulating loss based on prediction confidence.""" + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): """Initializes Quality Focal Loss with given loss function, gamma, alpha; modifies reduction to 'none'.""" super().__init__() @@ -101,6 +104,8 @@ class QFocalLoss(nn.Module): class ComputeLoss: + """Computes the total loss for YOLOv5 model predictions, including classification, box, and objectness losses.""" + sort_obj_iou = False # Compute losses @@ -143,8 +148,7 @@ class ComputeLoss: b, a, gj, gi = indices[i] # image, anchor, gridy, gridx tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj - n = b.shape[0] # number of targets - if n: + if n := b.shape[0]: # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0 pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions @@ -170,10 +174,6 @@ class ComputeLoss: t[range(n), tcls[i]] = self.cp lcls += self.BCEcls(pcls, t) # BCE - # Append targets to text file - # with open('targets.txt', 'a') as file: - # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] - obji = self.BCEobj(pi[..., 4], tobj) lobj += obji * self.balance[i] # obj loss if self.autobalance: diff --git a/utils/metrics.py b/utils/metrics.py index 9acc38591..03013f4e3 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Model validation metrics.""" import math @@ -100,7 +100,7 @@ def compute_ap(recall, precision): recall: The recall curve (list) precision: The precision curve (list) # Returns - Average precision, precision curve, recall curve + Average precision, precision curve, recall curve. 
""" # Append sentinel values to beginning and end mrec = np.concatenate(([0.0], recall, [1.0])) @@ -122,7 +122,8 @@ def compute_ap(recall, precision): class ConfusionMatrix: - # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix + """Generates and visualizes a confusion matrix for evaluating object detection classification performance.""" + def __init__(self, nc, conf=0.25, iou_thres=0.45): """Initializes ConfusionMatrix with given number of classes, confidence, and IoU threshold.""" self.matrix = np.zeros((nc + 1, nc + 1)) @@ -346,7 +347,7 @@ def plot_pr_curve(px, py, ap, save_dir=Path("pr_curve.png"), names=()): else: ax.plot(px, py, linewidth=1, color="grey") # plot(recall, precision) - ax.plot(px, py.mean(1), linewidth=3, color="blue", label="all classes %.3f mAP@0.5" % ap[:, 0].mean()) + ax.plot(px, py.mean(1), linewidth=3, color="blue", label=f"all classes {ap[:, 0].mean():.3f} mAP@0.5") ax.set_xlabel("Recall") ax.set_ylabel("Precision") ax.set_xlim(0, 1) diff --git a/utils/plots.py b/utils/plots.py index 9bec34a15..f70775f26 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Plotting utils.""" import contextlib @@ -29,7 +29,8 @@ matplotlib.use("Agg") # for writing to files only class Colors: - # Ultralytics color palette https://ultralytics.com/ + """Provides an RGB color palette derived from Ultralytics color scheme for visualization tasks.""" + def __init__(self): """ Initializes the Colors class with a palette derived from Ultralytics color scheme, converting hex codes to RGB. @@ -81,7 +82,7 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path("runs/detec module_type: Module type stage: Module stage within model n: Maximum number of feature maps to plot - save_dir: Directory to save results + save_dir: Directory to save results. 
""" if ("Detect" not in module_type) and ( "Segment" not in module_type @@ -182,7 +183,7 @@ def plot_images(images, targets, paths=None, fname="images.jpg", names=None): # Annotate fs = int((h + w) * ns * 0.01) # font size annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) - for i in range(i + 1): + for i in range(bs): x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders if paths: diff --git a/utils/segment/__init__.py b/utils/segment/__init__.py index e69de29bb..77a19dcf0 100644 --- a/utils/segment/__init__.py +++ b/utils/segment/__init__.py @@ -0,0 +1 @@ +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license diff --git a/utils/segment/augmentations.py b/utils/segment/augmentations.py index 2e1dca119..14a81cf7d 100644 --- a/utils/segment/augmentations.py +++ b/utils/segment/augmentations.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Image augmentation functions.""" import math @@ -69,16 +69,8 @@ def random_perspective( else: # affine im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) - # Visualize - # import matplotlib.pyplot as plt - # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() - # ax[0].imshow(im[:, :, ::-1]) # base - # ax[1].imshow(im2[:, :, ::-1]) # warped - - # Transform label coordinates - n = len(targets) new_segments = [] - if n: + if n := len(targets): new = np.zeros((n, 4)) segments = resample_segments(segments) # upsample for i, segment in enumerate(segments): diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index c2be5f0df..2363d7265 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Dataloaders.""" import os @@ -75,6 +75,7 @@ def create_dataloader( shuffle=shuffle and sampler is None, num_workers=nw, sampler=sampler, + drop_last=quad, pin_memory=True, collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn, worker_init_fn=seed_worker, @@ -83,6 +84,8 @@ def create_dataloader( class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels): # for training/testing + """Loads images, labels, and segmentation masks for training and testing YOLO models with augmentation support.""" + def __init__( self, path, @@ -129,9 +132,7 @@ class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels): # for training/testing index = self.indices[index] # linear, shuffled, or image_weights hyp = self.hyp - mosaic = self.mosaic and random.random() < hyp["mosaic"] - masks = [] - if mosaic: + if mosaic := self.mosaic and random.random() < hyp["mosaic"]: # Load mosaic img, labels, segments = self.load_mosaic(index) shapes = None @@ -177,6 +178,7 @@ class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels): # for training/testing ) nl = len(labels) # number of labels + masks = [] if nl: labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3) if self.overlap: diff --git a/utils/segment/general.py b/utils/segment/general.py index 0793470a9..6a71c2551 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license import cv2 import numpy as np @@ -28,7 +28,7 
@@ def process_mask_upsample(protos, masks_in, bboxes, shape): protos: [mask_dim, mask_h, mask_w] masks_in: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms - shape: input_image_size, (h, w) + shape: input_image_size, (h, w). return: h, w, n """ @@ -45,7 +45,7 @@ def process_mask(protos, masks_in, bboxes, shape, upsample=False): proto_out: [mask_dim, mask_h, mask_w] out_masks: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms - shape:input_image_size, (h, w) + shape:input_image_size, (h, w). return: h, w, n """ @@ -71,7 +71,7 @@ def process_mask_native(protos, masks_in, bboxes, shape): protos: [mask_dim, mask_h, mask_w] masks_in: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms - shape: input_image_size, (h, w) + shape: input_image_size, (h, w). return: h, w, n """ @@ -92,7 +92,7 @@ def scale_image(im1_shape, masks, im0_shape, ratio_pad=None): """ img1_shape: model input shape, [h, w] img0_shape: origin pic shape, [h, w, 3] - masks: [h, w, num] + masks: [h, w, num]. """ # Rescale coordinates (xyxy) from im1_shape to im0_shape if ratio_pad is None: # calculate from im0_shape @@ -120,7 +120,7 @@ def mask_iou(mask1, mask2, eps=1e-7): """ mask1: [N, n] m1 means number of predicted objects mask2: [M, n] m2 means number of gt objects - Note: n means image_w x image_h + Note: n means image_w x image_h. return: masks iou, [N, M] """ @@ -133,7 +133,7 @@ def masks_iou(mask1, mask2, eps=1e-7): """ mask1: [N, n] m1 means number of predicted objects mask2: [N, n] m2 means number of gt objects - Note: n means image_w x image_h + Note: n means image_w x image_h. return: masks iou, (N, ) """ diff --git a/utils/segment/loss.py b/utils/segment/loss.py index d4bc9d3ae..6ef590620 100644 --- a/utils/segment/loss.py +++ b/utils/segment/loss.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license import torch import torch.nn as nn @@ -12,7 +12,8 @@ from .general import crop_mask class ComputeLoss: - # Compute losses + """Computes the YOLOv5 model's loss components including classification, objectness, box, and mask losses.""" + def __init__(self, model, autobalance=False, overlap=False): """Initializes the compute loss function for YOLOv5 models with options for autobalancing and overlap handling. @@ -60,8 +61,7 @@ class ComputeLoss: b, a, gj, gi = indices[i] # image, anchor, gridy, gridx tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj - n = b.shape[0] # number of targets - if n: + if n := b.shape[0]: pxy, pwh, _, pcls, pmask = pi[b, a, gj, gi].split((2, 2, 1, self.nc, nm), 1) # subset of predictions # Box regression diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py index 6f57dec13..3bb7aeec4 100644 --- a/utils/segment/metrics.py +++ b/utils/segment/metrics.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Model validation metrics.""" import numpy as np @@ -54,6 +54,8 @@ def ap_per_class_box_and_mask( class Metric: + """Computes performance metrics like precision, recall, F1 score, and average precision for model evaluation.""" + def __init__(self) -> None: """Initializes performance metric attributes for precision, recall, F1 score, average precision, and class indices. 
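The mask_iou/masks_iou docstring cleanups above keep the same shape contract: masks are flattened to `[N, h*w]` rows and IoU is computed pairwise. A self-contained sketch mirroring the repo's matmul formulation:

```python
# Pairwise mask IoU over flattened binary masks, as documented above:
# intersections via matmul, unions from per-mask areas.
import torch

def mask_iou(mask1, mask2, eps=1e-7):
    inter = torch.matmul(mask1, mask2.T).clamp(0)  # [N, M] overlaps
    union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - inter
    return inter / (union + eps)

a = torch.tensor([[1.0, 1.0, 0.0, 0.0]])  # one 2-pixel mask
b = torch.tensor([[1.0, 0.0, 0.0, 0.0],   # 1-pixel mask, IoU 0.5
                  [1.0, 1.0, 1.0, 1.0]])  # 4-pixel mask, IoU 0.5
print(mask_iou(a, b))  # tensor([[0.5000, 0.5000]])
```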
@@ -127,7 +129,7 @@ class Metric: return (self.mp, self.mr, self.map50, self.map) def class_result(self, i): - """Class-aware result, return p[i], r[i], ap50[i], ap[i]""" + """Class-aware result, return p[i], r[i], ap50[i], ap[i].""" return (self.p[i], self.r[i], self.ap50[i], self.ap[i]) def get_maps(self, nc): @@ -140,7 +142,7 @@ class Metric: def update(self, results): """ Args: - results: tuple(p, r, ap, f1, ap_class) + results: tuple(p, r, ap, f1, ap_class). """ p, r, all_ap, f1, ap_class_index = results self.p = p @@ -163,7 +165,7 @@ class Metrics: def update(self, results): """ Args: - results: Dict{'boxes': Dict{}, 'masks': Dict{}} + results: Dict{'boxes': Dict{}, 'masks': Dict{}}. """ self.metric_box.update(list(results["boxes"].values())) self.metric_mask.update(list(results["masks"].values())) diff --git a/utils/segment/plots.py b/utils/segment/plots.py index f5b81711c..5619e9de9 100644 --- a/utils/segment/plots.py +++ b/utils/segment/plots.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license import contextlib import math diff --git a/utils/torch_utils.py b/utils/torch_utils.py index d15f1f73f..8b3c43b5b 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """PyTorch utils.""" import math @@ -121,9 +121,9 @@ def select_device(device="", batch_size=0, newline=True): os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # force torch.cuda.is_available() = False elif device: # non-cpu device requested os.environ["CUDA_VISIBLE_DEVICES"] = device # set environment variable - must be before assert is_available() - assert torch.cuda.is_available() and torch.cuda.device_count() >= len( - device.replace(",", "") - ), f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" + assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(",", "")), ( + f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" + ) if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available devices = device.split(",") if device else "0" # range(torch.cuda.device_count()) # i.e. 0,1,6,7 @@ -161,7 +161,7 @@ def profile(input, ops, n=10, device=None): input = torch.randn(16, 3, 640, 640) m1 = lambda x: x * torch.sigmoid(x) m2 = nn.SiLU() - profile(input, [m1, m2], n=100) # profile over 100 iterations + profile(input, [m1, m2], n=100) # profile over 100 iterations. 
""" results = [] if not isinstance(device, torch.device): @@ -380,7 +380,7 @@ def smart_optimizer(model, name="Adam", lr=0.001, momentum=0.9, decay=1e-5): optimizer.add_param_group({"params": g[1], "weight_decay": 0.0}) # add g1 (BatchNorm2d weights) LOGGER.info( f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups " - f'{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias' + f"{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias" ) return optimizer @@ -420,7 +420,8 @@ def smart_resume(ckpt, optimizer, ema=None, weights="yolov5s.pt", epochs=300, re class EarlyStopping: - # YOLOv5 simple early stopper + """Implements early stopping to halt training when no improvement is observed for a specified number of epochs.""" + def __init__(self, patience=30): """Initializes simple early stopping mechanism for YOLOv5, with adjustable patience for non-improving epochs.""" self.best_fitness = 0.0 # i.e. mAP @@ -449,7 +450,7 @@ class EarlyStopping: class ModelEMA: """Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models Keeps a moving average of everything in the model state_dict (parameters and buffers) - For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage. """ def __init__(self, model, decay=0.9999, tau=2000, updates=0): diff --git a/utils/triton.py b/utils/triton.py index 2fee42815..07531ea7e 100644 --- a/utils/triton.py +++ b/utils/triton.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Utils to interact with the Triton Inference Server.""" import typing @@ -18,7 +18,6 @@ class TritonRemoteModel: def __init__(self, url: str): """ Keyword Arguments: - url: Fully qualified address of the Triton server - for e.g. grpc://localhost:8000 """ parsed_url = urlparse(url) if parsed_url.scheme == "grpc": diff --git a/val.py b/val.py index b8db6122f..600353b1c 100644 --- a/val.py +++ b/val.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ Validate a trained YOLOv5 detection model on a detection dataset.