diff --git a/.github/workflows/merge-main-into-prs.yml b/.github/workflows/merge-main-into-prs.yml
index e2c3dc379..2cd4b028c 100644
--- a/.github/workflows/merge-main-into-prs.yml
+++ b/.github/workflows/merge-main-into-prs.yml
@@ -10,47 +10,47 @@ on:
branches:
- main
- master
-
+
jobs:
Merge:
if: github.repository == 'ultralytics/yolov5'
runs-on: ubuntu-latest
steps:
- - name: Checkout repository
- uses: actions/checkout@v4
- with:
- fetch-depth: 0
- - uses: actions/setup-python@v5
- with:
- python-version: "3.11"
- cache: "pip" # caching pip dependencies
- - name: Install requirements
- run: |
- pip install pygithub
- - name: Merge main into PRs
- shell: python
- run: |
- from github import Github
- import os
-
- # Authenticate with the GitHub Token
- g = Github(os.getenv('GITHUB_TOKEN'))
-
- # Get the repository dynamically
- repo = g.get_repo(os.getenv('GITHUB_REPOSITORY'))
-
- # List all open pull requests
- open_pulls = repo.get_pulls(state='open', sort='created')
-
- for pr in open_pulls:
- # Compare PR head with main to see if it's behind
- try:
- # Merge main into the PR branch
- success = pr.update_branch()
- assert success, "Branch update failed"
- print(f"Merged 'master' into PR #{pr.number} ({pr.head.ref}) successfully.")
- except Exception as e:
- print(f"Could not merge 'master' into PR #{pr.number} ({pr.head.ref}): {e}")
- env:
- GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
- GITHUB_REPOSITORY: ${{ github.repository }}
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+ - uses: actions/setup-python@v5
+ with:
+ python-version: "3.11"
+ cache: "pip" # caching pip dependencies
+ - name: Install requirements
+ run: |
+ pip install pygithub
+ - name: Merge main into PRs
+ shell: python
+ run: |
+ from github import Github
+ import os
+
+ # Authenticate with the GitHub Token
+ g = Github(os.getenv('GITHUB_TOKEN'))
+
+ # Get the repository dynamically
+ repo = g.get_repo(os.getenv('GITHUB_REPOSITORY'))
+
+ # List all open pull requests
+ open_pulls = repo.get_pulls(state='open', sort='created')
+
+ for pr in open_pulls:
+ # Compare PR head with main to see if it's behind
+ try:
+ # Merge main into the PR branch
+ success = pr.update_branch()
+ assert success, "Branch update failed"
+            print(f"Merged 'main' into PR #{pr.number} ({pr.head.ref}) successfully.")
+ except Exception as e:
+            print(f"Could not merge 'main' into PR #{pr.number} ({pr.head.ref}): {e}")
+ env:
+ GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
+ GITHUB_REPOSITORY: ${{ github.repository }}
diff --git a/classify/train.py b/classify/train.py
index 79045e9fb..6f9eda40b 100644
--- a/classify/train.py
+++ b/classify/train.py
@@ -178,7 +178,9 @@ def train(opt, device):
# Scheduler
lrf = 0.01 # final lr (fraction of lr0)
# lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf # cosine
- lf = lambda x: (1 - x / epochs) * (1 - lrf) + lrf # linear
+ def lf(x):
+ return (1 - x / epochs) * (1 - lrf) + lrf # linear
+
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1,
# final_div_factor=1 / 25 / lrf)
diff --git a/models/yolo.py b/models/yolo.py
index 5390db6a5..ebd0e8302 100644
--- a/models/yolo.py
+++ b/models/yolo.py
@@ -244,7 +244,10 @@ class DetectionModel(BaseModel):
if isinstance(m, (Detect, Segment)):
s = 256 # 2x min stride
m.inplace = self.inplace
- forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x)
+
+ def forward(x):
+ return self.forward(x)[0] if isinstance(m, Segment) else self.forward(x)
+
m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))]) # forward
check_anchor_order(m)
m.anchors /= m.stride.view(-1, 1, 1)
diff --git a/segment/train.py b/segment/train.py
index 2b1dca115..ffd1746ad 100644
--- a/segment/train.py
+++ b/segment/train.py
@@ -214,7 +214,10 @@ def train(hyp, opt, device, callbacks):
if opt.cos_lr:
lf = one_cycle(1, hyp["lrf"], epochs) # cosine 1->hyp['lrf']
else:
- lf = lambda x: (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"] # linear
+
+ def lf(x):
+ return (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"] # linear
+
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs)
# EMA
diff --git a/train.py b/train.py
index a870262a9..44cbd1ac0 100644
--- a/train.py
+++ b/train.py
@@ -224,7 +224,10 @@ def train(hyp, opt, device, callbacks):
if opt.cos_lr:
lf = one_cycle(1, hyp["lrf"], epochs) # cosine 1->hyp['lrf']
else:
- lf = lambda x: (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"] # linear
+
+ def lf(x):
+ return (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"] # linear
+
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs)
# EMA
diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py
index a7575a049..1c2f4ccfb 100644
--- a/utils/loggers/__init__.py
+++ b/utils/loggers/__init__.py
@@ -21,7 +21,10 @@ RANK = int(os.getenv("RANK", -1))
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
- SummaryWriter = lambda *args: None # None = SummaryWriter(str)
+
+ def SummaryWriter(*args):
+ return None # None = SummaryWriter(str)
+
try:
import wandb
diff --git a/utils/loggers/clearml/README.md b/utils/loggers/clearml/README.md
index bc40919ab..2810c92a6 100644
--- a/utils/loggers/clearml/README.md
+++ b/utils/loggers/clearml/README.md
@@ -16,16 +16,10 @@
🔠Turn your newly trained YOLOv5 model into an API with just a few commands using ClearML Serving
-
And so much more. It's up to you how many of these tools you want to use, you can stick to the experiment manager, or chain them all together into an impressive pipeline!
-
-

-
-
-
## 🦾 Setting Things Up
To keep track of your experiments and/or data, ClearML needs to communicate to a server. You have 2 options to get one:
@@ -46,8 +40,6 @@ Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-t
That's it! You're done 😎
-
-
## 🚀 Training YOLOv5 With ClearML
To enable ClearML experiment tracking, simply install the ClearML pip package.
@@ -89,8 +81,6 @@ That's a lot right? 🤯 Now, we can visualize all of this information in the Cl
There even more we can do with all of this information, like hyperparameter optimization and remote execution, so keep reading if you want to see how that works!
-
-
## 🔗 Dataset Version Management
Versioning your data separately from your code is generally a good idea and makes it easy to acquire the latest version too. This repository supports supplying a dataset version ID, and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment!
@@ -157,8 +147,6 @@ Now that you have a ClearML dataset, you can very simply use it to train custom
python train.py --img 640 --batch 16 --epochs 3 --data clearml:// --weights yolov5s.pt --cache
```
-
-
## 👀 Hyperparameter Optimization
Now that we have our experiments and data versioned, it's time to take a look at what we can build on top!