Merge branch 'master' into master

commit 7a1c3f4caa
@@ -178,7 +178,9 @@ def train(opt, device):
     # Scheduler
     lrf = 0.01  # final lr (fraction of lr0)
     # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf  # cosine
-    lf = lambda x: (1 - x / epochs) * (1 - lrf) + lrf  # linear
+    def lf(x):
+        return (1 - x / epochs) * (1 - lrf) + lrf  # linear
+
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
     # scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1,
     #                                    final_div_factor=1 / 25 / lrf)
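As a side note on what this hunk configures: `LambdaLR` multiplies the optimizer's initial learning rate by `lf(epoch)` each epoch. A minimal standalone sketch of the schedule (the dummy parameter and the epoch count are assumed placeholders, not part of this diff):

```python
import torch
from torch.optim import SGD, lr_scheduler

epochs, lr0, lrf = 10, 0.01, 0.01  # assumed values; lrf = final lr as a fraction of lr0

def lf(x):
    return (1 - x / epochs) * (1 - lrf) + lrf  # linear decay: 1.0 at epoch 0 -> lrf at the end

optimizer = SGD([torch.zeros(1, requires_grad=True)], lr=lr0)  # dummy parameter
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)

for epoch in range(epochs):
    optimizer.step()  # a real training step would go here
    scheduler.step()
    print(f"epoch {epoch}: lr = {optimizer.param_groups[0]['lr']:.6f}")
```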
@@ -244,7 +244,10 @@ class DetectionModel(BaseModel):
         if isinstance(m, (Detect, Segment)):
             s = 256  # 2x min stride
             m.inplace = self.inplace
-            forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x)
+
+            def forward(x):
+                return self.forward(x)[0] if isinstance(m, Segment) else self.forward(x)
+
             m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))])  # forward
             check_anchor_order(m)
             m.anchors /= m.stride.view(-1, 1, 1)
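The rewrite from `forward = lambda x: ...` to a `def` is a style fix (flake8 E731 discourages assigning a lambda to a name); behavior is unchanged. The stride computation itself can be illustrated with a toy stand-in for the model's forward pass (everything below is hypothetical, not YOLOv5 code):

```python
import torch

def forward(x):
    # Toy stand-in: return three feature maps downsampled by 8, 16 and 32,
    # mimicking the shapes a detection head would see.
    return [torch.zeros(1, 1, x.shape[-2] // r, x.shape[-1] // r) for r in (8, 16, 32)]

s, ch = 256, 3  # 2x min stride, input channels
stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))])
print(stride)  # tensor([ 8., 16., 32.])
```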
@@ -214,7 +214,10 @@ def train(hyp, opt, device, callbacks):
     if opt.cos_lr:
         lf = one_cycle(1, hyp["lrf"], epochs)  # cosine 1->hyp['lrf']
     else:
-        lf = lambda x: (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"]  # linear
+
+        def lf(x):
+            return (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"]  # linear
+
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)

     # EMA
train.py
@@ -224,7 +224,10 @@ def train(hyp, opt, device, callbacks):
     if opt.cos_lr:
         lf = one_cycle(1, hyp["lrf"], epochs)  # cosine 1->hyp['lrf']
    else:
-        lf = lambda x: (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"]  # linear
+
+        def lf(x):
+            return (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"]  # linear
+
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)

     # EMA
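For reference, the `one_cycle` helper used in the `opt.cos_lr` branch of both hunks above comes from YOLOv5's utils/general.py; the version below is reproduced from memory, so verify it against your checkout:

```python
import math

def one_cycle(y1=0.0, y2=1.0, steps=100):
    # Cosine ramp from y1 to y2 over `steps` epochs (itself a lambda factory).
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1

lf = one_cycle(1, 0.01, 100)  # cosine 1 -> hyp['lrf'], assuming lrf = 0.01
print(lf(0), lf(50), lf(100))  # ≈ 1.0, 0.505, 0.01
```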
@@ -21,7 +21,10 @@ RANK = int(os.getenv("RANK", -1))
 try:
     from torch.utils.tensorboard import SummaryWriter
 except ImportError:
-    SummaryWriter = lambda *args: None  # None = SummaryWriter(str)
+
+    def SummaryWriter(*args):
+        return None  # None = SummaryWriter(str)
+

 try:
     import wandb
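The `SummaryWriter` fallback keeps the call site uniform: with TensorBoard installed the name is the real class, otherwise a stub that returns None. A sketch of how caller code can consume it (the guard below is illustrative, not from this diff):

```python
try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:

    def SummaryWriter(*args):
        return None  # stub: callable like the class, but yields no writer

writer = SummaryWriter()  # real writer (creates a runs/ dir) or None
if writer is None:
    print("TensorBoard not installed; skipping scalar logging")
```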
@@ -16,16 +16,10 @@

 🔭 Turn your newly trained <b>YOLOv5 model into an API</b> with just a few commands using ClearML Serving

-<br />
 And so much more. It's up to you how many of these tools you want to use, you can stick to the experiment manager, or chain them all together into an impressive pipeline!
-<br />
-<br />

 ![ClearML scalars dashboard](https://github.com/thepycoder/clearml_screenshots/raw/main/experiment_manager_with_compare.gif)

-<br />
-<br />
-
 ## 🦾 Setting Things Up

 To keep track of your experiments and/or data, ClearML needs to communicate to a server. You have 2 options to get one:
@@ -46,8 +40,6 @@ Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-t

 That's it! You're done 😎

-<br />
-
 ## 🚀 Training YOLOv5 With ClearML

 To enable ClearML experiment tracking, simply install the ClearML pip package.
@@ -89,8 +81,6 @@ That's a lot right? 🤯 Now, we can visualize all of this information in the Cl

 There even more we can do with all of this information, like hyperparameter optimization and remote execution, so keep reading if you want to see how that works!

-<br />
-
 ## 🔗 Dataset Version Management

 Versioning your data separately from your code is generally a good idea and makes it easy to acquire the latest version too. This repository supports supplying a dataset version ID, and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment!
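As background for the README section this hunk trims: fetching a versioned dataset by ID with the ClearML SDK looks roughly like the sketch below. The ID is a placeholder, and YOLOv5's integration performs an equivalent lookup when given a `clearml://<id>` data argument:

```python
from clearml import Dataset  # pip install clearml

ds = Dataset.get(dataset_id="your_dataset_id")  # placeholder ID
local_path = ds.get_local_copy()  # downloads the data, or reuses a cached copy
print(local_path)
```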
@@ -157,8 +147,6 @@ Now that you have a ClearML dataset, you can very simply use it to train custom
 python train.py --img 640 --batch 16 --epochs 3 --data clearml://<your_dataset_id> --weights yolov5s.pt --cache
 ```

-<br />
-
 ## 👀 Hyperparameter Optimization

 Now that we have our experiments and data versioned, it's time to take a look at what we can build on top!