Merge branch 'master' into Maj-Pierre-1
commit 686e54c4d5

@@ -746,7 +746,7 @@ def run(**kwargs):
     """
     Executes YOLOv5 training with given parameters, altering options programmatically; returns updated options.
 
-    Example: mport train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
+    Example: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
     """
     opt = parse_opt(True)
     for k, v in kwargs.items():
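
For context, the corrected docstring line is itself runnable; a minimal sketch of the programmatic entry point it documents (dataset, image size, and weights values taken verbatim from the docstring):

```python
# Programmatic YOLOv5 training via the run(**kwargs) wrapper shown above.
# Each keyword overrides the matching parse_opt() default before training starts.
import train

train.run(data="coco128.yaml", imgsz=320, weights="yolov5m.pt")
```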

@@ -353,7 +353,7 @@ def classify_albumentations(
             if vflip > 0:
                 T += [A.VerticalFlip(p=vflip)]
             if jitter > 0:
-                color_jitter = (float(jitter),) * 3  # repeat value for brightness, contrast, satuaration, 0 hue
+                color_jitter = (float(jitter),) * 3  # repeat value for brightness, contrast, saturation, 0 hue
                 T += [A.ColorJitter(*color_jitter, 0)]
         else:  # Use fixed crop for eval set (reproducibility)
             T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]
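
As a side note on the corrected comment: the tuple repetition feeds ColorJitter's first three positional arguments, with hue pinned to 0. A minimal sketch (jitter=0.4 is an arbitrary illustrative value; assumes albumentations is installed):

```python
import albumentations as A

jitter = 0.4
color_jitter = (float(jitter),) * 3  # -> (0.4, 0.4, 0.4)
t = A.ColorJitter(*color_jitter, 0)  # brightness=0.4, contrast=0.4, saturation=0.4, hue=0
```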

@@ -136,7 +136,7 @@ class SmartDistributedSampler(distributed.DistributedSampler):
         g = torch.Generator()
         g.manual_seed(self.seed + self.epoch)
 
-        # determine the the eventual size (n) of self.indices (DDP indices)
+        # determine the eventual size (n) of self.indices (DDP indices)
         n = int((len(self.dataset) - self.rank - 1) / self.num_replicas) + 1  # num_replicas == WORLD_SIZE
         idx = torch.randperm(n, generator=g)
         if not self.shuffle:
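
The corrected comment describes the per-rank shard size: the formula splits the dataset across ranks so the per-rank counts sum back to the dataset length. A quick worked check (dataset_len=10 and world_size=3 are made-up illustrative numbers):

```python
dataset_len, world_size = 10, 3
for rank in range(world_size):
    # same formula as the diff: size of this rank's strided index slice
    n = int((dataset_len - rank - 1) / world_size) + 1
    print(rank, n)  # rank 0 -> 4, rank 1 -> 3, rank 2 -> 3 (4 + 3 + 3 == 10)
```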

@@ -58,7 +58,7 @@ optimizer = HyperParameterOptimizer(
     # now we decide if we want to maximize it or minimize it (accuracy we maximize)
     objective_metric_sign="max",
     # let us limit the number of concurrent experiments,
-    # this in turn will make sure we do dont bombard the scheduler with experiments.
+    # this in turn will make sure we don't bombard the scheduler with experiments.
     # if we have an auto-scaler connected, this, by proxy, will limit the number of machine
     max_number_of_concurrent_tasks=1,
     # this is the optimizer class (actually doing the optimization)
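
For orientation, the keyword arguments in this hunk belong to ClearML's HyperParameterOptimizer. A minimal sketch of the surrounding call, not the repo's exact script: the task ID, search space, and metric names are placeholder assumptions:

```python
from clearml.automation import HyperParameterOptimizer, UniformParameterRange
from clearml.automation.optuna import OptimizerOptuna

optimizer = HyperParameterOptimizer(
    base_task_id="<template_task_id>",  # placeholder: task cloned for each trial
    hyper_parameters=[
        UniformParameterRange("Args/lr0", min_value=1e-5, max_value=1e-1),  # assumed search space
    ],
    objective_metric_title="metrics",   # assumed metric title
    objective_metric_series="mAP_0.5",  # assumed metric series
    objective_metric_sign="max",        # maximize, as in the hunk
    max_number_of_concurrent_tasks=1,   # as in the hunk
    optimizer_class=OptimizerOptuna,    # the optimizer class doing the optimization
)
```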