Move hyp and opt yaml save to top of train()

Fixes a bug where already-scaled hyperparameter values were saved to hyp.yaml, which would cause continuity issues with --resume.
pull/338/head
Alex Stoken 2020-07-09 16:18:55 -05:00 committed by GitHub
parent bf6f41567a
commit 9d631408a2
1 changed file with 6 additions and 6 deletions
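Why the save had to move: train() later modifies some hyperparameter values in place (for example, the COCO-tuned class-loss gain is rescaled for the current dataset's class count), so dumping hyp.yaml after that point persisted the already-scaled numbers, and a resumed run would reload and rescale them a second time. A minimal sketch of the compounding, using a hypothetical 20-class dataset and illustrative values:

import yaml

hyp = {'cls': 0.58}  # class-loss gain, hypothetical starting value
nc = 20              # class count of a hypothetical 20-class dataset

# train() rescales the COCO-tuned gain in place for the current dataset:
hyp['cls'] *= nc / 80.  # 0.58 -> 0.145

# Old behaviour: hyp.yaml was dumped only AFTER this point, persisting 0.145
with open('hyp.yaml', 'w') as f:
    yaml.dump(hyp, f, sort_keys=False)

# A --resume run reloads the file and scales again, compounding the factor:
with open('hyp.yaml') as f:
    resumed = yaml.safe_load(f)
resumed['cls'] *= nc / 80.  # 0.145 -> ~0.036, no longer the intended value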

@@ -52,6 +52,12 @@ def train(hyp):
     best = wdir + 'best.pt'
     results_file = log_dir + os.sep + 'results.txt'
 
+    # Save run settings
+    with open(Path(log_dir) / 'hyp.yaml', 'w') as f:
+        yaml.dump(hyp, f, sort_keys=False)
+    with open(Path(log_dir) / 'opt.yaml', 'w') as f:
+        yaml.dump(vars(opt), f, sort_keys=False)
+
     epochs = opt.epochs  # 300
     batch_size = opt.batch_size  # 64
     weights = opt.weights  # initial training weights
@@ -171,12 +177,6 @@ def train(hyp):
     model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights
     model.names = data_dict['names']
 
-    # Save run settings
-    with open(Path(log_dir) / 'hyp.yaml', 'w') as f:
-        yaml.dump(hyp, f, sort_keys=False)
-    with open(Path(log_dir) / 'opt.yaml', 'w') as f:
-        yaml.dump(vars(opt), f, sort_keys=False)
-
     # Class frequency
     labels = np.concatenate(dataset.labels, 0)
     c = torch.tensor(labels[:, 0])  # classes
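With the dump at the top of train(), both files record the settings exactly as the run received them, before any in-place modification. A minimal sketch of reading them back for a later run (the directory name is hypothetical; the actual --resume plumbing is not part of this diff):

import yaml
from pathlib import Path

log_dir = Path('runs/exp0')  # hypothetical run directory
with open(log_dir / 'hyp.yaml') as f:
    hyp = yaml.safe_load(f)  # hyperparameters exactly as the run received them
with open(log_dir / 'opt.yaml') as f:
    opt = yaml.safe_load(f)  # argparse options restored as a plain dict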