mirror of https://github.com/WongKinYiu/yolov7.git
fix: torch.load requires weights_only
See: https://pytorch.org/blog/pytorch2-6/ Also in this release as an important security improvement measure we have changed the default value for weights_only parameter of torch.load. This is a backward compatibility-breaking change, please see this forum post for more details. (pull/2117/head)
parent
a207844b1c
commit
41da49e028
42
train.py
42
train.py
|
@@ -68,7 +68,7 @@ def train(hyp, opt, device, tb_writer=None):
|
|||
loggers = {'wandb': None} # loggers dict
|
||||
if rank in [-1, 0]:
|
||||
opt.hyp = hyp # add hyperparameters
|
||||
run_id = torch.load(weights, map_location=device).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
|
||||
run_id = torch.load(weights, map_location=device, weights_only=False).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
|
||||
wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict)
|
||||
loggers['wandb'] = wandb_logger.wandb
|
||||
data_dict = wandb_logger.data_dict
|
||||
|
@@ -84,7 +84,7 @@ def train(hyp, opt, device, tb_writer=None):
|
|||
if pretrained:
|
||||
with torch_distributed_zero_first(rank):
|
||||
attempt_download(weights) # download if not found locally
|
||||
ckpt = torch.load(weights, map_location=device) # load checkpoint
|
||||
ckpt = torch.load(weights, map_location=device, weights_only=False) # load checkpoint
|
||||
model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
|
||||
exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys
|
||||
state_dict = ckpt['model'].float().state_dict() # to FP32
|
||||
|
@@ -121,60 +121,60 @@ def train(hyp, opt, device, tb_writer=None):
|
|||
elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
|
||||
pg1.append(v.weight) # apply decay
|
||||
if hasattr(v, 'im'):
|
||||
if hasattr(v.im, 'implicit'):
|
||||
if hasattr(v.im, 'implicit'):
|
||||
pg0.append(v.im.implicit)
|
||||
else:
|
||||
for iv in v.im:
|
||||
pg0.append(iv.implicit)
|
||||
if hasattr(v, 'imc'):
|
||||
if hasattr(v.imc, 'implicit'):
|
||||
if hasattr(v.imc, 'implicit'):
|
||||
pg0.append(v.imc.implicit)
|
||||
else:
|
||||
for iv in v.imc:
|
||||
pg0.append(iv.implicit)
|
||||
if hasattr(v, 'imb'):
|
||||
if hasattr(v.imb, 'implicit'):
|
||||
if hasattr(v.imb, 'implicit'):
|
||||
pg0.append(v.imb.implicit)
|
||||
else:
|
||||
for iv in v.imb:
|
||||
pg0.append(iv.implicit)
|
||||
if hasattr(v, 'imo'):
|
||||
if hasattr(v.imo, 'implicit'):
|
||||
if hasattr(v.imo, 'implicit'):
|
||||
pg0.append(v.imo.implicit)
|
||||
else:
|
||||
for iv in v.imo:
|
||||
pg0.append(iv.implicit)
|
||||
if hasattr(v, 'ia'):
|
||||
if hasattr(v.ia, 'implicit'):
|
||||
if hasattr(v.ia, 'implicit'):
|
||||
pg0.append(v.ia.implicit)
|
||||
else:
|
||||
for iv in v.ia:
|
||||
pg0.append(iv.implicit)
|
||||
if hasattr(v, 'attn'):
|
||||
if hasattr(v.attn, 'logit_scale'):
|
||||
if hasattr(v.attn, 'logit_scale'):
|
||||
pg0.append(v.attn.logit_scale)
|
||||
if hasattr(v.attn, 'q_bias'):
|
||||
if hasattr(v.attn, 'q_bias'):
|
||||
pg0.append(v.attn.q_bias)
|
||||
if hasattr(v.attn, 'v_bias'):
|
||||
if hasattr(v.attn, 'v_bias'):
|
||||
pg0.append(v.attn.v_bias)
|
||||
if hasattr(v.attn, 'relative_position_bias_table'):
|
||||
if hasattr(v.attn, 'relative_position_bias_table'):
|
||||
pg0.append(v.attn.relative_position_bias_table)
|
||||
if hasattr(v, 'rbr_dense'):
|
||||
if hasattr(v.rbr_dense, 'weight_rbr_origin'):
|
||||
if hasattr(v.rbr_dense, 'weight_rbr_origin'):
|
||||
pg0.append(v.rbr_dense.weight_rbr_origin)
|
||||
if hasattr(v.rbr_dense, 'weight_rbr_avg_conv'):
|
||||
if hasattr(v.rbr_dense, 'weight_rbr_avg_conv'):
|
||||
pg0.append(v.rbr_dense.weight_rbr_avg_conv)
|
||||
if hasattr(v.rbr_dense, 'weight_rbr_pfir_conv'):
|
||||
if hasattr(v.rbr_dense, 'weight_rbr_pfir_conv'):
|
||||
pg0.append(v.rbr_dense.weight_rbr_pfir_conv)
|
||||
if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_idconv1'):
|
||||
if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_idconv1'):
|
||||
pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_idconv1)
|
||||
if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_conv2'):
|
||||
if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_conv2'):
|
||||
pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_conv2)
|
||||
if hasattr(v.rbr_dense, 'weight_rbr_gconv_dw'):
|
||||
if hasattr(v.rbr_dense, 'weight_rbr_gconv_dw'):
|
||||
pg0.append(v.rbr_dense.weight_rbr_gconv_dw)
|
||||
if hasattr(v.rbr_dense, 'weight_rbr_gconv_pw'):
|
||||
if hasattr(v.rbr_dense, 'weight_rbr_gconv_pw'):
|
||||
pg0.append(v.rbr_dense.weight_rbr_gconv_pw)
|
||||
if hasattr(v.rbr_dense, 'vector'):
|
||||
if hasattr(v.rbr_dense, 'vector'):
|
||||
pg0.append(v.rbr_dense.vector)
|
||||
|
||||
if opt.adam:
|
||||
|
@@ -648,12 +648,12 @@ if __name__ == '__main__':
|
|||
'mixup': (1, 0.0, 1.0), # image mixup (probability)
|
||||
'copy_paste': (1, 0.0, 1.0), # segment copy-paste (probability)
|
||||
'paste_in': (1, 0.0, 1.0)} # segment copy-paste (probability)
|
||||
|
||||
|
||||
with open(opt.hyp, errors='ignore') as f:
|
||||
hyp = yaml.safe_load(f) # load hyps dict
|
||||
if 'anchors' not in hyp: # anchors commented in hyp.yaml
|
||||
hyp['anchors'] = 3
|
||||
|
||||
|
||||
assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
|
||||
opt.notest, opt.nosave = True, True # only test/save final epoch
|
||||
# ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
|
||||
|
|
Loading…
Reference in New Issue