mirror of https://github.com/WongKinYiu/yolov7.git
Merge 8a5d9730ab into a207844b1c
commit 1fcfbb6f5b
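
Every hunk in this commit makes the same one-line change: each torch.load call gains weights_only=False. PyTorch 2.6 flipped the default of torch.load's weights_only argument from False to True, and yolov7 checkpoints pickle entire model objects rather than bare state_dicts, so the new weights-only unpickler rejects them. A minimal sketch of the failure mode and the workaround follows; the yolov7.pt path and the try/except framing are illustrative, not part of the commit.

import torch

try:
    # On torch >= 2.6 this is implicitly weights_only=True and fails on
    # yolov7 checkpoints, which pickle full nn.Module objects.
    ckpt = torch.load('yolov7.pt', map_location='cpu')
except Exception as err:  # typically surfaces as pickle.UnpicklingError
    print(f'weights-only load refused the checkpoint: {err}')
    # weights_only=False restores the pre-2.6 behavior. Full unpickling can
    # execute arbitrary code, so only do this for checkpoints you trust.
    ckpt = torch.load('yolov7.pt', map_location='cpu', weights_only=False)

model = ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()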

detect.py
@@ -45,7 +45,7 @@ def detect(save_img=False):
     classify = False
     if classify:
         modelc = load_classifier(name='resnet101', n=2)  # initialize
-        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval()
+        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device, weights_only=False)['model']).to(device).eval()

     # Set Dataloader
     vid_path, vid_writer = None, None
hubconf.py
@@ -37,7 +37,7 @@ def create(name, pretrained, channels, classes, autoshape):
     if pretrained:
         fname = f'{name}.pt'  # checkpoint filename
         attempt_download(fname)  # download if not found locally
-        ckpt = torch.load(fname, map_location=torch.device('cpu'))  # load
+        ckpt = torch.load(fname, map_location=torch.device('cpu'), weights_only=False)  # load
         msd = model.state_dict()  # model state_dict
         csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
         csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape}  # filter
@@ -65,7 +65,7 @@ def custom(path_or_model='path/to/model.pt', autoshape=True):
     Returns:
         pytorch model
     """
-    model = torch.load(path_or_model, map_location=torch.device('cpu')) if isinstance(path_or_model, str) else path_or_model  # load checkpoint
+    model = torch.load(path_or_model, map_location=torch.device('cpu'), weights_only=False) if isinstance(path_or_model, str) else path_or_model  # load checkpoint
     if isinstance(model, dict):
         model = model['ema' if model.get('ema') else 'model']  # load model

models/experimental.py
@@ -249,7 +249,7 @@ def attempt_load(weights, map_location=None):
     model = Ensemble()
     for w in weights if isinstance(weights, list) else [weights]:
         attempt_download(w)
-        ckpt = torch.load(w, map_location=map_location)  # load
+        ckpt = torch.load(w, map_location=map_location, weights_only=False)  # load
         model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval())  # FP32 model

     # Compatibility updates
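
attempt_load() is the loader that detect/test/export all go through, so this hunk is the one most users hit. A narrower alternative, which this commit does not take, is to keep weights_only=True and allowlist the pickled classes via torch.serialization.add_safe_globals (available in recent PyTorch, 2.4+). A hedged sketch, with the caveat that the allowlist below is illustrative and incomplete: a real yolov7 checkpoint references many classes (models.yolo.Model, the layers in models.common, numpy internals), and every one of them would need registering, which is why the blanket weights_only=False is the pragmatic fix here.

import torch
from models.yolo import Model  # the class yolov7 pickles inside its checkpoints

# Register classes the weights-only unpickler may reconstruct. Incomplete:
# each class the checkpoint actually references must be added.
torch.serialization.add_safe_globals([Model])
ckpt = torch.load('yolov7.pt', map_location='cpu')  # weights_only=True still holds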
File diff suppressed because one or more lines are too long

tools/reparameterization.ipynb
@@ -56,7 +56,7 @@
     "\n",
     "device = select_device('0', batch_size=1)\n",
     "# model trained by cfg/training/*.yaml\n",
-    "ckpt = torch.load('cfg/training/yolov7_training.pt', map_location=device)\n",
+    "ckpt = torch.load('cfg/training/yolov7_training.pt', map_location=device, weights_only=False)\n",
     "# reparameterized model in cfg/deploy/*.yaml\n",
     "model = Model('cfg/deploy/yolov7.yaml', ch=3, nc=80).to(device)\n",
     "\n",
@@ -118,7 +118,7 @@
     "\n",
     "device = select_device('0', batch_size=1)\n",
     "# model trained by cfg/training/*.yaml\n",
-    "ckpt = torch.load('cfg/training/yolov7x_trainig.pt', map_location=device)\n",
+    "ckpt = torch.load('cfg/training/yolov7x_trainig.pt', map_location=device, weights_only=False)\n",
     "# reparameterized model in cfg/deploy/*.yaml\n",
     "model = Model('cfg/deploy/yolov7x.yaml', ch=3, nc=80).to(device)\n",
     "\n",
@@ -180,7 +180,7 @@
     "\n",
     "device = select_device('0', batch_size=1)\n",
     "# model trained by cfg/training/*.yaml\n",
-    "ckpt = torch.load('cfg/training/yolov7-w6_trainig.pt', map_location=device)\n",
+    "ckpt = torch.load('cfg/training/yolov7-w6_trainig.pt', map_location=device, weights_only=False)\n",
     "# reparameterized model in cfg/deploy/*.yaml\n",
     "model = Model('cfg/deploy/yolov7-w6.yaml', ch=3, nc=80).to(device)\n",
     "\n",
@@ -266,7 +266,7 @@
     "\n",
     "device = select_device('0', batch_size=1)\n",
     "# model trained by cfg/training/*.yaml\n",
-    "ckpt = torch.load('cfg/training/yolov7-e6.pt', map_location=device)\n",
+    "ckpt = torch.load('cfg/training/yolov7-e6.pt', map_location=device, weights_only=False)\n",
     "# reparameterized model in cfg/deploy/*.yaml\n",
     "model = Model('cfg/deploy/yolov7-e6.yaml', ch=3, nc=80).to(device)\n",
     "\n",
@@ -352,7 +352,7 @@
     "\n",
     "device = select_device('0', batch_size=1)\n",
     "# model trained by cfg/training/*.yaml\n",
-    "ckpt = torch.load('cfg/training/yolov7-d6_trainig.pt', map_location=device)\n",
+    "ckpt = torch.load('cfg/training/yolov7-d6_trainig.pt', map_location=device, weights_only=False)\n",
     "# reparameterized model in cfg/deploy/*.yaml\n",
     "model = Model('cfg/deploy/yolov7-d6.yaml', ch=3, nc=80).to(device)\n",
     "\n",
@@ -438,7 +438,7 @@
     "\n",
     "device = select_device('0', batch_size=1)\n",
     "# model trained by cfg/training/*.yaml\n",
-    "ckpt = torch.load('cfg/training/yolov7-e6e_trainig.pt', map_location=device)\n",
+    "ckpt = torch.load('cfg/training/yolov7-e6e_trainig.pt', map_location=device, weights_only=False)\n",
     "# reparameterized model in cfg/deploy/*.yaml\n",
     "model = Model('cfg/deploy/yolov7-e6e.yaml', ch=3, nc=80).to(device)\n",
     "\n",
File diff suppressed because one or more lines are too long

train.py
@@ -68,7 +68,7 @@ def train(hyp, opt, device, tb_writer=None):
     loggers = {'wandb': None}  # loggers dict
     if rank in [-1, 0]:
         opt.hyp = hyp  # add hyperparameters
-        run_id = torch.load(weights, map_location=device).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
+        run_id = torch.load(weights, map_location=device, weights_only=False).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
         wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict)
         loggers['wandb'] = wandb_logger.wandb
         data_dict = wandb_logger.data_dict
@@ -84,7 +84,7 @@ def train(hyp, opt, device, tb_writer=None):
     if pretrained:
         with torch_distributed_zero_first(rank):
             attempt_download(weights)  # download if not found locally
-        ckpt = torch.load(weights, map_location=device)  # load checkpoint
+        ckpt = torch.load(weights, map_location=device, weights_only=False)  # load checkpoint
         model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
         exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else []  # exclude keys
         state_dict = ckpt['model'].float().state_dict()  # to FP32

train_aux.py
@@ -68,7 +68,7 @@ def train(hyp, opt, device, tb_writer=None):
     loggers = {'wandb': None}  # loggers dict
     if rank in [-1, 0]:
         opt.hyp = hyp  # add hyperparameters
-        run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
+        run_id = torch.load(weights, weights_only=False).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
         wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict)
         loggers['wandb'] = wandb_logger.wandb
         data_dict = wandb_logger.data_dict
@@ -84,7 +84,7 @@ def train(hyp, opt, device, tb_writer=None):
     if pretrained:
         with torch_distributed_zero_first(rank):
             attempt_download(weights)  # download if not found locally
-        ckpt = torch.load(weights, map_location=device)  # load checkpoint
+        ckpt = torch.load(weights, map_location=device, weights_only=False)  # load checkpoint
         model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
         exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else []  # exclude keys
         state_dict = ckpt['model'].float().state_dict()  # to FP32

utils/aws/resume.py
@@ -13,7 +13,7 @@ sys.path.append('./')  # to run '$ python *.py' files in subdirectories
 port = 0  # --master_port
 path = Path('').resolve()
 for last in path.rglob('*/**/last.pt'):
-    ckpt = torch.load(last)
+    ckpt = torch.load(last, weights_only=False)
     if ckpt['optimizer'] is None:
         continue

utils/datasets.py
@@ -389,7 +389,7 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
         self.label_files = img2label_paths(self.img_files)  # labels
         cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')  # cached labels
         if cache_path.is_file():
-            cache, exists = torch.load(cache_path), True  # load
+            cache, exists = torch.load(cache_path, weights_only=False), True  # load
             #if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache:  # changed
             #    cache, exists = self.cache_labels(cache_path, prefix), False  # re-cache
         else:
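
The label cache touched above is not a model checkpoint at all: cache_labels() torch.save()s a plain dict of numpy arrays plus bookkeeping keys, and numpy objects are also rejected by the weights-only unpickler. A small sketch of why this load needs the flag too; the cache layout shown is a simplification of what LoadImagesAndLabels actually writes.

import numpy as np
import torch

# Simplified stand-in for a yolov7 labels.cache entry: labels array + shape.
cache = {'img/0001.jpg': [np.zeros((1, 5), dtype=np.float32), (640, 480)]}
torch.save(cache, 'labels.cache')

# numpy arrays unpickle through numpy globals that are not on the default
# allowlist, so on torch >= 2.6 this fails without weights_only=False.
cache = torch.load('labels.cache', weights_only=False)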

utils/general.py
@@ -799,7 +799,7 @@ def non_max_suppression_kpt(prediction, conf_thres=0.25, iou_thres=0.45, classes

 def strip_optimizer(f='best.pt', s=''):  # from utils.general import *; strip_optimizer()
     # Strip optimizer from 'f' to finalize training, optionally save as 's'
-    x = torch.load(f, map_location=torch.device('cpu'))
+    x = torch.load(f, map_location=torch.device('cpu'), weights_only=False)
     if x.get('ema'):
         x['model'] = x['ema']  # replace model with ema
     for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates':  # keys
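
A usage sketch for the patched strip_optimizer; the run directory below is illustrative. It reloads the checkpoint (now with weights_only=False), replaces the model with its EMA, clears the bookkeeping keys listed above, and rewrites the file, so it exercises the same code path this commit fixes.

from utils.general import strip_optimizer

# Finalize a finished training run: strips optimizer state, halves the model
# to FP16, and overwrites best.pt in place (pass s='out.pt' to write elsewhere).
strip_optimizer('runs/train/exp/weights/best.pt')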