fix: pytorch 2.6 updates for all

pull/2117/head
Roman KC 2025-03-16 12:14:34 +05:45
parent 41da49e028
commit dc195aa6f9
No known key found for this signature in database
GPG Key ID: 94251AC9FE7364C4
11 changed files with 74 additions and 75 deletions

View File

@@ -45,7 +45,7 @@ def detect(save_img=False):
classify = False classify = False
if classify: if classify:
modelc = load_classifier(name='resnet101', n=2) # initialize modelc = load_classifier(name='resnet101', n=2) # initialize
modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval() modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'], weights_only=False).to(device).eval()
# Set Dataloader # Set Dataloader
vid_path, vid_writer = None, None vid_path, vid_writer = None, None

View File

@@ -37,7 +37,7 @@ def create(name, pretrained, channels, classes, autoshape):
if pretrained: if pretrained:
fname = f'{name}.pt' # checkpoint filename fname = f'{name}.pt' # checkpoint filename
attempt_download(fname) # download if not found locally attempt_download(fname) # download if not found locally
ckpt = torch.load(fname, map_location=torch.device('cpu')) # load ckpt = torch.load(fname, map_location=torch.device('cpu'), weights_only=False) # load
msd = model.state_dict() # model state_dict msd = model.state_dict() # model state_dict
csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32
csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter
@@ -65,7 +65,7 @@ def custom(path_or_model='path/to/model.pt', autoshape=True):
Returns: Returns:
pytorch model pytorch model
""" """
model = torch.load(path_or_model, map_location=torch.device('cpu')) if isinstance(path_or_model, str) else path_or_model # load checkpoint model = torch.load(path_or_model, map_location=torch.device('cpu'), weights_only=False) if isinstance(path_or_model, str) else path_or_model # load checkpoint
if isinstance(model, dict): if isinstance(model, dict):
model = model['ema' if model.get('ema') else 'model'] # load model model = model['ema' if model.get('ema') else 'model'] # load model

View File

@@ -249,7 +249,7 @@ def attempt_load(weights, map_location=None):
model = Ensemble() model = Ensemble()
for w in weights if isinstance(weights, list) else [weights]: for w in weights if isinstance(weights, list) else [weights]:
attempt_download(w) attempt_download(w)
ckpt = torch.load(w, map_location=map_location) # load ckpt = torch.load(w, map_location=map_location, weights_only=False) # load
model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model
# Compatibility updates # Compatibility updates

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -56,7 +56,7 @@
"\n", "\n",
"device = select_device('0', batch_size=1)\n", "device = select_device('0', batch_size=1)\n",
"# model trained by cfg/training/*.yaml\n", "# model trained by cfg/training/*.yaml\n",
"ckpt = torch.load('cfg/training/yolov7_training.pt', map_location=device)\n", "ckpt = torch.load('cfg/training/yolov7_training.pt', map_location=device, weights_only=False)\n",
"# reparameterized model in cfg/deploy/*.yaml\n", "# reparameterized model in cfg/deploy/*.yaml\n",
"model = Model('cfg/deploy/yolov7.yaml', ch=3, nc=80).to(device)\n", "model = Model('cfg/deploy/yolov7.yaml', ch=3, nc=80).to(device)\n",
"\n", "\n",
@@ -118,7 +118,7 @@
"\n", "\n",
"device = select_device('0', batch_size=1)\n", "device = select_device('0', batch_size=1)\n",
"# model trained by cfg/training/*.yaml\n", "# model trained by cfg/training/*.yaml\n",
"ckpt = torch.load('cfg/training/yolov7x_trainig.pt', map_location=device)\n", "ckpt = torch.load('cfg/training/yolov7x_trainig.pt', map_location=device, weights_only=False)\n",
"# reparameterized model in cfg/deploy/*.yaml\n", "# reparameterized model in cfg/deploy/*.yaml\n",
"model = Model('cfg/deploy/yolov7x.yaml', ch=3, nc=80).to(device)\n", "model = Model('cfg/deploy/yolov7x.yaml', ch=3, nc=80).to(device)\n",
"\n", "\n",
@@ -180,7 +180,7 @@
"\n", "\n",
"device = select_device('0', batch_size=1)\n", "device = select_device('0', batch_size=1)\n",
"# model trained by cfg/training/*.yaml\n", "# model trained by cfg/training/*.yaml\n",
"ckpt = torch.load('cfg/training/yolov7-w6_trainig.pt', map_location=device)\n", "ckpt = torch.load('cfg/training/yolov7-w6_trainig.pt', map_location=device, weights_only=False)\n",
"# reparameterized model in cfg/deploy/*.yaml\n", "# reparameterized model in cfg/deploy/*.yaml\n",
"model = Model('cfg/deploy/yolov7-w6.yaml', ch=3, nc=80).to(device)\n", "model = Model('cfg/deploy/yolov7-w6.yaml', ch=3, nc=80).to(device)\n",
"\n", "\n",
@@ -266,7 +266,7 @@
"\n", "\n",
"device = select_device('0', batch_size=1)\n", "device = select_device('0', batch_size=1)\n",
"# model trained by cfg/training/*.yaml\n", "# model trained by cfg/training/*.yaml\n",
"ckpt = torch.load('cfg/training/yolov7-e6.pt', map_location=device)\n", "ckpt = torch.load('cfg/training/yolov7-e6.pt', map_location=device, weights_only=False)\n",
"# reparameterized model in cfg/deploy/*.yaml\n", "# reparameterized model in cfg/deploy/*.yaml\n",
"model = Model('cfg/deploy/yolov7-e6.yaml', ch=3, nc=80).to(device)\n", "model = Model('cfg/deploy/yolov7-e6.yaml', ch=3, nc=80).to(device)\n",
"\n", "\n",
@@ -352,7 +352,7 @@
"\n", "\n",
"device = select_device('0', batch_size=1)\n", "device = select_device('0', batch_size=1)\n",
"# model trained by cfg/training/*.yaml\n", "# model trained by cfg/training/*.yaml\n",
"ckpt = torch.load('cfg/training/yolov7-d6_trainig.pt', map_location=device)\n", "ckpt = torch.load('cfg/training/yolov7-d6_trainig.pt', map_location=device, weights_only=False)\n",
"# reparameterized model in cfg/deploy/*.yaml\n", "# reparameterized model in cfg/deploy/*.yaml\n",
"model = Model('cfg/deploy/yolov7-d6.yaml', ch=3, nc=80).to(device)\n", "model = Model('cfg/deploy/yolov7-d6.yaml', ch=3, nc=80).to(device)\n",
"\n", "\n",
@@ -438,7 +438,7 @@
"\n", "\n",
"device = select_device('0', batch_size=1)\n", "device = select_device('0', batch_size=1)\n",
"# model trained by cfg/training/*.yaml\n", "# model trained by cfg/training/*.yaml\n",
"ckpt = torch.load('cfg/training/yolov7-e6e_trainig.pt', map_location=device)\n", "ckpt = torch.load('cfg/training/yolov7-e6e_trainig.pt', map_location=device, weights_only=False)\n",
"# reparameterized model in cfg/deploy/*.yaml\n", "# reparameterized model in cfg/deploy/*.yaml\n",
"model = Model('cfg/deploy/yolov7-e6e.yaml', ch=3, nc=80).to(device)\n", "model = Model('cfg/deploy/yolov7-e6e.yaml', ch=3, nc=80).to(device)\n",
"\n", "\n",

File diff suppressed because one or more lines are too long

View File

@@ -68,7 +68,7 @@ def train(hyp, opt, device, tb_writer=None):
loggers = {'wandb': None} # loggers dict loggers = {'wandb': None} # loggers dict
if rank in [-1, 0]: if rank in [-1, 0]:
opt.hyp = hyp # add hyperparameters opt.hyp = hyp # add hyperparameters
run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None run_id = torch.load(weights, weights_only=False).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict) wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict)
loggers['wandb'] = wandb_logger.wandb loggers['wandb'] = wandb_logger.wandb
data_dict = wandb_logger.data_dict data_dict = wandb_logger.data_dict
@@ -84,7 +84,7 @@ def train(hyp, opt, device, tb_writer=None):
if pretrained: if pretrained:
with torch_distributed_zero_first(rank): with torch_distributed_zero_first(rank):
attempt_download(weights) # download if not found locally attempt_download(weights) # download if not found locally
ckpt = torch.load(weights, map_location=device) # load checkpoint ckpt = torch.load(weights, map_location=device, weights_only=False) # load checkpoint
model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys
state_dict = ckpt['model'].float().state_dict() # to FP32 state_dict = ckpt['model'].float().state_dict() # to FP32

View File

@@ -13,7 +13,7 @@ sys.path.append('./') # to run '$ python *.py' files in subdirectories
port = 0 # --master_port port = 0 # --master_port
path = Path('').resolve() path = Path('').resolve()
for last in path.rglob('*/**/last.pt'): for last in path.rglob('*/**/last.pt'):
ckpt = torch.load(last) ckpt = torch.load(last, weights_only=False)
if ckpt['optimizer'] is None: if ckpt['optimizer'] is None:
continue continue

View File

@@ -389,7 +389,7 @@ class LoadImagesAndLabels(Dataset): # for training/testing
self.label_files = img2label_paths(self.img_files) # labels self.label_files = img2label_paths(self.img_files) # labels
cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels
if cache_path.is_file(): if cache_path.is_file():
cache, exists = torch.load(cache_path), True # load cache, exists = torch.load(cache_path, weights_only=False), True # load
#if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed #if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed
# cache, exists = self.cache_labels(cache_path, prefix), False # re-cache # cache, exists = self.cache_labels(cache_path, prefix), False # re-cache
else: else:

View File

@@ -799,7 +799,7 @@ def non_max_suppression_kpt(prediction, conf_thres=0.25, iou_thres=0.45, classes
def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer() def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()
# Strip optimizer from 'f' to finalize training, optionally save as 's' # Strip optimizer from 'f' to finalize training, optionally save as 's'
x = torch.load(f, map_location=torch.device('cpu')) x = torch.load(f, map_location=torch.device('cpu'), weights_only=False)
if x.get('ema'): if x.get('ema'):
x['model'] = x['ema'] # replace model with ema x['model'] = x['ema'] # replace model with ema
for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys