fix: pytorch 2.6 updates for all torch.load call sites

pull/2117/head
Roman KC 2025-03-16 12:14:34 +05:45
parent 41da49e028
commit dc195aa6f9
No known key found for this signature in database
GPG Key ID: 94251AC9FE7364C4
11 changed files with 74 additions and 75 deletions
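Context for this change: PyTorch 2.6 flipped the default of torch.load's weights_only argument from False to True. YOLOv7 checkpoints pickle entire nn.Module objects rather than plain tensor state_dicts, so under the new default every torch.load call below fails with an UnpicklingError. The commit therefore opts out explicitly at each call site; a minimal sketch of the failure mode and the fix (checkpoint path illustrative):

import torch

# PyTorch >= 2.6: weights_only defaults to True and refuses to unpickle
# arbitrary Python objects such as the full nn.Module inside a YOLOv7 .pt.
# ckpt = torch.load('yolov7.pt', map_location='cpu')  # UnpicklingError on 2.6
ckpt = torch.load('yolov7.pt', map_location='cpu', weights_only=False)  # restores pre-2.6 behaviour

Note that weights_only=False re-enables unrestricted unpickling, which can execute arbitrary code, so it is only appropriate for checkpoints from a trusted source.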


@@ -45,7 +45,7 @@ def detect(save_img=False):
     classify = False
     if classify:
         modelc = load_classifier(name='resnet101', n=2)  # initialize
-        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval()
+        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device, weights_only=False)['model']).to(device).eval()

     # Set Dataloader
     vid_path, vid_writer = None, None


@@ -37,7 +37,7 @@ def create(name, pretrained, channels, classes, autoshape):
         if pretrained:
             fname = f'{name}.pt'  # checkpoint filename
             attempt_download(fname)  # download if not found locally
-            ckpt = torch.load(fname, map_location=torch.device('cpu'))  # load
+            ckpt = torch.load(fname, map_location=torch.device('cpu'), weights_only=False)  # load
             msd = model.state_dict()  # model state_dict
             csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
             csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape}  # filter
@@ -65,7 +65,7 @@ def custom(path_or_model='path/to/model.pt', autoshape=True):
     Returns:
         pytorch model
     """
-    model = torch.load(path_or_model, map_location=torch.device('cpu')) if isinstance(path_or_model, str) else path_or_model  # load checkpoint
+    model = torch.load(path_or_model, map_location=torch.device('cpu'), weights_only=False) if isinstance(path_or_model, str) else path_or_model  # load checkpoint
     if isinstance(model, dict):
         model = model['ema' if model.get('ema') else 'model']  # load model


@@ -249,9 +249,9 @@ def attempt_load(weights, map_location=None):
     model = Ensemble()
     for w in weights if isinstance(weights, list) else [weights]:
         attempt_download(w)
-        ckpt = torch.load(w, map_location=map_location)  # load
+        ckpt = torch.load(w, map_location=map_location, weights_only=False)  # load
         model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval())  # FP32 model

     # Compatibility updates
     for m in model.modules():
         if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
@@ -260,7 +260,7 @@ def attempt_load(weights, map_location=None):
             m.recompute_scale_factor = None  # torch 1.11.0 compatibility
         elif type(m) is Conv:
             m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility

     if len(model) == 1:
         return model[-1]  # return model
     else:
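Worth noting as an alternative for attempt_load: instead of disabling the check, PyTorch (2.4+) can allowlist trusted classes so weights_only=True stays in force. A sketch, assuming the checkpoint references only the registered names; real YOLOv7 checkpoints pickle additional types, so the list would need to grow:

import torch
from models.yolo import Model  # one of the classes pickled inside YOLOv7 checkpoints

# Register trusted globals for the weights_only unpickler.
torch.serialization.add_safe_globals([Model])
ckpt = torch.load('yolov7.pt', map_location='cpu')  # weights_only=True default still applies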

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -56,7 +56,7 @@
     "\n",
     "device = select_device('0', batch_size=1)\n",
     "# model trained by cfg/training/*.yaml\n",
-    "ckpt = torch.load('cfg/training/yolov7_training.pt', map_location=device)\n",
+    "ckpt = torch.load('cfg/training/yolov7_training.pt', map_location=device, weights_only=False)\n",
     "# reparameterized model in cfg/deploy/*.yaml\n",
     "model = Model('cfg/deploy/yolov7.yaml', ch=3, nc=80).to(device)\n",
     "\n",
@@ -118,7 +118,7 @@
     "\n",
     "device = select_device('0', batch_size=1)\n",
     "# model trained by cfg/training/*.yaml\n",
-    "ckpt = torch.load('cfg/training/yolov7x_trainig.pt', map_location=device)\n",
+    "ckpt = torch.load('cfg/training/yolov7x_trainig.pt', map_location=device, weights_only=False)\n",
     "# reparameterized model in cfg/deploy/*.yaml\n",
     "model = Model('cfg/deploy/yolov7x.yaml', ch=3, nc=80).to(device)\n",
     "\n",
@@ -180,7 +180,7 @@
     "\n",
     "device = select_device('0', batch_size=1)\n",
     "# model trained by cfg/training/*.yaml\n",
-    "ckpt = torch.load('cfg/training/yolov7-w6_trainig.pt', map_location=device)\n",
+    "ckpt = torch.load('cfg/training/yolov7-w6_trainig.pt', map_location=device, weights_only=False)\n",
     "# reparameterized model in cfg/deploy/*.yaml\n",
     "model = Model('cfg/deploy/yolov7-w6.yaml', ch=3, nc=80).to(device)\n",
     "\n",
@@ -266,7 +266,7 @@
     "\n",
     "device = select_device('0', batch_size=1)\n",
     "# model trained by cfg/training/*.yaml\n",
-    "ckpt = torch.load('cfg/training/yolov7-e6.pt', map_location=device)\n",
+    "ckpt = torch.load('cfg/training/yolov7-e6.pt', map_location=device, weights_only=False)\n",
     "# reparameterized model in cfg/deploy/*.yaml\n",
     "model = Model('cfg/deploy/yolov7-e6.yaml', ch=3, nc=80).to(device)\n",
     "\n",
@@ -352,7 +352,7 @@
     "\n",
     "device = select_device('0', batch_size=1)\n",
     "# model trained by cfg/training/*.yaml\n",
-    "ckpt = torch.load('cfg/training/yolov7-d6_trainig.pt', map_location=device)\n",
+    "ckpt = torch.load('cfg/training/yolov7-d6_trainig.pt', map_location=device, weights_only=False)\n",
     "# reparameterized model in cfg/deploy/*.yaml\n",
     "model = Model('cfg/deploy/yolov7-d6.yaml', ch=3, nc=80).to(device)\n",
     "\n",
@@ -438,7 +438,7 @@
     "\n",
     "device = select_device('0', batch_size=1)\n",
     "# model trained by cfg/training/*.yaml\n",
-    "ckpt = torch.load('cfg/training/yolov7-e6e_trainig.pt', map_location=device)\n",
+    "ckpt = torch.load('cfg/training/yolov7-e6e_trainig.pt', map_location=device, weights_only=False)\n",
     "# reparameterized model in cfg/deploy/*.yaml\n",
     "model = Model('cfg/deploy/yolov7-e6e.yaml', ch=3, nc=80).to(device)\n",
     "\n",

File diff suppressed because one or more lines are too long


@@ -68,7 +68,7 @@ def train(hyp, opt, device, tb_writer=None):
     loggers = {'wandb': None}  # loggers dict
     if rank in [-1, 0]:
         opt.hyp = hyp  # add hyperparameters
-        run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
+        run_id = torch.load(weights, weights_only=False).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
         wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict)
         loggers['wandb'] = wandb_logger.wandb
         data_dict = wandb_logger.data_dict
@@ -84,7 +84,7 @@ def train(hyp, opt, device, tb_writer=None):
     if pretrained:
         with torch_distributed_zero_first(rank):
             attempt_download(weights)  # download if not found locally
-        ckpt = torch.load(weights, map_location=device)  # load checkpoint
+        ckpt = torch.load(weights, map_location=device, weights_only=False)  # load checkpoint
         model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
         exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else []  # exclude keys
         state_dict = ckpt['model'].float().state_dict()  # to FP32
@@ -121,60 +121,60 @@ def train(hyp, opt, device, tb_writer=None):
         elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
             pg1.append(v.weight)  # apply decay
         if hasattr(v, 'im'):
             if hasattr(v.im, 'implicit'):
                 pg0.append(v.im.implicit)
             else:
                 for iv in v.im:
                     pg0.append(iv.implicit)
         if hasattr(v, 'imc'):
             if hasattr(v.imc, 'implicit'):
                 pg0.append(v.imc.implicit)
             else:
                 for iv in v.imc:
                     pg0.append(iv.implicit)
         if hasattr(v, 'imb'):
             if hasattr(v.imb, 'implicit'):
                 pg0.append(v.imb.implicit)
             else:
                 for iv in v.imb:
                     pg0.append(iv.implicit)
         if hasattr(v, 'imo'):
             if hasattr(v.imo, 'implicit'):
                 pg0.append(v.imo.implicit)
             else:
                 for iv in v.imo:
                     pg0.append(iv.implicit)
         if hasattr(v, 'ia'):
             if hasattr(v.ia, 'implicit'):
                 pg0.append(v.ia.implicit)
             else:
                 for iv in v.ia:
                     pg0.append(iv.implicit)
         if hasattr(v, 'attn'):
             if hasattr(v.attn, 'logit_scale'):
                 pg0.append(v.attn.logit_scale)
             if hasattr(v.attn, 'q_bias'):
                 pg0.append(v.attn.q_bias)
             if hasattr(v.attn, 'v_bias'):
                 pg0.append(v.attn.v_bias)
             if hasattr(v.attn, 'relative_position_bias_table'):
                 pg0.append(v.attn.relative_position_bias_table)
         if hasattr(v, 'rbr_dense'):
             if hasattr(v.rbr_dense, 'weight_rbr_origin'):
                 pg0.append(v.rbr_dense.weight_rbr_origin)
             if hasattr(v.rbr_dense, 'weight_rbr_avg_conv'):
                 pg0.append(v.rbr_dense.weight_rbr_avg_conv)
             if hasattr(v.rbr_dense, 'weight_rbr_pfir_conv'):
                 pg0.append(v.rbr_dense.weight_rbr_pfir_conv)
             if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_idconv1'):
                 pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_idconv1)
             if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_conv2'):
                 pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_conv2)
             if hasattr(v.rbr_dense, 'weight_rbr_gconv_dw'):
                 pg0.append(v.rbr_dense.weight_rbr_gconv_dw)
             if hasattr(v.rbr_dense, 'weight_rbr_gconv_pw'):
                 pg0.append(v.rbr_dense.weight_rbr_gconv_pw)
             if hasattr(v.rbr_dense, 'vector'):
                 pg0.append(v.rbr_dense.vector)

     if opt.adam:
@@ -642,12 +642,12 @@ if __name__ == '__main__':
                 'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
                 'mosaic': (1, 0.0, 1.0),  # image mixup (probability)
                 'mixup': (1, 0.0, 1.0)}  # image mixup (probability)

         with open(opt.hyp, errors='ignore') as f:
             hyp = yaml.safe_load(f)  # load hyps dict
             if 'anchors' not in hyp:  # anchors commented in hyp.yaml
                 hyp['anchors'] = 3

         assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
         opt.notest, opt.nosave = True, True  # only test/save final epoch
         # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
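A portability note on these train.py call sites: torch.load only accepts the weights_only keyword from PyTorch 1.13 onward, so the change as written drops support for older installs. If that matters, the opt-out could be centralized in a small helper; load_ckpt below is a hypothetical sketch, not a function in this repo:

import torch

def load_ckpt(path, map_location=None):
    # Hypothetical version-tolerant loader: torch >= 2.6 needs an explicit
    # weights_only=False here, while torch < 1.13 rejects the keyword entirely.
    try:
        return torch.load(path, map_location=map_location, weights_only=False)
    except TypeError:  # torch < 1.13: no weights_only parameter
        return torch.load(path, map_location=map_location)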


@@ -13,7 +13,7 @@ sys.path.append('./')  # to run '$ python *.py' files in subdirectories
 port = 0  # --master_port
 path = Path('').resolve()
 for last in path.rglob('*/**/last.pt'):
-    ckpt = torch.load(last)
+    ckpt = torch.load(last, weights_only=False)
     if ckpt['optimizer'] is None:
         continue


@@ -361,7 +361,7 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
         self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
         self.mosaic_border = [-img_size // 2, -img_size // 2]
         self.stride = stride
         self.path = path
         #self.albumentations = Albumentations() if augment else None

         try:
@@ -389,7 +389,7 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
         self.label_files = img2label_paths(self.img_files)  # labels
         cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')  # cached labels
         if cache_path.is_file():
-            cache, exists = torch.load(cache_path), True  # load
+            cache, exists = torch.load(cache_path, weights_only=False), True  # load
             #if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache:  # changed
             #    cache, exists = self.cache_labels(cache_path, prefix), False  # re-cache
         else:
@@ -576,8 +576,8 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
                                                  scale=hyp['scale'],
                                                  shear=hyp['shear'],
                                                  perspective=hyp['perspective'])

             #img, labels = self.albumentations(img, labels)

             # Augment colorspace
@@ -586,9 +586,9 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
             # Apply cutouts
             # if random.random() < 0.9:
             #     labels = cutout(img, labels)

             if random.random() < hyp['paste_in']:
                 sample_labels, sample_images, sample_masks = [], [], []
                 while len(sample_labels) < 30:
                     sample_labels_, sample_images_, sample_masks_ = load_samples(self, random.randint(0, len(self.labels) - 1))
                     sample_labels += sample_labels_
@@ -925,7 +925,7 @@ def remove_background(img, labels, segments):
         cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)

     result = cv2.bitwise_and(src1=img, src2=im_new)
     i = result > 0  # pixels to replace
     img_new[i] = result[i]  # cv2.imwrite('debug.jpg', img)  # debug
@@ -942,19 +942,19 @@ def sample_segments(img, labels, segments, probability=0.5):
     h, w, c = img.shape  # height, width, channels
     for j in random.sample(range(n), k=round(probability * n)):
         l, s = labels[j], segments[j]
         box = l[1].astype(int).clip(0,w-1), l[2].astype(int).clip(0,h-1), l[3].astype(int).clip(0,w-1), l[4].astype(int).clip(0,h-1)

         #print(box)
         if (box[2] <= box[0]) or (box[3] <= box[1]):
             continue

         sample_labels.append(l[0])

         mask = np.zeros(img.shape, np.uint8)
         cv2.drawContours(mask, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)
         sample_masks.append(mask[box[1]:box[3],box[0]:box[2],:])

         result = cv2.bitwise_and(src1=img, src2=mask)
         i = result > 0  # pixels to replace
         mask[i] = result[i]  # cv2.imwrite('debug.jpg', img)  # debug
@@ -1128,7 +1128,7 @@ def bbox_ioa(box1, box2):

     # Intersection over box2 area
     return inter_area / box2_area


 def cutout(image, labels):
     # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
@@ -1156,7 +1156,7 @@ def cutout(image, labels):
             labels = labels[ioa < 0.60]  # remove >60% obscured labels

     return labels


 def pastein(image, labels, sample_labels, sample_images, sample_masks):
     # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
@@ -1174,14 +1174,14 @@ def pastein(image, labels, sample_labels, sample_images, sample_masks):
         xmin = max(0, random.randint(0, w) - mask_w // 2)
         ymin = max(0, random.randint(0, h) - mask_h // 2)
         xmax = min(w, xmin + mask_w)
         ymax = min(h, ymin + mask_h)

         box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
         if len(labels):
             ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
         else:
             ioa = np.zeros(1)

         if (ioa < 0.30).all() and len(sample_labels) and (xmax > xmin+20) and (ymax > ymin+20):  # allow 30% obscuration of existing labels
             sel_ind = random.randint(0, len(sample_labels)-1)
             #print(len(sample_labels))
@@ -1194,7 +1194,7 @@ def pastein(image, labels, sample_labels, sample_images, sample_masks):
             r_scale = min((ymax-ymin)/hs, (xmax-xmin)/ws)
             r_w = int(ws*r_scale)
             r_h = int(hs*r_scale)

             if (r_w > 10) and (r_h > 10):
                 r_mask = cv2.resize(sample_masks[sel_ind], (r_w, r_h))
                 r_image = cv2.resize(sample_images[sel_ind], (r_w, r_h))
@@ -1210,7 +1210,7 @@ def pastein(image, labels, sample_labels, sample_images, sample_masks):
                         labels = np.concatenate((labels, [[sample_labels[sel_ind], *box]]), 0)
                     else:
                         labels = np.array([[sample_labels[sel_ind], *box]])

                     image[ymin:ymin+r_h, xmin:xmin+r_w] = temp_crop

     return labels
@@ -1311,8 +1311,8 @@ def autosplit(path='../coco', weights=(0.9, 0.1, 0.0), annotated_only=False):
         if not annotated_only or Path(img2label_paths([str(img)])[0]).exists():  # check label
             with open(path / txt[i], 'a') as f:
                 f.write(str(img) + '\n')  # add image to txt file


 def load_segmentations(self, index):
     key = '/work/handsomejw66/coco17/' + self.img_files[index]
     #print(key)


@@ -493,7 +493,7 @@ def box_giou(box1, box2):
     area1 = box_area(box1.T)
     area2 = box_area(box2.T)

     inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
     union = (area1[:, None] + area2 - inter)
@@ -528,7 +528,7 @@ def box_ciou(box1, box2, eps: float = 1e-7):
     area1 = box_area(box1.T)
     area2 = box_area(box2.T)

     inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
     union = (area1[:, None] + area2 - inter)
@@ -580,7 +580,7 @@ def box_diou(box1, box2, eps: float = 1e-7):
     area1 = box_area(box1.T)
     area2 = box_area(box2.T)

     inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
     union = (area1[:, None] + area2 - inter)
@@ -799,7 +799,7 @@ def non_max_suppression_kpt(prediction, conf_thres=0.25, iou_thres=0.45, classes

 def strip_optimizer(f='best.pt', s=''):  # from utils.general import *; strip_optimizer()
     # Strip optimizer from 'f' to finalize training, optionally save as 's'
-    x = torch.load(f, map_location=torch.device('cpu'))
+    x = torch.load(f, map_location=torch.device('cpu'), weights_only=False)
     if x.get('ema'):
         x['model'] = x['ema']  # replace model with ema
     for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates':  # keys
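Longer term, these opt-outs could be retired by re-saving checkpoints as plain state_dicts, which load cleanly under the weights_only=True default. A one-time conversion sketch (file names illustrative):

import torch

# Load the legacy checkpoint once with the opt-out, then persist tensors only.
ckpt = torch.load('best.pt', map_location='cpu', weights_only=False)
torch.save({'state_dict': ckpt['model'].float().state_dict()}, 'best_state_dict.pt')

Loading the converted file then means rebuilding the network from its cfg/*.yaml and calling model.load_state_dict, since the module object itself is no longer stored.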