mirror of https://github.com/ultralytics/yolov5.git (synced 2025-06-03 14:49:29 +08:00)
Standardize warnings with WARNING ⚠️ ... (#9467)

* Standardize warnings
* [pre-commit.ci] auto fixes from pre-commit.com hooks; for more information, see https://pre-commit.ci

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
This commit is contained in:
parent 295c5e9d3c
commit ca9c993d6c
@@ -91,7 +91,7 @@ def run(
         except Exception as e:
             if hard_fail:
                 assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}'
-            LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}')
+            LOGGER.warning(f'WARNING ⚠️ Benchmark failure for {name}: {e}')
             y.append([name, None, None, None])  # mAP, t_inference
         if pt_only and i == 0:
             break  # break after PyTorch
@@ -114,7 +114,7 @@ def train(opt, device):
         m = hub.list('ultralytics/yolov5')  # + hub.list('pytorch/vision')  # models
         raise ModuleNotFoundError(f'--model {opt.model} not found. Available models are: \n' + '\n'.join(m))
     if isinstance(model, DetectionModel):
-        LOGGER.warning("WARNING: pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'")
+        LOGGER.warning("WARNING ⚠️ pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'")
         model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10)  # convert to classification model
     reshape_classifier_output(model, nc)  # update class count
     for m in model.modules():
@@ -282,7 +282,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose

     if dynamic:
         if im.shape[0] <= 1:
-            LOGGER.warning(f"{prefix}WARNING: --dynamic model requires maximum --batch-size argument")
+            LOGGER.warning(f"{prefix}WARNING ⚠️ --dynamic model requires maximum --batch-size argument")
         profile = builder.create_optimization_profile()
         for inp in inputs:
             profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape)
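For context, the three shapes passed to profile.set_shape above are the (min, opt, max) bounds of the TensorRT dynamic-batch profile; a minimal sketch of the tuple arithmetic with a hypothetical --batch-size 16 export input:

    import torch

    im = torch.zeros(16, 3, 640, 640)                      # hypothetical export input, --batch-size 16
    min_shape = (1, *im.shape[1:])                         # (1, 3, 640, 640)  smallest batch served
    opt_shape = (max(1, im.shape[0] // 2), *im.shape[1:])  # (8, 3, 640, 640)  TensorRT tuning target
    max_shape = tuple(im.shape)                            # (16, 3, 640, 640) largest batch served

This is why the warning fires when im.shape[0] <= 1: with a batch of 1 the min, opt, and max shapes collapse to the same value and the profile gains nothing.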
@@ -47,7 +47,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo
             model = DetectMultiBackend(path, device=device, fuse=autoshape)  # detection model
         if autoshape:
             if model.pt and isinstance(model.model, ClassificationModel):
-                LOGGER.warning('WARNING: ⚠️ YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. '
+                LOGGER.warning('WARNING ⚠️ YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. '
                                'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).')
             else:
                 model = AutoShape(model)  # for file/URI/PIL/cv2/np inputs and NMS
@@ -176,7 +176,7 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio

     # DP mode
     if cuda and RANK == -1 and torch.cuda.device_count() > 1:
-        LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
+        LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
                        'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
         model = torch.nn.DataParallel(model)

@@ -345,7 +345,7 @@ def run(
     pf = '%22s' + '%11i' * 2 + '%11.3g' * 8  # print format
     LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results()))
     if nt.sum() == 0:
-        LOGGER.warning(f'WARNING: no labels found in {task} set, can not compute metrics without labels ⚠️')
+        LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels')

     # Print results per class
     if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
@@ -438,9 +438,9 @@ def main(opt):

     if opt.task in ('train', 'val', 'test'):  # run normally
         if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466
-            LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️')
+            LOGGER.warning(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results')
         if opt.save_hybrid:
-            LOGGER.info('WARNING: --save-hybrid will return high mAP from hybrid labels, not from predictions alone ⚠️')
+            LOGGER.warning('WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone')
         run(**vars(opt))

     else:
train.py
@@ -173,7 +173,7 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio

     # DP mode
     if cuda and RANK == -1 and torch.cuda.device_count() > 1:
-        LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
+        LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
                        'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
         model = torch.nn.DataParallel(model)

@@ -4,9 +4,15 @@ utils/initialization
 """

 import contextlib
+import platform
 import threading


+def emojis(str=''):
+    # Return platform-dependent emoji-safe version of string
+    return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
+
+
 class TryExcept(contextlib.ContextDecorator):
     # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager
     def __init__(self, msg=''):
@@ -17,7 +23,7 @@ class TryExcept(contextlib.ContextDecorator):

     def __exit__(self, exc_type, value, traceback):
         if value:
-            print(f'{self.msg}{value}')
+            print(emojis(f'{self.msg}{value}'))
         return True

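A minimal usage sketch of the two relocated helpers above (the decorated function and its message are hypothetical, not from the diff): TryExcept suppresses the exception by returning True from __exit__ and prints its message through emojis(), so Windows consoles that cannot encode ⚠️ still get readable output.

    from utils import TryExcept, emojis

    @TryExcept('WARNING ⚠️ demo failure: ')
    def flaky():
        raise ValueError('boom')

    flaky()                   # prints 'WARNING ⚠️ demo failure: boom' instead of raising
    print(emojis('Done ✅'))  # emoji kept on Linux/macOS, stripped on Windows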
@@ -38,7 +44,7 @@ def notebook_init(verbose=True):
     import os
     import shutil

-    from utils.general import check_font, check_requirements, emojis, is_colab
+    from utils.general import check_font, check_requirements, is_colab
     from utils.torch_utils import select_device  # imports

     check_requirements(('psutil', 'IPython'))
@@ -122,7 +122,7 @@ def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen
     # Filter
     i = (wh0 < 3.0).any(1).sum()
     if i:
-        LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found: {i} of {len(wh0)} labels are < 3 pixels in size')
+        LOGGER.info(f'{PREFIX}WARNING ⚠️ Extremely small objects found: {i} of {len(wh0)} labels are <3 pixels in size')
     wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32)  # filter > 2 pixels
     # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1)  # multiply by random scale 0-1

@@ -134,7 +134,7 @@ def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen
         k = kmeans(wh / s, n, iter=30)[0] * s  # points
         assert n == len(k)  # kmeans may return fewer points than requested if wh is insufficient or too similar
     except Exception:
-        LOGGER.warning(f'{PREFIX}WARNING: switching strategies from kmeans to random init')
+        LOGGER.warning(f'{PREFIX}WARNING ⚠️ switching strategies from kmeans to random init')
         k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size  # random init
     wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0))
     k = print_results(k, verbose=False)
@@ -65,7 +65,7 @@ def autobatch(model, imgsz=640, fraction=0.8, batch_size=16):
         b = batch_sizes[max(i - 1, 0)]  # select prior safe point
     if b < 1 or b > 1024:  # b outside of safe range
         b = batch_size
-        LOGGER.warning(f'{prefix}WARNING: ⚠️ CUDA anomaly detected, recommend restart environment and retry command.')
+        LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.')

     fraction = np.polyval(p, b) / t  # actual fraction predicted
     LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅')
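For context, np.polyval above evaluates a polynomial fit of measured memory use against batch size; a standalone sketch of that idea under hypothetical numbers (batch_sizes, y, and t below are illustrative, not taken from the diff):

    import numpy as np

    batch_sizes = [1, 2, 4, 8, 16]         # hypothetical probe batch sizes
    y = [0.5, 0.9, 1.7, 3.3, 6.5]          # hypothetical measured memory (GB) per probe
    t = 8.0                                # hypothetical total CUDA memory (GB)
    p = np.polyfit(batch_sizes, y, deg=1)  # linear fit: memory ≈ p[0] * b + p[1]
    b = int((0.8 * t - p[1]) / p[0])       # solve for the batch size hitting the 0.8 target fraction
    fraction = np.polyval(p, b) / t        # actual fraction predicted, as in the hunk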
@@ -116,7 +116,7 @@ def create_dataloader(path,
                       prefix='',
                       shuffle=False):
     if rect and shuffle:
-        LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False')
+        LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False')
         shuffle = False
     with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
         dataset = LoadImagesAndLabels(
@@ -328,7 +328,7 @@ class LoadStreams:
         self.auto = auto and self.rect
         self.transforms = transforms  # optional
         if not self.rect:
-            LOGGER.warning('WARNING: Stream shapes differ. For optimal performance supply similarly-shaped streams.')
+            LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.')

     def update(self, i, cap, stream):
         # Read stream `i` frames in daemon thread
@@ -341,7 +341,7 @@ class LoadStreams:
             if success:
                 self.imgs[i] = im
             else:
-                LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.')
+                LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.')
                 self.imgs[i] = np.zeros_like(self.imgs[i])
                 cap.open(stream)  # re-open stream if signal was lost
             time.sleep(0.0)  # wait time
@@ -543,7 +543,7 @@ class LoadImagesAndLabels(Dataset):
         if msgs:
             LOGGER.info('\n'.join(msgs))
         if nf == 0:
-            LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. {HELP_URL}')
+            LOGGER.warning(f'{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}')
         x['hash'] = get_hash(self.label_files + self.im_files)
         x['results'] = nf, nm, ne, nc, len(self.im_files)
         x['msgs'] = msgs  # warnings
@@ -553,7 +553,7 @@ class LoadImagesAndLabels(Dataset):
             path.with_suffix('.cache.npy').rename(path)  # remove .npy suffix
             LOGGER.info(f'{prefix}New cache created: {path}')
         except Exception as e:
-            LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}')  # not writeable
+            LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable: {e}')  # not writeable
         return x

     def __len__(self):
@@ -917,7 +917,7 @@ def verify_image_label(args):
                 f.seek(-2, 2)
                 if f.read() != b'\xff\xd9':  # corrupt JPEG
                     ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)
-                    msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved'
+                    msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved'

         # verify labels
         if os.path.isfile(lb_file):
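For context, a standalone sketch of the corrupt-JPEG check in that hunk: a well-formed JPEG ends with the two-byte EOI marker b'\xff\xd9', and a truncated file is repaired by decoding and re-saving it (im_file here is a hypothetical path):

    from PIL import Image, ImageOps

    im_file = 'image.jpg'  # hypothetical path
    with open(im_file, 'rb') as f:
        f.seek(-2, 2)                # jump to the last two bytes of the file
        if f.read() != b'\xff\xd9':  # EOI marker missing -> truncated/corrupt JPEG
            ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)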
@@ -939,7 +939,7 @@ def verify_image_label(args):
                 lb = lb[i]  # remove duplicates
                 if segments:
                     segments = [segments[x] for x in i]
-                msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed'
+                msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed'
         else:
             ne = 1  # label empty
             lb = np.zeros((0, 5), dtype=np.float32)
@@ -949,7 +949,7 @@ def verify_image_label(args):
         return im_file, lb, shape, segments, nm, nf, ne, nc, msg
     except Exception as e:
         nc = 1
-        msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}'
+        msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}'
         return [None, None, None, None, nm, nf, ne, nc, msg]

@@ -1012,7 +1012,7 @@ class HUBDatasetStats():
             im = im.resize((int(im.width * r), int(im.height * r)))
             im.save(f_new, 'JPEG', quality=50, optimize=True)  # save
         except Exception as e:  # use OpenCV
-            print(f'WARNING: HUB ops PIL failure {f}: {e}')
+            LOGGER.info(f'WARNING ⚠️ HUB ops PIL failure {f}: {e}')
             im = cv2.imread(f)
             im_height, im_width = im.shape[:2]
             r = max_dim / max(im_height, im_width)  # ratio
@@ -34,7 +34,7 @@ import torch
 import torchvision
 import yaml

-from utils import TryExcept
+from utils import TryExcept, emojis
 from utils.downloads import gsutil_getsize
 from utils.metrics import box_iou, fitness

@@ -248,11 +248,6 @@ def get_latest_run(search_dir='.'):
     return max(last_list, key=os.path.getctime) if last_list else ''


-def emojis(str=''):
-    # Return platform-dependent emoji-safe version of string
-    return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
-
-
 def file_age(path=__file__):
     # Return days since last file update
     dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime))  # delta
@@ -333,7 +328,7 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals
     # Check version vs. required version
     current, minimum = (pkg.parse_version(x) for x in (current, minimum))
     result = (current == minimum) if pinned else (current >= minimum)  # bool
-    s = f'WARNING: ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed'  # string
+    s = f'WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed'  # string
     if hard:
         assert result, emojis(s)  # assert min requirements met
     if verbose and not result:
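A minimal usage sketch of check_version as shown in this hunk, assuming it returns the computed bool: hard=True asserts with the emoji-safe message when the requirement is unmet, while verbose=True only logs the WARNING ⚠️ string.

    import torch
    from utils.general import check_version

    check_version(torch.__version__, '1.7.0', name='torch ', hard=True)  # AssertionError if unmet
    ok = check_version(torch.__version__, '99.0.0', verbose=True)        # False, warning logged instead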
@@ -373,7 +368,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta
                 f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
             LOGGER.info(s)
         except Exception as e:
-            LOGGER.warning(f'{prefix} {e}')
+            LOGGER.warning(f'{prefix} ❌ {e}')


 def check_img_size(imgsz, s=32, floor=0):
@@ -384,7 +379,7 @@ def check_img_size(imgsz, s=32, floor=0):
         imgsz = list(imgsz)  # convert to list if tuple
     new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]
     if new_size != imgsz:
-        LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')
+        LOGGER.warning(f'WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')
     return new_size

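For context, a quick sketch of the rounding this warning describes, assuming make_divisible rounds up to the nearest multiple of the stride (defined locally here to keep the sketch self-contained):

    import math

    def make_divisible(x, divisor):
        # round x up to the nearest multiple of divisor
        return math.ceil(x / divisor) * divisor

    make_divisible(1000, 32)  # -> 1024, so --img-size 1000 triggers the WARNING ⚠️ and becomes 1024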
@@ -399,7 +394,7 @@ def check_imshow():
         cv2.waitKey(1)
         return True
     except Exception as e:
-        LOGGER.warning(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
+        LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
         return False

@@ -589,9 +584,9 @@ def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry
             if success:
                 break
             elif i < retry:
-                LOGGER.warning(f'Download failure, retrying {i + 1}/{retry} {url}...')
+                LOGGER.warning(f'⚠️ Download failure, retrying {i + 1}/{retry} {url}...')
             else:
-                LOGGER.warning(f'Failed to download {url}...')
+                LOGGER.warning(f'❌ Failed to download {url}...')

         if unzip and success and f.suffix in ('.zip', '.tar', '.gz'):
             LOGGER.info(f'Unzipping {f}...')
@@ -908,7 +903,7 @@ def non_max_suppression(

         output[xi] = x[i]
         if (time.time() - t) > time_limit:
-            LOGGER.warning(f'WARNING: NMS time limit {time_limit:.3f}s exceeded')
+            LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded')
             break  # time limit exceeded

     return output
@@ -11,7 +11,7 @@ import pkg_resources as pkg
 import torch
 from torch.utils.tensorboard import SummaryWriter

-from utils.general import colorstr, cv2
+from utils.general import LOGGER, colorstr, cv2
 from utils.loggers.clearml.clearml_utils import ClearmlLogger
 from utils.loggers.wandb.wandb_utils import WandbLogger
 from utils.plots import plot_images, plot_labels, plot_results
@@ -393,7 +393,7 @@ def log_tensorboard_graph(tb, model, imgsz=(640, 640)):
         warnings.simplefilter('ignore')  # suppress jit trace warning
         tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), [])
     except Exception as e:
-        print(f'WARNING: TensorBoard graph visualization failure {e}')
+        LOGGER.warning(f'WARNING ⚠️ TensorBoard graph visualization failure {e}')


 def web_project_name(project):
@@ -186,7 +186,7 @@ class ConfusionMatrix:
         # fn = self.matrix.sum(0) - tp  # false negatives (missed detections)
         return tp[:-1], fp[:-1]  # remove background class

-    @TryExcept('WARNING: ConfusionMatrix plot failure: ')
+    @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure: ')
     def plot(self, normalize=True, save_dir='', names=()):
         import seaborn as sn

@@ -37,7 +37,7 @@ def create_dataloader(path,
                       mask_downsample_ratio=1,
                       overlap_mask=False):
     if rect and shuffle:
-        LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False')
+        LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False')
         shuffle = False
     with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
         dataset = LoadImagesAndLabelsAndMasks(
@@ -47,7 +47,7 @@ def smartCrossEntropyLoss(label_smoothing=0.0):
     if check_version(torch.__version__, '1.10.0'):
         return nn.CrossEntropyLoss(label_smoothing=label_smoothing)
     if label_smoothing > 0:
-        LOGGER.warning(f'WARNING: label smoothing {label_smoothing} requires torch>=1.10.0')
+        LOGGER.warning(f'WARNING ⚠️ label smoothing {label_smoothing} requires torch>=1.10.0')
     return nn.CrossEntropyLoss()

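A minimal usage sketch of the helper above: on torch>=1.10 it returns a label-smoothed cross-entropy loss, while older versions log the WARNING ⚠️ and fall back to plain nn.CrossEntropyLoss (the batch below is hypothetical):

    import torch
    from utils.torch_utils import smartCrossEntropyLoss

    criterion = smartCrossEntropyLoss(label_smoothing=0.1)
    logits, targets = torch.randn(4, 10), torch.randint(0, 10, (4,))  # hypothetical batch
    loss = criterion(logits, targets)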
val.py
@@ -282,7 +282,7 @@ def run(
     pf = '%22s' + '%11i' * 2 + '%11.3g' * 4  # print format
     LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
     if nt.sum() == 0:
-        LOGGER.warning(f'WARNING: no labels found in {task} set, can not compute metrics without labels ⚠️')
+        LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels')

     # Print results per class
     if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
@@ -374,9 +374,9 @@ def main(opt):

     if opt.task in ('train', 'val', 'test'):  # run normally
         if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466
-            LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️')
+            LOGGER.info(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results')
         if opt.save_hybrid:
-            LOGGER.info('WARNING: --save-hybrid will return high mAP from hybrid labels, not from predictions alone ⚠️')
+            LOGGER.info('WARNING ⚠️ --save-hybrid will return high mAP from hybrid labels, not from predictions alone')
         run(**vars(opt))

     else: