remove check_requirements

pull/13506/head
Edward Yang 2025-02-08 13:20:54 +11:00
parent 718dd8cc8d
commit 915d4ec0a4
17 changed files with 16 additions and 69 deletions

@@ -46,7 +46,7 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
 from models.common import DetectMultiBackend
 from utils.augmentations import classify_transforms
 from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
-from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
+from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, colorstr, cv2,
                            increment_path, print_args, strip_optimizer)
 from utils.plots import Annotator
 from utils.torch_utils import select_device, smart_inference_mode
@@ -217,7 +217,6 @@ def parse_opt():
 def main(opt):
-    check_requirements(exclude=('tensorboard', 'thop'))
     run(**vars(opt))

@@ -41,7 +41,7 @@ from models.experimental import attempt_load
 from models.yolo import ClassificationModel, DetectionModel
 from utils.dataloaders import create_classification_dataloader
 from utils.general import (DATASETS_DIR, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_info, check_git_status,
-                           check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save)
+                           colorstr, download, increment_path, init_seeds, print_args, yaml_save)
 from utils.loggers import GenericLogger
 from utils.plots import imshow_cls
 from utils.torch_utils import (ModelEMA, de_parallel, model_info, reshape_classifier_output, select_device, smart_DDP,
@@ -300,7 +300,6 @@ def main(opt):
     if RANK in {-1, 0}:
         print_args(vars(opt))
         check_git_status()
-        check_requirements()
     # DDP mode
     device = select_device(opt.device, batch_size=opt.batch_size)

@@ -36,7 +36,7 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
 from models.common import DetectMultiBackend
 from utils.dataloaders import create_classification_dataloader
-from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_img_size, check_requirements, colorstr,
+from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_img_size, colorstr,
                            increment_path, print_args)
 from utils.torch_utils import select_device, smart_inference_mode
@@ -161,7 +161,6 @@ def parse_opt():
 def main(opt):
-    check_requirements(exclude=('tensorboard', 'thop'))
     run(**vars(opt))

@@ -386,9 +386,8 @@ names:
 download: |
   from tqdm import tqdm
-  from utils.general import Path, check_requirements, download, np, xyxy2xywhn
+  from utils.general import Path, download, np, xyxy2xywhn
-  check_requirements(('pycocotools>=2.0',))
   from pycocotools.coco import COCO
   # Make Directories

@@ -44,7 +44,7 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
 from models.common import DetectMultiBackend
 from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
-from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
+from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, colorstr, cv2,
                            increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
 from utils.plots import Annotator, colors, save_one_box
 from utils.torch_utils import select_device, smart_inference_mode
@@ -252,7 +252,6 @@ def parse_opt():
 def main(opt):
-    check_requirements(exclude=('tensorboard', 'thop'))
     run(**vars(opt))

@@ -77,7 +77,7 @@ if platform.system() != 'Windows':
 from models.experimental import attempt_load
 from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel, Segment
 from utils.dataloaders import LoadImages
-from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version,
+from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_version,
                            check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save)
 from utils.torch_utils import select_device, smart_inference_mode
@@ -116,7 +116,6 @@ def try_export(inner_func):
 @try_export
 def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')):
     # YOLOv5 ONNX export
-    check_requirements('onnx>=1.12.0')
     import onnx
     LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
@@ -157,7 +156,6 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX
     if simplify:
         try:
             cuda = torch.cuda.is_available()
-            check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1'))
             import onnxsim
             LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
@@ -172,7 +170,6 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX
 @try_export
 def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')):
     # YOLOv5 OpenVINO export
-    check_requirements('openvino-dev')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
     import openvino.inference_engine as ie
     LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
@@ -198,8 +195,6 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose
     try:
         import tensorrt as trt
     except Exception:
-        if platform.system() == 'Linux':
-            check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com')
         import tensorrt as trt
     if trt.__version__[0] == '7':  # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
@@ -255,7 +250,6 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose
 @try_export
 def export_rknn(model, batch_size, int8, data, prefix=colorstr('RKNN:')):
     # YOLOv5 RKNN export
-    check_requirements('rknn-toolkit2')
     from rknn.api import RKNN
     # Create RKNN object
     rknn = RKNN(verbose=False)

@@ -34,12 +34,11 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo
     from models.experimental import attempt_load
     from models.yolo import ClassificationModel, DetectionModel, SegmentationModel
     from utils.downloads import attempt_download
-    from utils.general import LOGGER, check_requirements, intersect_dicts, logging
+    from utils.general import LOGGER, intersect_dicts, logging
     from utils.torch_utils import select_device
     if not verbose:
         LOGGER.setLevel(logging.WARNING)
-    check_requirements(exclude=('opencv-python', 'tensorboard', 'thop'))
     name = Path(name)
     path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name  # checkpoint path
     try:

@@ -27,7 +27,7 @@ from torch.cuda import amp
 from utils import TryExcept
 from utils.dataloaders import exif_transpose, letterbox
-from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr,
+from utils.general import (LOGGER, ROOT, Profile, check_suffix, check_version, colorstr,
                            increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy,
                            xyxy2xywh, yaml_load)
 from utils.plots import Annotator, colors, save_one_box
@@ -415,11 +415,9 @@ class DetectMultiBackend(nn.Module):
             stride, names = int(d['stride']), d['names']
         elif dnn:  # ONNX OpenCV DNN
             LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
-            check_requirements('opencv-python>=4.5.4')
             net = cv2.dnn.readNetFromONNX(w)
         elif onnx:  # ONNX Runtime
             LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
-            check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
             import onnxruntime
             providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
             session = onnxruntime.InferenceSession(w, providers=providers)
@@ -429,7 +427,6 @@ class DetectMultiBackend(nn.Module):
             stride, names = int(meta['stride']), eval(meta['names'])
         elif xml:  # OpenVINO
             LOGGER.info(f'Loading {w} for OpenVINO inference...')
-            check_requirements('openvino')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
             from openvino.runtime import Core, Layout, get_batch
             ie = Core()
             if not Path(w).is_file():  # if not *.xml
@@ -531,7 +528,6 @@ class DetectMultiBackend(nn.Module):
             raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')
         elif paddle:  # PaddlePaddle
             LOGGER.info(f'Loading {w} for PaddlePaddle inference...')
-            check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')
             import paddle.inference as pdi
             if not Path(w).is_file():  # if not *.pdmodel
                 w = next(Path(w).rglob('*.pdmodel'))  # get *.pdmodel file from *_paddle_model dir
@@ -544,7 +540,6 @@ class DetectMultiBackend(nn.Module):
             output_names = predictor.get_output_names()
         elif triton:  # NVIDIA Triton Inference Server
             LOGGER.info(f'Using {w} as Triton Inference Server...')
-            check_requirements('tritonclient[all]')
             from utils.triton import TritonRemoteModel
             model = TritonRemoteModel(url=w)
             nhwc = model.runtime.startswith('tensorflow')

@@ -44,7 +44,7 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
 from models.common import DetectMultiBackend
 from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
-from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
+from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, colorstr, cv2,
                            increment_path, non_max_suppression, print_args, scale_boxes, scale_segments,
                            strip_optimizer)
 from utils.plots import Annotator, colors, save_one_box
@@ -275,7 +275,6 @@ def parse_opt():
 def main(opt):
-    check_requirements(exclude=('tensorboard', 'thop'))
     run(**vars(opt))

@@ -48,7 +48,7 @@ from utils.autobatch import check_train_batch_size
 from utils.callbacks import Callbacks
 from utils.downloads import attempt_download, is_url
 from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info,
-                           check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr,
+                           check_git_status, check_img_size, check_suffix, check_yaml, colorstr,
                            get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights,
                            labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save)
 from utils.loggers import GenericLogger
@@ -509,7 +509,6 @@ def main(opt, callbacks=Callbacks()):
     if RANK in {-1, 0}:
         print_args(vars(opt))
         check_git_status()
-        check_requirements()
     # Resume
     if opt.resume and not opt.evolve:  # resume from specified or most recent last.pt

@@ -44,7 +44,7 @@ from models.common import DetectMultiBackend
 from models.yolo import SegmentationModel
 from utils.callbacks import Callbacks
 from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size,
-                           check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path,
+                           check_yaml, coco80_to_coco91_class, colorstr, increment_path,
                            non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
 from utils.metrics import ConfusionMatrix, box_iou
 from utils.plots import output_to_target, plot_val_study
@@ -160,7 +160,6 @@ def run(
         callbacks=Callbacks(),
 ):
     if save_json:
-        check_requirements('pycocotools>=2.0.6')
         process = process_mask_native  # more accurate
     else:
         process = process_mask  # faster
@@ -434,7 +433,6 @@ def parse_opt():
 def main(opt):
-    check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
     if opt.task in ('train', 'val', 'test'):  # run normally
         if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466

@@ -49,7 +49,7 @@ from utils.callbacks import Callbacks
 from utils.dataloaders import create_dataloader
 from utils.downloads import attempt_download, is_url
 from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info,
-                           check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr,
+                           check_img_size, check_suffix, check_yaml, colorstr,
                            get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights,
                            labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer,
                            yaml_save)
@@ -480,8 +480,6 @@ def main(opt, callbacks=Callbacks()):
     # Checks
     if RANK in {-1, 0}:
         print_args(vars(opt))
-        check_git_status()
-        check_requirements()
     # Resume (from specified or most recent last.pt)
     if opt.resume and not opt.evolve:

@@ -54,7 +54,7 @@ def notebook_init(verbose=True):
     import os
     import shutil
-    from utils.general import check_font, check_requirements, is_colab
+    from utils.general import check_font, is_colab
     from utils.torch_utils import select_device  # imports
     check_font()

@@ -30,7 +30,7 @@ from tqdm import tqdm
 from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste,
                                  letterbox, mixup, random_perspective)
-from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, check_dataset, check_requirements,
+from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, check_dataset,
                            check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy,
                            xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
 from utils.torch_utils import torch_distributed_zero_first
@@ -191,7 +191,6 @@ class LoadScreenshots:
     # YOLOv5 screenshot dataloader, i.e. `python detect.py --source "screen 0 100 100 512 256"`
     def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None):
         # source = [screen_number left top width height] (pixels)
-        check_requirements('mss')
         import mss
         source, *params = source.split()
@@ -356,7 +355,6 @@ class LoadStreams:
             st = f'{i + 1}/{n}: {s}... '
             if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'):  # if source is YouTube video
                 # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc'
-                check_requirements(('pafy', 'youtube_dl==2020.12.2'))
                 import pafy
                 s = pafy.new(s).getbest(preftype='mp4').url  # YouTube URL
             s = eval(s) if s.isnumeric() else s  # i.e. s = '0' local webcam

@@ -355,7 +355,6 @@ def check_git_status(repo='ultralytics/yolov5', branch='master'):
 @WorkingDirectory(ROOT)
 def check_git_info(path='.'):
     # YOLOv5 git info check, return {remote, branch, commit}
-    check_requirements('gitpython')
     import git
     try:
         repo = git.Repo(path)
@@ -387,29 +386,6 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals
     return result
-@TryExcept()
-def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''):
-    # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str)
-    prefix = colorstr('red', 'bold', 'requirements:')
-    check_python()  # check python version
-    if isinstance(requirements, Path):  # requirements.txt file
-        file = requirements.resolve()
-        assert file.exists(), f'{prefix} {file} not found, check failed.'
-        with file.open() as f:
-            requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]
-    elif isinstance(requirements, str):
-        requirements = [requirements]
-    s = ''
-    n = 0
-    for r in requirements:
-        try:
-            pkg.require(r)
-        except (pkg.VersionConflict, pkg.DistributionNotFound):  # exception if requirements not met
-            s += f'"{r}" '
-            n += 1
 def check_img_size(imgsz, s=32, floor=0):
     # Verify image size is a multiple of stride s in each dimension
     if isinstance(imgsz, int):  # integer i.e. img_size=640

@@ -20,7 +20,7 @@ import torch
 from PIL import Image, ImageDraw, ImageFont
 from utils import TryExcept, threaded
-from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path,
+from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, clip_boxes, increment_path,
                            is_ascii, xywh2xyxy, xyxy2xywh)
 from utils.metrics import fitness
 from utils.segment.general import scale_image
@@ -62,8 +62,6 @@ def check_pil_font(font=FONT, size=10):
         try:
             check_font(font)
             return ImageFont.truetype(str(font), size)
-        except TypeError:
-            check_requirements('Pillow>=8.4.0')  # known issue https://github.com/ultralytics/yolov5/issues/5374
         except URLError:  # not online
             return ImageFont.load_default()

val.py

@@ -39,7 +39,7 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
 from models.common import DetectMultiBackend
 from utils.callbacks import Callbacks
 from utils.dataloaders import create_dataloader
-from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements,
+from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size,
                            check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression,
                            print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
 from utils.metrics import ConfusionMatrix, ap_per_class, box_iou
@@ -311,7 +311,6 @@ def run(
             json.dump(jdict, f)
         try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
-            check_requirements('pycocotools>=2.0.6')
             from pycocotools.coco import COCO
             from pycocotools.cocoeval import COCOeval
@@ -371,7 +370,6 @@ def parse_opt():
 def main(opt):
-    check_requirements(exclude=('tensorboard', 'thop'))
     if opt.task in ('train', 'val', 'test'):  # run normally
         if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466