Merge branch 'master' into Maj-Pierre-1
commit c81d5bf5d6
|
@ -68,7 +68,7 @@ def run(
|
|||
imgsz (int): Inference size in pixels (default: 640).
|
||||
batch_size (int): Batch size for inference (default: 1).
|
||||
data (Path | str): Path to the dataset.yaml file (default: ROOT / "data/coco128.yaml").
|
||||
device (str): CUDA device, e.g., '0' or '0,1,2,3' or 'cpu' (default: None).
|
||||
device (str): CUDA device, e.g., '0' or '0,1,2,3' or 'cpu' (default: "").
|
||||
half (bool): Use FP16 half-precision inference (default: False).
|
||||
test (bool): Test export formats only (default: False).
|
||||
pt_only (bool): Test PyTorch format only (default: False).
|
||||
|
@ -175,6 +175,24 @@ def test(
|
|||
|
||||
Returns:
|
||||
pd.DataFrame: DataFrame containing the results of the export tests, including format names and export statuses.
|
||||
|
||||
Examples:
|
||||
```bash
|
||||
$ python benchmarks.py --weights yolov5s.pt --img 640
|
||||
```
|
||||
|
||||
Notes:
|
||||
Supported export formats and models include PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, CoreML, TensorFlow
|
||||
SavedModel, TensorFlow GraphDef, TensorFlow Lite, and TensorFlow Edge TPU. Edge TPU and TF.js are unsupported.
|
||||
|
||||
Usage:
|
||||
Install required packages:
|
||||
$ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU support
|
||||
$ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU support
|
||||
$ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT
|
||||
|
||||
Run export tests:
|
||||
$ python benchmarks.py --weights yolov5s.pt --img 640
|
||||
"""
|
||||
y, t = [], time.time()
|
||||
device = select_device(device)
|
||||
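For readers of this diff, a minimal usage sketch of the benchmark entry point documented above; it assumes the YOLOv5 repository root is on sys.path and that run() accepts the keyword arguments listed in the docstring:

```python
# Hedged sketch: assumes benchmarks.py is importable and run() matches the documented signature.
from pathlib import Path

from benchmarks import run

# Benchmark yolov5s.pt at 640 px with the documented defaults spelled out.
run(
    weights=Path("yolov5s.pt"),
    imgsz=640,
    batch_size=1,
    data=Path("data/coco128.yaml"),
    device="",      # empty string lets select_device() pick the best available device
    half=False,     # FP32 inference
    test=False,     # benchmark rather than only testing export formats
    pt_only=False,  # include all export formats, not only PyTorch
)
```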
|
@ -213,8 +231,8 @@ def parse_opt():
|
|||
half (bool): Use FP16 half-precision inference. This is a flag and defaults to False.
|
||||
test (bool): Test exports only. This is a flag and defaults to False.
|
||||
pt_only (bool): Test PyTorch only. This is a flag and defaults to False.
|
||||
hard_fail (bool|str): Throw an error on benchmark failure. Can be a boolean or a string representing a minimum metric
|
||||
floor, i.e., '0.29'. Defaults to False.
|
||||
hard_fail (bool | str): Throw an error on benchmark failure. Can be a boolean or a string representing a minimum
|
||||
metric floor, e.g., '0.29'. Defaults to False.
|
||||
|
||||
Returns:
|
||||
argparse.Namespace: Parsed command-line arguments encapsulated in an argparse Namespace object.
|
||||
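To illustrate the hard_fail behaviour described above (a flag that is False by default, True when passed bare, or a string metric floor when given a value), here is a small argparse sketch; the parser and option shown are illustrative, not the file's actual parser:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--hard-fail",
    nargs="?",       # value is optional
    const=True,      # bare flag -> True
    default=False,   # absent -> False
    help="raise an error on benchmark failure; optionally pass a minimum metric floor, e.g. 0.29",
)
print(parser.parse_args([]).hard_fail)                       # False
print(parser.parse_args(["--hard-fail"]).hard_fail)          # True
print(parser.parse_args(["--hard-fail", "0.29"]).hard_fail)  # '0.29'
```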
|
|
74 detect.py
|
@ -101,40 +101,40 @@ def run(
|
|||
Runs YOLOv5 detection inference on various sources like images, videos, directories, streams, etc.
|
||||
|
||||
Args:
|
||||
weights (str | Path): Path to the model weights file or a Triton URL. Default is 'yolov5s.pt'.
|
||||
source (str | Path): Input source, which can be a file, directory, URL, glob pattern, screen capture, or webcam index.
|
||||
Default is 'data/images'.
|
||||
data (str | Path): Path to the dataset YAML file. Default is 'data/coco128.yaml'.
|
||||
imgsz (tuple[int, int]): Inference image size as a tuple (height, width). Default is (640, 640).
|
||||
conf_thres (float): Confidence threshold for detections. Default is 0.25.
|
||||
iou_thres (float): Intersection Over Union (IOU) threshold for non-max suppression. Default is 0.45.
|
||||
max_det (int): Maximum number of detections per image. Default is 1000.
|
||||
device (str): CUDA device identifier (e.g., '0' or '0,1,2,3') or 'cpu'. Default is an empty string, which
|
||||
uses the best available device.
|
||||
view_img (bool): If True, display inference results using OpenCV. Default is False.
|
||||
save_txt (bool): If True, save results in a text file. Default is False.
|
||||
save_csv (bool): If True, save results in a CSV file. Default is False.
|
||||
save_conf (bool): If True, include confidence scores in the saved results. Default is False.
|
||||
save_crop (bool): If True, save cropped prediction boxes. Default is False.
|
||||
nosave (bool): If True, do not save inference images or videos. Default is False.
|
||||
classes (list[int]): List of class indices to filter detections by. Default is None.
|
||||
agnostic_nms (bool): If True, perform class-agnostic non-max suppression. Default is False.
|
||||
augment (bool): If True, use augmented inference. Default is False.
|
||||
visualize (bool): If True, visualize feature maps. Default is False.
|
||||
update (bool): If True, update all models' weights. Default is False.
|
||||
project (str | Path): Directory to save results. Default is 'runs/detect'.
|
||||
name (str): Name of the current experiment; used to create a subdirectory within 'project'. Default is 'exp'.
|
||||
exist_ok (bool): If True, existing directories with the same name are reused instead of being incremented. Default is
|
||||
False.
|
||||
line_thickness (int): Thickness of bounding box lines in pixels. Default is 3.
|
||||
hide_labels (bool): If True, do not display labels on bounding boxes. Default is False.
|
||||
hide_conf (bool): If True, do not display confidence scores on bounding boxes. Default is False.
|
||||
half (bool): If True, use FP16 half-precision inference. Default is False.
|
||||
dnn (bool): If True, use OpenCV DNN backend for ONNX inference. Default is False.
|
||||
vid_stride (int): Stride for processing video frames, to skip frames between processing. Default is 1.
|
|
||||
Returns:
|
||||
None
|
||||
|
||||
Examples:
|
||||
```python
|
||||
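# Hedged usage sketch (the example block is truncated in this diff); assumes detect.py is importable
# and that run() accepts the keyword arguments documented above.
from detect import run

run(weights="yolov5s.pt", source="data/images", imgsz=(640, 640), conf_thres=0.25, device="")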
|
@ -397,15 +397,15 @@ def main(opt):
|
|||
Executes YOLOv5 model inference based on provided command-line arguments, validating dependencies before running.
|
||||
|
||||
Args:
|
||||
opt (argparse.Namespace): Command-line arguments for YOLOv5 detection. See function `parse_opt` for details.
|
||||
|
||||
Returns:
|
||||
None
|
||||
|
||||
Note:
|
||||
This function performs essential pre-execution checks and initiates the YOLOv5 detection process based on user-specified options.
|
||||
Refer to the usage guide and examples for more information about different sources and formats at:
|
||||
https://github.com/ultralytics/ultralytics
|
||||
|
||||
Example usage:
|
||||
|
||||
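The "Example usage" entry above is cut off at the hunk boundary; a plausible minimal invocation, assuming the parse_opt() and main() functions referenced in this docstring, would be:

```python
# Illustrative sketch only; mirrors the documented parse_opt() -> main() flow in detect.py.
from detect import main, parse_opt

if __name__ == "__main__":
    opt = parse_opt()  # parse the command-line arguments described above
    main(opt)          # check requirements, then run YOLOv5 detection
```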
|
|
202 export.py
|
@ -103,10 +103,9 @@ class iOSModel(torch.nn.Module):
|
|||
None: This method does not return any value.
|
||||
|
||||
Notes:
|
||||
This initializer configures normalization based on the input image dimensions, which is critical for
|
||||
ensuring the model's compatibility and proper functionality on iOS devices. The normalization step
|
||||
involves dividing by the image width if the image is square; otherwise, additional conditions might
|
||||
apply (trimmed for brevity).
|
||||
This initializer configures normalization based on the input image dimensions, which is critical for ensuring the
|
||||
model's compatibility and proper functionality on iOS devices. The normalization step involves dividing by the image
|
||||
width if the image is square; otherwise, additional conditions might apply (trimmed for brevity).
|
||||
"""
|
||||
super().__init__()
|
||||
b, c, h, w = im.shape # batch, channel, height, width
|
||||
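The normalization logic referenced above is trimmed in this docstring; as a rough sketch of the idea (an assumption, not the file's exact code), a square input can be normalized by a scalar 1/width, while a non-square input needs per-axis factors:

```python
# Assumed sketch of the normalization described above (actual implementation trimmed in the diff).
import torch

im = torch.zeros(1, 3, 640, 640)  # example input image tensor
b, c, h, w = im.shape             # batch, channel, height, width
if h == w:
    normalize = 1.0 / w           # square image: a single scalar scales xywh
else:
    normalize = torch.tensor([1.0 / w, 1.0 / h, 1.0 / w, 1.0 / h])  # per-axis scaling for x, y, w, h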
|
@ -124,16 +123,17 @@ class iOSModel(torch.nn.Module):
|
|||
Runs a forward pass on the input tensor, returning class confidences and normalized coordinates.
|
||||
|
||||
Args:
|
||||
x (torch.Tensor): Input tensor containing the image data.
|
||||
|
||||
Returns:
|
||||
torch.Tensor: Concatenated tensor with normalized coordinates (xywh), confidence scores (conf), and class probabilities (cls).
|
||||
torch.Tensor: Concatenated tensor with normalized coordinates (xywh), confidence scores (conf),
|
||||
and class probabilities (cls).
|
||||
|
||||
Examples:
|
||||
```python
|
||||
model = iOSModel(pretrained_model, input_image)
|
||||
output = model.forward(torch_input_tensor)
|
||||
```
|
||||
"""
|
||||
xywh, conf, cls = self.model(x)[0].squeeze().split((4, 1, self.nc), 1)
|
||||
return cls * conf, xywh * self.normalize # confidence (3780, 80), coordinates (3780, 4)
|
||||
|
@ -151,7 +151,7 @@ def export_formats():
|
|||
Examples:
|
||||
```python
|
||||
formats = export_formats()
|
||||
print(formats)
|
||||
print(f"Supported export formats:\n{formats}")
|
||||
```
|
||||
|
||||
Notes:
|
||||
|
@ -188,16 +188,18 @@ def try_export(inner_func):
|
|||
|
||||
Returns:
|
||||
Callable: The wrapped function that logs execution details. When executed, this wrapper function returns either:
|
||||
- Tuple (str, torch.nn.Module): On success — the file path of the exported model and the model instance.
|
||||
- Tuple (None, None): On failure — None values indicating export failed.
|
||||
- Tuple (str | torch.nn.Module): On success — the file path of the exported model and the model instance.
|
||||
- Tuple (None, None): On failure — None values indicating export failure.
|
||||
|
||||
Examples:
|
||||
```python
|
||||
@try_export
|
||||
def export_onnx(model, filepath):
|
||||
# implementation here
|
||||
pass
|
||||
|
||||
exported_file, exported_model = export_onnx(yolo_model, 'path/to/save/model.onnx')
|
||||
```
|
||||
|
||||
Notes:
|
||||
For additional requirements and model export formats, refer to the [Ultralytics YOLOv5 GitHub repository](https://github.com/ultralytics/ultralytics).
|
||||
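To make the documented contract concrete, here is a minimal sketch of a decorator that returns (file, model) on success and (None, None) on failure while logging timing; it is illustrative and not the repository's exact implementation:

```python
import time
from functools import wraps


def try_export(inner_func):
    """Wrap an export function, logging success/failure and timing (illustrative sketch)."""

    @wraps(inner_func)
    def outer_func(*args, **kwargs):
        start = time.time()
        try:
            f, model = inner_func(*args, **kwargs)
            print(f"export success ({time.time() - start:.1f}s), saved as {f}")
            return f, model
        except Exception as e:
            print(f"export failure ({time.time() - start:.1f}s): {e}")
            return None, None

    return outer_func
```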
|
@ -239,7 +241,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr("TorchScript:"
|
|||
Notes:
|
||||
- This function uses tracing to create the TorchScript model.
|
||||
- Metadata, including the input image shape, model stride, and class names, is saved in an extra file (`config.txt`)
|
||||
within the TorchScript model package.
|
||||
- For mobile optimization, refer to the PyTorch tutorial: https://pytorch.org/tutorials/recipes/mobile_interpreter.html
|
||||
|
||||
Example:
|
||||
|
@ -359,8 +361,7 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr("ONNX
|
|||
@try_export
|
||||
def export_openvino(file, metadata, half, int8, data, prefix=colorstr("OpenVINO:")):
|
||||
"""
|
||||
Exports a YOLOv5 model to OpenVINO format with optional FP16 and INT8 quantization; see
|
||||
https://pypi.org/project/openvino-dev/.
|
||||
Exports a YOLOv5 model to OpenVINO format with optional FP16 and INT8 quantization.
|
||||
|
||||
Args:
|
||||
file (Path): The path to the output file where the OpenVINO model will be saved.
|
||||
|
@ -450,7 +451,7 @@ def export_openvino(file, metadata, half, int8, data, prefix=colorstr("OpenVINO:
|
|||
@try_export
|
||||
def export_paddle(model, im, file, metadata, prefix=colorstr("PaddlePaddle:")):
|
||||
"""
|
||||
Exports a YOLOv5 model to PaddlePaddle format using X2Paddle, saving the converted model and metadata.
|
||||
Export a YOLOv5 model to PaddlePaddle format using X2Paddle, saving the converted model and metadata.
|
||||
|
||||
Args:
|
||||
model (torch.nn.Module): The YOLOv5 model to be exported.
|
||||
|
@ -476,6 +477,7 @@ def export_paddle(model, im, file, metadata, prefix=colorstr("PaddlePaddle:")):
|
|||
|
||||
export_paddle(model=model, im=im, file=file, metadata=metadata)
|
||||
```
|
||||
|
||||
Notes:
|
||||
Ensure that `paddlepaddle` and `x2paddle` are installed, as these are required for the export function. You can
|
||||
install them via pip:
|
||||
|
@ -556,7 +558,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose
|
|||
|
||||
Args:
|
||||
model (torch.nn.Module): YOLOv5 model to be exported.
|
||||
im (torch.Tensor): Input tensor of shape (B,C,H,W).
|
||||
im (torch.Tensor): Input tensor of shape (B, C, H, W).
|
||||
file (Path): Path to save the exported model.
|
||||
half (bool): Set to True to export with FP16 precision.
|
||||
dynamic (bool): Set to True to enable dynamic input shapes.
|
||||
|
@ -664,7 +666,8 @@ def export_saved_model(
|
|||
prefix=colorstr("TensorFlow SavedModel:"),
|
||||
):
|
||||
"""
|
||||
Exports a YOLOv5 model to TensorFlow SavedModel format, supporting dynamic axes and non-maximum suppression (NMS).
|
||||
Exports a YOLOv5 model to the TensorFlow SavedModel format, supporting dynamic axes and non-maximum suppression
|
||||
(NMS).
|
||||
|
||||
Args:
|
||||
model (torch.nn.Module): The PyTorch model to convert.
|
||||
|
@ -681,7 +684,8 @@ def export_saved_model(
|
|||
prefix (str, optional): Prefix for logging messages. Default is "TensorFlow SavedModel:".
|
||||
|
||||
Returns:
|
||||
tuple: A tuple containing the path to the saved model folder (str) and the Keras model instance (tf.keras.Model | None).
|
||||
tuple[str, tf.keras.Model | None]: A tuple containing the path to the saved model folder and the Keras model instance,
|
||||
or None if TensorFlow export fails.
|
||||
|
||||
Notes:
|
||||
- The method supports TensorFlow versions up to 2.15.1.
|
||||
|
@ -689,9 +693,6 @@ def export_saved_model(
|
|||
- If the TensorFlow version exceeds 2.13.1, it might cause issues when exporting to TFLite.
|
||||
Refer to: https://github.com/ultralytics/yolov5/issues/12489
|
||||
|
||||
Raises:
|
||||
Exception: If TensorFlow is not installed.
|
||||
|
||||
Example:
|
||||
```python
|
||||
model, im = ... # Initialize your PyTorch model and input tensor
|
||||
|
@ -749,7 +750,7 @@ def export_saved_model(
|
|||
@try_export
|
||||
def export_pb(keras_model, file, prefix=colorstr("TensorFlow GraphDef:")):
|
||||
"""
|
||||
Exports YOLOv5 model to TensorFlow GraphDef (*.pb) format.
|
||||
Export YOLOv5 model to TensorFlow GraphDef (*.pb) format.
|
||||
|
||||
Args:
|
||||
keras_model (tf.keras.Model): The Keras model to be converted.
|
||||
|
@ -790,44 +791,43 @@ def export_tflite(
|
|||
):
|
||||
# YOLOv5 TensorFlow Lite export
|
||||
"""
|
||||
Exports YOLOv5 model to TensorFlow Lite format with optional FP16, INT8, and NMS support.
|
||||
Exports a YOLOv5 model to TensorFlow Lite format with optional INT8 quantization and NMS support.
|
||||
|
||||
Args:
|
||||
keras_model (tf.keras.Model): The Keras model to be exported.
|
||||
im (torch.Tensor): Image tensor for normalization and model tracing.
|
||||
file (Path): The file path to save the exported TensorFlow Lite model.
|
||||
im (torch.Tensor): An input image tensor for normalization and model tracing.
|
||||
file (Path): The file path to save the TensorFlow Lite model.
|
||||
int8 (bool): Enables INT8 quantization if True.
|
||||
per_tensor (bool): If True, disable per-channel quantization (applicable when int8 is True).
|
||||
data (str): Path to dataset for representative dataset generation in INT8 quantization.
|
||||
nms (bool): Enables Non-Maximum Suppression (NMS) support if True.
|
||||
agnostic_nms (bool): Enables class-agnostic NMS support if True.
|
||||
prefix (str): Prefix for logging messages.
|
||||
per_tensor (bool): If True, disables per-channel quantization.
|
||||
data (str): Path to the dataset for representative dataset generation in INT8 quantization.
|
||||
nms (bool): Enables Non-Maximum Suppression (NMS) if True.
|
||||
agnostic_nms (bool): Enables class-agnostic NMS if True.
|
||||
prefix (str): Prefix for log messages.
|
||||
|
||||
Returns:
|
||||
(str | None, tf.lite.Model | None): The file path of the saved TFLite model, and the TFLite model instance if successful.
|
||||
(str | None, tflite.Model | None): The file path of the exported TFLite model and the TFLite model instance, or None if export failed.
|
||||
|
||||
Example:
|
||||
```python
|
||||
from pathlib import Path
|
||||
import torch
|
||||
import tensorflow as tf
|
||||
from torchvision import models
|
||||
|
||||
# Load a pre-trained model from torchvision
|
||||
model = models.yolov5() # Placeholder for actual YOLOv5 model loading
|
||||
im = torch.zeros(1, 3, 640, 640) # Example image tensor
|
||||
|
||||
# Provide the Keras model wrapping the PyTorch YOLOv5 model
|
||||
# Load a Keras model wrapping a YOLOv5 model
|
||||
keras_model = tf.keras.models.load_model('path/to/keras_model.h5')
|
||||
|
||||
# Export the model to TensorFlow Lite format
|
||||
file_path = export_tflite(keras_model, im, Path('model.tflite'), int8=False, per_tensor=False,
|
||||
data='path/to/dataset.yaml', nms=False, agnostic_nms=False)
|
||||
# Example input tensor
|
||||
im = torch.zeros(1, 3, 640, 640)
|
||||
|
||||
# Export the model
|
||||
export_tflite(keras_model, im, Path('model.tflite'), int8=True, per_tensor=False, data='data/coco.yaml',
|
||||
nms=True, agnostic_nms=False)
|
||||
```
|
||||
|
||||
Notes:
|
||||
Ensure the TensorFlow and TensorFlow Lite dependencies are installed. The exported TFLite model can be used for
|
||||
efficient inference on mobile and edge devices.
|
||||
- Ensure TensorFlow and TensorFlow Lite dependencies are installed.
|
||||
- INT8 quantization requires a representative dataset to achieve optimal accuracy.
|
||||
- TensorFlow Lite models are suitable for efficient inference on mobile and edge devices.
|
||||
"""
|
||||
import tensorflow as tf
|
||||
|
||||
|
@ -935,18 +935,18 @@ def export_tfjs(file, int8, prefix=colorstr("TensorFlow.js:")):
|
|||
prefix (str): Prefix for logging information (default: colorstr("TensorFlow.js:")).
|
||||
|
||||
Returns:
|
||||
tuple: Output directory path (str), None
|
||||
(str, None): The output directory path as a string and None.
|
||||
|
||||
Notes:
|
||||
This function requires `tensorflowjs` to be installed. You can install it using:
|
||||
```shell
|
||||
pip install tensorflowjs
|
||||
```
|
||||
|
||||
Example usage:
|
||||
```python
|
||||
export_tfjs(Path('yolov5s.onnx'), int8=False)
|
||||
```
|
||||
|
||||
The TensorFlow.js converted model is saved in the directory specified by `file` with "_web_model" suffix.
|
||||
"""
|
||||
|
@ -987,12 +987,12 @@ def export_tfjs(file, int8, prefix=colorstr("TensorFlow.js:")):
|
|||
|
||||
def add_tflite_metadata(file, metadata, num_outputs):
|
||||
"""
|
||||
Adds TFLite metadata to a model file, supporting multiple outputs, as specified by TensorFlow guidelines.
|
||||
Adds metadata to a TensorFlow Lite (TFLite) model file, supporting multiple outputs, based on TensorFlow guidelines.
|
||||
|
||||
Args:
|
||||
file (str): The path to the TensorFlow Lite model file to which metadata will be added.
|
||||
metadata (dict): Metadata information to be added to the model, structured as required by TFLite metadata schema.
|
||||
num_outputs (int): Number of output tensors the model has, to properly configure the metadata.
|
||||
file (str): Path to the TFLite model file to which metadata will be added.
|
||||
metadata (dict): Metadata information to be added to the model, structured as required by the TFLite metadata schema.
|
||||
num_outputs (int): Number of output tensors the model has, used to configure the metadata properly.
|
||||
|
||||
Returns:
|
||||
None
|
||||
|
@ -1011,7 +1011,7 @@ def add_tflite_metadata(file, metadata, num_outputs):
|
|||
|
||||
Note:
|
||||
TFLite metadata can include information such as model name, version, author, and other relevant details.
|
||||
For more details and structure of the metadata, refer to the TensorFlow Lite
|
||||
For more details on the structure of the metadata, refer to TensorFlow Lite
|
||||
[metadata guidelines](https://www.tensorflow.org/lite/models/convert/metadata).
|
||||
"""
|
||||
with contextlib.suppress(ImportError):
|
||||
|
@ -1071,24 +1071,24 @@ def pipeline_coreml(model, im, file, names, y, prefix=colorstr("CoreML Pipeline:
|
|||
- Flexible input shapes and additional NMS options can be customized within the function.
|
||||
|
||||
Examples:
|
||||
```python
|
||||
from pathlib import Path
|
||||
import torch
|
||||
|
||||
# Load YOLOv5 model and an example input tensor
|
||||
model = torch.load("yolov5s.pt")
|
||||
im = torch.zeros(1, 3, 640, 640) # Example input tensor
|
||||
|
||||
# Define class names
|
||||
names = {0: "person", 1: "bicycle", 2: "car", ...}
|
||||
|
||||
# Perform forward pass to get model output
|
||||
y = model(im)
|
||||
|
||||
# Convert to CoreML
|
||||
output_file = Path("yolov5s.mlmodel")
|
||||
pipeline_coreml(model, im, output_file, names, y)
|
||||
```
|
||||
"""
|
||||
import coremltools as ct
|
||||
from PIL import Image
|
||||
|
@ -1246,6 +1246,8 @@ def run(
|
|||
conf_thres=0.25, # TF.js NMS: confidence threshold
|
||||
):
|
||||
"""
|
||||
|
||||
|
||||
Exports a YOLOv5 model to specified formats including ONNX, TensorRT, CoreML, and TensorFlow.
|
||||
|
||||
Args:
|
||||
|
@ -1474,42 +1476,42 @@ def parse_opt(known=False):
|
|||
|
||||
def main(opt):
|
||||
"""
|
||||
```python Exports the YOLOv5 model to specified formats, including ONNX, TensorRT, CoreML, and TensorFlow.
|
||||
Exports the YOLOv5 model to specified formats, including ONNX, TensorRT, CoreML, and TensorFlow.
|
||||
|
||||
Args:
|
||||
opt (argparse.Namespace): Parsed command-line arguments containing the export configurations.
|
||||
- data (str): Path to the dataset.yaml.
|
||||
- weights (list[str]): Paths to model (.pt) file(s).
|
||||
- imgsz (list[int]): Image size (height, width).
|
||||
- batch_size (int): Batch size.
|
||||
- device (str): CUDA device, e.g., '0' or '0,1,2,3' or 'cpu'.
|
||||
- half (bool): FP16 half-precision export flag.
|
||||
- inplace (bool): Set YOLOv5 Detect() inplace to True.
|
||||
- keras (bool): Use Keras for TensorFlow models.
|
||||
- optimize (bool): Optimize TorchScript model for mobile.
|
||||
- int8 (bool): INT8 quantization flag.
|
||||
- per_tensor (bool): Per tensor quantization for TensorFlow.
|
||||
- dynamic (bool): Dynamic axes for ONNX/TF/TensorRT.
|
||||
- simplify (bool): Simplify ONNX model.
|
||||
- opset (int): ONNX opset version.
|
||||
- verbose (bool): Verbose logging for TensorRT.
|
||||
- workspace (int): Workspace size for TensorRT (in GB).
|
||||
- nms (bool): Add NMS to TensorFlow model.
|
||||
- agnostic_nms (bool): Add agnostic NMS to TensorFlow model.
|
||||
- topk_per_class (int): Top-k per class for TensorFlow.js NMS.
|
||||
- topk_all (int): Top-k for all classes for TensorFlow.js NMS.
|
||||
- iou_thres (float): IoU threshold for TensorFlow.js NMS.
|
||||
- conf_thres (float): Confidence threshold for TensorFlow.js NMS.
|
||||
- include (list[str]): List of formats to include in export, e.g., ['torchscript', 'onnx'].
|
||||
- data (str): Path to the dataset YAML configuration file (e.g., 'data/coco128.yaml').
|
||||
- weights (list[str] | str): Paths to the pretrained model weights file(s) (e.g., 'yolov5s.pt').
|
||||
- imgsz (list[int]): Image size as a list [height, width].
|
||||
- batch_size (int): Batch size for exporting the model.
|
||||
- device (str): Device to run the export on, such as '0' for GPU, or 'cpu' for CPU.
|
||||
- half (bool): Flag to export the model with FP16 half-precision.
|
||||
- inplace (bool): Set the YOLOv5 Detect() module inplace mode to True.
|
||||
- keras (bool): Flag to use Keras for TensorFlow SavedModel export.
|
||||
- optimize (bool): Optimize TorchScript model for mobile deployment.
|
||||
- int8 (bool): Apply INT8 quantization for CoreML or TensorFlow models.
|
||||
- per_tensor (bool): Apply per-tensor quantization for TensorFlow models.
|
||||
- dynamic (bool): Enable dynamic axes for ONNX, TensorFlow, or TensorRT exports.
|
||||
- simplify (bool): Simplify ONNX model during export.
|
||||
- opset (int): ONNX opset version.
|
||||
- verbose (bool): Enable verbose logging for TensorRT export.
|
||||
- workspace (int): TensorRT workspace size in GB.
|
||||
- nms (bool): Add non-maximum suppression (NMS) to the TensorFlow model.
|
||||
- agnostic_nms (bool): Add class-agnostic NMS to the TensorFlow model.
|
||||
- topk_per_class (int): Top-K boxes per class to keep for TensorFlow.js NMS.
|
||||
- topk_all (int): Top-K boxes for all classes to keep for TensorFlow.js NMS.
|
||||
- iou_thres (float): IoU threshold for NMS.
|
||||
- conf_thres (float): Confidence threshold for NMS.
|
||||
- include (list[str]): List of formats to include in export (e.g., ['torchscript', 'onnx']).
|
||||
|
||||
Returns:
|
||||
list[str]: List of exported file paths.
|
||||
```python
|
||||
list[str]: List of paths to the exported model files.
|
||||
|
||||
# Example usage:
|
||||
# opt = parse_opt()
|
||||
# main(opt)
|
||||
```
|
||||
Example:
|
||||
```python
|
||||
opt = parse_opt()
|
||||
main(opt)
|
||||
```
|
||||
"""
|
||||
for opt.weights in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
|
||||
run(**vars(opt))
|
||||
|
|
41 hubconf.py
|
@ -188,8 +188,7 @@ def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=Tr
|
|||
auto-select the best available device. Defaults to None.
|
||||
|
||||
Returns:
|
||||
YOLOv5 model (torch.nn.Module): The YOLOv5-small model loaded with specified configurations and optionally
|
||||
pretrained weights.
|
||||
torch.nn.Module: The YOLOv5-small model loaded with specified configurations and optionally pretrained weights.
|
||||
|
||||
Usage:
|
||||
```python
|
||||
|
@ -245,10 +244,12 @@ def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=Tr
|
|||
classes (int): Number of model classes. Default is 80.
|
||||
autoshape (bool): Apply YOLOv5 .autoshape() wrapper to model. Default is True.
|
||||
_verbose (bool): Print all information to screen. Default is True.
|
||||
device (str | torch.device | None): Device to use for model parameters, e.g., 'cpu', 'cuda', or a torch.device instance. Default is None.
|
||||
device (str | torch.device | None): Device to use for model parameters, e.g., 'cpu', 'cuda', or a torch.device instance.
|
||||
Default is None.
|
||||
|
||||
Returns:
|
||||
YOLOv5 model (torch.nn.Module).
|
||||
YOLOv5 model (torch.nn.Module): The YOLOv5-large model instantiated with specified configurations and possibly
|
||||
pretrained weights.
|
||||
|
||||
Example:
|
||||
```python
|
||||
|
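# Hedged usage sketch (the diff's own example is truncated here); torch.hub.load is the documented entry point.
import torch

model = torch.hub.load("ultralytics/yolov5", "yolov5l", pretrained=True)  # load YOLOv5-large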
@ -368,9 +369,11 @@ def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=T
|
|||
pretrained (bool): If True, loads pretrained weights. Default is True.
|
||||
channels (int): Number of input channels. Default is 3.
|
||||
classes (int): Number of model classes. Default is 80.
|
||||
autoshape (bool): Apply YOLOv5 .autoshape() wrapper to the model for file/URI/PIL/cv2/np inputs and NMS. Default is True.
|
||||
autoshape (bool): Apply YOLOv5 .autoshape() wrapper to the model for file/URI/PIL/cv2/np inputs and NMS.
|
||||
Default is True.
|
||||
_verbose (bool): If True, prints detailed information to the screen. Default is True.
|
||||
device (str | torch.device | None): Device to use for model parameters. Default is None, which uses the best available device.
|
||||
device (str | torch.device | None): Device to use for model parameters. Default is None, which uses the
|
||||
best available device.
|
||||
|
||||
Returns:
|
||||
torch.nn.Module: The YOLOv5-medium-P6 model.
|
||||
|
@ -378,12 +381,12 @@ def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=T
|
|||
Refer to the PyTorch Hub models documentation: https://pytorch.org/hub/ultralytics_yolov5 for additional details.
|
||||
|
||||
Example:
|
||||
```python
|
||||
import torch
|
||||
|
||||
# Load YOLOv5-medium-P6 model
|
||||
model = torch.hub.load('ultralytics/yolov5', 'yolov5m6')
|
||||
```
|
||||
|
||||
Notes:
|
||||
- The model can be loaded with pre-trained weights for better performance on specific tasks.
|
||||
|
@ -401,12 +404,10 @@ def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=T
|
|||
pretrained (bool, optional): If True, load pretrained weights into the model. Default is True.
|
||||
channels (int, optional): Number of input channels. Default is 3.
|
||||
classes (int, optional): Number of model classes. Default is 80.
|
||||
autoshape (bool, optional): If True, apply YOLOv5 .autoshape() wrapper to the model for input flexibility.
|
||||
Default is True.
|
||||
autoshape (bool, optional): If True, apply YOLOv5 .autoshape() wrapper to the model for input flexibility. Default is True.
|
||||
_verbose (bool, optional): If True, print all information to the screen. Default is True.
|
||||
device (str | torch.device | None, optional): Device to use for model parameters, e.g., 'cpu', 'cuda', or
|
||||
torch.device. If None, automatically selects the best available
|
||||
device. Default is None.
|
||||
device (str | torch.device | None, optional): Device to use for model parameters, e.g., 'cpu', 'cuda', or torch.device.
|
||||
If None, automatically selects the best available device. Default is None.
|
||||
|
||||
Returns:
|
||||
torch.nn.Module: The instantiated YOLOv5-large-P6 model.
|
||||
|
@ -444,10 +445,10 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=T
|
|||
torch.nn.Module: The instantiated YOLOv5-xlarge-P6 model.
|
||||
|
||||
Example:
|
||||
```python
|
||||
import torch
|
||||
model = torch.hub.load('ultralytics/yolov5', 'yolov5x6') # load the YOLOv5-xlarge-P6 model
|
||||
```
|
||||
|
||||
Note:
|
||||
For more information on YOLOv5 models, visit the official documentation:
|
||||
|
|
107 train.py
|
@ -546,10 +546,10 @@ def parse_opt(known=False):
|
|||
Parses command-line arguments for YOLOv5 training, validation, and testing.
|
||||
|
||||
Args:
|
||||
known (bool, optional): If True, parses known arguments, ignoring the unknown. Defaults to False.
|
||||
|
||||
Returns:
|
||||
argparse.Namespace: Parsed command-line arguments.
|
||||
|
||||
Example:
|
||||
```python
|
||||
|
@ -559,9 +559,9 @@ def parse_opt(known=False):
|
|||
```
|
||||
|
||||
Links:
|
||||
Models: https://github.com/ultralytics/yolov5/tree/master/models
|
||||
Datasets: https://github.com/ultralytics/yolov5/tree/master/data
|
||||
Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data
|
||||
"""
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--weights", type=str, default=ROOT / "yolov5s.pt", help="initial weights path")
|
||||
|
@ -621,15 +621,15 @@ def main(opt, callbacks=Callbacks()):
|
|||
Runs training or hyperparameter evolution with specified options and optional callbacks.
|
||||
|
||||
Args:
|
||||
opt (argparse.Namespace): The command-line arguments parsed for YOLOv5 training and evolution.
|
||||
callbacks (ultralytics.utils.callbacks.Callbacks, optional): Callback functions for various training stages.
|
||||
Defaults to Callbacks().
|
||||
|
||||
Returns:
|
||||
None
|
||||
|
||||
Note:
|
||||
For detailed usage, visit:
|
||||
https://github.com/ultralytics/yolov5/tree/master/models
|
||||
"""
|
||||
if RANK in {-1, 0}:
|
||||
|
@ -918,51 +918,56 @@ def run(**kwargs):
|
|||
Executes YOLOv5 training with given options, allowing optional overrides through keyword arguments.
|
||||
|
||||
Args:
|
||||
weights (str): Path to initial weights. Defaults to ROOT / 'yolov5s.pt'.
|
||||
cfg (str): Path to model YAML configuration. Defaults to an empty string.
|
||||
data (str): Path to dataset YAML configuration. Defaults to ROOT / 'data/coco128.yaml'.
|
||||
hyp (str): Path to hyperparameters YAML configuration. Defaults to ROOT / 'data/hyps/hyp.scratch-low.yaml'.
|
||||
epochs (int): Total number of training epochs. Defaults to 100.
|
||||
batch_size (int): Total batch size for all GPUs. Use -1 for automatic batch size determination. Defaults to 16.
|
||||
imgsz (int): Image size (pixels) for training and validation. Defaults to 640.
|
||||
rect (bool): Use rectangular training. Defaults to False.
|
||||
resume (bool | str): Resume most recent training with an optional path. Defaults to False.
|
||||
nosave (bool): Only save final checkpoint. Defaults to False.
|
||||
noval (bool): Only validate at the final epoch. Defaults to False.
|
||||
noautoanchor (bool): Disable AutoAnchor. Defaults to False.
|
||||
noplots (bool): Do not save plot files. Defaults to False.
|
||||
evolve (int): Evolve hyperparameters for a specified number of generations. Use 300 if provided without a value.
|
||||
evolve_population (str): Directory for loading population during evolution. Defaults to ROOT / 'data/hyps'.
|
||||
resume_evolve (str): Resume hyperparameter evolution from the last generation. Defaults to None.
|
||||
bucket (str): gsutil bucket for saving checkpoints. Defaults to an empty string.
|
||||
cache (str): Cache image data in 'ram' or 'disk'. Defaults to None.
|
||||
image_weights (bool): Use weighted image selection for training. Defaults to False.
|
||||
device (str): CUDA device identifier, e.g., '0', '0,1,2,3', or 'cpu'. Defaults to an empty string.
|
||||
multi_scale (bool): Use multi-scale training, varying image size by ±50%. Defaults to False.
|
||||
single_cls (bool): Train with multi-class data as single-class. Defaults to False.
|
||||
optimizer (str): Optimizer type, choices are ['SGD', 'Adam', 'AdamW']. Defaults to 'SGD'.
|
||||
sync_bn (bool): Use synchronized BatchNorm, only available in DDP mode. Defaults to False.
|
||||
workers (int): Maximum dataloader workers per rank in DDP mode. Defaults to 8.
|
||||
project (str): Directory for saving training runs. Defaults to ROOT / 'runs/train'.
|
||||
name (str): Name for saving the training run. Defaults to 'exp'.
|
||||
exist_ok (bool): Allow existing project/name without incrementing. Defaults to False.
|
||||
quad (bool): Use quad dataloader. Defaults to False.
|
||||
cos_lr (bool): Use cosine learning rate scheduler. Defaults to False.
|
||||
label_smoothing (float): Label smoothing epsilon value. Defaults to 0.0.
|
||||
patience (int): Patience for early stopping, measured in epochs without improvement. Defaults to 100.
|
||||
freeze (list): Layers to freeze, e.g., backbone=10, first 3 layers = [0, 1, 2]. Defaults to [0].
|
||||
save_period (int): Frequency in epochs to save checkpoints. Disabled if < 1. Defaults to -1.
|
||||
seed (int): Global training random seed. Defaults to 0.
|
||||
local_rank (int): Automatic DDP Multi-GPU argument. Do not modify. Defaults to -1.
|
||||
weights (str, optional): Path to initial weights. Defaults to ROOT / 'yolov5s.pt'.
|
||||
cfg (str, optional): Path to model YAML configuration. Defaults to an empty string.
|
||||
data (str, optional): Path to dataset YAML configuration. Defaults to ROOT / 'data/coco128.yaml'.
|
||||
hyp (str, optional): Path to hyperparameters YAML configuration. Defaults to ROOT / 'data/hyps/hyp.scratch-low.yaml'.
|
||||
epochs (int, optional): Total number of training epochs. Defaults to 100.
|
||||
batch_size (int, optional): Total batch size for all GPUs. Use -1 for automatic batch size determination. Defaults to 16.
|
||||
imgsz (int, optional): Image size (pixels) for training and validation. Defaults to 640.
|
||||
rect (bool, optional): Use rectangular training. Defaults to False.
|
||||
resume (bool | str, optional): Resume most recent training with an optional path. Defaults to False.
|
||||
nosave (bool, optional): Only save the final checkpoint. Defaults to False.
|
||||
noval (bool, optional): Only validate at the final epoch. Defaults to False.
|
||||
noautoanchor (bool, optional): Disable AutoAnchor. Defaults to False.
|
||||
noplots (bool, optional): Do not save plot files. Defaults to False.
|
||||
evolve (int, optional): Evolve hyperparameters for a specified number of generations. Use 300 if provided without a value.
|
||||
evolve_population (str, optional): Directory for loading population during evolution. Defaults to ROOT / 'data/hyps'.
|
||||
resume_evolve (str, optional): Resume hyperparameter evolution from the last generation. Defaults to None.
|
||||
bucket (str, optional): gsutil bucket for saving checkpoints. Defaults to an empty string.
|
||||
cache (str, optional): Cache image data in 'ram' or 'disk'. Defaults to None.
|
||||
image_weights (bool, optional): Use weighted image selection for training. Defaults to False.
|
||||
device (str, optional): CUDA device identifier, e.g., '0', '0,1,2,3', or 'cpu'. Defaults to an empty string.
|
||||
multi_scale (bool, optional): Use multi-scale training, varying image size by ±50%. Defaults to False.
|
||||
single_cls (bool, optional): Train with multi-class data as single-class. Defaults to False.
|
||||
optimizer (str, optional): Optimizer type, choices are ['SGD', 'Adam', 'AdamW']. Defaults to 'SGD'.
|
||||
sync_bn (bool, optional): Use synchronized BatchNorm, only available in DDP mode. Defaults to False.
|
||||
workers (int, optional): Maximum dataloader workers per rank in DDP mode. Defaults to 8.
|
||||
project (str, optional): Directory for saving training runs. Defaults to ROOT / 'runs/train'.
|
||||
name (str, optional): Name for saving the training run. Defaults to 'exp'.
|
||||
exist_ok (bool, optional): Allow existing project/name without incrementing. Defaults to False.
|
||||
quad (bool, optional): Use quad dataloader. Defaults to False.
|
||||
cos_lr (bool, optional): Use cosine learning rate scheduler. Defaults to False.
|
||||
label_smoothing (float, optional): Label smoothing epsilon value. Defaults to 0.0.
|
||||
patience (int, optional): Patience for early stopping, measured in epochs without improvement. Defaults to 100.
|
||||
freeze (list, optional): Layers to freeze, e.g., backbone=10, first 3 layers = [0, 1, 2]. Defaults to [0].
|
||||
save_period (int, optional): Frequency in epochs to save checkpoints. Disabled if < 1. Defaults to -1.
|
||||
seed (int, optional): Global training random seed. Defaults to 0.
|
||||
local_rank (int, optional): Automatic DDP Multi-GPU argument. Do not modify. Defaults to -1.
|
||||
|
||||
Returns:
|
||||
None: The function initiates YOLOv5 training or hyperparameter evolution based on the provided options.
|
||||
|
||||
Examples:
|
||||
```python
|
||||
import train
|
||||
train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
|
||||
```
|
||||
|
||||
Notes:
|
||||
- Models: https://github.com/ultralytics/yolov5/tree/master/models
|
||||
- Datasets: https://github.com/ultralytics/yolov5/tree/master/data
|
||||
- Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data
|
||||
"""
|
||||
opt = parse_opt(True)
|
||||
for k, v in kwargs.items():
|
||||
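The loop body above is cut off at the hunk boundary; presumably each keyword argument overrides the corresponding parsed default. A minimal sketch of that pattern (an assumption, not the file's verbatim code):

```python
# Assumed override pattern for run(**kwargs); the actual loop body is truncated in this diff.
import argparse

opt = argparse.Namespace(imgsz=640, weights="yolov5s.pt", data="data/coco128.yaml")
kwargs = {"imgsz": 320, "weights": "yolov5m.pt"}
for k, v in kwargs.items():
    setattr(opt, k, v)  # each keyword overrides the parsed default
print(opt)  # Namespace(data='data/coco128.yaml', imgsz=320, weights='yolov5m.pt')
```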
|
|
77 val.py
|
@ -66,8 +66,8 @@ def save_one_txt(predn, save_conf, shape, file):
|
|||
Saves one detection result to a txt file in normalized xywh format, optionally including confidence.
|
||||
|
||||
Args:
|
||||
predn (torch.Tensor): Predicted bounding boxes and associated confidence scores and classes
|
||||
in xyxy format, tensor of shape (N, 6) where N is the number of detections.
|
||||
predn (torch.Tensor): Predicted bounding boxes and associated confidence scores and classes in xyxy format,
|
||||
tensor of shape (N, 6) where N is the number of detections.
|
||||
save_conf (bool): If True, saves the confidence scores along with the bounding box coordinates.
|
||||
shape (tuple): Shape of the original image as (height, width).
|
||||
file (str | Path): File path where the result will be saved.
|
||||
|
@ -77,8 +77,8 @@ def save_one_txt(predn, save_conf, shape, file):
|
|||
|
||||
Notes:
|
||||
The xyxy bounding box format represents the coordinates (xmin, ymin, xmax, ymax).
|
||||
The xywh format represents the coordinates (center_x, center_y, width, height) and is
|
||||
normalized by the width and height of the image.
|
||||
The xywh format represents the coordinates (center_x, center_y, width, height) and is normalized by the width and
|
||||
height of the image.
|
||||
|
||||
Example:
|
||||
```python
|
||||
|
@ -99,33 +99,33 @@ def save_one_json(predn, jdict, path, class_map):
|
|||
Saves a single JSON detection result, including image ID, category ID, bounding box, and confidence score.
|
||||
|
||||
Args:
|
||||
predn (torch.Tensor): Predicted detections in xyxy format with shape (n, 6) where n is the number of detections.
|
||||
The tensor should contain [x_min, y_min, x_max, y_max, confidence, class_id] for each detection.
|
||||
jdict (list[dict]): List to collect JSON formatted detection results.
|
||||
path (pathlib.Path): Path object of the image file, used to extract image_id.
|
||||
class_map (dict[int, int]): Mapping from model class indices to dataset-specific category IDs.
|
||||
|
||||
Returns:
|
||||
None: Appends detection results as dictionaries to `jdict` list in-place.
|
||||
|
||||
Example:
|
||||
```python
|
||||
predn = torch.tensor([[100, 50, 200, 150, 0.9, 0], [50, 30, 100, 80, 0.8, 1]])
|
||||
jdict = []
|
||||
path = Path("42.jpg")
|
||||
class_map = {0: 18, 1: 19}
|
||||
save_one_json(predn, jdict, path, class_map)
|
||||
```
|
||||
This will append to `jdict`:
|
||||
```
|
||||
[
|
||||
{'image_id': 42, 'category_id': 18, 'bbox': [125.0, 75.0, 100.0, 100.0], 'score': 0.9},
|
||||
{'image_id': 42, 'category_id': 19, 'bbox': [75.0, 55.0, 50.0, 50.0], 'score': 0.8}
|
||||
]
|
||||
```
|
||||
|
||||
Notes:
|
||||
The `bbox` values are formatted as [x, y, width, height], where x and y represent the top-left corner of the box.
|
||||
"""
|
||||
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
|
||||
box = xyxy2xywh(predn[:, :4]) # xywh
|
||||
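For readers unfamiliar with the conversion performed by xyxy2xywh above, a small NumPy illustration of the corner-to-center transform (illustrative only):

```python
# Illustrative xyxy -> xywh (center-x, center-y, width, height) conversion, mirroring what xyxy2xywh computes.
import numpy as np

xyxy = np.array([[100.0, 50.0, 200.0, 150.0]])              # x_min, y_min, x_max, y_max
xywh = np.concatenate([(xyxy[:, :2] + xyxy[:, 2:]) / 2,      # box center
                       xyxy[:, 2:] - xyxy[:, :2]], axis=1)   # width, height
print(xywh)  # [[150. 100. 100. 100.]]
```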
|
@ -146,15 +146,15 @@ def process_batch(detections, labels, iouv):
|
|||
Return a correct prediction matrix given detections and labels at various IoU thresholds.
|
||||
|
||||
Args:
|
||||
detections (np.ndarray): Array of shape (N, 6) where each row corresponds to a detection with
|
||||
format [x1, y1, x2, y2, conf, class].
|
||||
labels (np.ndarray): Array of shape (M, 5) where each row corresponds to a ground truth label with
|
||||
format [class, x1, y1, x2, y2].
|
||||
detections (np.ndarray): Array of shape (N, 6) where each row corresponds to a detection with format
|
||||
[x1, y1, x2, y2, conf, class].
|
||||
labels (np.ndarray): Array of shape (M, 5) where each row corresponds to a ground truth label with format
|
||||
[class, x1, y1, x2, y2].
|
||||
iouv (np.ndarray): Array of IoU thresholds to evaluate at.
|
||||
|
||||
Returns:
|
||||
correct (np.ndarray): A binary array of shape (N, len(iouv)) indicating whether each detection
|
||||
is a true positive for each IoU threshold. There are 10 IoU levels used in the evaluation.
|
||||
correct (np.ndarray): A binary array of shape (N, len(iouv)) indicating whether each detection is a true positive
|
||||
for each IoU threshold. There are 10 IoU levels used in the evaluation.
|
||||
|
||||
Example:
|
||||
```python
|
||||
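# Hedged, simplified sketch of the matching described above (ignores de-duplication of multiple
# detections per label); shows how the (N, len(iouv)) true-positive matrix is formed.
import numpy as np

iouv = np.linspace(0.5, 0.95, 10)           # the 10 IoU thresholds mentioned above
iou = np.array([[0.60], [0.30]])            # example IoU of 2 detections vs 1 ground-truth box
same_class = np.array([[True], [True]])     # detection class matches label class
best = (iou * same_class).max(axis=1)       # best class-consistent IoU per detection
correct = best[:, None] >= iouv[None, :]    # (N, 10) boolean true-positive matrix
print(correct.astype(int))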
|
@ -220,7 +220,7 @@ def run(
|
|||
|
||||
Args:
|
||||
data (str | dict): Path to a dataset yaml file or a dataset dictionary.
|
||||
weights (str | list[str], optional): Path to the model weights file(s). Supports various formats: PyTorch,
|
||||
weights (str | list[str], optional): Path to the model weights file(s). Supports various formats including PyTorch,
|
||||
TorchScript, ONNX, OpenVINO, TensorRT, CoreML, TensorFlow SavedModel, TensorFlow GraphDef, TensorFlow Lite,
|
||||
TensorFlow Edge TPU, and PaddlePaddle.
|
||||
batch_size (int, optional): Batch size for inference. Default is 32.
|
||||
|
@ -473,15 +473,14 @@ def parse_opt():
|
|||
|
||||
Args:
|
||||
data (str): Path to the dataset YAML file, default is 'data/coco128.yaml'.
|
||||
weights (List[str]): List of paths to the model weight files, default is 'yolov5s.pt'.
|
||||
weights (list[str]): List of paths to the model weight files, default is 'yolov5s.pt'.
|
||||
batch_size (int): Batch size for inference, default is 32.
|
||||
imgsz (int): Inference image size in pixels, default is 640.
|
||||
conf_thres (float): Confidence threshold for predictions, default is 0.001.
|
||||
iou_thres (float): IoU threshold for Non-Max Suppression (NMS), default is 0.6.
|
||||
max_det (int): Maximum number of detections per image, default is 300.
|
||||
task (str): Task type - options are 'train', 'val', 'test', 'speed', or 'study'. Default is 'val'.
|
||||
device (str): Device to run the model on, e.g., '0' or '0,1,2,3' or 'cpu'. Default is empty to let
|
||||
the system choose automatically.
|
||||
device (str): Device to run the model on, e.g., '0' or '0,1,2,3' or 'cpu'. Default is empty to let the system choose automatically.
|
||||
workers (int): Maximum number of dataloader workers per rank in DDP mode, default is 8.
|
||||
single_cls (bool): If set, treats the dataset as a single-class dataset. Default is False.
|
||||
augment (bool): If set, performs augmented inference. Default is False.
|
||||
|
@ -552,10 +551,10 @@ def main(opt):
|
|||
|
||||
Args:
|
||||
opt (argparse.Namespace): Parsed command-line options.
|
||||
- This includes values for parameters like 'data', 'weights', 'batch_size', 'imgsz', 'conf_thres', 'iou_thres',
|
||||
'max_det', 'task', 'device', 'workers', 'single_cls', 'augment', 'verbose', 'save_txt', 'save_hybrid',
|
||||
'save_conf', 'save_json', 'project', 'name', 'exist_ok', 'half', and 'dnn', essential for configuring
|
||||
the YOLOv5 tasks.
|
||||
This includes values for parameters like 'data', 'weights', 'batch_size', 'imgsz', 'conf_thres', 'iou_thres',
|
||||
'max_det', 'task', 'device', 'workers', 'single_cls', 'augment', 'verbose', 'save_txt', 'save_hybrid',
|
||||
'save_conf', 'save_json', 'project', 'name', 'exist_ok', 'half', and 'dnn', essential for configuring
|
||||
the YOLOv5 tasks.
|
||||
|
||||
Returns:
|
||||
None
|
||||
|
|