diff --git a/docs/build_docs.py b/docs/build_docs.py
index 16ec8ca4741..7c40deaf697 100644
--- a/docs/build_docs.py
+++ b/docs/build_docs.py
@@ -63,7 +63,6 @@ def prepare_docs_markdown(clone_repos=True):
def update_page_title(file_path: Path, new_title: str):
"""Update the title of an HTML file."""
-
# Read the content of the file
with open(file_path, encoding="utf-8") as file:
content = file.read()
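
The hunk above only shows the read half of `update_page_title`; for context, a minimal sketch of how such a helper typically finishes the job is below. The regex substitution and the write-back step are illustrative assumptions, not part of this diff.

```python
# Hypothetical sketch of a title-update helper like update_page_title(); only the
# read step appears in the hunk above, the substitution and write-back are assumed.
import re
from pathlib import Path


def update_page_title_sketch(file_path: Path, new_title: str):
    """Update the <title> tag of an HTML file (illustrative only)."""
    # Read the content of the file
    with open(file_path, encoding="utf-8") as file:
        content = file.read()

    # Replace the first <title>...</title> occurrence with the new title (assumed behaviour)
    updated = re.sub(r"<title>.*?</title>", f"<title>{new_title}</title>", content, count=1, flags=re.DOTALL)

    # Write the updated content back to the same file
    with open(file_path, "w", encoding="utf-8") as file:
        file.write(updated)
```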
diff --git a/docs/overrides/javascript/extra.js b/docs/overrides/javascript/extra.js
index 3233a644119..0ab326adc78 100644
--- a/docs/overrides/javascript/extra.js
+++ b/docs/overrides/javascript/extra.js
@@ -1,7 +1,9 @@
// Function that applies light/dark theme based on the user's preference
const applyAutoTheme = () => {
// Determine the user's preferred color scheme
- const prefersLight = window.matchMedia("(prefers-color-scheme: light)").matches;
+ const prefersLight = window.matchMedia(
+ "(prefers-color-scheme: light)",
+ ).matches;
const prefersDark = window.matchMedia("(prefers-color-scheme: dark)").matches;
// Apply the appropriate attributes based on the user's preference
@@ -17,7 +19,21 @@ const applyAutoTheme = () => {
// Function that checks and applies light/dark theme based on the user's preference (if auto theme is enabled)
function checkAutoTheme() {
// Array of supported language codes -> each language has its own palette (stored in local storage)
- const supportedLangCodes = ["en", "zh", "ko", "ja", "ru", "de", "fr", "es", "pt", "it", "tr", "vi", "nl"];
+ const supportedLangCodes = [
+ "en",
+ "zh",
+ "ko",
+ "ja",
+ "ru",
+ "de",
+ "fr",
+ "es",
+ "pt",
+ "it",
+ "tr",
+ "vi",
+ "nl",
+ ];
// Get the URL path
const path = window.location.pathname;
// Extract the language code from the URL (assuming it's in the format /xx/...)
@@ -25,7 +41,9 @@ function checkAutoTheme() {
// Check if the extracted language code is in the supported languages
const isValidLangCode = supportedLangCodes.includes(langCode);
// Construct the local storage key based on the language code if valid, otherwise default to the root key
- const localStorageKey = isValidLangCode ? `/${langCode}/.__palette` : "/.__palette";
+ const localStorageKey = isValidLangCode
+ ? `/${langCode}/.__palette`
+ : "/.__palette";
// Retrieve the palette from local storage using the constructed key
const palette = localStorage.getItem(localStorageKey);
if (palette) {
@@ -41,8 +59,12 @@ function checkAutoTheme() {
checkAutoTheme();
// Re-run the function when the user's preference changes (when the user changes their system theme)
-window.matchMedia("(prefers-color-scheme: light)").addEventListener("change", checkAutoTheme);
-window.matchMedia("(prefers-color-scheme: dark)").addEventListener("change", checkAutoTheme);
+window
+ .matchMedia("(prefers-color-scheme: light)")
+ .addEventListener("change", checkAutoTheme);
+window
+ .matchMedia("(prefers-color-scheme: dark)")
+ .addEventListener("change", checkAutoTheme);
// Re-run the function when the palette changes (e.g. user switched from dark theme to auto theme)
// ! We can't use window.addEventListener("storage", checkAutoTheme) because it will NOT be triggered on the current tab
@@ -61,9 +83,15 @@ if (autoThemeInput) {
}
// Add iframe navigation
-window.onhashchange = function() {
- window.parent.postMessage({
- type: 'navigation',
- hash: window.location.pathname + window.location.search + window.location.hash
- }, '*');
+window.onhashchange = function () {
+ window.parent.postMessage(
+ {
+ type: "navigation",
+ hash:
+ window.location.pathname +
+ window.location.search +
+ window.location.hash,
+ },
+ "*",
+ );
};
diff --git a/docs/overrides/main.html b/docs/overrides/main.html
index 45bcff7d8eb..9b1bfb174a5 100644
--- a/docs/overrides/main.html
+++ b/docs/overrides/main.html
@@ -1,8 +1,6 @@
-{% extends "base.html" %}
-
-{% block announce %}
+{% extends "base.html" %} {% block announce %}
\n",
+ "\n",
+ "\n",
+ "
\n",
+ "\n",
+ "[中文](https://docs.ultralytics.com/zh/hub/) | [한국어](https://docs.ultralytics.com/ko/hub/) | [日本語](https://docs.ultralytics.com/ja/hub/) | [Русский](https://docs.ultralytics.com/ru/hub/) | [Deutsch](https://docs.ultralytics.com/de/hub/) | [Français](https://docs.ultralytics.com/fr/hub/) | [Español](https://docs.ultralytics.com/es/hub/) | [Português](https://docs.ultralytics.com/pt/hub/) | [Türkçe](https://docs.ultralytics.com/tr/hub/) | [Tiếng Việt](https://docs.ultralytics.com/vi/hub/) | [हिन्दी](https://docs.ultralytics.com/hi/hub/) | [العربية](https://docs.ultralytics.com/ar/hub/)\n",
+ "\n",
+ "
\n",
+ " \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "\n",
+ "Welcome to the [Ultralytics](https://ultralytics.com/) HUB notebook!\n",
+ "\n",
+ "This notebook allows you to train Ultralytics [YOLO](https://github.com/ultralytics/ultralytics) 🚀 models using [HUB](https://hub.ultralytics.com/). Please browse the HUB
Docs for details, raise an issue on
GitHub for support, and join our
Discord community for questions and discussions!\n",
+ "
"
+ ]
},
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "FIzICjaph_Wy"
- },
- "source": [
- "\n",
- "\n",
- "\n",
- "\n",
- "\n",
- "[中文](https://docs.ultralytics.com/zh/hub/) | [한국어](https://docs.ultralytics.com/ko/hub/) | [日本語](https://docs.ultralytics.com/ja/hub/) | [Русский](https://docs.ultralytics.com/ru/hub/) | [Deutsch](https://docs.ultralytics.com/de/hub/) | [Français](https://docs.ultralytics.com/fr/hub/) | [Español](https://docs.ultralytics.com/es/hub/) | [Português](https://docs.ultralytics.com/pt/hub/) | [Türkçe](https://docs.ultralytics.com/tr/hub/) | [Tiếng Việt](https://docs.ultralytics.com/vi/hub/) | [हिन्दी](https://docs.ultralytics.com/hi/hub/) | [العربية](https://docs.ultralytics.com/ar/hub/)\n",
- "\n",
- "
\n",
- " \n",
- "
\n",
- " \n",
- "
\n",
- "\n",
- "Welcome to the [Ultralytics](https://ultralytics.com/) HUB notebook!\n",
- "\n",
- "This notebook allows you to train Ultralytics [YOLO](https://github.com/ultralytics/ultralytics) 🚀 models using [HUB](https://hub.ultralytics.com/). Please browse the HUB
Docs for details, raise an issue on
GitHub for support, and join our
Discord community for questions and discussions!\n",
- "
"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "eRQ2ow94MiOv"
- },
- "source": [
- "# Setup\n",
- "\n",
- "Pip install `ultralytics` and [dependencies](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) and check software and hardware.\n",
- "\n",
- "[![PyPI - Version](https://img.shields.io/pypi/v/ultralytics?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics/) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics/)"
- ]
- },
- {
- "cell_type": "code",
- "metadata": {
- "id": "FyDnXd-n4c7Y",
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "outputId": "e1d713ec-e8a6-4422-fe61-c76ec9f03df5"
- },
- "source": [
- "%pip install ultralytics # install\n",
- "from ultralytics import YOLO, checks, hub\n",
- "checks() # checks"
- ],
- "execution_count": 1,
- "outputs": [
- {
- "output_type": "stream",
- "name": "stdout",
- "text": [
- "Ultralytics YOLOv8.2.3 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n",
- "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 28.8/78.2 GB disk)\n"
- ]
- }
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "cQ9BwaAqxAm4"
- },
- "source": [
- "# Start\n",
- "\n",
- "⚡ Login with your API key, load your YOLO 🚀 model and start training in 3 lines of code!"
- ]
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "eRQ2ow94MiOv"
+ },
+ "source": [
+ "# Setup\n",
+ "\n",
+ "Pip install `ultralytics` and [dependencies](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) and check software and hardware.\n",
+ "\n",
+ "[![PyPI - Version](https://img.shields.io/pypi/v/ultralytics?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics/) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics/)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
},
+ "id": "FyDnXd-n4c7Y",
+ "outputId": "e1d713ec-e8a6-4422-fe61-c76ec9f03df5"
+ },
+ "outputs": [
{
- "cell_type": "code",
- "metadata": {
- "id": "XSlZaJ9Iw_iZ"
- },
- "source": [
- "# Log in to HUB using your API key (https://hub.ultralytics.com/settings?tab=api+keys)\n",
- "hub.login('YOUR_API_KEY')\n",
- "\n",
- "# Load your model from HUB (replace 'YOUR_MODEL_ID' with your model ID)\n",
- "model = YOLO('https://hub.ultralytics.com/models/YOUR_MODEL_ID')\n",
- "\n",
- "# Train the model\n",
- "results = model.train()"
- ],
- "execution_count": null,
- "outputs": []
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Ultralytics YOLOv8.2.3 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n",
+ "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 28.8/78.2 GB disk)\n"
+ ]
}
- ]
+ ],
+ "source": [
+ "%pip install ultralytics # install\n",
+ "from ultralytics import YOLO, checks, hub\n",
+ "\n",
+ "checks() # checks"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "cQ9BwaAqxAm4"
+ },
+ "source": [
+ "# Start\n",
+ "\n",
+ "⚡ Login with your API key, load your YOLO 🚀 model and start training in 3 lines of code!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "XSlZaJ9Iw_iZ"
+ },
+ "outputs": [],
+ "source": [
+ "# Log in to HUB using your API key (https://hub.ultralytics.com/settings?tab=api+keys)\n",
+ "hub.login(\"YOUR_API_KEY\")\n",
+ "\n",
+ "# Load your model from HUB (replace 'YOUR_MODEL_ID' with your model ID)\n",
+ "model = YOLO(\"https://hub.ultralytics.com/models/YOUR_MODEL_ID\")\n",
+ "\n",
+ "# Train the model\n",
+ "results = model.train()"
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "name": "Ultralytics HUB",
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
}
diff --git a/examples/object_counting.ipynb b/examples/object_counting.ipynb
index 4dc197cad99..265b65c79e3 100644
--- a/examples/object_counting.ipynb
+++ b/examples/object_counting.ipynb
@@ -1,208 +1,210 @@
{
- "nbformat": 4,
- "nbformat_minor": 0,
- "metadata": {
- "colab": {
- "provenance": [],
- "gpuType": "T4"
- },
- "kernelspec": {
- "name": "python3",
- "display_name": "Python 3"
- },
- "language_info": {
- "name": "python"
- },
- "accelerator": "GPU"
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "PN1cAxdvd61e"
+ },
+ "source": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ "\n",
+ " [中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/)\n",
+ "\n",
+ "
\n",
+ "
\n",
+ "
\n",
+ "
\n",
+ "
\n",
+ "\n",
+ "Welcome to the Ultralytics YOLOv8 🚀 notebook!
YOLOv8 is the latest version of the YOLO (You Only Look Once) AI models developed by
Ultralytics. This notebook serves as the starting point for exploring the various resources available to help you get started with YOLOv8 and understand its features and capabilities.\n",
+ "\n",
+ "YOLOv8 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n",
+ "\n",
+ "We hope that the resources in this notebook will help you get the most out of YOLOv8. Please browse the YOLOv8
Object Counting Docs for details, raise an issue on
GitHub for support, and join our
Discord community for questions and discussions!\n",
+ "\n",
+ "
"
+ ]
},
- "cells": [
- {
- "cell_type": "markdown",
- "source": [
- "\n",
- "\n",
- "
\n",
- " \n",
- "\n",
- " [中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/)\n",
- "\n",
- "
\n",
- "
\n",
- "
\n",
- "
\n",
- "
\n",
- "\n",
- "Welcome to the Ultralytics YOLOv8 🚀 notebook!
YOLOv8 is the latest version of the YOLO (You Only Look Once) AI models developed by
Ultralytics. This notebook serves as the starting point for exploring the various resources available to help you get started with YOLOv8 and understand its features and capabilities.\n",
- "\n",
- "YOLOv8 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n",
- "\n",
- "We hope that the resources in this notebook will help you get the most out of YOLOv8. Please browse the YOLOv8
Object Counting Docs for details, raise an issue on
GitHub for support, and join our
Discord community for questions and discussions!\n",
- "\n",
- "
"
- ],
- "metadata": {
- "id": "PN1cAxdvd61e"
- }
- },
- {
- "cell_type": "markdown",
- "source": [
- "# Setup\n",
- "\n",
- "Pip install `ultralytics` and [dependencies](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) and check software and hardware.\n",
- "\n",
- "[![PyPI - Version](https://img.shields.io/pypi/v/ultralytics?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics/) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics/)"
- ],
- "metadata": {
- "id": "o68Sg1oOeZm2"
- }
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {
- "id": "9dSwz_uOReMI",
- "outputId": "fd3bab88-2f25-46c0-cae9-04d2beedc0c1",
- "colab": {
- "base_uri": "https://localhost:8080/"
- }
- },
- "outputs": [
- {
- "output_type": "stream",
- "name": "stdout",
- "text": [
- "Ultralytics YOLOv8.2.18 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n",
- "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 29.8/78.2 GB disk)\n"
- ]
- }
- ],
- "source": [
- "%pip install ultralytics\n",
- "import ultralytics\n",
- "ultralytics.checks()"
- ]
- },
- {
- "cell_type": "markdown",
- "source": [
- "# Object Counting using Ultralytics YOLOv8 🚀\n",
- "\n",
- "## What is Object Counting?\n",
- "\n",
- "Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) involves accurate identification and counting of specific objects in videos and camera streams. YOLOv8 excels in real-time applications, providing efficient and precise object counting for various scenarios like crowd analysis and surveillance, thanks to its state-of-the-art algorithms and deep learning capabilities.\n",
- "\n",
- "## Advantages of Object Counting?\n",
- "\n",
- "- **Resource Optimization:** Object counting facilitates efficient resource management by providing accurate counts, and optimizing resource allocation in applications like inventory management.\n",
- "- **Enhanced Security:** Object counting enhances security and surveillance by accurately tracking and counting entities, aiding in proactive threat detection.\n",
- "- **Informed Decision-Making:** Object counting offers valuable insights for decision-making, optimizing processes in retail, traffic management, and various other domains.\n",
- "\n",
- "## Real World Applications\n",
- "\n",
- "| Logistics | Aquaculture |\n",
- "|:-------------------------------------------------------------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------:|\n",
- "| ![Conveyor Belt Packets Counting Using Ultralytics YOLOv8](https://github.com/RizwanMunawar/ultralytics/assets/62513924/70e2d106-510c-4c6c-a57a-d34a765aa757) | ![Fish Counting in Sea using Ultralytics YOLOv8](https://github.com/RizwanMunawar/ultralytics/assets/62513924/c60d047b-3837-435f-8d29-bb9fc95d2191) |\n",
- "| Conveyor Belt Packets Counting Using Ultralytics YOLOv8 | Fish Counting in Sea using Ultralytics YOLOv8 |\n"
- ],
- "metadata": {
- "id": "m7VkxQ2aeg7k"
- }
- },
- {
- "cell_type": "code",
- "source": [
- "import cv2\n",
- "from ultralytics import YOLO, solutions\n",
- "\n",
- "# Load the pre-trained YOLOv8 model\n",
- "model = YOLO(\"yolov8n.pt\")\n",
- "\n",
- "# Open the video file\n",
- "cap = cv2.VideoCapture(\"path/to/video/file.mp4\")\n",
- "assert cap.isOpened(), \"Error reading video file\"\n",
- "\n",
- "# Get video properties: width, height, and frames per second (fps)\n",
- "w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))\n",
- "\n",
- "# Define points for a line or region of interest in the video frame\n",
- "line_points = [(20, 400), (1080, 400)] # Line coordinates\n",
- "\n",
- "# Specify classes to count, for example: person (0) and car (2)\n",
- "classes_to_count = [0, 2] # Class IDs for person and car\n",
- "\n",
- "# Initialize the video writer to save the output video\n",
- "video_writer = cv2.VideoWriter(\"object_counting_output.avi\", cv2.VideoWriter_fourcc(*\"mp4v\"), fps, (w, h))\n",
- "\n",
- "# Initialize the Object Counter with visualization options and other parameters\n",
- "counter = solutions.ObjectCounter(\n",
- " view_img=True, # Display the image during processing\n",
- " reg_pts=line_points, # Region of interest points\n",
- " classes_names=model.names, # Class names from the YOLO model\n",
- " draw_tracks=True, # Draw tracking lines for objects\n",
- " line_thickness=2, # Thickness of the lines drawn\n",
- ")\n",
- "\n",
- "# Process video frames in a loop\n",
- "while cap.isOpened():\n",
- " success, im0 = cap.read()\n",
- " if not success:\n",
- " print(\"Video frame is empty or video processing has been successfully completed.\")\n",
- " break\n",
- "\n",
- " # Perform object tracking on the current frame, filtering by specified classes\n",
- " tracks = model.track(im0, persist=True, show=False, classes=classes_to_count)\n",
- "\n",
- " # Use the Object Counter to count objects in the frame and get the annotated image\n",
- " im0 = counter.start_counting(im0, tracks)\n",
- "\n",
- " # Write the annotated frame to the output video\n",
- " video_writer.write(im0)\n",
- "\n",
- "# Release the video capture and writer objects\n",
- "cap.release()\n",
- "video_writer.release()\n",
- "\n",
- "# Close all OpenCV windows\n",
- "cv2.destroyAllWindows()"
- ],
- "metadata": {
- "id": "Cx-u59HQdu2o"
- },
- "execution_count": null,
- "outputs": []
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "o68Sg1oOeZm2"
+ },
+ "source": [
+ "# Setup\n",
+ "\n",
+ "Pip install `ultralytics` and [dependencies](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) and check software and hardware.\n",
+ "\n",
+ "[![PyPI - Version](https://img.shields.io/pypi/v/ultralytics?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics/) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics/)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
},
+ "id": "9dSwz_uOReMI",
+ "outputId": "fd3bab88-2f25-46c0-cae9-04d2beedc0c1"
+ },
+ "outputs": [
{
- "cell_type": "markdown",
- "source": [
- "# Additional Resources\n",
- "\n",
- "## Community Support\n",
- "\n",
- "For more information on counting objects with Ultralytics, you can explore the comprehensive [Ultralytics Object Counting Docs](https://docs.ultralytics.com/guides/object-counting/). This guide covers everything from basic concepts to advanced techniques, ensuring you get the most out of counting and visualization.\n",
- "\n",
- "## Ultralytics ⚡ Resources\n",
- "\n",
- "At Ultralytics, we are committed to providing cutting-edge AI solutions. Here are some key resources to learn more about our company and get involved with our community:\n",
- "\n",
- "- [Ultralytics HUB](https://ultralytics.com/hub): Simplify your AI projects with Ultralytics HUB, our no-code tool for effortless YOLO training and deployment.\n",
- "- [Ultralytics Licensing](https://ultralytics.com/license): Review our licensing terms to understand how you can use our software in your projects.\n",
- "- [About Us](https://ultralytics.com/about): Discover our mission, vision, and the story behind Ultralytics.\n",
- "- [Join Our Team](https://ultralytics.com/work): Explore career opportunities and join our team of talented professionals.\n",
- "\n",
- "## YOLOv8 🚀 Resources\n",
- "\n",
- "YOLOv8 is the latest evolution in the YOLO series, offering state-of-the-art performance in object detection and image segmentation. Here are some essential resources to help you get started with YOLOv8:\n",
- "\n",
- "- [GitHub](https://github.com/ultralytics/ultralytics): Access the YOLOv8 repository on GitHub, where you can find the source code, contribute to the project, and report issues.\n",
- "- [Docs](https://docs.ultralytics.com/): Explore the official documentation for YOLOv8, including installation guides, tutorials, and detailed API references.\n",
- "- [Discord](https://ultralytics.com/discord): Join our Discord community to connect with other users, share your projects, and get help from the Ultralytics team.\n",
- "\n",
- "These resources are designed to help you leverage the full potential of Ultralytics' offerings and YOLOv8. Whether you're a beginner or an experienced developer, you'll find the information and support you need to succeed."
- ],
- "metadata": {
- "id": "QrlKg-y3fEyD"
- }
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Ultralytics YOLOv8.2.18 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n",
+ "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 29.8/78.2 GB disk)\n"
+ ]
}
- ]
+ ],
+ "source": [
+ "%pip install ultralytics\n",
+ "import ultralytics\n",
+ "\n",
+ "ultralytics.checks()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "m7VkxQ2aeg7k"
+ },
+ "source": [
+ "# Object Counting using Ultralytics YOLOv8 🚀\n",
+ "\n",
+ "## What is Object Counting?\n",
+ "\n",
+ "Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) involves accurate identification and counting of specific objects in videos and camera streams. YOLOv8 excels in real-time applications, providing efficient and precise object counting for various scenarios like crowd analysis and surveillance, thanks to its state-of-the-art algorithms and deep learning capabilities.\n",
+ "\n",
+ "## Advantages of Object Counting?\n",
+ "\n",
+ "- **Resource Optimization:** Object counting facilitates efficient resource management by providing accurate counts, and optimizing resource allocation in applications like inventory management.\n",
+ "- **Enhanced Security:** Object counting enhances security and surveillance by accurately tracking and counting entities, aiding in proactive threat detection.\n",
+ "- **Informed Decision-Making:** Object counting offers valuable insights for decision-making, optimizing processes in retail, traffic management, and various other domains.\n",
+ "\n",
+ "## Real World Applications\n",
+ "\n",
+ "| Logistics | Aquaculture |\n",
+ "|:-------------------------------------------------------------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------:|\n",
+ "| ![Conveyor Belt Packets Counting Using Ultralytics YOLOv8](https://github.com/RizwanMunawar/ultralytics/assets/62513924/70e2d106-510c-4c6c-a57a-d34a765aa757) | ![Fish Counting in Sea using Ultralytics YOLOv8](https://github.com/RizwanMunawar/ultralytics/assets/62513924/c60d047b-3837-435f-8d29-bb9fc95d2191) |\n",
+ "| Conveyor Belt Packets Counting Using Ultralytics YOLOv8 | Fish Counting in Sea using Ultralytics YOLOv8 |\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "Cx-u59HQdu2o"
+ },
+ "outputs": [],
+ "source": [
+ "import cv2\n",
+ "\n",
+ "from ultralytics import YOLO, solutions\n",
+ "\n",
+ "# Load the pre-trained YOLOv8 model\n",
+ "model = YOLO(\"yolov8n.pt\")\n",
+ "\n",
+ "# Open the video file\n",
+ "cap = cv2.VideoCapture(\"path/to/video/file.mp4\")\n",
+ "assert cap.isOpened(), \"Error reading video file\"\n",
+ "\n",
+ "# Get video properties: width, height, and frames per second (fps)\n",
+ "w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))\n",
+ "\n",
+ "# Define points for a line or region of interest in the video frame\n",
+ "line_points = [(20, 400), (1080, 400)] # Line coordinates\n",
+ "\n",
+ "# Specify classes to count, for example: person (0) and car (2)\n",
+ "classes_to_count = [0, 2] # Class IDs for person and car\n",
+ "\n",
+ "# Initialize the video writer to save the output video\n",
+ "video_writer = cv2.VideoWriter(\"object_counting_output.avi\", cv2.VideoWriter_fourcc(*\"mp4v\"), fps, (w, h))\n",
+ "\n",
+ "# Initialize the Object Counter with visualization options and other parameters\n",
+ "counter = solutions.ObjectCounter(\n",
+ " view_img=True, # Display the image during processing\n",
+ " reg_pts=line_points, # Region of interest points\n",
+ " classes_names=model.names, # Class names from the YOLO model\n",
+ " draw_tracks=True, # Draw tracking lines for objects\n",
+ " line_thickness=2, # Thickness of the lines drawn\n",
+ ")\n",
+ "\n",
+ "# Process video frames in a loop\n",
+ "while cap.isOpened():\n",
+ " success, im0 = cap.read()\n",
+ " if not success:\n",
+ " print(\"Video frame is empty or video processing has been successfully completed.\")\n",
+ " break\n",
+ "\n",
+ " # Perform object tracking on the current frame, filtering by specified classes\n",
+ " tracks = model.track(im0, persist=True, show=False, classes=classes_to_count)\n",
+ "\n",
+ " # Use the Object Counter to count objects in the frame and get the annotated image\n",
+ " im0 = counter.start_counting(im0, tracks)\n",
+ "\n",
+ " # Write the annotated frame to the output video\n",
+ " video_writer.write(im0)\n",
+ "\n",
+ "# Release the video capture and writer objects\n",
+ "cap.release()\n",
+ "video_writer.release()\n",
+ "\n",
+ "# Close all OpenCV windows\n",
+ "cv2.destroyAllWindows()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "QrlKg-y3fEyD"
+ },
+ "source": [
+ "# Additional Resources\n",
+ "\n",
+ "## Community Support\n",
+ "\n",
+ "For more information on counting objects with Ultralytics, you can explore the comprehensive [Ultralytics Object Counting Docs](https://docs.ultralytics.com/guides/object-counting/). This guide covers everything from basic concepts to advanced techniques, ensuring you get the most out of counting and visualization.\n",
+ "\n",
+ "## Ultralytics ⚡ Resources\n",
+ "\n",
+ "At Ultralytics, we are committed to providing cutting-edge AI solutions. Here are some key resources to learn more about our company and get involved with our community:\n",
+ "\n",
+ "- [Ultralytics HUB](https://ultralytics.com/hub): Simplify your AI projects with Ultralytics HUB, our no-code tool for effortless YOLO training and deployment.\n",
+ "- [Ultralytics Licensing](https://ultralytics.com/license): Review our licensing terms to understand how you can use our software in your projects.\n",
+ "- [About Us](https://ultralytics.com/about): Discover our mission, vision, and the story behind Ultralytics.\n",
+ "- [Join Our Team](https://ultralytics.com/work): Explore career opportunities and join our team of talented professionals.\n",
+ "\n",
+ "## YOLOv8 🚀 Resources\n",
+ "\n",
+ "YOLOv8 is the latest evolution in the YOLO series, offering state-of-the-art performance in object detection and image segmentation. Here are some essential resources to help you get started with YOLOv8:\n",
+ "\n",
+ "- [GitHub](https://github.com/ultralytics/ultralytics): Access the YOLOv8 repository on GitHub, where you can find the source code, contribute to the project, and report issues.\n",
+ "- [Docs](https://docs.ultralytics.com/): Explore the official documentation for YOLOv8, including installation guides, tutorials, and detailed API references.\n",
+ "- [Discord](https://ultralytics.com/discord): Join our Discord community to connect with other users, share your projects, and get help from the Ultralytics team.\n",
+ "\n",
+ "These resources are designed to help you leverage the full potential of Ultralytics' offerings and YOLOv8. Whether you're a beginner or an experienced developer, you'll find the information and support you need to succeed."
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "gpuType": "T4",
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
}
diff --git a/examples/object_tracking.ipynb b/examples/object_tracking.ipynb
index 5aedab5a4a6..14d5981ae03 100644
--- a/examples/object_tracking.ipynb
+++ b/examples/object_tracking.ipynb
@@ -1,243 +1,245 @@
{
- "nbformat": 4,
- "nbformat_minor": 0,
- "metadata": {
- "colab": {
- "provenance": [],
- "gpuType": "T4"
- },
- "kernelspec": {
- "name": "python3",
- "display_name": "Python 3"
- },
- "language_info": {
- "name": "python"
- },
- "accelerator": "GPU"
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "PN1cAxdvd61e"
+ },
+ "source": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ "\n",
+ " [中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/)\n",
+ "\n",
+ "
\n",
+ "
\n",
+ "
\n",
+ "
\n",
+ "
\n",
+ "\n",
+ "Welcome to the Ultralytics YOLOv8 🚀 notebook!
YOLOv8 is the latest version of the YOLO (You Only Look Once) AI models developed by
Ultralytics. This notebook serves as the starting point for exploring the various resources available to help you get started with YOLOv8 and understand its features and capabilities.\n",
+ "\n",
+ "YOLOv8 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n",
+ "\n",
+ "We hope that the resources in this notebook will help you get the most out of YOLOv8. Please browse the YOLOv8
Tracking Docs for details, raise an issue on
GitHub for support, and join our
Discord community for questions and discussions!\n",
+ "\n",
+ "
"
+ ]
},
- "cells": [
- {
- "cell_type": "markdown",
- "source": [
- "\n",
- "\n",
- "
\n",
- " \n",
- "\n",
- " [中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/)\n",
- "\n",
- "
\n",
- "
\n",
- "
\n",
- "
\n",
- "
\n",
- "\n",
- "Welcome to the Ultralytics YOLOv8 🚀 notebook!
YOLOv8 is the latest version of the YOLO (You Only Look Once) AI models developed by
Ultralytics. This notebook serves as the starting point for exploring the various resources available to help you get started with YOLOv8 and understand its features and capabilities.\n",
- "\n",
- "YOLOv8 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n",
- "\n",
- "We hope that the resources in this notebook will help you get the most out of YOLOv8. Please browse the YOLOv8
Tracking Docs for details, raise an issue on
GitHub for support, and join our
Discord community for questions and discussions!\n",
- "\n",
- "
"
- ],
- "metadata": {
- "id": "PN1cAxdvd61e"
- }
- },
- {
- "cell_type": "markdown",
- "source": [
- "# Setup\n",
- "\n",
- "Pip install `ultralytics` and [dependencies](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) and check software and hardware.\n",
- "\n",
- "[![PyPI - Version](https://img.shields.io/pypi/v/ultralytics?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics/) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics/)"
- ],
- "metadata": {
- "id": "o68Sg1oOeZm2"
- }
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {
- "id": "9dSwz_uOReMI",
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "outputId": "ed8c2370-8fc7-4e4e-f669-d0bae4d944e9"
- },
- "outputs": [
- {
- "output_type": "stream",
- "name": "stdout",
- "text": [
- "Ultralytics YOLOv8.2.17 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n",
- "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 29.8/78.2 GB disk)\n"
- ]
- }
- ],
- "source": [
- "%pip install ultralytics\n",
- "import ultralytics\n",
- "ultralytics.checks()"
- ]
- },
- {
- "cell_type": "markdown",
- "source": [
- "# Ultralytics Object Tracking\n",
- "\n",
- "[Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) instance segmentation involves identifying and outlining individual objects in an image, providing a detailed understanding of spatial distribution. Unlike semantic segmentation, it uniquely labels and precisely delineates each object, crucial for tasks like object detection and medical imaging.\n",
- "\n",
- "There are two types of instance segmentation tracking available in the Ultralytics package:\n",
- "\n",
- "- **Instance Segmentation with Class Objects:** Each class object is assigned a unique color for clear visual separation.\n",
- "\n",
- "- **Instance Segmentation with Object Tracks:** Every track is represented by a distinct color, facilitating easy identification and tracking.\n",
- "\n",
- "## Samples\n",
- "\n",
- "| Instance Segmentation | Instance Segmentation + Object Tracking |\n",
- "|:---------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------:|\n",
- "| ![Ultralytics Instance Segmentation](https://github.com/RizwanMunawar/ultralytics/assets/62513924/d4ad3499-1f33-4871-8fbc-1be0b2643aa2) | ![Ultralytics Instance Segmentation with Object Tracking](https://github.com/RizwanMunawar/ultralytics/assets/62513924/2e5c38cc-fd5c-4145-9682-fa94ae2010a0) |\n",
- "| Ultralytics Instance Segmentation 😍 | Ultralytics Instance Segmentation with Object Tracking 🔥 |"
- ],
- "metadata": {
- "id": "m7VkxQ2aeg7k"
- }
- },
- {
- "cell_type": "markdown",
- "source": [
- "## CLI\n",
- "\n",
- "Command-Line Interface (CLI) example."
- ],
- "metadata": {
- "id": "-ZF9DM6e6gz0"
- }
- },
- {
- "cell_type": "code",
- "source": [
- "!yolo track source=\"/path/to/video/file.mp4\" save=True"
- ],
- "metadata": {
- "id": "-XJqhOwo6iqT"
- },
- "execution_count": null,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "source": [
- "## Python\n",
- "\n",
- "Python Instance Segmentation and Object tracking example."
- ],
- "metadata": {
- "id": "XRcw0vIE6oNb"
- }
- },
- {
- "cell_type": "code",
- "source": [
- "from collections import defaultdict\n",
- "\n",
- "import cv2\n",
- "from ultralytics import YOLO\n",
- "from ultralytics.utils.plotting import Annotator, colors\n",
- "\n",
- "# Dictionary to store tracking history with default empty lists\n",
- "track_history = defaultdict(lambda: [])\n",
- "\n",
- "# Load the YOLO model with segmentation capabilities\n",
- "model = YOLO(\"yolov8n-seg.pt\")\n",
- "\n",
- "# Open the video file\n",
- "cap = cv2.VideoCapture(\"path/to/video/file.mp4\")\n",
- "\n",
- "# Retrieve video properties: width, height, and frames per second\n",
- "w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))\n",
- "\n",
- "# Initialize video writer to save the output video with the specified properties\n",
- "out = cv2.VideoWriter(\"instance-segmentation-object-tracking.avi\", cv2.VideoWriter_fourcc(*\"MJPG\"), fps, (w, h))\n",
- "\n",
- "while True:\n",
- " # Read a frame from the video\n",
- " ret, im0 = cap.read()\n",
- " if not ret:\n",
- " print(\"Video frame is empty or video processing has been successfully completed.\")\n",
- " break\n",
- "\n",
- " # Create an annotator object to draw on the frame\n",
- " annotator = Annotator(im0, line_width=2)\n",
- "\n",
- " # Perform object tracking on the current frame\n",
- " results = model.track(im0, persist=True)\n",
- "\n",
- " # Check if tracking IDs and masks are present in the results\n",
- " if results[0].boxes.id is not None and results[0].masks is not None:\n",
- " # Extract masks and tracking IDs\n",
- " masks = results[0].masks.xy\n",
- " track_ids = results[0].boxes.id.int().cpu().tolist()\n",
- "\n",
- " # Annotate each mask with its corresponding tracking ID and color\n",
- " for mask, track_id in zip(masks, track_ids):\n",
- " annotator.seg_bbox(mask=mask, mask_color=colors(track_id, True), track_label=str(track_id))\n",
- "\n",
- " # Write the annotated frame to the output video\n",
- " out.write(im0)\n",
- " # Display the annotated frame\n",
- " cv2.imshow(\"instance-segmentation-object-tracking\", im0)\n",
- "\n",
- " # Exit the loop if 'q' is pressed\n",
- " if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n",
- " break\n",
- "\n",
- "# Release the video writer and capture objects, and close all OpenCV windows\n",
- "out.release()\n",
- "cap.release()\n",
- "cv2.destroyAllWindows()"
- ],
- "metadata": {
- "id": "Cx-u59HQdu2o"
- },
- "execution_count": null,
- "outputs": []
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "o68Sg1oOeZm2"
+ },
+ "source": [
+ "# Setup\n",
+ "\n",
+ "Pip install `ultralytics` and [dependencies](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) and check software and hardware.\n",
+ "\n",
+ "[![PyPI - Version](https://img.shields.io/pypi/v/ultralytics?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics/) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics/)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
},
+ "id": "9dSwz_uOReMI",
+ "outputId": "ed8c2370-8fc7-4e4e-f669-d0bae4d944e9"
+ },
+ "outputs": [
{
- "cell_type": "markdown",
- "source": [
- "# Additional Resources\n",
- "\n",
- "## Community Support\n",
- "\n",
- "For more information on using tracking with Ultralytics, you can explore the comprehensive [Ultralytics Tracking Docs](https://docs.ultralytics.com/modes/track/). This guide covers everything from basic concepts to advanced techniques, ensuring you get the most out of tracking and visualization.\n",
- "\n",
- "## Ultralytics ⚡ Resources\n",
- "\n",
- "At Ultralytics, we are committed to providing cutting-edge AI solutions. Here are some key resources to learn more about our company and get involved with our community:\n",
- "\n",
- "- [Ultralytics HUB](https://ultralytics.com/hub): Simplify your AI projects with Ultralytics HUB, our no-code tool for effortless YOLO training and deployment.\n",
- "- [Ultralytics Licensing](https://ultralytics.com/license): Review our licensing terms to understand how you can use our software in your projects.\n",
- "- [About Us](https://ultralytics.com/about): Discover our mission, vision, and the story behind Ultralytics.\n",
- "- [Join Our Team](https://ultralytics.com/work): Explore career opportunities and join our team of talented professionals.\n",
- "\n",
- "## YOLOv8 🚀 Resources\n",
- "\n",
- "YOLOv8 is the latest evolution in the YOLO series, offering state-of-the-art performance in object detection and image segmentation. Here are some essential resources to help you get started with YOLOv8:\n",
- "\n",
- "- [GitHub](https://github.com/ultralytics/ultralytics): Access the YOLOv8 repository on GitHub, where you can find the source code, contribute to the project, and report issues.\n",
- "- [Docs](https://docs.ultralytics.com/): Explore the official documentation for YOLOv8, including installation guides, tutorials, and detailed API references.\n",
- "- [Discord](https://ultralytics.com/discord): Join our Discord community to connect with other users, share your projects, and get help from the Ultralytics team.\n",
- "\n",
- "These resources are designed to help you leverage the full potential of Ultralytics' offerings and YOLOv8. Whether you're a beginner or an experienced developer, you'll find the information and support you need to succeed."
- ],
- "metadata": {
- "id": "QrlKg-y3fEyD"
- }
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Ultralytics YOLOv8.2.17 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n",
+ "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 29.8/78.2 GB disk)\n"
+ ]
}
- ]
+ ],
+ "source": [
+ "%pip install ultralytics\n",
+ "import ultralytics\n",
+ "\n",
+ "ultralytics.checks()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "m7VkxQ2aeg7k"
+ },
+ "source": [
+ "# Ultralytics Object Tracking\n",
+ "\n",
+ "[Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) instance segmentation involves identifying and outlining individual objects in an image, providing a detailed understanding of spatial distribution. Unlike semantic segmentation, it uniquely labels and precisely delineates each object, crucial for tasks like object detection and medical imaging.\n",
+ "\n",
+ "There are two types of instance segmentation tracking available in the Ultralytics package:\n",
+ "\n",
+ "- **Instance Segmentation with Class Objects:** Each class object is assigned a unique color for clear visual separation.\n",
+ "\n",
+ "- **Instance Segmentation with Object Tracks:** Every track is represented by a distinct color, facilitating easy identification and tracking.\n",
+ "\n",
+ "## Samples\n",
+ "\n",
+ "| Instance Segmentation | Instance Segmentation + Object Tracking |\n",
+ "|:---------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------:|\n",
+ "| ![Ultralytics Instance Segmentation](https://github.com/RizwanMunawar/ultralytics/assets/62513924/d4ad3499-1f33-4871-8fbc-1be0b2643aa2) | ![Ultralytics Instance Segmentation with Object Tracking](https://github.com/RizwanMunawar/ultralytics/assets/62513924/2e5c38cc-fd5c-4145-9682-fa94ae2010a0) |\n",
+ "| Ultralytics Instance Segmentation 😍 | Ultralytics Instance Segmentation with Object Tracking 🔥 |"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "-ZF9DM6e6gz0"
+ },
+ "source": [
+ "## CLI\n",
+ "\n",
+ "Command-Line Interface (CLI) example."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "-XJqhOwo6iqT"
+ },
+ "outputs": [],
+ "source": [
+ "!yolo track source=\"/path/to/video/file.mp4\" save=True"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "XRcw0vIE6oNb"
+ },
+ "source": [
+ "## Python\n",
+ "\n",
+ "Python Instance Segmentation and Object tracking example."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "Cx-u59HQdu2o"
+ },
+ "outputs": [],
+ "source": [
+ "from collections import defaultdict\n",
+ "\n",
+ "import cv2\n",
+ "\n",
+ "from ultralytics import YOLO\n",
+ "from ultralytics.utils.plotting import Annotator, colors\n",
+ "\n",
+ "# Dictionary to store tracking history with default empty lists\n",
+ "track_history = defaultdict(lambda: [])\n",
+ "\n",
+ "# Load the YOLO model with segmentation capabilities\n",
+ "model = YOLO(\"yolov8n-seg.pt\")\n",
+ "\n",
+ "# Open the video file\n",
+ "cap = cv2.VideoCapture(\"path/to/video/file.mp4\")\n",
+ "\n",
+ "# Retrieve video properties: width, height, and frames per second\n",
+ "w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))\n",
+ "\n",
+ "# Initialize video writer to save the output video with the specified properties\n",
+ "out = cv2.VideoWriter(\"instance-segmentation-object-tracking.avi\", cv2.VideoWriter_fourcc(*\"MJPG\"), fps, (w, h))\n",
+ "\n",
+ "while True:\n",
+ " # Read a frame from the video\n",
+ " ret, im0 = cap.read()\n",
+ " if not ret:\n",
+ " print(\"Video frame is empty or video processing has been successfully completed.\")\n",
+ " break\n",
+ "\n",
+ " # Create an annotator object to draw on the frame\n",
+ " annotator = Annotator(im0, line_width=2)\n",
+ "\n",
+ " # Perform object tracking on the current frame\n",
+ " results = model.track(im0, persist=True)\n",
+ "\n",
+ " # Check if tracking IDs and masks are present in the results\n",
+ " if results[0].boxes.id is not None and results[0].masks is not None:\n",
+ " # Extract masks and tracking IDs\n",
+ " masks = results[0].masks.xy\n",
+ " track_ids = results[0].boxes.id.int().cpu().tolist()\n",
+ "\n",
+ " # Annotate each mask with its corresponding tracking ID and color\n",
+ " for mask, track_id in zip(masks, track_ids):\n",
+ " annotator.seg_bbox(mask=mask, mask_color=colors(track_id, True), track_label=str(track_id))\n",
+ "\n",
+ " # Write the annotated frame to the output video\n",
+ " out.write(im0)\n",
+ " # Display the annotated frame\n",
+ " cv2.imshow(\"instance-segmentation-object-tracking\", im0)\n",
+ "\n",
+ " # Exit the loop if 'q' is pressed\n",
+ " if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n",
+ " break\n",
+ "\n",
+ "# Release the video writer and capture objects, and close all OpenCV windows\n",
+ "out.release()\n",
+ "cap.release()\n",
+ "cv2.destroyAllWindows()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "QrlKg-y3fEyD"
+ },
+ "source": [
+ "# Additional Resources\n",
+ "\n",
+ "## Community Support\n",
+ "\n",
+ "For more information on using tracking with Ultralytics, you can explore the comprehensive [Ultralytics Tracking Docs](https://docs.ultralytics.com/modes/track/). This guide covers everything from basic concepts to advanced techniques, ensuring you get the most out of tracking and visualization.\n",
+ "\n",
+ "## Ultralytics ⚡ Resources\n",
+ "\n",
+ "At Ultralytics, we are committed to providing cutting-edge AI solutions. Here are some key resources to learn more about our company and get involved with our community:\n",
+ "\n",
+ "- [Ultralytics HUB](https://ultralytics.com/hub): Simplify your AI projects with Ultralytics HUB, our no-code tool for effortless YOLO training and deployment.\n",
+ "- [Ultralytics Licensing](https://ultralytics.com/license): Review our licensing terms to understand how you can use our software in your projects.\n",
+ "- [About Us](https://ultralytics.com/about): Discover our mission, vision, and the story behind Ultralytics.\n",
+ "- [Join Our Team](https://ultralytics.com/work): Explore career opportunities and join our team of talented professionals.\n",
+ "\n",
+ "## YOLOv8 🚀 Resources\n",
+ "\n",
+ "YOLOv8 is the latest evolution in the YOLO series, offering state-of-the-art performance in object detection and image segmentation. Here are some essential resources to help you get started with YOLOv8:\n",
+ "\n",
+ "- [GitHub](https://github.com/ultralytics/ultralytics): Access the YOLOv8 repository on GitHub, where you can find the source code, contribute to the project, and report issues.\n",
+ "- [Docs](https://docs.ultralytics.com/): Explore the official documentation for YOLOv8, including installation guides, tutorials, and detailed API references.\n",
+ "- [Discord](https://ultralytics.com/discord): Join our Discord community to connect with other users, share your projects, and get help from the Ultralytics team.\n",
+ "\n",
+ "These resources are designed to help you leverage the full potential of Ultralytics' offerings and YOLOv8. Whether you're a beginner or an experienced developer, you'll find the information and support you need to succeed."
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "gpuType": "T4",
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
}
diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py
index 07bc2391755..490cffee7a3 100644
--- a/ultralytics/__init__.py
+++ b/ultralytics/__init__.py
@@ -8,7 +8,7 @@
os.environ["OMP_NUM_THREADS"] = "1" # reduce CPU utilization during training
from ultralytics.data.explorer.explorer import Explorer
-from ultralytics.models import NAS, RTDETR, SAM, YOLO, FastSAM, YOLOWorld, NeuronYOLO
+from ultralytics.models import NAS, RTDETR, SAM, YOLO, FastSAM, NeuronYOLO, YOLOWorld
from ultralytics.utils import ASSETS, SETTINGS
from ultralytics.utils.checks import check_yolo as checks
from ultralytics.utils.downloads import download
diff --git a/ultralytics/cfg/__init__.py b/ultralytics/cfg/__init__.py
index 293abcb53c7..45b830a8c23 100644
--- a/ultralytics/cfg/__init__.py
+++ b/ultralytics/cfg/__init__.py
@@ -316,7 +316,6 @@ def check_cfg(cfg, hard=True):
def get_save_dir(args, name=None):
"""Returns the directory path for saving outputs, derived from arguments or default settings."""
-
if getattr(args, "save_dir", None):
save_dir = args.save_dir
else:
@@ -331,7 +330,6 @@ def get_save_dir(args, name=None):
def _handle_deprecation(custom):
"""Handles deprecated configuration keys by mapping them to current equivalents with deprecation warnings."""
-
for key in custom.copy().keys():
if key == "boxes":
deprecation_warn(key, "show_boxes")
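
For readers skimming the `_handle_deprecation` hunk above, the remapping it performs follows the pattern sketched below. The rename dict and the pop/assign step are illustrative assumptions based only on the visible `boxes` → `show_boxes` branch, not on the full function.

```python
# Minimal sketch of the deprecation-remapping pattern; the rename table and the
# pop/assign step are assumptions, only the "boxes" -> "show_boxes" branch is shown above.
def remap_deprecated(custom: dict) -> dict:
    renamed = {"boxes": "show_boxes"}  # old key -> current key (illustrative subset)
    for key in list(custom.keys()):
        if key in renamed:
            new_key = renamed[key]
            print(f"WARNING ⚠️ '{key}' is deprecated, use '{new_key}' instead.")  # stand-in for deprecation_warn()
            custom[new_key] = custom.pop(key)
    return custom


print(remap_deprecated({"boxes": False}))  # -> {'show_boxes': False}
```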
diff --git a/ultralytics/data/augment.py b/ultralytics/data/augment.py
index 2400de11cd8..381b4f8e022 100644
--- a/ultralytics/data/augment.py
+++ b/ultralytics/data/augment.py
@@ -416,7 +416,6 @@ def __init__(
self, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, border=(0, 0), pre_transform=None
):
"""Initializes RandomPerspective object with transformation parameters."""
-
self.degrees = degrees
self.translate = translate
self.scale = scale
@@ -438,7 +437,6 @@ def affine_transform(self, img, border):
M (ndarray): Transformation matrix.
s (float): Scale factor.
"""
-
# Center
C = np.eye(3, dtype=np.float32)
diff --git a/ultralytics/data/converter.py b/ultralytics/data/converter.py
index 0ee390877da..e0352c415eb 100644
--- a/ultralytics/data/converter.py
+++ b/ultralytics/data/converter.py
@@ -115,7 +115,7 @@ def coco91_to_coco80_class():
def coco80_to_coco91_class():
- """
+ r"""
Converts 80-index (val2014) to 91-index (paper).
For details see https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/.
@@ -243,7 +243,6 @@ def convert_coco(
Output:
Generates output files in the specified output directory.
"""
-
# Create dataset directory
save_dir = increment_path(save_dir) # increment if save directory already exists
for p in save_dir / "labels", save_dir / "images":
@@ -410,7 +409,7 @@ def convert_label(image_name, image_width, image_height, orig_label_dir, save_di
normalized_coords = [
coords[i] / image_width if i % 2 == 0 else coords[i] / image_height for i in range(8)
]
- formatted_coords = ["{:.6g}".format(coord) for coord in normalized_coords]
+ formatted_coords = [f"{coord:.6g}" for coord in normalized_coords]
g.write(f"{class_idx} {' '.join(formatted_coords)}\n")
for phase in ["train", "val"]:
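
The converter change above swaps `str.format` for an f-string; the two forms produce identical output, as this quick self-check shows.

```python
# The f-string and str.format() spellings of the same ".6g" format are equivalent
coords = [0.123456789, 0.5, 1 / 3]
old = ["{:.6g}".format(c) for c in coords]
new = [f"{c:.6g}" for c in coords]
assert old == new == ["0.123457", "0.5", "0.333333"]
```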
diff --git a/ultralytics/data/dataset.py b/ultralytics/data/dataset.py
index 8bbb08f8a9f..1e7d19d0e97 100644
--- a/ultralytics/data/dataset.py
+++ b/ultralytics/data/dataset.py
@@ -293,7 +293,7 @@ def get_labels(self):
"""Loads annotations from a JSON file, filters, and normalizes bounding boxes for each image."""
labels = []
LOGGER.info("Loading annotation file...")
- with open(self.json_file, "r") as f:
+ with open(self.json_file) as f:
annotations = json.load(f)
images = {f'{x["id"]:d}': x for x in annotations["images"]}
imgToAnns = defaultdict(list)
diff --git a/ultralytics/data/explorer/explorer.py b/ultralytics/data/explorer/explorer.py
index 1852b89b3fd..96e9228c3b7 100644
--- a/ultralytics/data/explorer/explorer.py
+++ b/ultralytics/data/explorer/explorer.py
@@ -222,6 +222,7 @@ def sql_query(
def plot_sql_query(self, query: str, labels: bool = True) -> Image.Image:
"""
Plot the results of a SQL-Like query on the table.
+
Args:
query (str): SQL query to run.
labels (bool): Whether to plot the labels or not.
@@ -467,6 +468,6 @@ def generate_report(self, result):
"""
Generate a report of the dataset.
- TODO
+ Todo:
"""
pass
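
As a rough usage sketch for the Explorer SQL helpers touched above: the constructor arguments, the `create_embeddings_table()` call and the query string below are assumptions drawn from the public Explorer docs, not from this diff; only the `plot_sql_query(query, labels)` signature comes from the hunk itself.

```python
# Hedged Explorer usage sketch; constructor args, create_embeddings_table() and the
# query string are assumptions, only plot_sql_query()'s signature appears in the diff.
from ultralytics import Explorer

exp = Explorer(data="coco8.yaml", model="yolov8n.pt")  # assumed constructor signature
exp.create_embeddings_table()  # build the table the SQL-like queries run against

# plot_sql_query() returns a PIL.Image.Image per the signature in the hunk above
im = exp.plot_sql_query("SELECT im_file, labels FROM 'table' WHERE labels LIKE '%person%'", labels=True)
im.show()
```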
diff --git a/ultralytics/data/loaders.py b/ultralytics/data/loaders.py
index cd03850c26b..7b30db9d98f 100644
--- a/ultralytics/data/loaders.py
+++ b/ultralytics/data/loaders.py
@@ -240,7 +240,7 @@ def __iter__(self):
return self
def __next__(self):
- """mss screen capture: get raw pixels from the screen as np array."""
+ """Mss screen capture: get raw pixels from the screen as np array."""
im0 = np.asarray(self.sct.grab(self.monitor))[:, :, :3] # BGRA to BGR
s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: "
diff --git a/ultralytics/data/split_dota.py b/ultralytics/data/split_dota.py
index f0a1630ca35..07bbc7721ab 100644
--- a/ultralytics/data/split_dota.py
+++ b/ultralytics/data/split_dota.py
@@ -184,7 +184,7 @@ def crop_and_save(anno, windows, window_objs, im_dir, lb_dir):
with open(Path(lb_dir) / f"{new_name}.txt", "w") as f:
for lb in label:
- formatted_coords = ["{:.6g}".format(coord) for coord in lb[1:]]
+ formatted_coords = [f"{coord:.6g}" for coord in lb[1:]]
f.write(f"{int(lb[0])} {' '.join(formatted_coords)}\n")
diff --git a/ultralytics/data/utils.py b/ultralytics/data/utils.py
index fa9bfbb1948..86c073a25f8 100644
--- a/ultralytics/data/utils.py
+++ b/ultralytics/data/utils.py
@@ -265,7 +265,6 @@ def check_det_dataset(dataset, autodownload=True):
Returns:
(dict): Parsed dataset information and paths.
"""
-
file = check_file(dataset)
# Download (optional)
@@ -363,7 +362,6 @@ def check_cls_dataset(dataset, split=""):
- 'nc' (int): The number of classes in the dataset.
- 'names' (dict): A dictionary of class names in the dataset.
"""
-
# Download (optional if dataset=https://file.zip is passed directly)
if str(dataset).startswith(("http:/", "https:/")):
dataset = safe_download(dataset, dir=DATASETS_DIR, unzip=True, delete=False)
@@ -602,7 +600,6 @@ def compress_one_image(f, f_new=None, max_dim=1920, quality=50):
compress_one_image(f)
```
"""
-
try: # use PIL
im = Image.open(f)
r = max_dim / max(im.height, im.width) # ratio
@@ -635,7 +632,6 @@ def autosplit(path=DATASETS_DIR / "coco8/images", weights=(0.9, 0.1, 0.0), annot
autosplit()
```
"""
-
path = Path(path) # images dir
files = sorted(x for x in path.rglob("*.*") if x.suffix[1:].lower() in IMG_FORMATS) # image files only
n = len(files) # number of files
diff --git a/ultralytics/engine/exporter.py b/ultralytics/engine/exporter.py
index d982965e55b..fdc638e5ddc 100644
--- a/ultralytics/engine/exporter.py
+++ b/ultralytics/engine/exporter.py
@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""
-Export a YOLOv8 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
+Export a YOLOv8 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit.
Format | `format=argument` | Model
--- | --- | ---
@@ -533,9 +533,7 @@ def export_paddle(self, prefix=colorstr("PaddlePaddle:")):
@try_export
def export_ncnn(self, prefix=colorstr("NCNN:")):
- """
- YOLOv8 NCNN export using PNNX https://github.com/pnnx/pnnx.
- """
+ """YOLOv8 NCNN export using PNNX https://github.com/pnnx/pnnx."""
check_requirements("ncnn")
import ncnn # noqa
diff --git a/ultralytics/engine/model.py b/ultralytics/engine/model.py
index 44b7539b86e..dd6d7b84e86 100644
--- a/ultralytics/engine/model.py
+++ b/ultralytics/engine/model.py
@@ -175,7 +175,7 @@ def __call__(
@staticmethod
def is_triton_model(model: str) -> bool:
- """Is model a Triton Server URL string, i.e. :////"""
+ """Is model a Triton Server URL string, i.e. :////."""
from urllib.parse import urlsplit
url = urlsplit(model)
diff --git a/ultralytics/engine/neuron_exporter.py b/ultralytics/engine/neuron_exporter.py
index fe2ec2e31eb..c7208fa2fc4 100644
--- a/ultralytics/engine/neuron_exporter.py
+++ b/ultralytics/engine/neuron_exporter.py
@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""
-Export a YOLOv8 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
+Export a YOLOv8 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit.
Format | `format=argument` | Model
--- | --- | ---
@@ -52,49 +52,32 @@
$ npm start
"""
-import gc
import json
-import os
-import shutil
-import subprocess
import time
import warnings
from copy import deepcopy
from datetime import datetime
from pathlib import Path
-import numpy as np
import torch
-from ultralytics.cfg import TASK2DATA, get_cfg
-from ultralytics.data import build_dataloader
-from ultralytics.data.dataset import YOLODataset
-from ultralytics.data.utils import check_cls_dataset, check_det_dataset
+from ultralytics.cfg import TASK2DATA
+from ultralytics.engine.exporter import Exporter
from ultralytics.nn.autobackend import check_class_names, default_class_names
from ultralytics.nn.modules import C2f, Detect, RTDETRDecoder
-from ultralytics.nn.tasks import DetectionModel, SegmentationModel, WorldModel
+from ultralytics.nn.tasks import WorldModel
from ultralytics.utils import (
- ARM64,
DEFAULT_CFG,
- IS_JETSON,
LINUX,
LOGGER,
- MACOS,
- PYTHON_VERSION,
- ROOT,
- WINDOWS,
__version__,
- callbacks,
colorstr,
get_default_args,
- yaml_save,
)
-from ultralytics.utils.checks import check_imgsz, check_is_path_safe, check_requirements, check_version
-from ultralytics.utils.downloads import attempt_download_asset, get_github_assets, safe_download
-from ultralytics.utils.files import file_size, spaces_in_path
+from ultralytics.utils.checks import check_imgsz
+from ultralytics.utils.files import file_size
from ultralytics.utils.ops import Profile
-from ultralytics.utils.torch_utils import TORCH_1_13, get_latest_opset, select_device, smart_inference_mode
-from ultralytics.engine.exporter import Exporter
+from ultralytics.utils.torch_utils import select_device, smart_inference_mode
def export_formats():
@@ -105,6 +88,7 @@ def export_formats():
["PyTorch", "-", ".pt", True, True],
["TorchScript", "torchscript", ".torchscript", True, True],
["AWS NeuronX", "neuronx", ".neuronx", True, True],
+ ["AWS Neuron", "neuron", ".neuron", True, True],
["ONNX", "onnx", ".onnx", True, True],
["OpenVINO", "openvino", "_openvino_model", True, False],
["TensorRT", "engine", ".engine", False, True],
@@ -159,6 +143,7 @@ class NeuronExporter(Exporter):
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
super().__init__(cfg, overrides, _callbacks)
+
@smart_inference_mode()
def __call__(self, model=None) -> str:
"""Returns list of exported files/dirs after running callbacks."""
@@ -173,7 +158,22 @@ def __call__(self, model=None) -> str:
flags = [x == fmt for x in fmts]
if sum(flags) != 1:
raise ValueError(f"Invalid export format='{fmt}'. Valid formats are {fmts}")
- jit, neuronx, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, ncnn = flags # export booleans
+ (
+ jit,
+ neuronx,
+ neuron,
+ onnx,
+ xml,
+ engine,
+ coreml,
+ saved_model,
+ pb,
+ tflite,
+ edgetpu,
+ tfjs,
+ paddle,
+ ncnn,
+ ) = flags # export booleans
is_tf_format = any((saved_model, pb, tflite, edgetpu, tfjs))
# Device
@@ -315,6 +315,8 @@ def __call__(self, model=None) -> str:
f[11], _ = self.export_ncnn()
if neuronx: # NeuronX
f[12], _ = self.export_neuronx()
+ if neuron: # Neuron
+ f[13], _ = self.export_neuron()
# Finish
f = [str(x) for x in f if x] # filter out '' and None
@@ -340,20 +342,27 @@ def __call__(self, model=None) -> str:
self.run_callbacks("on_export_end")
return f # return list of exported files/dirs
+
@try_export
def export_neuronx(self, prefix=colorstr("AWS NeuronX:")):
import torch_neuronx
+
"""YOLOv8 NeuronX model export."""
LOGGER.info(f"\n{prefix} starting export with torch {torch_neuronx.__version__}...")
f = self.file.with_suffix(".neuronx")
-
ts = torch_neuronx.trace(self.model, self.im, strict=False)
extra_files = {"config.txt": json.dumps(self.metadata)} # torch._C.ExtraFilesMap()
- if self.args.optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
- LOGGER.info(f"{prefix} optimizing for mobile...")
- from torch.utils.mobile_optimizer import optimize_for_mobile
-
- optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
- else:
- ts.save(str(f), _extra_files=extra_files)
- return f, None
\ No newline at end of file
+ ts.save(str(f), _extra_files=extra_files)
+ return f, None
+
+ @try_export
+ def export_neuron(self, prefix=colorstr("AWS Neuron:")):
+        """YOLOv8 Neuron model export."""
+        import torch_neuron
+
+ LOGGER.info(f"\n{prefix} starting export with torch {torch_neuron.__version__}...")
+ f = self.file.with_suffix(".neuron")
+ ts = torch_neuron.trace(self.model, self.im, strict=False)
+ extra_files = {"config.txt": json.dumps(self.metadata)}
+ ts.save(str(f), _extra_files=extra_files)
+ return f, None
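For context on the neuron_exporter.py additions above, a hedged usage sketch follows. It assumes an Inf1 host with `torch-neuron` installed and that `NeuronYOLO` is importable from `ultralytics.models` (the re-export this diff shows); the `format="neuron"` string and the `.neuron` suffix come from the new `export_formats()` row.

```python
# Hedged usage sketch, not part of this diff. Assumes torch-neuron is installed and
# that NeuronYOLO is importable as the updated ultralytics/models/__init__.py suggests.
from ultralytics.models import NeuronYOLO

model = NeuronYOLO("yolov8n.pt")
# "neuron" routes to NeuronExporter.export_neuron(): the model is traced with
# torch_neuron and saved as TorchScript with a ".neuron" suffix, with the metadata
# dict embedded as the "config.txt" extra file.
exported_file = model.export(format="neuron")
print(exported_file)  # expected to end with ".neuron"
```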
diff --git a/ultralytics/engine/neuron_model.py b/ultralytics/engine/neuron_model.py
index ac39e57a6fe..4506c401dfb 100644
--- a/ultralytics/engine/neuron_model.py
+++ b/ultralytics/engine/neuron_model.py
@@ -1,7 +1,8 @@
from pathlib import Path
-from ultralytics.engine.model import Model
from typing import Union
+from ultralytics.engine.model import Model
+
class NeuronModel(Model):
def __init__(
@@ -51,6 +52,4 @@ def export(
**kwargs,
"mode": "export",
} # highest priority args on the right
- return NeuronExporter(overrides=args, _callbacks=self.callbacks)(
- model=self.model
- )
+ return NeuronExporter(overrides=args, _callbacks=self.callbacks)(model=self.model)
diff --git a/ultralytics/engine/neuron_predictor.py b/ultralytics/engine/neuron_predictor.py
index df0b0c68c7e..eb28beb4f12 100644
--- a/ultralytics/engine/neuron_predictor.py
+++ b/ultralytics/engine/neuron_predictor.py
@@ -1,7 +1,6 @@
from ultralytics.engine.predictor import BasePredictor
-from ultralytics.utils import DEFAULT_CFG
-from ultralytics.utils.torch_utils import select_device
from ultralytics.nn.neuron_autobackend import NeuronAutoBackend
+from ultralytics.utils.torch_utils import select_device
class NeuronPredictor(BasePredictor):
diff --git a/ultralytics/engine/predictor.py b/ultralytics/engine/predictor.py
index 8597a60a22a..d9933e6e951 100644
--- a/ultralytics/engine/predictor.py
+++ b/ultralytics/engine/predictor.py
@@ -328,7 +328,7 @@ def write_results(self, i, p, im, s):
frame = int(match[1]) if match else None # 0 if frame undetermined
self.txt_path = self.save_dir / "labels" / (p.stem + ("" if self.dataset.mode == "image" else f"_{frame}"))
- string += "%gx%g " % im.shape[2:]
+ string += "{:g}x{:g} ".format(*im.shape[2:])
result = self.results[i]
result.save_dir = self.save_dir.__str__() # used in other locations
string += f"{result.verbose()}{result.speed['inference']:.1f}ms"
diff --git a/ultralytics/engine/results.py b/ultralytics/engine/results.py
index 346ed6500ff..51356d6a66c 100644
--- a/ultralytics/engine/results.py
+++ b/ultralytics/engine/results.py
@@ -645,7 +645,7 @@ class Keypoints(BaseTensor):
"""
A class for storing and manipulating detection keypoints.
- Attributes
+ Attributes:
xy (torch.Tensor): A collection of keypoints containing x, y coordinates for each detection.
xyn (torch.Tensor): A normalized version of xy with coordinates in the range [0, 1].
conf (torch.Tensor): Confidence values associated with keypoints if available, otherwise None.
@@ -694,7 +694,7 @@ class Probs(BaseTensor):
"""
A class for storing and manipulating classification predictions.
- Attributes
+ Attributes:
top1 (int): Index of the top 1 class.
top5 (list[int]): Indices of the top 5 classes.
top1conf (torch.Tensor): Confidence of the top 1 class.
@@ -746,7 +746,7 @@ class OBB(BaseTensor):
If present, the third last column contains track IDs, and the fifth column from the left contains rotation.
orig_shape (tuple): Original image size, in the format (height, width).
- Attributes
+ Attributes:
xywhr (torch.Tensor | numpy.ndarray): The boxes in [x_center, y_center, width, height, rotation] format.
conf (torch.Tensor | numpy.ndarray): The confidence values of the boxes.
cls (torch.Tensor | numpy.ndarray): The class values of the boxes.
diff --git a/ultralytics/engine/trainer.py b/ultralytics/engine/trainer.py
index c833e761655..8232528bb83 100644
--- a/ultralytics/engine/trainer.py
+++ b/ultralytics/engine/trainer.py
@@ -226,7 +226,6 @@ def _setup_ddp(self, world_size):
def _setup_train(self, world_size):
"""Builds dataloaders and optimizer on correct rank process."""
-
# Model
self.run_callbacks("on_pretrain_routine_start")
ckpt = self.setup_model()
@@ -632,7 +631,7 @@ def plot_metrics(self):
pass
def on_plot(self, name, data=None):
- """Registers plots (e.g. to be consumed in callbacks)"""
+ """Registers plots (e.g. to be consumed in callbacks)."""
path = Path(name)
self.plots[path] = {"data": data, "timestamp": time.time()}
@@ -728,7 +727,6 @@ def build_optimizer(self, model, name="auto", lr=0.001, momentum=0.9, decay=1e-5
Returns:
(torch.optim.Optimizer): The constructed optimizer.
"""
-
g = [], [], [] # optimizer parameter groups
bn = tuple(v for k, v in nn.__dict__.items() if "Norm" in k) # normalization layers, i.e. BatchNorm2d()
if name == "auto":
diff --git a/ultralytics/engine/tuner.py b/ultralytics/engine/tuner.py
index 7fc1897d3c1..618647683ea 100644
--- a/ultralytics/engine/tuner.py
+++ b/ultralytics/engine/tuner.py
@@ -176,7 +176,6 @@ def __call__(self, model=None, iterations=10, cleanup=True):
The method utilizes the `self.tune_csv` Path object to read and log hyperparameters and fitness scores.
Ensure this path is set correctly in the Tuner instance.
"""
-
t0 = time.time()
best_save_dir, best_metrics = None, None
(self.tune_dir / "weights").mkdir(parents=True, exist_ok=True)
diff --git a/ultralytics/engine/validator.py b/ultralytics/engine/validator.py
index 8a2765c98f3..525a6b57ae9 100644
--- a/ultralytics/engine/validator.py
+++ b/ultralytics/engine/validator.py
@@ -204,8 +204,9 @@ def __call__(self, trainer=None, model=None):
return {k: round(float(v), 5) for k, v in results.items()} # return results as 5 decimal place floats
else:
LOGGER.info(
- "Speed: %.1fms preprocess, %.1fms inference, %.1fms loss, %.1fms postprocess per image"
- % tuple(self.speed.values())
+ "Speed: {:.1f}ms preprocess, {:.1f}ms inference, {:.1f}ms loss, {:.1f}ms postprocess per image".format(
+ *tuple(self.speed.values())
+ )
)
if self.args.save_json and self.jdict:
with open(str(self.save_dir / "predictions.json"), "w") as f:
@@ -317,7 +318,7 @@ def metric_keys(self):
return []
def on_plot(self, name, data=None):
- """Registers plots (e.g. to be consumed in callbacks)"""
+ """Registers plots (e.g. to be consumed in callbacks)."""
self.plots[Path(name)] = {"data": data, "timestamp": time.time()}
# TODO: may need to put these following functions into callback
diff --git a/ultralytics/hub/session.py b/ultralytics/hub/session.py
index ddd4d8c1a5b..eb23991ee0e 100644
--- a/ultralytics/hub/session.py
+++ b/ultralytics/hub/session.py
@@ -154,7 +154,6 @@ def _parse_identifier(identifier):
Raises:
HUBModelError: If the identifier format is not recognized.
"""
-
# Initialize variables
api_key, model_id, filename = None, None, None
diff --git a/ultralytics/hub/utils.py b/ultralytics/hub/utils.py
index 2e84c7d3a8f..9ddb50768e1 100644
--- a/ultralytics/hub/utils.py
+++ b/ultralytics/hub/utils.py
@@ -55,23 +55,22 @@ def request_with_credentials(url: str) -> any:
display.display(
display.Javascript(
- """
- window._hub_tmp = new Promise((resolve, reject) => {
+ f"""
+ window._hub_tmp = new Promise((resolve, reject) => {{
const timeout = setTimeout(() => reject("Failed authenticating existing browser session"), 5000)
- fetch("%s", {
+ fetch("{url}", {{
method: 'POST',
credentials: 'include'
- })
+ }})
.then((response) => resolve(response.json()))
- .then((json) => {
+ .then((json) => {{
clearTimeout(timeout);
- }).catch((err) => {
+ }}).catch((err) => {{
clearTimeout(timeout);
reject(err);
- });
- });
+ }});
+ }});
"""
- % url
)
)
return output.eval_js("_hub_tmp")
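The `request_with_credentials` hunk above converts the embedded JavaScript template from %-interpolation to an f-string, which is why every literal `{`/`}` in the JS is doubled. A minimal illustration of that escaping rule, with a placeholder URL:

```python
# Minimal illustration (placeholder URL, not repo code): inside an f-string,
# "{{" and "}}" produce literal braces, so only {url} is interpolated.
url = "https://example.com/auth"  # placeholder
snippet = f"fetch('{url}', {{ credentials: 'include' }})"
print(snippet)  # fetch('https://example.com/auth', { credentials: 'include' })
```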
diff --git a/ultralytics/models/__init__.py b/ultralytics/models/__init__.py
index 870e0db0811..2a98f3cea9f 100644
--- a/ultralytics/models/__init__.py
+++ b/ultralytics/models/__init__.py
@@ -4,7 +4,7 @@
from .nas import NAS
from .rtdetr import RTDETR
from .sam import SAM
-from .yolo import YOLO, YOLOWorld, NeuronYOLO
+from .yolo import YOLO, NeuronYOLO, YOLOWorld
__all__ = (
"YOLO",
diff --git a/ultralytics/models/fastsam/utils.py b/ultralytics/models/fastsam/utils.py
index 480e903942e..2108dedf278 100644
--- a/ultralytics/models/fastsam/utils.py
+++ b/ultralytics/models/fastsam/utils.py
@@ -15,7 +15,6 @@ def adjust_bboxes_to_image_border(boxes, image_shape, threshold=20):
Returns:
adjusted_boxes (torch.Tensor): adjusted bounding boxes
"""
-
# Image dimensions
h, w = image_shape
diff --git a/ultralytics/models/nas/predict.py b/ultralytics/models/nas/predict.py
index 2e485462126..753cae91e67 100644
--- a/ultralytics/models/nas/predict.py
+++ b/ultralytics/models/nas/predict.py
@@ -34,7 +34,6 @@ class NASPredictor(BasePredictor):
def postprocess(self, preds_in, img, orig_imgs):
"""Postprocess predictions and returns a list of Results objects."""
-
# Cat boxes and class scores
boxes = ops.xyxy2xywh(preds_in[0][0])
preds = torch.cat((boxes, preds_in[0][1]), -1).permute(0, 2, 1)
diff --git a/ultralytics/models/sam/modules/decoders.py b/ultralytics/models/sam/modules/decoders.py
index 073b1ad40cb..02a72f78df9 100644
--- a/ultralytics/models/sam/modules/decoders.py
+++ b/ultralytics/models/sam/modules/decoders.py
@@ -154,7 +154,7 @@ def predict_masks(
class MLP(nn.Module):
"""
MLP (Multi-Layer Perceptron) model lightly adapted from
- https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py
+ https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py.
"""
def __init__(
diff --git a/ultralytics/models/sam/modules/encoders.py b/ultralytics/models/sam/modules/encoders.py
index a51c34721ae..a8bd89ef7e8 100644
--- a/ultralytics/models/sam/modules/encoders.py
+++ b/ultralytics/models/sam/modules/encoders.py
@@ -453,6 +453,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
"""
Partition into non-overlapping windows with padding if needed.
+
Args:
x (tensor): input tokens with [B, H, W, C].
window_size (int): window size.
diff --git a/ultralytics/models/sam/modules/transformer.py b/ultralytics/models/sam/modules/transformer.py
index db684f8f137..1489d3554cc 100644
--- a/ultralytics/models/sam/modules/transformer.py
+++ b/ultralytics/models/sam/modules/transformer.py
@@ -170,7 +170,6 @@ def __init__(
def forward(self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor) -> Tuple[Tensor, Tensor]:
"""Apply self-attention and cross-attention to queries and keys and return the processed embeddings."""
-
# Self attention block
if self.skip_first_layer_pe:
queries = self.self_attn(q=queries, k=queries, v=queries)
@@ -251,7 +250,6 @@ def _recombine_heads(x: Tensor) -> Tensor:
def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
"""Compute the attention output given the input query, key, and value tensors."""
-
# Input projections
q = self.q_proj(q)
k = self.k_proj(k)
diff --git a/ultralytics/models/utils/ops.py b/ultralytics/models/utils/ops.py
index 4f66feef652..2630fc7391f 100644
--- a/ultralytics/models/utils/ops.py
+++ b/ultralytics/models/utils/ops.py
@@ -70,7 +70,6 @@ def forward(self, pred_bboxes, pred_scores, gt_bboxes, gt_cls, gt_groups, masks=
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
-
bs, nq, nc = pred_scores.shape
if sum(gt_groups) == 0:
@@ -175,7 +174,6 @@ def get_cdn_group(
bounding boxes, attention mask and meta information for denoising. If not in training mode or 'num_dn'
is less than or equal to 0, the function returns None for all elements in the tuple.
"""
-
if (not training) or num_dn <= 0:
return None, None, None, None
gt_groups = batch["gt_groups"]
diff --git a/ultralytics/models/yolo/detect/__init__.py b/ultralytics/models/yolo/detect/__init__.py
index 936c8a8fbf5..bde72324e38 100644
--- a/ultralytics/models/yolo/detect/__init__.py
+++ b/ultralytics/models/yolo/detect/__init__.py
@@ -1,7 +1,7 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
-from .predict import DetectionPredictor
from .neuron_predict import NeuronDetectionPredictor
+from .predict import DetectionPredictor
from .train import DetectionTrainer
from .val import DetectionValidator
diff --git a/ultralytics/models/yolo/detect/neuron_predict.py b/ultralytics/models/yolo/detect/neuron_predict.py
index 752705cd7f1..9ee6c8093d8 100644
--- a/ultralytics/models/yolo/detect/neuron_predict.py
+++ b/ultralytics/models/yolo/detect/neuron_predict.py
@@ -1,7 +1,6 @@
from ultralytics.models.yolo.detect.predict import DetectionPredictor
-from ultralytics.utils.torch_utils import select_device
from ultralytics.nn.neuron_autobackend import NeuronAutoBackend
-from ultralytics.utils import DEFAULT_CFG
+from ultralytics.utils.torch_utils import select_device
class NeuronDetectionPredictor(DetectionPredictor):
diff --git a/ultralytics/models/yolo/neuron_model.py b/ultralytics/models/yolo/neuron_model.py
index 1978c0a11d5..4a67e085e4e 100644
--- a/ultralytics/models/yolo/neuron_model.py
+++ b/ultralytics/models/yolo/neuron_model.py
@@ -1,4 +1,3 @@
-from .model import YOLO
from ultralytics.engine.neuron_model import NeuronModel
from ultralytics.models import yolo
diff --git a/ultralytics/nn/modules/block.py b/ultralytics/nn/modules/block.py
index 9d08dd7efa7..db46d237524 100644
--- a/ultralytics/nn/modules/block.py
+++ b/ultralytics/nn/modules/block.py
@@ -684,7 +684,7 @@ class CBLinear(nn.Module):
def __init__(self, c1, c2s, k=1, s=1, p=None, g=1):
"""Initializes the CBLinear module, passing inputs unchanged."""
- super(CBLinear, self).__init__()
+ super().__init__()
self.c2s = c2s
self.conv = nn.Conv2d(c1, sum(c2s), k, s, autopad(k, p), groups=g, bias=True)
@@ -698,7 +698,7 @@ class CBFuse(nn.Module):
def __init__(self, idx):
"""Initializes CBFuse module with layer index for selective feature fusion."""
- super(CBFuse, self).__init__()
+ super().__init__()
self.idx = idx
def forward(self, xs):
diff --git a/ultralytics/nn/modules/head.py b/ultralytics/nn/modules/head.py
index d80864844d9..c372b03d252 100644
--- a/ultralytics/nn/modules/head.py
+++ b/ultralytics/nn/modules/head.py
@@ -565,7 +565,7 @@ def _reset_parameters(self):
class v10Detect(Detect):
"""
- v10 Detection head from https://arxiv.org/pdf/2405.14458
+ v10 Detection head from https://arxiv.org/pdf/2405.14458.
Args:
nc (int): Number of classes.
diff --git a/ultralytics/nn/modules/transformer.py b/ultralytics/nn/modules/transformer.py
index 062c6094eaf..78913c756fa 100644
--- a/ultralytics/nn/modules/transformer.py
+++ b/ultralytics/nn/modules/transformer.py
@@ -350,7 +350,6 @@ def forward_ffn(self, tgt):
def forward(self, embed, refer_bbox, feats, shapes, padding_mask=None, attn_mask=None, query_pos=None):
"""Perform the forward pass through the entire decoder layer."""
-
# Self attention
q = k = self.with_pos_embed(embed, query_pos)
tgt = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), embed.transpose(0, 1), attn_mask=attn_mask)[
diff --git a/ultralytics/nn/modules/utils.py b/ultralytics/nn/modules/utils.py
index 15129673408..a7c86391c42 100644
--- a/ultralytics/nn/modules/utils.py
+++ b/ultralytics/nn/modules/utils.py
@@ -50,7 +50,6 @@ def multi_scale_deformable_attn_pytorch(
https://github.com/IDEA-Research/detrex/blob/main/detrex/layers/multi_scale_deform_attn.py
"""
-
bs, _, num_heads, embed_dims = value.shape
_, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape
value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
diff --git a/ultralytics/nn/neuron_autobackend.py b/ultralytics/nn/neuron_autobackend.py
index 1fc48e360c1..719160a25bb 100644
--- a/ultralytics/nn/neuron_autobackend.py
+++ b/ultralytics/nn/neuron_autobackend.py
@@ -110,6 +110,7 @@ def __init__(
pt,
jit,
neuronx,
+ neuron,
onnx,
xml,
engine,
@@ -176,12 +177,24 @@ def __init__(
# NeuronX
elif neuronx:
import torch_neuronx
- LOGGER.info(f"Loading {w} for NeuronX inference...")
+
+ LOGGER.info(f"Loading {w} for NeuronX inference... version {torch_neuronx.__version__}")
+ extra_files = {"config.txt": ""} # model metadata
+ model = torch.jit.load(w, _extra_files=extra_files, map_location=device)
+ model.half() if fp16 else model.float()
+ if extra_files["config.txt"]: # load metadata dict
+ metadata = json.loads(extra_files["config.txt"], object_hook=lambda x: dict(x.items()))
+        # Neuron
+ elif neuron:
+ import torch_neuron
+
+ LOGGER.info(f"Loading {w} for Neuron inference... version {torch_neuron.__version__}")
extra_files = {"config.txt": ""} # model metadata
model = torch.jit.load(w, _extra_files=extra_files, map_location=device)
model.half() if fp16 else model.float()
if extra_files["config.txt"]: # load metadata dict
metadata = json.loads(extra_files["config.txt"], object_hook=lambda x: dict(x.items()))
+
# ONNX OpenCV DNN
elif dnn:
LOGGER.info(f"Loading {w} for ONNX OpenCV DNN inference...")
@@ -466,9 +479,11 @@ def forward(self, im, augment=False, visualize=False, embed=None):
# TorchScript
elif self.jit:
y = self.model(im)
-
+
elif self.neuronx:
y = self.model(im)
+ elif self.neuron:
+ y = self.model(im)
# ONNX OpenCV DNN
elif self.dnn:
im = im.cpu().numpy() # torch to numpy
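The NeuronAutoBackend branch added above loads the traced artifact the same way the NeuronX branch does. A hedged standalone sketch of that load path, using a hypothetical file name:

```python
# Hedged sketch (hypothetical file name, not repo code) of the load path used by the
# new neuron branch: torch.jit.load restores the traced module and fills the
# requested "config.txt" extra file with the JSON metadata saved at export time.
import json
import torch

weights = "yolov8n.neuron"  # hypothetical artifact from export(format="neuron")
extra_files = {"config.txt": ""}  # ask torch.jit.load to return the embedded metadata
model = torch.jit.load(weights, _extra_files=extra_files, map_location="cpu")
metadata = json.loads(extra_files["config.txt"]) if extra_files["config.txt"] else {}
model.float()
print(sorted(metadata))  # e.g. keys such as "names", "stride", "imgsz" if present
```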
diff --git a/ultralytics/nn/tasks.py b/ultralytics/nn/tasks.py
index fd7d4028c87..4031e999d06 100644
--- a/ultralytics/nn/tasks.py
+++ b/ultralytics/nn/tasks.py
@@ -723,7 +723,6 @@ def temporary_modules(modules=None, attributes=None):
Be aware that directly manipulating `sys.modules` can lead to unpredictable results, especially in larger
applications or libraries. Use this function with caution.
"""
-
if modules is None:
modules = {}
if attributes is None:
@@ -813,7 +812,6 @@ def torch_safe_load(weight):
def attempt_load_weights(weights, device=None, inplace=True, fuse=False):
"""Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a."""
-
ensemble = Ensemble()
for w in weights if isinstance(weights, list) else [weights]:
ckpt, w = torch_safe_load(w) # load ckpt
diff --git a/ultralytics/solutions/ai_gym.py b/ultralytics/solutions/ai_gym.py
index 2cf4e00a293..349e46e8f08 100644
--- a/ultralytics/solutions/ai_gym.py
+++ b/ultralytics/solutions/ai_gym.py
@@ -29,7 +29,6 @@ def __init__(
pose_down_angle (float, optional): Angle threshold for the 'down' pose. Defaults to 90.0.
pose_type (str, optional): Type of pose to detect ('pullup', 'pushup', 'abworkout'). Defaults to "pullup".
"""
-
# Image and line thickness
self.im0 = None
self.tf = line_thickness
@@ -65,7 +64,6 @@ def start_counting(self, im0, results):
im0 (ndarray): Current frame from the video stream.
results (list): Pose estimation data.
"""
-
self.im0 = im0
if not len(results[0]):
diff --git a/ultralytics/solutions/analytics.py b/ultralytics/solutions/analytics.py
index 3715c21abeb..c2990097786 100644
--- a/ultralytics/solutions/analytics.py
+++ b/ultralytics/solutions/analytics.py
@@ -51,7 +51,6 @@ def __init__(
save_img (bool): Whether to save the image.
max_points (int): Specifies when to remove the oldest points in a graph for multiple lines.
"""
-
self.bg_color = bg_color
self.fg_color = fg_color
self.view_img = view_img
@@ -115,7 +114,6 @@ def update_area(self, frame_number, counts_dict):
frame_number (int): The current frame number.
counts_dict (dict): Dictionary with class names as keys and counts as values.
"""
-
x_data = np.array([])
y_data_dict = {key: np.array([]) for key in counts_dict.keys()}
@@ -177,7 +175,6 @@ def update_line(self, frame_number, total_counts):
frame_number (int): The current frame number.
total_counts (int): The total counts to plot.
"""
-
# Update line graph data
x_data = self.line.get_xdata()
y_data = self.line.get_ydata()
@@ -230,7 +227,7 @@ def write_and_display(self, im0):
"""
Write and display the line graph
Args:
- im0 (ndarray): Image for processing
+ im0 (ndarray): Image for processing.
"""
im0 = cv2.cvtColor(im0[:, :, :3], cv2.COLOR_RGBA2BGR)
cv2.imshow(self.title, im0) if self.view_img else None
@@ -243,7 +240,6 @@ def update_bar(self, count_dict):
Args:
count_dict (dict): Dictionary containing the count data to plot.
"""
-
# Update bar graph data
self.ax.clear()
self.ax.set_facecolor(self.bg_color)
@@ -282,7 +278,6 @@ def update_pie(self, classes_dict):
Args:
classes_dict (dict): Dictionary containing the class data to plot.
"""
-
# Update pie chart data
labels = list(classes_dict.keys())
sizes = list(classes_dict.values())
diff --git a/ultralytics/solutions/heatmap.py b/ultralytics/solutions/heatmap.py
index 04d467f3c88..6997c76ed7a 100644
--- a/ultralytics/solutions/heatmap.py
+++ b/ultralytics/solutions/heatmap.py
@@ -37,7 +37,6 @@ def __init__(
shape="circle",
):
"""Initializes the heatmap class with default values for Visual, Image, track, count and heatmap parameters."""
-
# Visual information
self.annotator = None
self.view_img = view_img
diff --git a/ultralytics/solutions/object_counter.py b/ultralytics/solutions/object_counter.py
index e7de1cb186f..224b26a5233 100644
--- a/ultralytics/solutions/object_counter.py
+++ b/ultralytics/solutions/object_counter.py
@@ -53,7 +53,6 @@ def __init__(
line_dist_thresh (int): Euclidean distance threshold for line counter.
cls_txtdisplay_gap (int): Display gap between each class count.
"""
-
# Mouse events
self.is_drawing = False
self.selected_point = None
@@ -141,7 +140,6 @@ def mouse_event_for_region(self, event, x, y, flags, params):
def extract_and_process_tracks(self, tracks):
"""Extracts and processes tracks for object counting in a video stream."""
-
# Annotator Init and region drawing
self.annotator = Annotator(self.im0, self.tf, self.names)
diff --git a/ultralytics/solutions/parking_management.py b/ultralytics/solutions/parking_management.py
index 19564cf9145..e70238392f3 100644
--- a/ultralytics/solutions/parking_management.py
+++ b/ultralytics/solutions/parking_management.py
@@ -201,7 +201,7 @@ def parking_regions_extraction(json_file):
Args:
json_file (str): file that have all parking slot points
"""
- with open(json_file, "r") as json_file:
+ with open(json_file) as json_file:
return json.load(json_file)
def process_data(self, json_data, im0, boxes, clss):
diff --git a/ultralytics/solutions/queue_management.py b/ultralytics/solutions/queue_management.py
index 96ac6291000..9b10d119823 100644
--- a/ultralytics/solutions/queue_management.py
+++ b/ultralytics/solutions/queue_management.py
@@ -49,7 +49,6 @@ def __init__(
region_thickness (int, optional): Thickness of the counting region lines. Defaults to 5.
fontsize (float, optional): Font size for the text annotations. Defaults to 0.7.
"""
-
# Mouse events state
self.is_drawing = False
self.selected_point = None
@@ -88,7 +87,6 @@ def __init__(
def extract_and_process_tracks(self, tracks):
"""Extracts and processes tracks for queue management in a video stream."""
-
# Initialize annotator and draw the queue region
self.annotator = Annotator(self.im0, self.tf, self.names)
diff --git a/ultralytics/trackers/byte_tracker.py b/ultralytics/trackers/byte_tracker.py
index e0e5bd618a6..a2526e245af 100644
--- a/ultralytics/trackers/byte_tracker.py
+++ b/ultralytics/trackers/byte_tracker.py
@@ -421,7 +421,7 @@ def sub_stracks(tlista, tlistb):
tid = t.track_id
if stracks.get(tid, 0):
del stracks[tid]
- return list(stracks.values())
+ return list(stracks.values()).
"""
track_ids_b = {t.track_id for t in tlistb}
return [t for t in tlista if t.track_id not in track_ids_b]
diff --git a/ultralytics/trackers/utils/matching.py b/ultralytics/trackers/utils/matching.py
index 222c3a5c66d..564c0c2ca20 100644
--- a/ultralytics/trackers/utils/matching.py
+++ b/ultralytics/trackers/utils/matching.py
@@ -32,7 +32,6 @@ def linear_assignment(cost_matrix: np.ndarray, thresh: float, use_lap: bool = Tr
- unmatched indices from 'a'
- unmatched indices from 'b'
"""
-
if cost_matrix.size == 0:
return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))
@@ -69,7 +68,6 @@ def iou_distance(atracks: list, btracks: list) -> np.ndarray:
Returns:
(np.ndarray): Cost matrix computed based on IoU.
"""
-
if atracks and isinstance(atracks[0], np.ndarray) or btracks and isinstance(btracks[0], np.ndarray):
atlbrs = atracks
btlbrs = btracks
@@ -105,7 +103,6 @@ def embedding_distance(tracks: list, detections: list, metric: str = "cosine") -
Returns:
(np.ndarray): Cost matrix computed based on embeddings.
"""
-
cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float32)
if cost_matrix.size == 0:
return cost_matrix
@@ -128,7 +125,6 @@ def fuse_score(cost_matrix: np.ndarray, detections: list) -> np.ndarray:
Returns:
(np.ndarray): Fused similarity matrix.
"""
-
if cost_matrix.size == 0:
return cost_matrix
iou_sim = 1 - cost_matrix
diff --git a/ultralytics/utils/__init__.py b/ultralytics/utils/__init__.py
index b97a0fc4205..b92a04ae3c8 100644
--- a/ultralytics/utils/__init__.py
+++ b/ultralytics/utils/__init__.py
@@ -208,7 +208,6 @@ def plt_settings(rcparams=None, backend="Agg"):
(Callable): Decorated function with temporarily set rc parameters and backend. This decorator can be
applied to any function that needs to have specific matplotlib rc parameters and backend for its execution.
"""
-
if rcparams is None:
rcparams = {"font.size": 11}
@@ -698,7 +697,7 @@ def get_user_config_dir(sub_dir="Ultralytics"):
def colorstr(*input):
- """
+ r"""
Colors a string based on the provided color and style arguments. Utilizes ANSI escape codes.
See https://en.wikipedia.org/wiki/ANSI_escape_code for more details.
diff --git a/ultralytics/utils/autobatch.py b/ultralytics/utils/autobatch.py
index 2f695df82aa..2ff8b8966b9 100644
--- a/ultralytics/utils/autobatch.py
+++ b/ultralytics/utils/autobatch.py
@@ -22,7 +22,6 @@ def check_train_batch_size(model, imgsz=640, amp=True, batch=-1):
Returns:
(int): Optimal batch size computed using the autobatch() function.
"""
-
with torch.cuda.amp.autocast(amp):
return autobatch(deepcopy(model).train(), imgsz, fraction=batch if 0.0 < batch < 1.0 else 0.6)
@@ -40,7 +39,6 @@ def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch):
Returns:
(int): The optimal batch size.
"""
-
# Check device
prefix = colorstr("AutoBatch: ")
LOGGER.info(f"{prefix}Computing optimal batch size for imgsz={imgsz} at {fraction * 100}% CUDA memory utilization.")
diff --git a/ultralytics/utils/benchmarks.py b/ultralytics/utils/benchmarks.py
index fde0d5fdab7..2fe693aff42 100644
--- a/ultralytics/utils/benchmarks.py
+++ b/ultralytics/utils/benchmarks.py
@@ -178,7 +178,6 @@ def set_key(self, api_key):
Args:
api_key (str): The API key.
"""
-
check_requirements("roboflow")
from roboflow import Roboflow
@@ -191,13 +190,12 @@ def parse_dataset(self, ds_link_txt="datasets_links.txt"):
Args:
ds_link_txt (str): Path to dataset_links file.
"""
-
(shutil.rmtree("rf-100"), os.mkdir("rf-100")) if os.path.exists("rf-100") else os.mkdir("rf-100")
os.chdir("rf-100")
os.mkdir("ultralytics-benchmarks")
safe_download("https://ultralytics.com/assets/datasets_links.txt")
- with open(ds_link_txt, "r") as file:
+ with open(ds_link_txt) as file:
for line in file:
try:
_, url, workspace, project, version = re.split("/+", line.strip())
@@ -221,8 +219,7 @@ def fix_yaml(path):
Args:
path (str): YAML file path.
"""
-
- with open(path, "r") as file:
+ with open(path) as file:
yaml_data = yaml.safe_load(file)
yaml_data["train"] = "train/images"
yaml_data["val"] = "valid/images"
@@ -242,7 +239,7 @@ def evaluate(self, yaml_path, val_log_file, eval_log_file, list_ind):
skip_symbols = ["🚀", "⚠️", "💡", "❌"]
with open(yaml_path) as stream:
class_names = yaml.safe_load(stream)["names"]
- with open(val_log_file, "r", encoding="utf-8") as f:
+ with open(val_log_file, encoding="utf-8") as f:
lines = f.readlines()
eval_lines = []
for line in lines:
diff --git a/ultralytics/utils/callbacks/base.py b/ultralytics/utils/callbacks/base.py
index e1e9b42b55b..98b20256e52 100644
--- a/ultralytics/utils/callbacks/base.py
+++ b/ultralytics/utils/callbacks/base.py
@@ -192,7 +192,6 @@ def add_integration_callbacks(instance):
instance (Trainer, Predictor, Validator, Exporter): An object with a 'callbacks' attribute that is a dictionary
of callback lists.
"""
-
# Load HUB callbacks
from .hub import callbacks as hub_cb
diff --git a/ultralytics/utils/callbacks/comet.py b/ultralytics/utils/callbacks/comet.py
index 518860c5f97..7e90a538638 100644
--- a/ultralytics/utils/callbacks/comet.py
+++ b/ultralytics/utils/callbacks/comet.py
@@ -114,7 +114,6 @@ def _scale_bounding_box_to_original_image_shape(box, resized_image_shape, origin
This function rescales the bounding box labels to the original image shape.
"""
-
resized_image_height, resized_image_width = resized_image_shape
# Convert normalized xywh format predictions to xyxy in resized scale format
diff --git a/ultralytics/utils/callbacks/tensorboard.py b/ultralytics/utils/callbacks/tensorboard.py
index f921e613aa1..2aa114b53b7 100644
--- a/ultralytics/utils/callbacks/tensorboard.py
+++ b/ultralytics/utils/callbacks/tensorboard.py
@@ -34,7 +34,6 @@ def _log_scalars(scalars, step=0):
def _log_tensorboard_graph(trainer):
"""Log model graph to TensorBoard."""
-
# Input image
imgsz = trainer.args.imgsz
imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz
diff --git a/ultralytics/utils/checks.py b/ultralytics/utils/checks.py
index 6e94cf83a81..9d24ad8b76d 100644
--- a/ultralytics/utils/checks.py
+++ b/ultralytics/utils/checks.py
@@ -64,7 +64,6 @@ def parse_requirements(file_path=ROOT.parent / "requirements.txt", package=""):
parse_requirements(package='ultralytics')
```
"""
-
if package:
requires = [x for x in metadata.distribution(package).requires if "extra == " not in x]
else:
@@ -361,7 +360,6 @@ def check_requirements(requirements=ROOT.parent / "requirements.txt", exclude=()
check_requirements(['numpy', 'ultralytics>=8.0.0'])
```
"""
-
prefix = colorstr("red", "bold", "requirements:")
check_python() # check python version
check_torchvision() # check torch-torchvision compatibility
@@ -421,7 +419,6 @@ def check_torchvision():
The compatibility table is a dictionary where the keys are PyTorch versions and the values are lists of compatible
Torchvision versions.
"""
-
# Compatibility table
compatibility_table = {
"2.3": ["0.18"],
@@ -582,7 +579,6 @@ def check_yolo(verbose=True, device=""):
def collect_system_info():
"""Collect and print relevant system information including OS, Python, RAM, CPU, and CUDA."""
-
import psutil
from ultralytics.utils import ENVIRONMENT, IS_GIT_DIR
diff --git a/ultralytics/utils/downloads.py b/ultralytics/utils/downloads.py
index ab7ffa08e0b..891c8d47ca4 100644
--- a/ultralytics/utils/downloads.py
+++ b/ultralytics/utils/downloads.py
@@ -391,7 +391,6 @@ def get_github_assets(repo="ultralytics/assets", version="latest", retry=False):
tag, assets = get_github_assets(repo='ultralytics/assets', version='latest')
```
"""
-
if version != "latest":
version = f"tags/{version}" # i.e. tags/v6.2
url = f"https://api.github.com/repos/{repo}/releases/{version}"
diff --git a/ultralytics/utils/files.py b/ultralytics/utils/files.py
index 719cacaee1a..759341788aa 100644
--- a/ultralytics/utils/files.py
+++ b/ultralytics/utils/files.py
@@ -48,7 +48,6 @@ def spaces_in_path(path):
# Your code here
```
"""
-
# If path has spaces, replace them with underscores
if " " in str(path):
string = isinstance(path, str) # input type
diff --git a/ultralytics/utils/metrics.py b/ultralytics/utils/metrics.py
index ad4ff397a3a..77d377536d4 100644
--- a/ultralytics/utils/metrics.py
+++ b/ultralytics/utils/metrics.py
@@ -30,7 +30,6 @@ def bbox_ioa(box1, box2, iou=False, eps=1e-7):
Returns:
(np.ndarray): A numpy array of shape (n, m) representing the intersection over box2 area.
"""
-
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1.T
b2_x1, b2_y1, b2_x2, b2_y2 = box2.T
@@ -53,7 +52,7 @@ def bbox_ioa(box1, box2, iou=False, eps=1e-7):
def box_iou(box1, box2, eps=1e-7):
"""
Calculate intersection-over-union (IoU) of boxes. Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
- Based on https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
+ Based on https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py.
Args:
box1 (torch.Tensor): A tensor of shape (N, 4) representing N bounding boxes.
@@ -63,7 +62,6 @@ def box_iou(box1, box2, eps=1e-7):
Returns:
(torch.Tensor): An NxM tensor containing the pairwise IoU values for every element in box1 and box2.
"""
-
# NOTE: Need .float() to get accurate iou values
# inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
(a1, a2), (b1, b2) = box1.float().unsqueeze(1).chunk(2, 2), box2.float().unsqueeze(0).chunk(2, 2)
@@ -90,7 +88,6 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7
Returns:
(torch.Tensor): IoU, GIoU, DIoU, or CIoU values depending on the specified flags.
"""
-
# Get the coordinates of bounding boxes
if xywh: # transform from xywh to xyxy
(x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1)
@@ -456,7 +453,7 @@ def plot_pr_curve(px, py, ap, save_dir=Path("pr_curve.png"), names=(), on_plot=N
else:
ax.plot(px, py, linewidth=1, color="grey") # plot(recall, precision)
- ax.plot(px, py.mean(1), linewidth=3, color="blue", label="all classes %.3f mAP@0.5" % ap[:, 0].mean())
+ ax.plot(px, py.mean(1), linewidth=3, color="blue", label=f"all classes {ap[:, 0].mean():.3f} mAP@0.5")
ax.set_xlabel("Recall")
ax.set_ylabel("Precision")
ax.set_xlim(0, 1)
@@ -507,7 +504,6 @@ def compute_ap(recall, precision):
(np.ndarray): Precision envelope curve.
(np.ndarray): Modified recall curve with sentinel values added at the beginning and end.
"""
-
# Append sentinel values to beginning and end
mrec = np.concatenate(([0.0], recall, [1.0]))
mpre = np.concatenate(([1.0], precision, [0.0]))
@@ -560,7 +556,6 @@ def ap_per_class(
x (np.ndarray): X-axis values for the curves. Shape: (1000,).
prec_values: Precision values at mAP@0.5 for each class. Shape: (nc, 1000).
"""
-
# Sort by objectness
i = np.argsort(-conf)
tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
@@ -942,7 +937,6 @@ def process(self, tp, tp_m, conf, pred_cls, target_cls):
pred_cls (list): List of predicted classes.
target_cls (list): List of target classes.
"""
-
results_mask = ap_per_class(
tp_m,
conf,
@@ -1084,7 +1078,6 @@ def process(self, tp, tp_p, conf, pred_cls, target_cls):
pred_cls (list): List of predicted classes.
target_cls (list): List of target classes.
"""
-
results_pose = ap_per_class(
tp_p,
conf,
diff --git a/ultralytics/utils/ops.py b/ultralytics/utils/ops.py
index 4c79feff104..75e36a57612 100644
--- a/ultralytics/utils/ops.py
+++ b/ultralytics/utils/ops.py
@@ -597,7 +597,7 @@ def ltwh2xyxy(x):
def segments2boxes(segments):
"""
- It converts segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
+ It converts segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh).
Args:
segments (list): list of segments, each segment is a list of points, each point is a list of x, y coordinates
@@ -688,7 +688,6 @@ def process_mask(protos, masks_in, bboxes, shape, upsample=False):
(torch.Tensor): A binary mask tensor of shape [n, h, w], where n is the number of masks after NMS, and h and w
are the height and width of the input image. The mask is applied to the bounding boxes.
"""
-
c, mh, mw = protos.shape # CHW
ih, iw = shape
masks = (masks_in @ protos.float().view(c, -1)).view(-1, mh, mw) # CHW
@@ -806,7 +805,7 @@ def regularize_rboxes(rboxes):
def masks2segments(masks, strategy="largest"):
"""
- It takes a list of masks(n,h,w) and returns a list of segments(n,xy)
+ It takes a list of masks(n,h,w) and returns a list of segments(n,xy).
Args:
masks (torch.Tensor): the output of the model, which is a tensor of shape (batch_size, 160, 160)
@@ -844,7 +843,7 @@ def convert_torch2numpy_batch(batch: torch.Tensor) -> np.ndarray:
def clean_str(s):
"""
- Cleans a string by replacing special characters with underscore _
+ Cleans a string by replacing special characters with underscore _.
Args:
s (str): a string needing special characters replaced
diff --git a/ultralytics/utils/plotting.py b/ultralytics/utils/plotting.py
index 4083b96537b..a2ba7b17e3b 100644
--- a/ultralytics/utils/plotting.py
+++ b/ultralytics/utils/plotting.py
@@ -204,7 +204,6 @@ def circle_label(self, box, label="", color=(128, 128, 128), txt_color=(255, 255
txt_color (tuple, optional): The color of the text (R, G, B).
margin (int, optional): The margin between the text and the rectangle border.
"""
-
# If label have more than 3 characters, skip other characters, due to circle size
if len(label) > 3:
print(
@@ -246,7 +245,6 @@ def text_label(self, box, label="", color=(128, 128, 128), txt_color=(255, 255,
txt_color (tuple, optional): The color of the text (R, G, B).
margin (int, optional): The margin between the text and the rectangle border.
"""
-
# Calculate the center of the bounding box
x_center, y_center = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)
# Get the size of the text
@@ -284,7 +282,6 @@ def box_label(self, box, label="", color=(128, 128, 128), txt_color=(255, 255, 2
txt_color (tuple, optional): The color of the text (R, G, B).
rotated (bool, optional): Variable used to check if task is OBB
"""
-
txt_color = self.get_txt_color(color, txt_color)
if isinstance(box, torch.Tensor):
box = box.tolist()
@@ -343,7 +340,6 @@ def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False):
alpha (float): Mask transparency: 0.0 fully transparent, 1.0 opaque
retina_masks (bool): Whether to use high resolution masks or not. Defaults to False.
"""
-
if self.pil:
# Convert to numpy first
self.im = np.asarray(self.im).copy()
@@ -383,7 +379,6 @@ def kpts(self, kpts, shape=(640, 640), radius=5, kpt_line=True, conf_thres=0.25)
Note:
`kpt_line=True` currently only supports human pose plotting.
"""
-
if self.pil:
# Convert to numpy first
self.im = np.asarray(self.im).copy()
@@ -480,7 +475,6 @@ def get_bbox_dimension(self, bbox=None):
Returns:
angle (degree): Degree value of angle between three points
"""
-
x_min, y_min, x_max, y_max = bbox
width = x_max - x_min
height = y_max - y_min
@@ -495,7 +489,6 @@ def draw_region(self, reg_pts=None, color=(0, 255, 0), thickness=5):
color (tuple): Region Color value
thickness (int): Region area thickness value
"""
-
cv2.polylines(self.im, [np.array(reg_pts, dtype=np.int32)], isClosed=True, color=color, thickness=thickness)
def draw_centroid_and_tracks(self, track, color=(255, 0, 255), track_thickness=2):
@@ -507,7 +500,6 @@ def draw_centroid_and_tracks(self, track, color=(255, 0, 255), track_thickness=2
color (tuple): tracks line color
track_thickness (int): track line thickness value
"""
-
points = np.hstack(track).astype(np.int32).reshape((-1, 1, 2))
cv2.polylines(self.im, [points], isClosed=False, color=color, thickness=track_thickness)
cv2.circle(self.im, (int(track[-1][0]), int(track[-1][1])), track_thickness * 2, color, -1)
@@ -522,7 +514,6 @@ def queue_counts_display(self, label, points=None, region_color=(255, 255, 255),
region_color (RGB): queue region color
txt_color (RGB): text display color
"""
-
x_values = [point[0] for point in points]
y_values = [point[1] for point in points]
center_x = sum(x_values) // len(points)
@@ -566,7 +557,6 @@ def display_objects_labels(self, im0, text, txt_color, bg_color, x_center, y_cen
y_center (float): y position center point for bounding box
margin (int): gap between text and rectangle for better display
"""
-
text_size = cv2.getTextSize(text, 0, fontScale=self.sf, thickness=self.tf)[0]
text_x = x_center - text_size[0] // 2
text_y = y_center + text_size[1] // 2
@@ -589,7 +579,6 @@ def display_analytics(self, im0, text, txt_color, bg_color, margin):
bg_color (bgr color): display color for text background
margin (int): gap between text and rectangle for better display
"""
-
horizontal_gap = int(im0.shape[1] * 0.02)
vertical_gap = int(im0.shape[0] * 0.01)
text_y_offset = 0
@@ -621,7 +610,6 @@ def estimate_pose_angle(a, b, c):
Returns:
angle (degree): Degree value of angle between three points
"""
-
a, b, c = np.array(a), np.array(b), np.array(c)
radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
angle = np.abs(radians * 180.0 / np.pi)
@@ -639,7 +627,6 @@ def draw_specific_points(self, keypoints, indices=None, shape=(640, 640), radius
shape (tuple): imgsz for model inference
radius (int): Keypoint radius value
"""
-
if indices is None:
indices = [2, 5, 7]
for i, k in enumerate(keypoints):
@@ -667,7 +654,6 @@ def plot_angle_and_count_and_stage(
color (tuple): text background color for workout monitoring
txt_color (tuple): text foreground color for workout monitoring
"""
-
angle_text, count_text, stage_text = (f" {angle_text:.2f}", f"Steps : {count_text}", f" {stage_text}")
# Draw angle
@@ -736,7 +722,6 @@ def seg_bbox(self, mask, mask_color=(255, 0, 255), det_label=None, track_label=N
det_label (str): Detection label text
track_label (str): Tracking label text
"""
-
cv2.polylines(self.im, [np.int32([mask])], isClosed=True, color=mask_color, thickness=2)
label = f"Track ID: {track_label}" if track_label else det_label
@@ -765,7 +750,6 @@ def plot_distance_and_line(self, distance_m, distance_mm, centroids, line_color,
line_color (RGB): Distance line color.
centroid_color (RGB): Bounding box centroid color.
"""
-
(text_width_m, text_height_m), _ = cv2.getTextSize(f"Distance M: {distance_m:.2f}m", 0, self.sf, self.tf)
cv2.rectangle(self.im, (15, 25), (15 + text_width_m + 10, 25 + text_height_m + 20), line_color, -1)
cv2.putText(
@@ -806,7 +790,6 @@ def visioneye(self, box, center_point, color=(235, 219, 11), pin_color=(255, 0,
color (tuple): object centroid and line color value
pin_color (tuple): visioneye point color value
"""
-
center_bbox = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)
cv2.circle(self.im, center_point, self.tf * 2, pin_color, -1)
cv2.circle(self.im, center_bbox, self.tf * 2, color, -1)
@@ -899,7 +882,6 @@ def save_one_box(xyxy, im, file=Path("im.jpg"), gain=1.02, pad=10, square=False,
cropped_im = save_one_box(xyxy, im, file='cropped.jpg', square=True)
```
"""
-
if not isinstance(xyxy, torch.Tensor): # may be list
xyxy = torch.stack(xyxy)
b = ops.xyxy2xywh(xyxy.view(-1, 4)) # boxes
@@ -1164,7 +1146,6 @@ def plt_color_scatter(v, f, bins=20, cmap="viridis", alpha=0.8, edgecolors="none
>>> f = np.random.rand(100)
>>> plt_color_scatter(v, f)
"""
-
# Calculate 2D histogram and corresponding colors
hist, xedges, yedges = np.histogram2d(v, f, bins=bins)
colors = [
@@ -1190,7 +1171,6 @@ def plot_tune_results(csv_file="tune_results.csv"):
Examples:
>>> plot_tune_results('path/to/tune_results.csv')
"""
-
import pandas as pd # scope for faster 'import ultralytics'
from scipy.ndimage import gaussian_filter1d
diff --git a/ultralytics/utils/tal.py b/ultralytics/utils/tal.py
index 9cee05008f0..0dadfa2a196 100644
--- a/ultralytics/utils/tal.py
+++ b/ultralytics/utils/tal.py
@@ -140,7 +140,6 @@ def select_topk_candidates(self, metrics, largest=True, topk_mask=None):
Returns:
(Tensor): A tensor of shape (b, max_num_obj, h*w) containing the selected top-k candidates.
"""
-
# (b, max_num_obj, topk)
topk_metrics, topk_idxs = torch.topk(metrics, self.topk, dim=-1, largest=largest)
if topk_mask is None:
@@ -184,7 +183,6 @@ def get_targets(self, gt_labels, gt_bboxes, target_gt_idx, fg_mask):
for positive anchor points, where num_classes is the number
of object classes.
"""
-
# Assigned target labels, (b, 1)
batch_ind = torch.arange(end=self.bs, dtype=torch.int64, device=gt_labels.device)[..., None]
target_gt_idx = target_gt_idx + batch_ind * self.n_max_boxes # (b, h*w)
@@ -332,6 +330,7 @@ def dist2rbox(pred_dist, pred_angle, anchor_points, dim=-1):
pred_dist (torch.Tensor): Predicted rotated distance, (bs, h*w, 4).
pred_angle (torch.Tensor): Predicted angle, (bs, h*w, 1).
anchor_points (torch.Tensor): Anchor points, (h*w, 2).
+
Returns:
(torch.Tensor): Predicted rotated bounding boxes, (bs, h*w, 4).
"""
diff --git a/ultralytics/utils/torch_utils.py b/ultralytics/utils/torch_utils.py
index 0016c970341..fd1178a538c 100644
--- a/ultralytics/utils/torch_utils.py
+++ b/ultralytics/utils/torch_utils.py
@@ -109,7 +109,6 @@ def select_device(device="", batch=0, newline=False, verbose=True):
Note:
Sets the 'CUDA_VISIBLE_DEVICES' environment variable for specifying which GPUs to use.
"""
-
if isinstance(device, torch.device):
return device
@@ -459,7 +458,7 @@ def init_seeds(seed=0, deterministic=False):
class ModelEMA:
"""
Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models. Keeps a moving
- average of everything in the model state_dict (parameters and buffers)
+ average of everything in the model state_dict (parameters and buffers).
For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
diff --git a/ultralytics/utils/tuner.py b/ultralytics/utils/tuner.py
index 1bde002c8a4..8c86d6b7463 100644
--- a/ultralytics/utils/tuner.py
+++ b/ultralytics/utils/tuner.py
@@ -34,7 +34,6 @@ def run_ray_tune(
result_grid = model.tune(data='coco8.yaml', use_ray=True)
```
"""
-
LOGGER.info("💡 Learn about RayTune at https://docs.ultralytics.com/integrations/ray-tune")
if train_args is None:
train_args = {}