diff --git a/notebooks/export-onnx-inference-onnxruntime.ipynb b/notebooks/export-onnx-inference-onnxruntime.ipynb new file mode 100644 index 00000000..18c51d0d --- /dev/null +++ b/notebooks/export-onnx-inference-onnxruntime.ipynb @@ -0,0 +1,357 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import time\n", + "\n", + "import numpy as np\n", + "from numpy import random\n", + "\n", + "from pathlib import Path\n", + "\n", + "import shutil\n", + "\n", + "import cv2\n", + "from IPython import display\n", + "\n", + "import torch\n", + "from torch import nn\n", + "import torch.nn.functional as F\n", + "\n", + "import onnx\n", + "import onnxruntime\n", + "\n", + "from utils.image_utils import cv2_imshow\n", + "from models import yolov5_onnx\n", + "\n", + "from detect import read_image, load_names, overlay_boxes" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n", + "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n", + "\n", + "device = torch.device('cuda')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Model Definition and Initialization" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "model = yolov5_onnx(pretrained=True, min_size=320, max_size=416, score_thresh=0.5)\n", + "\n", + "model.eval()\n", + "model = model.to(device)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load images for inference" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "path = 'notebooks/assets/bus.jpg'" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "img_test = read_image(path, is_half=False)\n", + "img_test = img_test.to(device)\n", + "\n", + "images = [img_test]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Inference with the `pytorch` backend" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "with torch.no_grad():\n", + "    model_out = model([img_test])" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([[ 48.4041, 401.9219, 237.0266, 897.8111],\n", + "        [215.3344, 408.0325, 344.7906, 857.3686],\n", + "        [ 13.2764, 225.2432, 802.3018, 735.7723],\n", + "        [674.4550, 397.7541, 812.1368, 868.5444]], device='cuda:0')" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model_out[0]['boxes']" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([0.8940, 0.8635, 0.8609, 0.7063], device='cuda:0')" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model_out[0]['scores']" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([0, 0, 5, 0], device='cuda:0')" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model_out[0]['labels']" + ] + },
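+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A quick visual sanity check of the detections above, drawn with plain OpenCV calls. This is a minimal sketch rather than the repo's canonical path: it assumes the boxes are `(x1, y1, x2, y2)` pixel coordinates on the original image (as the values above suggest), and it reuses the `cv2_imshow` helper imported earlier; the `overlay_boxes` and `load_names` helpers from `detect` may be the preferred route." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Hedged sketch: draw the PyTorch detections with plain OpenCV.\n", + "# Assumes boxes are (x1, y1, x2, y2) in original-image pixels.\n", + "img_raw = cv2.imread(path)\n", + "boxes = model_out[0]['boxes'].cpu().numpy()\n", + "scores = model_out[0]['scores'].cpu().numpy()\n", + "for (x1, y1, x2, y2), score in zip(boxes, scores):\n", + "    cv2.rectangle(img_raw, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)\n", + "    cv2.putText(img_raw, f'{score:.2f}', (int(x1), max(int(y1) - 5, 0)),\n", + "                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)\n", + "cv2_imshow(img_raw)" + ] + },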
"execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Starting ONNX export with onnx 1.8.0, onnxruntime 1.5.2...\n" + ] + } + ], + "source": [ + "# TorchScript export\n", + "print(f'Starting ONNX export with onnx {onnx.__version__}, onnxruntime {onnxruntime.__version__}...')\n", + "export_onnx_name = './checkpoints/yolov5/yolov5s.onnx'" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "from torchvision.ops._register_onnx_ops import _onnx_opset_version" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/local/lib/python3.6/dist-packages/torch/onnx/utils.py:1112: UserWarning: No names were found for specified dynamic axes of provided input.Automatically generated names will be applied to each dynamic axes of input images_tensors\n", + " 'Automatically generated names will be applied to each dynamic axes of input {}'.format(key))\n", + "/usr/local/lib/python3.6/dist-packages/torch/onnx/utils.py:1112: UserWarning: No names were found for specified dynamic axes of provided input.Automatically generated names will be applied to each dynamic axes of input outputs\n", + " 'Automatically generated names will be applied to each dynamic axes of input {}'.format(key))\n", + "/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py:3123: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " dtype=torch.float32)).float())) for i in range(dim)]\n", + "/mnt/yolov5-rt-stack/models/anchor_utils.py:26: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n", + " stride = torch.as_tensor([stride], dtype=dtype, device=device)\n", + "/mnt/yolov5-rt-stack/models/anchor_utils.py:40: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n", + " anchor_grid = torch.as_tensor(anchor_grid, dtype=dtype, device=device)\n", + "/mnt/yolov5-rt-stack/models/anchor_utils.py:63: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. 
+ { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/local/lib/python3.6/dist-packages/torch/onnx/utils.py:1112: UserWarning: No names were found for specified dynamic axes of provided input.Automatically generated names will be applied to each dynamic axes of input images_tensors\n", + " 'Automatically generated names will be applied to each dynamic axes of input {}'.format(key))\n", + "/usr/local/lib/python3.6/dist-packages/torch/onnx/utils.py:1112: UserWarning: No names were found for specified dynamic axes of provided input.Automatically generated names will be applied to each dynamic axes of input outputs\n", + " 'Automatically generated names will be applied to each dynamic axes of input {}'.format(key))\n", + "/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py:3123: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " dtype=torch.float32)).float())) for i in range(dim)]\n", + "/mnt/yolov5-rt-stack/models/anchor_utils.py:26: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n", + " stride = torch.as_tensor([stride], dtype=dtype, device=device)\n", + "/mnt/yolov5-rt-stack/models/anchor_utils.py:40: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n", + " anchor_grid = torch.as_tensor(anchor_grid, dtype=dtype, device=device)\n", + "/mnt/yolov5-rt-stack/models/anchor_utils.py:63: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n", + " shifts = shifts - torch.tensor(0.5, dtype=shifts.dtype, device=device)\n", + "/usr/local/lib/python3.6/dist-packages/torchvision/models/detection/transform.py:271: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " for s, s_orig in zip(new_size, original_size)\n", + "/usr/local/lib/python3.6/dist-packages/torch/onnx/symbolic_opset9.py:2378: UserWarning: Exporting aten::index operator of advanced indexing in opset 11 is achieved by combination of multiple ONNX operators, including Reshape, Transpose, Concat, and Gather. If indices include negative values, the exported graph will produce incorrect results.\n", + " \"If indices include negative values, the exported graph will produce incorrect results.\")\n", + "/usr/local/lib/python3.6/dist-packages/torch/onnx/symbolic_opset9.py:588: UserWarning: This model contains a squeeze operation on dimension 1 on an input with unknown shape. Note that if the size of dimension 1 of the input is not 1, the ONNX model will return an error. Opset version 11 supports squeezing on non-singleton dimensions, it is recommended to export this model using opset version 11 or higher.\n", + " \"version 11 or higher.\")\n" + ] + } + ], + "source": [ + "model.eval()\n", + "\n", + "torch.onnx.export(\n", + "    model,\n", + "    (images,),\n", + "    export_onnx_name,\n", + "    do_constant_folding=True,\n", + "    opset_version=_onnx_opset_version,\n", + "    dynamic_axes={\"images_tensors\": [0, 1, 2, 3], \"outputs\": [0, 1, 2, 3]},\n", + "    input_names=[\"images_tensors\"],\n", + "    output_names=[\"outputs\"],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Inference with the `ONNXRuntime` backend" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "# Flatten the inputs and the reference PyTorch outputs into plain lists of tensors\n", + "inputs, _ = torch.jit._flatten(images)\n", + "outputs, _ = torch.jit._flatten(model_out)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "def to_numpy(tensor):\n", + "    if tensor.requires_grad:\n", + "        return tensor.detach().cpu().numpy()\n", + "    else:\n", + "        return tensor.cpu().numpy()" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "inputs = list(map(to_numpy, inputs))\n", + "outputs = list(map(to_numpy, outputs))" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "ort_session = onnxruntime.InferenceSession(export_onnx_name)\n", + "# Compute the ONNXRuntime output prediction\n", + "ort_inputs = dict((ort_session.get_inputs()[i].name, inpt) for i, inpt in enumerate(inputs))\n", + "ort_outs = ort_session.run(None, ort_inputs)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Exported model has been tested with ONNXRuntime, and the result looks good!\n" + ] + } + ], + "source": [ + "for i in range(len(outputs)):\n", + "    torch.testing.assert_allclose(outputs[i], ort_outs[i], rtol=1e-03, atol=1e-05)\n", + "\n", + "print(\"Exported model has been tested with ONNXRuntime, and the result looks good!\")" + ] + },
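+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Rough latency comparison (optional)\n", + "\n", + "A minimal timing sketch, not a rigorous benchmark: the `pytorch` model runs on the GPU while this `InferenceSession` uses `onnxruntime`'s default (CPU) provider, so the numbers are only indicative and will vary by hardware." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Hedged sketch: average wall-clock latency over a few warm runs.\n", + "# Note the backends differ (CUDA vs. ONNXRuntime's default CPU provider).\n", + "n_runs = 10\n", + "\n", + "with torch.no_grad():\n", + "    model([img_test])  # warm-up\n", + "torch.cuda.synchronize()\n", + "t0 = time.time()\n", + "for _ in range(n_runs):\n", + "    with torch.no_grad():\n", + "        model([img_test])\n", + "torch.cuda.synchronize()\n", + "print(f'pytorch: {(time.time() - t0) / n_runs * 1000:.1f} ms/image')\n", + "\n", + "ort_session.run(None, ort_inputs)  # warm-up\n", + "t0 = time.time()\n", + "for _ in range(n_runs):\n", + "    ort_session.run(None, ort_inputs)\n", + "print(f'onnxruntime: {(time.time() - t0) / n_runs * 1000:.1f} ms/image')" + ] + }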
"YOLOv5 Tutorial", + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file