diff --git a/deployment/libtorch/README.md b/deployment/libtorch/README.md
index a8864437..91cba9d3 100644
--- a/deployment/libtorch/README.md
+++ b/deployment/libtorch/README.md
@@ -59,8 +59,8 @@ The LibTorch inference for `yolort`, both GPU and CPU are supported.
 1. Now, you can infer your own images.
 
    ```bash
-   ./yolo_inference [--input_source ../../../test/assets/zidane.jpg]
-                    [--checkpoint ../yolov5n.torchscript.pt]
-                    [--labelmap ../../../notebooks/assets/coco.names]
-                    [--gpu] # GPU switch, which is optional, and set False as default
+   ./yolort_torch [--input_source ../../../test/assets/zidane.jpg]
+                  [--checkpoint ../yolov5n.torchscript.pt]
+                  [--labelmap ../../../notebooks/assets/coco.names]
+                  [--gpu] # Optional GPU switch; defaults to False
    ```
diff --git a/deployment/libtorch/main.cpp b/deployment/libtorch/main.cpp
index e1c31e56..8749c030 100644
--- a/deployment/libtorch/main.cpp
+++ b/deployment/libtorch/main.cpp
@@ -172,9 +172,9 @@ int main(int argc, char* argv[]) {
 
   // Run once to warm up
   std::cout << "Run once on empty image" << std::endl;
-  auto img_dumy = torch::rand({3, 416, 320}, options);
+  auto img_dummy = torch::rand({3, 416, 320}, options);
 
-  images.push_back(img_dumy);
+  images.push_back(img_dummy);
   inputs.push_back(images);
 
   auto output = module.forward(inputs);
diff --git a/deployment/tensorrt/README.md b/deployment/tensorrt/README.md
index 5280f485..9e6dc162 100644
--- a/deployment/tensorrt/README.md
+++ b/deployment/tensorrt/README.md
@@ -8,7 +8,7 @@ The TensorRT inference for `yolort`, support CUDA only.
 
 ## Usage
 
-1. Create build director and cmake config.
+1. Create build directory and cmake config.
 
    ```bash
    mkdir -p build/ && cd build/
diff --git a/notebooks/export-relay-inference-tvm.ipynb b/notebooks/export-relay-inference-tvm.ipynb
index c9a8db6e..2aff1f3b 100644
--- a/notebooks/export-relay-inference-tvm.ipynb
+++ b/notebooks/export-relay-inference-tvm.ipynb
@@ -336,7 +336,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Varify the Inference Output on TVM backend"
+    "## Verify the Inference Output on TVM backend"
    ]
   },
   {
diff --git a/notebooks/how-to-align-with-ultralytics-yolov5.ipynb b/notebooks/how-to-align-with-ultralytics-yolov5.ipynb
index 5663cb29..95cfe546 100644
--- a/notebooks/how-to-align-with-ultralytics-yolov5.ipynb
+++ b/notebooks/how-to-align-with-ultralytics-yolov5.ipynb
@@ -324,7 +324,7 @@
    "id": "4f3f7c09",
    "metadata": {},
    "source": [
-    "## Varify the detection results between yolort and ultralytics"
+    "## Verify the detection results between yolort and ultralytics"
    ]
   },
   {
diff --git a/notebooks/inference-pytorch-export-libtorch.ipynb b/notebooks/inference-pytorch-export-libtorch.ipynb
index cc97de0c..61a9e95b 100644
--- a/notebooks/inference-pytorch-export-libtorch.ipynb
+++ b/notebooks/inference-pytorch-export-libtorch.ipynb
@@ -113,7 +113,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Varify the PyTorch backend inference results"
+    "## Verify the PyTorch backend inference results"
    ]
   },
   {
@@ -375,7 +375,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Varify the Inference Output on LibTorch backend"
+    "## Verify the Inference Output on LibTorch backend"
    ]
   },
   {
"00196f91-2b49-4b9d-8be7-aa8aea11c0ee", "metadata": {}, "source": [ - "## Varify the detection results between yolort and TensorRT" + "## Verify the detection results between yolort and TensorRT" ] }, { diff --git a/test/test_onnx.py b/test/test_onnx.py index bf1a6502..21c926e2 100644 --- a/test/test_onnx.py +++ b/test/test_onnx.py @@ -59,10 +59,10 @@ def run_model( with torch.no_grad(): if isinstance(test_inputs, Tensor) or isinstance(test_inputs, list): test_inputs = (test_inputs,) - test_ouputs = model(*test_inputs) - if isinstance(test_ouputs, Tensor): - test_ouputs = (test_ouputs,) - self.ort_validate(onnx_io, test_inputs, test_ouputs, tolerate_small_mismatch) + test_outputs = model(*test_inputs) + if isinstance(test_outputs, Tensor): + test_outputs = (test_outputs,) + self.ort_validate(onnx_io, test_inputs, test_outputs, tolerate_small_mismatch) def ort_validate(self, onnx_io, inputs, outputs, tolerate_small_mismatch=False): diff --git a/tools/export_model.py b/tools/export_model.py index ee1ef198..363af516 100644 --- a/tools/export_model.py +++ b/tools/export_model.py @@ -75,7 +75,7 @@ def export_onnx( Args: model (nn.Module): The model to be exported. inputs (Tuple[torch.Tensor]): The inputs to the model. - export_onnx_path (str): A string containg a file name. A binary Protobuf + export_onnx_path (str): A string containing a file name. A binary Protobuf will be written to this file. dynamic_axes (dict): A dictionary of dynamic axes. input_names (str): A names list of input names. @@ -110,7 +110,7 @@ def simplify_onnx(onnx_path, input_shapes): # Load onnx mode onnx_model = onnx.load(onnx_path) - # Simlify the ONNX model + # Simplify the ONNX model model_sim, check = onnxsim.simplify( onnx_model, input_shapes=input_shapes, diff --git a/yolort/models/backbone_utils.py b/yolort/models/backbone_utils.py index 4d8d6771..0c0f730c 100644 --- a/yolort/models/backbone_utils.py +++ b/yolort/models/backbone_utils.py @@ -13,7 +13,7 @@ class BackboneWithPAN(nn.Module): Adds a PAN on top of a model. Internally, it uses torchvision.models._utils.IntermediateLayerGetter to extract a submodel that returns the feature maps specified in return_layers. - The same limitations of IntermediatLayerGetter apply here. + The same limitations of IntermediateLayerGetter apply here. Args: backbone (nn.Module) diff --git a/yolort/relaying/trace_wrapper.py b/yolort/relaying/trace_wrapper.py index 19a88ed6..f41ca59c 100644 --- a/yolort/relaying/trace_wrapper.py +++ b/yolort/relaying/trace_wrapper.py @@ -40,7 +40,7 @@ def get_trace_module( input_shape: Tuple[int, int] = (416, 416), ): """ - Get the tarcing of a given model function. + Get the tracing of a given model function. 
diff --git a/yolort/runtime/trt_helper.py b/yolort/runtime/trt_helper.py
index 2817c111..1dd01208 100644
--- a/yolort/runtime/trt_helper.py
+++ b/yolort/runtime/trt_helper.py
@@ -129,7 +129,7 @@ def __init__(
             model.load_state_dict(model_info["state_dict"])
 
         self.model = model
-        self.num_clases = num_classes
+        self.num_classes = num_classes
 
     @torch.no_grad()
     def forward(self, inputs: Tensor) -> Tuple[Tensor, Tensor]:
diff --git a/yolort/runtime/yolo_graphsurgeon.py b/yolort/runtime/yolo_graphsurgeon.py
index b0feb2f3..68d86388 100644
--- a/yolort/runtime/yolo_graphsurgeon.py
+++ b/yolort/runtime/yolo_graphsurgeon.py
@@ -68,7 +68,7 @@ def __init__(
         # Fold constants via ONNX-GS that PyTorch2ONNX may have missed
         self.graph.fold_constants()
 
-        self.num_classes = model.num_clases
+        self.num_classes = model.num_classes
         self.batch_size = 1
 
     def infer(self):
diff --git a/yolort/utils/image_utils.py b/yolort/utils/image_utils.py
index d2e78d9d..caf2829e 100644
--- a/yolort/utils/image_utils.py
+++ b/yolort/utils/image_utils.py
@@ -296,7 +296,7 @@ def anchor_match_visualize(images, targets, indices, anchors, pred):
         )
 
         # The anchors need to restore the offset.
-        # In eacy layer there has at most 3x3=9 anchors for matching.
+        # Each layer has at most 3x3=9 anchors for matching.
         anchor_restored = restore_anchor(anchor, grid_x, grid_y, stride, pred[i].shape, image_sizes)
 
         # visualize positive anchor
@@ -309,13 +309,13 @@ def anchor_match_visualize(images, targets, indices, anchors, pred):
     return images_with_anchor
 
 
-def overlay_bbox(image, bboxs_list, color=None, thickness=2, font_scale=0.3, with_mask=False):
+def overlay_bbox(image, bboxes_list, color=None, thickness=2, font_scale=0.3, with_mask=False):
     """
     Visualize bbox in object detection by drawing rectangle.
 
     Args:
         image: numpy.ndarray.
-        bboxs_list: list: [pts_xyxy, prob, id]: label or prediction.
+        bboxes_list: list: [pts_xyxy, prob, id]: label or prediction.
         color: tuple.
         thickness: int.
         font_scale: float.
@@ -329,7 +329,7 @@ def overlay_bbox(image, bboxs_list, color=None, thickness=2, font_scale=0.3, wit
     txt = ""
     COLORS = color_list()  # list of COLORS
 
-    for bbox in bboxs_list:
+    for bbox in bboxes_list:
        if len(bbox) == 5:
             txt = "{:.3f}".format(bbox[4])
         elif len(bbox) == 6:
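
As a usage reference for the renamed helper, here is a minimal sketch of calling `overlay_bbox` with the `bboxes_list` layout its docstring describes (`[pts_xyxy, prob, id]`). The image path reuses the test asset referenced in the LibTorch README above; the box coordinates and class id are hypothetical, and the sketch assumes the helper draws with OpenCV on a BGR `numpy.ndarray` and returns the annotated array.

```python
import cv2  # assumption: image_utils draws via OpenCV, so a BGR ndarray is expected

from yolort.utils.image_utils import overlay_bbox

# Test asset referenced in the LibTorch README above.
image = cv2.imread("test/assets/zidane.jpg")

# Hypothetical boxes in the [x1, y1, x2, y2, prob, id] layout from the
# docstring: a 6-element entry carries a class id as well, a 5-element
# entry carries only the confidence score.
bboxes_list = [
    [52, 38, 220, 310, 0.91, 0],
    [300, 120, 410, 290, 0.87],
]

# Assumption: overlay_bbox returns the annotated image array.
image_vis = overlay_bbox(image, bboxes_list, thickness=2, font_scale=0.3)
cv2.imwrite("zidane_vis.jpg", image_vis)
```

Passing both a 5-element and a 6-element box exercises the `len(bbox) == 5` and `len(bbox) == 6` branches visible in the last hunk.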