diff --git a/README.md b/README.md
index 84e3c9416..5f643fd5d 100644
--- a/README.md
+++ b/README.md
@@ -144,12 +144,12 @@ import torch
 from yolort.runtime import PredictorTRT
 
 # Load the exported TensorRT engine
-engine_path = 'yolov5n6.engine'
-device = torch.device('cuda')
+engine_path = "yolov5n6.engine"
+device = torch.device("cuda")
 y_runtime = PredictorTRT(engine_path, device=device)
 
 # Perform inference on an image file
-predictions = y_runtime.predict('bus.jpg')
+predictions = y_runtime.predict("bus.jpg")
 ```
 
 On the `TensorRT` front you can use the [C++ example](deployment/tensorrt), and we also provide a [tutorial](https://zhiqwang.com/yolov5-rt-stack/notebooks/onnx-graphsurgeon-inference-tensorrt.html) for using the `TensorRT`.