From 10957ebe44753fd8784a9f4e121467a6e1ef0a49 Mon Sep 17 00:00:00 2001
From: Naren Dasan
Date: Wed, 1 Dec 2021 12:09:31 -0800
Subject: [PATCH] refactor(//cpp/bin/torchtrtc)!: Rename enabled precisions
 argument to enable-precision

BREAKING CHANGE: This is a minor change but may cause scripts using
torchtrtc to fail. We are renaming enabled-precision to enable-precision
since it makes more sense, as the argument can be repeated.

Signed-off-by: Naren Dasan
Signed-off-by: Naren Dasan
---
 cpp/bin/torchtrtc/README.md    | 8 ++++----
 cpp/bin/torchtrtc/main.cpp     | 8 ++++----
 docsrc/tutorials/torchtrtc.rst | 8 ++++----
 3 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/cpp/bin/torchtrtc/README.md b/cpp/bin/torchtrtc/README.md
index 033b36052c..35e7dc5837 100644
--- a/cpp/bin/torchtrtc/README.md
+++ b/cpp/bin/torchtrtc/README.md
@@ -14,12 +14,12 @@ to standard TorchScript. Load with `torch.jit.load()` and run like you would run
 ```
 
 torchtrtc [input_file_path] [output_file_path]
-    [input_specs...] {OPTIONS}
+      [input_specs...] {OPTIONS}
 
-    Torch-TensorRT is a compiler for TorchScript, it will compile and optimize
-    TorchScript programs to run on NVIDIA GPUs using TensorRT
+      torchtrtc is a compiler for TorchScript, it will compile and optimize
+      TorchScript programs to run on NVIDIA GPUs using TensorRT
 
-OPTIONS:
+    OPTIONS:
 
       -h, --help                        Display this help menu
       Verbiosity of the compiler
diff --git a/cpp/bin/torchtrtc/main.cpp b/cpp/bin/torchtrtc/main.cpp
index a437a5e133..5bb2779255 100644
--- a/cpp/bin/torchtrtc/main.cpp
+++ b/cpp/bin/torchtrtc/main.cpp
@@ -249,11 +249,11 @@ int main(int argc, char** argv) {
   args::Flag sparse_weights(
       parser, "sparse-weights", "Enable sparsity for weights of conv and FC layers", {"sparse-weights"});
-  args::ValueFlagList<std::string> enabled_precision(
+  args::ValueFlagList<std::string> enabled_precisions(
       parser,
       "precision",
       "(Repeatable) Enabling an operating precision for kernels to use when building the engine (Int8 requires a calibration-cache argument) [ float | float32 | f32 | fp32 | half | float16 | f16 | fp16 | int8 | i8 | char ] (default: float)",
-      {'p', "enabled-precision"});
+      {'p', "enable-precision"});
   args::ValueFlag<std::string> device_type(
       parser,
       "type",
@@ -501,8 +501,8 @@ int main(int argc, char** argv) {
     }
   }
 
-  if (enabled_precision) {
-    for (const auto precision : args::get(enabled_precision)) {
+  if (enabled_precisions) {
+    for (const auto precision : args::get(enabled_precisions)) {
       auto dtype = parseDataType(precision);
       if (dtype == torchtrt::DataType::kFloat) {
         compile_settings.enabled_precisions.insert(torch::kF32);
diff --git a/docsrc/tutorials/torchtrtc.rst b/docsrc/tutorials/torchtrtc.rst
index f1741f373a..55427744eb 100644
--- a/docsrc/tutorials/torchtrtc.rst
+++ b/docsrc/tutorials/torchtrtc.rst
@@ -17,12 +17,12 @@ to standard TorchScript. Load with ``torch.jit.load()`` and run like you would r
 .. code-block:: txt
 
     torchtrtc [input_file_path] [output_file_path]
-        [input_specs...] {OPTIONS}
+          [input_specs...] {OPTIONS}
 
-        Torch-TensorRT is a compiler for TorchScript, it will compile and optimize
-        TorchScript programs to run on NVIDIA GPUs using TensorRT
+          torchtrtc is a compiler for TorchScript, it will compile and optimize
+          TorchScript programs to run on NVIDIA GPUs using TensorRT
 
-        OPTIONS:
+          OPTIONS:
 
           -h, --help                        Display this help menu
           Verbiosity of the compiler
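A usage note for scripts that invoke torchtrtc: the only user-facing change in this patch is the long form of the repeatable precision flag, `--enabled-precision` becoming `--enable-precision`; the short form `-p` and the accepted precision tokens (`fp32`, `fp16`, etc., from the flag's help string above) are untouched. The sketch below is illustrative only; `model.ts`, `trt_model.ts`, and the input shape spec are placeholder values, not taken from the patch.

```
# Before this patch: old long option registered with args::ValueFlagList
torchtrtc model.ts trt_model.ts "(1,3,224,224)" --enabled-precision fp32 --enabled-precision fp16

# After this patch: same invocation with the renamed long option
torchtrtc model.ts trt_model.ts "(1,3,224,224)" --enable-precision fp32 --enable-precision fp16

# Unaffected: the short form can still be repeated
torchtrtc model.ts trt_model.ts "(1,3,224,224)" -p fp32 -p fp16
```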