diff --git a/README.md b/README.md
index aa607d71a4..0de8c71a19 100644
--- a/README.md
+++ b/README.md
@@ -73,11 +73,11 @@ torch.jit.save(trt_ts_module, "trt_torchscript_module.ts")
 ### Dependencies
 These are the following dependencies used to verify the testcases. TRTorch can work with other versions, but the tests are not guaranteed to pass.
 
-- Bazel 3.7.0
-- Libtorch 1.7.x (built with CUDA 11.0)
-- CUDA 11.0 (10.2 on Jetson)
-- cuDNN 8
-- TensorRT 7.2
+- Bazel 4.0.0
+- Libtorch 1.8.0 (built with CUDA 11.1)
+- CUDA 11.1 (10.2 on Jetson)
+- cuDNN 8.1
+- TensorRT 7.2.3
 
 ## Prebuilt Binaries and Wheel files
 
diff --git a/WORKSPACE b/WORKSPACE
index 1cb4500b90..a4bce2a937 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -35,7 +35,7 @@ git_repository(
 # CUDA should be installed on the system locally
 new_local_repository(
     name = "cuda",
-    path = "/usr/local/cuda-11.0/",
+    path = "/usr/local/cuda-11.1/",
     build_file = "@//third_party/cuda:BUILD",
 )
 
@@ -53,16 +53,16 @@ http_archive(
     name = "libtorch",
     build_file = "@//third_party/libtorch:BUILD",
     strip_prefix = "libtorch",
-    sha256 = "117f6dd65b7267839197397edd0b10fd2900b0f291e3e54b0b800caefc31bcb6",
-    urls = ["https://download.pytorch.org/libtorch/cu110/libtorch-cxx11-abi-shared-with-deps-1.7.1%2Bcu110.zip"],
+    sha256 = "62a2c06761c32576b30f5884240cf675b937945d929e4b13cc776de8d9c2236c",
+    urls = ["https://download.pytorch.org/libtorch/cu111/libtorch-cxx11-abi-shared-with-deps-1.8.0%2Bcu111.zip"],
 )
 
 http_archive(
     name = "libtorch_pre_cxx11_abi",
     build_file = "@//third_party/libtorch:BUILD",
     strip_prefix = "libtorch",
-    sha256 = "c77f926afd55d7e860ec9c7abc992c25be77c89771c3ec6fcc13ea42f07d46df",
-    urls = ["https://download.pytorch.org/libtorch/cu110/libtorch-shared-with-deps-1.7.1%2Bcu110.zip"],
+    sha256 = "1c8b0c0883dd17f5ce952d42ec5f7f0cc7ceb370307535cee26a66c10419f1f6",
+    urls = ["https://download.pytorch.org/libtorch/cu111/libtorch-shared-with-deps-1.8.0%2Bcu111.zip"],
 )
 
 # Download these tarballs manually from the NVIDIA website
@@ -71,18 +71,18 @@ http_archive(
 
 http_archive(
     name = "cudnn",
-    urls = ["https://developer.nvidia.com/compute/machine-learning/cudnn/secure/8.0.5/11.0_20201106/cudnn-11.0-linux-x64-v8.0.5.39.tgz",],
+    urls = ["https://developer.nvidia.com/compute/machine-learning/cudnn/secure/8.1.1.33/11.2_20210301/cudnn-11.2-linux-x64-v8.1.1.33.tgz",],
     build_file = "@//third_party/cudnn/archive:BUILD",
-    sha256 = "4e16ee7895deb4a8b1c194b812ba49586ef7d26902051401d3717511898a9b73",
+    sha256 = "98a8784e92862f20018d20c281b30d4a0cd951f93694f6433ccf4ae9c502ba6a",
     strip_prefix = "cuda"
 )
 
 http_archive(
     name = "tensorrt",
-    urls = ["https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/7.2.2/tars/TensorRT-7.2.2.3.Ubuntu-18.04.x86_64-gnu.cuda-11.0.cudnn8.0.tar.gz",],
+    urls = ["https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/7.2.3/tars/TensorRT-7.2.3.4.Ubuntu-18.04.x86_64-gnu.cuda-11.1.cudnn8.1.tar.gz",],
     build_file = "@//third_party/tensorrt/archive:BUILD",
-    strip_prefix = "TensorRT-7.2.2.3",
-    sha256 = "b5c325e38e1d92ce1ce92ca8b54ede9c224bf128c9a53eb0b9022f1ee4313ee0"
+    strip_prefix = "TensorRT-7.2.3.4",
+    sha256 = "d3a1f478e304b48878604fac70ce7920fece71f9cac62f925c9c59c197f5d087"
 )
 
 ####################################################################################
diff --git a/core/util/trt_util.cpp b/core/util/trt_util.cpp
index de34c9ac02..d32ba0cbaf 100644
--- a/core/util/trt_util.cpp
+++ b/core/util/trt_util.cpp
@@ -295,7 +295,7 @@ nvinfer1::DataType toTRTDataType(at::ScalarType t) {
 }
 
 c10::optional<nvinfer1::DataType> toTRTDataType(caffe2::TypeMeta dtype) {
-  if (auto t = c10::tryTypeMetaToScalarType(dtype)) {
+  if (auto t = c10::optTypeMetaToScalarType(dtype)) {
     return toTRTDataType(t.value());
   } else {
     return {};
diff --git a/cpp/api/include/trtorch/trtorch.h b/cpp/api/include/trtorch/trtorch.h
index 4739d9199a..38841a2d36 100644
--- a/cpp/api/include/trtorch/trtorch.h
+++ b/cpp/api/include/trtorch/trtorch.h
@@ -23,7 +23,7 @@ struct Module;
 } // namespace torch
 
 namespace c10 {
-enum class DeviceType : int16_t;
+enum class DeviceType : int8_t;
 enum class ScalarType : int8_t;
 template <class T>
 class ArrayRef;
diff --git a/py/requirements.txt b/py/requirements.txt
index 4a73b3af4d..b47209f0f2 100644
--- a/py/requirements.txt
+++ b/py/requirements.txt
@@ -1 +1 @@
-torch==1.7.1
+torch==1.8.0
diff --git a/py/setup.py b/py/setup.py
index 53790b1637..a27b4091e7 100644
--- a/py/setup.py
+++ b/py/setup.py
@@ -204,7 +204,7 @@ def run(self):
     long_description=long_description,
     ext_modules=ext_modules,
     install_requires=[
-        'torch>=1.7.0,<1.8.0',
+        'torch>=1.8.0+cu111,<1.9.0',
     ],
     setup_requires=[],
     cmdclass={
diff --git a/tests/core/conversion/converters/converter_test.bzl b/tests/core/conversion/converters/converter_test.bzl
index cecd2b74c0..37e3b4d244 100644
--- a/tests/core/conversion/converters/converter_test.bzl
+++ b/tests/core/conversion/converters/converter_test.bzl
@@ -12,5 +12,5 @@ def converter_test(name, visibility=None):
             ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
            "//conditions:default": ["@libtorch//:libtorch"],
         }),
-        timeout="short"
+        timeout="moderate"
     )
diff --git a/tests/core/conversion/converters/test_activation.cpp b/tests/core/conversion/converters/test_activation.cpp
index cf1cf2d49a..f3f3897250 100644
--- a/tests/core/conversion/converters/test_activation.cpp
+++ b/tests/core/conversion/converters/test_activation.cpp
@@ -66,28 +66,28 @@ TEST(Converters, ATenTanhConvertsCorrectly) {
 
 // TODO: Seems like the IR parser is not handling negative numbers well, need to
 // follow up with the PyTorch Team
-// TEST(Converters, ATenHardTanhConvertsCorrectly) {
-//   const auto graph = R"IR(
-//       graph(%0 : Tensor):
-//         %1 : float = prim::Constant[value=-1.0]()
-//         %2 : float = prim::Constant[value=1.0]()
-//         %3 : Tensor = aten::hardtanh(%0, %1, %2)
-//         return (%3))IR";
+TEST(Converters, ATenHardTanhConvertsCorrectly) {
+  const auto graph = R"IR(
+      graph(%0 : Tensor):
+        %1 : float = prim::Constant[value=-1.0]()
+        %2 : float = prim::Constant[value=1.0]()
+        %3 : Tensor = aten::hardtanh(%0, %1, %2)
+        return (%3))IR";
 
-//   auto g = std::make_shared<torch::jit::Graph>();
-//   torch::jit::script::parseIR(graph, &*g);
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, &*g);
 
-//   auto in = at::randint(-5, 5, {5}, {at::kCUDA});
-//   auto params = trtorch::core::conversion::get_named_params(g->inputs(),
-//   {}); auto jit_results = trtorch::tests::util::RunGraph(g, params, {in});
+  auto in = at::randint(-5, 5, {5}, {at::kCUDA});
+  auto params = trtorch::core::conversion::get_named_params(g->inputs(), {});
+  auto jit_results = trtorch::tests::util::RunGraph(g, params, {in});
 
-//   in = at::clone(in);
-//   params = trtorch::core::conversion::get_named_params(g->inputs(), {});
-//   auto trt_results = trtorch::tests::util::RunGraphEngine(g, params, {in});
+  in = at::clone(in);
+  params = trtorch::core::conversion::get_named_params(g->inputs(), {});
+  auto trt_results = trtorch::tests::util::RunGraphEngine(g, params, {in});
 
-//   ASSERT_TRUE(trtorch::tests::util::almostEqual(jit_results[0],
-//   trt_results[0], 2e-6));
-// }
+  ASSERT_TRUE(trtorch::tests::util::almostEqual(jit_results[0],
+      trt_results[0], 2e-6));
+}
 
 TEST(Converters, ATenHardTanhCustomRangeConvertsCorrectly) {
   const auto graph = R"IR(
@@ -114,7 +114,7 @@ TEST(Converters, ATenHardTanhCustomRangeConvertsCorrectly) {
 TEST(Converters, ATenPReLUConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1 : Float(1:1)):
+            %1 : Float(1, strides=[1])):
         %3 : Tensor = aten::prelu(%0, %1)
         return (%3))IR";
 
@@ -137,7 +137,7 @@ TEST(Converters, ATenPReLUConvertsCorrectly) {
 TEST(Converters, ATenPReLUMultiChannelConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1 : Float(10:1)):
+            %1 : Float(10, strides=[1])):
         %3 : Tensor = aten::prelu(%0, %1)
         return (%3))IR";
 
diff --git a/tests/core/conversion/converters/test_batch_norm.cpp b/tests/core/conversion/converters/test_batch_norm.cpp
index 8522850906..bfbae3d926 100644
--- a/tests/core/conversion/converters/test_batch_norm.cpp
+++ b/tests/core/conversion/converters/test_batch_norm.cpp
@@ -7,10 +7,10 @@
 TEST(Converters, ATenBatchNormConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1: Float(5:1),
-            %2: Float(5:1),
-            %3: Float(5:1),
-            %4: Float(5:1)):
+            %1: Float(5, strides=[1]),
+            %2: Float(5, strides=[1]),
+            %3: Float(5, strides=[1]),
+            %4: Float(5, strides=[1])):
         %5 : bool = prim::Constant[value=0]()
         %6 : float = prim::Constant[value=1.0000000000000001e-05]()
         %7 : float = prim::Constant[value=0.10000000000000001]()
diff --git a/tests/core/conversion/converters/test_conv_deconv.cpp b/tests/core/conversion/converters/test_conv_deconv.cpp
index 34652a523f..70b3ff7a8a 100644
--- a/tests/core/conversion/converters/test_conv_deconv.cpp
+++ b/tests/core/conversion/converters/test_conv_deconv.cpp
@@ -39,8 +39,8 @@ void conv_test_helper(std::string graph_ir) {
 TEST(Converters, ATenConvolutionConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1 : Float(8:45, 3:15, 5:5, 5:1),
-            %2 : Float(8:1)):
+            %1 : Float(8, 3, 5, 5, strides=[45, 15, 5, 1]),
+            %2 : Float(8)):
         %3 : int = prim::Constant[value=1]()
         %4 : int = prim::Constant[value=0]()
         %5 : int = prim::Constant[value=1]()
@@ -81,7 +81,7 @@ TEST(Converters, ATenConvolutionConvertsCorrectly) {
 TEST(Converters, ATenConvolutionNoBiasConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1 : Float(4:9, 1:9, 3:3, 3:1)):
+            %1 : Float(4, 1, 3, 3, strides=[9, 9, 3, 1])):
         %2 : None = prim::Constant()
         %3 : int = prim::Constant[value=1]()
         %4 : int = prim::Constant[value=0]()
@@ -119,8 +119,8 @@ TEST(Converters, ATenConvolutionNoBiasConvertsCorrectly) {
 TEST(Converters, ATenConvolutionWithStrideConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1 : Float(4:27, 3:9, 3:3, 3:1),
-            %2 : Float(4:1)):
+            %1 : Float(4, 3, 3, 3, strides=[27, 9, 3, 1]),
+            %2 : Float(4)):
         %3 : int = prim::Constant[value=3]()
         %4 : int = prim::Constant[value=0]()
         %5 : int = prim::Constant[value=1]()
@@ -162,8 +162,8 @@ TEST(Converters, ATenConvolutionWithStrideConvertsCorrectly) {
 TEST(Converters, ATenConvolutionWithPaddingConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1 : Float(4:48, 3:16, 4:4, 4:1),
-            %2 : Float(4:1)):
+            %1 : Float(4, 3, 4, 4, strides=[48, 16, 4, 1]),
+            %2 : Float(4)):
         %3 : int = prim::Constant[value=1]()
         %4 : int = prim::Constant[value=2]()
         %5 : int = prim::Constant[value=1]()
@@ -205,8 +205,8 @@ TEST(Converters, ATenConvolutionWithPaddingConvertsCorrectly) {
 TEST(Converters, ATenConvolution3dConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1 : Float(32:81, 3:27, 3:9, 3:3, 3:1),
-            %2 : Float(32:1)):
+            %1 : Float(32, 3, 3, 3, 3, strides=[81, 27, 9, 3, 1]),
+            %2 : Float(32)):
         %sv : int = prim::Constant[value=1]()
         %s : int[] = prim::ListConstruct(%sv, %sv, %sv)
         %pv : int = prim::Constant[value=0]()
@@ -247,7 +247,7 @@ TEST(Converters, ATenConvolution3dConvertsCorrectly) {
 TEST(Converters, ATenConvolution3dNoBiasConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1 : Float(32:81, 3:27, 3:9, 3:3, 3:1)):
+            %1 : Float(32, 3, 3, 3, 3, strides=[81, 27, 9, 3, 1])):
         %bias : None = prim::Constant()
         %sv : int = prim::Constant[value=1]()
         %s : int[] = prim::ListConstruct(%sv, %sv, %sv)
@@ -285,8 +285,8 @@ TEST(Converters, ATenConvolution3dNoBiasConvertsCorrectly) {
 TEST(Converters, ATenConvolution3dWithPaddingConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1 : Float(32:81, 3:27, 3:9, 3:3, 3:1),
-            %2 : Float(32:1)):
+            %1 : Float(32, 3, 3, 3, 3, strides=[81, 27, 9, 3, 1]),
+            %2 : Float(32)):
         %sv : int = prim::Constant[value=1]()
         %s : int[] = prim::ListConstruct(%sv, %sv, %sv)
         %pv : int = prim::Constant[value=1]()
@@ -327,8 +327,8 @@ TEST(Converters, ATenConvolution3dWithPaddingConvertsCorrectly) {
 TEST(Converters, ATenConvolution3dWithStrideDilationConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1 : Float(32:81, 3:27, 3:9, 3:3, 3:1),
-            %2 : Float(32:1)):
+            %1 : Float(32, 3, 3, 3, 3, strides=[81, 27, 9, 3, 1]),
+            %2 : Float(32)):
         %sv : int = prim::Constant[value=2]()
         %s : int[] = prim::ListConstruct(%sv, %sv, %sv)
         %pv : int = prim::Constant[value=1]()
@@ -369,8 +369,8 @@ TEST(Converters, ATenConvolution3dWithStrideDilationConvertsCorrectly) {
 TEST(Converters, ATenConvTransposeConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1 : Float(8:27, 3:9, 3:3, 3:1),
-            %2 : Float(8:1)):
+            %1 : Float(8, 3, 3, 3, strides=[27, 9, 3, 1]),
+            %2 : Float(8)):
         %3 : int = prim::Constant[value=1]()
         %4 : int = prim::Constant[value=0]()
         %5 : int = prim::Constant[value=1]()
@@ -411,7 +411,7 @@ TEST(Converters, ATenConvTransposeConvertsCorrectly) {
 TEST(Converters, ATenConvTransposeNoBiasConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1 : Float(4:9, 1:9, 3:3, 3:1)):
+            %1 : Float(4, 1, 3, 3, strides=[9, 9, 3, 1])):
         %2 : None = prim::Constant()
         %3 : int = prim::Constant[value=1]()
         %4 : int = prim::Constant[value=0]()
@@ -449,8 +449,8 @@ TEST(Converters, ATenConvTransposeNoBiasConvertsCorrectly) {
 TEST(Converters, ATenConvTransposeWithStrideConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1 : Float(4:27, 3:9, 3:3, 3:1),
-            %2 : Float(4:1)):
+            %1 : Float(4, 3, 3, 3, strides=[27, 9, 3, 1]),
+            %2 : Float(4)):
         %3 : int = prim::Constant[value=3]()
         %4 : int = prim::Constant[value=0]()
         %5 : int = prim::Constant[value=1]()
@@ -492,8 +492,8 @@ TEST(Converters, ATenConvTransposeWithStrideConvertsCorrectly) {
 TEST(Converters, ATenConvTransposeWithPaddingConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1 : Float(4:48, 3:16, 4:4, 4:1),
-            %2 : Float(4:1)):
+            %1 : Float(4, 3, 4, 4, strides=[48, 16, 4, 1]),
+            %2 : Float(4)):
         %3 : int = prim::Constant[value=1]()
         %4 : int = prim::Constant[value=2]()
         %5 : int = prim::Constant[value=1]()
@@ -535,8 +535,8 @@ TEST(Converters, ATenConvTransposeWithPaddingConvertsCorrectly) {
 TEST(Converters, ATenConvolutionWithGroupConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1 : Float(8:48, 1:16, 2:4, 2:1),
-            %2 : Float(8:1)):
+            %1 : Float(8, 1, 2, 2, strides=[48, 16, 4, 1]),
+            %2 : Float(8)):
         %3 : int = prim::Constant[value=1]()
         %4 : int = prim::Constant[value=2]()
         %5 : int = prim::Constant[value=1]()
@@ -578,8 +578,8 @@ TEST(Converters, ATenConvolutionWithGroupConvertsCorrectly) {
 TEST(Converters, ATenConvTransposeWithGroupConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1 : Float(8:56, 4:16, 3:3, 3:1),
-            %2 : Float(16:1)):
+            %1 : Float(8, 4, 3, 3, strides=[56, 16, 3, 1]),
+            %2 : Float(16)):
         %3 : int = prim::Constant[value=1]()
         %4 : int = prim::Constant[value=1]()
         %5 : int = prim::Constant[value=1]()
diff --git a/tests/core/conversion/converters/test_linear.cpp b/tests/core/conversion/converters/test_linear.cpp
index 46662a6e7a..54c5e6f168 100644
--- a/tests/core/conversion/converters/test_linear.cpp
+++ b/tests/core/conversion/converters/test_linear.cpp
@@ -7,7 +7,7 @@
 TEST(Converters, ATenLinearNoBiasConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1 : Float(3:2, 2:1)):
+            %1 : Float(3, 2, strides=[2, 1])):
         %2 : None = prim::Constant()
         %3 : Tensor = aten::linear(%0, %1, %2)
         return (%3))IR";
@@ -33,8 +33,8 @@ TEST(Converters, ATenLinearNoBiasConvertsCorrectly) {
 TEST(Converters, ATenLinearBiasConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1 : Float(2:3, 3:1),
-            %2 : Float(2:1)):
+            %1 : Float(2, 3, strides=[3, 1]),
+            %2 : Float(2)):
         %3 : Tensor = aten::linear(%0, %1, %2)
         return (%3))IR";
 
diff --git a/tests/core/conversion/converters/test_select.cpp b/tests/core/conversion/converters/test_select.cpp
index 44d6669e5e..d3df97872b 100644
--- a/tests/core/conversion/converters/test_select.cpp
+++ b/tests/core/conversion/converters/test_select.cpp
@@ -88,7 +88,7 @@ TEST(Converters, ATenNarrowStartScalarConvertsCorrectly) {
 
 TEST(Converters, ATenEmbeddingConvertsCorrectly) {
   const auto graph = R"IR(
-      graph(%1 : Tensor, %emb_weight : Float(10:3, 3:1)):
+      graph(%1 : Tensor, %emb_weight : Float(10, 3, strides=[3, 1])):
         %2 : bool = prim::Constant[value=0]()
         %3 : int = prim::Constant[value=-1]()
         %5 : Tensor = aten::embedding(%emb_weight, %1, %3, %2, %2)
diff --git a/tests/core/conversion/converters/test_stack.cpp b/tests/core/conversion/converters/test_stack.cpp
index ec64c101c0..729e18d037 100644
--- a/tests/core/conversion/converters/test_stack.cpp
+++ b/tests/core/conversion/converters/test_stack.cpp
@@ -31,7 +31,7 @@ TEST(Converters, ATenStackPureTensorConvertsCorrectly) {
 TEST(Converters, ATenStackDiffTensorConvertsCorrectly) {
   const auto graph = R"IR(
       graph(%0 : Tensor,
-            %1 : Float(4:16, 4:4, 4:1)):
+            %1 : Float(4, 4, 4, strides=[16, 4, 1])):
         %2 : Tensor[] = prim::ListConstruct(%0, %1)
         %3 : int = prim::Constant[value=1]()
         %4 : Tensor = aten::stack(%2, %3)
diff --git a/tests/py/requirements.txt b/tests/py/requirements.txt
index e17a1d0c6c..e56850d006 100644
--- a/tests/py/requirements.txt
+++ b/tests/py/requirements.txt
@@ -1 +1 @@
-torchvision==0.8.2
+torchvision==0.9.0
\ No newline at end of file