
Commit

feat!: Updating versions of CUDA, cuDNN, TensorRT and PyTorch
BREAKING CHANGE: PyTorch version has been bumped to 1.8.0
Default CUDA version is CUDA 11.1
TensorRT version is TensorRT 7.2.3.4
cuDNN version is now cuDNN 8.1

Signed-off-by: Naren Dasan <[email protected]>
narendasan committed Mar 6, 2021
1 parent 6bb9fbf commit 71c4dcb
Showing 14 changed files with 75 additions and 75 deletions.
README.md (5 additions & 5 deletions)
@@ -73,11 +73,11 @@ torch.jit.save(trt_ts_module, "trt_torchscript_module.ts")
### Dependencies
These are the dependencies used to verify the test cases. TRTorch can work with other versions, but the tests are not guaranteed to pass.

-- Bazel 3.7.0
-- Libtorch 1.7.x (built with CUDA 11.0)
-- CUDA 11.0 (10.2 on Jetson)
-- cuDNN 8
-- TensorRT 7.2
+- Bazel 4.0.0
+- Libtorch 1.8.0 (built with CUDA 11.1)
+- CUDA 11.1 (10.2 on Jetson)
+- cuDNN 8.1
+- TensorRT 7.2.3

## Prebuilt Binaries and Wheel files

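To confirm that a local environment matches the new pins above, the runtime versions can be read directly from torch. A minimal sketch (the expected values assume the 1.8.0+cu111 wheel; the exact cuDNN integer depends on the patch release):

```python
# Sketch: check the installed torch build against the bumped dependency pins.
import torch

print(torch.__version__)               # e.g. "1.8.0+cu111"
print(torch.version.cuda)              # e.g. "11.1"
print(torch.backends.cudnn.version())  # e.g. 8100 or higher for cuDNN 8.1.x
```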
WORKSPACE (10 additions & 10 deletions)
@@ -35,7 +35,7 @@ git_repository(
# CUDA should be installed on the system locally
new_local_repository(
name = "cuda",
path = "/usr/local/cuda-11.0/",
path = "/usr/local/cuda-11.1/",
build_file = "@//third_party/cuda:BUILD",
)

@@ -53,16 +53,16 @@ http_archive(
name = "libtorch",
build_file = "@//third_party/libtorch:BUILD",
strip_prefix = "libtorch",
sha256 = "117f6dd65b7267839197397edd0b10fd2900b0f291e3e54b0b800caefc31bcb6",
urls = ["https://download.pytorch.org/libtorch/cu110/libtorch-cxx11-abi-shared-with-deps-1.7.1%2Bcu110.zip"],
sha256 = "62a2c06761c32576b30f5884240cf675b937945d929e4b13cc776de8d9c2236c",
urls = ["https://download.pytorch.org/libtorch/cu111/libtorch-cxx11-abi-shared-with-deps-1.8.0%2Bcu111.zip"],
)

http_archive(
name = "libtorch_pre_cxx11_abi",
build_file = "@//third_party/libtorch:BUILD",
strip_prefix = "libtorch",
sha256 = "c77f926afd55d7e860ec9c7abc992c25be77c89771c3ec6fcc13ea42f07d46df",
urls = ["https://download.pytorch.org/libtorch/cu110/libtorch-shared-with-deps-1.7.1%2Bcu110.zip"],
sha256 = "1c8b0c0883dd17f5ce952d42ec5f7f0cc7ceb370307535cee26a66c10419f1f6",
urls = ["https://download.pytorch.org/libtorch/cu111/libtorch-shared-with-deps-1.8.0%2Bcu111.zip"],
)

# Download these tarballs manually from the NVIDIA website
@@ -71,18 +71,18 @@ http_archive(

http_archive(
name = "cudnn",
urls = ["https://developer.nvidia.com/compute/machine-learning/cudnn/secure/8.0.5/11.0_20201106/cudnn-11.0-linux-x64-v8.0.5.39.tgz",],
urls = ["https://developer.nvidia.com/compute/machine-learning/cudnn/secure/8.1.1.33/11.2_20210301/cudnn-11.2-linux-x64-v8.1.1.33.tgz",],
build_file = "@//third_party/cudnn/archive:BUILD",
sha256 = "4e16ee7895deb4a8b1c194b812ba49586ef7d26902051401d3717511898a9b73",
sha256 = "98a8784e92862f20018d20c281b30d4a0cd951f93694f6433ccf4ae9c502ba6a",
strip_prefix = "cuda"
)

http_archive(
name = "tensorrt",
urls = ["https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/7.2.2/tars/TensorRT-7.2.2.3.Ubuntu-18.04.x86_64-gnu.cuda-11.0.cudnn8.0.tar.gz",],
urls = ["https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/7.2.3/tars/TensorRT-7.2.3.4.Ubuntu-18.04.x86_64-gnu.cuda-11.1.cudnn8.1.tar.gz",],
build_file = "@//third_party/tensorrt/archive:BUILD",
strip_prefix = "TensorRT-7.2.2.3",
sha256 = "b5c325e38e1d92ce1ce92ca8b54ede9c224bf128c9a53eb0b9022f1ee4313ee0"
strip_prefix = "TensorRT-7.2.3.4",
sha256 = "d3a1f478e304b48878604fac70ce7920fece71f9cac62f925c9c59c197f5d087"
)

####################################################################################
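Because NVIDIA requires the cuDNN and TensorRT tarballs to be downloaded manually, it is worth recomputing a digest before relying on the `sha256` pins above. A minimal sketch, assuming the cuDNN tarball sits in the working directory under its default download name:

```python
# Sketch: recompute a downloaded tarball's SHA-256 and compare it against the
# WORKSPACE pin. The filename below is the assumed local download name.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

print(sha256_of("cudnn-11.2-linux-x64-v8.1.1.33.tgz"))
# expect: 98a8784e92862f20018d20c281b30d4a0cd951f93694f6433ccf4ae9c502ba6a
```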
core/util/trt_util.cpp (1 addition & 1 deletion)
@@ -295,7 +295,7 @@ nvinfer1::DataType toTRTDataType(at::ScalarType t) {
}

c10::optional<nvinfer1::DataType> toTRTDataType(caffe2::TypeMeta dtype) {
-  if (auto t = c10::tryTypeMetaToScalarType(dtype)) {
+  if (auto t = c10::optTypeMetaToScalarType(dtype)) {
return toTRTDataType(t.value());
} else {
return {};
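Note: this call-site update tracks the PyTorch 1.8 C++ API, where the optional-returning conversion helper is spelled `c10::optTypeMetaToScalarType`; the 1.7-era `tryTypeMetaToScalarType` spelling no longer resolves against Libtorch 1.8 headers.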
cpp/api/include/trtorch/trtorch.h (1 addition & 1 deletion)
@@ -23,7 +23,7 @@ struct Module;
} // namespace torch

namespace c10 {
-enum class DeviceType : int16_t;
+enum class DeviceType : int8_t;
enum class ScalarType : int8_t;
template <class>
class ArrayRef;
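Note: a forward declaration of a scoped enum must repeat the underlying type exactly. PyTorch 1.8 defines `c10::DeviceType` with an `int8_t` underlying type, so the stale `int16_t` declaration would conflict with the real definition once the Libtorch 1.8 headers are included.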
py/requirements.txt (1 addition & 1 deletion)
@@ -1 +1 @@
-torch==1.7.1
+torch==1.8.0
py/setup.py (1 addition & 1 deletion)
@@ -204,7 +204,7 @@ def run(self):
long_description=long_description,
ext_modules=ext_modules,
install_requires=[
-'torch>=1.7.0,<1.8.0',
+'torch>=1.8.0+cu111,<1.9.0',
],
setup_requires=[],
cmdclass={
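Note: `1.8.0+cu111` is a PEP 440 local version label; installing a wheel that carries it typically means pointing pip at PyTorch's wheel index, e.g. `pip install torch==1.8.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html`.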
tests/core/conversion/converters/converter_test.bzl (1 addition & 1 deletion)
@@ -12,5 +12,5 @@ def converter_test(name, visibility=None):
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch//:libtorch"],
}),
timeout="short"
timeout="moderate"
)
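Note: in Bazel's test-timeout vocabulary, `short` allows 60 seconds while `moderate` allows 300, presumably to give the converter tests more headroom under the heavier PyTorch 1.8 / TensorRT 7.2.3 stack.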
tests/core/conversion/converters/test_activation.cpp (20 additions & 20 deletions)
@@ -66,28 +66,28 @@ TEST(Converters, ATenTanhConvertsCorrectly) {

-// TODO: Seems like the IR parser is not handling negative numbers well, need to
-// follow up with the PyTorch Team
-// TEST(Converters, ATenHardTanhConvertsCorrectly) {
-//   const auto graph = R"IR(
-//     graph(%0 : Tensor):
-//       %1 : float = prim::Constant[value=-1.0]()
-//       %2 : float = prim::Constant[value=1.0]()
-//       %3 : Tensor = aten::hardtanh(%0, %1, %2)
-//       return (%3))IR";
+TEST(Converters, ATenHardTanhConvertsCorrectly) {
+  const auto graph = R"IR(
+    graph(%0 : Tensor):
+      %1 : float = prim::Constant[value=-1.0]()
+      %2 : float = prim::Constant[value=1.0]()
+      %3 : Tensor = aten::hardtanh(%0, %1, %2)
+      return (%3))IR";

-  // auto g = std::make_shared<torch::jit::Graph>();
-  // torch::jit::script::parseIR(graph, &*g);
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, &*g);

-  // auto in = at::randint(-5, 5, {5}, {at::kCUDA});
-  // auto params = trtorch::core::conversion::get_named_params(g->inputs(), {});
-  // auto jit_results = trtorch::tests::util::RunGraph(g, params, {in});
+  auto in = at::randint(-5, 5, {5}, {at::kCUDA});
+  auto params = trtorch::core::conversion::get_named_params(g->inputs(), {});
+  auto jit_results = trtorch::tests::util::RunGraph(g, params, {in});

-  // in = at::clone(in);
-  // params = trtorch::core::conversion::get_named_params(g->inputs(), {});
-  // auto trt_results = trtorch::tests::util::RunGraphEngine(g, params, {in});
+  in = at::clone(in);
+  params = trtorch::core::conversion::get_named_params(g->inputs(), {});
+  auto trt_results = trtorch::tests::util::RunGraphEngine(g, params, {in});

-  // ASSERT_TRUE(trtorch::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
-  // }
+  ASSERT_TRUE(trtorch::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
+}

TEST(Converters, ATenHardTanhCustomRangeConvertsCorrectly) {
const auto graph = R"IR(
@@ -114,7 +114,7 @@ TEST(Converters, ATenHardTanhCustomRangeConvertsCorrectly) {
TEST(Converters, ATenPReLUConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1 : Float(1:1)):
+      %1 : Float(1, strides=[1])):
%3 : Tensor = aten::prelu(%0, %1)
return (%3))IR";

@@ -137,7 +137,7 @@ TEST(Converters, ATenPReLUConvertsCorrectly) {
TEST(Converters, ATenPReLUMultiChannelConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1 : Float(10:1)):
+      %1 : Float(10, strides=[1])):
%3 : Tensor = aten::prelu(%0, %1)
return (%3))IR";

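The `Float(10:1)` → `Float(10, strides=[1])` rewrites in this file (and the test files below) follow the PyTorch 1.8 IR printer/parser, which replaced the old `size:stride` shorthand with an explicit `strides=[...]` list. A minimal sketch of how to see the new spelling from a traced graph (a CPU tensor is used purely for illustration):

```python
# Sketch: trace a function under PyTorch 1.8 and print its graph; tensor
# types appear in the new Float(sizes..., strides=[...], ...) form.
import torch

def clamp_unit(x):
    return torch.nn.functional.hardtanh(x, -1.0, 1.0)

traced = torch.jit.trace(clamp_unit, torch.randn(5))
print(traced.graph)  # e.g. Float(5, strides=[1], requires_grad=0, device=cpu)
```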
tests/core/conversion/converters/test_batch_norm.cpp (4 additions & 4 deletions)
@@ -7,10 +7,10 @@
TEST(Converters, ATenBatchNormConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1: Float(5:1),
-      %2: Float(5:1),
-      %3: Float(5:1),
-      %4: Float(5:1)):
+      %1: Float(5, strides=[1]),
+      %2: Float(5, strides=[1]),
+      %3: Float(5, strides=[1]),
+      %4: Float(5, strides=[1])):
%5 : bool = prim::Constant[value=0]()
%6 : float = prim::Constant[value=1.0000000000000001e-05]()
%7 : float = prim::Constant[value=0.10000000000000001]()
tests/core/conversion/converters/test_conv_deconv.cpp (25 additions & 25 deletions)
@@ -39,8 +39,8 @@ void conv_test_helper(std::string graph_ir) {
TEST(Converters, ATenConvolutionConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1 : Float(8:45, 3:15, 5:5, 5:1),
-      %2 : Float(8:1)):
+      %1 : Float(8, 3, 5, 5, strides=[45, 15, 5, 1]),
+      %2 : Float(8)):
%3 : int = prim::Constant[value=1]()
%4 : int = prim::Constant[value=0]()
%5 : int = prim::Constant[value=1]()
@@ -81,7 +81,7 @@ TEST(Converters, ATenConvolutionConvertsCorrectly) {
TEST(Converters, ATenConvolutionNoBiasConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1 : Float(4:9, 1:9, 3:3, 3:1)):
+      %1 : Float(4, 1, 3, 3, strides=[9, 9, 3, 1])):
%2 : None = prim::Constant()
%3 : int = prim::Constant[value=1]()
%4 : int = prim::Constant[value=0]()
@@ -119,8 +119,8 @@ TEST(Converters, ATenConvolutionNoBiasConvertsCorrectly) {
TEST(Converters, ATenConvolutionWithStrideConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1 : Float(4:27, 3:9, 3:3, 3:1),
-      %2 : Float(4:1)):
+      %1 : Float(4, 3, 3, 3, strides=[27, 9, 3, 1]),
+      %2 : Float(4)):
%3 : int = prim::Constant[value=3]()
%4 : int = prim::Constant[value=0]()
%5 : int = prim::Constant[value=1]()
@@ -162,8 +162,8 @@ TEST(Converters, ATenConvolutionWithStrideConvertsCorrectly) {
TEST(Converters, ATenConvolutionWithPaddingConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1 : Float(4:48, 3:16, 4:4, 4:1),
-      %2 : Float(4:1)):
+      %1 : Float(4, 3, 4, 4, strides=[48, 16, 4, 1]),
+      %2 : Float(4)):
%3 : int = prim::Constant[value=1]()
%4 : int = prim::Constant[value=2]()
%5 : int = prim::Constant[value=1]()
@@ -205,8 +205,8 @@ TEST(Converters, ATenConvolutionWithPaddingConvertsCorrectly) {
TEST(Converters, ATenConvolution3dConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1 : Float(32:81, 3:27, 3:9, 3:3, 3:1),
-      %2 : Float(32:1)):
+      %1 : Float(32, 3, 3, 3, 3, strides=[81, 27, 9, 3, 1]),
+      %2 : Float(32)):
%sv : int = prim::Constant[value=1]()
%s : int[] = prim::ListConstruct(%sv, %sv, %sv)
%pv : int = prim::Constant[value=0]()
@@ -247,7 +247,7 @@ TEST(Converters, ATenConvolution3dConvertsCorrectly) {
TEST(Converters, ATenConvolution3dNoBiasConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1 : Float(32:81, 3:27, 3:9, 3:3, 3:1)):
+      %1 : Float(32, 3, 3, 3, 3, strides=[81, 27, 9, 3, 1])):
%bias : None = prim::Constant()
%sv : int = prim::Constant[value=1]()
%s : int[] = prim::ListConstruct(%sv, %sv, %sv)
@@ -285,8 +285,8 @@ TEST(Converters, ATenConvolution3dNoBiasConvertsCorrectly) {
TEST(Converters, ATenConvolution3dWithPaddingConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1 : Float(32:81, 3:27, 3:9, 3:3, 3:1),
-      %2 : Float(32:1)):
+      %1 : Float(32, 3, 3, 3, 3, strides=[81, 27, 9, 3, 1]),
+      %2 : Float(32)):
%sv : int = prim::Constant[value=1]()
%s : int[] = prim::ListConstruct(%sv, %sv, %sv)
%pv : int = prim::Constant[value=1]()
@@ -327,8 +327,8 @@ TEST(Converters, ATenConvolution3dWithPaddingConvertsCorrectly) {
TEST(Converters, ATenConvolution3dWithStrideDilationConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1 : Float(32:81, 3:27, 3:9, 3:3, 3:1),
-      %2 : Float(32:1)):
+      %1 : Float(32, 3, 3, 3, 3, strides=[81, 27, 9, 3, 1]),
+      %2 : Float(32)):
%sv : int = prim::Constant[value=2]()
%s : int[] = prim::ListConstruct(%sv, %sv, %sv)
%pv : int = prim::Constant[value=1]()
@@ -369,8 +369,8 @@ TEST(Converters, ATenConvolution3dWithStrideDilationConvertsCorrectly) {
TEST(Converters, ATenConvTransposeConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1 : Float(8:27, 3:9, 3:3, 3:1),
-      %2 : Float(8:1)):
+      %1 : Float(8, 3, 3, 3, strides=[27, 9, 3, 1]),
+      %2 : Float(8)):
%3 : int = prim::Constant[value=1]()
%4 : int = prim::Constant[value=0]()
%5 : int = prim::Constant[value=1]()
@@ -411,7 +411,7 @@ TEST(Converters, ATenConvTransposeConvertsCorrectly) {
TEST(Converters, ATenConvTransposeNoBiasConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1 : Float(4:9, 1:9, 3:3, 3:1)):
+      %1 : Float(4, 1, 3, 3, strides=[9, 9, 3, 1])):
%2 : None = prim::Constant()
%3 : int = prim::Constant[value=1]()
%4 : int = prim::Constant[value=0]()
@@ -449,8 +449,8 @@ TEST(Converters, ATenConvTransposeNoBiasConvertsCorrectly) {
TEST(Converters, ATenConvTransposeWithStrideConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1 : Float(4:27, 3:9, 3:3, 3:1),
-      %2 : Float(4:1)):
+      %1 : Float(4, 3, 3, 3, strides=[27, 9, 3, 1]),
+      %2 : Float(4)):
%3 : int = prim::Constant[value=3]()
%4 : int = prim::Constant[value=0]()
%5 : int = prim::Constant[value=1]()
@@ -492,8 +492,8 @@ TEST(Converters, ATenConvTransposeWithStrideConvertsCorrectly) {
TEST(Converters, ATenConvTransposeWithPaddingConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1 : Float(4:48, 3:16, 4:4, 4:1),
-      %2 : Float(4:1)):
+      %1 : Float(4, 3, 4, 4, strides=[48, 16, 4, 1]),
+      %2 : Float(4)):
%3 : int = prim::Constant[value=1]()
%4 : int = prim::Constant[value=2]()
%5 : int = prim::Constant[value=1]()
@@ -535,8 +535,8 @@ TEST(Converters, ATenConvTransposeWithPaddingConvertsCorrectly) {
TEST(Converters, ATenConvolutionWithGroupConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1 : Float(8:48, 1:16, 2:4, 2:1),
-      %2 : Float(8:1)):
+      %1 : Float(8, 1, 2, 2, strides=[48, 16, 4, 1]),
+      %2 : Float(8)):
%3 : int = prim::Constant[value=1]()
%4 : int = prim::Constant[value=2]()
%5 : int = prim::Constant[value=1]()
@@ -578,8 +578,8 @@ TEST(Converters, ATenConvolutionWithGroupConvertsCorrectly) {
TEST(Converters, ATenConvTransposeWithGroupConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1 : Float(8:56, 4:16, 3:3, 3:1),
-      %2 : Float(16:1)):
+      %1 : Float(8, 4, 3, 3, strides=[56, 16, 3, 1]),
+      %2 : Float(16)):
%3 : int = prim::Constant[value=1]()
%4 : int = prim::Constant[value=1]()
%5 : int = prim::Constant[value=1]()
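For a contiguous tensor, the `strides=[...]` values follow directly from the shape, which is a quick way to sanity-check annotations like the ones above:

```python
# Sketch: contiguous strides for a (4, 3, 3, 3) conv weight match the
# Float(4, 3, 3, 3, strides=[27, 9, 3, 1]) annotation used in these tests.
import torch

w = torch.randn(4, 3, 3, 3)
print(w.stride())  # (27, 9, 3, 1)
```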
tests/core/conversion/converters/test_linear.cpp (3 additions & 3 deletions)
@@ -7,7 +7,7 @@
TEST(Converters, ATenLinearNoBiasConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1 : Float(3:2, 2:1)):
+      %1 : Float(3, 2, strides=[2, 1])):
%2 : None = prim::Constant()
%3 : Tensor = aten::linear(%0, %1, %2)
return (%3))IR";
@@ -33,8 +33,8 @@ TEST(Converters, ATenLinearNoBiasConvertsCorrectly) {
TEST(Converters, ATenLinearBiasConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1 : Float(2:3, 3:1),
-      %2 : Float(2:1)):
+      %1 : Float(2, 3, strides=[3, 1]),
+      %2 : Float(2)):
%3 : Tensor = aten::linear(%0, %1, %2)
return (%3))IR";

tests/core/conversion/converters/test_select.cpp (1 addition & 1 deletion)
@@ -88,7 +88,7 @@ TEST(Converters, ATenNarrowStartScalarConvertsCorrectly) {

TEST(Converters, ATenEmbeddingConvertsCorrectly) {
const auto graph = R"IR(
-    graph(%1 : Tensor, %emb_weight : Float(10:3, 3:1)):
+    graph(%1 : Tensor, %emb_weight : Float(10, 3, strides=[3, 1])):
%2 : bool = prim::Constant[value=0]()
%3 : int = prim::Constant[value=-1]()
%5 : Tensor = aten::embedding(%emb_weight, %1, %3, %2, %2)
tests/core/conversion/converters/test_stack.cpp (1 addition & 1 deletion)
@@ -31,7 +31,7 @@ TEST(Converters, ATenStackPureTensorConvertsCorrectly) {
TEST(Converters, ATenStackDiffTensorConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor,
-      %1 : Float(4:16, 4:4, 4:1)):
+      %1 : Float(4, 4, 4, strides=[16, 4, 1])):
%2 : Tensor[] = prim::ListConstruct(%0, %1)
%3 : int = prim::Constant[value=1]()
%4 : Tensor = aten::stack(%2, %3)
tests/py/requirements.txt (1 addition & 1 deletion)
@@ -1 +1 @@
-torchvision==0.8.2
+torchvision==0.9.0
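torchvision 0.9.0 is the release paired with torch 1.8.0, keeping the Python test requirements in lockstep with the Libtorch bump.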
