Merge pull request #1158 from pytorch/trt_8.4_rel_1.1
feat: Upgrade TensorRT to 8.4 EA
narendasan authored Jul 16, 2022
2 parents 3cf58a2 + 5da47c1 commit c2e396a
Showing 5 changed files with 11 additions and 47 deletions.
README.md: 4 changes (2 additions, 2 deletions)
@@ -113,8 +113,8 @@ These are the following dependencies used to verify the testcases. Torch-TensorR
 - Bazel 4.2.1
 - Libtorch 1.11.0 (built with CUDA 11.3)
 - CUDA 11.3 (10.2 on Jetson)
-- cuDNN 8.2.1
-- TensorRT 8.2.4.2 (TensorRT 8.2.1 on Jetson)
+- cuDNN 8.3.2
+- TensorRT 8.4.0.6

 ## Prebuilt Binaries and Wheel files

WORKSPACE: 12 changes (6 additions, 6 deletions)
@@ -76,20 +76,20 @@ http_archive(
 http_archive(
     name = "cudnn",
     build_file = "@//third_party/cudnn/archive:BUILD",
-    sha256 = "0e5d2df890b9967efa6619da421310d97323565a79f05a1a8cb9b7165baad0d7",
-    strip_prefix = "cuda",
+    sha256 = "5500953c08c5e5d1dddcfda234f9efbddcdbe43a53b26dc0a82c723fa170c457",
+    strip_prefix = "cudnn-linux-x86_64-8.3.2.44_cuda11.5-archive",
     urls = [
-        "https://developer.nvidia.com/compute/machine-learning/cudnn/secure/8.2.4/11.4_20210831/cudnn-11.4-linux-x64-v8.2.4.15.tgz",
+        "https://developer.nvidia.com/compute/cudnn/secure/8.3.2/local_installers/11.5/cudnn-linux-x86_64-8.3.2.44_cuda11.5-archive.tar.xz",
     ],
 )

 http_archive(
     name = "tensorrt",
     build_file = "@//third_party/tensorrt/archive:BUILD",
-    sha256 = "826180eaaecdf9a7e76116855b9f1f3400ea9b06e66b06a3f6a0747ba6f863ad",
-    strip_prefix = "TensorRT-8.2.4.2",
+    sha256 = "0cd8071d717f1b870ada79ce5889ab3d702439c356e96cbef23d0b469007fcb4",
+    strip_prefix = "TensorRT-8.4.0.6",
     urls = [
-        "https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/8.2.4/tars/tensorrt-8.2.4.2.linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz",
+        "https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/8.4.0/tars/tensorrt-8.4.0.6.linux.x86_64-gnu.cuda-11.6.cudnn8.3.tar.gz",
     ],
 )

core/conversion/converters/converter_util.cpp: 4 changes (2 additions, 2 deletions)
@@ -135,9 +135,9 @@ nvinfer1::ITensor* castITensor(ConversionCtx* ctx, nvinfer1::ITensor* tensor, nv

   auto id_layer = ctx->net->addIdentity(*tensor);
   TORCHTRT_CHECK(id_layer, "Unable to create identity layer for ITensor: " << tensor_id.str());
+  // layer->setOutputType should be used for casting and not manually setting output_tensor->setType()
+  id_layer->setOutputType(0, dtype);
   auto casted_tensor = id_layer->getOutput(0);
-  casted_tensor->setType(dtype);

   LOG_DEBUG(ctx->logger, "Casting ITensor " << tensor_id.str() << " from " << tensor->getType() << " to " << dtype);

   std::stringstream ss;
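
For context: the hunk above replaces a direct setType() call on the identity layer's output tensor with setOutputType() on the identity layer itself, so the TensorRT builder performs the conversion. A minimal sketch of that pattern follows; it assumes an existing nvinfer1::INetworkDefinition, and cast_tensor and its parameters are illustrative names rather than the project's converter code.

#include <NvInfer.h>

// Sketch only: cast an ITensor by routing it through an identity layer and
// requesting the target dtype on the layer output, instead of calling
// setType() on the returned tensor directly.
nvinfer1::ITensor* cast_tensor(nvinfer1::INetworkDefinition* net,
                               nvinfer1::ITensor* tensor,
                               nvinfer1::DataType dtype) {
  auto* id_layer = net->addIdentity(*tensor);  // no-op layer that carries the cast
  id_layer->setOutputType(0, dtype);           // builder emits output 0 in the requested dtype
  return id_layer->getOutput(0);               // use this tensor downstream; no setType() needed
}
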
tests/core/partitioning/test_fallback_graph_output.cpp: 36 changes (0 additions, 36 deletions)
@@ -66,40 +66,4 @@ TEST(Partitioning, ComputeMobileNetFallbackGraphCorrectly) {
   auto trt_results = trt_mod.forward(trt_inputs_ivalues).toTensor();
   ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results, trt_results, 2e-6));
 }
-
-TEST(Partitioning, ComputeResNet50HalfFallbackGraphCorrectly) {
-  torch::jit::script::Module mod;
-  try {
-    mod = torch::jit::load("tests/modules/resnet50_traced.jit.pt");
-  } catch (const c10::Error& e) {
-    std::cerr << "error loading the model\n";
-    return;
-  }
-
-  mod.to(torch::kHalf);
-
-  const std::vector<std::vector<int64_t>> input_shapes = {{1, 3, 224, 224}};
-  std::vector<torch::jit::IValue> jit_inputs_ivalues;
-  std::vector<torch::jit::IValue> trt_inputs_ivalues;
-  for (auto in_shape : input_shapes) {
-    auto in = at::randint(5, in_shape, {at::kCUDA}).to(torch::kHalf);
-    jit_inputs_ivalues.push_back(in.clone());
-    trt_inputs_ivalues.push_back(in.clone());
-  }
-
-  auto in_shape = torch_tensorrt::core::ir::Input({1, 3, 224, 224});
-  in_shape.dtype = nvinfer1::DataType::kHALF;
-
-  std::vector<torch_tensorrt::core::ir::Input> input_ranges({in_shape});
-  auto g = mod.get_method("forward").graph();
-  torch_tensorrt::core::CompileSpec cfg(input_ranges);
-  cfg.partition_info.enabled = true;
-  cfg.partition_info.forced_fallback_operators.push_back("aten::add");
-
-  auto jit_results = mod.forward(jit_inputs_ivalues).toTensor();
-  auto trt_mod = torch_tensorrt::core::CompileGraph(mod, cfg);
-  auto trt_results = trt_mod.forward(trt_inputs_ivalues).toTensor();
-  // Lower threshold because FP16
-  ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results, trt_results, 2e-1));
-}
 #endif
third_party/cudnn/archive/BUILD: 2 changes (1 addition, 1 deletion)
@@ -9,7 +9,7 @@ cc_library(

 cc_import(
     name = "cudnn_lib",
-    shared_library = "lib64/libcudnn.so",
+    shared_library = "lib/libcudnn.so",
     visibility = ["//visibility:private"],
 )
