chore: Upgrade TensorRT version to TRT 10 EA (#2699)
Co-authored-by: Evan Li <[email protected]>
peri044 and zewenli98 committed Apr 26, 2024
1 parent 30f5094 commit 29e1380
Showing 7 changed files with 25 additions and 36 deletions.
1 change: 1 addition & 0 deletions .github/workflows/build-test.yml

@@ -264,6 +264,7 @@ jobs:
       pre-script: ${{ matrix.pre-script }}
       script: |
         export USE_HOST_DEPS=1
+        export LD_LIBRARY_PATH=/opt/torch-tensorrt-builds/TensorRT-10.0.0.6/lib:$LD_LIBRARY_PATH
         pushd .
         cd tests/py/core
         ${CONDA_RUN} python -m pip install --pre pytest-xdist timm transformers parameterized expecttest==0.1.6 --use-deprecated=legacy-resolver
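
The exported LD_LIBRARY_PATH points the CI job at the TensorRT 10.0.0.6 shared libraries unpacked under /opt/torch-tensorrt-builds. A minimal sanity check for the same environment (an assumed snippet, not part of this commit): if the path is wrong the import fails outright, and if it succeeds we can confirm the TRT 10 EA build was picked up.

    import tensorrt as trt

    # Importing tensorrt dlopens libnvinfer from LD_LIBRARY_PATH; a mismatched
    # path fails right here. Otherwise, verify we got the TRT 10 EA build.
    assert trt.__version__.startswith("10."), trt.__version__
    print(trt.__version__)  # expected: 10.0.0.6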
2 changes: 1 addition & 1 deletion README.md

@@ -116,7 +116,7 @@ torch.jit.save(trt_ts_module, "trt_torchscript_module.ts") # save the TRT embedd
 These are the following dependencies used to verify the testcases. Torch-TensorRT can work with other versions, but the tests are not guaranteed to pass.

 - Bazel 5.2.0
-- Libtorch 2.4.0.dev (latest nightly) (built with CUDA 12.1)
+- Libtorch 2.3.0 (built with CUDA 12.1)
 - CUDA 12.1
 - cuDNN 8.9.5
 - TensorRT 10.0.0.6
2 changes: 1 addition & 1 deletion py/torch_tensorrt/_enums.py

@@ -107,7 +107,7 @@ def _from(
             return dtype.f16
         elif t == trt.float32:
             return dtype.f32
-        elif trt.__version__ >= "7.0" and t == trt.bool:
+        elif t == trt.bool:
             return dtype.b
         else:
             raise TypeError(
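
Dropping the guard is more than cleanup: trt.__version__ >= "7.0" is a lexicographic string comparison, so under TensorRT 10 it evaluates "10.0.0.6" >= "7.0" to False and the trt.bool branch would never be taken. A standalone sketch of the mapping this branch of _from performs (a hypothetical helper, simplified from torch_tensorrt._enums):

    import tensorrt as trt
    import torch

    # trt.bool exists in every TensorRT version the repo now supports,
    # so no version gate is needed.
    _TRT_TO_TORCH = {
        trt.float16: torch.float16,
        trt.float32: torch.float32,
        trt.bool: torch.bool,
    }

    def trt_dtype_to_torch(t: trt.DataType) -> torch.dtype:
        try:
            return _TRT_TO_TORCH[t]
        except KeyError:
            raise TypeError(f"Unsupported TensorRT dtype: {t}") from None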
2 changes: 1 addition & 1 deletion py/torch_tensorrt/dynamo/conversion/_TRTInterpreter.py

@@ -313,7 +313,7 @@ def run(
         )
         timing_cache = self._create_timing_cache(builder_config, existing_cache)

-        engine = self.builder.build_engine(self.ctx.net, builder_config)
+        engine = self.builder.build_serialized_network(self.ctx.net, builder_config)
         assert engine

         serialized_cache = (
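
TensorRT 10 removes IBuilder.build_engine: the builder now returns a serialized plan (IHostMemory) from build_serialized_network, and a trt.Runtime deserializes it into an ICudaEngine at load time. A minimal end-to-end sketch of that flow (assumed standalone code, not the interpreter's actual surroundings):

    import tensorrt as trt

    logger = trt.Logger(trt.Logger.WARNING)
    builder = trt.Builder(logger)
    network = builder.create_network(0)  # explicit batch is the only mode in TRT 10

    # Tiny placeholder graph so the build has something to compile: identity(x).
    x = network.add_input("x", trt.float32, (1, 3))
    identity = network.add_identity(x)
    network.mark_output(identity.get_output(0))

    config = builder.create_builder_config()
    plan = builder.build_serialized_network(network, config)  # IHostMemory, not an engine
    assert plan is not None

    engine = trt.Runtime(logger).deserialize_cuda_engine(plan)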
7 changes: 7 additions & 0 deletions py/torch_tensorrt/dynamo/conversion/impl/shape.py

@@ -9,6 +9,7 @@
 from torch_tensorrt.dynamo._SourceIR import SourceIR
 from torch_tensorrt.dynamo.conversion._ConversionContext import ConversionContext
 from torch_tensorrt.dynamo.conversion.converter_utils import (
+    cast_trt_tensor,
     get_positive_dim,
     get_trt_tensor,
 )
@@ -38,6 +39,12 @@ def shape(
     """
     shape_layer = ctx.net.add_shape(input_val)
     input_shape = shape_layer.get_output(0)
+    input_shape = cast_trt_tensor(
+        ctx,
+        input_shape,
+        trt.int32,
+        name + "_shape_casted",
+    )
     set_layer_name(shape_layer, target, name + "_shape", source_ir)

     n_dims = len(input_val.shape)
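
The cast is needed because TensorRT 10 changed IShapeLayer to emit an INT64 tensor (it was INT32 in TRT 8), while downstream converters still expect INT32 shape tensors. The commit routes the cast through the repo's cast_trt_tensor helper; the same idea against the raw TensorRT API, via add_cast, with assumed standalone setup:

    import tensorrt as trt

    logger = trt.Logger(trt.Logger.WARNING)
    builder = trt.Builder(logger)
    network = builder.create_network(0)

    x = network.add_input("x", trt.float32, (2, 3, 4))
    shape_layer = network.add_shape(x)  # output dtype is trt.int64 under TRT 10
    cast = network.add_cast(shape_layer.get_output(0), trt.int32)
    network.mark_output(cast.get_output(0))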
43 changes: 12 additions & 31 deletions py/torch_tensorrt/dynamo/runtime/_PythonTorchTensorRTModule.py

@@ -15,6 +15,7 @@
     _select_rt_device,
     multi_gpu_device_check,
 )
+from torch_tensorrt.logging import TRT_LOGGER

 logger = logging.getLogger(__name__)

@@ -64,35 +65,19 @@ def _initialize(self) -> None:
         ) == (len(self.input_names) + len(self.output_names))

         self.input_dtypes = [
-            dtype._from(self.engine.get_binding_dtype(idx))
-            for idx in self.input_binding_indices_in_order
+            dtype._from(self.engine.get_tensor_dtype(input_name))
+            for input_name in self.input_names
         ]
         self.input_shapes = [
             self.engine.get_tensor_shape(input_name) for input_name in self.input_names
         ]
         self.output_dtypes = [
-            dtype._from(self.engine.get_binding_dtype(idx))
-            for idx in self.output_binding_indices_in_order
+            dtype._from(self.engine.get_tensor_dtype(output_name))
+            for output_name in self.output_names
         ]
         self.output_shapes = [
-            (
-                tuple(self.engine.get_binding_shape(idx))
-                if self.engine.has_implicit_batch_dimension
-                else tuple()
-            )
-            for idx in self.output_binding_indices_in_order
-        ]
-        self.hidden_output_dtypes = [
-            dtype._from(self.engine.get_binding_dtype(idx))
-            for idx in self.hidden_output_binding_indices_in_order
-        ]
-        self.hidden_output_shapes = [
-            (
-                tuple(self.engine.get_binding_shape(idx))
-                if self.engine.has_implicit_batch_dimension
-                else tuple()
-            )
-            for idx in self.hidden_output_binding_indices_in_order
+            self.engine.get_tensor_shape(output_name)
+            for output_name in self.output_names
         ]

     def _check_initialized(self) -> None:
@@ -234,15 +219,11 @@ def forward(self, *inputs: torch.Tensor) -> torch.Tensor | Tuple[torch.Tensor, ...]:
                 bindings.append(output.data_ptr())
                 outputs.append(output)

-            for i, idx in enumerate(self.hidden_output_binding_indices_in_order):
-                shape = tuple(self.context.get_binding_shape(idx))
-
-                output = torch.empty(
-                    size=shape,
-                    dtype=self.hidden_output_dtypes[i].to(torch.dtype),
-                    device=torch.cuda.current_device(),
-                )
-                bindings[idx] = output.data_ptr()
+            # Assign tensor address appropriately
+            for idx in range(self.engine.num_io_tensors):
+                self.context.set_tensor_address(
+                    self.engine.get_tensor_name(idx), bindings[idx]
+                )

             with (
                 torch.autograd.profiler.record_function(
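
All of the removed code relied on TRT 8's binding-index API (get_binding_dtype, get_binding_shape, has_implicit_batch_dimension), which TensorRT 10 deletes along with implicit batch and the separate hidden-output bookkeeping. The replacement enumerates I/O tensors by name and attaches device pointers with set_tensor_address. A condensed sketch of the name-based execution pattern this file adopts (an assumed free-standing helper; the real module keeps its bindings list, dtype mapping, and profiling hooks):

    from typing import Dict

    import tensorrt as trt
    import torch

    def run(engine: trt.ICudaEngine, context: trt.IExecutionContext,
            inputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        outputs: Dict[str, torch.Tensor] = {}
        for idx in range(engine.num_io_tensors):
            name = engine.get_tensor_name(idx)
            if engine.get_tensor_mode(name) == trt.TensorIOMode.INPUT:
                context.set_input_shape(name, tuple(inputs[name].shape))
                context.set_tensor_address(name, inputs[name].data_ptr())
            else:
                out = torch.empty(
                    tuple(context.get_tensor_shape(name)),  # known once input shapes are set
                    dtype=torch.float32,  # real code maps engine.get_tensor_dtype(name)
                    device="cuda",
                )
                outputs[name] = out
                context.set_tensor_address(name, out.data_ptr())
        stream = torch.cuda.current_stream()
        context.execute_async_v3(stream.cuda_stream)  # replaces execute_async_v2
        stream.synchronize()
        return outputs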
4 changes: 2 additions & 2 deletions pyproject.toml

@@ -8,7 +8,7 @@ requires = [
     "cffi>=1.15.1",
     "typing-extensions>=4.7.0",
     "future>=0.18.3",
-    "tensorrt>=8.6,<8.7",
+    "tensorrt",
     "torch==2.3.0",
     "pybind11==2.6.2",
     "numpy",
@@ -42,7 +42,7 @@ requires-python = ">=3.8"
 keywords = ["pytorch", "torch", "tensorrt", "trt", "ai", "artificial intelligence", "ml", "machine learning", "dl", "deep learning", "compiler", "dynamo", "torchscript", "inference"]
 dependencies = [
     "torch==2.3.0",
-    "tensorrt>=8.6,<8.7",
+    "tensorrt",
     "packaging>=23",
     "numpy",
     "typing-extensions>=4.7.0",
