diff --git a/.bazelversion b/.bazelversion
index fcdb2e109f..fae6e3d04b 100644
--- a/.bazelversion
+++ b/.bazelversion
@@ -1 +1 @@
-4.0.0
+4.2.1
diff --git a/WORKSPACE b/WORKSPACE
index 4e472e4fdc..f2b8755115 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -41,7 +41,7 @@ local_repository(
 new_local_repository(
     name = "cuda",
     build_file = "@//third_party/cuda:BUILD",
-    path = "/usr/local/cuda-11.1/",
+    path = "/usr/local/cuda-11.3/",
 )
 
 new_local_repository(
diff --git a/docsrc/tutorials/installation.rst b/docsrc/tutorials/installation.rst
index fe1d98f497..52e05a96b0 100644
--- a/docsrc/tutorials/installation.rst
+++ b/docsrc/tutorials/installation.rst
@@ -52,7 +52,7 @@ Torch-TensorRT is built with Bazel, so begin by installing it.
 
 .. code-block:: shell
 
-    export BAZEL_VERSION=$(cat /.bazelversion)
+    export BAZEL_VERSION=$(cat /.bazelversion)
     mkdir bazel
     cd bazel
     curl -fSsL -O https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-dist.zip
@@ -237,7 +237,7 @@ Install or compile a build of PyTorch/LibTorch for aarch64
 
 NVIDIA hosts builds the latest release branch for Jetson here:
 
-    https://forums.developer.nvidia.com/t/pytorch-for-jetson-version-1-9-0-now-available/72048
+    https://forums.developer.nvidia.com/t/pytorch-for-jetson-version-1-10-now-available/72048
 
 Enviorment Setup
diff --git a/py/Dockerfile b/py/Dockerfile
index 9685ee91aa..160b27d68e 100644
--- a/py/Dockerfile
+++ b/py/Dockerfile
@@ -1,8 +1,8 @@
-FROM pytorch/manylinux-cuda111
+FROM pytorch/manylinux-cuda113
 
 RUN yum install -y ninja-build
 
-RUN wget https://copr.fedorainfracloud.org/coprs/vbatts/bazel/repo/epel-7/vbatts-bazel-epel-7.repo \
+RUN wget --no-check-certificate https://copr.fedorainfracloud.org/coprs/vbatts/bazel/repo/epel-7/vbatts-bazel-epel-7.repo \
     && mv vbatts-bazel-epel-7.repo /etc/yum.repos.d/
 
 RUN yum install -y bazel4 --nogpgcheck
diff --git a/py/build_whl.sh b/py/build_whl.sh
index 4b650c7c7e..7706994727 100755
--- a/py/build_whl.sh
+++ b/py/build_whl.sh
@@ -2,7 +2,7 @@
 
 # Example usage: docker run -it -v$(pwd)/..:/workspace/TRTorch build_trtorch_wheel /bin/bash /workspace/TRTorch/py/build_whl.sh
 
-cd /workspace/TRTorch/py
+cd /workspace/Torch-TensorRT/py
 
 export CXX=g++
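
A minimal sketch of how the updated py/Dockerfile and py/build_whl.sh might be exercised together, based on the example-usage comment inside build_whl.sh; the image tag build_trtorch_wheel comes from that comment, and the bind-mount path is an assumption that follows the /workspace/Torch-TensorRT rename made in this patch:

    # Build the manylinux-cuda113 wheel-builder image from the py/ directory (tag is assumed).
    cd py
    docker build -t build_trtorch_wheel .
    # Mount the repository root at the path the updated script expects, then run the build script.
    docker run -it -v$(pwd)/..:/workspace/Torch-TensorRT build_trtorch_wheel /bin/bash /workspace/Torch-TensorRT/py/build_whl.sh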