From 408471a296ec7f88dacf92160785341f377aa91f Mon Sep 17 00:00:00 2001 From: Rohit Kumar Srivastava Date: Wed, 24 Feb 2021 17:56:23 -0800 Subject: [PATCH] [BACKPORT]Enable CUDA 11.0 on nightly + CUDA 11.2 on pip (#19295)(#19764) (#19930) * Enable CUDA 11.0 on nightly development builds (#19295) Remove CUDA 9.2 and CUDA 10.0 * [PIP] add build variant for cuda 11.2 (#19764) * adding ci docker files for cu111 and cu112 * removing previous CUDA make versions and adding support for cuda11.2 Co-authored-by: waytrue17 <52505574+waytrue17@users.noreply.github.com> Co-authored-by: Sheng Zha Co-authored-by: Rohit Kumar Srivastava --- cd/Jenkinsfile_cd_pipeline | 2 +- cd/Jenkinsfile_release_job | 4 +- cd/README.md | 5 +- cd/python/pypi/pypi_package.sh | 2 +- cd/utils/artifact_repository.md | 8 +- cd/utils/mxnet_base_image.sh | 3 + cd/utils/test_artifact_repository.py | 4 +- ci/docker/Dockerfile.build.ubuntu_gpu_cu112 | 44 +++++ ci/docker/runtime_functions.sh | 2 +- .../{linux_cu92.cmake => linux_cu112.cmake} | 4 +- config/distribution/linux_cu75.cmake | 34 ---- config/distribution/linux_cu80.cmake | 34 ---- config/distribution/linux_cu90.cmake | 34 ---- config/distribution/linux_cu91.cmake | 34 ---- .../{linux_cu92.mk => linux_cu112.mk} | 3 +- make/staticbuild/linux_cu75.mk | 167 ----------------- make/staticbuild/linux_cu80.mk | 170 ----------------- make/staticbuild/linux_cu90.mk | 172 ------------------ make/staticbuild/linux_cu91.mk | 172 ------------------ tools/pip/doc/CPU_ADDITIONAL.md | 4 +- tools/pip/doc/CU100_ADDITIONAL.md | 3 +- tools/pip/doc/CU101_ADDITIONAL.md | 5 +- tools/pip/doc/CU102_ADDITIONAL.md | 5 +- tools/pip/doc/CU110_ADDITIONAL.md | 5 +- ...CU92_ADDITIONAL.md => CU112_ADDITIONAL.md} | 5 +- tools/pip/doc/NATIVE_ADDITIONAL.md | 5 +- tools/pip/setup.py | 20 +- tools/setup_gpu_build_tools.sh | 39 +++- tools/staticbuild/README.md | 4 +- 29 files changed, 130 insertions(+), 863 deletions(-) create mode 100644 ci/docker/Dockerfile.build.ubuntu_gpu_cu112 rename 
config/distribution/{linux_cu92.cmake => linux_cu112.cmake} (91%) delete mode 100644 config/distribution/linux_cu75.cmake delete mode 100644 config/distribution/linux_cu80.cmake delete mode 100644 config/distribution/linux_cu90.cmake delete mode 100644 config/distribution/linux_cu91.cmake rename make/staticbuild/{linux_cu92.mk => linux_cu112.mk} (99%) delete mode 100644 make/staticbuild/linux_cu75.mk delete mode 100644 make/staticbuild/linux_cu80.mk delete mode 100644 make/staticbuild/linux_cu90.mk delete mode 100644 make/staticbuild/linux_cu91.mk rename tools/pip/doc/{CU92_ADDITIONAL.md => CU112_ADDITIONAL.md} (92%) diff --git a/cd/Jenkinsfile_cd_pipeline b/cd/Jenkinsfile_cd_pipeline index 6ef1104ab03d..03f2fb12280e 100644 --- a/cd/Jenkinsfile_cd_pipeline +++ b/cd/Jenkinsfile_cd_pipeline @@ -36,7 +36,7 @@ pipeline { parameters { // Release parameters - string(defaultValue: "cpu,native,cu92,cu100,cu101,cu102,cu110", description: "Comma separated list of variants", name: "MXNET_VARIANTS") + string(defaultValue: "cpu,native,cu100,cu101,cu102,cu110,cu112", description: "Comma separated list of variants", name: "MXNET_VARIANTS") booleanParam(defaultValue: false, description: 'Whether this is a release build or not', name: "RELEASE_BUILD") } diff --git a/cd/Jenkinsfile_release_job b/cd/Jenkinsfile_release_job index a2dd674f3df3..089896ba1553 100644 --- a/cd/Jenkinsfile_release_job +++ b/cd/Jenkinsfile_release_job @@ -42,8 +42,8 @@ pipeline { // Using string instead of choice parameter to keep the changes to the parameters minimal to avoid // any disruption caused by different COMMIT_ID values chaning the job parameter configuration on // Jenkins. 
- string(defaultValue: "mxnet_lib", description: "Pipeline to build", name: "RELEASE_JOB_TYPE") - string(defaultValue: "cpu,native,cu100,cu101,cu102,cu110", description: "Comma separated list of variants", name: "MXNET_VARIANTS") + string(defaultValue: "mxnet_lib/static", description: "Pipeline to build", name: "RELEASE_JOB_TYPE") + string(defaultValue: "cpu,native,cu100,cu101,cu102,cu110,cu112", description: "Comma separated list of variants", name: "MXNET_VARIANTS") booleanParam(defaultValue: false, description: 'Whether this is a release build or not', name: "RELEASE_BUILD") string(defaultValue: "nightly_v1.x", description: "String used for naming docker images", name: "VERSION") } diff --git a/cd/README.md b/cd/README.md index 2c86c6efb953..f782a21d70a4 100644 --- a/cd/README.md +++ b/cd/README.md @@ -25,7 +25,7 @@ MXNet aims to support a variety of frontends, e.g. Python, Java, Perl, R, etc. a The CD process is driven by the [CD pipeline job](Jenkinsfile_cd_pipeline), which orchestrates the order in which the artifacts are delivered. For instance, first publish the libmxnet library before publishing the pip package. It does this by triggering the [release job](Jenkinsfile_release_job) with a specific set of parameters for each delivery channel. The release job executes the specific release pipeline for a delivery channel across all MXNet *variants*. -A variant is a specific environment or features for which MXNet is compiled. For instance CPU, GPU with CUDA v10.0, CUDA v9.0 with MKL-DNN support, etc. +A variant is a specific environment or features for which MXNet is compiled. For instance CPU, GPU with CUDA v10.0, CUDA v11.0 with MKL-DNN support, etc. Currently, below variants are supported. All of these variants except native have MKL-DNN backend enabled. @@ -36,6 +36,7 @@ Currently, below variants are supported. 
All of these variants except native hav * *cu101*: CUDA 10.1 * *cu102*: CUDA 10.2 * *cu110*: CUDA 11.0 +* *cu112*: CUDA 11.2 *For more on variants, see [here](https://github.com/apache/incubator-mxnet/issues/8671)* @@ -121,7 +122,7 @@ The "first mile" of the CD process is posting the mxnet binaries to the [artifac ##### Timeout -We shouldn't set global timeouts for the pipelines. Rather, the `step` being executed should be rapped with a `timeout` function (as in the pipeline example above). The `max_time` is a global variable set at the [release job](Jenkinsfile_release_job) level. +We shouldn't set global timeouts for the pipelines. Rather, the `step` being executed should be rapped with a `timeout` function (as in the pipeline example above). The `max_time` is a global variable set at the [release job](Jenkinsfile_release_job) level. ##### Node of execution diff --git a/cd/python/pypi/pypi_package.sh b/cd/python/pypi/pypi_package.sh index 1e8103bdfca7..4c2a3d9eb7d8 100755 --- a/cd/python/pypi/pypi_package.sh +++ b/cd/python/pypi/pypi_package.sh @@ -18,7 +18,7 @@ set -ex -# variant = cpu, native, cu80, cu100, etc. +# variant = cpu, native, cu100, cu101, cu102, cu110, cu112 etc. export mxnet_variant=${1:?"Please specify the mxnet variant"} # Due to this PR: https://github.com/apache/incubator-mxnet/pull/14899 diff --git a/cd/utils/artifact_repository.md b/cd/utils/artifact_repository.md index 49399bbb5551..9de806eb6edf 100644 --- a/cd/utils/artifact_repository.md +++ b/cd/utils/artifact_repository.md @@ -17,7 +17,7 @@ # Artifact Repository - Pushing and Pulling libmxnet -The artifact repository is an S3 bucket accessible only to restricted Jenkins nodes. It is used to store compiled MXNet artifacts that can be used by downstream CD pipelines to package the compiled libraries for different delivery channels (e.g. DockerHub, PyPI, Maven, etc.). 
The S3 object keys for the files being posted will be prefixed with the following distinguishing characteristics of the binary: branch, commit id, operating system, variant and dependency linking strategy (static or dynamic). For instance, s3://bucket/73b29fa90d3eac0b1fae403b7583fdd1529942dc/ubuntu16.04/cu92mkl/static/libmxnet.so +The artifact repository is an S3 bucket accessible only to restricted Jenkins nodes. It is used to store compiled MXNet artifacts that can be used by downstream CD pipelines to package the compiled libraries for different delivery channels (e.g. DockerHub, PyPI, Maven, etc.). The S3 object keys for the files being posted will be prefixed with the following distinguishing characteristics of the binary: branch, commit id, operating system, variant and dependency linking strategy (static or dynamic). For instance, s3://bucket/73b29fa90d3eac0b1fae403b7583fdd1529942dc/ubuntu16.04/cu100/static/libmxnet.so An MXNet artifact is defined as the following set of files: @@ -53,13 +53,13 @@ If not set, derived through the value of sys.platform (https://docs.python.org/3 **Variant** -Manually configured through the --variant argument. The current variants are: cpu, native, cu92, cu100, cu101, cu102 and cu110. +Manually configured through the --variant argument. The current variants are: cpu, native, cu100, cu101, cu102, cu110 and cu112. As long as the tool is being run from the MXNet code base, the runtime feature detection tool (https://github.com/larroy/mxnet/blob/dd432b7f241c9da2c96bcb877c2dc84e6a1f74d4/docs/api/python/libinfo/libinfo.md) can be used to detect whether the library has been compiled with MKL (library has MKL-DNN feature enabled) and/or CUDA support (compiled with CUDA feature enabled). -If it has been compiled with CUDA support, the output of /usr/local/cuda/bin/nvcc --version can be mined for the exact CUDA version (eg. 8.0, 9.0, etc.). 
+If it has been compiled with CUDA support, the output of /usr/local/cuda/bin/nvcc --version can be mined for the exact CUDA version (eg. 10.0, 11.0, etc.). -By knowing which features are enabled on the binary, and if necessary, which CUDA version is installed on the machine, the value for the variant argument can be calculated. Eg. if CUDA features are enabled, and nvcc reports cuda version 10, then the variant would be cu100. If neither MKL-DNN nor CUDA features are enabled, the variant would be native. +By knowing which features are enabled on the binary, and if necessary, which CUDA version is installed on the machine, the value for the variant argument can be calculated. Eg. if CUDA features are enabled, and nvcc reports cuda version 10.0, then the variant would be cu100. If neither MKL-DNN nor CUDA features are enabled, the variant would be native. **Dependency Linking** diff --git a/cd/utils/mxnet_base_image.sh b/cd/utils/mxnet_base_image.sh index 5073a3783b2c..5632fe8d024a 100755 --- a/cd/utils/mxnet_base_image.sh +++ b/cd/utils/mxnet_base_image.sh @@ -33,6 +33,9 @@ case ${mxnet_variant} in cu110*) echo "nvidia/cuda:11.0-cudnn8-runtime-ubuntu18.04" ;; + cu112*) + echo "nvidia/cuda:11.2.1-cudnn8-runtime-ubuntu18.04" + ;; cpu) echo "ubuntu:18.04" ;; diff --git a/cd/utils/test_artifact_repository.py b/cd/utils/test_artifact_repository.py index 2ab5d910c87b..827457e335e7 100644 --- a/cd/utils/test_artifact_repository.py +++ b/cd/utils/test_artifact_repository.py @@ -144,9 +144,9 @@ def test_get_cuda_version(self, mock): cuda_version = get_cuda_version() self.assertEqual(cuda_version, '100') - mock.return_value = b'Cuda compilation tools, release 9.2, V9.2.148' + mock.return_value = b'Cuda compilation tools, release 11.0, V11.0.148' cuda_version = get_cuda_version() - self.assertEqual(cuda_version, '92') + self.assertEqual(cuda_version, '110') @patch('artifact_repository.check_output') def test_get_cuda_version_not_found(self, mock): diff --git 
a/ci/docker/Dockerfile.build.ubuntu_gpu_cu112 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu112 new file mode 100644 index 000000000000..14706737620b --- /dev/null +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu112 @@ -0,0 +1,44 @@ +# -*- mode: dockerfile -*- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# +# Dockerfile to run MXNet on Ubuntu 16.04 for GPU + +FROM nvidia/cuda:11.2-cudnn8-devel-ubuntu16.04 + +WORKDIR /work/deps + +COPY install/ubuntu_core.sh /work/ +RUN /work/ubuntu_core.sh + +COPY install/deb_ubuntu_ccache.sh /work/ +RUN /work/deb_ubuntu_ccache.sh + +COPY install/ubuntu_python.sh /work/ +COPY install/requirements /work/ +RUN /work/ubuntu_python.sh + +# Always last +ARG USER_ID=0 +ARG GROUP_ID=0 +COPY install/ubuntu_adduser.sh /work/ +RUN /work/ubuntu_adduser.sh + +COPY runtime_functions.sh /work/ + +WORKDIR /work/mxnet +ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda/compat diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index c230ccdccd44..f876c4f7a415 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -224,7 +224,7 @@ build_ubuntu_gpu_mkldnn_release() { # Compiles the dynamic mxnet library # Parameters: -# $1 -> mxnet_variant: the mxnet variant to build, e.g. cpu, cu100, cu92mkl, etc. +# $1 -> mxnet_variant: the mxnet variant to build, e.g. cpu, native, cu100, cu101, cu102, cu110, cu112, etc. 
build_dynamic_libmxnet() { set -ex diff --git a/config/distribution/linux_cu92.cmake b/config/distribution/linux_cu112.cmake similarity index 91% rename from config/distribution/linux_cu92.cmake rename to config/distribution/linux_cu112.cmake index 63ab9fce20d8..6c9a87650aee 100644 --- a/config/distribution/linux_cu92.cmake +++ b/config/distribution/linux_cu112.cmake @@ -30,5 +30,5 @@ set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.") set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support") set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support") -set(CUDACXX "/usr/local/cuda-9.2/bin/nvcc" CACHE STRING "Cuda compiler") -set(MXNET_CUDA_ARCH "3.0;5.0;6.0;7.0;7.2" CACHE STRING "Cuda architectures") +set(CUDACXX "/usr/local/cuda-11.2/bin/nvcc" CACHE STRING "Cuda compiler") +set(MXNET_CUDA_ARCH "5.0;6.0;7.0;8.0;8.6" CACHE STRING "Cuda architectures") diff --git a/config/distribution/linux_cu75.cmake b/config/distribution/linux_cu75.cmake deleted file mode 100644 index 45ba2b9de5d7..000000000000 --- a/config/distribution/linux_cu75.cmake +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -set(CMAKE_BUILD_TYPE "Distribution" CACHE STRING "Build type") -set(CFLAGS "-mno-avx" CACHE STRING "CFLAGS") -set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") - -set(USE_CUDA ON CACHE BOOL "Build with CUDA support") -set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") -set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") -set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") -set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") -set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support") -set(USE_LAPACK ON CACHE BOOL "Build with lapack support") -set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.") -set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support") -set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support") - -set(CUDACXX "/usr/local/cuda-7.5/bin/nvcc" CACHE STRING "Cuda compiler") -set(MXNET_CUDA_ARCH "3.0;3.5;5.0;5.2" CACHE STRING "Cuda architectures") diff --git a/config/distribution/linux_cu80.cmake b/config/distribution/linux_cu80.cmake deleted file mode 100644 index ce8e0083bcad..000000000000 --- a/config/distribution/linux_cu80.cmake +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -set(CMAKE_BUILD_TYPE "Distribution" CACHE STRING "Build type") -set(CFLAGS "-mno-avx" CACHE STRING "CFLAGS") -set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") - -set(USE_CUDA ON CACHE BOOL "Build with CUDA support") -set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") -set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") -set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") -set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") -set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support") -set(USE_LAPACK ON CACHE BOOL "Build with lapack support") -set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.") -set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support") -set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support") - -set(CUDACXX "/usr/local/cuda-8.0/bin/nvcc" CACHE STRING "Cuda compiler") -set(MXNET_CUDA_ARCH "3.0;5.0;6.0;6.2" CACHE STRING "Cuda architectures") diff --git a/config/distribution/linux_cu90.cmake b/config/distribution/linux_cu90.cmake deleted file mode 100644 index 01097cb882e4..000000000000 --- a/config/distribution/linux_cu90.cmake +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -set(CMAKE_BUILD_TYPE "Distribution" CACHE STRING "Build type") -set(CFLAGS "-mno-avx" CACHE STRING "CFLAGS") -set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") - -set(USE_CUDA ON CACHE BOOL "Build with CUDA support") -set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") -set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") -set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") -set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") -set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support") -set(USE_LAPACK ON CACHE BOOL "Build with lapack support") -set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.") -set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support") -set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support") - -set(CUDACXX "/usr/local/cuda-9.0/bin/nvcc" CACHE STRING "Cuda compiler") -set(MXNET_CUDA_ARCH "3.0;5.0;6.0;7.0;7.2" CACHE STRING "Cuda architectures") diff --git a/config/distribution/linux_cu91.cmake b/config/distribution/linux_cu91.cmake deleted file mode 100644 index f6301fa9f720..000000000000 --- a/config/distribution/linux_cu91.cmake +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -set(CMAKE_BUILD_TYPE "Distribution" CACHE STRING "Build type") -set(CFLAGS "-mno-avx" CACHE STRING "CFLAGS") -set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") - -set(USE_CUDA ON CACHE BOOL "Build with CUDA support") -set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") -set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") -set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") -set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") -set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support") -set(USE_LAPACK ON CACHE BOOL "Build with lapack support") -set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.") -set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support") -set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support") - -set(CUDACXX "/usr/local/cuda-9.1/bin/nvcc" CACHE STRING "Cuda compiler") -set(MXNET_CUDA_ARCH "3.0;5.0;6.0;7.0;7.2" CACHE STRING "Cuda architectures") diff --git a/make/staticbuild/linux_cu92.mk b/make/staticbuild/linux_cu112.mk similarity index 99% rename from make/staticbuild/linux_cu92.mk rename to make/staticbuild/linux_cu112.mk index bbaa4bfcd772..874e1576d34f 100644 --- a/make/staticbuild/linux_cu92.mk +++ b/make/staticbuild/linux_cu112.mk @@ -66,7 +66,7 @@ USE_CUDA = 1 # add the path to CUDA library to link and compile flag # if you have already add them to environment variable, leave it as NONE # USE_CUDA_PATH = /usr/local/cuda -USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-9.2 +USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-11.2 # whether to use CuDNN library USE_CUDNN = 1 @@ -170,3 +170,4 @@ EXTRA_OPERATORS = # git@github.com:dato-code/SFrame.git # SFRAME_PATH = $(HOME)/SFrame # MXNET_PLUGINS += plugin/sframe/plugin.mk + diff --git a/make/staticbuild/linux_cu75.mk b/make/staticbuild/linux_cu75.mk deleted file mode 100644 index e263794600df..000000000000 --- a/make/staticbuild/linux_cu75.mk +++ /dev/null @@ -1,167 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under 
one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -#------------------------------------------------------------------------------- -# Template configuration for compiling mxnet for making python wheel -#------------------------------------------------------------------------------- - -#--------------------- -# choice of compiler -#-------------------- - -export CC = gcc -export CXX = g++ -export NVCC = nvcc - -# whether compile with options for MXNet developer -DEV = 0 - -# whether compile with debug -DEBUG = 0 - -# whether to turn on signal handler (e.g. 
segfault logger) -USE_SIGNAL_HANDLER = 1 - -# the additional link flags you want to add -ADD_LDFLAGS += -L$(DEPS_PATH)/lib $(DEPS_PATH)/lib/libculibos.a -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections - -# the additional compile flags you want to add -ADD_CFLAGS += -I$(DEPS_PATH)/include -ffunction-sections -fdata-sections - -#--------------------------------------------- -# matrix computation libraries for CPU/GPU -#--------------------------------------------- - -# choose the version of blas you want to use -# can be: mkl, blas, atlas, openblas -# in default use atlas for linux while apple for osx -USE_BLAS=openblas - -# whether use opencv during compilation -# you can disable it, however, you will not able to use -# imbin iterator -USE_OPENCV = 1 -# Add OpenCV include path, in which the directory `opencv2` exists -USE_OPENCV_INC_PATH = NONE -# Add OpenCV shared library path, in which the shared library exists -USE_OPENCV_LIB_PATH = NONE - -# whether use CUDA during compile -USE_CUDA = 1 - -# add the path to CUDA library to link and compile flag -# if you have already add them to environment variable, leave it as NONE -# USE_CUDA_PATH = /usr/local/cuda -USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-7.5 - -# whether use CuDNN R3 library -USE_CUDNN = 1 - -# CUDA architecture setting: going with all of them. -# For CUDA < 6.0, comment the *_50 lines for compatibility. -# CUDA_ARCH := - -# whether use cuda runtime compiling for writing kernels in native language (i.e. 
Python) -ENABLE_CUDA_RTC = 1 - -# use openmp for parallelization -USE_OPENMP = 1 -USE_OPERATOR_TUNING = 1 -USE_LIBJPEG_TURBO = 1 - -# whether use MKL-DNN library -USE_MKLDNN = 1 - -# whether use NNPACK library -USE_NNPACK = 0 - -# whether use lapack during compilation -# only effective when compiled with blas versions openblas/apple/atlas/mkl -USE_LAPACK = 1 - -# path to lapack library in case of a non-standard installation -USE_LAPACK_PATH = $(DEPS_PATH)/lib - -# add path to intel library, you may need it for MKL, if you did not add the path -# to environment variable -USE_INTEL_PATH = NONE - -# If use MKL, choose static link automatically to allow python wrapper -ifeq ($(USE_BLAS), mkl) -USE_STATIC_MKL = 1 -else -USE_STATIC_MKL = NONE -endif - -#---------------------------- -# Settings for power and arm arch -#---------------------------- -ARCH := $(shell uname -a) -ifneq (,$(filter $(ARCH), armv6l armv7l powerpc64le ppc64le aarch64)) - USE_SSE=0 -else - USE_SSE=1 -endif - -#---------------------------- -# distributed computing -#---------------------------- - -# whether or not to enable multi-machine supporting -USE_DIST_KVSTORE = 1 - -# whether or not allow to read and write HDFS directly. If yes, then hadoop is -# required -USE_HDFS = 0 - -# path to libjvm.so. required if USE_HDFS=1 -LIBJVM=$(JAVA_HOME)/jre/lib/amd64/server - -# whether or not allow to read and write AWS S3 directly. If yes, then -# libcurl4-openssl-dev is required, it can be installed on Ubuntu by -# sudo apt-get install -y libcurl4-openssl-dev -USE_S3 = 1 - -#---------------------------- -# additional operators -#---------------------------- - -# path to folders containing projects specific operators that you don't want to put in src/operators -EXTRA_OPERATORS = - - -#---------------------------- -# plugins -#---------------------------- - -# whether to use caffe integration. This requires installing caffe. 
-# You also need to add CAFFE_PATH/build/lib to your LD_LIBRARY_PATH -# CAFFE_PATH = $(HOME)/caffe -# MXNET_PLUGINS += plugin/caffe/caffe.mk - -# whether to use torch integration. This requires installing torch. -# You also need to add TORCH_PATH/install/lib to your LD_LIBRARY_PATH -# TORCH_PATH = $(HOME)/torch -# MXNET_PLUGINS += plugin/torch/torch.mk - -# WARPCTC_PATH = $(HOME)/warp-ctc -# MXNET_PLUGINS += plugin/warpctc/warpctc.mk - -# whether to use sframe integration. This requires build sframe -# git@github.com:dato-code/SFrame.git -# SFRAME_PATH = $(HOME)/SFrame -# MXNET_PLUGINS += plugin/sframe/plugin.mk diff --git a/make/staticbuild/linux_cu80.mk b/make/staticbuild/linux_cu80.mk deleted file mode 100644 index a42220d3d467..000000000000 --- a/make/staticbuild/linux_cu80.mk +++ /dev/null @@ -1,170 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# -#------------------------------------------------------------------------------- -# Template configuration for compiling mxnet for making python wheel -#------------------------------------------------------------------------------- - -#--------------------- -# choice of compiler -#-------------------- - -export CC = gcc -export CXX = g++ -export NVCC = nvcc - -# whether compile with options for MXNet developer -DEV = 0 - -# whether compile with debug -DEBUG = 0 - -# whether to turn on signal handler (e.g. segfault logger) -USE_SIGNAL_HANDLER = 1 - -# the additional link flags you want to add -ADD_LDFLAGS += -L$(DEPS_PATH)/lib $(DEPS_PATH)/lib/libculibos.a -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections - -# the additional compile flags you want to add -ADD_CFLAGS += -I$(DEPS_PATH)/include -ffunction-sections -fdata-sections - -#--------------------------------------------- -# matrix computation libraries for CPU/GPU -#--------------------------------------------- - -# choose the version of blas you want to use -# can be: mkl, blas, atlas, openblas -# in default use atlas for linux while apple for osx -USE_BLAS=openblas - -# whether use opencv during compilation -# you can disable it, however, you will not able to use -# imbin iterator -USE_OPENCV = 1 -# Add OpenCV include path, in which the directory `opencv2` exists -USE_OPENCV_INC_PATH = NONE -# Add OpenCV shared library path, in which the shared library exists -USE_OPENCV_LIB_PATH = NONE - -# whether use CUDA during compile -USE_CUDA = 1 - -# add the path to CUDA library to link and compile flag -# if you have already add them to environment variable, leave it as NONE -# USE_CUDA_PATH = /usr/local/cuda -USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-8.0 - -# whether to use CuDNN library -USE_CUDNN = 1 - -# whether to use NCCL library -USE_NCCL = 1 - -# CUDA architecture setting: going with all of them. 
-# For CUDA < 6.0, comment the *_50 lines for compatibility. -# CUDA_ARCH := - -# whether use cuda runtime compiling for writing kernels in native language (i.e. Python) -ENABLE_CUDA_RTC = 1 - -# use openmp for parallelization -USE_OPENMP = 1 -USE_OPERATOR_TUNING = 1 -USE_LIBJPEG_TURBO = 1 - -# whether use MKL-DNN library -USE_MKLDNN = 1 - -# whether use NNPACK library -USE_NNPACK = 0 - -# whether use lapack during compilation -# only effective when compiled with blas versions openblas/apple/atlas/mkl -USE_LAPACK = 1 - -# path to lapack library in case of a non-standard installation -USE_LAPACK_PATH = $(DEPS_PATH)/lib - -# add path to intel library, you may need it for MKL, if you did not add the path -# to environment variable -USE_INTEL_PATH = NONE - -# If use MKL, choose static link automatically to allow python wrapper -ifeq ($(USE_BLAS), mkl) -USE_STATIC_MKL = 1 -else -USE_STATIC_MKL = NONE -endif - -#---------------------------- -# Settings for power and arm arch -#---------------------------- -ARCH := $(shell uname -a) -ifneq (,$(filter $(ARCH), armv6l armv7l powerpc64le ppc64le aarch64)) - USE_SSE=0 -else - USE_SSE=1 -endif - -#---------------------------- -# distributed computing -#---------------------------- - -# whether or not to enable multi-machine supporting -USE_DIST_KVSTORE = 1 - -# whether or not allow to read and write HDFS directly. If yes, then hadoop is -# required -USE_HDFS = 0 - -# path to libjvm.so. required if USE_HDFS=1 -LIBJVM=$(JAVA_HOME)/jre/lib/amd64/server - -# whether or not allow to read and write AWS S3 directly. 
If yes, then -# libcurl4-openssl-dev is required, it can be installed on Ubuntu by -# sudo apt-get install -y libcurl4-openssl-dev -USE_S3 = 1 - -#---------------------------- -# additional operators -#---------------------------- - -# path to folders containing projects specific operators that you don't want to put in src/operators -EXTRA_OPERATORS = - - -#---------------------------- -# plugins -#---------------------------- - -# whether to use caffe integration. This requires installing caffe. -# You also need to add CAFFE_PATH/build/lib to your LD_LIBRARY_PATH -# CAFFE_PATH = $(HOME)/caffe -# MXNET_PLUGINS += plugin/caffe/caffe.mk - -# whether to use torch integration. This requires installing torch. -# You also need to add TORCH_PATH/install/lib to your LD_LIBRARY_PATH -# TORCH_PATH = $(HOME)/torch -# MXNET_PLUGINS += plugin/torch/torch.mk - -# WARPCTC_PATH = $(HOME)/warp-ctc -# MXNET_PLUGINS += plugin/warpctc/warpctc.mk - -# whether to use sframe integration. This requires build sframe -# git@github.com:dato-code/SFrame.git -# SFRAME_PATH = $(HOME)/SFrame -# MXNET_PLUGINS += plugin/sframe/plugin.mk diff --git a/make/staticbuild/linux_cu90.mk b/make/staticbuild/linux_cu90.mk deleted file mode 100644 index c46c10f6358b..000000000000 --- a/make/staticbuild/linux_cu90.mk +++ /dev/null @@ -1,172 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -#------------------------------------------------------------------------------- -# Template configuration for compiling mxnet for making python wheel -#------------------------------------------------------------------------------- - -#--------------------- -# choice of compiler -#-------------------- - -export CC = gcc -export CXX = g++ -export NVCC = nvcc - -# whether compile with options for MXNet developer -DEV = 0 - -# whether compile with debug -DEBUG = 0 - -# whether to turn on signal handler (e.g. segfault logger) -USE_SIGNAL_HANDLER = 1 - -# the additional link flags you want to add -ADD_LDFLAGS += -L$(DEPS_PATH)/lib $(DEPS_PATH)/lib/libculibos.a -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections - -# the additional compile flags you want to add -ADD_CFLAGS += -I$(DEPS_PATH)/include -ffunction-sections -fdata-sections - -#--------------------------------------------- -# matrix computation libraries for CPU/GPU -#--------------------------------------------- - -# choose the version of blas you want to use -# can be: mkl, blas, atlas, openblas -# in default use atlas for linux while apple for osx -USE_BLAS=openblas - -# whether use opencv during compilation -# you can disable it, however, you will not able to use -# imbin iterator -USE_OPENCV = 1 -# Add OpenCV include path, in which the directory `opencv2` exists -USE_OPENCV_INC_PATH = NONE -# Add OpenCV shared library path, in which the shared library exists -USE_OPENCV_LIB_PATH = NONE - -# whether use CUDA during 
compile -USE_CUDA = 1 - -# add the path to CUDA library to link and compile flag -# if you have already add them to environment variable, leave it as NONE -# USE_CUDA_PATH = /usr/local/cuda -USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-9.0 - -# whether to use CuDNN library -USE_CUDNN = 1 - -# whether to use NCCL library -USE_NCCL = 1 - -# CUDA architecture setting: going with all of them. -# For CUDA < 6.0, comment the *_50 lines for compatibility. -# CUDA_ARCH := - -# whether use cuda runtime compiling for writing kernels in native language (i.e. Python) -ENABLE_CUDA_RTC = 1 - -USE_NVTX=1 - -# use openmp for parallelization -USE_OPENMP = 1 -USE_OPERATOR_TUNING = 1 -USE_LIBJPEG_TURBO = 1 - -# whether use MKL-DNN library -USE_MKLDNN = 1 - -# whether use NNPACK library -USE_NNPACK = 0 - -# whether use lapack during compilation -# only effective when compiled with blas versions openblas/apple/atlas/mkl -USE_LAPACK = 1 - -# path to lapack library in case of a non-standard installation -USE_LAPACK_PATH = $(DEPS_PATH)/lib - -# add path to intel library, you may need it for MKL, if you did not add the path -# to environment variable -USE_INTEL_PATH = NONE - -# If use MKL, choose static link automatically to allow python wrapper -ifeq ($(USE_BLAS), mkl) -USE_STATIC_MKL = 1 -else -USE_STATIC_MKL = NONE -endif - -#---------------------------- -# Settings for power and arm arch -#---------------------------- -ARCH := $(shell uname -a) -ifneq (,$(filter $(ARCH), armv6l armv7l powerpc64le ppc64le aarch64)) - USE_SSE=0 -else - USE_SSE=1 -endif - -#---------------------------- -# distributed computing -#---------------------------- - -# whether or not to enable multi-machine supporting -USE_DIST_KVSTORE = 1 - -# whether or not allow to read and write HDFS directly. If yes, then hadoop is -# required -USE_HDFS = 0 - -# path to libjvm.so. required if USE_HDFS=1 -LIBJVM=$(JAVA_HOME)/jre/lib/amd64/server - -# whether or not allow to read and write AWS S3 directly. 
If yes, then -# libcurl4-openssl-dev is required, it can be installed on Ubuntu by -# sudo apt-get install -y libcurl4-openssl-dev -USE_S3 = 1 - -#---------------------------- -# additional operators -#---------------------------- - -# path to folders containing projects specific operators that you don't want to put in src/operators -EXTRA_OPERATORS = - - -#---------------------------- -# plugins -#---------------------------- - -# whether to use caffe integration. This requires installing caffe. -# You also need to add CAFFE_PATH/build/lib to your LD_LIBRARY_PATH -# CAFFE_PATH = $(HOME)/caffe -# MXNET_PLUGINS += plugin/caffe/caffe.mk - -# whether to use torch integration. This requires installing torch. -# You also need to add TORCH_PATH/install/lib to your LD_LIBRARY_PATH -# TORCH_PATH = $(HOME)/torch -# MXNET_PLUGINS += plugin/torch/torch.mk - -# WARPCTC_PATH = $(HOME)/warp-ctc -# MXNET_PLUGINS += plugin/warpctc/warpctc.mk - -# whether to use sframe integration. This requires build sframe -# git@github.com:dato-code/SFrame.git -# SFRAME_PATH = $(HOME)/SFrame -# MXNET_PLUGINS += plugin/sframe/plugin.mk diff --git a/make/staticbuild/linux_cu91.mk b/make/staticbuild/linux_cu91.mk deleted file mode 100644 index b2a33d7e36c8..000000000000 --- a/make/staticbuild/linux_cu91.mk +++ /dev/null @@ -1,172 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -#------------------------------------------------------------------------------- -# Template configuration for compiling mxnet for making python wheel -#------------------------------------------------------------------------------- - -#--------------------- -# choice of compiler -#-------------------- - -export CC = gcc -export CXX = g++ -export NVCC = nvcc - -# whether compile with options for MXNet developer -DEV = 0 - -# whether compile with debug -DEBUG = 0 - -# whether to turn on signal handler (e.g. segfault logger) -USE_SIGNAL_HANDLER = 1 - -# the additional link flags you want to add -ADD_LDFLAGS += -L$(DEPS_PATH)/lib $(DEPS_PATH)/lib/libculibos.a -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections - -# the additional compile flags you want to add -ADD_CFLAGS += -I$(DEPS_PATH)/include -ffunction-sections -fdata-sections - -#--------------------------------------------- -# matrix computation libraries for CPU/GPU -#--------------------------------------------- - -# choose the version of blas you want to use -# can be: mkl, blas, atlas, openblas -# in default use atlas for linux while apple for osx -USE_BLAS=openblas - -# whether use opencv during compilation -# you can disable it, however, you will not able to use -# imbin iterator -USE_OPENCV = 1 -# Add OpenCV include path, in which the directory `opencv2` exists -USE_OPENCV_INC_PATH = NONE -# Add OpenCV shared library path, in which the shared library exists -USE_OPENCV_LIB_PATH = NONE - -# whether use CUDA during 
compile -USE_CUDA = 1 - -# add the path to CUDA library to link and compile flag -# if you have already add them to environment variable, leave it as NONE -# USE_CUDA_PATH = /usr/local/cuda -USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-9.1 - -# whether to use CuDNN library -USE_CUDNN = 1 - -# whether to use NCCL library -USE_NCCL = 1 - -# CUDA architecture setting: going with all of them. -# For CUDA < 6.0, comment the *_50 lines for compatibility. -# CUDA_ARCH := - -# whether use cuda runtime compiling for writing kernels in native language (i.e. Python) -ENABLE_CUDA_RTC = 1 - -USE_NVTX=1 - -# use openmp for parallelization -USE_OPENMP = 1 -USE_OPERATOR_TUNING = 1 -USE_LIBJPEG_TURBO = 1 - -# whether use MKL-DNN library -USE_MKLDNN = 1 - -# whether use NNPACK library -USE_NNPACK = 0 - -# whether use lapack during compilation -# only effective when compiled with blas versions openblas/apple/atlas/mkl -USE_LAPACK = 1 - -# path to lapack library in case of a non-standard installation -USE_LAPACK_PATH = $(DEPS_PATH)/lib - -# add path to intel library, you may need it for MKL, if you did not add the path -# to environment variable -USE_INTEL_PATH = NONE - -# If use MKL, choose static link automatically to allow python wrapper -ifeq ($(USE_BLAS), mkl) -USE_STATIC_MKL = 1 -else -USE_STATIC_MKL = NONE -endif - -#---------------------------- -# Settings for power and arm arch -#---------------------------- -ARCH := $(shell uname -a) -ifneq (,$(filter $(ARCH), armv6l armv7l powerpc64le ppc64le aarch64)) - USE_SSE=0 -else - USE_SSE=1 -endif - -#---------------------------- -# distributed computing -#---------------------------- - -# whether or not to enable multi-machine supporting -USE_DIST_KVSTORE = 1 - -# whether or not allow to read and write HDFS directly. If yes, then hadoop is -# required -USE_HDFS = 0 - -# path to libjvm.so. required if USE_HDFS=1 -LIBJVM=$(JAVA_HOME)/jre/lib/amd64/server - -# whether or not allow to read and write AWS S3 directly. 
If yes, then -# libcurl4-openssl-dev is required, it can be installed on Ubuntu by -# sudo apt-get install -y libcurl4-openssl-dev -USE_S3 = 1 - -#---------------------------- -# additional operators -#---------------------------- - -# path to folders containing projects specific operators that you don't want to put in src/operators -EXTRA_OPERATORS = - - -#---------------------------- -# plugins -#---------------------------- - -# whether to use caffe integration. This requires installing caffe. -# You also need to add CAFFE_PATH/build/lib to your LD_LIBRARY_PATH -# CAFFE_PATH = $(HOME)/caffe -# MXNET_PLUGINS += plugin/caffe/caffe.mk - -# whether to use torch integration. This requires installing torch. -# You also need to add TORCH_PATH/install/lib to your LD_LIBRARY_PATH -# TORCH_PATH = $(HOME)/torch -# MXNET_PLUGINS += plugin/torch/torch.mk - -# WARPCTC_PATH = $(HOME)/warp-ctc -# MXNET_PLUGINS += plugin/warpctc/warpctc.mk - -# whether to use sframe integration. This requires build sframe -# git@github.com:dato-code/SFrame.git -# SFRAME_PATH = $(HOME)/SFrame -# MXNET_PLUGINS += plugin/sframe/plugin.mk diff --git a/tools/pip/doc/CPU_ADDITIONAL.md b/tools/pip/doc/CPU_ADDITIONAL.md index 090186e652ae..224ca9892f7d 100644 --- a/tools/pip/doc/CPU_ADDITIONAL.md +++ b/tools/pip/doc/CPU_ADDITIONAL.md @@ -18,11 +18,11 @@ Prerequisites ------------- This package supports Linux, Mac OSX, and Windows platforms. You may also want to check: +- [mxnet-cu112](https://pypi.python.org/pypi/mxnet-cu112/) with CUDA-11.2 support. - [mxnet-cu110](https://pypi.python.org/pypi/mxnet-cu110/) with CUDA-11.0 support. - [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu102/) with CUDA-10.2 support. - [mxnet-cu101](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.1 support. - [mxnet-cu100](https://pypi.python.org/pypi/mxnet-cu100/) with CUDA-10.0 support. -- [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. 
- [mxnet-native](https://pypi.python.org/pypi/mxnet-native/) CPU variant without MKLDNN. To use this package on Linux you need the `libquadmath.so.0` shared library. On @@ -33,7 +33,7 @@ a GPL library and MXNet part of the Apache Software Foundation, MXNet must not redistribute `libquadmath.so.0` as part of the Pypi package and users must manually install it. -To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master/install/index.html) for instructions on building from source. +To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master) for instructions on building from source. Installation ------------ diff --git a/tools/pip/doc/CU100_ADDITIONAL.md b/tools/pip/doc/CU100_ADDITIONAL.md index c7638e83b0c1..b0399e54743f 100644 --- a/tools/pip/doc/CU100_ADDITIONAL.md +++ b/tools/pip/doc/CU100_ADDITIONAL.md @@ -18,11 +18,12 @@ Prerequisites ------------- This package supports Linux and Windows platforms. You may also want to check: +- [mxnet-cu112](https://pypi.python.org/pypi/mxnet-cu112/) with CUDA-11.2 support. - [mxnet-cu110](https://pypi.python.org/pypi/mxnet-cu110/) with CUDA-11.0 support. - [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu102/) with CUDA-10.2 support. - [mxnet-cu101](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.1 support. -- [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. - [mxnet](https://pypi.python.org/pypi/mxnet/). +- [mxnet-native](https://pypi.python.org/pypi/mxnet-native/) CPU variant without MKLDNN. To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). 
diff --git a/tools/pip/doc/CU101_ADDITIONAL.md b/tools/pip/doc/CU101_ADDITIONAL.md index 44ebb894c2db..7014f3293556 100644 --- a/tools/pip/doc/CU101_ADDITIONAL.md +++ b/tools/pip/doc/CU101_ADDITIONAL.md @@ -18,11 +18,12 @@ Prerequisites ------------- This package supports Linux and Windows platforms. You may also want to check: +- [mxnet-cu112](https://pypi.python.org/pypi/mxnet-cu112/) with CUDA-11.2 support. - [mxnet-cu110](https://pypi.python.org/pypi/mxnet-cu110/) with CUDA-11.0 support. - [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu102/) with CUDA-10.2 support. - [mxnet-cu100](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.0 support. -- [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. - [mxnet](https://pypi.python.org/pypi/mxnet/). +- [mxnet-native](https://pypi.python.org/pypi/mxnet-native/) CPU variant without MKLDNN. To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). @@ -34,7 +35,7 @@ a GPL library and MXNet part of the Apache Software Foundation, MXNet must not redistribute `libquadmath.so.0` as part of the Pypi package and users must manually install it. -To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.incubator.apache.org/versions/master/install/index.html) for instructions on building from source. +To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master) for instructions on building from source. 
Installation ------------ diff --git a/tools/pip/doc/CU102_ADDITIONAL.md b/tools/pip/doc/CU102_ADDITIONAL.md index 4a81de827d60..ce3f52811f5f 100644 --- a/tools/pip/doc/CU102_ADDITIONAL.md +++ b/tools/pip/doc/CU102_ADDITIONAL.md @@ -18,11 +18,12 @@ Prerequisites ------------- This package supports Linux and Windows platforms. You may also want to check: +- [mxnet-cu112](https://pypi.python.org/pypi/mxnet-cu112/) with CUDA-11.2 support. - [mxnet-cu110](https://pypi.python.org/pypi/mxnet-cu110/) with CUDA-11.0 support. - [mxnet-cu101](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.1 support. - [mxnet-cu100](https://pypi.python.org/pypi/mxnet-cu100/) with CUDA-10.0 support. -- [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. - [mxnet](https://pypi.python.org/pypi/mxnet/). +- [mxnet-native](https://pypi.python.org/pypi/mxnet-native/) CPU variant without MKLDNN. To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). @@ -34,7 +35,7 @@ a GPL library and MXNet part of the Apache Software Foundation, MXNet must not redistribute `libquadmath.so.0` as part of the Pypi package and users must manually install it. -To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.incubator.apache.org/versions/master/install/index.html) for instructions on building from source. +To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master) for instructions on building from source. 
Installation ------------ diff --git a/tools/pip/doc/CU110_ADDITIONAL.md b/tools/pip/doc/CU110_ADDITIONAL.md index 8eaa7b204d35..2745a68ca6d7 100644 --- a/tools/pip/doc/CU110_ADDITIONAL.md +++ b/tools/pip/doc/CU110_ADDITIONAL.md @@ -18,11 +18,12 @@ Prerequisites ------------- This package supports Linux and Windows platforms. You may also want to check: +- [mxnet-cu112](https://pypi.python.org/pypi/mxnet-cu112/) with CUDA-11.2 support. - [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu102/) with CUDA-10.2 support. - [mxnet-cu101](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.1 support. - [mxnet-cu100](https://pypi.python.org/pypi/mxnet-cu100/) with CUDA-10.0 support. -- [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. - [mxnet](https://pypi.python.org/pypi/mxnet/). +- [mxnet-native](https://pypi.python.org/pypi/mxnet-native/) CPU variant without MKLDNN. To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). @@ -34,7 +35,7 @@ a GPL library and MXNet part of the Apache Software Foundation, MXNet must not redistribute `libquadmath.so.0` as part of the Pypi package and users must manually install it. -To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.incubator.apache.org/versions/master/install/index.html) for instructions on building from source. +To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master) for instructions on building from source. 
Installation ------------ diff --git a/tools/pip/doc/CU92_ADDITIONAL.md b/tools/pip/doc/CU112_ADDITIONAL.md similarity index 92% rename from tools/pip/doc/CU92_ADDITIONAL.md rename to tools/pip/doc/CU112_ADDITIONAL.md index a141e5686915..5e7c135f783c 100644 --- a/tools/pip/doc/CU92_ADDITIONAL.md +++ b/tools/pip/doc/CU112_ADDITIONAL.md @@ -23,6 +23,7 @@ This package supports Linux and Windows platforms. You may also want to check: - [mxnet-cu101](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.1 support. - [mxnet-cu100](https://pypi.python.org/pypi/mxnet-cu100/) with CUDA-10.0 support. - [mxnet](https://pypi.python.org/pypi/mxnet/). +- [mxnet-native](https://pypi.python.org/pypi/mxnet-native/) CPU variant without MKLDNN. To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). @@ -34,11 +35,11 @@ a GPL library and MXNet part of the Apache Software Foundation, MXNet must not redistribute `libquadmath.so.0` as part of the Pypi package and users must manually install it. -To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master/install/index.html) for instructions on building from source. +To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master) for instructions on building from source. Installation ------------ To install: ```bash -pip install mxnet-cu92 +pip install mxnet-cu112 ``` diff --git a/tools/pip/doc/NATIVE_ADDITIONAL.md b/tools/pip/doc/NATIVE_ADDITIONAL.md index 23c592b811ba..88677f7fa207 100644 --- a/tools/pip/doc/NATIVE_ADDITIONAL.md +++ b/tools/pip/doc/NATIVE_ADDITIONAL.md @@ -18,10 +18,11 @@ Prerequisites ------------- This package supports Linux and Windows platforms. 
You may also want to check: +- [mxnet-cu112](https://pypi.python.org/pypi/mxnet-cu112/) with CUDA-11.2 support. - [mxnet-cu110](https://pypi.python.org/pypi/mxnet-cu110/) with CUDA-11.0 support. - [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu102/) with CUDA-10.2 support. - [mxnet-cu101](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.1 support. -- [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. +- [mxnet-cu100](https://pypi.python.org/pypi/mxnet-cu100/) with CUDA-10.0 support. - [mxnet](https://pypi.python.org/pypi/mxnet/). To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). @@ -34,7 +35,7 @@ a GPL library and MXNet part of the Apache Software Foundation, MXNet must not redistribute `libquadmath.so.0` as part of the Pypi package and users must manually install it. -To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master/install/index.html) for instructions on building from source. +To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master) for instructions on building from source. 
Installation ------------ diff --git a/tools/pip/setup.py b/tools/pip/setup.py index 1950f1dd28ac..9f56e20d9e4b 100644 --- a/tools/pip/setup.py +++ b/tools/pip/setup.py @@ -137,7 +137,9 @@ def skip_markdown_comments(md): if variant == 'CPU': libraries.append('openblas') else: - if variant.startswith('CU110'): + if variant.startswith('CU112'): + libraries.append('CUDA-11.2') + elif variant.startswith('CU110'): libraries.append('CUDA-11.0') elif variant.startswith('CU102'): libraries.append('CUDA-10.2') @@ -145,18 +147,10 @@ def skip_markdown_comments(md): libraries.append('CUDA-10.1') elif variant.startswith('CU100'): libraries.append('CUDA-10.0') - elif variant.startswith('CU92'): - libraries.append('CUDA-9.2') - elif variant.startswith('CU91'): - libraries.append('CUDA-9.1') - elif variant.startswith('CU90'): - libraries.append('CUDA-9.0') - elif variant.startswith('CU80'): - libraries.append('CUDA-8.0') - elif variant.startswith('CU75'): - libraries.append('CUDA-7.5') - if variant.endswith('MKL'): - libraries.append('MKLDNN') + +from mxnet.runtime import Features +if Features().is_enabled("MKLDNN"): + libraries.append('MKLDNN') short_description += ' This version uses {0}.'.format(' and '.join(libraries)) diff --git a/tools/setup_gpu_build_tools.sh b/tools/setup_gpu_build_tools.sh index 754022a9b516..31ff578de5b5 100755 --- a/tools/setup_gpu_build_tools.sh +++ b/tools/setup_gpu_build_tools.sh @@ -29,7 +29,18 @@ VARIANT=$1 DEPS_PATH=$2 >&2 echo "Setting CUDA versions for $VARIANT" -if [[ $VARIANT == cu110* ]]; then +if [[ $VARIANT == cu112* ]]; then + CUDA_VERSION='11.2.135-1' + CUDA_PATCH_VERSION='11.4.1.1026-1' + CUDA_LIBS_VERSION='10.2.3.135-1' + CUDA_SOLVER_VERSION='11.1.0.135-1' + CUDA_NVTX_VERSION='11.2.67-1' + LIBCUDA_VERSION='460.32.03-0ubuntu1' + LIBCUDNN_VERSION='8.1.0.77-1+cuda11.2' + LIBNCCL_VERSION='2.8.4-1+cuda11.2' + LIBCUDART_VERSION='11.2.72-1' + LIBCUFFT_VERSION='10.4.0.135-1' +elif [[ $VARIANT == cu110* ]]; then CUDA_VERSION='11.0.221-1' 
CUDA_PATCH_VERSION='11.2.0.252-1' CUDA_LIBS_VERSION='10.2.1.245-1' @@ -109,7 +120,31 @@ if [[ $VARIANT == cu* ]]; then fi # list of debs to download from nvidia -if [[ $VARIANT == cu110* ]]; then +if [[ $VARIANT == cu112* ]]; then + cuda_files=( \ + "libcublas-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ + "libcublas-dev-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ + "cuda-cudart-${CUDA_MAJOR_DASH}_${LIBCUDART_VERSION}_amd64.deb" \ + "cuda-cudart-dev-${CUDA_MAJOR_DASH}_${LIBCUDART_VERSION}_amd64.deb" \ + "libcurand-${CUDA_MAJOR_DASH}_${CUDA_LIBS_VERSION}_amd64.deb" \ + "libcurand-dev-${CUDA_MAJOR_DASH}_${CUDA_LIBS_VERSION}_amd64.deb" \ + "libcufft-${CUDA_MAJOR_DASH}_${LIBCUFFT_VERSION}_amd64.deb" \ + "libcufft-dev-${CUDA_MAJOR_DASH}_${LIBCUFFT_VERSION}_amd64.deb" \ + "cuda-nvrtc-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ + "cuda-nvrtc-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ + "libcusolver-${CUDA_MAJOR_DASH}_${CUDA_SOLVER_VERSION}_amd64.deb" \ + "libcusolver-dev-${CUDA_MAJOR_DASH}_${CUDA_SOLVER_VERSION}_amd64.deb" \ + "cuda-nvcc-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ + "cuda-nvtx-${CUDA_MAJOR_DASH}_${CUDA_NVTX_VERSION}_amd64.deb" \ + "libcuda1-${LIBCUDA_MAJOR}_${LIBCUDA_VERSION}_amd64.deb" \ + "cuda-nvprof-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ + "nvidia-${LIBCUDA_MAJOR}_${LIBCUDA_VERSION}_amd64.deb" \ + ) + ml_files=( \ + "libcudnn${LIBCUDNN_MAJOR}-dev_${LIBCUDNN_VERSION}_amd64.deb" \ + "libnccl-dev_${LIBNCCL_VERSION}_amd64.deb" \ + ) +elif [[ $VARIANT == cu110* ]]; then cuda_files=( \ "libcublas-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ "libcublas-dev-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ diff --git a/tools/staticbuild/README.md b/tools/staticbuild/README.md index e21abdfa0b3e..2ca892331a74 100644 --- a/tools/staticbuild/README.md +++ b/tools/staticbuild/README.md @@ -25,9 +25,9 @@ automatically identifing the system version, number of cores, and all environment variable settings. 
Here are examples you can run with this script: ``` -tools/staticbuild/build.sh cu102 +tools/staticbuild/build.sh cu112 ``` -This would build the mxnet package based on CUDA 10.2. Currently, we support variants cpu, native, cu92, cu100, cu101, cu102 and cu110. All of these variants expect native have MKL-DNN backend enabled. +This would build the mxnet package based on CUDA 11.2. Currently, we support variants cpu, native, cu100, cu101, cu102, cu110, and cu112. All of these variants except native have MKL-DNN backend enabled. ``` tools/staticbuild/build.sh cpu