diff --git a/CMakeLists.txt b/CMakeLists.txt index 6ede7cb1219..001c3ad0950 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -15,13 +15,6 @@ set(NVFUSER_THIRD_PARTY_DIR "${NVFUSER_ROOT}/third_party") option(NVFUSER_STANDALONE_BUILD_WITH_UCC "" OFF) option(NVFUSER_BUILD_WITH_ASAN "Build nvFuser with asan" OFF) -include(CMakeDependentOption) -cmake_dependent_option(NVFUSER_DISTRIBUTED "" ON "USE_DISTRIBUTED" OFF) -if (NVFUSER_DISTRIBUTED) - add_compile_definitions(NVFUSER_DISTRIBUTED) -endif() -message(STATUS "Setting NVFUSER_DISTRIBUTED=${NVFUSER_DISTRIBUTED}") - # We try to update which C++ standard we use together in lockstep across all # built libraries, and these variables control which that is. Generally we are # on C++20, but we still support a version of CUDA (11) that does not recognize @@ -769,7 +762,6 @@ message(STATUS "******** Nvfuser configuration summary ********") message(STATUS " UCC_FOUND: ${UCC_FOUND}") message(STATUS " NVFUSER_STANDALONE_BUILD_WITH_UCC : ${NVFUSER_STANDALONE_BUILD_WITH_UCC}") message(STATUS " NVFUSER_BUILD_WITH_ASAN : ${NVFUSER_BUILD_WITH_ASAN}") -message(STATUS " NVFUSER_DISTRIBUTED : ${NVFUSER_DISTRIBUTED}") message(STATUS " NVFUSER_CPP_STANDARD : ${NVFUSER_CPP_STANDARD}") if(NVFUSER_STANDALONE_BUILD_WITH_UCC) diff --git a/csrc/multidevice/communication.cpp b/csrc/multidevice/communication.cpp index 4f4db1711ab..0882e9af335 100644 --- a/csrc/multidevice/communication.cpp +++ b/csrc/multidevice/communication.cpp @@ -6,7 +6,7 @@ */ // clang-format on #include <multidevice/communication.h> -#if defined(NVFUSER_DISTRIBUTED) && defined(USE_C10D_NCCL) +#if defined(USE_DISTRIBUTED) && defined(USE_C10D_NCCL) #include <torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp> #endif #include <utils.h> @@ -229,7 +229,7 @@ c10::intrusive_ptr<c10d::Work> Reduce::post( c10d::ReduceOptions options = { .reduceOp = params_.redOp, .rootRank = root_relative_index_}; auto team_backend = comm.getBackendForTeam(params_.team, backend); -#if defined(NVFUSER_DISTRIBUTED) && defined(USE_C10D_NCCL) +#if defined(USE_DISTRIBUTED) && defined(USE_C10D_NCCL) auto 
nccl_backend = dynamic_cast<c10d::ProcessGroupNCCL*>(team_backend.get()); if (nccl_backend) { #if NVF_TORCH_VERSION_NO_LESS(2, 3, 0) diff --git a/csrc/multidevice/communication.h b/csrc/multidevice/communication.h index 5c2e59f887d..77f7a4b6de4 100644 --- a/csrc/multidevice/communication.h +++ b/csrc/multidevice/communication.h @@ -9,7 +9,7 @@ #include <multidevice/communicator.h> #include <type.h> -#ifdef NVFUSER_DISTRIBUTED +#ifdef USE_DISTRIBUTED #include <torch/csrc/distributed/c10d/Types.hpp> #else #include <multidevice/c10d_mock.h> diff --git a/csrc/multidevice/communicator.cpp b/csrc/multidevice/communicator.cpp index b113edd61d6..67766ce85d4 100644 --- a/csrc/multidevice/communicator.cpp +++ b/csrc/multidevice/communicator.cpp @@ -10,7 +10,7 @@ #include <netdb.h> #include <map> -#ifdef NVFUSER_DISTRIBUTED +#ifdef USE_DISTRIBUTED #include <torch/csrc/distributed/c10d/PrefixStore.hpp> #ifdef USE_C10D_GLOO #include <torch/csrc/distributed/c10d/ProcessGroupGloo.hpp> @@ -132,7 +132,7 @@ inline std::string getTeamKey(const Team& team, CommunicatorBackend backend) { }); } -#ifdef NVFUSER_DISTRIBUTED +#ifdef USE_DISTRIBUTED // creates and return a process group backend c10::intrusive_ptr<c10d::Backend> createBackend( CommunicatorBackend backend, @@ -187,7 +187,7 @@ Communicator::Communicator( return; } -#ifdef NVFUSER_DISTRIBUTED +#ifdef USE_DISTRIBUTED c10d::TCPStoreOptions store_opts; { char hostname[HOST_NAME_MAX]; // NOLINT (modernize-avoid-c-arrays) @@ -222,7 +222,7 @@ c10::intrusive_ptr<c10d::Backend> Communicator::getBackendForTeam( // check if backend associated with the team is present in the cache if (backends_.find(team_key) == backends_.end()) { // create the backend and cache it -#ifdef NVFUSER_DISTRIBUTED +#ifdef USE_DISTRIBUTED // check that the caller's rank belongs to the requested team auto rank_it = std::find(team.begin(), team.end(), deviceId()); NVF_ERROR( diff --git a/csrc/multidevice/communicator.h b/csrc/multidevice/communicator.h index 9666ec10cb9..3a8fc465a01 100644 --- a/csrc/multidevice/communicator.h +++ b/csrc/multidevice/communicator.h @@ -13,7 +13,7 @@ #include <multidevice/multidevice.h> #include <visibility.h> -#ifdef NVFUSER_DISTRIBUTED +#ifdef USE_DISTRIBUTED #include <torch/csrc/distributed/c10d/Backend.hpp> #include <torch/csrc/distributed/c10d/TCPStore.hpp> #include <torch/csrc/distributed/c10d/Work.hpp> diff --git a/csrc/multidevice/utils.cpp 
b/csrc/multidevice/utils.cpp index 037862e41e0..12a0f4b9a7c 100644 --- a/csrc/multidevice/utils.cpp +++ b/csrc/multidevice/utils.cpp @@ -21,7 +21,7 @@ namespace nvfuser { NVF_API bool distributedEnabled() { -#ifdef NVFUSER_DISTRIBUTED +#ifdef USE_DISTRIBUTED return true; #else return false; diff --git a/setup.py b/setup.py index 5f5e47a8e03..3521035db1a 100644 --- a/setup.py +++ b/setup.py @@ -26,9 +26,6 @@ # --build-with-ucc # Build nvfuser with UCC support. You may need to specify environment variables of UCC_HOME, UCC_DIR, UCX_HOME, UCX_DIR. # -# --build-without-distributed -# Build nvfuser without multidevice support -# # --debug # Building nvfuser in debug mode # @@ -74,7 +71,6 @@ NO_NINJA = False BUILD_WITH_UCC = False BUILD_WITH_ASAN = False -BUILD_WITHOUT_DISTRIBUTED = False OVERWRITE_VERSION = False VERSION_TAG = None BUILD_TYPE = "Release" @@ -106,9 +102,6 @@ if arg == "--build-with-asan": BUILD_WITH_ASAN = True continue - if arg == "--build-without-distributed": - BUILD_WITHOUT_DISTRIBUTED = True - continue if arg == "--debug": BUILD_TYPE = "Debug" continue @@ -289,10 +282,7 @@ def cmake(install_prefix: str = "./nvfuser"): if not os.path.exists(cmake_build_dir): os.makedirs(cmake_build_dir) - from tools.gen_nvfuser_version import ( - get_pytorch_cmake_prefix, - get_pytorch_use_distributed, - ) + from tools.gen_nvfuser_version import get_pytorch_cmake_prefix # this is used to suppress import error. 
# so we can get the right pytorch prefix for cmake @@ -306,8 +296,6 @@ def cmake(install_prefix: str = "./nvfuser"): logger.setLevel(logger_level) - pytorch_use_distributed = get_pytorch_use_distributed() - # generate cmake directory cmd_str = [ get_cmake_bin(), @@ -315,7 +303,6 @@ def cmake(install_prefix: str = "./nvfuser"): "-DCMAKE_BUILD_TYPE=" + BUILD_TYPE, f"-DCMAKE_INSTALL_PREFIX={install_prefix}", f"-DNVFUSER_CPP_STANDARD={CPP_STANDARD}", - f"-DUSE_DISTRIBUTED={pytorch_use_distributed}", "-B", cmake_build_dir, ] @@ -333,8 +320,6 @@ def cmake(install_prefix: str = "./nvfuser"): cmd_str.append("-DBUILD_NVFUSER_BENCHMARK=ON") if BUILD_WITH_ASAN: cmd_str.append("-DNVFUSER_BUILD_WITH_ASAN=ON") - if BUILD_WITHOUT_DISTRIBUTED: - cmd_str.append("-DNVFUSER_DISTRIBUTED=OFF") cmd_str.append(".") print(f"Configuring CMake with {' '.join(cmd_str)}") diff --git a/tools/gen_nvfuser_version.py b/tools/gen_nvfuser_version.py index 789aa96d37a..7537ff3ad4a 100644 --- a/tools/gen_nvfuser_version.py +++ b/tools/gen_nvfuser_version.py @@ -45,22 +45,6 @@ def get_pytorch_cmake_prefix(): return stdout_msg.decode("utf-8").rstrip("\n") -def get_pytorch_use_distributed(): - from subprocess import Popen, PIPE - - # need to do this in a separate process so we are not going to delete nvfuser library while it's loaded by torch - process_torch_prefix = Popen( - [ - sys.executable, - "-c", - "import torch; print(torch._C._has_distributed())", - ], - stdout=PIPE, - ) - stdout_msg, error_msg = process_torch_prefix.communicate() - return stdout_msg.decode("utf-8").rstrip("\n") - - if __name__ == "__main__": version_file = nvfuser_root / "nvfuser" / "version.py" with open(version_file, "w") as f: