Skip to content

Commit

Permalink
Revert "NVFUSER_DISTRIBUTED instead of USE_DISTRIBUTED (#1711)"
Browse files Browse the repository at this point in the history
This reverts commit 9ae6c76.
  • Loading branch information
wujingyue committed Apr 26, 2024
1 parent e5c2bd0 commit 5a2d420
Show file tree
Hide file tree
Showing 8 changed files with 10 additions and 49 deletions.
8 changes: 0 additions & 8 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -15,13 +15,6 @@ set(NVFUSER_THIRD_PARTY_DIR "${NVFUSER_ROOT}/third_party")
option(NVFUSER_STANDALONE_BUILD_WITH_UCC "" OFF)
option(NVFUSER_BUILD_WITH_ASAN "Build nvFuser with asan" OFF)

include(CMakeDependentOption)
cmake_dependent_option(NVFUSER_DISTRIBUTED "" ON "USE_DISTRIBUTED" OFF)
if (NVFUSER_DISTRIBUTED)
add_compile_definitions(NVFUSER_DISTRIBUTED)
endif()
message(STATUS "Setting NVFUSER_DISTRIBUTED=${NVFUSER_DISTRIBUTED}")

# We try to update which C++ standard we use together in lockstep across all
# built libraries, and these variables control which that is. Generally we are
# on C++20, but we still support a version of CUDA (11) that does not recognize
Expand Down Expand Up @@ -769,7 +762,6 @@ message(STATUS "******** Nvfuser configuration summary ********")
message(STATUS " UCC_FOUND: ${UCC_FOUND}")
message(STATUS " NVFUSER_STANDALONE_BUILD_WITH_UCC : ${NVFUSER_STANDALONE_BUILD_WITH_UCC}")
message(STATUS " NVFUSER_BUILD_WITH_ASAN : ${NVFUSER_BUILD_WITH_ASAN}")
message(STATUS " NVFUSER_DISTRIBUTED : ${NVFUSER_DISTRIBUTED}")
message(STATUS " NVFUSER_CPP_STANDARD : ${NVFUSER_CPP_STANDARD}")

if(NVFUSER_STANDALONE_BUILD_WITH_UCC)
Expand Down
4 changes: 2 additions & 2 deletions csrc/multidevice/communication.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
*/
// clang-format on
#include <multidevice/communication.h>
#if defined(NVFUSER_DISTRIBUTED) && defined(USE_C10D_NCCL)
#if defined(USE_DISTRIBUTED) && defined(USE_C10D_NCCL)
#include <torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp>
#endif
#include <utils.h>
Expand Down Expand Up @@ -229,7 +229,7 @@ c10::intrusive_ptr<c10d::Work> Reduce::post(
c10d::ReduceOptions options = {
.reduceOp = params_.redOp, .rootRank = root_relative_index_};
auto team_backend = comm.getBackendForTeam(params_.team, backend);
#if defined(NVFUSER_DISTRIBUTED) && defined(USE_C10D_NCCL)
#if defined(USE_DISTRIBUTED) && defined(USE_C10D_NCCL)
auto nccl_backend = dynamic_cast<c10d::ProcessGroupNCCL*>(team_backend.get());
if (nccl_backend) {
#if NVF_TORCH_VERSION_NO_LESS(2, 3, 0)
Expand Down
2 changes: 1 addition & 1 deletion csrc/multidevice/communication.h
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@

#include <multidevice/communicator.h>
#include <multidevice/multidevice.h>
#ifdef NVFUSER_DISTRIBUTED
#ifdef USE_DISTRIBUTED
#include <torch/csrc/distributed/c10d/Types.hpp>
#else
#include <multidevice/c10d_mock.h>
Expand Down
8 changes: 4 additions & 4 deletions csrc/multidevice/communicator.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
#include <netdb.h>
#include <map>

#ifdef NVFUSER_DISTRIBUTED
#ifdef USE_DISTRIBUTED
#include <torch/csrc/distributed/c10d/PrefixStore.hpp>
#ifdef USE_C10D_GLOO
#include <torch/csrc/distributed/c10d/ProcessGroupGloo.hpp>
Expand Down Expand Up @@ -132,7 +132,7 @@ inline std::string getTeamKey(const Team& team, CommunicatorBackend backend) {
});
}

#ifdef NVFUSER_DISTRIBUTED
#ifdef USE_DISTRIBUTED
// creates and return a process group backend
c10::intrusive_ptr<c10d::Backend> createBackend(
CommunicatorBackend backend,
Expand Down Expand Up @@ -187,7 +187,7 @@ Communicator::Communicator(
return;
}

#ifdef NVFUSER_DISTRIBUTED
#ifdef USE_DISTRIBUTED
c10d::TCPStoreOptions store_opts;
{
char hostname[HOST_NAME_MAX]; // NOLINT (modernize-avoid-c-arrays)
Expand Down Expand Up @@ -222,7 +222,7 @@ c10::intrusive_ptr<c10d::Backend> Communicator::getBackendForTeam(
// check if backend associated with the team is present in the cache
if (backends_.find(team_key) ==
backends_.end()) { // create the backend and cache it
#ifdef NVFUSER_DISTRIBUTED
#ifdef USE_DISTRIBUTED
// check that the caller's rank belongs to the requested team
auto rank_it = std::find(team.begin(), team.end(), deviceId());
NVF_ERROR(
Expand Down
2 changes: 1 addition & 1 deletion csrc/multidevice/communicator.h
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@

#include <exceptions.h>
#include <multidevice/multidevice.h>
#ifdef NVFUSER_DISTRIBUTED
#ifdef USE_DISTRIBUTED
#include <torch/csrc/distributed/c10d/Backend.hpp>
#include <torch/csrc/distributed/c10d/TCPStore.hpp>
#include <torch/csrc/distributed/c10d/Work.hpp>
Expand Down
2 changes: 1 addition & 1 deletion csrc/multidevice/utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
namespace nvfuser {

NVF_API bool distributedEnabled() {
#ifdef NVFUSER_DISTRIBUTED
#ifdef USE_DISTRIBUTED
return true;
#else
return false;
Expand Down
17 changes: 1 addition & 16 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,6 @@
# --build-with-ucc
# Build nvfuser with UCC support. You may need to specify environment variables of UCC_HOME, UCC_DIR, UCX_HOME, UCX_DIR.
#
# --build-without-distributed
# Build nvfuser without multidevice support
#
# --debug
# Building nvfuser in debug mode
#
Expand Down Expand Up @@ -74,7 +71,6 @@
NO_NINJA = False
BUILD_WITH_UCC = False
BUILD_WITH_ASAN = False
BUILD_WITHOUT_DISTRIBUTED = False
OVERWRITE_VERSION = False
VERSION_TAG = None
BUILD_TYPE = "Release"
Expand Down Expand Up @@ -106,9 +102,6 @@
if arg == "--build-with-asan":
BUILD_WITH_ASAN = True
continue
if arg == "--build-without-distributed":
BUILD_WITHOUT_DISTRIBUTED = True
continue
if arg == "--debug":
BUILD_TYPE = "Debug"
continue
Expand Down Expand Up @@ -289,10 +282,7 @@ def cmake(install_prefix: str = "./nvfuser"):
if not os.path.exists(cmake_build_dir):
os.makedirs(cmake_build_dir)

from tools.gen_nvfuser_version import (
get_pytorch_cmake_prefix,
get_pytorch_use_distributed,
)
from tools.gen_nvfuser_version import get_pytorch_cmake_prefix

# this is used to suppress import error.
# so we can get the right pytorch prefix for cmake
Expand All @@ -306,16 +296,13 @@ def cmake(install_prefix: str = "./nvfuser"):

logger.setLevel(logger_level)

pytorch_use_distributed = get_pytorch_use_distributed()

# generate cmake directory
cmd_str = [
get_cmake_bin(),
pytorch_cmake_config,
"-DCMAKE_BUILD_TYPE=" + BUILD_TYPE,
f"-DCMAKE_INSTALL_PREFIX={install_prefix}",
f"-DNVFUSER_CPP_STANDARD={CPP_STANDARD}",
f"-DUSE_DISTRIBUTED={pytorch_use_distributed}",
"-B",
cmake_build_dir,
]
Expand All @@ -333,8 +320,6 @@ def cmake(install_prefix: str = "./nvfuser"):
cmd_str.append("-DBUILD_NVFUSER_BENCHMARK=ON")
if BUILD_WITH_ASAN:
cmd_str.append("-DNVFUSER_BUILD_WITH_ASAN=ON")
if BUILD_WITHOUT_DISTRIBUTED:
cmd_str.append("-DNVFUSER_DISTRIBUTED=OFF")
cmd_str.append(".")

print(f"Configuring CMake with {' '.join(cmd_str)}")
Expand Down
16 changes: 0 additions & 16 deletions tools/gen_nvfuser_version.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,22 +45,6 @@ def get_pytorch_cmake_prefix():
return stdout_msg.decode("utf-8").rstrip("\n")


def get_pytorch_use_distributed():
    """Report whether the installed PyTorch was built with distributed support.

    The check is performed in a *child* interpreter (``sys.executable -c ...``)
    so that this build process never imports torch itself — importing torch
    here would load the existing nvfuser shared library and keep it open while
    the build is trying to replace it.

    Returns:
        str: the printed value of ``torch._C._has_distributed()`` with the
        trailing newline stripped — i.e. the string ``"True"`` or ``"False"``.

    Raises:
        subprocess.CalledProcessError: if the child interpreter exits non-zero
            (e.g. torch is not importable). The original code swallowed this
            and returned ``""``, which would silently produce a malformed
            ``-DUSE_DISTRIBUTED=`` CMake flag downstream.
    """
    import subprocess

    # need to do this in a separate process so we are not going to delete
    # nvfuser library while it's loaded by torch
    result = subprocess.run(
        [
            sys.executable,
            "-c",
            "import torch; print(torch._C._has_distributed())",
        ],
        stdout=subprocess.PIPE,
        check=True,  # fail loudly instead of returning an empty string
    )
    return result.stdout.decode("utf-8").rstrip("\n")


if __name__ == "__main__":
version_file = nvfuser_root / "nvfuser" / "version.py"
with open(version_file, "w") as f:
Expand Down

0 comments on commit 5a2d420

Please sign in to comment.