Commit

Allow using stream-ordered and managed RMM allocators in benchmarks (#1012)

This change requires UCX 1.14 for async support (openucx/ucx#8623) and UVM fixes (openucx/ucx#8754).
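
For context, the three allocator modes the new flags select map onto RMM roughly as follows. This is an illustrative sketch, not code from the commit; the pool size and the CuPy hookup are chosen here for the example:

    import cupy
    import rmm

    # Default mode: RMM's pooled suballocator on top of cudaMalloc.
    rmm.reinitialize(pool_allocator=True, initial_pool_size=2**30)

    # --enable-rmm-managed: back allocations with managed (UVM) memory,
    # which permits oversubscribing device memory.
    rmm.reinitialize(pool_allocator=True, managed_memory=True, initial_pool_size=2**30)

    # --enable-rmm-async: the stream-ordered cudaMallocAsync allocator. It
    # maintains its own pool, hence the flag implies --disable-rmm-pool.
    rmm.mr.set_current_device_resource(rmm.mr.CudaAsyncMemoryResource())
    cupy.cuda.set_allocator(rmm.rmm_cupy_allocator)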

Authors:
  - Peter Andreas Entschev (https://github.com/pentschev)
  - Mads R. B. Kristensen (https://github.com/madsbk)

Approvers:
  - Benjamin Zaitlen (https://github.com/quasiben)

URL: #1012
pentschev authored Feb 13, 2023
1 parent 4d0bdfd commit a0fda76
Showing 2 changed files with 32 additions and 4 deletions.
2 changes: 2 additions & 0 deletions dask_cuda/benchmarks/common.py
@@ -121,6 +121,8 @@ def run(client: Client, args: Namespace, config: Config):
         args.type == "gpu",
         args.rmm_pool_size,
         args.disable_rmm_pool,
+        args.enable_rmm_async,
+        args.enable_rmm_managed,
         args.rmm_log_directory,
         args.enable_rmm_statistics,
     )
34 changes: 30 additions & 4 deletions dask_cuda/benchmarks/utils.py
@@ -98,6 +98,16 @@ def parse_benchmark_args(description="Generic dask-cuda Benchmark", args_list=[]
     cluster_args.add_argument(
         "--disable-rmm-pool", action="store_true", help="Disable the RMM memory pool"
     )
+    cluster_args.add_argument(
+        "--enable-rmm-managed",
+        action="store_true",
+        help="Enable RMM managed memory allocator",
+    )
+    cluster_args.add_argument(
+        "--enable-rmm-async",
+        action="store_true",
+        help="Enable RMM async memory allocator (implies --disable-rmm-pool)",
+    )
     cluster_args.add_argument(
         "--rmm-log-directory",
         default=None,
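
The new options are plain store_true flags, so they surface on the parsed namespace with underscores. A minimal sketch of exercising them (the args_list below is hypothetical):

    from dask_cuda.benchmarks.utils import parse_benchmark_args

    # Both flags default to False; passing one sets only that attribute.
    args = parse_benchmark_args(
        description="Example benchmark",
        args_list=["--enable-rmm-async"],
    )
    assert args.enable_rmm_async is True
    assert args.enable_rmm_managed is False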
@@ -346,6 +356,8 @@ def setup_memory_pool(
     dask_worker=None,
     pool_size=None,
     disable_pool=False,
+    rmm_async=False,
+    rmm_managed=False,
     log_directory=None,
     statistics=False,
 ):
Expand All @@ -357,10 +369,13 @@ def setup_memory_pool(

logging = log_directory is not None

if not disable_pool:
if rmm_async:
rmm.mr.set_current_device_resource(rmm.mr.CudaAsyncMemoryResource())
cupy.cuda.set_allocator(rmm.rmm_cupy_allocator)
else:
rmm.reinitialize(
pool_allocator=True,
devices=0,
pool_allocator=not disable_pool,
managed_memory=rmm_managed,
initial_pool_size=pool_size,
logging=logging,
log_file_name=get_rmm_log_file_name(dask_worker, logging, log_directory),
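
One way to confirm which allocator a worker ended up with is to inspect RMM's current device resource. A small sketch (the class names are RMM's; the print is illustrative):

    import rmm

    # After setup_memory_pool runs, the active resource reflects the mode,
    # e.g. CudaAsyncMemoryResource when --enable-rmm-async was requested.
    mr = rmm.mr.get_current_device_resource()
    print(type(mr).__name__)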
Expand All @@ -373,14 +388,23 @@ def setup_memory_pool(


def setup_memory_pools(
client, is_gpu, pool_size, disable_pool, log_directory, statistics
client,
is_gpu,
pool_size,
disable_pool,
rmm_async,
rmm_managed,
log_directory,
statistics,
):
if not is_gpu:
return
client.run(
setup_memory_pool,
pool_size=pool_size,
disable_pool=disable_pool,
rmm_async=rmm_async,
rmm_managed=rmm_managed,
log_directory=log_directory,
statistics=statistics,
)
Expand All @@ -390,6 +414,8 @@ def setup_memory_pools(
setup_memory_pool,
pool_size=1e9,
disable_pool=disable_pool,
rmm_async=rmm_async,
rmm_managed=rmm_managed,
log_directory=log_directory,
statistics=statistics,
)
Expand Down
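
setup_memory_pools fans the per-worker setup out via Client.run. A minimal sketch of that pattern under assumed defaults (the LocalCUDACluster and the flag values here are illustrative, not from the benchmarks):

    from dask.distributed import Client
    from dask_cuda import LocalCUDACluster
    from dask_cuda.benchmarks.utils import setup_memory_pool

    if __name__ == "__main__":
        with LocalCUDACluster() as cluster, Client(cluster) as client:
            # Client.run executes the function once on every worker; distributed
            # injects the dask_worker argument named in its signature.
            client.run(
                setup_memory_pool,
                pool_size=2**30,
                disable_pool=False,
                rmm_async=False,
                rmm_managed=True,
            )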
