diff --git a/conda/environments/cudf_dev_cuda11.0.yml b/conda/environments/cudf_dev_cuda11.0.yml
index 171a5ed8af5..e61b76145e3 100644
--- a/conda/environments/cudf_dev_cuda11.0.yml
+++ b/conda/environments/cudf_dev_cuda11.0.yml
@@ -44,7 +44,7 @@ dependencies:
- dask==2021.4.0
- distributed>=2.22.0,<=2021.4.0
- streamz
- - dlpack==0.3
+ - dlpack>=0.5,<0.6.0a0
- arrow-cpp=1.0.1
- arrow-cpp-proc * cuda
- double-conversion
diff --git a/conda/environments/cudf_dev_cuda11.2.yml b/conda/environments/cudf_dev_cuda11.2.yml
index 28373d203d1..3ad4bf3d4e1 100644
--- a/conda/environments/cudf_dev_cuda11.2.yml
+++ b/conda/environments/cudf_dev_cuda11.2.yml
@@ -44,7 +44,7 @@ dependencies:
- dask==2021.4.0
- distributed>=2.22.0,<=2021.4.0
- streamz
- - dlpack==0.3
+ - dlpack>=0.5,<0.6.0a0
- arrow-cpp=1.0.1
- arrow-cpp-proc * cuda
- double-conversion
diff --git a/conda/recipes/cudf/meta.yaml b/conda/recipes/cudf/meta.yaml
index 631ebf16aea..d1aaf924555 100644
--- a/conda/recipes/cudf/meta.yaml
+++ b/conda/recipes/cudf/meta.yaml
@@ -29,7 +29,7 @@ requirements:
- cython >=0.29,<0.30
- setuptools
- numba >=0.53.1
- - dlpack 0.3
+ - dlpack>=0.5,<0.6.0a0
- pyarrow 1.0.1
- libcudf {{ version }}
- rmm {{ minor_version }}
diff --git a/conda/recipes/libcudf/meta.yaml b/conda/recipes/libcudf/meta.yaml
index 3fa4cbdff51..dc41c439d27 100644
--- a/conda/recipes/libcudf/meta.yaml
+++ b/conda/recipes/libcudf/meta.yaml
@@ -39,7 +39,7 @@ requirements:
- cudatoolkit {{ cuda_version }}.*
- arrow-cpp 1.0.1
- arrow-cpp-proc * cuda
- - dlpack 0.3
+ - dlpack>=0.5,<0.6.0a0
run:
- {{ pin_compatible('cudatoolkit', max_pin='x.x') }}
- arrow-cpp-proc * cuda
diff --git a/cpp/cmake/thirdparty/CUDF_GetDLPack.cmake b/cpp/cmake/thirdparty/CUDF_GetDLPack.cmake
index b41c6d3b8d2..349f75d604f 100644
--- a/cpp/cmake/thirdparty/CUDF_GetDLPack.cmake
+++ b/cpp/cmake/thirdparty/CUDF_GetDLPack.cmake
@@ -36,6 +36,6 @@ function(find_and_configure_dlpack VERSION)
set(DLPACK_INCLUDE_DIR "${dlpack_SOURCE_DIR}/include" PARENT_SCOPE)
endfunction()
-set(CUDF_MIN_VERSION_dlpack 0.3)
+set(CUDF_MIN_VERSION_dlpack 0.5)
find_and_configure_dlpack(${CUDF_MIN_VERSION_dlpack})
diff --git a/cpp/include/cudf/interop.hpp b/cpp/include/cudf/interop.hpp
index 9dbde1432aa..bbe0eb0eaac 100644
--- a/cpp/include/cudf/interop.hpp
+++ b/cpp/include/cudf/interop.hpp
@@ -35,8 +35,8 @@ namespace cudf {
/**
* @brief Convert a DLPack DLTensor into a cudf table
*
- * The `device_type` of the DLTensor must be `kDLGPU`, `kDLCPU`, or
- * `kDLCPUPinned`, and `device_id` must match the current device. The `ndim`
+ * The `device_type` of the DLTensor must be `kDLCPU`, `kDLCUDA`, or
+ * `kDLCUDAHost`, and `device_id` must match the current device. The `ndim`
* must be set to 1 or 2. The `dtype` must have 1 lane and the bitsize must
* match a supported `cudf::data_type`.
*
diff --git a/cpp/libcudf_kafka/cmake/thirdparty/CUDF_KAFKA_GetCUDF.cmake b/cpp/libcudf_kafka/cmake/thirdparty/CUDF_KAFKA_GetCUDF.cmake
index 50c8b696d8c..5b0f31035c3 100644
--- a/cpp/libcudf_kafka/cmake/thirdparty/CUDF_KAFKA_GetCUDF.cmake
+++ b/cpp/libcudf_kafka/cmake/thirdparty/CUDF_KAFKA_GetCUDF.cmake
@@ -28,7 +28,7 @@ function(find_and_configure_cudf VERSION)
endif()
endfunction()
-set(CUDA_KAFKA_MIN_VERSION_cudf "${CUDA_KAFKA_VERSION_MAJOR}.${CUDA_KAFKA_VERSION_MINOR}")
+set(CUDA_KAFKA_MIN_VERSION_cudf "${CUDA_KAFKA_VERSION_MAJOR}.${CUDA_KAFKA_VERSION_MINOR}.00")
find_and_configure_cudf(${CUDA_KAFKA_MIN_VERSION_cudf})
if(cudf_ADDED)
diff --git a/cpp/src/interop/dlpack.cpp b/cpp/src/interop/dlpack.cpp
index 571c695e66e..0e0ce8c4335 100644
--- a/cpp/src/interop/dlpack.cpp
+++ b/cpp/src/interop/dlpack.cpp
@@ -137,15 +137,15 @@ std::unique_ptr<table> from_dlpack(DLManagedTensor const* managed_tensor,
auto const& tensor = managed_tensor->dl_tensor;
// We can copy from host or device pointers
- CUDF_EXPECTS(kDLGPU == tensor.ctx.device_type || kDLCPU == tensor.ctx.device_type ||
- kDLCPUPinned == tensor.ctx.device_type,
- "DLTensor must be GPU, CPU, or pinned type");
+ CUDF_EXPECTS(tensor.device.device_type == kDLCPU || tensor.device.device_type == kDLCUDA ||
+ tensor.device.device_type == kDLCUDAHost,
+ "DLTensor device type must be CPU, CUDA or CUDAHost");
// Make sure the current device ID matches the Tensor's device ID
- if (tensor.ctx.device_type != kDLCPU) {
+ if (tensor.device.device_type != kDLCPU) {
int device_id = 0;
CUDA_TRY(cudaGetDevice(&device_id));
- CUDF_EXPECTS(tensor.ctx.device_id == device_id, "DLTensor device ID must be current device");
+ CUDF_EXPECTS(tensor.device.device_id == device_id, "DLTensor device ID must be current device");
}
// Currently only 1D and 2D tensors are supported
@@ -234,8 +234,8 @@ DLManagedTensor* to_dlpack(table_view const& input,
tensor.strides[1] = num_rows;
}
- CUDA_TRY(cudaGetDevice(&tensor.ctx.device_id));
- tensor.ctx.device_type = kDLGPU;
+ CUDA_TRY(cudaGetDevice(&tensor.device.device_id));
+ tensor.device.device_type = kDLCUDA;
// If there is only one column, then a 1D tensor can just copy the pointer
// to the data in the column, and the deleter should not delete the original
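On the export side, a hedged sketch of what a caller now sees from `cudf::to_dlpack`; the hand-off to a downstream library is hypothetical, only the field and enum names come from this change.

```cpp
// Sketch only: inspect and release a tensor exported by cudf::to_dlpack,
// using dl_tensor.device (previously dl_tensor.ctx) and kDLCUDA (previously kDLGPU).
#include <cudf/interop.hpp>
#include <cudf/table/table_view.hpp>
#include <dlpack/dlpack.h>

void export_table(cudf::table_view const& view)
{
  DLManagedTensor* managed = cudf::to_dlpack(view);

  auto const& tensor = managed->dl_tensor;
  if (tensor.device.device_type == kDLCUDA) {
    // hand `managed` to a DLPack consumer here
  }

  // The producer installs a deleter that frees the tensor and its device buffer.
  if (managed->deleter != nullptr) { managed->deleter(managed); }
}
```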
diff --git a/cpp/tests/interop/dlpack_test.cpp b/cpp/tests/interop/dlpack_test.cpp
index 0512ef73fda..4d8a94f276d 100644
--- a/cpp/tests/interop/dlpack_test.cpp
+++ b/cpp/tests/interop/dlpack_test.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, NVIDIA CORPORATION.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -111,7 +111,7 @@ TEST_F(DLPackUntypedTests, UnsupportedDeviceTypeFromDlpack)
unique_managed_tensor tensor(cudf::to_dlpack(input));
// Spoof an unsupported device type
- tensor->dl_tensor.ctx.device_type = kDLOpenCL;
+ tensor->dl_tensor.device.device_type = kDLOpenCL;
EXPECT_THROW(cudf::from_dlpack(tensor.get()), cudf::logic_error);
}
@@ -122,7 +122,7 @@ TEST_F(DLPackUntypedTests, InvalidDeviceIdFromDlpack)
unique_managed_tensor tensor(cudf::to_dlpack(input));
// Spoof the wrong device ID
- tensor->dl_tensor.ctx.device_id += 1;
+ tensor->dl_tensor.device.device_id += 1;
EXPECT_THROW(cudf::from_dlpack(tensor.get()), cudf::logic_error);
}
@@ -242,7 +242,7 @@ TYPED_TEST(DLPackNumericTests, ToDlpack1D)
auto const& tensor = result->dl_tensor;
validate_dtype<TypeParam>(tensor.dtype);
- EXPECT_EQ(kDLGPU, tensor.ctx.device_type);
+ EXPECT_EQ(kDLCUDA, tensor.device.device_type);
EXPECT_EQ(1, tensor.ndim);
EXPECT_EQ(uint64_t{0}, tensor.byte_offset);
EXPECT_EQ(nullptr, tensor.strides);
@@ -275,7 +275,7 @@ TYPED_TEST(DLPackNumericTests, ToDlpack2D)
auto const& tensor = result->dl_tensor;
validate_dtype<TypeParam>(tensor.dtype);
- EXPECT_EQ(kDLGPU, tensor.ctx.device_type);
+ EXPECT_EQ(kDLCUDA, tensor.device.device_type);
EXPECT_EQ(2, tensor.ndim);
EXPECT_EQ(uint64_t{0}, tensor.byte_offset);
@@ -341,12 +341,12 @@ TYPED_TEST(DLPackNumericTests, FromDlpackCpu)
int64_t strides[2] = {1, 5};
DLManagedTensor tensor{};
- tensor.dl_tensor.ctx.device_type = kDLCPU;
- tensor.dl_tensor.dtype = get_dtype<T>();
- tensor.dl_tensor.ndim = 2;
- tensor.dl_tensor.byte_offset = offset;
- tensor.dl_tensor.shape = shape;
- tensor.dl_tensor.strides = strides;
+ tensor.dl_tensor.device.device_type = kDLCPU;
+ tensor.dl_tensor.dtype = get_dtype<T>();
+ tensor.dl_tensor.ndim = 2;
+ tensor.dl_tensor.byte_offset = offset;
+ tensor.dl_tensor.shape = shape;
+ tensor.dl_tensor.strides = strides;
thrust::host_vector<T> host_vector(data.begin(), data.end());
tensor.dl_tensor.data = host_vector.data();
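Beyond the `kDLCPU` path exercised by `FromDlpackCpu`, `from_dlpack` also accepts the renamed `kDLCUDAHost` (formerly `kDLCPUPinned`) device type. A hedged sketch of that path, not taken from this PR; the buffer size, dtype, and `pinned_to_table` name are made up, and error checks are omitted.

```cpp
// Sketch only: import pinned host memory via kDLCUDAHost (kDLCPUPinned before 0.5).
#include <cudf/interop.hpp>
#include <cudf/table/table.hpp>
#include <dlpack/dlpack.h>

#include <cuda_runtime_api.h>

#include <cstdint>
#include <memory>

std::unique_ptr<cudf::table> pinned_to_table(int64_t num_rows)
{
  void* pinned = nullptr;
  cudaMallocHost(&pinned, num_rows * sizeof(float));
  // ... fill static_cast<float*>(pinned) with column data ...

  int64_t shape[1] = {num_rows};

  DLManagedTensor tensor{};
  tensor.dl_tensor.data               = pinned;
  tensor.dl_tensor.device.device_type = kDLCUDAHost;
  cudaGetDevice(&tensor.dl_tensor.device.device_id);  // must match the current device
  tensor.dl_tensor.ndim               = 1;
  tensor.dl_tensor.dtype.code         = kDLFloat;
  tensor.dl_tensor.dtype.bits         = 32;
  tensor.dl_tensor.dtype.lanes        = 1;
  tensor.dl_tensor.shape              = shape;
  tensor.dl_tensor.strides            = nullptr;  // contiguous 1D data

  auto result = cudf::from_dlpack(&tensor);
  cudaDeviceSynchronize();  // ensure the async copy completed before freeing the buffer
  cudaFreeHost(pinned);
  return result;
}
```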