From 11c3dfef2e7fe6fd67ff93bdf36a47c0a5b2eb37 Mon Sep 17 00:00:00 2001
From: brandon-b-miller <53796099+brandon-b-miller@users.noreply.github.com>
Date: Wed, 1 Dec 2021 10:28:24 -0600
Subject: [PATCH] Remove unused masked udf cython/c++ code (#9792)

This PR removes the C++ side of the original masked UDF code introduced in
https://github.com/rapidsai/cudf/pull/8213. These kernels had some limitations
and are now superseded by the numba-generated versions we moved to in
https://github.com/rapidsai/cudf/pull/9174. As far as I can tell, cuDF Python
was the only thing consuming this API for the short time it existed. However,
I am marking this as breaking just in case.

Authors:
  - https://github.com/brandon-b-miller

Approvers:
  - Mark Harris (https://github.com/harrism)
  - David Wendt (https://github.com/davidwendt)
  - Vyas Ramasubramani (https://github.com/vyasr)

URL: https://github.com/rapidsai/cudf/pull/9792
---
 .../Modules/JitifyPreprocessKernels.cmake  |   4 +-
 cpp/include/cudf/transform.hpp             |   6 --
 cpp/src/transform/jit/masked_udf_kernel.cu |  85 ---------------
 cpp/src/transform/transform.cpp            | 102 ------------------
 python/cudf/cudf/_lib/cpp/transform.pxd    |   6 --
 python/cudf/cudf/_lib/transform.pyx        |  24 -----
 6 files changed, 2 insertions(+), 225 deletions(-)
 delete mode 100644 cpp/src/transform/jit/masked_udf_kernel.cu

diff --git a/cpp/cmake/Modules/JitifyPreprocessKernels.cmake b/cpp/cmake/Modules/JitifyPreprocessKernels.cmake
index c2ad25760b8..6ab1293ab6f 100644
--- a/cpp/cmake/Modules/JitifyPreprocessKernels.cmake
+++ b/cpp/cmake/Modules/JitifyPreprocessKernels.cmake
@@ -51,8 +51,8 @@ function(jit_preprocess_files)
 endfunction()
 
 jit_preprocess_files(
-  SOURCE_DIRECTORY ${CUDF_SOURCE_DIR}/src FILES binaryop/jit/kernel.cu
-  transform/jit/masked_udf_kernel.cu transform/jit/kernel.cu rolling/jit/kernel.cu
+  SOURCE_DIRECTORY ${CUDF_SOURCE_DIR}/src FILES binaryop/jit/kernel.cu transform/jit/kernel.cu
+  rolling/jit/kernel.cu
 )
 
 add_custom_target(
diff --git a/cpp/include/cudf/transform.hpp b/cpp/include/cudf/transform.hpp
index 55e7bc84dbe..45e8ff1310c 100644
--- a/cpp/include/cudf/transform.hpp
+++ b/cpp/include/cudf/transform.hpp
@@ -54,12 +54,6 @@ std::unique_ptr<column> transform(
   bool is_ptx,
   rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
 
-std::unique_ptr<column> generalized_masked_op(
-  table_view const& data_view,
-  std::string const& binary_udf,
-  data_type output_type,
-  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
-
 /**
  * @brief Creates a null_mask from `input` by converting `NaN` to null and
  * preserving existing null values and also returns new null_count.
diff --git a/cpp/src/transform/jit/masked_udf_kernel.cu b/cpp/src/transform/jit/masked_udf_kernel.cu
deleted file mode 100644
index 319ad730c53..00000000000
--- a/cpp/src/transform/jit/masked_udf_kernel.cu
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2021, NVIDIA CORPORATION.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include 
-#include 
-#include 
-
-#include 
-#include 
-
-#include 
-#include 
-#include 
-#include 
-#include 
-
-namespace cudf {
-namespace transformation {
-namespace jit {
-
-template <typename T>
-struct Masked {
-  T value;
-  bool valid;
-};
-
-template <typename TypeIn, typename MaskType, typename OffsetType>
-__device__ auto make_args(cudf::size_type id, TypeIn in_ptr, MaskType in_mask, OffsetType in_offset)
-{
-  bool valid = in_mask ? cudf::bit_is_set(in_mask, in_offset + id) : true;
-  return cuda::std::make_tuple(in_ptr[id], valid);
-}
-
-template <typename InType, typename MaskType, typename OffsetType, typename... Arguments>
-__device__ auto make_args(cudf::size_type id,
-                          InType in_ptr,
-                          MaskType in_mask,      // in practice, always cudf::bitmask_type const*
-                          OffsetType in_offset,  // in practice, always cudf::size_type
-                          Arguments... args)
-{
-  bool valid = in_mask ? cudf::bit_is_set(in_mask, in_offset + id) : true;
-  return cuda::std::tuple_cat(cuda::std::make_tuple(in_ptr[id], valid), make_args(id, args...));
-}
-
-template <typename TypeOut, typename... Arguments>
-__global__ void generic_udf_kernel(cudf::size_type size,
-                                   TypeOut* out_data,
-                                   bool* out_mask,
-                                   Arguments... args)
-{
-  int const tid    = threadIdx.x;
-  int const blkid  = blockIdx.x;
-  int const blksz  = blockDim.x;
-  int const gridsz = gridDim.x;
-  int const start  = tid + blkid * blksz;
-  int const step   = blksz * gridsz;
-
-  Masked<TypeOut> output;
-  for (cudf::size_type i = start; i < size; i += step) {
-    auto func_args = cuda::std::tuple_cat(
-      cuda::std::make_tuple(&output.value),
-      make_args(i, args...)  // passed int64*, bool*, int64, int64*, bool*, int64
-    );
-    cuda::std::apply(GENERIC_OP, func_args);
-    out_data[i] = output.value;
-    out_mask[i] = output.valid;
-  }
-}
-
-} // namespace jit
-} // namespace transformation
-} // namespace cudf
diff --git a/cpp/src/transform/transform.cpp b/cpp/src/transform/transform.cpp
index 5230b853a79..0cca6699586 100644
--- a/cpp/src/transform/transform.cpp
+++ b/cpp/src/transform/transform.cpp
@@ -19,12 +19,10 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
@@ -65,80 +63,6 @@ void unary_operation(mutable_column_view output,
                      cudf::jit::get_data_ptr(input));
 }
 
-std::vector<std::string> make_template_types(column_view outcol_view, table_view const& data_view)
-{
-  std::string mskptr_type =
-    cudf::jit::get_type_name(cudf::data_type(cudf::type_to_id<cudf::bitmask_type>())) + "*";
-  std::string offset_type =
-    cudf::jit::get_type_name(cudf::data_type(cudf::type_to_id<cudf::size_type>()));
-
-  std::vector<std::string> template_types;
-  template_types.reserve((3 * data_view.num_columns()) + 1);
-
-  template_types.push_back(cudf::jit::get_type_name(outcol_view.type()));
-  for (auto const& col : data_view) {
-    template_types.push_back(cudf::jit::get_type_name(col.type()) + "*");
-    template_types.push_back(mskptr_type);
-    template_types.push_back(offset_type);
-  }
-  return template_types;
-}
-
-void generalized_operation(table_view const& data_view,
-                           std::string const& udf,
-                           data_type output_type,
-                           mutable_column_view outcol_view,
-                           mutable_column_view outmsk_view,
-                           rmm::cuda_stream_view stream,
-                           rmm::mr::device_memory_resource* mr)
-{
-  auto const template_types = make_template_types(outcol_view, data_view);
-
-  std::string generic_kernel_name =
-    jitify2::reflection::Template("cudf::transformation::jit::generic_udf_kernel")
-      .instantiate(template_types);
-
-  std::string generic_cuda_source = cudf::jit::parse_single_function_ptx(
-    udf, "GENERIC_OP", cudf::jit::get_type_name(output_type), {0});
-
-  std::vector<void*> kernel_args;
-  kernel_args.reserve((data_view.num_columns() * 3) + 3);
-
-  cudf::size_type size   = outcol_view.size();
-  const void* outcol_ptr = cudf::jit::get_data_ptr(outcol_view);
-  const void* outmsk_ptr = cudf::jit::get_data_ptr(outmsk_view);
-  kernel_args.insert(kernel_args.begin(), {&size, &outcol_ptr, &outmsk_ptr});
-
-  std::vector<void const*> data_ptrs;
-  std::vector<cudf::bitmask_type const*> mask_ptrs;
-  std::vector<cudf::size_type> offsets;
-
-  data_ptrs.reserve(data_view.num_columns());
-  mask_ptrs.reserve(data_view.num_columns());
-  offsets.reserve(data_view.num_columns());
-
-  auto const iters = thrust::make_zip_iterator(
-    thrust::make_tuple(data_ptrs.begin(), mask_ptrs.begin(), offsets.begin()));
-
-  std::for_each(iters, iters + data_view.num_columns(), [&](auto const& tuple_vals) {
-    kernel_args.push_back(&thrust::get<0>(tuple_vals));
-    kernel_args.push_back(&thrust::get<1>(tuple_vals));
-    kernel_args.push_back(&thrust::get<2>(tuple_vals));
-  });
-
-  std::transform(data_view.begin(), data_view.end(), iters, [&](column_view const& col) {
-    return thrust::make_tuple(cudf::jit::get_data_ptr(col), col.null_mask(), col.offset());
-  });
-
-  cudf::jit::get_program_cache(*transform_jit_masked_udf_kernel_cu_jit)
-    .get_kernel(generic_kernel_name,
-                {},
-                {{"transform/jit/operation-udf.hpp", generic_cuda_source}},
-                {"-arch=sm_."})
-    ->configure_1d_max_occupancy(0, 0, 0, stream.value())
-    ->launch(kernel_args.data());
-}
-
 } // namespace jit
 } // namespace transformation
 
@@ -165,24 +89,6 @@ std::unique_ptr<column> transform(column_view const& input,
   return output;
 }
 
-std::unique_ptr<column> generalized_masked_op(table_view const& data_view,
-                                              std::string const& udf,
-                                              data_type output_type,
-                                              rmm::cuda_stream_view stream,
-                                              rmm::mr::device_memory_resource* mr)
-{
-  std::unique_ptr<column> output = make_fixed_width_column(output_type, data_view.num_rows());
-  std::unique_ptr<column> output_mask =
-    make_fixed_width_column(cudf::data_type{cudf::type_id::BOOL8}, data_view.num_rows());
-
-  transformation::jit::generalized_operation(
-    data_view, udf, output_type, *output, *output_mask, stream, mr);
-
-  auto final_output_mask = cudf::bools_to_mask(*output_mask);
-  output.get()->set_null_mask(std::move(*(final_output_mask.first)));
-  return output;
-}
-
 } // namespace detail
 
 std::unique_ptr<column> transform(column_view const& input,
@@ -195,12 +101,4 @@ std::unique_ptr<column> transform(column_view const& input,
   return detail::transform(input, unary_udf, output_type, is_ptx, rmm::cuda_stream_default, mr);
 }
 
-std::unique_ptr<column> generalized_masked_op(table_view const& data_view,
-                                              std::string const& udf,
-                                              data_type output_type,
-                                              rmm::mr::device_memory_resource* mr)
-{
-  return detail::generalized_masked_op(data_view, udf, output_type, rmm::cuda_stream_default, mr);
-}
-
 } // namespace cudf
diff --git a/python/cudf/cudf/_lib/cpp/transform.pxd b/python/cudf/cudf/_lib/cpp/transform.pxd
index 3153427ce3c..590a371ff52 100644
--- a/python/cudf/cudf/_lib/cpp/transform.pxd
+++ b/python/cudf/cudf/_lib/cpp/transform.pxd
@@ -34,12 +34,6 @@ cdef extern from "cudf/transform.hpp" namespace "cudf" nogil:
         bool is_ptx
     ) except +
 
-    cdef unique_ptr[column] generalized_masked_op(
-        const table_view& data_view,
-        string udf,
-        data_type output_type,
-    ) except +
-
     cdef pair[unique_ptr[table], unique_ptr[column]] encode(
         table_view input
     ) except +
diff --git a/python/cudf/cudf/_lib/transform.pyx b/python/cudf/cudf/_lib/transform.pyx
index a0eb7c68183..96d25cb92c9 100644
--- a/python/cudf/cudf/_lib/transform.pyx
+++ b/python/cudf/cudf/_lib/transform.pyx
@@ -123,30 +123,6 @@ def transform(Column input, op):
     return Column.from_unique_ptr(move(c_output))
 
 
-def masked_udf(incols, op, output_type):
-    cdef table_view data_view = table_view_from_table(
-        incols, ignore_index=True)
-    cdef string c_str = op.encode("UTF-8")
-    cdef type_id c_tid
-    cdef data_type c_dtype
-
-    c_tid = (
-        SUPPORTED_NUMPY_TO_LIBCUDF_TYPES[
-            output_type
-        ]
-    )
-    c_dtype = data_type(c_tid)
-
-    with nogil:
-        c_output = move(libcudf_transform.generalized_masked_op(
-            data_view,
-            c_str,
-            c_dtype,
-        ))
-
-    return Column.from_unique_ptr(move(c_output))
-
-
 def table_encode(input):
     cdef table_view c_input = table_view_from_table(
         input, ignore_index=True)
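
For reference, the numba-generated path from https://github.com/rapidsai/cudf/pull/9174 that supersedes
the generalized_masked_op machinery removed above is driven entirely from Python. A minimal sketch of the
user-facing replacement (assuming a cudf build that already contains #9174; the column names and the
fallback value are illustrative only, not part of this patch):

    import cudf

    # Two columns containing nulls; inside the UDF each field is either a value or cudf.NA.
    df = cudf.DataFrame({"a": [1, None, 3], "b": [4, 5, None]})

    def add_or_default(row):
        # Masked semantics previously handled by the removed C++ kernels.
        if row["a"] is cudf.NA or row["b"] is cudf.NA:
            return 0
        return row["a"] + row["b"]

    # DataFrame.apply compiles the UDF with numba and launches the generated kernel,
    # so no JIT-compiled C++ generalized_masked_op is involved anymore.
    df["out"] = df.apply(add_or_default, axis=1)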