diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt
index b6a61368fe7..53da710f0ea 100644
--- a/cpp/CMakeLists.txt
+++ b/cpp/CMakeLists.txt
@@ -359,6 +359,7 @@ add_library(
   src/interop/from_arrow.cu
   src/interop/to_arrow.cu
   src/interop/to_arrow_device.cu
+  src/interop/from_arrow_device.cu
   src/interop/to_arrow_schema.cpp
   src/interop/to_arrow_utilities.cpp
   src/interop/detail/arrow_allocator.cpp
diff --git a/cpp/include/cudf/interop.hpp b/cpp/include/cudf/interop.hpp
index defc1fc834c..bb05a622f40 100644
--- a/cpp/include/cudf/interop.hpp
+++ b/cpp/include/cudf/interop.hpp
@@ -348,5 +348,129 @@ std::unique_ptr<cudf::table> from_arrow(
   rmm::cuda_stream_view stream        = cudf::get_default_stream(),
   rmm::device_async_resource_ref mr   = rmm::mr::get_current_device_resource());
 
+/**
+ * @brief typedef for a vector of owning columns, used for conversion from ArrowDeviceArray
+ */
+using owned_columns_t = std::vector<std::unique_ptr<cudf::column>>;
+
+/**
+ * @brief functor for a custom deleter to a unique_ptr of table_view
+ *
+ * When converting from an ArrowDeviceArray, there are cases where the data can't
+ * be zero-copied (i.e. bools or non-UINT32 dictionary indices). This custom deleter
+ * is used to maintain ownership over the data allocated, since a `cudf::table_view`
+ * doesn't hold ownership.
+ */
+template <typename ViewType>
+struct custom_view_deleter {
+  /**
+   * @brief Construct a new custom view deleter object
+   *
+   * @param owned Vector of owning columns
+   */
+  explicit custom_view_deleter(owned_columns_t&& owned) : owned_mem_{std::move(owned)} {}
+
+  /**
+   * @brief operator to delete the unique_ptr
+   *
+   * @param ptr Pointer to the object to be deleted
+   */
+  void operator()(ViewType* ptr) const { delete ptr; }
+
+  owned_columns_t owned_mem_;  ///< Owned columns that must be deleted.
+};
+
+/**
+ * @brief typedef for a unique_ptr to a `cudf::table_view` with custom deleter
+ */
+using unique_table_view_t =
+  std::unique_ptr<cudf::table_view, custom_view_deleter<cudf::table_view>>;
+
+/**
+ * @brief Create `cudf::table_view` from given `ArrowDeviceArray` and `ArrowSchema`
+ *
+ * Constructs a non-owning `cudf::table_view` from an `ArrowDeviceArray` and `ArrowSchema`;
+ * the data must be accessible to the CUDA device. Because the resulting `cudf::table_view`
+ * will not own the data, the `ArrowDeviceArray` must be kept alive for the lifetime of the
+ * result. It is the responsibility of callers to ensure they call the release callback on
+ * the `ArrowDeviceArray` after it is no longer needed, and that the `cudf::table_view` is
+ * not accessed after this happens.
+ *
+ * @throws cudf::logic_error if device_type is not `ARROW_DEVICE_CUDA`, `ARROW_DEVICE_CUDA_HOST`
+ * or `ARROW_DEVICE_CUDA_MANAGED`
+ *
+ * @throws cudf::data_type_error if the input array is not a struct array; non-struct
+ * arrays should be passed to `from_arrow_device_column` instead.
+ *
+ * @throws cudf::data_type_error if the input arrow data type is not supported.
+ *
+ * Each child of the input struct array becomes a column of the resulting table_view.
+ *
+ * @note The custom deleter used for the unique_ptr to the table_view maintains ownership
+ * over any memory which is allocated, such as converting boolean columns from the bitmap
+ * used by Arrow to the 1-byte-per-value representation used by cudf.
+ *
+ * @note If the input `ArrowDeviceArray` contains a non-null sync_event, it is assumed
+ * to be a `cudaEvent_t*` and the passed-in stream will have `cudaStreamWaitEvent` called
+ * on it with the event. This function, however, will not explicitly synchronize on the
+ * stream.
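+ *
+ * A minimal usage sketch (assuming `schema` and `dev_arr` describe a struct array whose
+ * buffers already reside on the current CUDA device):
+ * @code{.cpp}
+ * cudf::unique_table_view_t result = cudf::from_arrow_device(&schema, &dev_arr);
+ * cudf::table_view tv              = *result;
+ * // use `tv`; call `dev_arr.release` only after `result` and `tv` are no longer needed
+ * @endcode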
+ *
+ * @param schema `ArrowSchema` pointer to object describing the type of the device array
+ * @param input `ArrowDeviceArray` pointer to object owning the Arrow data
+ * @param stream CUDA stream used for device memory operations and kernel launches
+ * @param mr Device memory resource used to perform any allocations
+ * @return `cudf::table_view` generated from given Arrow data
+ */
+unique_table_view_t from_arrow_device(
+  ArrowSchema const* schema,
+  ArrowDeviceArray const* input,
+  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
+  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+
+/**
+ * @brief typedef for a unique_ptr to a `cudf::column_view` with custom deleter
+ */
+using unique_column_view_t =
+  std::unique_ptr<cudf::column_view, custom_view_deleter<cudf::column_view>>;
+
+/**
+ * @brief Create `cudf::column_view` from given `ArrowDeviceArray` and `ArrowSchema`
+ *
+ * Constructs a non-owning `cudf::column_view` from an `ArrowDeviceArray` and `ArrowSchema`;
+ * the data must be accessible to the CUDA device. Because the resulting `cudf::column_view`
+ * will not own the data, the `ArrowDeviceArray` must be kept alive for the lifetime of the
+ * result. It is the responsibility of callers to ensure they call the release callback on
+ * the `ArrowDeviceArray` after it is no longer needed, and that the `cudf::column_view` is
+ * not accessed after this happens.
+ *
+ * @throws cudf::logic_error if device_type is not `ARROW_DEVICE_CUDA`, `ARROW_DEVICE_CUDA_HOST`
+ * or `ARROW_DEVICE_CUDA_MANAGED`
+ *
+ * @throws cudf::data_type_error if the input arrow data type is not supported.
+ *
+ * @note The custom deleter used for the unique_ptr to the column_view maintains ownership
+ * over any memory which is allocated, such as converting boolean columns from the bitmap
+ * used by Arrow to the 1-byte-per-value representation used by cudf.
+ *
+ * @note If the input `ArrowDeviceArray` contains a non-null sync_event, it is assumed
+ * to be a `cudaEvent_t*` and the passed-in stream will have `cudaStreamWaitEvent` called
+ * on it with the event. This function, however, will not explicitly synchronize on the
+ * stream.
+ *
+ * @param schema `ArrowSchema` pointer to object describing the type of the device array
+ * @param input `ArrowDeviceArray` pointer to object owning the Arrow data
+ * @param stream CUDA stream used for device memory operations and kernel launches
+ * @param mr Device memory resource used to perform any allocations
+ * @return `cudf::column_view` generated from given Arrow data
+ */
+unique_column_view_t from_arrow_device_column(
+  ArrowSchema const* schema,
+  ArrowDeviceArray const* input,
+  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
+  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+
 /** @} */  // end of group
 }  // namespace cudf
diff --git a/cpp/src/interop/arrow_utilities.hpp b/cpp/src/interop/arrow_utilities.hpp
new file mode 100644
index 00000000000..9bbdaa2c363
--- /dev/null
+++ b/cpp/src/interop/arrow_utilities.hpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +namespace cudf { +namespace detail { + +/** + * @brief constants for buffer indexes of Arrow arrays + * + */ +static constexpr int validity_buffer_idx = 0; +static constexpr int fixed_width_data_buffer_idx = 1; + +} // namespace detail +} // namespace cudf diff --git a/cpp/src/interop/from_arrow_device.cu b/cpp/src/interop/from_arrow_device.cu new file mode 100644 index 00000000000..d4d31d1989b --- /dev/null +++ b/cpp/src/interop/from_arrow_device.cu @@ -0,0 +1,483 @@ +/* + * Copyright (c) 2024, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "arrow_utilities.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +namespace cudf { + +namespace detail { +data_type arrow_to_cudf_type(const ArrowSchemaView* arrow_view) +{ + switch (arrow_view->type) { + case NANOARROW_TYPE_NA: return data_type(type_id::EMPTY); + case NANOARROW_TYPE_BOOL: return data_type(type_id::BOOL8); + case NANOARROW_TYPE_INT8: return data_type(type_id::INT8); + case NANOARROW_TYPE_INT16: return data_type(type_id::INT16); + case NANOARROW_TYPE_INT32: return data_type(type_id::INT32); + case NANOARROW_TYPE_INT64: return data_type(type_id::INT64); + case NANOARROW_TYPE_UINT8: return data_type(type_id::UINT8); + case NANOARROW_TYPE_UINT16: return data_type(type_id::UINT16); + case NANOARROW_TYPE_UINT32: return data_type(type_id::UINT32); + case NANOARROW_TYPE_UINT64: return data_type(type_id::UINT64); + case NANOARROW_TYPE_FLOAT: return data_type(type_id::FLOAT32); + case NANOARROW_TYPE_DOUBLE: return data_type(type_id::FLOAT64); + case NANOARROW_TYPE_DATE32: return data_type(type_id::TIMESTAMP_DAYS); + case NANOARROW_TYPE_STRING: return data_type(type_id::STRING); + case NANOARROW_TYPE_LIST: return data_type(type_id::LIST); + case NANOARROW_TYPE_DICTIONARY: return data_type(type_id::DICTIONARY32); + case NANOARROW_TYPE_STRUCT: return data_type(type_id::STRUCT); + case NANOARROW_TYPE_TIMESTAMP: { + switch (arrow_view->time_unit) { + case NANOARROW_TIME_UNIT_SECOND: return data_type(type_id::TIMESTAMP_SECONDS); + case NANOARROW_TIME_UNIT_MILLI: return data_type(type_id::TIMESTAMP_MILLISECONDS); + case NANOARROW_TIME_UNIT_MICRO: return data_type(type_id::TIMESTAMP_MICROSECONDS); + case NANOARROW_TIME_UNIT_NANO: return data_type(type_id::TIMESTAMP_NANOSECONDS); + default: CUDF_FAIL("Unsupported timestamp unit in arrow", cudf::data_type_error); + } + } + case NANOARROW_TYPE_DURATION: { + switch (arrow_view->time_unit) { + case NANOARROW_TIME_UNIT_SECOND: return data_type(type_id::DURATION_SECONDS); + case NANOARROW_TIME_UNIT_MILLI: return data_type(type_id::DURATION_MILLISECONDS); + case NANOARROW_TIME_UNIT_MICRO: return data_type(type_id::DURATION_MICROSECONDS); + case NANOARROW_TIME_UNIT_NANO: 
return data_type(type_id::DURATION_NANOSECONDS); + default: CUDF_FAIL("Unsupported duration unit in arrow", cudf::data_type_error); + } + } + case NANOARROW_TYPE_DECIMAL128: + return data_type{type_id::DECIMAL128, -arrow_view->decimal_scale}; + default: CUDF_FAIL("Unsupported type_id conversion to cudf", cudf::data_type_error); + } +} + +namespace { + +using dispatch_tuple_t = std::tuple; + +struct dispatch_from_arrow_device { + template () && + !std::is_same_v)> + dispatch_tuple_t operator()(ArrowSchemaView*, + ArrowArray const*, + data_type, + bool, + rmm::cuda_stream_view, + rmm::mr::device_memory_resource*) + { + CUDF_FAIL("Unsupported type in from_arrow_device", cudf::data_type_error); + } + + template () || std::is_same_v)> + dispatch_tuple_t operator()(ArrowSchemaView* schema, + ArrowArray const* input, + data_type type, + bool skip_mask, + rmm::cuda_stream_view, + rmm::mr::device_memory_resource*) + { + size_type const num_rows = input->length; + size_type const offset = input->offset; + size_type const null_count = input->null_count; + bitmask_type const* null_mask = + skip_mask ? nullptr + : reinterpret_cast(input->buffers[validity_buffer_idx]); + auto data_buffer = input->buffers[fixed_width_data_buffer_idx]; + return std::make_tuple( + {type, num_rows, data_buffer, null_mask, null_count, offset}, {}); + } +}; + +// forward declaration is needed because `type_dispatch` instantiates the +// dispatch_from_arrow_device struct causing a recursive situation for struct, +// dictionary and list_view types. +dispatch_tuple_t get_column(ArrowSchemaView* schema, + ArrowArray const* input, + data_type type, + bool skip_mask, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr); + +template <> +dispatch_tuple_t dispatch_from_arrow_device::operator()(ArrowSchemaView* schema, + ArrowArray const* input, + data_type type, + bool skip_mask, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + if (input->length == 0) { + return std::make_tuple( + {type, + 0, + nullptr, + skip_mask ? nullptr + : reinterpret_cast(input->buffers[validity_buffer_idx]), + 0}, + {}); + } + + auto out_col = mask_to_bools( + reinterpret_cast(input->buffers[fixed_width_data_buffer_idx]), + input->offset, + input->offset + input->length, + stream, + mr); + auto const has_nulls = skip_mask ? false : input->buffers[validity_buffer_idx] != nullptr; + if (has_nulls) { + auto out_mask = cudf::detail::copy_bitmask( + reinterpret_cast(input->buffers[validity_buffer_idx]), + input->offset, + input->offset + input->length, + stream, + mr); + out_col->set_null_mask(std::move(out_mask), input->null_count); + } + + auto out_view = out_col->view(); + owned_columns_t owned; + owned.emplace_back(std::move(out_col)); + return std::make_tuple(std::move(out_view), std::move(owned)); +} + +template <> +dispatch_tuple_t dispatch_from_arrow_device::operator()( + ArrowSchemaView* schema, + ArrowArray const* input, + data_type type, + bool skip_mask, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + if (input->length == 0) { + return std::make_tuple( + {type, + 0, + nullptr, + skip_mask ? nullptr + : reinterpret_cast(input->buffers[validity_buffer_idx]), + 0}, + {}); + } + + auto offsets_view = column_view{data_type(type_id::INT32), + static_cast(input->offset + input->length) + 1, + input->buffers[fixed_width_data_buffer_idx], + nullptr, + 0, + 0}; + return std::make_tuple( + {type, + static_cast(input->length), + input->buffers[2], + skip_mask ? 
nullptr + : reinterpret_cast(input->buffers[validity_buffer_idx]), + static_cast(input->null_count), + static_cast(input->offset), + {offsets_view}}, + {}); +} + +template <> +dispatch_tuple_t dispatch_from_arrow_device::operator()( + ArrowSchemaView* schema, + ArrowArray const* input, + data_type type, + bool skip_mask, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + ArrowSchemaView keys_schema_view; + NANOARROW_THROW_NOT_OK( + ArrowSchemaViewInit(&keys_schema_view, schema->schema->dictionary, nullptr)); + + auto const keys_type = arrow_to_cudf_type(&keys_schema_view); + auto [keys_view, owned_cols] = + get_column(&keys_schema_view, input->dictionary, keys_type, true, stream, mr); + + auto const dict_indices_type = [&schema]() -> data_type { + // cudf dictionary requires an unsigned type for the indices, + // since it is invalid for an arrow dictionary to contain negative + // indices, we can safely use the unsigned equivalent without having + // to modify the buffers. + switch (schema->storage_type) { + case NANOARROW_TYPE_INT8: + case NANOARROW_TYPE_UINT8: return data_type(type_id::UINT8); + case NANOARROW_TYPE_INT16: + case NANOARROW_TYPE_UINT16: return data_type(type_id::UINT16); + case NANOARROW_TYPE_INT32: + case NANOARROW_TYPE_UINT32: return data_type(type_id::UINT32); + case NANOARROW_TYPE_INT64: + case NANOARROW_TYPE_UINT64: return data_type(type_id::UINT64); + default: CUDF_FAIL("Unsupported type_id for dictionary indices", cudf::data_type_error); + } + }(); + + size_type const num_rows = input->length; + size_type const offset = input->offset; + size_type const null_count = input->null_count; + column_view indices_view = column_view{dict_indices_type, + offset + num_rows, + input->buffers[fixed_width_data_buffer_idx], + nullptr, + 0, + 0}; + + return std::make_tuple( + {type, + num_rows, + nullptr, + reinterpret_cast(input->buffers[validity_buffer_idx]), + null_count, + offset, + {indices_view, keys_view}}, + std::move(owned_cols)); +} + +template <> +dispatch_tuple_t dispatch_from_arrow_device::operator()( + ArrowSchemaView* schema, + ArrowArray const* input, + data_type type, + bool skip_mask, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + std::vector children; + owned_columns_t out_owned_cols; + std::transform( + input->children, + input->children + input->n_children, + schema->schema->children, + std::back_inserter(children), + [&out_owned_cols, &stream, &mr](ArrowArray const* child, ArrowSchema const* child_schema) { + ArrowSchemaView view; + NANOARROW_THROW_NOT_OK(ArrowSchemaViewInit(&view, child_schema, nullptr)); + auto type = arrow_to_cudf_type(&view); + auto [out_view, owned] = get_column(&view, child, type, false, stream, mr); + if (out_owned_cols.empty()) { + out_owned_cols = std::move(owned); + } else { + out_owned_cols.insert(std::end(out_owned_cols), + std::make_move_iterator(std::begin(owned)), + std::make_move_iterator(std::end(owned))); + } + return out_view; + }); + + size_type const num_rows = input->length; + size_type const offset = input->offset; + size_type const null_count = input->null_count; + return std::make_tuple( + {type, + num_rows, + nullptr, + reinterpret_cast(input->buffers[validity_buffer_idx]), + null_count, + offset, + std::move(children)}, + std::move(out_owned_cols)); +} + +template <> +dispatch_tuple_t dispatch_from_arrow_device::operator()( + ArrowSchemaView* schema, + ArrowArray const* input, + data_type type, + bool skip_mask, + rmm::cuda_stream_view stream, + 
rmm::mr::device_memory_resource* mr) +{ + size_type const num_rows = input->length; + size_type const offset = input->offset; + size_type const null_count = input->null_count; + auto offsets_view = column_view{data_type(type_id::INT32), + offset + num_rows + 1, + input->buffers[fixed_width_data_buffer_idx], + nullptr, + 0, + 0}; + + ArrowSchemaView child_schema_view; + NANOARROW_THROW_NOT_OK( + ArrowSchemaViewInit(&child_schema_view, schema->schema->children[0], nullptr)); + auto child_type = arrow_to_cudf_type(&child_schema_view); + auto [child_view, owned] = + get_column(&child_schema_view, input->children[0], child_type, false, stream, mr); + + // in the scenario where we were sliced and there are more elements in the child_view + // than can be referenced by the sliced offsets, we need to slice the child_view + // so that when `get_sliced_child` is called, we still produce the right result + auto max_child_offset = cudf::detail::get_value(offsets_view, offset + num_rows, stream); + child_view = cudf::slice(child_view, {0, max_child_offset}, stream).front(); + + return std::make_tuple( + {type, + num_rows, + rmm::device_buffer{0, stream, mr}.data(), + reinterpret_cast(input->buffers[validity_buffer_idx]), + null_count, + offset, + {offsets_view, child_view}}, + std::move(owned)); +} + +dispatch_tuple_t get_column(ArrowSchemaView* schema, + ArrowArray const* input, + data_type type, + bool skip_mask, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + return type.id() != type_id::EMPTY + ? std::move(type_dispatcher( + type, dispatch_from_arrow_device{}, schema, input, type, skip_mask, stream, mr)) + : std::make_tuple({data_type(type_id::EMPTY), + static_cast(input->length), + nullptr, + nullptr, + static_cast(input->length)}, + {}); +} + +} // namespace + +unique_table_view_t from_arrow_device(ArrowSchemaView* schema, + ArrowDeviceArray const* input, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + if (input->sync_event != nullptr) { + CUDF_CUDA_TRY( + cudaStreamWaitEvent(stream.value(), *reinterpret_cast(input->sync_event))); + } + + std::vector columns; + owned_columns_t owned_mem; + + auto type = arrow_to_cudf_type(schema); + CUDF_EXPECTS(type == data_type(type_id::STRUCT), + "Must pass a struct to `from_arrow_device`", + cudf::data_type_error); + std::transform( + input->array.children, + input->array.children + input->array.n_children, + schema->schema->children, + std::back_inserter(columns), + [&owned_mem, &stream, &mr](ArrowArray const* child, ArrowSchema const* child_schema) { + ArrowSchemaView view; + NANOARROW_THROW_NOT_OK(ArrowSchemaViewInit(&view, child_schema, nullptr)); + auto type = arrow_to_cudf_type(&view); + auto [out_view, owned] = get_column(&view, child, type, false, stream, mr); + if (owned_mem.empty()) { + owned_mem = std::move(owned); + } else { + owned_mem.insert(std::end(owned_mem), + std::make_move_iterator(std::begin(owned)), + std::make_move_iterator(std::end(owned))); + } + return out_view; + }); + + return unique_table_view_t{new table_view{columns}, + custom_view_deleter{std::move(owned_mem)}}; +} + +unique_column_view_t from_arrow_device_column(ArrowSchemaView* schema, + ArrowDeviceArray const* input, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + if (input->sync_event != nullptr) { + CUDF_CUDA_TRY( + cudaStreamWaitEvent(stream.value(), *reinterpret_cast(input->sync_event))); + } + + auto type = arrow_to_cudf_type(schema); + auto [colview, owned] = get_column(schema, 
&input->array, type, false, stream, mr); + return unique_column_view_t{new column_view{colview}, + custom_view_deleter{std::move(owned)}}; +} + +} // namespace detail + +unique_table_view_t from_arrow_device(ArrowSchema const* schema, + ArrowDeviceArray const* input, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + CUDF_EXPECTS(schema != nullptr && input != nullptr, + "input ArrowSchema and ArrowDeviceArray must not be NULL"); + CUDF_EXPECTS(input->device_type == ARROW_DEVICE_CUDA || + input->device_type == ARROW_DEVICE_CUDA_HOST || + input->device_type == ARROW_DEVICE_CUDA_MANAGED, + "ArrowDeviceArray memory must be accessible to CUDA"); + + CUDF_FUNC_RANGE(); + + rmm::cuda_set_device_raii dev( + rmm::cuda_device_id{static_cast(input->device_id)}); + ArrowSchemaView view; + NANOARROW_THROW_NOT_OK(ArrowSchemaViewInit(&view, schema, nullptr)); + return detail::from_arrow_device(&view, input, stream, mr); +} + +unique_column_view_t from_arrow_device_column(ArrowSchema const* schema, + ArrowDeviceArray const* input, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + CUDF_EXPECTS(schema != nullptr && input != nullptr, + "input ArrowSchema and ArrowDeviceArray must not be NULL"); + CUDF_EXPECTS(input->device_type == ARROW_DEVICE_CUDA || + input->device_type == ARROW_DEVICE_CUDA_HOST || + input->device_type == ARROW_DEVICE_CUDA_MANAGED, + "ArrowDeviceArray must be accessible to CUDA"); + + CUDF_FUNC_RANGE(); + + rmm::cuda_set_device_raii dev( + rmm::cuda_device_id{static_cast(input->device_id)}); + ArrowSchemaView view; + NANOARROW_THROW_NOT_OK(ArrowSchemaViewInit(&view, schema, nullptr)); + return detail::from_arrow_device_column(&view, input, stream, mr); +} + +} // namespace cudf diff --git a/cpp/src/interop/to_arrow_device.cu b/cpp/src/interop/to_arrow_device.cu index 737f8c7f625..f2b1669df9b 100644 --- a/cpp/src/interop/to_arrow_device.cu +++ b/cpp/src/interop/to_arrow_device.cu @@ -14,6 +14,7 @@ * limitations under the License. */ +#include "arrow_utilities.hpp" #include "to_arrow_utilities.hpp" #include @@ -49,9 +50,6 @@ namespace cudf { namespace detail { namespace { -static constexpr int validity_buffer_idx = 0; -static constexpr int fixed_width_data_buffer_idx = 1; - template void device_buffer_finalize(ArrowBufferAllocator* allocator, uint8_t*, int64_t) { diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt index d0c2b3d2bce..f59e675e1d5 100644 --- a/cpp/tests/CMakeLists.txt +++ b/cpp/tests/CMakeLists.txt @@ -268,8 +268,14 @@ ConfigureTest( # ################################################################################################## # * interop tests ------------------------------------------------------------------------- ConfigureTest( - INTEROP_TEST interop/to_arrow_device_test.cpp interop/to_arrow_test.cpp - interop/from_arrow_test.cpp interop/dlpack_test.cpp EXTRA_LIB nanoarrow + INTEROP_TEST + interop/to_arrow_device_test.cpp + interop/to_arrow_test.cpp + interop/from_arrow_test.cpp + interop/from_arrow_device_test.cpp + interop/dlpack_test.cpp + EXTRA_LIB + nanoarrow ) # ################################################################################################## diff --git a/cpp/tests/interop/from_arrow_device_test.cpp b/cpp/tests/interop/from_arrow_device_test.cpp new file mode 100644 index 00000000000..95cbe8057d1 --- /dev/null +++ b/cpp/tests/interop/from_arrow_device_test.cpp @@ -0,0 +1,732 @@ +/* + * Copyright (c) 2024, NVIDIA CORPORATION. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nanoarrow_utils.hpp" + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +struct FromArrowDeviceTest : public cudf::test::BaseFixture {}; + +template +struct FromArrowDeviceTestDurationsTest : public cudf::test::BaseFixture {}; + +TYPED_TEST_SUITE(FromArrowDeviceTestDurationsTest, cudf::test::DurationTypes); + +TEST_F(FromArrowDeviceTest, FailConditions) +{ + // can't pass null for schema or device array + EXPECT_THROW(cudf::from_arrow_device(nullptr, nullptr), cudf::logic_error); + // can't pass null for device array + ArrowSchema schema; + EXPECT_THROW(cudf::from_arrow_device(&schema, nullptr), cudf::logic_error); + // device_type must be CUDA/CUDA_HOST/CUDA_MANAGED + // should fail with ARROW_DEVICE_CPU + ArrowDeviceArray arr; + arr.device_type = ARROW_DEVICE_CPU; + EXPECT_THROW(cudf::from_arrow_device(&schema, &arr), cudf::logic_error); + + // can't pass null for schema or device array + EXPECT_THROW(cudf::from_arrow_device_column(nullptr, nullptr), cudf::logic_error); + // can't pass null for device array + EXPECT_THROW(cudf::from_arrow_device_column(&schema, nullptr), cudf::logic_error); + // device_type must be CUDA/CUDA_HOST/CUDA_MANAGED + // should fail with ARROW_DEVICE_CPU + EXPECT_THROW(cudf::from_arrow_device_column(&schema, &arr), cudf::logic_error); +} + +TEST_F(FromArrowDeviceTest, EmptyTable) +{ + const auto [table, schema, arr] = get_nanoarrow_tables(0); + + auto expected_cudf_table = table->view(); + + ArrowDeviceArray input; + memcpy(&input.array, arr.get(), sizeof(ArrowArray)); + input.device_id = rmm::get_current_cuda_device().value(); + input.device_type = ARROW_DEVICE_CUDA; + input.sync_event = nullptr; + + auto got_cudf_table = cudf::from_arrow_device(schema.get(), &input); + CUDF_TEST_EXPECT_TABLES_EQUAL(expected_cudf_table, *got_cudf_table); + + auto got_cudf_col = cudf::from_arrow_device_column(schema.get(), &input); + EXPECT_EQ(got_cudf_col->type(), cudf::data_type{cudf::type_id::STRUCT}); + cudf::table_view from_struct{ + std::vector(got_cudf_col->child_begin(), got_cudf_col->child_end())}; + CUDF_TEST_EXPECT_TABLES_EQUAL(*got_cudf_table, from_struct); +} + +TEST_F(FromArrowDeviceTest, DateTimeTable) +{ + auto data = std::vector{1, 2, 3, 4, 5, 6}; + auto col = cudf::test::fixed_width_column_wrapper( + data.begin(), data.end()); + + cudf::table_view expected_table_view({col}); + + nanoarrow::UniqueSchema input_schema; + ArrowSchemaInit(input_schema.get()); + ArrowSchemaSetTypeStruct(input_schema.get(), 1); + ArrowSchemaInit(input_schema->children[0]); + ArrowSchemaSetTypeDateTime( + input_schema->children[0], NANOARROW_TYPE_TIMESTAMP, NANOARROW_TIME_UNIT_MILLI, nullptr); + ArrowSchemaSetName(input_schema->children[0], "a"); + + nanoarrow::UniqueArray input_array; + ArrowArrayInitFromSchema(input_array.get(), input_schema.get(), 
nullptr); + input_array->length = 6; + input_array->null_count = 0; + input_array->children[0]->length = 6; + input_array->children[0]->null_count = 0; + ArrowBufferSetAllocator(ArrowArrayBuffer(input_array->children[0], 1), noop_alloc); + ArrowArrayBuffer(input_array->children[0], 1)->data = + const_cast(cudf::column_view(col).data()); + ArrowArrayFinishBuilding(input_array.get(), NANOARROW_VALIDATION_LEVEL_MINIMAL, nullptr); + + ArrowDeviceArray input_device_array; + input_device_array.device_id = rmm::get_current_cuda_device().value(); + input_device_array.device_type = ARROW_DEVICE_CUDA; + input_device_array.sync_event = nullptr; + memcpy(&input_device_array.array, input_array.get(), sizeof(ArrowArray)); + + auto got_cudf_table_view = cudf::from_arrow_device(input_schema.get(), &input_device_array); + CUDF_TEST_EXPECT_TABLES_EQUAL(expected_table_view, *got_cudf_table_view); + + auto got_cudf_col = cudf::from_arrow_device_column(input_schema.get(), &input_device_array); + EXPECT_EQ(got_cudf_col->type(), cudf::data_type{cudf::type_id::STRUCT}); + cudf::table_view from_struct{ + std::vector(got_cudf_col->child_begin(), got_cudf_col->child_end())}; + CUDF_TEST_EXPECT_TABLES_EQUAL(*got_cudf_table_view, from_struct); +} + +TYPED_TEST(FromArrowDeviceTestDurationsTest, DurationTable) +{ + using T = TypeParam; + + if (cudf::type_to_id() == cudf::type_id::DURATION_DAYS) { return; } + + auto data = {T{1}, T{2}, T{3}, T{4}, T{5}, T{6}}; + auto col = cudf::test::fixed_width_column_wrapper(data); + + cudf::table_view expected_table_view({col}); + const ArrowTimeUnit time_unit = [&] { + switch (cudf::type_to_id()) { + case cudf::type_id::DURATION_SECONDS: return NANOARROW_TIME_UNIT_SECOND; + case cudf::type_id::DURATION_MILLISECONDS: return NANOARROW_TIME_UNIT_MILLI; + case cudf::type_id::DURATION_MICROSECONDS: return NANOARROW_TIME_UNIT_MICRO; + case cudf::type_id::DURATION_NANOSECONDS: return NANOARROW_TIME_UNIT_NANO; + default: CUDF_FAIL("Unsupported duration unit in arrow"); + } + }(); + + nanoarrow::UniqueSchema input_schema; + ArrowSchemaInit(input_schema.get()); + ArrowSchemaSetTypeStruct(input_schema.get(), 1); + + ArrowSchemaInit(input_schema->children[0]); + ArrowSchemaSetTypeDateTime( + input_schema->children[0], NANOARROW_TYPE_DURATION, time_unit, nullptr); + ArrowSchemaSetName(input_schema->children[0], "a"); + + auto data_ptr = expected_table_view.column(0).data(); + nanoarrow::UniqueArray input_array; + ArrowArrayInitFromSchema(input_array.get(), input_schema.get(), nullptr); + input_array->length = expected_table_view.num_rows(); + input_array->null_count = 0; + input_array->children[0]->length = expected_table_view.num_rows(); + input_array->children[0]->null_count = 0; + ArrowBufferSetAllocator(ArrowArrayBuffer(input_array->children[0], 1), noop_alloc); + ArrowArrayBuffer(input_array->children[0], 1)->data = const_cast(data_ptr); + ArrowArrayFinishBuilding(input_array.get(), NANOARROW_VALIDATION_LEVEL_MINIMAL, nullptr); + + ArrowDeviceArray input_device_array; + input_device_array.device_id = rmm::get_current_cuda_device().value(); + input_device_array.device_type = ARROW_DEVICE_CUDA; + input_device_array.sync_event = nullptr; + memcpy(&input_device_array.array, input_array.get(), sizeof(ArrowArray)); + + auto got_cudf_table_view = cudf::from_arrow_device(input_schema.get(), &input_device_array); + CUDF_TEST_EXPECT_TABLES_EQUAL(expected_table_view, *got_cudf_table_view); + + auto got_cudf_col = cudf::from_arrow_device_column(input_schema.get(), &input_device_array); + 
EXPECT_EQ(got_cudf_col->type(), cudf::data_type{cudf::type_id::STRUCT}); + cudf::table_view from_struct{ + std::vector(got_cudf_col->child_begin(), got_cudf_col->child_end())}; + CUDF_TEST_EXPECT_TABLES_EQUAL(*got_cudf_table_view, from_struct); +} + +TEST_F(FromArrowDeviceTest, NestedList) +{ + auto valids = + cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 3 != 0; }); + auto col = cudf::test::lists_column_wrapper( + {{{{{1, 2}, valids}, {{3, 4}, valids}, {5}}, {{6}, {{7, 8, 9}, valids}}}, valids}); + cudf::table_view expected_table_view({col}); + + nanoarrow::UniqueSchema input_schema; + ArrowSchemaInit(input_schema.get()); + ArrowSchemaSetTypeStruct(input_schema.get(), 1); + + ArrowSchemaInitFromType(input_schema->children[0], NANOARROW_TYPE_LIST); + ArrowSchemaSetName(input_schema->children[0], "a"); + input_schema->children[0]->flags = ARROW_FLAG_NULLABLE; + + ArrowSchemaInitFromType(input_schema->children[0]->children[0], NANOARROW_TYPE_LIST); + ArrowSchemaSetName(input_schema->children[0]->children[0], "element"); + input_schema->children[0]->children[0]->flags = 0; + + ArrowSchemaInitFromType(input_schema->children[0]->children[0]->children[0], + NANOARROW_TYPE_INT64); + ArrowSchemaSetName(input_schema->children[0]->children[0]->children[0], "element"); + input_schema->children[0]->children[0]->children[0]->flags = ARROW_FLAG_NULLABLE; + + nanoarrow::UniqueArray input_array; + EXPECT_EQ(NANOARROW_OK, ArrowArrayInitFromSchema(input_array.get(), input_schema.get(), nullptr)); + input_array->length = expected_table_view.num_rows(); + auto top_list = input_array->children[0]; + cudf::lists_column_view lview{expected_table_view.column(0)}; + populate_list_from_col(top_list, lview); + cudf::lists_column_view nested_view{lview.child()}; + populate_list_from_col(top_list->children[0], nested_view); + populate_from_col(top_list->children[0]->children[0], nested_view.child()); + ArrowArrayFinishBuilding(input_array.get(), NANOARROW_VALIDATION_LEVEL_NONE, nullptr); + + ArrowDeviceArray input_device_array; + input_device_array.device_id = rmm::get_current_cuda_device().value(); + input_device_array.device_type = ARROW_DEVICE_CUDA; + input_device_array.sync_event = nullptr; + memcpy(&input_device_array.array, input_array.get(), sizeof(ArrowArray)); + + auto got_cudf_table_view = cudf::from_arrow_device(input_schema.get(), &input_device_array); + CUDF_TEST_EXPECT_TABLES_EQUAL(expected_table_view, *got_cudf_table_view); + + auto got_cudf_col = cudf::from_arrow_device_column(input_schema.get(), &input_device_array); + EXPECT_EQ(got_cudf_col->type(), cudf::data_type{cudf::type_id::STRUCT}); + cudf::table_view from_struct{ + std::vector(got_cudf_col->child_begin(), got_cudf_col->child_end())}; + CUDF_TEST_EXPECT_TABLES_EQUAL(*got_cudf_table_view, from_struct); +} + +TEST_F(FromArrowDeviceTest, StructColumn) +{ + using vector_of_columns = std::vector>; + + // Create cudf table + auto nested_type_field_names = + std::vector>{{"string", "integral", "bool", "nested_list", "struct"}}; + auto str_col = + cudf::test::strings_column_wrapper{ + "Samuel Vimes", "Carrot Ironfoundersson", "Angua von Überwald"} + .release(); + auto str_col2 = + cudf::test::strings_column_wrapper{{"CUDF", "ROCKS", "EVERYWHERE"}, {0, 1, 0}}.release(); + int num_rows{str_col->size()}; + auto int_col = cudf::test::fixed_width_column_wrapper{{48, 27, 25}}.release(); + auto int_col2 = + cudf::test::fixed_width_column_wrapper{{12, 24, 47}, {1, 0, 1}}.release(); + auto bool_col = 
cudf::test::fixed_width_column_wrapper{{true, true, false}}.release(); + auto list_col = + cudf::test::lists_column_wrapper({{{1, 2}, {3, 4}, {5}}, {{{6}}}, {{7}, {8, 9}}}) + .release(); + vector_of_columns cols2; + cols2.push_back(std::move(str_col2)); + cols2.push_back(std::move(int_col2)); + auto [null_mask, null_count] = + cudf::bools_to_mask(cudf::test::fixed_width_column_wrapper{{true, true, false}}); + auto sub_struct_col = + cudf::make_structs_column(num_rows, std::move(cols2), null_count, std::move(*null_mask)); + vector_of_columns cols; + cols.push_back(std::move(str_col)); + cols.push_back(std::move(int_col)); + cols.push_back(std::move(bool_col)); + cols.push_back(std::move(list_col)); + cols.push_back(std::move(sub_struct_col)); + + auto struct_col = cudf::make_structs_column(num_rows, std::move(cols), 0, {}); + cudf::table_view expected_table_view({struct_col->view()}); + + // Create name metadata + auto sub_metadata = cudf::column_metadata{"struct"}; + sub_metadata.children_meta = {{"string2"}, {"integral2"}}; + auto metadata = cudf::column_metadata{"a"}; + metadata.children_meta = {{"string"}, {"integral"}, {"bool"}, {"nested_list"}, sub_metadata}; + + nanoarrow::UniqueSchema input_schema; + ArrowSchemaInit(input_schema.get()); + ArrowSchemaSetTypeStruct(input_schema.get(), 1); + + ArrowSchemaInit(input_schema->children[0]); + ArrowSchemaSetTypeStruct(input_schema->children[0], 5); + ArrowSchemaSetName(input_schema->children[0], "a"); + input_schema->children[0]->flags = 0; + + auto child = input_schema->children[0]; + ArrowSchemaInitFromType(child->children[0], NANOARROW_TYPE_STRING); + ArrowSchemaSetName(child->children[0], "string"); + child->children[0]->flags = 0; + + ArrowSchemaInitFromType(child->children[1], NANOARROW_TYPE_INT32); + ArrowSchemaSetName(child->children[1], "integral"); + child->children[1]->flags = 0; + + ArrowSchemaInitFromType(child->children[2], NANOARROW_TYPE_BOOL); + ArrowSchemaSetName(child->children[2], "bool"); + child->children[2]->flags = 0; + + ArrowSchemaInitFromType(child->children[3], NANOARROW_TYPE_LIST); + ArrowSchemaSetName(child->children[3], "nested_list"); + child->children[3]->flags = 0; + ArrowSchemaInitFromType(child->children[3]->children[0], NANOARROW_TYPE_LIST); + ArrowSchemaSetName(child->children[3]->children[0], "element"); + child->children[3]->children[0]->flags = 0; + ArrowSchemaInitFromType(child->children[3]->children[0]->children[0], NANOARROW_TYPE_INT64); + ArrowSchemaSetName(child->children[3]->children[0]->children[0], "element"); + child->children[3]->children[0]->children[0]->flags = 0; + + ArrowSchemaInit(child->children[4]); + ArrowSchemaSetTypeStruct(child->children[4], 2); + ArrowSchemaSetName(child->children[4], "struct"); + + ArrowSchemaInitFromType(child->children[4]->children[0], NANOARROW_TYPE_STRING); + ArrowSchemaSetName(child->children[4]->children[0], "string2"); + ArrowSchemaInitFromType(child->children[4]->children[1], NANOARROW_TYPE_INT32); + ArrowSchemaSetName(child->children[4]->children[1], "integral2"); + + nanoarrow::UniqueArray input_array; + ArrowArrayInitFromSchema(input_array.get(), input_schema.get(), nullptr); + + input_array->length = expected_table_view.num_rows(); + + auto array_a = input_array->children[0]; + auto view_a = expected_table_view.column(0); + array_a->length = view_a.size(); + array_a->null_count = view_a.null_count(); + + ArrowBufferSetAllocator(ArrowArrayBuffer(array_a, 0), noop_alloc); + ArrowArrayValidityBitmap(array_a)->buffer.data = + 
const_cast(reinterpret_cast(view_a.null_mask())); + + populate_from_col(array_a->children[0], view_a.child(0)); + populate_from_col(array_a->children[1], view_a.child(1)); + populate_from_col(array_a->children[2], view_a.child(2)); + populate_list_from_col(array_a->children[3], cudf::lists_column_view{view_a.child(3)}); + populate_list_from_col(array_a->children[3]->children[0], + cudf::lists_column_view{view_a.child(3).child(1)}); + populate_from_col(array_a->children[3]->children[0]->children[0], + view_a.child(3).child(1).child(1)); + + auto array_struct = array_a->children[4]; + auto view_struct = view_a.child(4); + array_struct->length = view_struct.size(); + array_struct->null_count = view_struct.null_count(); + + ArrowBufferSetAllocator(ArrowArrayBuffer(array_struct, 0), noop_alloc); + ArrowArrayValidityBitmap(array_struct)->buffer.data = + const_cast(reinterpret_cast(view_struct.null_mask())); + + populate_from_col(array_struct->children[0], view_struct.child(0)); + populate_from_col(array_struct->children[1], view_struct.child(1)); + + ArrowArrayFinishBuilding(input_array.get(), NANOARROW_VALIDATION_LEVEL_NONE, nullptr); + + ArrowDeviceArray input_device_array; + input_device_array.device_id = rmm::get_current_cuda_device().value(); + input_device_array.device_type = ARROW_DEVICE_CUDA; + input_device_array.sync_event = nullptr; + memcpy(&input_device_array.array, input_array.get(), sizeof(ArrowArray)); + + auto got_cudf_table_view = cudf::from_arrow_device(input_schema.get(), &input_device_array); + CUDF_TEST_EXPECT_TABLES_EQUAL(expected_table_view, *got_cudf_table_view); + + { + // there's one boolean column so we should have one "owned_mem" column in the + // returned unique_ptr's custom deleter + const cudf::custom_view_deleter& deleter = got_cudf_table_view.get_deleter(); + EXPECT_EQ(deleter.owned_mem_.size(), 1); + } + + auto got_cudf_col = cudf::from_arrow_device_column(input_schema.get(), &input_device_array); + EXPECT_EQ(got_cudf_col->type(), cudf::data_type{cudf::type_id::STRUCT}); + cudf::table_view from_struct{ + std::vector(got_cudf_col->child_begin(), got_cudf_col->child_end())}; + CUDF_TEST_EXPECT_TABLES_EQUAL(*got_cudf_table_view, from_struct); + + { + // there's one boolean column so we should have one "owned_mem" column in the + // returned unique_ptr's custom deleter + const cudf::custom_view_deleter& deleter = got_cudf_col.get_deleter(); + EXPECT_EQ(deleter.owned_mem_.size(), 1); + } +} + +TEST_F(FromArrowDeviceTest, DictionaryIndicesType) +{ + std::vector> columns; + auto col = cudf::test::fixed_width_column_wrapper({1, 2, 5, 2, 7}, {1, 0, 1, 1, 1}); + columns.emplace_back(std::move(cudf::dictionary::encode(col))); + columns.emplace_back(std::move(cudf::dictionary::encode(col))); + columns.emplace_back(std::move(cudf::dictionary::encode(col))); + + cudf::table expected_table(std::move(columns)); + cudf::table_view expected_table_view = expected_table.view(); + + nanoarrow::UniqueSchema input_schema; + ArrowSchemaInit(input_schema.get()); + ArrowSchemaSetTypeStruct(input_schema.get(), 3); + + ArrowSchemaInitFromType(input_schema->children[0], NANOARROW_TYPE_INT8); + ArrowSchemaSetName(input_schema->children[0], "a"); + ArrowSchemaAllocateDictionary(input_schema->children[0]); + ArrowSchemaInitFromType(input_schema->children[0]->dictionary, NANOARROW_TYPE_INT64); + + ArrowSchemaInitFromType(input_schema->children[1], NANOARROW_TYPE_INT16); + ArrowSchemaSetName(input_schema->children[1], "b"); + ArrowSchemaAllocateDictionary(input_schema->children[1]); + 
ArrowSchemaInitFromType(input_schema->children[1]->dictionary, NANOARROW_TYPE_INT64); + + ArrowSchemaInitFromType(input_schema->children[2], NANOARROW_TYPE_INT64); + ArrowSchemaSetName(input_schema->children[2], "c"); + ArrowSchemaAllocateDictionary(input_schema->children[2]); + ArrowSchemaInitFromType(input_schema->children[2]->dictionary, NANOARROW_TYPE_INT64); + + nanoarrow::UniqueArray input_array; + ArrowArrayInitFromSchema(input_array.get(), input_schema.get(), nullptr); + input_array->length = expected_table.num_rows(); + input_array->null_count = 0; + + auto col1_indices = + cudf::test::fixed_width_column_wrapper({0, 1, 2, 1, 3}, {1, 0, 1, 1, 1}); + populate_from_col(input_array->children[0], col1_indices); + populate_from_col(input_array->children[0]->dictionary, + cudf::dictionary_column_view{expected_table_view.column(0)}.keys()); + + auto col2_indices = + cudf::test::fixed_width_column_wrapper({0, 1, 2, 1, 3}, {1, 0, 1, 1, 1}); + populate_from_col(input_array->children[1], col2_indices); + populate_from_col(input_array->children[1]->dictionary, + cudf::dictionary_column_view{expected_table_view.column(1)}.keys()); + + auto col3_indices = + cudf::test::fixed_width_column_wrapper({0, 1, 2, 1, 3}, {1, 0, 1, 1, 1}); + populate_from_col(input_array->children[2], col3_indices); + populate_from_col(input_array->children[2]->dictionary, + cudf::dictionary_column_view{expected_table_view.column(2)}.keys()); + + ArrowArrayFinishBuilding(input_array.get(), NANOARROW_VALIDATION_LEVEL_NONE, nullptr); + + ArrowDeviceArray input_device_array; + input_device_array.device_id = rmm::get_current_cuda_device().value(); + input_device_array.device_type = ARROW_DEVICE_CUDA; + input_device_array.sync_event = nullptr; + memcpy(&input_device_array.array, input_array.get(), sizeof(ArrowArray)); + + auto got_cudf_table_view = cudf::from_arrow_device(input_schema.get(), &input_device_array); + CUDF_TEST_EXPECT_TABLES_EQUAL(expected_table_view, *got_cudf_table_view); + + { + const cudf::custom_view_deleter& deleter = got_cudf_table_view.get_deleter(); + EXPECT_EQ(deleter.owned_mem_.size(), 0); + } + + auto got_cudf_col = cudf::from_arrow_device_column(input_schema.get(), &input_device_array); + EXPECT_EQ(got_cudf_col->type(), cudf::data_type{cudf::type_id::STRUCT}); + cudf::table_view from_struct{ + std::vector(got_cudf_col->child_begin(), got_cudf_col->child_end())}; + CUDF_TEST_EXPECT_TABLES_EQUAL(*got_cudf_table_view, from_struct); + + { + const cudf::custom_view_deleter& deleter = got_cudf_col.get_deleter(); + EXPECT_EQ(deleter.owned_mem_.size(), 0); + } +} + +void slice_nanoarrow(ArrowArray* arr, int64_t start, int64_t end) +{ + auto op = [&](ArrowArray* array) { + array->offset = start; + array->length = end - start; + if (array->null_count != 0) { + array->null_count = + cudf::null_count(reinterpret_cast(array->buffers[0]), + start, + end, + cudf::get_default_stream()); + } + }; + + if (arr->n_children == 0) { + op(arr); + return; + } + + arr->length = end - start; + for (int64_t i = 0; i < arr->n_children; ++i) { + op(arr->children[i]); + } +} + +struct FromArrowDeviceTestSlice + : public FromArrowDeviceTest, + public ::testing::WithParamInterface> {}; + +TEST_P(FromArrowDeviceTestSlice, SliceTest) +{ + auto [table, schema, array] = get_nanoarrow_tables(10000); + auto cudf_table_view = table->view(); + auto const [start, end] = GetParam(); + + auto sliced_cudf_table = cudf::slice(cudf_table_view, {start, end})[0]; + slice_nanoarrow(array.get(), start, end); + + ArrowDeviceArray input_device_array; 
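+  // Wrap the sliced nanoarrow array in an ArrowDeviceArray (the buffers here are assumed
+  // device-accessible, so ARROW_DEVICE_CUDA and a null sync event are used) and check that
+  // from_arrow_device honors the Arrow offset/length, matching the cudf::slice result above.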
+ input_device_array.device_id = rmm::get_current_cuda_device().value(); + input_device_array.device_type = ARROW_DEVICE_CUDA; + input_device_array.sync_event = nullptr; + memcpy(&input_device_array.array, array.get(), sizeof(ArrowArray)); + + auto got_cudf_table_view = cudf::from_arrow_device(schema.get(), &input_device_array); + if (got_cudf_table_view->num_rows() == 0 and sliced_cudf_table.num_rows() == 0) { + CUDF_TEST_EXPECT_TABLES_EQUIVALENT(sliced_cudf_table, *got_cudf_table_view); + + auto got_cudf_col = cudf::from_arrow_device_column(schema.get(), &input_device_array); + EXPECT_EQ(got_cudf_col->type(), cudf::data_type{cudf::type_id::STRUCT}); + cudf::table_view from_struct{ + std::vector(got_cudf_col->child_begin(), got_cudf_col->child_end())}; + CUDF_TEST_EXPECT_TABLES_EQUIVALENT(*got_cudf_table_view, from_struct); + + } else { + CUDF_TEST_EXPECT_TABLES_EQUAL(sliced_cudf_table, *got_cudf_table_view); + + auto got_cudf_col = cudf::from_arrow_device_column(schema.get(), &input_device_array); + EXPECT_EQ(got_cudf_col->type(), cudf::data_type{cudf::type_id::STRUCT}); + cudf::table_view from_struct{ + std::vector(got_cudf_col->child_begin(), got_cudf_col->child_end())}; + CUDF_TEST_EXPECT_TABLES_EQUAL(*got_cudf_table_view, from_struct); + } +} + +INSTANTIATE_TEST_CASE_P(FromArrowDeviceTest, + FromArrowDeviceTestSlice, + ::testing::Values(std::make_tuple(0, 10000), + std::make_tuple(2912, 2915), + std::make_tuple(100, 3000), + std::make_tuple(0, 0), + std::make_tuple(0, 3000), + std::make_tuple(10000, 10000))); + +template +using fp_wrapper = cudf::test::fixed_point_column_wrapper; + +TEST_F(FromArrowDeviceTest, FixedPoint128Table) +{ + using namespace numeric; + + for (auto const scale : {3, 2, 1, 0, -1, -2, -3}) { + auto const data = std::vector<__int128_t>{1, 2, 3, 4, 5, 6}; + auto const col = fp_wrapper<__int128_t>(data.cbegin(), data.cend(), scale_type{scale}); + auto const expected = cudf::table_view({col}); + + nanoarrow::UniqueSchema input_schema; + ArrowSchemaInit(input_schema.get()); + ArrowSchemaSetTypeStruct(input_schema.get(), 1); + ArrowSchemaInit(input_schema->children[0]); + ArrowSchemaSetTypeDecimal(input_schema->children[0], + NANOARROW_TYPE_DECIMAL128, + cudf::detail::max_precision<__int128_t>(), + -scale); + ArrowSchemaSetName(input_schema->children[0], "a"); + + nanoarrow::UniqueArray input_array; + ArrowArrayInitFromSchema(input_array.get(), input_schema.get(), nullptr); + input_array->length = expected.num_rows(); + + populate_from_col<__int128_t>(input_array->children[0], expected.column(0)); + ArrowArrayFinishBuilding(input_array.get(), NANOARROW_VALIDATION_LEVEL_NONE, nullptr); + + ArrowDeviceArray input_device_array; + input_device_array.device_id = rmm::get_current_cuda_device().value(); + input_device_array.device_type = ARROW_DEVICE_CUDA; + input_device_array.sync_event = nullptr; + memcpy(&input_device_array.array, input_array.get(), sizeof(ArrowArray)); + + auto got_cudf_table_view = cudf::from_arrow_device(input_schema.get(), &input_device_array); + CUDF_TEST_EXPECT_TABLES_EQUAL(expected, *got_cudf_table_view); + + auto got_cudf_col = cudf::from_arrow_device_column(input_schema.get(), &input_device_array); + EXPECT_EQ(got_cudf_col->type(), cudf::data_type{cudf::type_id::STRUCT}); + cudf::table_view from_struct{ + std::vector(got_cudf_col->child_begin(), got_cudf_col->child_end())}; + CUDF_TEST_EXPECT_TABLES_EQUAL(*got_cudf_table_view, from_struct); + } +} + +TEST_F(FromArrowDeviceTest, FixedPoint128TableLarge) +{ + using namespace numeric; + auto 
constexpr NUM_ELEMENTS = 1000; + + for (auto const scale : {3, 2, 1, 0, -1, -2, -3}) { + auto iota = thrust::make_counting_iterator(1); + auto const data = std::vector<__int128_t>(iota, iota + NUM_ELEMENTS); + auto const col = fp_wrapper<__int128_t>(iota, iota + NUM_ELEMENTS, scale_type{scale}); + auto const expected = cudf::table_view({col}); + + nanoarrow::UniqueSchema input_schema; + ArrowSchemaInit(input_schema.get()); + ArrowSchemaSetTypeStruct(input_schema.get(), 1); + ArrowSchemaInit(input_schema->children[0]); + ArrowSchemaSetTypeDecimal(input_schema->children[0], + NANOARROW_TYPE_DECIMAL128, + cudf::detail::max_precision<__int128_t>(), + -scale); + ArrowSchemaSetName(input_schema->children[0], "a"); + + nanoarrow::UniqueArray input_array; + ArrowArrayInitFromSchema(input_array.get(), input_schema.get(), nullptr); + input_array->length = expected.num_rows(); + + populate_from_col<__int128_t>(input_array->children[0], expected.column(0)); + ArrowArrayFinishBuilding(input_array.get(), NANOARROW_VALIDATION_LEVEL_NONE, nullptr); + + ArrowDeviceArray input_device_array; + input_device_array.device_id = rmm::get_current_cuda_device().value(); + input_device_array.device_type = ARROW_DEVICE_CUDA; + input_device_array.sync_event = nullptr; + memcpy(&input_device_array.array, input_array.get(), sizeof(ArrowArray)); + + auto got_cudf_table_view = cudf::from_arrow_device(input_schema.get(), &input_device_array); + CUDF_TEST_EXPECT_TABLES_EQUAL(expected, *got_cudf_table_view); + + auto got_cudf_col = cudf::from_arrow_device_column(input_schema.get(), &input_device_array); + EXPECT_EQ(got_cudf_col->type(), cudf::data_type{cudf::type_id::STRUCT}); + cudf::table_view from_struct{ + std::vector(got_cudf_col->child_begin(), got_cudf_col->child_end())}; + CUDF_TEST_EXPECT_TABLES_EQUAL(*got_cudf_table_view, from_struct); + } +} + +TEST_F(FromArrowDeviceTest, FixedPoint128TableNulls) +{ + using namespace numeric; + + for (auto const scale : {3, 2, 1, 0, -1, -2, -3}) { + auto const data = std::vector<__int128_t>{1, 2, 3, 4, 5, 6, 0, 0}; + auto const validity = std::vector{1, 1, 1, 1, 1, 1, 0, 0}; + auto const col = + fp_wrapper<__int128_t>({1, 2, 3, 4, 5, 6, 0, 0}, {1, 1, 1, 1, 1, 1, 0, 0}, scale_type{scale}); + auto const expected = cudf::table_view({col}); + + nanoarrow::UniqueSchema input_schema; + ArrowSchemaInit(input_schema.get()); + ArrowSchemaSetTypeStruct(input_schema.get(), 1); + ArrowSchemaInit(input_schema->children[0]); + ArrowSchemaSetTypeDecimal(input_schema->children[0], + NANOARROW_TYPE_DECIMAL128, + cudf::detail::max_precision<__int128_t>(), + -scale); + ArrowSchemaSetName(input_schema->children[0], "a"); + + nanoarrow::UniqueArray input_array; + ArrowArrayInitFromSchema(input_array.get(), input_schema.get(), nullptr); + input_array->length = expected.num_rows(); + + populate_from_col<__int128_t>(input_array->children[0], expected.column(0)); + ArrowArrayFinishBuilding(input_array.get(), NANOARROW_VALIDATION_LEVEL_NONE, nullptr); + + ArrowDeviceArray input_device_array; + input_device_array.device_id = rmm::get_current_cuda_device().value(); + input_device_array.device_type = ARROW_DEVICE_CUDA; + input_device_array.sync_event = nullptr; + memcpy(&input_device_array.array, input_array.get(), sizeof(ArrowArray)); + + auto got_cudf_table_view = cudf::from_arrow_device(input_schema.get(), &input_device_array); + CUDF_TEST_EXPECT_TABLES_EQUAL(expected, *got_cudf_table_view); + + auto got_cudf_col = cudf::from_arrow_device_column(input_schema.get(), &input_device_array); + 
EXPECT_EQ(got_cudf_col->type(), cudf::data_type{cudf::type_id::STRUCT}); + cudf::table_view from_struct{ + std::vector(got_cudf_col->child_begin(), got_cudf_col->child_end())}; + CUDF_TEST_EXPECT_TABLES_EQUAL(*got_cudf_table_view, from_struct); + } +} + +TEST_F(FromArrowDeviceTest, FixedPoint128TableNullsLarge) +{ + using namespace numeric; + auto constexpr NUM_ELEMENTS = 1000; + + for (auto const scale : {3, 2, 1, 0, -1, -2, -3}) { + auto every_other = [](auto i) { return i % 2 ? 0 : 1; }; + auto validity = cudf::detail::make_counting_transform_iterator(0, every_other); + auto iota = thrust::make_counting_iterator(1); + auto const data = std::vector<__int128_t>(iota, iota + NUM_ELEMENTS); + auto const col = fp_wrapper<__int128_t>(iota, iota + NUM_ELEMENTS, validity, scale_type{scale}); + auto const expected = cudf::table_view({col}); + + nanoarrow::UniqueSchema input_schema; + ArrowSchemaInit(input_schema.get()); + ArrowSchemaSetTypeStruct(input_schema.get(), 1); + ArrowSchemaInit(input_schema->children[0]); + ArrowSchemaSetTypeDecimal(input_schema->children[0], + NANOARROW_TYPE_DECIMAL128, + cudf::detail::max_precision<__int128_t>(), + -scale); + ArrowSchemaSetName(input_schema->children[0], "a"); + + nanoarrow::UniqueArray input_array; + ArrowArrayInitFromSchema(input_array.get(), input_schema.get(), nullptr); + input_array->length = expected.num_rows(); + + populate_from_col<__int128_t>(input_array->children[0], expected.column(0)); + ArrowArrayFinishBuilding(input_array.get(), NANOARROW_VALIDATION_LEVEL_NONE, nullptr); + + ArrowDeviceArray input_device_array; + input_device_array.device_id = rmm::get_current_cuda_device().value(); + input_device_array.device_type = ARROW_DEVICE_CUDA; + input_device_array.sync_event = nullptr; + memcpy(&input_device_array.array, input_array.get(), sizeof(ArrowArray)); + + auto got_cudf_table_view = cudf::from_arrow_device(input_schema.get(), &input_device_array); + CUDF_TEST_EXPECT_TABLES_EQUAL(expected, *got_cudf_table_view); + + auto got_cudf_col = cudf::from_arrow_device_column(input_schema.get(), &input_device_array); + EXPECT_EQ(got_cudf_col->type(), cudf::data_type{cudf::type_id::STRUCT}); + cudf::table_view from_struct{ + std::vector(got_cudf_col->child_begin(), got_cudf_col->child_end())}; + CUDF_TEST_EXPECT_TABLES_EQUAL(*got_cudf_table_view, from_struct); + } +} diff --git a/cpp/tests/interop/nanoarrow_utils.hpp b/cpp/tests/interop/nanoarrow_utils.hpp index c4b53282402..b795bafed97 100644 --- a/cpp/tests/interop/nanoarrow_utils.hpp +++ b/cpp/tests/interop/nanoarrow_utils.hpp @@ -17,6 +17,7 @@ #pragma once #include +#include #include #include #include @@ -25,6 +26,8 @@ #include #include +#include + // no-op allocator/deallocator to set into ArrowArray buffers that we don't // want to own their buffers. static ArrowBufferAllocator noop_alloc = (struct ArrowBufferAllocator){ @@ -35,28 +38,6 @@ static ArrowBufferAllocator noop_alloc = (struct ArrowBufferAllocator){ .private_data = nullptr, }; -// populate the ArrowArray by copying host data buffers for fixed width types other -// than boolean. 
-template -std::enable_if_t() and !std::is_same_v, void> get_nanoarrow_array( - ArrowArray* arr, std::vector const& data, std::vector const& mask = {}) -{ - arr->length = data.size(); - NANOARROW_THROW_NOT_OK( - ArrowBufferAppend(ArrowArrayBuffer(arr, 1), data.data(), sizeof(T) * data.size())); - if (!mask.empty()) { - NANOARROW_THROW_NOT_OK(ArrowBitmapReserve(ArrowArrayValidityBitmap(arr), mask.size())); - ArrowBitmapAppendInt8Unsafe( - ArrowArrayValidityBitmap(arr), reinterpret_cast(mask.data()), mask.size()); - arr->null_count = ArrowBitCountSet(ArrowArrayValidityBitmap(arr)->buffer.data, 0, data.size()); - } else { - arr->null_count = 0; - } - - CUDF_EXPECTS(ArrowArrayFinishBuildingDefault(arr, nullptr) == NANOARROW_OK, - "failed to construct array"); -} - // populate an ArrowArray with pointers to the raw device buffers of a cudf::column_view // and use the no-op alloc so that the ArrowArray doesn't presume ownership of the data template @@ -66,38 +47,13 @@ std::enable_if_t() and !std::is_same_v, void> p arr->length = view.size(); arr->null_count = view.null_count(); NANOARROW_THROW_NOT_OK(ArrowBufferSetAllocator(ArrowArrayBuffer(arr, 0), noop_alloc)); + ArrowArrayValidityBitmap(arr)->buffer.size_bytes = + cudf::bitmask_allocation_size_bytes(view.size()); ArrowArrayValidityBitmap(arr)->buffer.data = const_cast(reinterpret_cast(view.null_mask())); NANOARROW_THROW_NOT_OK(ArrowBufferSetAllocator(ArrowArrayBuffer(arr, 1), noop_alloc)); - ArrowArrayBuffer(arr, 1)->data = const_cast(view.data()); -} - -// populate an ArrowArray with boolean data by generating the appropriate -// bitmaps to copy the data. -template -std::enable_if_t, void> get_nanoarrow_array( - ArrowArray* arr, std::vector const& data, std::vector const& mask = {}) -{ - ArrowBitmap bool_data; - ArrowBitmapInit(&bool_data); - NANOARROW_THROW_NOT_OK(ArrowBitmapReserve(&bool_data, data.size())); - std::for_each(data.begin(), data.end(), [&](const auto&& elem) { - NANOARROW_THROW_NOT_OK(ArrowBitmapAppend(&bool_data, (elem) ? 1 : 0, 1)); - }); - NANOARROW_THROW_NOT_OK(ArrowArraySetBuffer(arr, 1, &bool_data.buffer)); - - if (!mask.empty()) { - NANOARROW_THROW_NOT_OK(ArrowBitmapReserve(ArrowArrayValidityBitmap(arr), mask.size())); - std::for_each(mask.begin(), mask.end(), [&](const auto&& elem) { - NANOARROW_THROW_NOT_OK(ArrowBitmapAppend(ArrowArrayValidityBitmap(arr), (elem) ? 1 : 0, 1)); - }); - arr->null_count = ArrowBitCountSet(ArrowArrayValidityBitmap(arr)->buffer.data, 0, data.size()); - } else { - arr->null_count = 0; - } - - CUDF_EXPECTS(ArrowArrayFinishBuildingDefault(arr, nullptr) == NANOARROW_OK, - "failed to construct boolean array"); + ArrowArrayBuffer(arr, 1)->size_bytes = sizeof(T) * view.size(); + ArrowArrayBuffer(arr, 1)->data = const_cast(view.data()); } // populate an ArrowArray from a boolean cudf column. 
@@ -109,7 +65,10 @@ std::enable_if_t<std::is_same_v<T, bool>, void> populate_from_col(ArrowArray* ar
 {
   arr->length     = view.size();
   arr->null_count = view.null_count();
+  NANOARROW_THROW_NOT_OK(ArrowBufferSetAllocator(ArrowArrayBuffer(arr, 0), noop_alloc));
+  ArrowArrayValidityBitmap(arr)->buffer.size_bytes =
+    cudf::bitmask_allocation_size_bytes(view.size());
   ArrowArrayValidityBitmap(arr)->buffer.data =
     const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(view.null_mask()));
 
@@ -123,32 +82,8 @@ std::enable_if_t<std::is_same_v<T, bool>, void> populate_from_col(ArrowArray* ar
       {
         delete buf;
       },
       new std::unique_ptr<rmm::device_buffer>(std::move(bitmask.first)))));
-  ArrowArrayBuffer(arr, 1)->data = ptr;
-}
-
-// populate an ArrowArray by copying the string data and constructing the offsets
-// buffer.
-template <typename T>
-std::enable_if_t<std::is_same_v<T, std::string>, void> get_nanoarrow_array(
-  ArrowArray* arr, std::vector<std::string> const& data, std::vector<uint8_t> const& mask = {})
-{
-  NANOARROW_THROW_NOT_OK(ArrowArrayStartAppending(arr));
-  for (auto& str : data) {
-    NANOARROW_THROW_NOT_OK(ArrowArrayAppendString(arr, ArrowCharView(str.c_str())));
-  }
-
-  if (!mask.empty()) {
-    ArrowBitmapReset(ArrowArrayValidityBitmap(arr));
-    NANOARROW_THROW_NOT_OK(ArrowBitmapReserve(ArrowArrayValidityBitmap(arr), mask.size()));
-    ArrowBitmapAppendInt8Unsafe(
-      ArrowArrayValidityBitmap(arr), reinterpret_cast<const int8_t*>(mask.data()), mask.size());
-    arr->null_count = ArrowBitCountSet(ArrowArrayValidityBitmap(arr)->buffer.data, 0, data.size());
-  } else {
-    arr->null_count = 0;
-  }
-
-  CUDF_EXPECTS(ArrowArrayFinishBuildingDefault(arr, nullptr) == NANOARROW_OK,
-               "failed to construct string array");
+  ArrowArrayBuffer(arr, 1)->size_bytes = cudf::bitmask_allocation_size_bytes(view.size());
+  ArrowArrayBuffer(arr, 1)->data       = ptr;
 }
 
 // populate an ArrowArray with the string data buffers of a cudf column_view
@@ -160,67 +95,47 @@ std::enable_if_t<std::is_same_v<T, cudf::string_view>, void> populate_from_col(
 {
   arr->length     = view.size();
   arr->null_count = view.null_count();
+  NANOARROW_THROW_NOT_OK(ArrowBufferSetAllocator(ArrowArrayBuffer(arr, 0), noop_alloc));
+  ArrowArrayValidityBitmap(arr)->buffer.size_bytes =
+    cudf::bitmask_allocation_size_bytes(view.size());
   ArrowArrayValidityBitmap(arr)->buffer.data =
     const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(view.null_mask()));
 
   cudf::strings_column_view sview{view};
-  NANOARROW_THROW_NOT_OK(ArrowBufferSetAllocator(ArrowArrayBuffer(arr, 1), noop_alloc));
-  ArrowArrayBuffer(arr, 1)->data = const_cast<uint8_t*>(sview.offsets().data<uint8_t>());
-  NANOARROW_THROW_NOT_OK(ArrowBufferSetAllocator(ArrowArrayBuffer(arr, 2), noop_alloc));
-  ArrowArrayBuffer(arr, 2)->data = const_cast<uint8_t*>(view.data<uint8_t>());
+  if (view.size() > 0) {
+    NANOARROW_THROW_NOT_OK(ArrowBufferSetAllocator(ArrowArrayBuffer(arr, 1), noop_alloc));
+    ArrowArrayBuffer(arr, 1)->size_bytes = sizeof(int32_t) * sview.offsets().size();
+    ArrowArrayBuffer(arr, 1)->data       = const_cast<uint8_t*>(sview.offsets().data<uint8_t>());
+    NANOARROW_THROW_NOT_OK(ArrowBufferSetAllocator(ArrowArrayBuffer(arr, 2), noop_alloc));
+    ArrowArrayBuffer(arr, 2)->size_bytes = sview.chars_size(cudf::get_default_stream());
+    ArrowArrayBuffer(arr, 2)->data       = const_cast<uint8_t*>(view.data<uint8_t>());
+  } else {
+    auto zero          = rmm::device_scalar<int32_t>(0, cudf::get_default_stream());
+    const uint8_t* ptr = reinterpret_cast<const uint8_t*>(zero.data());
+    nanoarrow::BufferInitWrapped(ArrowArrayBuffer(arr, 1), std::move(zero), ptr, 4);
+  }
 }
 
-// populate a dictionary ArrowArray by delegating the copying of the indices
-// and key arrays
 template <typename KEY_TYPE, typename IND_TYPE>
-void get_nanoarrow_dict_array(ArrowArray* arr,
-                              std::vector<KEY_TYPE> const& keys,
-                              std::vector<IND_TYPE> const& ind,
-                              std::vector<uint8_t> const& validity = {})
+void populate_dict_from_col(ArrowArray* arr,
+                            cudf::dictionary_column_view dview)
 {
-  get_nanoarrow_array(arr->dictionary, keys);
-  get_nanoarrow_array(arr, ind, validity);
-}
+  arr->length     = dview.size();
+  arr->null_count = dview.null_count();
+  ArrowBufferSetAllocator(ArrowArrayBuffer(arr, 0), noop_alloc);
+  ArrowArrayValidityBitmap(arr)->buffer.size_bytes =
+    cudf::bitmask_allocation_size_bytes(dview.size());
+  ArrowArrayValidityBitmap(arr)->buffer.data =
+    const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(dview.null_mask()));
 
-// populate a list ArrowArray by copying the offsets and data buffers
-template <typename T>
-void get_nanoarrow_list_array(ArrowArray* arr,
-                              std::vector<T> data,
-                              std::vector<int32_t> offsets,
-                              std::vector<uint8_t> data_validity = {},
-                              std::vector<uint8_t> list_validity = {})
-{
-  get_nanoarrow_array(arr->children[0], data, data_validity);
-
-  arr->length = offsets.size() - 1;
-  NANOARROW_THROW_NOT_OK(
-    ArrowBufferAppend(ArrowArrayBuffer(arr, 1), offsets.data(), sizeof(int32_t) * offsets.size()));
-  if (!list_validity.empty()) {
-    NANOARROW_THROW_NOT_OK(ArrowBitmapReserve(ArrowArrayValidityBitmap(arr), list_validity.size()));
-    ArrowBitmapAppendInt8Unsafe(ArrowArrayValidityBitmap(arr),
-                                reinterpret_cast<const int8_t*>(list_validity.data()),
-                                arr->length);
-    arr->null_count = ArrowBitCountSet(ArrowArrayValidityBitmap(arr)->buffer.data, 0, arr->length);
-  } else {
-    arr->null_count = 0;
-  }
+  ArrowBufferSetAllocator(ArrowArrayBuffer(arr, 1), noop_alloc);
+  ArrowArrayBuffer(arr, 1)->size_bytes = sizeof(IND_TYPE) * dview.indices().size();
+  ArrowArrayBuffer(arr, 1)->data       = const_cast<uint8_t*>(dview.indices().data<uint8_t>());
 
-  CUDF_EXPECTS(ArrowArrayFinishBuildingDefault(arr, nullptr) == NANOARROW_OK,
-               "failed to construct list array");
+  populate_from_col<KEY_TYPE>(arr->dictionary, dview.keys());
 }
 
-// populate an ArrowArray list array from device buffers using a no-op
-// allocator so that the ArrowArray doesn't have ownership of the buffers
-void populate_list_from_col(ArrowArray* arr, cudf::lists_column_view view)
-{
-  arr->length     = view.size();
-  arr->null_count = view.null_count();
+std::tuple<std::unique_ptr<cudf::table>, nanoarrow::UniqueSchema, nanoarrow::UniqueArray>
+get_nanoarrow_tables(cudf::size_type length = 10000);
 
-  NANOARROW_THROW_NOT_OK(ArrowBufferSetAllocator(ArrowArrayBuffer(arr, 0), noop_alloc));
-  ArrowArrayValidityBitmap(arr)->buffer.data =
-    const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(view.null_mask()));
-
-  NANOARROW_THROW_NOT_OK(ArrowBufferSetAllocator(ArrowArrayBuffer(arr, 1), noop_alloc));
-  ArrowArrayBuffer(arr, 1)->data = const_cast<uint8_t*>(view.offsets().data<uint8_t>());
-}
+void populate_list_from_col(ArrowArray* arr, cudf::lists_column_view view);
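// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: using the new populate_dict_from_col<>
// helper declared above to expose a cudf dictionary column to Arrow without
// copying. The schema setup assumes nanoarrow's allocate-then-init dictionary
// pattern (ArrowSchemaAllocateDictionary), and the helper itself is assumed to
// be in scope from this header; the function name and column contents are
// arbitrary example values. The helper only borrows device buffers through
// noop_alloc, so `dict_col` must outlive `arr`.
// ---------------------------------------------------------------------------
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/dictionary/encode.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <nanoarrow/nanoarrow.hpp>

void example_wrap_dictionary_column()
{
  // dictionary-encode an int64 column: int64 keys, uint32 indices (the default)
  cudf::test::fixed_width_column_wrapper<int64_t> values({1, 1, 2, 3, 3, 3});
  auto dict_col = cudf::dictionary::encode(values);

  // uint32-indexed dictionary schema whose key (value) type is int64
  nanoarrow::UniqueSchema schema;
  ArrowSchemaInit(schema.get());
  NANOARROW_THROW_NOT_OK(ArrowSchemaSetType(schema.get(), NANOARROW_TYPE_UINT32));
  NANOARROW_THROW_NOT_OK(ArrowSchemaAllocateDictionary(schema.get()));
  ArrowSchemaInit(schema->dictionary);
  NANOARROW_THROW_NOT_OK(ArrowSchemaSetType(schema->dictionary, NANOARROW_TYPE_INT64));

  // the ArrowArray (and its dictionary child) is created from the schema and
  // then pointed at the cudf device buffers without taking ownership
  nanoarrow::UniqueArray arr;
  NANOARROW_THROW_NOT_OK(ArrowArrayInitFromSchema(arr.get(), schema.get(), nullptr));
  populate_dict_from_col<int64_t, uint32_t>(arr.get(),
                                            cudf::dictionary_column_view(dict_col->view()));
  NANOARROW_THROW_NOT_OK(
    ArrowArrayFinishBuilding(arr.get(), NANOARROW_VALIDATION_LEVEL_NONE, nullptr));
}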
"CUDF" : "Rocks"; }); + std::generate(validity.begin(), validity.end(), validity_generator); + std::generate(bool_validity.begin(), bool_validity.end(), validity_generator); + + std::transform(bool_validity.cbegin(), + bool_validity.cend(), + std::back_inserter(bool_data_validity), + [](auto val) { return static_cast(val); }); + columns.emplace_back(cudf::test::fixed_width_column_wrapper( int64_data.begin(), int64_data.end(), validity.begin()) .release()); @@ -180,41 +200,58 @@ get_nanoarrow_tables(cudf::size_type length) nanoarrow::UniqueArray arrow; NANOARROW_THROW_NOT_OK(ArrowArrayInitFromSchema(arrow.get(), schema.get(), nullptr)); - - get_nanoarrow_array(arrow->children[0], int64_data, validity); - get_nanoarrow_array(arrow->children[1], string_data, validity); - cudf::dictionary_column_view view(dict_col->view()); - auto keys = cudf::test::to_host(view.keys()).first; - auto indices = cudf::test::to_host(view.indices()).first; - get_nanoarrow_dict_array(arrow->children[2], - std::vector(keys.begin(), keys.end()), - std::vector(indices.begin(), indices.end()), - validity); - get_nanoarrow_array(arrow->children[3], bool_data, bool_validity); - get_nanoarrow_list_array(arrow->children[4], - list_int64_data, - list_offsets, - list_int64_data_validity, - bool_data_validity); - - get_nanoarrow_array(arrow->children[5]->children[0], int64_data, validity); - get_nanoarrow_array(arrow->children[5]->children[1], string_data, validity); - arrow->children[5]->length = length; - NANOARROW_THROW_NOT_OK(ArrowBitmapReserve(ArrowArrayValidityBitmap(arrow->children[5]), length)); - std::for_each(bool_data_validity.begin(), bool_data_validity.end(), [&](auto&& elem) { - NANOARROW_THROW_NOT_OK( - ArrowBitmapAppend(ArrowArrayValidityBitmap(arrow->children[5]), (elem) ? 
-  });
-  arrow->children[5]->null_count =
-    ArrowBitCountSet(ArrowArrayValidityBitmap(arrow->children[5])->buffer.data, 0, length);
-
-  CUDF_EXPECTS(ArrowArrayFinishBuildingDefault(arrow.get(), nullptr) == NANOARROW_OK,
-               "failed to build example Arrays");
+  arrow->length = length;
+
+  populate_from_col<int64_t>(arrow->children[0], columns[0]->view());
+  populate_from_col<cudf::string_view>(arrow->children[1], columns[1]->view());
+  populate_dict_from_col<int64_t, uint32_t>(arrow->children[2],
+                                            cudf::dictionary_column_view(columns[2]->view()));
+
+  populate_from_col<bool>(arrow->children[3], columns[3]->view());
+  cudf::lists_column_view list_view{columns[4]->view()};
+  populate_list_from_col(arrow->children[4], list_view);
+  populate_from_col<int64_t>(arrow->children[4]->children[0], list_view.child());
+
+  cudf::structs_column_view struct_view{columns[5]->view()};
+  populate_from_col<int64_t>(arrow->children[5]->children[0], struct_view.child(0));
+  populate_from_col<cudf::string_view>(arrow->children[5]->children[1], struct_view.child(1));
+  arrow->children[5]->length     = struct_view.size();
+  arrow->children[5]->null_count = struct_view.null_count();
+  ArrowBufferSetAllocator(ArrowArrayBuffer(arrow->children[5], 0), noop_alloc);
+  ArrowArrayValidityBitmap(arrow->children[5])->buffer.size_bytes =
+    cudf::bitmask_allocation_size_bytes(struct_view.size());
+  ArrowArrayValidityBitmap(arrow->children[5])->buffer.data =
+    const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(struct_view.null_mask()));
+
+  ArrowError error;
+  if (ArrowArrayFinishBuilding(arrow.get(), NANOARROW_VALIDATION_LEVEL_MINIMAL, &error) !=
+      NANOARROW_OK) {
+    std::cerr << ArrowErrorMessage(&error) << std::endl;
+    CUDF_FAIL("failed to build example arrays");
+  }
 
   return std::make_tuple(
     std::make_unique<cudf::table>(std::move(columns)), std::move(schema), std::move(arrow));
 }
 
+// populate an ArrowArray list array from device buffers using a no-op
+// allocator so that the ArrowArray doesn't have ownership of the buffers
+void populate_list_from_col(ArrowArray* arr, cudf::lists_column_view view)
+{
+  arr->length     = view.size();
+  arr->null_count = view.null_count();
+
+  ArrowBufferSetAllocator(ArrowArrayBuffer(arr, 0), noop_alloc);
+  ArrowArrayValidityBitmap(arr)->buffer.size_bytes =
+    cudf::bitmask_allocation_size_bytes(view.size());
+  ArrowArrayValidityBitmap(arr)->buffer.data =
+    const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(view.null_mask()));
+
+  ArrowBufferSetAllocator(ArrowArrayBuffer(arr, 1), noop_alloc);
+  ArrowArrayBuffer(arr, 1)->size_bytes = sizeof(int32_t) * view.offsets().size();
+  ArrowArrayBuffer(arr, 1)->data       = const_cast<uint8_t*>(view.offsets().data<uint8_t>());
+}
+
 struct BaseArrowFixture : public cudf::test::BaseFixture {
   void compare_schemas(const ArrowSchema* expected, const ArrowSchema* actual)
   {