Skip to content

Commit

Permalink
Merge branch 'branch-24.02' into rbc_eps
Browse files Browse the repository at this point in the history
  • Loading branch information
mfoerste4 authored Jan 5, 2024
2 parents 5d04250 + 6762fe5 commit 8a6d669
Show file tree
Hide file tree
Showing 35 changed files with 3,583 additions and 116 deletions.
8 changes: 4 additions & 4 deletions cpp/include/raft/core/cublas_macros.hpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
* Copyright (c) 2022-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -33,7 +33,7 @@
namespace raft {

/**
* @ingroup error_handling
* @addtogroup error_handling
* @{
*/

Expand Down Expand Up @@ -76,7 +76,7 @@ inline const char* cublas_error_to_string(cublasStatus_t err)
#undef _CUBLAS_ERR_TO_STR

/**
* @ingroup assertion
* @addtogroup assertion
* @{
*/

Expand Down Expand Up @@ -135,4 +135,4 @@ inline const char* cublas_error_to_string(cublasStatus_t err)
#define CUBLAS_CHECK_NO_THROW(call) RAFT_CUBLAS_TRY_NO_THROW(call)
#endif

#endif
#endif
146 changes: 146 additions & 0 deletions cpp/include/raft/core/detail/fail_container_policy.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,146 @@
/*
* Copyright (c) 2023-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once
#include <raft/core/error.hpp>
#include <raft/core/logger-macros.hpp>
#include <raft/core/resources.hpp>
#include <raft/thirdparty/mdspan/include/experimental/mdspan>
#include <stddef.h>

namespace raft {
namespace detail {

template <typename T>
struct fail_reference {
  using value_type    = typename std::remove_cv_t<T>;
  using pointer       = T*;
  using const_pointer = T const*;

  fail_reference() = default;

  /** Binding to actual device memory is impossible without CUDA, so any
   * non-default construction fails immediately. */
  template <typename StreamViewType>
  fail_reference(T* ptr, StreamViewType stream)
  {
    throw non_cuda_build_error{"Attempted to construct reference to device data in non-CUDA build"};
  }

  /** Reading through the reference throws; the return statement after the
   * throw is unreachable but keeps the conversion well-formed. */
  operator value_type() const  // NOLINT
  {
    throw non_cuda_build_error{"Attempted to dereference device data in non-CUDA build"};
    return value_type{};
  }

  /** Writing through the reference throws as well. */
  auto operator=(T const& rhs) -> fail_reference&
  {
    throw non_cuda_build_error{"Attempted to assign to device data in non-CUDA build"};
    return *this;
  }
};

/** A placeholder container which throws an exception on use
*
* This placeholder is used in non-CUDA builds for container types that would
* otherwise be provided with CUDA code. Attempting to construct a non-empty
* container of this type throws an exception indicating that there was an
* attempt to use the device from a non-CUDA build. An example of when this
* might happen is if a downstream application attempts to allocate a device
* mdarray using a library built with non-CUDA RAFT.
*/
template <typename T>
struct fail_container {
  using value_type = T;
  using size_type  = std::size_t;

  using reference       = fail_reference<T>;
  using const_reference = fail_reference<T const>;

  using pointer       = value_type*;
  using const_pointer = value_type const*;

  using iterator       = pointer;
  using const_iterator = const_pointer;

  /** Only a zero-sized container may exist; a real allocation request means
   * device memory was asked for in a non-CUDA build. */
  explicit fail_container(size_t n = size_t{}) { enforce_empty(n); }

  /** operator[] is noexcept, so misuse is logged here and the returned
   * fail_reference throws on any actual read or write. */
  template <typename Index>
  auto operator[](Index idx) noexcept -> reference
  {
    RAFT_LOG_ERROR("Attempted to access device data in non-CUDA build");
    return reference{};
  }

  template <typename Index>
  auto operator[](Index idx) const noexcept -> const_reference
  {
    RAFT_LOG_ERROR("Attempted to access device data in non-CUDA build");
    return const_reference{};
  }

  /** Resizing to a non-zero size is treated exactly like allocating. */
  void resize(size_t n) { enforce_empty(n); }

  [[nodiscard]] auto data() noexcept -> pointer { return nullptr; }
  [[nodiscard]] auto data() const noexcept -> const_pointer { return nullptr; }

 private:
  /** Shared guard for constructor and resize: throw unless the requested
   * size is zero. */
  static void enforce_empty(size_t n)
  {
    if (n != size_t{}) {
      throw non_cuda_build_error{"Attempted to allocate device container in non-CUDA build"};
    }
  }
};

/** A placeholder container policy which throws an exception on use
*
* This placeholder is used in non-CUDA builds for container types that would
* otherwise be provided with CUDA code. Attempting to construct a non-empty
* container of this type throws an exception indicating that there was an
* attempt to use the device from a non-CUDA build. An example of when this
* might happen is if a downstream application attempts to allocate a device
* mdarray using a library built with non-CUDA RAFT.
*/
template <typename ElementType>
struct fail_container_policy {
  using element_type   = ElementType;
  using container_type = fail_container<element_type>;

  using pointer         = typename container_type::pointer;
  using const_pointer   = typename container_type::const_pointer;
  using reference       = typename container_type::reference;
  using const_reference = typename container_type::const_reference;

  using accessor_policy       = std::experimental::default_accessor<element_type>;
  using const_accessor_policy = std::experimental::default_accessor<element_type const>;

  fail_container_policy() = default;

  /** Create a container of the requested size; fail_container's constructor
   * throws for any non-zero size, so only empty containers succeed. */
  auto create(raft::resources const& res, size_t n) -> container_type { return container_type(n); }

  /** Element access delegates to the container, which logs and hands back a
   * throwing reference. */
  [[nodiscard]] constexpr auto access(container_type& container, size_t idx) const noexcept
    -> reference
  {
    return container[idx];
  }
  [[nodiscard]] constexpr auto access(container_type const& container, size_t idx) const noexcept
    -> const_reference
  {
    return container[idx];
  }

  [[nodiscard]] auto make_accessor_policy() noexcept { return accessor_policy{}; }
  [[nodiscard]] auto make_accessor_policy() const noexcept { return const_accessor_policy{}; }
};

} // namespace detail
} // namespace raft
22 changes: 21 additions & 1 deletion cpp/include/raft/core/device_container_policy.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
*/

/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
* Copyright (c) 2022-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand All @@ -21,6 +21,7 @@
* limitations under the License.
*/
#pragma once
#ifndef RAFT_DISABLE_CUDA
#include <raft/core/device_mdspan.hpp>
#include <raft/util/cudart_utils.hpp>

Expand Down Expand Up @@ -196,3 +197,22 @@ class device_uvector_policy {
};

} // namespace raft
#else
#include <raft/core/detail/fail_container_policy.hpp>
namespace raft {

// Provide placeholders that will allow CPU-GPU interoperable codebases to
// compile in non-CUDA mode but which will throw exceptions at runtime on any
// attempt to touch device data

template <typename T>
using device_reference = detail::fail_reference<T>;

template <typename T>
using device_uvector = detail::fail_container<T>;

template <typename ElementType>
using device_uvector_policy = detail::fail_container_policy<ElementType>;

} // namespace raft
#endif
88 changes: 10 additions & 78 deletions cpp/include/raft/core/device_mdspan.hpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
* Copyright (c) 2022-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand All @@ -26,9 +26,6 @@ namespace raft {
template <typename AccessorPolicy>
using device_accessor = host_device_accessor<AccessorPolicy, memory_type::device>;

template <typename AccessorPolicy>
using managed_accessor = host_device_accessor<AccessorPolicy, memory_type::managed>;

/**
* @brief std::experimental::mdspan with device tag to avoid accessing incorrect memory location.
*/
Expand All @@ -38,12 +35,6 @@ template <typename ElementType,
typename AccessorPolicy = std::experimental::default_accessor<ElementType>>
using device_mdspan = mdspan<ElementType, Extents, LayoutPolicy, device_accessor<AccessorPolicy>>;

template <typename ElementType,
typename Extents,
typename LayoutPolicy = layout_c_contiguous,
typename AccessorPolicy = std::experimental::default_accessor<ElementType>>
using managed_mdspan = mdspan<ElementType, Extents, LayoutPolicy, managed_accessor<AccessorPolicy>>;

template <typename T, bool B>
struct is_device_mdspan : std::false_type {};
template <typename T>
Expand All @@ -61,23 +52,6 @@ using is_input_device_mdspan_t = is_device_mdspan<T, is_input_mdspan_v<T>>;
template <typename T>
using is_output_device_mdspan_t = is_device_mdspan<T, is_output_mdspan_v<T>>;

template <typename T, bool B>
struct is_managed_mdspan : std::false_type {};
template <typename T>
struct is_managed_mdspan<T, true> : std::bool_constant<T::accessor_type::is_managed_accessible> {};

/**
* @\brief Boolean to determine if template type T is either raft::managed_mdspan or a derived type
*/
template <typename T>
using is_managed_mdspan_t = is_managed_mdspan<T, is_mdspan_v<T>>;

template <typename T>
using is_input_managed_mdspan_t = is_managed_mdspan<T, is_input_mdspan_v<T>>;

template <typename T>
using is_output_managed_mdspan_t = is_managed_mdspan<T, is_output_mdspan_v<T>>;

/**
* @\brief Boolean to determine if variadic template types Tn are either raft::device_mdspan or a
* derived type
Expand All @@ -102,30 +76,6 @@ using enable_if_input_device_mdspan = std::enable_if_t<is_input_device_mdspan_v<
template <typename... Tn>
using enable_if_output_device_mdspan = std::enable_if_t<is_output_device_mdspan_v<Tn...>>;

/**
* @\brief Boolean to determine if variadic template types Tn are either raft::managed_mdspan or a
* derived type
*/
template <typename... Tn>
inline constexpr bool is_managed_mdspan_v = std::conjunction_v<is_managed_mdspan_t<Tn>...>;

template <typename... Tn>
inline constexpr bool is_input_managed_mdspan_v =
std::conjunction_v<is_input_managed_mdspan_t<Tn>...>;

template <typename... Tn>
inline constexpr bool is_output_managed_mdspan_v =
std::conjunction_v<is_output_managed_mdspan_t<Tn>...>;

template <typename... Tn>
using enable_if_managed_mdspan = std::enable_if_t<is_managed_mdspan_v<Tn...>>;

template <typename... Tn>
using enable_if_input_managed_mdspan = std::enable_if_t<is_input_managed_mdspan_v<Tn...>>;

template <typename... Tn>
using enable_if_output_managed_mdspan = std::enable_if_t<is_output_managed_mdspan_v<Tn...>>;

/**
* @brief Shorthand for 0-dim host mdspan (scalar).
* @tparam ElementType the data type of the scalar element
Expand Down Expand Up @@ -186,7 +136,7 @@ using device_aligned_matrix_view =
template <typename ElementType,
typename IndexType = std::uint32_t,
typename LayoutPolicy = layout_right_padded<ElementType>>
auto make_device_aligned_matrix_view(ElementType* ptr, IndexType n_rows, IndexType n_cols)
auto constexpr make_device_aligned_matrix_view(ElementType* ptr, IndexType n_rows, IndexType n_cols)
{
using data_handle_type =
typename std::experimental::aligned_accessor<ElementType,
Expand All @@ -203,24 +153,6 @@ auto make_device_aligned_matrix_view(ElementType* ptr, IndexType n_rows, IndexTy
return device_aligned_matrix_view<ElementType, IndexType, LayoutPolicy>{aligned_pointer, extents};
}

/**
* @brief Create a raft::managed_mdspan
* @tparam ElementType the data type of the matrix elements
* @tparam IndexType the index type of the extents
* @tparam LayoutPolicy policy for strides and layout ordering
* @param ptr Pointer to the data
* @param exts dimensionality of the array (series of integers)
* @return raft::managed_mdspan
*/
template <typename ElementType,
typename IndexType = std::uint32_t,
typename LayoutPolicy = layout_c_contiguous,
size_t... Extents>
auto make_managed_mdspan(ElementType* ptr, extents<IndexType, Extents...> exts)
{
return make_mdspan<ElementType, IndexType, LayoutPolicy, true, true>(ptr, exts);
}

/**
* @brief Create a 0-dim (scalar) mdspan instance for device value.
*
Expand All @@ -229,7 +161,7 @@ auto make_managed_mdspan(ElementType* ptr, extents<IndexType, Extents...> exts)
* @param[in] ptr on device to wrap
*/
template <typename ElementType, typename IndexType = std::uint32_t>
auto make_device_scalar_view(ElementType* ptr)
auto constexpr make_device_scalar_view(ElementType* ptr)
{
scalar_extent<IndexType> extents;
return device_scalar_view<ElementType, IndexType>{ptr, extents};
Expand All @@ -249,7 +181,7 @@ auto make_device_scalar_view(ElementType* ptr)
template <typename ElementType,
typename IndexType = std::uint32_t,
typename LayoutPolicy = layout_c_contiguous>
auto make_device_matrix_view(ElementType* ptr, IndexType n_rows, IndexType n_cols)
auto constexpr make_device_matrix_view(ElementType* ptr, IndexType n_rows, IndexType n_cols)
{
matrix_extent<IndexType> extents{n_rows, n_cols};
return device_matrix_view<ElementType, IndexType, LayoutPolicy>{ptr, extents};
Expand All @@ -269,10 +201,10 @@ auto make_device_matrix_view(ElementType* ptr, IndexType n_rows, IndexType n_col
* @param[in] stride leading dimension / stride of data
*/
template <typename ElementType, typename IndexType, typename LayoutPolicy = layout_c_contiguous>
auto make_device_strided_matrix_view(ElementType* ptr,
IndexType n_rows,
IndexType n_cols,
IndexType stride)
auto constexpr make_device_strided_matrix_view(ElementType* ptr,
IndexType n_rows,
IndexType n_cols,
IndexType stride)
{
constexpr auto is_row_major = std::is_same_v<LayoutPolicy, layout_c_contiguous>;
IndexType stride0 = is_row_major ? (stride > 0 ? stride : n_cols) : 1;
Expand All @@ -295,7 +227,7 @@ auto make_device_strided_matrix_view(ElementType* ptr,
* @return raft::device_vector_view
*/
template <typename ElementType, typename IndexType, typename LayoutPolicy = layout_c_contiguous>
auto make_device_vector_view(ElementType* ptr, IndexType n)
auto constexpr make_device_vector_view(ElementType* ptr, IndexType n)
{
return device_vector_view<ElementType, IndexType, LayoutPolicy>{ptr, n};
}
Expand All @@ -310,7 +242,7 @@ auto make_device_vector_view(ElementType* ptr, IndexType n)
* @return raft::device_vector_view
*/
template <typename ElementType, typename IndexType, typename LayoutPolicy = layout_c_contiguous>
auto make_device_vector_view(
auto constexpr make_device_vector_view(
ElementType* ptr,
const typename LayoutPolicy::template mapping<vector_extent<IndexType>>& mapping)
{
Expand Down
Loading

0 comments on commit 8a6d669

Please sign in to comment.