Implement the transform_e primitive (to update property values for all edges) (#3917)

We have two versions of transform_e: one updates property values for all edges, and the other updates property values only for the edges in a given edge list.

Both were declared, but only the latter was implemented. This PR implements the former, which is needed to update edge masks when testing the neighbor intersection primitives with edge masking.
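
For context, a rough sketch of how the new all-edges overload might be invoked to reset an edge mask. It assumes the overload mirrors the existing edge-list overload's parameter order (handle, graph view, source/destination/edge value inputs, edge op, output edge property view) and that the primitive lives in the internal prims/transform_e.cuh header; treat the include path, the exact signature, the edge-op argument order, and the helper name clear_edge_mask as assumptions, not as quotes from this PR.

// Sketch only: not taken verbatim from the PR. Assumes the all-edges transform_e
// overload follows the same parameter pattern as the edge-list overload.
#include <prims/transform_e.cuh>  // internal primitive header (assumed path)

#include <cugraph/edge_property.hpp>
#include <cugraph/edge_src_dst_property.hpp>
#include <cugraph/graph_view.hpp>

#include <raft/core/handle.hpp>

template <typename vertex_t, typename edge_t, bool multi_gpu>
void clear_edge_mask(  // hypothetical helper, for illustration only
  raft::handle_t const& handle,
  cugraph::graph_view_t<vertex_t, edge_t, false, multi_gpu> const& graph_view,
  cugraph::edge_property_t<cugraph::graph_view_t<vertex_t, edge_t, false, multi_gpu>, bool>&
    edge_mask)
{
  cugraph::transform_e(
    handle,
    graph_view,
    cugraph::edge_src_dummy_property_t{}.view(),
    cugraph::edge_dst_dummy_property_t{}.view(),
    cugraph::edge_dummy_property_t{}.view(),
    // edge op: (src, dst, src value, dst value, edge value) -> new property value
    [] __device__(auto src, auto dst, auto, auto, auto) { return false; },
    edge_mask.mutable_view());
}
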

Authors:
  - Seunghwa Kang (https://github.com/seunghwak)
  - Chuck Hastings (https://github.com/ChuckHastings)

Approvers:
  - Chuck Hastings (https://github.com/ChuckHastings)

URL: #3917
seunghwak authored Oct 18, 2023
1 parent 9efd87d commit 34dce3d
Showing 7 changed files with 382 additions and 83 deletions.
31 changes: 27 additions & 4 deletions cpp/include/cugraph/edge_partition_device_view.cuh
@@ -109,6 +109,13 @@ class edge_partition_device_view_base_t {
__host__ __device__ edge_t const* offsets() const { return offsets_.data(); }
__host__ __device__ vertex_t const* indices() const { return indices_.data(); }

__device__ vertex_t major_idx_from_local_edge_idx_nocheck(edge_t local_edge_idx) const noexcept
{
return static_cast<vertex_t>(thrust::distance(
offsets_.begin() + 1,
thrust::upper_bound(thrust::seq, offsets_.begin() + 1, offsets_.end(), local_edge_idx)));
}

// major_idx == major offset if CSR/CSC, major_offset != major_idx if DCSR/DCSC
__device__ thrust::tuple<vertex_t const*, edge_t, edge_t> local_edges(
vertex_t major_idx) const noexcept
@@ -291,8 +298,19 @@ class edge_partition_device_view_t<vertex_t, edge_t, multi_gpu, std::enable_if_t
return major_range_first_ + major_offset;
}

__device__ vertex_t major_from_major_idx_nocheck(vertex_t major_idx) const noexcept
{
if (major_hypersparse_first_) {
return major_idx >= (*major_hypersparse_first_ - major_range_first_)
? (*dcs_nzd_vertices_)[major_idx - (*major_hypersparse_first_ - major_range_first_)]
: major_from_major_offset_nocheck(major_idx);
} else { // major_idx == major_offset
return major_from_major_offset_nocheck(major_idx);
}
}

// major_hypersparse_idx: index within the hypersparse segment
__host__ __device__ thrust::optional<vertex_t> major_hypersparse_idx_from_major_nocheck(
__device__ thrust::optional<vertex_t> major_hypersparse_idx_from_major_nocheck(
vertex_t major) const noexcept
{
if (dcs_nzd_vertices_) {
Expand All @@ -303,7 +321,7 @@ class edge_partition_device_view_t<vertex_t, edge_t, multi_gpu, std::enable_if_t
}

// major_hypersparse_idx: index within the hypersparse segment
__host__ __device__ thrust::optional<vertex_t> major_from_major_hypersparse_idx_nocheck(
__device__ thrust::optional<vertex_t> major_from_major_hypersparse_idx_nocheck(
vertex_t major_hypersparse_idx) const noexcept
{
return dcs_nzd_vertices_
@@ -442,16 +460,21 @@ class edge_partition_device_view_t<vertex_t, edge_t, multi_gpu, std::enable_if_t
return major_offset;
}

__device__ vertex_t major_from_major_idx_nocheck(vertex_t major_idx) const noexcept
{
return major_from_major_offset_nocheck(major_idx);
}

// major_hypersparse_idx: index within the hypersparse segment
__host__ __device__ thrust::optional<vertex_t> major_hypersparse_idx_from_major_nocheck(
__device__ thrust::optional<vertex_t> major_hypersparse_idx_from_major_nocheck(
vertex_t major) const noexcept
{
assert(false);
return thrust::nullopt;
}

// major_hypersparse_idx: index within the hypersparse segment
__host__ __device__ thrust::optional<vertex_t> major_from_major_hypersparse_idx_nocheck(
__device__ thrust::optional<vertex_t> major_from_major_hypersparse_idx_nocheck(
vertex_t major_hypersparse_idx) const noexcept
{
assert(false);
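
The new major_idx_from_local_edge_idx_nocheck above maps a flat local edge index back to the major (row) index that owns it by binary-searching the CSR offsets array, and major_from_major_idx_nocheck then resolves that index to a vertex ID (via dcs_nzd_vertices_ in the hypersparse/DCSR case). A standalone host-side illustration of the same search, using std::upper_bound on a toy offsets array rather than cuGraph code:

// Illustration only: mirrors the device-side upper_bound over offsets_.begin() + 1.
#include <algorithm>
#include <cassert>
#include <iterator>
#include <vector>

int major_idx_from_local_edge_idx(std::vector<int> const& offsets, int local_edge_idx)
{
  // The first offset past offsets[0] that is strictly greater than the edge index
  // identifies the row that owns the edge.
  auto it = std::upper_bound(offsets.begin() + 1, offsets.end(), local_edge_idx);
  return static_cast<int>(std::distance(offsets.begin() + 1, it));
}

int main()
{
  std::vector<int> offsets{0, 2, 5, 7};  // row 0 owns edges [0,2), row 1 owns [2,5), row 2 owns [5,7)
  assert(major_idx_from_local_edge_idx(offsets, 0) == 0);
  assert(major_idx_from_local_edge_idx(offsets, 3) == 1);
  assert(major_idx_from_local_edge_idx(offsets, 6) == 2);
  return 0;
}
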
39 changes: 22 additions & 17 deletions cpp/include/cugraph/edge_partition_edge_property_device_view.cuh
@@ -33,18 +33,21 @@ template <typename edge_t,
typename value_t = typename thrust::iterator_traits<ValueIterator>::value_type>
class edge_partition_edge_property_device_view_t {
public:
using edge_type = edge_t;
using value_type = value_t;
static constexpr bool is_packed_bool = cugraph::is_packed_bool<ValueIterator, value_t>();
static constexpr bool has_packed_bool_element =
cugraph::has_packed_bool_element<ValueIterator, value_t>();

static_assert(
std::is_same_v<typename thrust::iterator_traits<ValueIterator>::value_type, value_t> ||
cugraph::has_packed_bool_element<ValueIterator, value_t>());
has_packed_bool_element);
static_assert(cugraph::is_arithmetic_or_thrust_tuple_of_arithmetic<value_t>::value);

using edge_type = edge_t;
using value_type = value_t;

edge_partition_edge_property_device_view_t() = default;

edge_partition_edge_property_device_view_t(
edge_property_view_t<edge_t, ValueIterator> const& view, size_t partition_idx)
edge_property_view_t<edge_t, ValueIterator, value_t> const& view, size_t partition_idx)
: value_first_(view.value_firsts()[partition_idx])
{
value_first_ = view.value_firsts()[partition_idx];
@@ -54,8 +57,8 @@ class edge_partition_edge_property_device_view_t {

__device__ value_t get(edge_t offset) const
{
if constexpr (cugraph::has_packed_bool_element<ValueIterator, value_t>()) {
static_assert(std::is_arithmetic_v<value_t>, "unimplemented for thrust::tuple types.");
if constexpr (has_packed_bool_element) {
static_assert(is_packed_bool, "unimplemented for thrust::tuple types.");
auto mask = cugraph::packed_bool_mask(offset);
return static_cast<bool>(*(value_first_ + cugraph::packed_bool_offset(offset)) & mask);
} else {
@@ -69,8 +72,8 @@ class edge_partition_edge_property_device_view_t {
void>
set(edge_t offset, value_t val) const
{
if constexpr (cugraph::has_packed_bool_element<ValueIterator, value_t>()) {
static_assert(std::is_arithmetic_v<value_t>, "unimplemented for thrust::tuple types.");
if constexpr (has_packed_bool_element) {
static_assert(is_packed_bool, "unimplemented for thrust::tuple types.");
auto mask = cugraph::packed_bool_mask(offset);
if (val) {
atomicOr(value_first_ + cugraph::packed_bool_offset(offset), mask);
@@ -88,8 +91,8 @@ class edge_partition_edge_property_device_view_t {
value_t>
atomic_and(edge_t offset, value_t val) const
{
if constexpr (cugraph::has_packed_bool_element<ValueIterator, value_t>()) {
static_assert(std::is_arithmetic_v<value_t>, "unimplemented for thrust::tuple types.");
if constexpr (has_packed_bool_element) {
static_assert(is_packed_bool, "unimplemented for thrust::tuple types.");
auto mask = cugraph::packed_bool_mask(offset);
auto old = atomicAnd(value_first_ + cugraph::packed_bool_offset(offset),
val ? uint32_t{0xffffffff} : ~mask);
@@ -105,8 +108,8 @@ class edge_partition_edge_property_device_view_t {
value_t>
atomic_or(edge_t offset, value_t val) const
{
if constexpr (cugraph::has_packed_bool_element<ValueIterator, value_t>()) {
static_assert(std::is_arithmetic_v<value_t>, "unimplemented for thrust::tuple types.");
if constexpr (has_packed_bool_element) {
static_assert(is_packed_bool, "unimplemented for thrust::tuple types.");
auto mask = cugraph::packed_bool_mask(offset);
auto old =
atomicOr(value_first_ + cugraph::packed_bool_offset(offset), val ? mask : uint32_t{0});
@@ -132,8 +135,8 @@ class edge_partition_edge_property_device_view_t {
value_t>
elementwise_atomic_cas(edge_t offset, value_t compare, value_t val) const
{
if constexpr (cugraph::has_packed_bool_element<ValueIterator, value_t>()) {
static_assert(std::is_arithmetic_v<value_t>, "unimplemented for thrust::tuple types.");
if constexpr (has_packed_bool_element) {
static_assert(is_packed_bool, "unimplemented for thrust::tuple types.");
auto mask = cugraph::packed_bool_mask(offset);
auto old = val ? atomicOr(value_first_ + cugraph::packed_bool_offset(offset), mask)
: atomicAnd(value_first_ + cugraph::packed_bool_offset(offset), ~mask);
@@ -170,8 +173,10 @@ class edge_partition_edge_property_device_view_t {
template <typename edge_t>
class edge_partition_edge_dummy_property_device_view_t {
public:
using edge_type = edge_t;
using value_type = thrust::nullopt_t;
using edge_type = edge_t;
using value_type = thrust::nullopt_t;
static constexpr bool is_packed_bool = false;
static constexpr bool has_packed_bool_element = false;

edge_partition_edge_dummy_property_device_view_t() = default;

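
The packed-bool branches in the get/set/atomic paths above address single bits inside 32-bit words via cugraph::packed_bool_offset (which word) and cugraph::packed_bool_mask (which bit). A host-side illustration of that addressing scheme, with the two helpers re-implemented locally for the example (the real ones live in cuGraph's packed-bool utilities):

// Illustration only: local stand-ins for the packed-bool helpers.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr std::size_t packed_bool_offset(std::size_t i) { return i / 32; }          // owning word
constexpr std::uint32_t packed_bool_mask(std::size_t i) { return 1u << (i % 32); }  // bit within the word

int main()
{
  std::vector<std::uint32_t> words((100 + 31) / 32, 0);  // storage for 100 packed bools

  words[packed_bool_offset(40)] |= packed_bool_mask(40);  // set(40, true); the device view uses atomicOr here
  bool v = (words[packed_bool_offset(40)] & packed_bool_mask(40)) != 0;  // get(40)
  assert(v);

  words[packed_bool_offset(40)] &= ~packed_bool_mask(40);  // set(40, false); the device view uses atomicAnd
  assert((words[packed_bool_offset(40)] & packed_bool_mask(40)) == 0);
  return 0;
}
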
cpp/include/cugraph/edge_partition_endpoint_property_device_view.cuh
@@ -39,12 +39,15 @@ template <typename vertex_t,
typename value_t = typename thrust::iterator_traits<ValueIterator>::value_type>
class edge_partition_endpoint_property_device_view_t {
public:
using vertex_type = vertex_t;
using value_type = value_t;
static constexpr bool is_packed_bool = cugraph::is_packed_bool<ValueIterator, value_t>();
static constexpr bool has_packed_bool_element =
cugraph::has_packed_bool_element<ValueIterator, value_t>();

static_assert(
std::is_same_v<typename thrust::iterator_traits<ValueIterator>::value_type, value_t> ||
cugraph::has_packed_bool_element<ValueIterator, value_t>());

using vertex_type = vertex_t;
using value_type = value_t;
has_packed_bool_element);

edge_partition_endpoint_property_device_view_t() = default;

@@ -77,8 +80,8 @@ class edge_partition_endpoint_property_device_view_t {
__device__ value_t get(vertex_t offset) const
{
auto val_offset = value_offset(offset);
if constexpr (cugraph::has_packed_bool_element<ValueIterator, value_t>()) {
static_assert(std::is_arithmetic_v<value_t>, "unimplemented for thrust::tuple types.");
if constexpr (has_packed_bool_element) {
static_assert(is_packed_bool, "unimplemented for thrust::tuple types.");
auto mask = cugraph::packed_bool_mask(val_offset);
return static_cast<bool>(*(value_first_ + cugraph::packed_bool_offset(val_offset)) & mask);
} else {
@@ -93,8 +96,8 @@ class edge_partition_endpoint_property_device_view_t {
atomic_and(vertex_t offset, value_t val) const
{
auto val_offset = value_offset(offset);
if constexpr (cugraph::has_packed_bool_element<ValueIterator, value_t>()) {
static_assert(std::is_arithmetic_v<value_t>, "unimplemented for thrust::tuple types.");
if constexpr (has_packed_bool_element) {
static_assert(is_packed_bool, "unimplemented for thrust::tuple types.");
auto mask = cugraph::packed_bool_mask(val_offset);
auto old = atomicAnd(value_first_ + cugraph::packed_bool_offset(val_offset),
val ? cugraph::packed_bool_full_mask() : ~mask);
@@ -111,8 +114,8 @@ class edge_partition_endpoint_property_device_view_t {
atomic_or(vertex_t offset, value_t val) const
{
auto val_offset = value_offset(offset);
if constexpr (cugraph::has_packed_bool_element<ValueIterator, value_t>()) {
static_assert(std::is_arithmetic_v<value_t>, "unimplemented for thrust::tuple types.");
if constexpr (has_packed_bool_element) {
static_assert(is_packed_bool, "unimplemented for thrust::tuple types.");
auto mask = cugraph::packed_bool_mask(val_offset);
auto old = atomicOr(value_first_ + cugraph::packed_bool_offset(val_offset),
val ? mask : cugraph::packed_bool_empty_mask());
@@ -140,8 +143,8 @@ class edge_partition_endpoint_property_device_view_t {
elementwise_atomic_cas(vertex_t offset, value_t compare, value_t val) const
{
auto val_offset = value_offset(offset);
if constexpr (cugraph::has_packed_bool_element<ValueIterator, value_t>()) {
static_assert(std::is_arithmetic_v<value_t>, "unimplemented for thrust::tuple types.");
if constexpr (has_packed_bool_element) {
static_assert(is_packed_bool, "unimplemented for thrust::tuple types.");
auto mask = cugraph::packed_bool_mask(val_offset);
auto old = val ? atomicOr(value_first_ + cugraph::packed_bool_offset(val_offset), mask)
: atomicAnd(value_first_ + cugraph::packed_bool_offset(val_offset), ~mask);
@@ -203,8 +206,10 @@ class edge_partition_endpoint_dummy_property_device_view_t {
template <typename vertex_t>
class edge_partition_endpoint_dummy_property_device_view_t {
public:
using vertex_type = vertex_t;
using value_type = thrust::nullopt_t;
using vertex_type = vertex_t;
using value_type = thrust::nullopt_t;
static constexpr bool is_packed_bool = false;
static constexpr bool has_packed_bool_element = false;

edge_partition_endpoint_dummy_property_device_view_t() = default;

8 changes: 5 additions & 3 deletions cpp/include/cugraph/edge_property.hpp
@@ -72,9 +72,11 @@ class edge_property_t {
public:
static_assert(cugraph::is_arithmetic_or_thrust_tuple_of_arithmetic<T>::value);

using edge_type = typename GraphViewType::edge_type;
using value_type = T;
using buffer_type = decltype(allocate_dataframe_buffer<T>(size_t{0}, rmm::cuda_stream_view{}));
using edge_type = typename GraphViewType::edge_type;
using value_type = T;
using buffer_type =
decltype(allocate_dataframe_buffer<std::conditional_t<std::is_same_v<T, bool>, uint32_t, T>>(
size_t{0}, rmm::cuda_stream_view{}));

edge_property_t(raft::handle_t const& handle) {}

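
The buffer_type change above means a bool edge property (for example, an edge mask) is now backed by packed uint32_t words instead of one byte per edge. The type selection itself is plain std::conditional_t, as this minimal sketch shows (names are local to the example, not cuGraph identifiers):

#include <cstdint>
#include <type_traits>

// Element type actually stored in the dataframe buffer for a property of type T.
template <typename T>
using stored_t = std::conditional_t<std::is_same_v<T, bool>, std::uint32_t, T>;

static_assert(std::is_same_v<stored_t<bool>, std::uint32_t>);  // bools become packed words
static_assert(std::is_same_v<stored_t<float>, float>);         // other types are stored as-is

int main() { return 0; }
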
7 changes: 7 additions & 0 deletions cpp/include/cugraph/utilities/packed_bool_utils.hpp
@@ -47,6 +47,13 @@ has_packed_bool_element(std::index_sequence<Is...>)

} // namespace detail

template <typename ValueIterator, typename value_t>
constexpr bool is_packed_bool()
{
return std::is_same_v<typename thrust::iterator_traits<ValueIterator>::value_type, uint32_t> &&
std::is_same_v<value_t, bool>;
}

// sizeof(uint32_t) * 8 packed Boolean values are stored using one uint32_t
template <typename ValueIterator, typename value_t>
constexpr bool has_packed_bool_element()
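
The new is_packed_bool trait is true only when the iterator's storage type is uint32_t and the requested value type is bool, whereas has_packed_bool_element is the looser check that also admits thrust::tuple value types containing packed-bool elements. A small sketch of the distinction, assuming raw pointers as value iterators and Thrust headers available on the include path:

#include <cugraph/utilities/packed_bool_utils.hpp>

#include <cstdint>

using packed_word_it = std::uint32_t const*;  // iterator over packed-bool words
using float_it       = float const*;

static_assert(cugraph::is_packed_bool<packed_word_it, bool>());
static_assert(cugraph::has_packed_bool_element<packed_word_it, bool>());

// A plain arithmetic property is neither packed nor contains packed-bool elements.
static_assert(!cugraph::is_packed_bool<float_it, float>());
static_assert(!cugraph::has_packed_bool_element<float_it, float>());

int main() { return 0; }
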