Commit b74aa03
Revert "Revert "Print original vertex ids""
This reverts commit 3096cf0.
Naim committed Feb 7, 2024
1 parent 3096cf0 commit b74aa03
Showing 8 changed files with 196 additions and 87 deletions.
2 changes: 1 addition & 1 deletion cpp/examples/README.md
@@ -22,7 +22,7 @@ For single_gpu

`path_to_executable path_to_a_csv_graph_file [memory allocation mode]`

-For multi_gpu and graph_partitioning
+For multi_gpu, graph_partitioning and cugraph_operations

`mpirun -np 2 path_to_executable path_to_a_csv_graph_file [memory allocation mode]`

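For example, assuming the cugraph_operations example builds to an executable named `graph_operations` and taking the `graph3.csv` file added in this commit (the executable name, relative paths, and the `pool` allocation mode are illustrative assumptions, not taken from the README):

`mpirun -np 2 ./graph_operations ../graph3.csv pool`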
142 changes: 98 additions & 44 deletions cpp/examples/cugraph_operations/graph_operations.cu
@@ -104,7 +104,8 @@ std::unique_ptr<raft::handle_t> initialize_mg_handle(std::string const& allocati

template <typename vertex_t, typename edge_t, typename weight_t, bool multi_gpu>
void perform_example_graph_operations(raft::handle_t const& handle,
-                                      std::string const& csv_graph_file_path)
+                                      std::string const& csv_graph_file_path,
+                                      const bool weighted = false)
{
auto const comm_rank = handle.get_comms().get_rank();
auto const comm_size = handle.get_comms().get_size();
@@ -118,7 +119,7 @@ void perform_example_graph_operations(raft::handle_t const& handle,

auto [graph, edge_weights, renumber_map] =
cugraph::test::read_graph_from_csv_file<vertex_t, edge_t, weight_t, false, multi_gpu>(
-      handle, csv_graph_file_path, true, renumber);
+      handle, csv_graph_file_path, weighted, renumber);

// Non-owning view of the graph object
auto graph_view = graph.view();
@@ -132,46 +133,99 @@ void perform_example_graph_operations(raft::handle_t const& handle,
auto edge_weight_view = edge_weights ? std::make_optional((*edge_weights).view()) : std::nullopt;

using graph_view_t = cugraph::graph_view_t<vertex_t, edge_t, false, multi_gpu>;
-  rmm::device_uvector<weight_t> vertex_weights =
-    compute_out_weight_sums(handle, graph_view, *edge_weight_view);
-
-  cugraph::edge_src_property_t<graph_view_t, weight_t> src_vertex_weights_cache(handle, graph_view);
-
-  cugraph::edge_dst_property_t<graph_view_t, weight_t> dst_vertex_weights_cache(handle, graph_view);
-
-  update_edge_src_property(handle, graph_view, vertex_weights.begin(), src_vertex_weights_cache);
-
-  update_edge_dst_property(handle, graph_view, vertex_weights.begin(), dst_vertex_weights_cache);
-
-  rmm::device_uvector<weight_t> outputs(size_of_the_vertex_partition_assigned_to_this_process,
-                                        handle.get_stream());
-
-  per_v_transform_reduce_incoming_e(
-    handle,
-    graph_view,
-    src_vertex_weights_cache.view(),
-    dst_vertex_weights_cache.view(),
-    *edge_weight_view,
-    [] __device__(auto src, auto dst, auto src_prop, auto dst_prop, auto edge_prop) {
-      printf("\n%d ---> %d : src_prop= %f dst_prop = %f edge_prop = %f \n",
-             static_cast<int>(src),
-             static_cast<int>(dst),
-             static_cast<float>(src_prop),
-             static_cast<float>(dst_prop),
-             static_cast<float>(edge_prop));
-      return dst_prop * edge_prop;
-    },
-    weight_t{0},
-    cugraph::reduce_op::plus<weight_t>{},
-    outputs.begin());
-
-  auto outputs_title = std::string("outputs_").append(std::to_string(comm_rank));
-  size_t max_nr_of_elements_to_print = 10;
-  RAFT_CUDA_TRY(cudaDeviceSynchronize());
-  raft::print_device_vector(outputs_title.c_str(),
-                            outputs.begin(),
-                            std::min<size_t>(outputs.size(), max_nr_of_elements_to_print),
-                            std::cout);
+  //
+  // As an example operation, compute the weighted average of the properties of
+  // neighboring vertices, weighted by the edge weights, if the input graph is weighted;
+  // otherwise, compute the simple average.
+  //
+  if (weighted) {
+    using result_t      = weight_t;
+    auto vertex_weights = compute_out_weight_sums(handle, graph_view, *edge_weight_view);
+
+    cugraph::edge_src_property_t<graph_view_t, result_t> src_vertex_weights_cache(handle,
+                                                                                  graph_view);
+
+    cugraph::edge_dst_property_t<graph_view_t, result_t> dst_vertex_weights_cache(handle,
+                                                                                  graph_view);
+
+    update_edge_src_property(handle, graph_view, vertex_weights.begin(), src_vertex_weights_cache);
+
+    update_edge_dst_property(handle, graph_view, vertex_weights.begin(), dst_vertex_weights_cache);
+
+    rmm::device_uvector<result_t> outputs(size_of_the_vertex_partition_assigned_to_this_process,
+                                          handle.get_stream());
+
+    per_v_transform_reduce_incoming_e(
+      handle,
+      graph_view,
+      src_vertex_weights_cache.view(),
+      dst_vertex_weights_cache.view(),
+      (*edge_weight_view),
+      [new_to_original_id_map = (*renumber_map).data()] __device__(
+        auto src, auto dst, auto src_prop, auto dst_prop, auto edge_prop) {
+        printf("\nsrc = %d ---> dst = %d : src_prop = %f dst_prop = %f edge_prop = %f\n",
+               static_cast<int>(new_to_original_id_map[src]),
+               static_cast<int>(new_to_original_id_map[dst]),
+               static_cast<float>(src_prop),
+               static_cast<float>(dst_prop),
+               static_cast<float>(edge_prop));
+        return dst_prop * edge_prop;
+      },
+      result_t{0},
+      cugraph::reduce_op::plus<result_t>{},
+      outputs.begin());
+
+    auto outputs_title                 = std::string("outputs_").append(std::to_string(comm_rank));
+    size_t max_nr_of_elements_to_print = 10;
+    RAFT_CUDA_TRY(cudaDeviceSynchronize());
+    raft::print_device_vector(outputs_title.c_str(),
+                              outputs.begin(),
+                              std::min<size_t>(outputs.size(), max_nr_of_elements_to_print),
+                              std::cout);
+  } else {
+    using result_t      = edge_t;
+    auto vertex_weights = graph_view.compute_out_degrees(handle);
+
+    cugraph::edge_src_property_t<graph_view_t, result_t> src_vertex_weights_cache(handle,
+                                                                                  graph_view);
+    cugraph::edge_dst_property_t<graph_view_t, result_t> dst_vertex_weights_cache(handle,
+                                                                                  graph_view);
+
+    update_edge_src_property(handle, graph_view, vertex_weights.begin(), src_vertex_weights_cache);
+
+    update_edge_dst_property(handle, graph_view, vertex_weights.begin(), dst_vertex_weights_cache);
+
+    rmm::device_uvector<result_t> outputs(size_of_the_vertex_partition_assigned_to_this_process,
+                                          handle.get_stream());
+
+    per_v_transform_reduce_incoming_e(
+      handle,
+      graph_view,
+      src_vertex_weights_cache.view(),
+      dst_vertex_weights_cache.view(),
+      cugraph::edge_dummy_property_t{}.view(),
+      [new_to_original_id_map = (*renumber_map).data()] __device__(
+        auto src, auto dst, auto src_prop, auto dst_prop, auto) {
+        printf("\nsrc = %d ---> dst = %d : src_prop = %f dst_prop = %f\n",
+               static_cast<int>(new_to_original_id_map[src]),
+               static_cast<int>(new_to_original_id_map[dst]),
+               static_cast<float>(src_prop),
+               static_cast<float>(dst_prop));
+        return dst_prop;
+      },
+      result_t{0},
+      cugraph::reduce_op::plus<result_t>{},
+      outputs.begin());
+
+    auto outputs_title                 = std::string("outputs_").append(std::to_string(comm_rank));
+    size_t max_nr_of_elements_to_print = 10;
+    RAFT_CUDA_TRY(cudaDeviceSynchronize());
+    raft::print_device_vector(outputs_title.c_str(),
+                              outputs.begin(),
+                              std::min<size_t>(outputs.size(), max_nr_of_elements_to_print),
+                              std::cout);
+  }
}

int main(int argc, char** argv)
@@ -196,6 +250,6 @@ int main(int argc, char** argv)
using weight_t = float;
constexpr bool multi_gpu = true;

-  perform_example_graph_operations<vertex_t, edge_t, weight_t, multi_gpu>(*handle,
-                                                                          csv_graph_file_path);
+  perform_example_graph_operations<vertex_t, edge_t, weight_t, multi_gpu>(
+    *handle, csv_graph_file_path, false);
}
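With `cugraph::reduce_op::plus`, `per_v_transform_reduce_incoming_e` sums the lambda's return value over each vertex's incoming edges. Below is a minimal host-side C++ sketch of that reduction semantics, using the edge list of the `graph3.csv` file added further down; plain C++ stands in for the cuGraph primitive here, so this is an illustration of the computation, not the actual API.

#include <cstdio>
#include <vector>

struct Edge {
  int src;
  int dst;
  float w;
};

int main()
{
  // Edge list of graph3.csv (added below): "source destination weight" per row.
  std::vector<Edge> edges = {{0, 1, 10}, {0, 2, 20}, {0, 3, 30}, {0, 4, 40}, {0, 5, 50}, {6, 0, 6}};
  int num_vertices        = 7;

  // Host analogue of compute_out_weight_sums: per-vertex sum of outgoing edge weights.
  std::vector<float> vertex_weights(num_vertices, 0.0f);
  for (auto const& e : edges) {
    vertex_weights[e.src] += e.w;
  }

  // Host analogue of per_v_transform_reduce_incoming_e with reduce_op::plus:
  // outputs[v] accumulates the lambda's value over the incoming edges (src -> v),
  // mirroring "return dst_prop * edge_prop;" in the weighted branch above.
  std::vector<float> outputs(num_vertices, 0.0f);
  for (auto const& e : edges) {
    outputs[e.dst] += vertex_weights[e.dst] * e.w;
  }

  for (int v = 0; v < num_vertices; ++v) {
    std::printf("outputs[%d] = %f\n", v, outputs[v]);
  }
  return 0;
}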
7 changes: 7 additions & 0 deletions cpp/examples/graph3.csv
@@ -0,0 +1,7 @@
+0 1 10
+0 2 20
+0 3 30
+0 4 40
+0 5 50
+6 0 6

6 changes: 6 additions & 0 deletions cpp/examples/graph4.csv
@@ -0,0 +1,6 @@
+0 1
+0 2
+0 3
+0 4
+0 5
+6 0
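Both files appear to use the whitespace-separated edge-list layout that `read_graph_from_csv_file` parses: each row of `graph3.csv` is `source destination weight`, while `graph4.csv` omits the weight column, which is consistent with both `main` functions in this commit passing `weighted = false`.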
94 changes: 61 additions & 33 deletions cpp/examples/graph_partition/vertex_and_edge_partition.cu
@@ -98,7 +98,8 @@ std::unique_ptr<raft::handle_t> initialize_mg_handle(std::string const& allocati

template <typename vertex_t, typename edge_t, typename weight_t, bool multi_gpu>
void look_into_vertex_and_edge_partitions(raft::handle_t const& handle,
-                                          std::string const& csv_graph_file_path)
+                                          std::string const& csv_graph_file_path,
+                                          bool weighted = false)
{
auto const comm_rank = handle.get_comms().get_rank();
auto const comm_size = handle.get_comms().get_size();
@@ -112,7 +113,7 @@ void look_into_vertex_and_edge_partitions(raft::handle_t const& handle,

auto [graph, edge_weights, renumber_map] =
cugraph::test::read_graph_from_csv_file<vertex_t, edge_t, weight_t, false, multi_gpu>(
-      handle, csv_graph_file_path, true, renumber);
+      handle, csv_graph_file_path, weighted, renumber);

// The metadata of the non-owning graph view stores the vertex/edge partitioning map
auto graph_view = graph.view();
@@ -160,35 +161,53 @@ void look_into_vertex_and_edge_partitions(raft::handle_t const& handle,
// vertex id. The new (aka renumbered) id of the first vertex, i.e., the vertex at position 0
// of `renumber_map`, assigned to this process

-  vertex_t renumber_vertex_id_of_local_first = graph_view.local_vertex_partition_range_first();
+  vertex_t renumbered_vertex_id_of_local_first = graph_view.local_vertex_partition_range_first();

// The new (aka renumbered) id of the last vertex, i.e., the vertex at position
// `size_of_the_vertex_partition_assigned_to_this_process` - 1 of `renumber_map`,
// assigned to this process

-  vertex_t renumber_vertex_id_of_local_last = graph_view.local_vertex_partition_range_last();
+  vertex_t renumbered_vertex_id_of_local_last = graph_view.local_vertex_partition_range_last();

// Print original vertex ids, new (aka renumbered) vertex ids and the ranks of the owner processes

if (renumber_map) {
-    thrust::for_each(
-      thrust::host,
-      thrust::make_zip_iterator(
-        thrust::make_tuple(h_vertices_in_this_proces.begin(),
-                           thrust::make_counting_iterator(renumber_vertex_id_of_local_first))),
-      thrust::make_zip_iterator(
-        thrust::make_tuple(h_vertices_in_this_proces.end(),
-                           thrust::make_counting_iterator(renumber_vertex_id_of_local_last))),
-      [comm_rank](auto old_and_new_id_pair) {
-        auto old_id = thrust::get<0>(old_and_new_id_pair);
-        auto new_id = thrust::get<1>(old_and_new_id_pair);
-        printf("owner rank = %d, original vertex id %d -----> new (renumbered) vertex id %d\n",
-               comm_rank,
-               static_cast<int>(old_id),
-               static_cast<int>(new_id));
-      });
+    thrust::for_each(thrust::host,
+                     thrust::make_zip_iterator(thrust::make_tuple(
+                       h_vertices_in_this_proces.begin(),
+                       thrust::make_counting_iterator(renumbered_vertex_id_of_local_first))),
+                     thrust::make_zip_iterator(thrust::make_tuple(
+                       h_vertices_in_this_proces.end(),
+                       thrust::make_counting_iterator(renumbered_vertex_id_of_local_last))),
+                     [comm_rank](auto old_and_new_id_pair) {
+                       auto old_id = thrust::get<0>(old_and_new_id_pair);
+                       auto new_id = thrust::get<1>(old_and_new_id_pair);
+                       printf("owner rank = %d, original vertex id %d is renumbered to %d\n",
+                              comm_rank,
+                              static_cast<int>(old_id),
+                              static_cast<int>(new_id));
+                     });
}

+  // if (renumber_map) {
+  //   thrust::for_each(
+  //     handle.get_thrust_policy(),
+  //     thrust::make_zip_iterator(
+  //       thrust::make_tuple((*renumber_map).begin(),
+  //                          thrust::make_counting_iterator(renumbered_vertex_id_of_local_first))),
+  //     thrust::make_zip_iterator(thrust::make_tuple(
+  //       (*renumber_map).end(),
+  //       thrust::make_counting_iterator(renumbered_vertex_id_of_local_last))),
+  //     [comm_rank](auto old_and_new_id_pair) {
+  //       auto old_id = thrust::get<0>(old_and_new_id_pair);
+  //       auto new_id = thrust::get<1>(old_and_new_id_pair);
+  //       printf("owner rank = %d, original vertex id %d is renumbered to %d\n",
+  //              comm_rank,
+  //              static_cast<int>(old_id),
+  //              static_cast<int>(new_id));
+  //     });
+  // }

//
// Look into edge partitions and their associated edge properties (if any)
//
@@ -239,23 +258,25 @@ void look_into_vertex_and_edge_partitions(raft::handle_t const& handle,
indices,
major_range_first,
is_weighted,
-       weights = weights_of_edges_stored_in_this_edge_partition.begin()] __device__(auto i) {
+       weights = weights_of_edges_stored_in_this_edge_partition.begin(),
+       new_to_original_id_map = (*renumber_map).data()] __device__(auto i) {
auto v = major_range_first + i;
auto deg_of_v_in_this_edge_partition = offsets[i + 1] - offsets[i];

thrust::for_each(
thrust::seq,
thrust::make_counting_iterator(edge_t{offsets[i]}),
thrust::make_counting_iterator(edge_t{offsets[i + 1]}),
-          [comm_rank, ep_idx, v, indices, is_weighted, weights] __device__(auto pos) {
+          [comm_rank, ep_idx, v, indices, new_to_original_id_map, is_weighted, weights] __device__(
+            auto pos) {
if (is_weighted) {
printf(
"\n[comm_rank = %d local edge partition id = %d] edge: source = %d "
"destination = %d weight = %f\n",
static_cast<int>(comm_rank),
static_cast<int>(ep_idx),
-                static_cast<int>(v),
-                static_cast<int>(indices[pos]),
+                static_cast<int>(new_to_original_id_map[v]),
+                static_cast<int>(new_to_original_id_map[indices[pos]]),
static_cast<float>(weights[pos]));

} else {
@@ -264,8 +285,8 @@ void look_into_vertex_and_edge_partitions(raft::handle_t const& handle,
"destination = %d\n",
static_cast<int>(comm_rank),
static_cast<int>(ep_idx),
-                static_cast<int>(v),
-                static_cast<int>(indices[pos]));
+                static_cast<int>(new_to_original_id_map[v]),
+                static_cast<int>(new_to_original_id_map[indices[pos]]));
}
});
});
Expand All @@ -283,6 +304,7 @@ void look_into_vertex_and_edge_partitions(raft::handle_t const& handle,
major_range_first,
is_weighted,
weights = weights_of_edges_stored_in_this_edge_partition.begin(),
+       new_to_original_id_map = (*renumber_map).data(),
dcs_nzd_vertices = (*dcs_nzd_vertices),
major_hypersparse_first = (*major_hypersparse_first)] __device__(auto i) {
auto v = dcs_nzd_vertices[i];
@@ -293,15 +315,21 @@ void look_into_vertex_and_edge_partitions(raft::handle_t const& handle,
thrust::seq,
thrust::make_counting_iterator(edge_t{offsets[major_idx]}),
thrust::make_counting_iterator(edge_t{offsets[major_idx + 1]}),
-            [comm_rank, ep_idx, v, indices, is_weighted, weights] __device__(auto pos) {
+            [comm_rank,
+             ep_idx,
+             v,
+             indices,
+             new_to_original_id_map,
+             is_weighted,
+             weights] __device__(auto pos) {
if (is_weighted) {
printf(
"\n[comm_rank = %d local edge partition id = %d] edge: source = %d "
"destination = %d weight = %f\n",
static_cast<int>(comm_rank),
static_cast<int>(ep_idx),
-                  static_cast<int>(v),
-                  static_cast<int>(indices[pos]),
+                  static_cast<int>(new_to_original_id_map[v]),
+                  static_cast<int>(new_to_original_id_map[indices[pos]]),
static_cast<float>(weights[pos]));

} else {
@@ -310,8 +338,8 @@ void look_into_vertex_and_edge_partitions(raft::handle_t const& handle,
"destination = %d\n",
static_cast<int>(comm_rank),
static_cast<int>(ep_idx),
-                  static_cast<int>(v),
-                  static_cast<int>(indices[pos]));
+                  static_cast<int>(new_to_original_id_map[v]),
+                  static_cast<int>(new_to_original_id_map[indices[pos]]));
}
});
});
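The two `thrust::for_each` loops above walk an edge partition in CSR form (with a DCSR-style lookup through `dcs_nzd_vertices` for the hypersparse region): for the i-th major vertex, `offsets[i]` and `offsets[i + 1]` bracket its slice of `indices`, and of `weights` when the graph is weighted. A minimal host-side sketch of that traversal, over hypothetical literal arrays rather than the example's real partitions:

#include <cstdio>
#include <vector>

int main()
{
  // Hypothetical CSR arrays for one edge partition: 3 major vertices,
  // offsets has (number of majors + 1) entries.
  std::vector<int> offsets = {0, 2, 3, 5};
  std::vector<int> indices = {1, 4, 0, 2, 3};  // concatenated destination lists
  int major_range_first    = 0;                // first (renumbered) major vertex id

  for (int i = 0; i + 1 < static_cast<int>(offsets.size()); ++i) {
    int v = major_range_first + i;  // renumbered source (major) vertex id
    for (int pos = offsets[i]; pos < offsets[i + 1]; ++pos) {
      std::printf("edge: source = %d destination = %d\n", v, indices[pos]);
    }
  }
  return 0;
}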
@@ -360,6 +388,6 @@ int main(int argc, char** argv)
using weight_t = float;
constexpr bool multi_gpu = true;

-  look_into_vertex_and_edge_partitions<vertex_t, edge_t, weight_t, multi_gpu>(*handle,
-                                                                              csv_graph_file_path);
+  look_into_vertex_and_edge_partitions<vertex_t, edge_t, weight_t, multi_gpu>(
+    *handle, csv_graph_file_path, false);
}
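A final note on the id translation this commit reintroduces: per the comments in the partitioning walkthrough above, position p of a rank's local `renumber_map` holds the original id of renumbered id `local_vertex_partition_range_first() + p`. A host-side sketch of that mapping with a hypothetical local map (all values here are illustrative, not taken from the examples):

#include <cstdio>
#include <vector>

int main()
{
  // Hypothetical local renumber_map of one rank: position p stores the
  // original id of renumbered id (local_first + p).
  std::vector<int> renumber_map = {6, 0, 3, 5};
  int local_first               = 4;  // stand-in for local_vertex_partition_range_first()

  for (int p = 0; p < static_cast<int>(renumber_map.size()); ++p) {
    std::printf("original vertex id %d is renumbered to %d\n", renumber_map[p], local_first + p);
  }
  return 0;
}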