diff --git a/.github/labeler.yml b/.github/labeler.yml index c589fda6099..368bf328b99 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -9,17 +9,6 @@ python: benchmarks: - 'benchmarks/**' -doc: - - 'docs/**' - - '**/*.md' - - 'datasets/**' - - 'notebooks/**' - - '**/*.txt' - - '**/*.rst' - - '**/*.ipynb' - - '**/*.pdf' - - '**/*.png' - datasets: - 'datasets/**' diff --git a/github/workflows/labeler.yml b/.github/workflows/labeler.yml similarity index 83% rename from github/workflows/labeler.yml rename to .github/workflows/labeler.yml index 23956a02fbd..31e78f82a62 100644 --- a/github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -6,6 +6,6 @@ jobs: triage: runs-on: ubuntu-latest steps: - - uses: actions/labeler@main + - uses: actions/labeler@v4 with: repo-token: "${{ secrets.GITHUB_TOKEN }}" diff --git a/CHANGELOG.md b/CHANGELOG.md index 33a5b2bc5e7..d165cd7efc4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,84 @@ +# cuGraph 23.12.00 (6 Dec 2023) + +## 🚨 Breaking Changes + +- [BUG] Restore the original default order of CSR, which does not reverse edges in cuGraph-PyG ([#3980](https://github.com/rapidsai/cugraph/pull/3980)) [@alexbarghi-nv](https://github.com/alexbarghi-nv) +- `Resultset` and `Dataset` Refactors ([#3957](https://github.com/rapidsai/cugraph/pull/3957)) [@nv-rliu](https://github.com/nv-rliu) +- Moves more MG graph ETL to libcugraph and re-enables MG tests in CI ([#3941](https://github.com/rapidsai/cugraph/pull/3941)) [@jnke2016](https://github.com/jnke2016) + +## 🐛 Bug Fixes + +- Pin actions/labeler to v4 ([#4038](https://github.com/rapidsai/cugraph/pull/4038)) [@raydouglass](https://github.com/raydouglass) +- Find rmm before cuco ([#4011](https://github.com/rapidsai/cugraph/pull/4011)) [@vyasr](https://github.com/vyasr) +- Pin to minor versions of packages outside the cuGraph repository. 
([#4004](https://github.com/rapidsai/cugraph/pull/4004)) [@bdice](https://github.com/bdice) +- Move MTMG_TEST to MG tests block ([#3993](https://github.com/rapidsai/cugraph/pull/3993)) [@naimnv](https://github.com/naimnv) +- Fix Leiden refinement phase ([#3990](https://github.com/rapidsai/cugraph/pull/3990)) [@naimnv](https://github.com/naimnv) +- [BUG] Fix Graph Construction From Pandas in cuGraph-PyG ([#3985](https://github.com/rapidsai/cugraph/pull/3985)) [@alexbarghi-nv](https://github.com/alexbarghi-nv) +- [BUG] Restore the original default order of CSR, which does not reverse edges in cuGraph-PyG ([#3980](https://github.com/rapidsai/cugraph/pull/3980)) [@alexbarghi-nv](https://github.com/alexbarghi-nv) +- Fix eigenvector testing and HITS testing discrepancies ([#3979](https://github.com/rapidsai/cugraph/pull/3979)) [@ChuckHastings](https://github.com/ChuckHastings) +- [BUG] Fix Incorrect Edge Index, Directory Selection in cuGraph-PyG Loader ([#3978](https://github.com/rapidsai/cugraph/pull/3978)) [@alexbarghi-nv](https://github.com/alexbarghi-nv) +- [BUG] Check if Dask has quit to avoid throwing an exception and triggering a segfault on ddp exit ([#3961](https://github.com/rapidsai/cugraph/pull/3961)) [@alexbarghi-nv](https://github.com/alexbarghi-nv) +- nx-cugraph: xfail test_louvain.py:test_threshold in Python 3.9 ([#3944](https://github.com/rapidsai/cugraph/pull/3944)) [@eriknw](https://github.com/eriknw) + +## 📖 Documentation + +- [DOC]: Fix invalid links and add materials to notebooks ([#4002](https://github.com/rapidsai/cugraph/pull/4002)) [@huiyuxie](https://github.com/huiyuxie) +- Update Broken Links in README.md ([#3924](https://github.com/rapidsai/cugraph/pull/3924)) [@nv-rliu](https://github.com/nv-rliu) + +## 🚀 New Features + +- Implement the transform_e primitive (to update property values for all edges) ([#3917](https://github.com/rapidsai/cugraph/pull/3917)) [@seunghwak](https://github.com/seunghwak) +- Update the neighbor intersection primitive to support edge masking. 
([#3550](https://github.com/rapidsai/cugraph/pull/3550)) [@seunghwak](https://github.com/seunghwak) + +## 🛠️ Improvements + +- Correct defect found in DLFW testing ([#4021](https://github.com/rapidsai/cugraph/pull/4021)) [@ChuckHastings](https://github.com/ChuckHastings) +- `nx-cugraph` README update: adds missing `connected_components` algo to table ([#4019](https://github.com/rapidsai/cugraph/pull/4019)) [@rlratzel](https://github.com/rlratzel) +- Build concurrency for nightly and merge triggers ([#4009](https://github.com/rapidsai/cugraph/pull/4009)) [@bdice](https://github.com/bdice) +- Support `drop_last` Argument in cuGraph-PyG Loader ([#3995](https://github.com/rapidsai/cugraph/pull/3995)) [@alexbarghi-nv](https://github.com/alexbarghi-nv) +- Adds `update-version.sh` support for recently added files containing RAPIDS versions ([#3994](https://github.com/rapidsai/cugraph/pull/3994)) [@rlratzel](https://github.com/rlratzel) +- Use new `rapids-dask-dependency` metapackage for managing `dask` versions ([#3991](https://github.com/rapidsai/cugraph/pull/3991)) [@galipremsagar](https://github.com/galipremsagar) +- Fixes to nx-cugraph README: fixes typos, updates link to NX backend docs ([#3989](https://github.com/rapidsai/cugraph/pull/3989)) [@rlratzel](https://github.com/rlratzel) +- Address FIXMEs ([#3988](https://github.com/rapidsai/cugraph/pull/3988)) [@seunghwak](https://github.com/seunghwak) +- Updates README file to include nx-cugraph user documentation, adds nx-cugraph to main README ([#3984](https://github.com/rapidsai/cugraph/pull/3984)) [@rlratzel](https://github.com/rlratzel) +- Update C API graph creation function signatures ([#3982](https://github.com/rapidsai/cugraph/pull/3982)) [@ChuckHastings](https://github.com/ChuckHastings) +- [REVIEW]Optimize cugraph-DGL csc codepath ([#3977](https://github.com/rapidsai/cugraph/pull/3977)) [@VibhuJawa](https://github.com/VibhuJawa) +- nx-cugraph: add SSSP (unweighted) ([#3976](https://github.com/rapidsai/cugraph/pull/3976)) [@eriknw](https://github.com/eriknw) +- CuGraph compatibility fixes ([#3973](https://github.com/rapidsai/cugraph/pull/3973)) [@brandon-b-miller](https://github.com/brandon-b-miller) +- Skip certain `cugraph-pyg` tests when torch-sparse is not available ([#3970](https://github.com/rapidsai/cugraph/pull/3970)) [@tingyu66](https://github.com/tingyu66) +- nx-cugraph: add `eigenvector_centrality`, `katz_centrality`, `hits`, `pagerank` ([#3968](https://github.com/rapidsai/cugraph/pull/3968)) [@eriknw](https://github.com/eriknw) +- Cut peak memory footprint in graph creation ([#3966](https://github.com/rapidsai/cugraph/pull/3966)) [@seunghwak](https://github.com/seunghwak) +- nx-cugraph: add CC for undirected graphs to fix k-truss ([#3965](https://github.com/rapidsai/cugraph/pull/3965)) [@eriknw](https://github.com/eriknw) +- Skip certain `cugraph-pyg` tests when `torch_sparse` is not available ([#3962](https://github.com/rapidsai/cugraph/pull/3962)) [@tingyu66](https://github.com/tingyu66) +- `Resultset` and `Dataset` Refactors ([#3957](https://github.com/rapidsai/cugraph/pull/3957)) [@nv-rliu](https://github.com/nv-rliu) +- Download `xml` docs artifact through CloudFront endpoint ([#3955](https://github.com/rapidsai/cugraph/pull/3955)) [@AyodeAwe](https://github.com/AyodeAwe) +- Add many graph generators to nx-cugraph ([#3954](https://github.com/rapidsai/cugraph/pull/3954)) [@eriknw](https://github.com/eriknw) +- Unpin `dask` and `distributed` for `23.12` development 
([#3953](https://github.com/rapidsai/cugraph/pull/3953)) [@galipremsagar](https://github.com/galipremsagar) +- Errors compiling for DLFW on CUDA 12.3 ([#3952](https://github.com/rapidsai/cugraph/pull/3952)) [@ChuckHastings](https://github.com/ChuckHastings) +- nx-cugraph: add k_truss and degree centralities ([#3945](https://github.com/rapidsai/cugraph/pull/3945)) [@eriknw](https://github.com/eriknw) +- nx-cugraph: handle seed argument in edge_betweenness_centrality ([#3943](https://github.com/rapidsai/cugraph/pull/3943)) [@eriknw](https://github.com/eriknw) +- Moves more MG graph ETL to libcugraph and re-enables MG tests in CI ([#3941](https://github.com/rapidsai/cugraph/pull/3941)) [@jnke2016](https://github.com/jnke2016) +- Temporarily disable mg testing ([#3940](https://github.com/rapidsai/cugraph/pull/3940)) [@jnke2016](https://github.com/jnke2016) +- adding C/C++ API docs ([#3938](https://github.com/rapidsai/cugraph/pull/3938)) [@BradReesWork](https://github.com/BradReesWork) +- Add multigraph support to nx-cugraph ([#3934](https://github.com/rapidsai/cugraph/pull/3934)) [@eriknw](https://github.com/eriknw) +- Setup Consistent Nightly Versions for Pip and Conda ([#3933](https://github.com/rapidsai/cugraph/pull/3933)) [@divyegala](https://github.com/divyegala) +- MTMG multi node ([#3932](https://github.com/rapidsai/cugraph/pull/3932)) [@ChuckHastings](https://github.com/ChuckHastings) +- Use branch-23.12 workflows. ([#3928](https://github.com/rapidsai/cugraph/pull/3928)) [@bdice](https://github.com/bdice) +- Fix an issue occurring in the cuGraph-DGL example for "mixed" mode. ([#3927](https://github.com/rapidsai/cugraph/pull/3927)) [@drivanov](https://github.com/drivanov) +- Updating Docs ([#3923](https://github.com/rapidsai/cugraph/pull/3923)) [@BradReesWork](https://github.com/BradReesWork) +- Forward-merge branch-23.10 to branch-23.12 ([#3919](https://github.com/rapidsai/cugraph/pull/3919)) [@nv-rliu](https://github.com/nv-rliu) +- new build all option ([#3916](https://github.com/rapidsai/cugraph/pull/3916)) [@BradReesWork](https://github.com/BradReesWork) +- Silence spurious compiler warnings ([#3913](https://github.com/rapidsai/cugraph/pull/3913)) [@seunghwak](https://github.com/seunghwak) +- Link wholegrah and cugraphops XML docs ([#3906](https://github.com/rapidsai/cugraph/pull/3906)) [@AyodeAwe](https://github.com/AyodeAwe) +- Updates to 23.12 ([#3905](https://github.com/rapidsai/cugraph/pull/3905)) [@raydouglass](https://github.com/raydouglass) +- Forward-merge branch-23.10 to branch-23.12 ([#3904](https://github.com/rapidsai/cugraph/pull/3904)) [@GPUtester](https://github.com/GPUtester) +- Build CUDA 12.0 ARM conda packages. 
([#3903](https://github.com/rapidsai/cugraph/pull/3903)) [@bdice](https://github.com/bdice) +- Merge branch-23.10 into branch-23.12 ([#3898](https://github.com/rapidsai/cugraph/pull/3898)) [@rlratzel](https://github.com/rlratzel) +- Some MTMG code cleanup and small optimizations ([#3894](https://github.com/rapidsai/cugraph/pull/3894)) [@ChuckHastings](https://github.com/ChuckHastings) +- Enable parallel mode ([#3875](https://github.com/rapidsai/cugraph/pull/3875)) [@jnke2016](https://github.com/jnke2016) +- Adds benchmarks for `nx-cugraph` ([#3854](https://github.com/rapidsai/cugraph/pull/3854)) [@rlratzel](https://github.com/rlratzel) +- Add nx-cugraph notebook for showing accelerated networkX APIs ([#3830](https://github.com/rapidsai/cugraph/pull/3830)) [@betochimas](https://github.com/betochimas) + # cuGraph 23.10.00 (11 Oct 2023) ## 🚨 Breaking Changes diff --git a/ci/build_wheel_cugraph.sh b/ci/build_wheel_cugraph.sh index 5b5061f67c2..0a722c88c3e 100755 --- a/ci/build_wheel_cugraph.sh +++ b/ci/build_wheel_cugraph.sh @@ -12,6 +12,6 @@ RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})" RAPIDS_PY_WHEEL_NAME=pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX} rapids-download-wheels-from-s3 ./local-pylibcugraph export PIP_FIND_LINKS=$(pwd)/local-pylibcugraph -export SKBUILD_CONFIGURE_OPTIONS="-DDETECT_CONDA_ENV=OFF -DCUGRAPH_BUILD_WHEELS=ON -DFIND_CUGRAPH_CPP=OFF -DCPM_cugraph-ops_SOURCE=${GITHUB_WORKSPACE}/cugraph-ops/" +export SKBUILD_CONFIGURE_OPTIONS="-DDETECT_CONDA_ENV=OFF -DFIND_CUGRAPH_CPP=OFF -DCPM_cugraph-ops_SOURCE=${GITHUB_WORKSPACE}/cugraph-ops/" ./ci/build_wheel.sh cugraph python/cugraph diff --git a/ci/build_wheel_pylibcugraph.sh b/ci/build_wheel_pylibcugraph.sh index 8d365bc250b..9e236c145ce 100755 --- a/ci/build_wheel_pylibcugraph.sh +++ b/ci/build_wheel_pylibcugraph.sh @@ -3,6 +3,6 @@ set -euo pipefail -export SKBUILD_CONFIGURE_OPTIONS="-DDETECT_CONDA_ENV=OFF -DCUGRAPH_BUILD_WHEELS=ON -DFIND_CUGRAPH_CPP=OFF -DCPM_cugraph-ops_SOURCE=${GITHUB_WORKSPACE}/cugraph-ops/" +export SKBUILD_CONFIGURE_OPTIONS="-DDETECT_CONDA_ENV=OFF -DFIND_CUGRAPH_CPP=OFF -DCPM_cugraph-ops_SOURCE=${GITHUB_WORKSPACE}/cugraph-ops/" ./ci/build_wheel.sh pylibcugraph python/pylibcugraph diff --git a/cpp/include/cugraph/graph_view.hpp b/cpp/include/cugraph/graph_view.hpp index f30a8b7e2af..d79d4635c54 100644 --- a/cpp/include/cugraph/graph_view.hpp +++ b/cpp/include/cugraph/graph_view.hpp @@ -268,7 +268,11 @@ class graph_base_t { properties_(properties){}; vertex_t number_of_vertices() const { return number_of_vertices_; } - edge_t number_of_edges() const { return number_of_edges_; } + edge_t number_of_edges() const + { + CUGRAPH_EXPECTS(!(this->has_edge_mask()), "unimplemented."); + return number_of_edges_; + } template std::enable_if_t::value, bool> is_valid_vertex(vertex_type v) const @@ -285,6 +289,20 @@ class graph_base_t { bool is_symmetric() const { return properties_.is_symmetric; } bool is_multigraph() const { return properties_.is_multigraph; } + void attach_edge_mask(edge_property_view_t edge_mask_view) + { + edge_mask_view_ = edge_mask_view; + } + + void clear_edge_mask() { edge_mask_view_ = std::nullopt; } + + bool has_edge_mask() const { return edge_mask_view_.has_value(); } + + std::optional> edge_mask_view() const + { + return edge_mask_view_; + } + protected: raft::handle_t const* handle_ptr() const { return handle_ptr_; }; graph_properties_t graph_properties() const { return properties_; } @@ -296,6 +314,8 @@ class graph_base_t { edge_t number_of_edges_{0}; graph_properties_t 
properties_{}; + + std::optional> edge_mask_view_{std::nullopt}; }; } // namespace detail @@ -731,20 +751,6 @@ class graph_view_t edge_mask_view) - { - edge_mask_view_ = edge_mask_view; - } - - void clear_edge_mask() { edge_mask_view_ = std::nullopt; } - - bool has_edge_mask() const { return edge_mask_view_.has_value(); } - - std::optional> edge_mask_view() const - { - return edge_mask_view_; - } - private: std::vector edge_partition_offsets_{}; std::vector edge_partition_indices_{}; @@ -790,8 +796,6 @@ class graph_view_t>, std::optional /* dummy */> local_sorted_unique_edge_dst_vertex_partition_offsets_{std::nullopt}; - - std::optional> edge_mask_view_{std::nullopt}; }; // single-GPU version @@ -1012,28 +1016,12 @@ class graph_view_t edge_mask_view) - { - edge_mask_view_ = edge_mask_view; - } - - void clear_edge_mask() { edge_mask_view_ = std::nullopt; } - - bool has_edge_mask() const { return edge_mask_view_.has_value(); } - - std::optional> edge_mask_view() const - { - return edge_mask_view_; - } - private: edge_t const* offsets_{nullptr}; vertex_t const* indices_{nullptr}; // segment offsets based on vertex degree, relevant only if vertex IDs are renumbered std::optional> segment_offsets_{std::nullopt}; - - std::optional> edge_mask_view_{std::nullopt}; }; } // namespace cugraph diff --git a/cpp/include/cugraph/mtmg/detail/device_shared_wrapper.hpp b/cpp/include/cugraph/mtmg/detail/device_shared_wrapper.hpp index c4cacb401af..5fbe7bc9f01 100644 --- a/cpp/include/cugraph/mtmg/detail/device_shared_wrapper.hpp +++ b/cpp/include/cugraph/mtmg/detail/device_shared_wrapper.hpp @@ -57,10 +57,10 @@ class device_shared_wrapper_t { { std::lock_guard lock(lock_); - auto pos = objects_.find(handle.get_local_rank()); + auto pos = objects_.find(handle.get_rank()); CUGRAPH_EXPECTS(pos == objects_.end(), "Cannot overwrite wrapped object"); - objects_.insert(std::make_pair(handle.get_local_rank(), std::move(obj))); + objects_.insert(std::make_pair(handle.get_rank(), std::move(obj))); } /** @@ -79,7 +79,6 @@ class device_shared_wrapper_t { objects_.insert(std::make_pair(local_rank, std::move(obj))); } - public: /** * @brief Get reference to an object for a particular thread * @@ -90,7 +89,7 @@ class device_shared_wrapper_t { { std::lock_guard lock(lock_); - auto pos = objects_.find(handle.get_local_rank()); + auto pos = objects_.find(handle.get_rank()); CUGRAPH_EXPECTS(pos != objects_.end(), "Uninitialized wrapped object"); return pos->second; @@ -106,7 +105,7 @@ class device_shared_wrapper_t { { std::lock_guard lock(lock_); - auto pos = objects_.find(handle.get_local_rank()); + auto pos = objects_.find(handle.get_rank()); CUGRAPH_EXPECTS(pos != objects_.end(), "Uninitialized wrapped object"); diff --git a/cpp/include/cugraph/mtmg/graph_view.hpp b/cpp/include/cugraph/mtmg/graph_view.hpp index 94347e016ea..8e202ab4904 100644 --- a/cpp/include/cugraph/mtmg/graph_view.hpp +++ b/cpp/include/cugraph/mtmg/graph_view.hpp @@ -27,8 +27,27 @@ namespace mtmg { * @brief Graph view for each GPU */ template -using graph_view_t = detail::device_shared_wrapper_t< - cugraph::graph_view_t>; +class graph_view_t : public detail::device_shared_wrapper_t< + cugraph::graph_view_t> { + public: + /** + * @brief Get the vertex_partition_view for this graph + */ + vertex_partition_view_t get_vertex_partition_view( + cugraph::mtmg::handle_t const& handle) const + { + return this->get(handle).local_vertex_partition_view(); + } + + /** + * @brief Get the vertex_partition_view for this graph + */ + std::vector 
get_vertex_partition_range_lasts( + cugraph::mtmg::handle_t const& handle) const + { + return this->get(handle).vertex_partition_range_lasts(); + } +}; } // namespace mtmg } // namespace cugraph diff --git a/cpp/include/cugraph/mtmg/handle.hpp b/cpp/include/cugraph/mtmg/handle.hpp index 6223de1781d..0b02091a3cc 100644 --- a/cpp/include/cugraph/mtmg/handle.hpp +++ b/cpp/include/cugraph/mtmg/handle.hpp @@ -32,18 +32,19 @@ namespace mtmg { * */ class handle_t { + handle_t(handle_t const&) = delete; + handle_t operator=(handle_t const&) = delete; + public: /** * @brief Constructor * * @param raft_handle Raft handle for the resources * @param thread_rank Rank for this thread + * @param device_id Device id for the device this handle operates on */ - handle_t(raft::handle_t const& raft_handle, int thread_rank, size_t device_id) - : raft_handle_(raft_handle), - thread_rank_(thread_rank), - local_rank_(raft_handle.get_comms().get_rank()), // FIXME: update for multi-node - device_id_(device_id) + handle_t(raft::handle_t const& raft_handle, int thread_rank, rmm::cuda_device_id device_id) + : raft_handle_(raft_handle), thread_rank_(thread_rank), device_id_raii_(device_id) { } @@ -118,18 +119,10 @@ class handle_t { */ int get_rank() const { return raft_handle_.get_comms().get_rank(); } - /** - * @brief Get local gpu rank - * - * @return local gpu rank - */ - int get_local_rank() const { return local_rank_; } - private: raft::handle_t const& raft_handle_; int thread_rank_; - int local_rank_; - size_t device_id_; + rmm::cuda_set_device_raii device_id_raii_; }; } // namespace mtmg diff --git a/cpp/include/cugraph/mtmg/instance_manager.hpp b/cpp/include/cugraph/mtmg/instance_manager.hpp index f819a5a0abe..f60063c4101 100644 --- a/cpp/include/cugraph/mtmg/instance_manager.hpp +++ b/cpp/include/cugraph/mtmg/instance_manager.hpp @@ -47,15 +47,10 @@ class instance_manager_t { ~instance_manager_t() { - int current_device{}; - RAFT_CUDA_TRY(cudaGetDevice(¤t_device)); - for (size_t i = 0; i < nccl_comms_.size(); ++i) { - RAFT_CUDA_TRY(cudaSetDevice(device_ids_[i].value())); + rmm::cuda_set_device_raii local_set_device(device_ids_[i]); RAFT_NCCL_TRY(ncclCommDestroy(*nccl_comms_[i])); } - - RAFT_CUDA_TRY(cudaSetDevice(current_device)); } /** @@ -75,8 +70,7 @@ class instance_manager_t { int gpu_id = local_id % raft_handle_.size(); int thread_id = local_id / raft_handle_.size(); - RAFT_CUDA_TRY(cudaSetDevice(device_ids_[gpu_id].value())); - return handle_t(*raft_handle_[gpu_id], thread_id, static_cast(gpu_id)); + return handle_t(*raft_handle_[gpu_id], thread_id, device_ids_[gpu_id]); } /** diff --git a/cpp/include/cugraph/mtmg/resource_manager.hpp b/cpp/include/cugraph/mtmg/resource_manager.hpp index 127944cf7ba..bc312c9ae77 100644 --- a/cpp/include/cugraph/mtmg/resource_manager.hpp +++ b/cpp/include/cugraph/mtmg/resource_manager.hpp @@ -89,7 +89,7 @@ class resource_manager_t { local_rank_map_.insert(std::pair(global_rank, local_device_id)); - RAFT_CUDA_TRY(cudaSetDevice(local_device_id.value())); + rmm::cuda_set_device_raii local_set_device(local_device_id); // FIXME: There is a bug in the cuda_memory_resource that results in a Hang. // using the pool resource as a work-around. 
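For context on the device-management changes in `mtmg/handle.hpp`, `mtmg/instance_manager.hpp`, and `mtmg/resource_manager.hpp` above: the manual `cudaGetDevice()`/`cudaSetDevice()` save-and-restore pairs are replaced by an RAII guard. Below is a minimal sketch of that pattern, assuming `rmm::cuda_set_device_raii` (as used in this patch) switches to the given device on construction, restores the previously active device on destruction, and is provided by `<rmm/cuda_device.hpp>`; `run_on_device` is a hypothetical helper for illustration only.

```cpp
#include <rmm/cuda_device.hpp>

// Hypothetical helper, not part of the patch: run work on a specific GPU and
// restore whatever device was active before, even on early return or exception.
void run_on_device(rmm::cuda_device_id target_device)
{
  rmm::cuda_set_device_raii set_device{target_device};  // switches the active CUDA device
  // ... launch work on target_device ...
}  // guard destructor restores the previously active device here
```

This is why the explicit `cudaGetDevice`/`cudaSetDevice(current_device)` bookkeeping in `instance_manager_t::~instance_manager_t()` and `resource_manager_t` can simply be dropped.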
@@ -182,14 +182,12 @@ class resource_manager_t { --gpu_row_comm_size; } - int current_device{}; - RAFT_CUDA_TRY(cudaGetDevice(¤t_device)); RAFT_NCCL_TRY(ncclGroupStart()); for (size_t i = 0; i < local_ranks_to_include.size(); ++i) { int rank = local_ranks_to_include[i]; auto pos = local_rank_map_.find(rank); - RAFT_CUDA_TRY(cudaSetDevice(pos->second.value())); + rmm::cuda_set_device_raii local_set_device(pos->second); nccl_comms.push_back(std::make_unique()); handles.push_back( @@ -204,7 +202,6 @@ class resource_manager_t { handles[i].get(), *nccl_comms[i], ranks_to_include.size(), rank); } RAFT_NCCL_TRY(ncclGroupEnd()); - RAFT_CUDA_TRY(cudaSetDevice(current_device)); std::vector running_threads; @@ -217,9 +214,7 @@ class resource_manager_t { &device_ids, &nccl_comms, &handles]() { - int rank = local_ranks_to_include[idx]; - RAFT_CUDA_TRY(cudaSetDevice(device_ids[idx].value())); - + rmm::cuda_set_device_raii local_set_device(device_ids[idx]); cugraph::partition_manager::init_subcomm(*handles[idx], gpu_row_comm_size); }); } diff --git a/cpp/include/cugraph/mtmg/vertex_result_view.hpp b/cpp/include/cugraph/mtmg/vertex_result_view.hpp index a349bb95333..42b80cea62f 100644 --- a/cpp/include/cugraph/mtmg/vertex_result_view.hpp +++ b/cpp/include/cugraph/mtmg/vertex_result_view.hpp @@ -39,11 +39,12 @@ class vertex_result_view_t : public detail::device_shared_device_span_t + template rmm::device_uvector gather( handle_t const& handle, raft::device_span vertices, - cugraph::mtmg::graph_view_t const& graph_view, + std::vector const& vertex_partition_range_lasts, + cugraph::vertex_partition_view_t vertex_partition_view, std::optional>& renumber_map_view); }; diff --git a/cpp/include/cugraph/utilities/misc_utils.cuh b/cpp/include/cugraph/utilities/misc_utils.cuh index a62e8ce85ec..04aeac49c9d 100644 --- a/cpp/include/cugraph/utilities/misc_utils.cuh +++ b/cpp/include/cugraph/utilities/misc_utils.cuh @@ -19,12 +19,15 @@ #include #include +#include #include #include #include #include #include +#include + #include #include #include @@ -43,7 +46,8 @@ std::tuple, std::vector> compute_offset_aligned_ed { auto search_offset_first = thrust::make_transform_iterator( thrust::make_counting_iterator(size_t{1}), - [approx_edge_chunk_size] __device__(auto i) { return i * approx_edge_chunk_size; }); + cuda::proclaim_return_type( + [approx_edge_chunk_size] __device__(auto i) { return i * approx_edge_chunk_size; })); auto num_chunks = (num_edges + approx_edge_chunk_size - 1) / approx_edge_chunk_size; if (num_chunks > 1) { diff --git a/cpp/include/cugraph/utilities/shuffle_comm.cuh b/cpp/include/cugraph/utilities/shuffle_comm.cuh index ab6a54cc1c0..414d9b36992 100644 --- a/cpp/include/cugraph/utilities/shuffle_comm.cuh +++ b/cpp/include/cugraph/utilities/shuffle_comm.cuh @@ -40,6 +40,8 @@ #include #include +#include + #include #include #include @@ -197,12 +199,13 @@ void multi_partition(ValueIterator value_first, value_last, thrust::make_zip_iterator( thrust::make_tuple(group_ids.begin(), intra_partition_offsets.begin())), - [value_to_group_id_op, group_first, counts = counts.data()] __device__(auto value) { - auto group_id = value_to_group_id_op(value); - cuda::std::atomic_ref counter(counts[group_id - group_first]); - return thrust::make_tuple(group_id, - counter.fetch_add(size_t{1}, cuda::std::memory_order_relaxed)); - }); + cuda::proclaim_return_type>( + [value_to_group_id_op, group_first, counts = counts.data()] __device__(auto value) { + auto group_id = value_to_group_id_op(value); + cuda::std::atomic_ref 
counter(counts[group_id - group_first]); + return thrust::make_tuple(group_id, + counter.fetch_add(size_t{1}, cuda::std::memory_order_relaxed)); + })); rmm::device_uvector displacements(num_groups, stream_view); thrust::exclusive_scan( @@ -245,17 +248,19 @@ void multi_partition(KeyIterator key_first, rmm::device_uvector group_ids(num_keys, stream_view); rmm::device_uvector intra_partition_offsets(num_keys, stream_view); thrust::fill(rmm::exec_policy(stream_view), counts.begin(), counts.end(), size_t{0}); - thrust::transform(rmm::exec_policy(stream_view), - key_first, - key_last, - thrust::make_zip_iterator( - thrust::make_tuple(group_ids.begin(), intra_partition_offsets.begin())), - [key_to_group_id_op, group_first, counts = counts.data()] __device__(auto key) { - auto group_id = key_to_group_id_op(key); - cuda::std::atomic_ref counter(counts[group_id - group_first]); - return thrust::make_tuple( - group_id, counter.fetch_add(size_t{1}, cuda::std::memory_order_relaxed)); - }); + thrust::transform( + rmm::exec_policy(stream_view), + key_first, + key_last, + thrust::make_zip_iterator( + thrust::make_tuple(group_ids.begin(), intra_partition_offsets.begin())), + cuda::proclaim_return_type>( + [key_to_group_id_op, group_first, counts = counts.data()] __device__(auto key) { + auto group_id = key_to_group_id_op(key); + cuda::std::atomic_ref counter(counts[group_id - group_first]); + return thrust::make_tuple(group_id, + counter.fetch_add(size_t{1}, cuda::std::memory_order_relaxed)); + })); rmm::device_uvector displacements(num_groups, stream_view); thrust::exclusive_scan( @@ -761,8 +766,9 @@ rmm::device_uvector groupby_and_count(ValueIterator tx_value_first /* [I stream_view); auto group_id_first = thrust::make_transform_iterator( - tx_value_first, - [value_to_group_id_op] __device__(auto value) { return value_to_group_id_op(value); }); + tx_value_first, cuda::proclaim_return_type([value_to_group_id_op] __device__(auto value) { + return value_to_group_id_op(value); + })); rmm::device_uvector d_tx_dst_ranks(num_groups, stream_view); rmm::device_uvector d_tx_value_counts(d_tx_dst_ranks.size(), stream_view); auto rank_count_pair_first = thrust::make_zip_iterator( @@ -795,7 +801,9 @@ rmm::device_uvector groupby_and_count(VertexIterator tx_key_first /* [IN stream_view); auto group_id_first = thrust::make_transform_iterator( - tx_key_first, [key_to_group_id_op] __device__(auto key) { return key_to_group_id_op(key); }); + tx_key_first, cuda::proclaim_return_type([key_to_group_id_op] __device__(auto key) { + return key_to_group_id_op(key); + })); rmm::device_uvector d_tx_dst_ranks(num_groups, stream_view); rmm::device_uvector d_tx_value_counts(d_tx_dst_ranks.size(), stream_view); auto rank_count_pair_first = thrust::make_zip_iterator( diff --git a/cpp/src/community/detail/mis_impl.cuh b/cpp/src/community/detail/mis_impl.cuh index bcd71af5a08..2659a982183 100644 --- a/cpp/src/community/detail/mis_impl.cuh +++ b/cpp/src/community/detail/mis_impl.cuh @@ -37,6 +37,8 @@ #include #include +#include + #include namespace cugraph { @@ -78,13 +80,13 @@ rmm::device_uvector maximal_independent_set( thrust::copy(handle.get_thrust_policy(), vertex_begin, vertex_end, ranks.begin()); // Set ranks of zero out-degree vetices to std::numeric_limits::lowest() - thrust::transform_if( - handle.get_thrust_policy(), - out_degrees.begin(), - out_degrees.end(), - ranks.begin(), - [] __device__(auto) { return std::numeric_limits::lowest(); }, - [] __device__(auto deg) { return deg == 0; }); + 
thrust::transform_if(handle.get_thrust_policy(), + out_degrees.begin(), + out_degrees.end(), + ranks.begin(), + cuda::proclaim_return_type( + [] __device__(auto) { return std::numeric_limits::lowest(); }), + [] __device__(auto deg) { return deg == 0; }); out_degrees.resize(0, handle.get_stream()); out_degrees.shrink_to_fit(handle.get_stream()); diff --git a/cpp/src/community/detail/refine_impl.cuh b/cpp/src/community/detail/refine_impl.cuh index ebaae498d04..eb874657f01 100644 --- a/cpp/src/community/detail/refine_impl.cuh +++ b/cpp/src/community/detail/refine_impl.cuh @@ -46,6 +46,8 @@ #include #include +#include + CUCO_DECLARE_BITWISE_COMPARABLE(float) CUCO_DECLARE_BITWISE_COMPARABLE(double) // FIXME: a temporary workaround for a compiler error, should be deleted once cuco gets patched. @@ -213,16 +215,17 @@ refine_clustering( : detail::edge_minor_property_view_t( louvain_assignment_of_vertices.data(), vertex_t{0}), *edge_weight_view, - [] __device__(auto src, auto dst, auto src_cluster, auto dst_cluster, auto wt) { - weight_t weighted_cut_contribution{0}; + cuda::proclaim_return_type( + [] __device__(auto src, auto dst, auto src_cluster, auto dst_cluster, auto wt) { + weight_t weighted_cut_contribution{0}; - if (src == dst) // self loop - weighted_cut_contribution = 0; - else if (src_cluster == dst_cluster) - weighted_cut_contribution = wt; + if (src == dst) // self loop + weighted_cut_contribution = 0; + else if (src_cluster == dst_cluster) + weighted_cut_contribution = wt; - return weighted_cut_contribution; - }, + return weighted_cut_contribution; + }), weight_t{0}, cugraph::reduce_op::plus{}, weighted_cut_of_vertices_to_louvain.begin()); @@ -243,13 +246,14 @@ refine_clustering( wcut_deg_and_cluster_vol_triple_begin, wcut_deg_and_cluster_vol_triple_end, singleton_and_connected_flags.begin(), - [resolution, total_edge_weight] __device__(auto wcut_wdeg_and_louvain_volume) { + cuda::proclaim_return_type([resolution, total_edge_weight] __device__( + auto wcut_wdeg_and_louvain_volume) { auto wcut = thrust::get<0>(wcut_wdeg_and_louvain_volume); auto wdeg = thrust::get<1>(wcut_wdeg_and_louvain_volume); auto louvain_volume = thrust::get<2>(wcut_wdeg_and_louvain_volume); - return wcut > - (resolution * wdeg * (louvain_volume - wdeg) / total_edge_weight); - }); + return static_cast( + wcut > (resolution * wdeg * (louvain_volume - wdeg) / total_edge_weight)); + })); edge_src_property_t src_louvain_cluster_weight_cache(handle); edge_src_property_t src_cut_to_louvain_cache(handle); @@ -718,11 +722,12 @@ refine_clustering( vertices_in_mis.begin(), vertices_in_mis.end(), dst_vertices.begin(), - [dst_first = thrust::get<1>(gain_and_dst_first.get_iterator_tuple()), - v_first = graph_view.local_vertex_partition_range_first()] __device__(vertex_t v) { - auto dst = *(dst_first + v - v_first); - return dst; - }); + cuda::proclaim_return_type( + [dst_first = thrust::get<1>(gain_and_dst_first.get_iterator_tuple()), + v_first = graph_view.local_vertex_partition_range_first()] __device__(vertex_t v) { + auto dst = *(dst_first + v - v_first); + return dst; + })); cugraph::resize_dataframe_buffer(gain_and_dst_output_pairs, 0, handle.get_stream()); cugraph::shrink_to_fit_dataframe_buffer(gain_and_dst_output_pairs, handle.get_stream()); diff --git a/cpp/src/detail/collect_local_vertex_values.cu b/cpp/src/detail/collect_local_vertex_values.cu index 9d5d2cb553b..795902dfd87 100644 --- a/cpp/src/detail/collect_local_vertex_values.cu +++ b/cpp/src/detail/collect_local_vertex_values.cu @@ -19,6 +19,8 @@ #include 
#include +#include + namespace cugraph { namespace detail { @@ -64,7 +66,8 @@ rmm::device_uvector collect_local_vertex_values_from_ext_vertex_value_p auto vertex_iterator = thrust::make_transform_iterator( d_vertices.begin(), - [local_vertex_first] __device__(vertex_t v) { return v - local_vertex_first; }); + cuda::proclaim_return_type( + [local_vertex_first] __device__(vertex_t v) { return v - local_vertex_first; })); d_local_values.resize(local_vertex_last - local_vertex_first, handle.get_stream()); thrust::fill( diff --git a/cpp/src/generators/erdos_renyi_generator.cu b/cpp/src/generators/erdos_renyi_generator.cu index 6d847ae0bde..8448eeaf960 100644 --- a/cpp/src/generators/erdos_renyi_generator.cu +++ b/cpp/src/generators/erdos_renyi_generator.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -28,6 +28,8 @@ #include #include +#include + namespace cugraph { template @@ -42,12 +44,13 @@ generate_erdos_renyi_graph_edgelist_gnp(raft::handle_t const& handle, "Implementation cannot support specified value"); auto random_iterator = thrust::make_transform_iterator( - thrust::make_counting_iterator(0), [seed] __device__(size_t index) { + thrust::make_counting_iterator(0), + cuda::proclaim_return_type([seed] __device__(size_t index) { thrust::default_random_engine rng(seed); thrust::uniform_real_distribution dist(0.0, 1.0); rng.discard(index); return dist(rng); - }); + })); size_t count = thrust::count_if(handle.get_thrust_policy(), random_iterator, @@ -69,13 +72,14 @@ generate_erdos_renyi_graph_edgelist_gnp(raft::handle_t const& handle, indices_v.begin(), indices_v.end(), thrust::make_zip_iterator(thrust::make_tuple(src_v.begin(), src_v.end())), - [num_vertices] __device__(size_t index) { - size_t src = index / num_vertices; - size_t dst = index % num_vertices; - - return thrust::make_tuple(static_cast(src), - static_cast(dst)); - }); + cuda::proclaim_return_type>( + [num_vertices] __device__(size_t index) { + size_t src = index / num_vertices; + size_t dst = index % num_vertices; + + return thrust::make_tuple(static_cast(src), + static_cast(dst)); + })); handle.sync_stream(); diff --git a/cpp/src/generators/simple_generators.cu b/cpp/src/generators/simple_generators.cu index 6dba63909c3..65647be5de0 100644 --- a/cpp/src/generators/simple_generators.cu +++ b/cpp/src/generators/simple_generators.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
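A recurring change throughout this patch (`misc_utils.cuh`, `shuffle_comm.cuh`, the graph generators, the MTMG and Leiden-refinement code) is wrapping `__device__` lambdas passed to Thrust in `cuda::proclaim_return_type`. The sketch below shows the pattern in isolation, assuming the libcu++ header `<cuda/functional>` and nvcc's extended-lambda mode; `make_scaled_iterator` is a hypothetical name used only for illustration.

```cpp
#include <cstddef>
#include <cuda/functional>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>

// The return type of an extended __device__ lambda cannot be deduced from host
// code, so thrust::make_transform_iterator needs it proclaimed explicitly.
auto make_scaled_iterator(double scale)
{
  return thrust::make_transform_iterator(
    thrust::make_counting_iterator(size_t{0}),
    cuda::proclaim_return_type<double>(
      [scale] __device__(size_t i) { return static_cast<double>(i) * scale; }));
}
```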
@@ -27,6 +27,8 @@ #include #include +#include + #include namespace cugraph { @@ -264,23 +266,24 @@ generate_complete_graph_edgelist( auto transform_iter = thrust::make_transform_iterator( thrust::make_counting_iterator(0), - [base_vertex_id, num_vertices, invalid_vertex] __device__(size_t index) { - size_t graph_index = index / (num_vertices * num_vertices); - size_t local_index = index % (num_vertices * num_vertices); - - vertex_t src = base_vertex_id + static_cast(local_index / num_vertices); - vertex_t dst = base_vertex_id + static_cast(local_index % num_vertices); - - if (src == dst) { - src = invalid_vertex; - dst = invalid_vertex; - } else { - src += (graph_index * num_vertices); - dst += (graph_index * num_vertices); - } - - return thrust::make_tuple(src, dst); - }); + cuda::proclaim_return_type>( + [base_vertex_id, num_vertices, invalid_vertex] __device__(size_t index) { + size_t graph_index = index / (num_vertices * num_vertices); + size_t local_index = index % (num_vertices * num_vertices); + + vertex_t src = base_vertex_id + static_cast(local_index / num_vertices); + vertex_t dst = base_vertex_id + static_cast(local_index % num_vertices); + + if (src == dst) { + src = invalid_vertex; + dst = invalid_vertex; + } else { + src += (graph_index * num_vertices); + dst += (graph_index * num_vertices); + } + + return thrust::make_tuple(src, dst); + })); output_iterator = thrust::copy_if(handle.get_thrust_policy(), transform_iter, diff --git a/cpp/src/link_analysis/hits_impl.cuh b/cpp/src/link_analysis/hits_impl.cuh index 674046745b1..5cdf1b9dc6a 100644 --- a/cpp/src/link_analysis/hits_impl.cuh +++ b/cpp/src/link_analysis/hits_impl.cuh @@ -80,6 +80,7 @@ std::tuple hits(raft::handle_t const& handle, if (num_vertices == 0) { return std::make_tuple(diff_sum, final_iteration_count); } CUGRAPH_EXPECTS(epsilon >= 0.0, "Invalid input argument: epsilon should be non-negative."); + auto tolerance = static_cast(graph_view.number_of_vertices()) * epsilon; // Check validity of initial guess if supplied if (has_initial_hubs_guess && do_expensive_check) { @@ -171,7 +172,7 @@ std::tuple hits(raft::handle_t const& handle, std::swap(prev_hubs, curr_hubs); iter++; - if (diff_sum < epsilon) { + if (diff_sum < tolerance) { break; } else if (iter >= max_iterations) { CUGRAPH_FAIL("HITS failed to converge."); diff --git a/cpp/src/mtmg/vertex_result.cu b/cpp/src/mtmg/vertex_result.cu index 97fcd291c87..414f1bdfa88 100644 --- a/cpp/src/mtmg/vertex_result.cu +++ b/cpp/src/mtmg/vertex_result.cu @@ -21,21 +21,21 @@ #include +#include #include namespace cugraph { namespace mtmg { template -template +template rmm::device_uvector vertex_result_view_t::gather( handle_t const& handle, raft::device_span vertices, - cugraph::mtmg::graph_view_t const& graph_view, + std::vector const& vertex_partition_range_lasts, + vertex_partition_view_t vertex_partition_view, std::optional>& renumber_map_view) { - auto this_gpu_graph_view = graph_view.get(handle); - rmm::device_uvector local_vertices(vertices.size(), handle.get_stream()); rmm::device_uvector vertex_gpu_ids(vertices.size(), handle.get_stream()); rmm::device_uvector vertex_pos(vertices.size(), handle.get_stream()); @@ -47,11 +47,11 @@ rmm::device_uvector vertex_result_view_t::gather( cugraph::detail::sequence_fill( handle.get_stream(), vertex_pos.data(), vertex_pos.size(), size_t{0}); - rmm::device_uvector d_vertex_partition_range_lasts( - this_gpu_graph_view.vertex_partition_range_lasts().size(), handle.get_stream()); + rmm::device_uvector 
d_vertex_partition_range_lasts(vertex_partition_range_lasts.size(), + handle.get_stream()); raft::update_device(d_vertex_partition_range_lasts.data(), - this_gpu_graph_view.vertex_partition_range_lasts().data(), - this_gpu_graph_view.vertex_partition_range_lasts().size(), + vertex_partition_range_lasts.data(), + vertex_partition_range_lasts.size(), handle.get_stream()); if (renumber_map_view) { @@ -60,8 +60,8 @@ rmm::device_uvector vertex_result_view_t::gather( local_vertices.data(), local_vertices.size(), renumber_map_view->get(handle).data(), - this_gpu_graph_view.local_vertex_partition_range_first(), - this_gpu_graph_view.local_vertex_partition_range_last()); + vertex_partition_view.local_vertex_partition_range_first(), + vertex_partition_view.local_vertex_partition_range_last()); } auto const major_comm_size = @@ -89,13 +89,14 @@ rmm::device_uvector vertex_result_view_t::gather( auto& wrapped = this->get(handle); - auto vertex_partition = vertex_partition_device_view_t( - this_gpu_graph_view.local_vertex_partition_view()); + auto vertex_partition = + vertex_partition_device_view_t(vertex_partition_view); - auto iter = - thrust::make_transform_iterator(local_vertices.begin(), [vertex_partition] __device__(auto v) { + auto iter = thrust::make_transform_iterator( + local_vertices.begin(), + cuda::proclaim_return_type([vertex_partition] __device__(auto v) { return vertex_partition.local_vertex_partition_offset_from_vertex_nocheck(v); - }); + })); thrust::gather(handle.get_thrust_policy(), iter, @@ -112,7 +113,7 @@ rmm::device_uvector vertex_result_view_t::gather( vertex_gpu_ids.begin(), vertex_gpu_ids.end(), thrust::make_zip_iterator(local_vertices.begin(), vertex_pos.begin(), tmp_result.begin()), - [] __device__(int gpu) { return gpu; }, + thrust::identity{}, handle.get_stream()); // @@ -130,37 +131,85 @@ rmm::device_uvector vertex_result_view_t::gather( template rmm::device_uvector vertex_result_view_t::gather( handle_t const& handle, raft::device_span vertices, - cugraph::mtmg::graph_view_t const& graph_view, + std::vector const& vertex_partition_range_lasts, + vertex_partition_view_t vertex_partition_view, std::optional>& renumber_map_view); +template rmm::device_uvector vertex_result_view_t::gather( + handle_t const& handle, + raft::device_span vertices, + std::vector const& vertex_partition_range_lasts, + vertex_partition_view_t vertex_partition_view, + std::optional>& renumber_map_view); + template rmm::device_uvector vertex_result_view_t::gather( handle_t const& handle, raft::device_span vertices, - cugraph::mtmg::graph_view_t const& graph_view, + std::vector const& vertex_partition_range_lasts, + vertex_partition_view_t vertex_partition_view, std::optional>& renumber_map_view); template rmm::device_uvector vertex_result_view_t::gather( handle_t const& handle, raft::device_span vertices, - cugraph::mtmg::graph_view_t const& graph_view, + std::vector const& vertex_partition_range_lasts, + vertex_partition_view_t vertex_partition_view, std::optional>& renumber_map_view); -template rmm::device_uvector vertex_result_view_t::gather( +template rmm::device_uvector vertex_result_view_t::gather( handle_t const& handle, raft::device_span vertices, - cugraph::mtmg::graph_view_t const& graph_view, + std::vector const& vertex_partition_range_lasts, + vertex_partition_view_t vertex_partition_view, std::optional>& renumber_map_view); -template rmm::device_uvector vertex_result_view_t::gather( +template rmm::device_uvector vertex_result_view_t::gather( + handle_t const& handle, + raft::device_span 
vertices, + std::vector const& vertex_partition_range_lasts, + vertex_partition_view_t vertex_partition_view, + std::optional>& renumber_map_view); + +template rmm::device_uvector vertex_result_view_t::gather( handle_t const& handle, raft::device_span vertices, - cugraph::mtmg::graph_view_t const& graph_view, + std::vector const& vertex_partition_range_lasts, + vertex_partition_view_t vertex_partition_view, std::optional>& renumber_map_view); -template rmm::device_uvector vertex_result_view_t::gather( +template rmm::device_uvector vertex_result_view_t::gather( + handle_t const& handle, + raft::device_span vertices, + std::vector const& vertex_partition_range_lasts, + vertex_partition_view_t vertex_partition_view, + std::optional>& renumber_map_view); + +template rmm::device_uvector vertex_result_view_t::gather( + handle_t const& handle, + raft::device_span vertices, + std::vector const& vertex_partition_range_lasts, + vertex_partition_view_t vertex_partition_view, + std::optional>& renumber_map_view); + +template rmm::device_uvector vertex_result_view_t::gather( + handle_t const& handle, + raft::device_span vertices, + std::vector const& vertex_partition_range_lasts, + vertex_partition_view_t vertex_partition_view, + std::optional>& renumber_map_view); + +template rmm::device_uvector vertex_result_view_t::gather( + handle_t const& handle, + raft::device_span vertices, + std::vector const& vertex_partition_range_lasts, + vertex_partition_view_t vertex_partition_view, + std::optional>& renumber_map_view); + +template rmm::device_uvector vertex_result_view_t::gather( handle_t const& handle, raft::device_span vertices, - cugraph::mtmg::graph_view_t const& graph_view, + std::vector const& vertex_partition_range_lasts, + vertex_partition_view_t vertex_partition_view, std::optional>& renumber_map_view); } // namespace mtmg diff --git a/cpp/src/prims/count_if_e.cuh b/cpp/src/prims/count_if_e.cuh index f6e4bc9bead..9cff4f5eceb 100644 --- a/cpp/src/prims/count_if_e.cuh +++ b/cpp/src/prims/count_if_e.cuh @@ -74,8 +74,6 @@ typename GraphViewType::edge_type count_if_e(raft::handle_t const& handle, using vertex_t = typename GraphViewType::vertex_type; using edge_t = typename GraphViewType::edge_type; - CUGRAPH_EXPECTS(!graph_view.has_edge_mask(), "unimplemented."); - if (do_expensive_check) { // currently, nothing to do } diff --git a/cpp/src/prims/fill_edge_property.cuh b/cpp/src/prims/fill_edge_property.cuh index d446944b65b..e6875576044 100644 --- a/cpp/src/prims/fill_edge_property.cuh +++ b/cpp/src/prims/fill_edge_property.cuh @@ -15,6 +15,7 @@ */ #pragma once +#include #include #include #include @@ -23,6 +24,7 @@ #include #include +#include #include @@ -38,21 +40,78 @@ void fill_edge_property(raft::handle_t const& handle, { static_assert(std::is_same_v); + using edge_t = typename GraphViewType::edge_type; + + auto edge_mask_view = graph_view.edge_mask_view(); + auto value_firsts = edge_property_output.value_firsts(); auto edge_counts = edge_property_output.edge_counts(); for (size_t i = 0; i < graph_view.number_of_local_edge_partitions(); ++i) { + auto edge_partition_e_mask = + edge_mask_view + ? thrust::make_optional< + detail::edge_partition_edge_property_device_view_t>( + *edge_mask_view, i) + : thrust::nullopt; + if constexpr (cugraph::has_packed_bool_element< std::remove_reference_t, T>()) { static_assert(std::is_arithmetic_v, "unimplemented for thrust::tuple types."); auto packed_input = input ? 
packed_bool_full_mask() : packed_bool_empty_mask(); - thrust::fill_n(handle.get_thrust_policy(), - value_firsts[i], - packed_bool_size(static_cast(edge_counts[i])), - packed_input); + auto rem = edge_counts[i] % packed_bools_per_word(); + if (edge_partition_e_mask) { + auto input_first = + thrust::make_zip_iterator(value_firsts[i], (*edge_partition_e_mask).value_first()); + thrust::transform(handle.get_thrust_policy(), + input_first, + input_first + packed_bool_size(static_cast(edge_counts[i] - rem)), + value_firsts[i], + [packed_input] __device__(thrust::tuple pair) { + auto old_value = thrust::get<0>(pair); + auto mask = thrust::get<1>(pair); + return (old_value & ~mask) | (packed_input & mask); + }); + if (rem > 0) { + thrust::transform( + handle.get_thrust_policy(), + input_first + packed_bool_size(static_cast(edge_counts[i] - rem)), + input_first + packed_bool_size(static_cast(edge_counts[i])), + value_firsts[i] + packed_bool_size(static_cast(edge_counts[i] - rem)), + [packed_input, rem] __device__(thrust::tuple pair) { + auto old_value = thrust::get<0>(pair); + auto mask = thrust::get<1>(pair); + return ((old_value & ~mask) | (packed_input & mask)) & packed_bool_partial_mask(rem); + }); + } + } else { + thrust::fill_n(handle.get_thrust_policy(), + value_firsts[i], + packed_bool_size(static_cast(edge_counts[i] - rem)), + packed_input); + if (rem > 0) { + thrust::fill_n( + handle.get_thrust_policy(), + value_firsts[i] + packed_bool_size(static_cast(edge_counts[i] - rem)), + 1, + packed_input & packed_bool_partial_mask(rem)); + } + } } else { - thrust::fill_n( - handle.get_thrust_policy(), value_firsts[i], static_cast(edge_counts[i]), input); + if (edge_partition_e_mask) { + thrust::transform_if(handle.get_thrust_policy(), + thrust::make_constant_iterator(input), + thrust::make_constant_iterator(input) + edge_counts[i], + thrust::make_counting_iterator(edge_t{0}), + value_firsts[i], + thrust::identity{}, + [edge_partition_e_mask = *edge_partition_e_mask] __device__(edge_t i) { + return edge_partition_e_mask.get(i); + }); + } else { + thrust::fill_n( + handle.get_thrust_policy(), value_firsts[i], static_cast(edge_counts[i]), input); + } } } } @@ -79,8 +138,6 @@ void fill_edge_property(raft::handle_t const& handle, edge_property_t& edge_property_output, bool do_expensive_check = false) { - CUGRAPH_EXPECTS(!graph_view.has_edge_mask(), "unimplemented."); - if (do_expensive_check) { // currently, nothing to do } diff --git a/cpp/src/prims/per_v_random_select_transform_outgoing_e.cuh b/cpp/src/prims/per_v_random_select_transform_outgoing_e.cuh index 5fee97790f1..4c5c43c7d1e 100644 --- a/cpp/src/prims/per_v_random_select_transform_outgoing_e.cuh +++ b/cpp/src/prims/per_v_random_select_transform_outgoing_e.cuh @@ -44,6 +44,8 @@ #include #include +#include + #include #include @@ -596,8 +598,9 @@ rmm::device_uvector get_sampling_index_without_replacement( multiplier_t{high_partition_over_sampling_K}), thrust::make_transform_iterator( thrust::make_counting_iterator(size_t{0}), - [high_partition_over_sampling_K, unique_counts = unique_counts.data()] __device__( - size_t i) { return i * high_partition_over_sampling_K + unique_counts[i]; }), + cuda::proclaim_return_type( + [high_partition_over_sampling_K, unique_counts = unique_counts.data()] __device__( + size_t i) { return i * high_partition_over_sampling_K + unique_counts[i]; })), handle.get_stream()); if (tmp_storage_bytes > d_tmp_storage.size()) { d_tmp_storage = rmm::device_uvector(tmp_storage_bytes, handle.get_stream()); @@ -615,8 +618,9 @@ 
rmm::device_uvector get_sampling_index_without_replacement( multiplier_t{high_partition_over_sampling_K}), thrust::make_transform_iterator( thrust::make_counting_iterator(size_t{0}), - [high_partition_over_sampling_K, unique_counts = unique_counts.data()] __device__( - size_t i) { return i * high_partition_over_sampling_K + unique_counts[i]; }), + cuda::proclaim_return_type( + [high_partition_over_sampling_K, unique_counts = unique_counts.data()] __device__( + size_t i) { return i * high_partition_over_sampling_K + unique_counts[i]; })), handle.get_stream()); // copy the neighbor indices back to sample_nbr_indices diff --git a/cpp/src/prims/per_v_transform_reduce_incoming_outgoing_e.cuh b/cpp/src/prims/per_v_transform_reduce_incoming_outgoing_e.cuh index 0b6c6a554bb..5de2dbc18e2 100644 --- a/cpp/src/prims/per_v_transform_reduce_incoming_outgoing_e.cuh +++ b/cpp/src/prims/per_v_transform_reduce_incoming_outgoing_e.cuh @@ -51,6 +51,8 @@ #include #include +#include + #include #include #include @@ -940,16 +942,19 @@ void per_v_transform_reduce_e(raft::handle_t const& handle, minor_init); auto value_first = thrust::make_transform_iterator( view.value_first(), - [reduce_op, minor_init] __device__(auto val) { return reduce_op(val, minor_init); }); - thrust::scatter( - handle.get_thrust_policy(), - value_first + (*minor_key_offsets)[i], - value_first + (*minor_key_offsets)[i + 1], - thrust::make_transform_iterator( - (*(view.keys())).begin() + (*minor_key_offsets)[i], - [key_first = graph_view.vertex_partition_range_first( - this_segment_vertex_partition_id)] __device__(auto key) { return key - key_first; }), - tx_buffer_first); + cuda::proclaim_return_type( + [reduce_op, minor_init] __device__(auto val) { return reduce_op(val, minor_init); })); + thrust::scatter(handle.get_thrust_policy(), + value_first + (*minor_key_offsets)[i], + value_first + (*minor_key_offsets)[i + 1], + thrust::make_transform_iterator( + (*(view.keys())).begin() + (*minor_key_offsets)[i], + cuda::proclaim_return_type( + [key_first = graph_view.vertex_partition_range_first( + this_segment_vertex_partition_id)] __device__(auto key) { + return key - key_first; + })), + tx_buffer_first); device_reduce(major_comm, tx_buffer_first, vertex_value_output_first, diff --git a/cpp/src/prims/transform_e.cuh b/cpp/src/prims/transform_e.cuh index edacdc8a970..c6623621d24 100644 --- a/cpp/src/prims/transform_e.cuh +++ b/cpp/src/prims/transform_e.cuh @@ -16,10 +16,12 @@ #pragma once #include +#include #include #include #include #include +#include #include #include @@ -44,6 +46,7 @@ template __global__ void transform_e_packed_bool( @@ -53,6 +56,7 @@ __global__ void transform_e_packed_bool( EdgePartitionSrcValueInputWrapper edge_partition_src_value_input, EdgePartitionDstValueInputWrapper edge_partition_dst_value_input, EdgePartitionEdgeValueInputWrapper edge_partition_e_value_input, + thrust::optional edge_partition_e_mask, EdgePartitionEdgeValueOutputWrapper edge_partition_e_value_output, EdgeOp e_op) { @@ -68,11 +72,14 @@ __global__ void transform_e_packed_bool( auto num_edges = edge_partition.number_of_edges(); while (idx < static_cast(packed_bool_size(num_edges))) { + auto edge_mask = packed_bool_full_mask(); + if (edge_partition_e_mask) { edge_mask = *((*edge_partition_e_mask).value_first() + idx); } + auto local_edge_idx = idx * static_cast(packed_bools_per_word()) + static_cast(lane_id); - uint32_t mask{0}; int predicate{0}; - if (local_edge_idx < num_edges) { + + if ((local_edge_idx < num_edges) && (edge_mask & 
packed_bool_mask(lane_id))) { auto major_idx = edge_partition.major_idx_from_local_edge_idx_nocheck(local_edge_idx); auto major = edge_partition.major_from_major_idx_nocheck(major_idx); auto major_offset = edge_partition.major_offset_from_major_nocheck(major); @@ -91,8 +98,15 @@ __global__ void transform_e_packed_bool( ? int{1} : int{0}; } - mask = __ballot_sync(uint32_t{0xffffffff}, predicate); - if (lane_id == 0) { *(edge_partition_e_value_output.value_first() + idx) = mask; } + uint32_t new_val = __ballot_sync(uint32_t{0xffffffff}, predicate); + if (lane_id == 0) { + if (edge_mask == packed_bool_full_mask()) { + *(edge_partition_e_value_output.value_first() + idx) = new_val; + } else { + auto old_val = *(edge_partition_e_value_output.value_first() + idx); + *(edge_partition_e_value_output.value_first() + idx) = (old_val & ~edge_mask) | new_val; + } + } idx += static_cast(gridDim.x * (blockDim.x / raft::warp_size())); } @@ -178,12 +192,18 @@ void transform_e(raft::handle_t const& handle, typename EdgeValueOutputWrapper::value_iterator, typename EdgeValueOutputWrapper::value_type>; - CUGRAPH_EXPECTS(!graph_view.has_edge_mask(), "unimplemented."); + auto edge_mask_view = graph_view.edge_mask_view(); for (size_t i = 0; i < graph_view.number_of_local_edge_partitions(); ++i) { auto edge_partition = edge_partition_device_view_t( graph_view.local_edge_partition_view(i)); + auto edge_partition_e_mask = + edge_mask_view + ? thrust::make_optional< + detail::edge_partition_edge_property_device_view_t>( + *edge_mask_view, i) + : thrust::nullopt; edge_partition_src_input_device_view_t edge_partition_src_value_input{}; edge_partition_dst_input_device_view_t edge_partition_dst_value_input{}; @@ -214,35 +234,40 @@ void transform_e(raft::handle_t const& handle, edge_partition_src_value_input, edge_partition_dst_value_input, edge_partition_e_value_input, + edge_partition_e_mask, edge_partition_e_value_output, e_op); } } else { - thrust::transform( + thrust::for_each( handle.get_thrust_policy(), thrust::make_counting_iterator(edge_t{0}), thrust::make_counting_iterator(num_edges), - edge_partition_e_value_output.value_first(), [e_op, edge_partition, edge_partition_src_value_input, edge_partition_dst_value_input, - edge_partition_e_value_input] __device__(edge_t i) { - auto major_idx = edge_partition.major_idx_from_local_edge_idx_nocheck(i); - auto major = edge_partition.major_from_major_idx_nocheck(major_idx); - auto major_offset = edge_partition.major_offset_from_major_nocheck(major); - auto minor = *(edge_partition.indices() + i); - auto minor_offset = edge_partition.minor_offset_from_minor_nocheck(minor); - - auto src = GraphViewType::is_storage_transposed ? minor : major; - auto dst = GraphViewType::is_storage_transposed ? major : minor; - auto src_offset = GraphViewType::is_storage_transposed ? minor_offset : major_offset; - auto dst_offset = GraphViewType::is_storage_transposed ? 
major_offset : minor_offset; - return e_op(src, - dst, - edge_partition_src_value_input.get(src_offset), - edge_partition_dst_value_input.get(dst_offset), - edge_partition_e_value_input.get(i)); + edge_partition_e_value_input, + edge_partition_e_mask, + edge_partition_e_value_output] __device__(edge_t i) { + if (!edge_partition_e_mask || (*edge_partition_e_mask).get(i)) { + auto major_idx = edge_partition.major_idx_from_local_edge_idx_nocheck(i); + auto major = edge_partition.major_from_major_idx_nocheck(major_idx); + auto major_offset = edge_partition.major_offset_from_major_nocheck(major); + auto minor = *(edge_partition.indices() + i); + auto minor_offset = edge_partition.minor_offset_from_minor_nocheck(minor); + + auto src = GraphViewType::is_storage_transposed ? minor : major; + auto dst = GraphViewType::is_storage_transposed ? major : minor; + auto src_offset = GraphViewType::is_storage_transposed ? minor_offset : major_offset; + auto dst_offset = GraphViewType::is_storage_transposed ? major_offset : minor_offset; + auto e_op_result = e_op(src, + dst, + edge_partition_src_value_input.get(src_offset), + edge_partition_dst_value_input.get(dst_offset), + edge_partition_e_value_input.get(i)); + edge_partition_e_value_output.set(i, e_op_result); + } }); } } @@ -336,14 +361,12 @@ void transform_e(raft::handle_t const& handle, typename EdgeValueOutputWrapper::value_iterator, typename EdgeValueOutputWrapper::value_type>; - CUGRAPH_EXPECTS(!graph_view.has_edge_mask(), "unimplemented."); - auto major_first = GraphViewType::is_storage_transposed ? edge_list.dst_begin() : edge_list.src_begin(); auto minor_first = GraphViewType::is_storage_transposed ? edge_list.src_begin() : edge_list.dst_begin(); - auto edge_first = thrust::make_zip_iterator(thrust::make_tuple(major_first, minor_first)); + auto edge_first = thrust::make_zip_iterator(major_first, minor_first); if (do_expensive_check) { CUGRAPH_EXPECTS( @@ -382,10 +405,18 @@ void transform_e(raft::handle_t const& handle, edge_partition_offsets.back() = edge_list.size(); } + auto edge_mask_view = graph_view.edge_mask_view(); + for (size_t i = 0; i < graph_view.number_of_local_edge_partitions(); ++i) { auto edge_partition = edge_partition_device_view_t( graph_view.local_edge_partition_view(i)); + auto edge_partition_e_mask = + edge_mask_view + ? 
thrust::make_optional< + detail::edge_partition_edge_property_device_view_t>( + *edge_mask_view, i) + : thrust::nullopt; if (do_expensive_check) { CUGRAPH_EXPECTS( @@ -393,7 +424,8 @@ void transform_e(raft::handle_t const& handle, handle.get_thrust_policy(), edge_first + edge_partition_offsets[i], edge_first + edge_partition_offsets[i + 1], - [edge_partition] __device__(thrust::tuple edge) { + [edge_partition, + edge_partition_e_mask] __device__(thrust::tuple edge) { auto major = thrust::get<0>(edge); auto minor = thrust::get<1>(edge); vertex_t major_idx{}; @@ -416,8 +448,19 @@ void transform_e(raft::handle_t const& handle, edge_t edge_offset{}; edge_t local_degree{}; thrust::tie(indices, edge_offset, local_degree) = edge_partition.local_edges(major_idx); - auto it = thrust::lower_bound(thrust::seq, indices, indices + local_degree, minor); - return *it != minor; + auto lower_it = + thrust::lower_bound(thrust::seq, indices, indices + local_degree, minor); + if (*lower_it != minor) { return true; } + if (edge_partition_e_mask) { + auto upper_it = + thrust::upper_bound(thrust::seq, lower_it, indices + local_degree, minor); + if (detail::count_set_bits((*edge_partition_e_mask).value_first(), + edge_offset + thrust::distance(indices, lower_it), + thrust::distance(lower_it, upper_it)) == 0) { + return true; + } + } + return false; }) == 0, "Invalid input arguments: edge_list contains edges that do not exist in the input graph."); } @@ -446,6 +489,7 @@ void transform_e(raft::handle_t const& handle, edge_partition_src_value_input, edge_partition_dst_value_input, edge_partition_e_value_input, + edge_partition_e_mask, edge_partition_e_value_output] __device__(thrust::tuple edge) { auto major = thrust::get<0>(edge); auto minor = thrust::get<1>(edge); @@ -469,7 +513,7 @@ void transform_e(raft::handle_t const& handle, edge_t local_degree{}; thrust::tie(indices, edge_offset, local_degree) = edge_partition.local_edges(major_idx); auto lower_it = thrust::lower_bound(thrust::seq, indices, indices + local_degree, minor); - auto upper_it = thrust::upper_bound(thrust::seq, indices, indices + local_degree, minor); + auto upper_it = thrust::upper_bound(thrust::seq, lower_it, indices + local_degree, minor); auto src = GraphViewType::is_storage_transposed ? minor : major; auto dst = GraphViewType::is_storage_transposed ? 
major : minor; @@ -478,14 +522,17 @@ void transform_e(raft::handle_t const& handle, for (auto it = lower_it; it != upper_it; ++it) { assert(*it == minor); - auto e_op_result = - e_op(src, - dst, - edge_partition_src_value_input.get(src_offset), - edge_partition_dst_value_input.get(dst_offset), - edge_partition_e_value_input.get(edge_offset + thrust::distance(indices, it))); - edge_partition_e_value_output.set(edge_offset + thrust::distance(indices, it), - e_op_result); + if (!edge_partition_e_mask || + ((*edge_partition_e_mask).get(edge_offset + thrust::distance(indices, it)))) { + auto e_op_result = + e_op(src, + dst, + edge_partition_src_value_input.get(src_offset), + edge_partition_dst_value_input.get(dst_offset), + edge_partition_e_value_input.get(edge_offset + thrust::distance(indices, it))); + edge_partition_e_value_output.set(edge_offset + thrust::distance(indices, it), + e_op_result); + } } }); } diff --git a/cpp/src/prims/transform_reduce_e.cuh b/cpp/src/prims/transform_reduce_e.cuh index 9c23f3fca18..483ab64dcd9 100644 --- a/cpp/src/prims/transform_reduce_e.cuh +++ b/cpp/src/prims/transform_reduce_e.cuh @@ -56,6 +56,7 @@ template __global__ void transform_reduce_e_hypersparse( @@ -65,6 +66,7 @@ __global__ void transform_reduce_e_hypersparse( EdgePartitionSrcValueInputWrapper edge_partition_src_value_input, EdgePartitionDstValueInputWrapper edge_partition_dst_value_input, EdgePartitionEdgeValueInputWrapper edge_partition_e_value_input, + thrust::optional edge_partition_e_mask, ResultIterator result_iter /* size 1 */, EdgeOp e_op) { @@ -101,24 +103,31 @@ __global__ void transform_reduce_e_hypersparse( &edge_partition_src_value_input, &edge_partition_dst_value_input, &edge_partition_e_value_input, + &edge_partition_e_mask, &e_op, major, indices, edge_offset] __device__(auto i) { - auto major_offset = edge_partition.major_offset_from_major_nocheck(major); - auto minor = indices[i]; - auto minor_offset = edge_partition.minor_offset_from_minor_nocheck(minor); - auto src = GraphViewType::is_storage_transposed ? minor : major; - auto dst = GraphViewType::is_storage_transposed ? major : minor; - auto src_offset = - GraphViewType::is_storage_transposed ? minor_offset : static_cast(major_offset); - auto dst_offset = - GraphViewType::is_storage_transposed ? static_cast(major_offset) : minor_offset; - return e_op(src, - dst, - edge_partition_src_value_input.get(src_offset), - edge_partition_dst_value_input.get(dst_offset), - edge_partition_e_value_input.get(edge_offset + i)); + if (!edge_partition_e_mask || (*edge_partition_e_mask).get(edge_offset + i)) { + auto major_offset = edge_partition.major_offset_from_major_nocheck(major); + auto minor = indices[i]; + auto minor_offset = edge_partition.minor_offset_from_minor_nocheck(minor); + auto src = GraphViewType::is_storage_transposed ? minor : major; + auto dst = GraphViewType::is_storage_transposed ? major : minor; + auto src_offset = GraphViewType::is_storage_transposed + ? minor_offset + : static_cast(major_offset); + auto dst_offset = GraphViewType::is_storage_transposed + ? 
static_cast(major_offset) + : minor_offset; + return e_op(src, + dst, + edge_partition_src_value_input.get(src_offset), + edge_partition_dst_value_input.get(dst_offset), + edge_partition_e_value_input.get(edge_offset + i)); + } else { + return e_op_result_t{}; + } }, e_op_result_t{}, edge_property_add); @@ -135,6 +144,7 @@ template __global__ void transform_reduce_e_low_degree( @@ -146,6 +156,7 @@ __global__ void transform_reduce_e_low_degree( EdgePartitionSrcValueInputWrapper edge_partition_src_value_input, EdgePartitionDstValueInputWrapper edge_partition_dst_value_input, EdgePartitionEdgeValueInputWrapper edge_partition_e_value_input, + thrust::optional edge_partition_e_mask, ResultIterator result_iter /* size 1 */, EdgeOp e_op) { @@ -177,27 +188,34 @@ __global__ void transform_reduce_e_low_degree( &edge_partition_src_value_input, &edge_partition_dst_value_input, &edge_partition_e_value_input, + &edge_partition_e_mask, &e_op, major_offset, indices, edge_offset] __device__(auto i) { - auto minor = indices[i]; - auto minor_offset = edge_partition.minor_offset_from_minor_nocheck(minor); - auto src = GraphViewType::is_storage_transposed - ? minor - : edge_partition.major_from_major_offset_nocheck(major_offset); - auto dst = GraphViewType::is_storage_transposed - ? edge_partition.major_from_major_offset_nocheck(major_offset) - : minor; - auto src_offset = - GraphViewType::is_storage_transposed ? minor_offset : static_cast(major_offset); - auto dst_offset = - GraphViewType::is_storage_transposed ? static_cast(major_offset) : minor_offset; - return e_op(src, - dst, - edge_partition_src_value_input.get(src_offset), - edge_partition_dst_value_input.get(dst_offset), - edge_partition_e_value_input.get(edge_offset + i)); + if (!edge_partition_e_mask || (*edge_partition_e_mask).get(edge_offset + i)) { + auto minor = indices[i]; + auto minor_offset = edge_partition.minor_offset_from_minor_nocheck(minor); + auto src = GraphViewType::is_storage_transposed + ? minor + : edge_partition.major_from_major_offset_nocheck(major_offset); + auto dst = GraphViewType::is_storage_transposed + ? edge_partition.major_from_major_offset_nocheck(major_offset) + : minor; + auto src_offset = GraphViewType::is_storage_transposed + ? minor_offset + : static_cast(major_offset); + auto dst_offset = GraphViewType::is_storage_transposed + ? static_cast(major_offset) + : minor_offset; + return e_op(src, + dst, + edge_partition_src_value_input.get(src_offset), + edge_partition_dst_value_input.get(dst_offset), + edge_partition_e_value_input.get(edge_offset + i)); + } else { + return e_op_result_t{}; + } }, e_op_result_t{}, edge_property_add); @@ -214,6 +232,7 @@ template __global__ void transform_reduce_e_mid_degree( @@ -225,6 +244,7 @@ __global__ void transform_reduce_e_mid_degree( EdgePartitionSrcValueInputWrapper edge_partition_src_value_input, EdgePartitionDstValueInputWrapper edge_partition_dst_value_input, EdgePartitionEdgeValueInputWrapper edge_partition_e_value_input, + thrust::optional edge_partition_e_mask, ResultIterator result_iter /* size 1 */, EdgeOp e_op) { @@ -250,24 +270,26 @@ __global__ void transform_reduce_e_mid_degree( edge_t local_degree{}; thrust::tie(indices, edge_offset, local_degree) = edge_partition.local_edges(major_offset); for (edge_t i = lane_id; i < local_degree; i += raft::warp_size()) { - auto minor = indices[i]; - auto minor_offset = edge_partition.minor_offset_from_minor_nocheck(minor); - auto src = GraphViewType::is_storage_transposed - ? 
minor - : edge_partition.major_from_major_offset_nocheck(major_offset); - auto dst = GraphViewType::is_storage_transposed - ? edge_partition.major_from_major_offset_nocheck(major_offset) - : minor; - auto src_offset = - GraphViewType::is_storage_transposed ? minor_offset : static_cast(major_offset); - auto dst_offset = - GraphViewType::is_storage_transposed ? static_cast(major_offset) : minor_offset; - auto e_op_result = e_op(src, - dst, - edge_partition_src_value_input.get(src_offset), - edge_partition_dst_value_input.get(dst_offset), - edge_partition_e_value_input.get(edge_offset + i)); - e_op_result_sum = edge_property_add(e_op_result_sum, e_op_result); + if (!edge_partition_e_mask || (*edge_partition_e_mask).get(edge_offset + i)) { + auto minor = indices[i]; + auto minor_offset = edge_partition.minor_offset_from_minor_nocheck(minor); + auto src = GraphViewType::is_storage_transposed + ? minor + : edge_partition.major_from_major_offset_nocheck(major_offset); + auto dst = GraphViewType::is_storage_transposed + ? edge_partition.major_from_major_offset_nocheck(major_offset) + : minor; + auto src_offset = + GraphViewType::is_storage_transposed ? minor_offset : static_cast(major_offset); + auto dst_offset = + GraphViewType::is_storage_transposed ? static_cast(major_offset) : minor_offset; + auto e_op_result = e_op(src, + dst, + edge_partition_src_value_input.get(src_offset), + edge_partition_dst_value_input.get(dst_offset), + edge_partition_e_value_input.get(edge_offset + i)); + e_op_result_sum = edge_property_add(e_op_result_sum, e_op_result); + } } idx += gridDim.x * (blockDim.x / raft::warp_size()); } @@ -280,6 +302,7 @@ template __global__ void transform_reduce_e_high_degree( @@ -291,6 +314,7 @@ __global__ void transform_reduce_e_high_degree( EdgePartitionSrcValueInputWrapper edge_partition_src_value_input, EdgePartitionDstValueInputWrapper edge_partition_dst_value_input, EdgePartitionEdgeValueInputWrapper edge_partition_e_value_input, + thrust::optional edge_partition_e_mask, ResultIterator result_iter /* size 1 */, EdgeOp e_op) { @@ -313,24 +337,26 @@ __global__ void transform_reduce_e_high_degree( edge_t local_degree{}; thrust::tie(indices, edge_offset, local_degree) = edge_partition.local_edges(major_offset); for (edge_t i = threadIdx.x; i < local_degree; i += blockDim.x) { - auto minor = indices[i]; - auto minor_offset = edge_partition.minor_offset_from_minor_nocheck(minor); - auto src = GraphViewType::is_storage_transposed - ? minor - : edge_partition.major_from_major_offset_nocheck(major_offset); - auto dst = GraphViewType::is_storage_transposed - ? edge_partition.major_from_major_offset_nocheck(major_offset) - : minor; - auto src_offset = - GraphViewType::is_storage_transposed ? minor_offset : static_cast(major_offset); - auto dst_offset = - GraphViewType::is_storage_transposed ? static_cast(major_offset) : minor_offset; - auto e_op_result = e_op(src, - dst, - edge_partition_src_value_input.get(src_offset), - edge_partition_dst_value_input.get(dst_offset), - edge_partition_e_value_input.get(edge_offset + i)); - e_op_result_sum = edge_property_add(e_op_result_sum, e_op_result); + if (!edge_partition_e_mask || (*edge_partition_e_mask).get(edge_offset + i)) { + auto minor = indices[i]; + auto minor_offset = edge_partition.minor_offset_from_minor_nocheck(minor); + auto src = GraphViewType::is_storage_transposed + ? minor + : edge_partition.major_from_major_offset_nocheck(major_offset); + auto dst = GraphViewType::is_storage_transposed + ? 
edge_partition.major_from_major_offset_nocheck(major_offset) + : minor; + auto src_offset = + GraphViewType::is_storage_transposed ? minor_offset : static_cast(major_offset); + auto dst_offset = + GraphViewType::is_storage_transposed ? static_cast(major_offset) : minor_offset; + auto e_op_result = e_op(src, + dst, + edge_partition_src_value_input.get(src_offset), + edge_partition_dst_value_input.get(dst_offset), + edge_partition_e_value_input.get(edge_offset + i)); + e_op_result_sum = edge_property_add(e_op_result_sum, e_op_result); + } } idx += gridDim.x; } @@ -417,8 +443,6 @@ T transform_reduce_e(raft::handle_t const& handle, typename EdgeValueInputWrapper::value_iterator, typename EdgeValueInputWrapper::value_type>>; - CUGRAPH_EXPECTS(!graph_view.has_edge_mask(), "unimplemented."); - if (do_expensive_check) { // currently, nothing to do } @@ -431,10 +455,18 @@ T transform_reduce_e(raft::handle_t const& handle, get_dataframe_buffer_begin(result_buffer) + 1, T{}); + auto edge_mask_view = graph_view.edge_mask_view(); + for (size_t i = 0; i < graph_view.number_of_local_edge_partitions(); ++i) { auto edge_partition = edge_partition_device_view_t( graph_view.local_edge_partition_view(i)); + auto edge_partition_e_mask = + edge_mask_view + ? thrust::make_optional< + detail::edge_partition_edge_property_device_view_t>( + *edge_mask_view, i) + : thrust::nullopt; edge_partition_src_input_device_view_t edge_partition_src_value_input{}; edge_partition_dst_input_device_view_t edge_partition_dst_value_input{}; @@ -467,6 +499,7 @@ T transform_reduce_e(raft::handle_t const& handle, edge_partition_src_value_input, edge_partition_dst_value_input, edge_partition_e_value_input, + edge_partition_e_mask, get_dataframe_buffer_begin(result_buffer), e_op); } @@ -482,6 +515,7 @@ T transform_reduce_e(raft::handle_t const& handle, edge_partition_src_value_input, edge_partition_dst_value_input, edge_partition_e_value_input, + edge_partition_e_mask, get_dataframe_buffer_begin(result_buffer), e_op); } @@ -497,6 +531,7 @@ T transform_reduce_e(raft::handle_t const& handle, edge_partition_src_value_input, edge_partition_dst_value_input, edge_partition_e_value_input, + edge_partition_e_mask, get_dataframe_buffer_begin(result_buffer), e_op); } @@ -510,6 +545,7 @@ T transform_reduce_e(raft::handle_t const& handle, edge_partition_src_value_input, edge_partition_dst_value_input, edge_partition_e_value_input, + edge_partition_e_mask, get_dataframe_buffer_begin(result_buffer), e_op); } @@ -527,6 +563,7 @@ T transform_reduce_e(raft::handle_t const& handle, edge_partition_src_value_input, edge_partition_dst_value_input, edge_partition_e_value_input, + edge_partition_e_mask, get_dataframe_buffer_begin(result_buffer), e_op); } @@ -601,8 +638,6 @@ auto transform_reduce_e(raft::handle_t const& handle, edge_op_result_type::type; static_assert(!std::is_same_v); - CUGRAPH_EXPECTS(!graph_view.has_edge_mask(), "unimplemented."); - if (do_expensive_check) { // currently, nothing to do } diff --git a/cpp/src/prims/update_edge_src_dst_property.cuh b/cpp/src/prims/update_edge_src_dst_property.cuh index 2d72a075ca5..18bdf5bcf2d 100644 --- a/cpp/src/prims/update_edge_src_dst_property.cuh +++ b/cpp/src/prims/update_edge_src_dst_property.cuh @@ -44,6 +44,8 @@ #include #include +#include + #include #include #include @@ -181,13 +183,14 @@ void update_edge_major_property(raft::handle_t const& handle, handle.get_stream()); auto bool_first = thrust::make_transform_iterator( (*edge_partition_keys)[i].begin(), - [rx_value_first, - v_first = 
graph_view.vertex_partition_range_first( - major_range_vertex_partition_id)] __device__(auto v) { - auto v_offset = v - v_first; - return static_cast(*(rx_value_first + packed_bool_offset(v_offset)) & - packed_bool_mask(v_offset)); - }); + cuda::proclaim_return_type( + [rx_value_first, + v_first = graph_view.vertex_partition_range_first( + major_range_vertex_partition_id)] __device__(auto v) { + auto v_offset = v - v_first; + return static_cast(*(rx_value_first + packed_bool_offset(v_offset)) & + packed_bool_mask(v_offset)); + })); pack_bools(handle, bool_first, bool_first + (*edge_partition_keys)[i].size(), @@ -202,8 +205,9 @@ void update_edge_major_property(raft::handle_t const& handle, auto v_offset_first = thrust::make_transform_iterator( (*edge_partition_keys)[i].begin(), - [v_first = graph_view.vertex_partition_range_first( - major_range_vertex_partition_id)] __device__(auto v) { return v - v_first; }); + cuda::proclaim_return_type( + [v_first = graph_view.vertex_partition_range_first( + major_range_vertex_partition_id)] __device__(auto v) { return v - v_first; })); thrust::gather(handle.get_thrust_policy(), v_offset_first, v_offset_first + (*edge_partition_keys)[i].size(), @@ -312,21 +316,24 @@ void update_edge_major_property(raft::handle_t const& handle, graph_view.local_vertex_partition_view()); if constexpr (packed_bool) { auto bool_first = thrust::make_transform_iterator( - vertex_first, [vertex_property_input_first, vertex_partition] __device__(auto v) { + vertex_first, + cuda::proclaim_return_type([vertex_property_input_first, + vertex_partition] __device__(auto v) { auto v_offset = vertex_partition.local_vertex_partition_offset_from_vertex_nocheck(v); return static_cast( *(vertex_property_input_first + packed_bool_offset(v_offset)) & packed_bool_mask(v_offset)); - }); + })); pack_bools(handle, bool_first, bool_first + thrust::distance(vertex_first, vertex_last), rx_value_first); } else { - auto map_first = - thrust::make_transform_iterator(vertex_first, [vertex_partition] __device__(auto v) { + auto map_first = thrust::make_transform_iterator( + vertex_first, + cuda::proclaim_return_type([vertex_partition] __device__(auto v) { return vertex_partition.local_vertex_partition_offset_from_vertex_nocheck(v); - }); + })); // FIXME: this gather (and temporary buffer) is unnecessary if NCCL directly takes a // permutation iterator (and directly gathers to the internal buffer) thrust::gather(handle.get_thrust_policy(), @@ -391,9 +398,10 @@ void update_edge_major_property(raft::handle_t const& handle, }); } else { auto map_first = thrust::make_transform_iterator( - rx_vertices.begin(), [edge_partition] __device__(auto v) { + rx_vertices.begin(), + cuda::proclaim_return_type([edge_partition] __device__(auto v) { return edge_partition.major_offset_from_major_nocheck(v); - }); + })); // FIXME: this scatter is unnecessary if NCCL directly takes a permutation iterator (and // directly scatters from the internal buffer) thrust::scatter(handle.get_thrust_policy(), @@ -593,13 +601,14 @@ void update_edge_minor_property(raft::handle_t const& handle, auto bool_first = thrust::make_transform_iterator( (*edge_partition_keys).begin() + key_offsets[j], - [rx_value_first, - v_first = graph_view.vertex_partition_range_first( - minor_range_vertex_partition_id)] __device__(auto v) { - auto v_offset = v - v_first; - return static_cast(*(rx_value_first + packed_bool_offset(v_offset)) & - packed_bool_mask(v_offset)); - }); + cuda::proclaim_return_type( + [rx_value_first, + v_first = 
graph_view.vertex_partition_range_first( + minor_range_vertex_partition_id)] __device__(auto v) { + auto v_offset = v - v_first; + return static_cast(*(rx_value_first + packed_bool_offset(v_offset)) & + packed_bool_mask(v_offset)); + })); pack_unaligned_bools( handle, bool_first, @@ -611,10 +620,10 @@ void update_edge_minor_property(raft::handle_t const& handle, std::get>(key_offsets_or_rx_displacements); auto bool_first = thrust::make_transform_iterator( thrust::make_counting_iterator(vertex_t{0}), - [rx_value_first] __device__(vertex_t v_offset) { + cuda::proclaim_return_type([rx_value_first] __device__(vertex_t v_offset) { return static_cast(*(rx_value_first + packed_bool_offset(v_offset)) & packed_bool_mask(v_offset)); - }); + })); pack_unaligned_bools( handle, bool_first, @@ -630,8 +639,9 @@ void update_edge_minor_property(raft::handle_t const& handle, auto v_offset_first = thrust::make_transform_iterator( (*edge_partition_keys).begin() + key_offsets[j], - [v_first = graph_view.vertex_partition_range_first( - minor_range_vertex_partition_id)] __device__(auto v) { return v - v_first; }); + cuda::proclaim_return_type( + [v_first = graph_view.vertex_partition_range_first( + minor_range_vertex_partition_id)] __device__(auto v) { return v - v_first; })); thrust::gather(handle.get_thrust_policy(), v_offset_first, v_offset_first + (key_offsets[j + 1] - key_offsets[j]), @@ -718,21 +728,24 @@ void update_edge_minor_property(raft::handle_t const& handle, graph_view.local_vertex_partition_view()); if constexpr (packed_bool) { auto bool_first = thrust::make_transform_iterator( - vertex_first, [vertex_property_input_first, vertex_partition] __device__(auto v) { + vertex_first, + cuda::proclaim_return_type([vertex_property_input_first, + vertex_partition] __device__(auto v) { auto v_offset = vertex_partition.local_vertex_partition_offset_from_vertex_nocheck(v); return static_cast( *(vertex_property_input_first + packed_bool_offset(v_offset)) & packed_bool_mask(v_offset)); - }); + })); pack_bools(handle, bool_first, bool_first + thrust::distance(vertex_first, vertex_last), rx_value_first); } else { - auto map_first = - thrust::make_transform_iterator(vertex_first, [vertex_partition] __device__(auto v) { + auto map_first = thrust::make_transform_iterator( + vertex_first, + cuda::proclaim_return_type([vertex_partition] __device__(auto v) { return vertex_partition.local_vertex_partition_offset_from_vertex_nocheck(v); - }); + })); // FIXME: this gather (and temporary buffer) is unnecessary if NCCL directly takes a // permutation iterator (and directly gathers to the internal buffer) thrust::gather(handle.get_thrust_policy(), @@ -799,9 +812,10 @@ void update_edge_minor_property(raft::handle_t const& handle, }); } else { auto map_first = thrust::make_transform_iterator( - rx_vertices.begin(), [edge_partition] __device__(auto v) { + rx_vertices.begin(), + cuda::proclaim_return_type([edge_partition] __device__(auto v) { return edge_partition.minor_offset_from_minor_nocheck(v); - }); + })); // FIXME: this scatter is unnecessary if NCCL directly takes a permutation iterator (and // directly scatters from the internal buffer) thrust::scatter(handle.get_thrust_policy(), @@ -866,8 +880,6 @@ void update_edge_src_property( edge_src_property_output, bool do_expensive_check = false) { - CUGRAPH_EXPECTS(!graph_view.has_edge_mask(), "unimplemented."); - if (do_expensive_check) { // currently, nothing to do } @@ -917,8 +929,6 @@ void update_edge_src_property( edge_src_property_output, bool do_expensive_check = false) { 
- CUGRAPH_EXPECTS(!graph_view.has_edge_mask(), "unimplemented."); - if (do_expensive_check) { auto num_invalids = thrust::count_if( handle.get_thrust_policy(), @@ -985,8 +995,6 @@ void update_edge_dst_property( edge_dst_property_output, bool do_expensive_check = false) { - CUGRAPH_EXPECTS(!graph_view.has_edge_mask(), "unimplemented."); - if (do_expensive_check) { // currently, nothing to do } @@ -1036,8 +1044,6 @@ void update_edge_dst_property( edge_dst_property_output, bool do_expensive_check = false) { - CUGRAPH_EXPECTS(!graph_view.has_edge_mask(), "unimplemented."); - if (do_expensive_check) { auto num_invalids = thrust::count_if( handle.get_thrust_policy(), diff --git a/cpp/src/sampling/random_walks.cuh b/cpp/src/sampling/random_walks.cuh index 6a7334e9f1a..5a9ded02009 100644 --- a/cpp/src/sampling/random_walks.cuh +++ b/cpp/src/sampling/random_walks.cuh @@ -52,6 +52,8 @@ #include #include +#include + #include #include // FIXME: requirement for temporary std::getenv() #include @@ -378,7 +380,8 @@ struct random_walker_t { // scatter d_src_init_v to coalesced vertex vector: // - auto dlambda = [stride = max_depth_] __device__(auto indx) { return indx * stride; }; + auto dlambda = cuda::proclaim_return_type( + [stride = max_depth_] __device__(auto indx) { return indx * stride; }); // use the transform iterator as map: // @@ -539,10 +542,11 @@ struct random_walker_t { // delta = ptr_d_sizes[indx] - 1 // - auto dlambda = [stride, ptr_d_sizes, ptr_d_coalesced] __device__(auto indx) { - auto delta = ptr_d_sizes[indx] - 1; - return ptr_d_coalesced[indx * stride + delta]; - }; + auto dlambda = cuda::proclaim_return_type( + [stride, ptr_d_sizes, ptr_d_coalesced] __device__(auto indx) { + auto delta = ptr_d_sizes[indx] - 1; + return ptr_d_coalesced[indx * stride + delta]; + }); // use the transform iterator as map: // @@ -587,10 +591,11 @@ struct random_walker_t { { index_t const* ptr_d_sizes = original::raw_const_ptr(d_sizes); - auto dlambda = [stride, adjust, ptr_d_sizes] __device__(auto indx) { - auto delta = ptr_d_sizes[indx] - adjust - 1; - return indx * stride + delta; - }; + auto dlambda = + cuda::proclaim_return_type([stride, adjust, ptr_d_sizes] __device__(auto indx) { + auto delta = ptr_d_sizes[indx] - adjust - 1; + return indx * stride + delta; + }); // use the transform iterator as map: // diff --git a/cpp/src/sampling/sampling_post_processing_impl.cuh b/cpp/src/sampling/sampling_post_processing_impl.cuh index 77d4f2d865f..852d82e78ab 100644 --- a/cpp/src/sampling/sampling_post_processing_impl.cuh +++ b/cpp/src/sampling/sampling_post_processing_impl.cuh @@ -40,6 +40,8 @@ #include #include +#include + #include namespace cugraph { @@ -1229,10 +1231,12 @@ renumber_and_compress_sampled_edgelist( auto pair_first = thrust::make_zip_iterator((*compressed_label_indices).begin(), (*compressed_hops).begin()); auto value_pair_first = thrust::make_transform_iterator( - thrust::make_counting_iterator(size_t{0}), [num_hops] __device__(size_t i) { - return thrust::make_tuple(static_cast(i / num_hops), - static_cast(i % num_hops)); - }); + thrust::make_counting_iterator(size_t{0}), + cuda::proclaim_return_type>( + [num_hops] __device__(size_t i) { + return thrust::make_tuple(static_cast(i / num_hops), + static_cast(i % num_hops)); + })); thrust::upper_bound(handle.get_thrust_policy(), pair_first, pair_first + (*compressed_label_indices).size(), diff --git a/cpp/src/structure/detail/structure_utils.cuh b/cpp/src/structure/detail/structure_utils.cuh index c49b62e4543..f0f729bce18 100644 --- 
a/cpp/src/structure/detail/structure_utils.cuh +++ b/cpp/src/structure/detail/structure_utils.cuh @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -524,34 +525,21 @@ std::tuple> mark_entries(raft::handle_t co return word; }); - size_t bit_count = thrust::transform_reduce( - handle.get_thrust_policy(), - marked_entries.begin(), - marked_entries.end(), - [] __device__(auto word) { return __popc(word); }, - size_t{0}, - thrust::plus()); + size_t bit_count = detail::count_set_bits(handle, marked_entries.begin(), num_entries); return std::make_tuple(bit_count, std::move(marked_entries)); } template -rmm::device_uvector remove_flagged_elements(raft::handle_t const& handle, - rmm::device_uvector&& vector, - raft::device_span remove_flags, - size_t remove_count) +rmm::device_uvector keep_flagged_elements(raft::handle_t const& handle, + rmm::device_uvector&& vector, + raft::device_span keep_flags, + size_t keep_count) { - rmm::device_uvector result(vector.size() - remove_count, handle.get_stream()); - - thrust::copy_if( - handle.get_thrust_policy(), - thrust::make_counting_iterator(size_t{0}), - thrust::make_counting_iterator(vector.size()), - thrust::make_transform_output_iterator(result.begin(), - indirection_t{vector.data()}), - [remove_flags] __device__(size_t i) { - return !(remove_flags[cugraph::packed_bool_offset(i)] & cugraph::packed_bool_mask(i)); - }); + rmm::device_uvector result(keep_count, handle.get_stream()); + + detail::copy_if_mask_set( + handle, vector.begin(), vector.end(), keep_flags.begin(), result.begin()); return result; } diff --git a/cpp/src/structure/graph_view_impl.cuh b/cpp/src/structure/graph_view_impl.cuh index 64a8a3212b3..a40747f13f7 100644 --- a/cpp/src/structure/graph_view_impl.cuh +++ b/cpp/src/structure/graph_view_impl.cuh @@ -51,6 +51,8 @@ #include #include +#include + #include #include #include @@ -159,14 +161,14 @@ rmm::device_uvector compute_major_degrees( thrust::make_counting_iterator(vertex_t{0}), thrust::make_counting_iterator(major_hypersparse_first - major_range_first), local_degrees.begin(), - [offsets, masks] __device__(auto i) { + cuda::proclaim_return_type([offsets, masks] __device__(auto i) { auto local_degree = offsets[i + 1] - offsets[i]; if (masks) { local_degree = static_cast( detail::count_set_bits(*masks, offsets[i], local_degree)); } return local_degree; - }); + })); if (use_dcs) { auto dcs_nzd_vertices = (*edge_partition_dcs_nzd_vertices)[i]; auto dcs_nzd_vertex_count = (*edge_partition_dcs_nzd_vertex_counts)[i]; @@ -548,7 +550,7 @@ graph_view_tpartition_, this->edge_partition_segment_offsets_); } else { - CUGRAPH_EXPECTS(!has_edge_mask(), "unimplemented."); + CUGRAPH_EXPECTS(!(this->has_edge_mask()), "unimplemented."); return compute_minor_degrees(handle, *this); } } @@ -566,7 +568,7 @@ graph_view_tlocal_vertex_partition_range_size()); } else { - CUGRAPH_EXPECTS(!has_edge_mask(), "unimplemented."); + CUGRAPH_EXPECTS(!(this->has_edge_mask()), "unimplemented."); return compute_minor_degrees(handle, *this); } } @@ -577,7 +579,7 @@ graph_view_thas_edge_mask()), "unimplemented."); return compute_minor_degrees(handle, *this); } else { return compute_major_degrees(handle, @@ -598,7 +600,7 @@ graph_view_thas_edge_mask()), "unimplemented."); return compute_minor_degrees(handle, *this); } else { return compute_major_degrees( @@ -614,7 +616,7 @@ template >:: compute_max_in_degree(raft::handle_t const& handle) const { - CUGRAPH_EXPECTS(!has_edge_mask(), "unimplemented."); + CUGRAPH_EXPECTS(!(this->has_edge_mask()), 
"unimplemented."); auto in_degrees = compute_in_degrees(handle); auto it = thrust::max_element(handle.get_thrust_policy(), in_degrees.begin(), in_degrees.end()); @@ -632,7 +634,7 @@ template >:: compute_max_in_degree(raft::handle_t const& handle) const { - CUGRAPH_EXPECTS(!has_edge_mask(), "unimplemented."); + CUGRAPH_EXPECTS(!(this->has_edge_mask()), "unimplemented."); auto in_degrees = compute_in_degrees(handle); auto it = thrust::max_element(handle.get_thrust_policy(), in_degrees.begin(), in_degrees.end()); @@ -646,7 +648,7 @@ template >:: compute_max_out_degree(raft::handle_t const& handle) const { - CUGRAPH_EXPECTS(!has_edge_mask(), "unimplemented."); + CUGRAPH_EXPECTS(!(this->has_edge_mask()), "unimplemented."); auto out_degrees = compute_out_degrees(handle); auto it = thrust::max_element(handle.get_thrust_policy(), out_degrees.begin(), out_degrees.end()); @@ -664,7 +666,7 @@ template >:: compute_max_out_degree(raft::handle_t const& handle) const { - CUGRAPH_EXPECTS(!has_edge_mask(), "unimplemented."); + CUGRAPH_EXPECTS(!(this->has_edge_mask()), "unimplemented."); auto out_degrees = compute_out_degrees(handle); auto it = thrust::max_element(handle.get_thrust_policy(), out_degrees.begin(), out_degrees.end()); @@ -678,7 +680,7 @@ template >:: count_self_loops(raft::handle_t const& handle) const { - CUGRAPH_EXPECTS(!has_edge_mask(), "unimplemented."); + CUGRAPH_EXPECTS(!(this->has_edge_mask()), "unimplemented."); return count_if_e( handle, @@ -693,7 +695,7 @@ template >:: count_self_loops(raft::handle_t const& handle) const { - CUGRAPH_EXPECTS(!has_edge_mask(), "unimplemented."); + CUGRAPH_EXPECTS(!(this->has_edge_mask()), "unimplemented."); return count_if_e( handle, @@ -708,7 +710,7 @@ template >:: count_multi_edges(raft::handle_t const& handle) const { - CUGRAPH_EXPECTS(!has_edge_mask(), "unimplemented."); + CUGRAPH_EXPECTS(!(this->has_edge_mask()), "unimplemented."); if (!this->is_multigraph()) { return edge_t{0}; } @@ -728,7 +730,7 @@ template >:: count_multi_edges(raft::handle_t const& handle) const { - CUGRAPH_EXPECTS(!has_edge_mask(), "unimplemented."); + CUGRAPH_EXPECTS(!(this->has_edge_mask()), "unimplemented."); if (!this->is_multigraph()) { return edge_t{0}; } diff --git a/cpp/src/structure/remove_multi_edges_impl.cuh b/cpp/src/structure/remove_multi_edges_impl.cuh index ab6b1fba8eb..fdd3059f874 100644 --- a/cpp/src/structure/remove_multi_edges_impl.cuh +++ b/cpp/src/structure/remove_multi_edges_impl.cuh @@ -254,50 +254,47 @@ remove_multi_edges(raft::handle_t const& handle, } } - auto [multi_edge_count, multi_edges_to_delete] = - detail::mark_entries(handle, - edgelist_srcs.size(), - [d_edgelist_srcs = edgelist_srcs.data(), - d_edgelist_dsts = edgelist_dsts.data()] __device__(auto idx) { - return (idx > 0) && (d_edgelist_srcs[idx - 1] == d_edgelist_srcs[idx]) && - (d_edgelist_dsts[idx - 1] == d_edgelist_dsts[idx]); - }); - - if (multi_edge_count > 0) { - edgelist_srcs = detail::remove_flagged_elements( + auto [keep_count, keep_flags] = detail::mark_entries( + handle, + edgelist_srcs.size(), + [d_edgelist_srcs = edgelist_srcs.data(), + d_edgelist_dsts = edgelist_dsts.data()] __device__(auto idx) { + return !((idx > 0) && (d_edgelist_srcs[idx - 1] == d_edgelist_srcs[idx]) && + (d_edgelist_dsts[idx - 1] == d_edgelist_dsts[idx])); + }); + + if (keep_count < edgelist_srcs.size()) { + edgelist_srcs = detail::keep_flagged_elements( handle, std::move(edgelist_srcs), - raft::device_span{multi_edges_to_delete.data(), multi_edges_to_delete.size()}, - multi_edge_count); - edgelist_dsts 
= detail::remove_flagged_elements( + raft::device_span{keep_flags.data(), keep_flags.size()}, + keep_count); + edgelist_dsts = detail::keep_flagged_elements( handle, std::move(edgelist_dsts), - raft::device_span{multi_edges_to_delete.data(), multi_edges_to_delete.size()}, - multi_edge_count); + raft::device_span{keep_flags.data(), keep_flags.size()}, + keep_count); if (edgelist_weights) - edgelist_weights = detail::remove_flagged_elements( + edgelist_weights = detail::keep_flagged_elements( handle, std::move(*edgelist_weights), - raft::device_span{multi_edges_to_delete.data(), - multi_edges_to_delete.size()}, - multi_edge_count); + raft::device_span{keep_flags.data(), keep_flags.size()}, + keep_count); if (edgelist_edge_ids) - edgelist_edge_ids = detail::remove_flagged_elements( + edgelist_edge_ids = detail::keep_flagged_elements( handle, std::move(*edgelist_edge_ids), - raft::device_span{multi_edges_to_delete.data(), - multi_edges_to_delete.size()}, - multi_edge_count); + raft::device_span{keep_flags.data(), keep_flags.size()}, + keep_count); if (edgelist_edge_types) - edgelist_edge_types = detail::remove_flagged_elements( + edgelist_edge_types = detail::keep_flagged_elements( handle, std::move(*edgelist_edge_types), - raft::device_span{multi_edges_to_delete.data(), - multi_edges_to_delete.size()}, - multi_edge_count); + raft::device_span{keep_flags.data(), keep_flags.size()}, + keep_count); } return std::make_tuple(std::move(edgelist_srcs), diff --git a/cpp/src/structure/remove_self_loops_impl.cuh b/cpp/src/structure/remove_self_loops_impl.cuh index 161ffeae28e..dafe26cd1c5 100644 --- a/cpp/src/structure/remove_self_loops_impl.cuh +++ b/cpp/src/structure/remove_self_loops_impl.cuh @@ -44,44 +44,44 @@ remove_self_loops(raft::handle_t const& handle, std::optional>&& edgelist_edge_ids, std::optional>&& edgelist_edge_types) { - auto [self_loop_count, self_loops_to_delete] = + auto [keep_count, keep_flags] = detail::mark_entries(handle, edgelist_srcs.size(), [d_srcs = edgelist_srcs.data(), d_dsts = edgelist_dsts.data()] __device__( - size_t i) { return d_srcs[i] == d_dsts[i]; }); + size_t i) { return d_srcs[i] != d_dsts[i]; }); - if (self_loop_count > 0) { - edgelist_srcs = detail::remove_flagged_elements( + if (keep_count < edgelist_srcs.size()) { + edgelist_srcs = detail::keep_flagged_elements( handle, std::move(edgelist_srcs), - raft::device_span{self_loops_to_delete.data(), self_loops_to_delete.size()}, - self_loop_count); - edgelist_dsts = detail::remove_flagged_elements( + raft::device_span{keep_flags.data(), keep_flags.size()}, + keep_count); + edgelist_dsts = detail::keep_flagged_elements( handle, std::move(edgelist_dsts), - raft::device_span{self_loops_to_delete.data(), self_loops_to_delete.size()}, - self_loop_count); + raft::device_span{keep_flags.data(), keep_flags.size()}, + keep_count); if (edgelist_weights) - edgelist_weights = detail::remove_flagged_elements( + edgelist_weights = detail::keep_flagged_elements( handle, std::move(*edgelist_weights), - raft::device_span{self_loops_to_delete.data(), self_loops_to_delete.size()}, - self_loop_count); + raft::device_span{keep_flags.data(), keep_flags.size()}, + keep_count); if (edgelist_edge_ids) - edgelist_edge_ids = detail::remove_flagged_elements( + edgelist_edge_ids = detail::keep_flagged_elements( handle, std::move(*edgelist_edge_ids), - raft::device_span{self_loops_to_delete.data(), self_loops_to_delete.size()}, - self_loop_count); + raft::device_span{keep_flags.data(), keep_flags.size()}, + keep_count); if 
(edgelist_edge_types) - edgelist_edge_types = detail::remove_flagged_elements( + edgelist_edge_types = detail::keep_flagged_elements( handle, std::move(*edgelist_edge_types), - raft::device_span{self_loops_to_delete.data(), self_loops_to_delete.size()}, - self_loop_count); + raft::device_span{keep_flags.data(), keep_flags.size()}, + keep_count); } return std::make_tuple(std::move(edgelist_srcs), diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt index 6530a25d178..d9c88bc179e 100644 --- a/cpp/tests/CMakeLists.txt +++ b/cpp/tests/CMakeLists.txt @@ -738,9 +738,16 @@ if (BUILD_CUGRAPH_MTMG_TESTS) # - MTMG tests ------------------------------------------------------------------------- ConfigureTest(MTMG_TEST mtmg/threaded_test.cu) target_link_libraries(MTMG_TEST - PRIVATE - UCP::UCP - ) + PRIVATE + UCP::UCP + ) + + ConfigureTest(MTMG_LOUVAIN_TEST mtmg/threaded_test_louvain.cu) + target_link_libraries(MTMG_LOUVAIN_TEST + PRIVATE + cugraphmgtestutil + UCP::UCP + ) if(BUILD_CUGRAPH_MG_TESTS) ############################################################################################### diff --git a/cpp/tests/c_api/hits_test.c b/cpp/tests/c_api/hits_test.c index c275d883d11..1ebd4f82a51 100644 --- a/cpp/tests/c_api/hits_test.c +++ b/cpp/tests/c_api/hits_test.c @@ -163,7 +163,7 @@ int test_hits() weight_t h_hubs[] = {0.347296, 0.532089, 1, 0.00003608, 0.00003608, 0}; weight_t h_authorities[] = {0.652703, 0.879385, 0, 1, 0.347296, 0.00009136}; - double epsilon = 0.0001; + double epsilon = 0.00002; size_t max_iterations = 20; // hits wants store_transposed = TRUE @@ -195,7 +195,7 @@ int test_hits_with_transpose() weight_t h_hubs[] = {0.347296, 0.532089, 1, 0.00003608, 0.00003608, 0}; weight_t h_authorities[] = {0.652703, 0.879385, 0, 1, 0.347296, 0.00009136}; - double epsilon = 0.0001; + double epsilon = 0.00002; size_t max_iterations = 20; // Hits wants store_transposed = TRUE @@ -232,7 +232,7 @@ int test_hits_with_initial() vertex_t h_initial_vertices[] = {0, 1, 2, 3, 4}; weight_t h_initial_hubs[] = {0.347296, 0.532089, 1, 0.00003608, 0.00003608}; - double epsilon = 0.0001; + double epsilon = 0.00002; size_t max_iterations = 20; return generic_hits_test(h_src, diff --git a/cpp/tests/c_api/mg_hits_test.c b/cpp/tests/c_api/mg_hits_test.c index 87371093613..3e10bfc05d6 100644 --- a/cpp/tests/c_api/mg_hits_test.c +++ b/cpp/tests/c_api/mg_hits_test.c @@ -171,7 +171,7 @@ int test_hits(const cugraph_resource_handle_t* handle) weight_t h_hubs[] = {0.347296, 0.532089, 1, 0.00003608, 0.00003608, 0}; weight_t h_authorities[] = {0.652703, 0.879385, 0, 1, 0.347296, 0.00009136}; - double epsilon = 0.0001; + double epsilon = 0.00002; size_t max_iterations = 20; // hits wants store_transposed = TRUE @@ -203,7 +203,7 @@ int test_hits_with_transpose(const cugraph_resource_handle_t* handle) weight_t h_hubs[] = {0.347296, 0.532089, 1, 0.00003608, 0.00003608, 0}; weight_t h_authorities[] = {0.652703, 0.879385, 0, 1, 0.347296, 0.00009136}; - double epsilon = 0.0001; + double epsilon = 0.00002; size_t max_iterations = 20; // Hits wants store_transposed = TRUE diff --git a/cpp/tests/community/triangle_count_test.cpp b/cpp/tests/community/triangle_count_test.cpp index 836bab59457..592924c3c47 100644 --- a/cpp/tests/community/triangle_count_test.cpp +++ b/cpp/tests/community/triangle_count_test.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, NVIDIA CORPORATION. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -232,7 +232,7 @@ class Tests_TriangleCount for (size_t i = 0; i < h_cugraph_vertices.size(); ++i) { auto v = h_cugraph_vertices[i]; auto count = h_cugraph_triangle_counts[i]; - ASSERT_TRUE(count == h_reference_triangle_counts[v]) + ASSERT_EQ(count, h_reference_triangle_counts[v]) << "Triangle count values do not match with the reference values."; } } diff --git a/cpp/tests/link_analysis/hits_test.cpp b/cpp/tests/link_analysis/hits_test.cpp index d0e77769034..6796761e212 100644 --- a/cpp/tests/link_analysis/hits_test.cpp +++ b/cpp/tests/link_analysis/hits_test.cpp @@ -52,9 +52,11 @@ std::tuple, std::vector, double, size_t> hits_re size_t max_iterations, std::optional starting_hub_values, bool normalized, - double tolerance) + double epsilon) { CUGRAPH_EXPECTS(num_vertices > 1, "number of vertices expected to be non-zero"); + auto tolerance = static_cast(num_vertices) * epsilon; + std::vector prev_hubs(num_vertices, result_t{1.0} / num_vertices); std::vector prev_authorities(num_vertices, result_t{1.0} / num_vertices); std::vector curr_hubs(num_vertices); @@ -127,8 +129,8 @@ std::tuple, std::vector, double, size_t> hits_re } struct Hits_Usecase { - bool check_correctness{true}; bool check_initial_input{false}; + bool check_correctness{true}; }; template @@ -175,8 +177,8 @@ class Tests_Hits : public ::testing::TestWithParam d_hubs(graph_view.local_vertex_partition_range_size(), handle.get_stream()); @@ -201,7 +203,7 @@ class Tests_Hits : public ::testing::TestWithParam h_cugraph_hits{}; if (renumber) { @@ -246,8 +248,7 @@ class Tests_Hits : public ::testing::TestWithParam(graph_view.number_of_vertices())) * - threshold_ratio; // skip comparison for low hits vertices (lowly ranked vertices) + 1e-6; // skip comparison for low hits vertices (lowly ranked vertices) auto nearly_equal = [threshold_ratio, threshold_magnitude](auto lhs, auto rhs) { return std::abs(lhs - rhs) <= std::max(std::max(lhs, rhs) * threshold_ratio, threshold_magnitude); @@ -294,14 +295,17 @@ INSTANTIATE_TEST_SUITE_P( Tests_Hits_File, ::testing::Combine( // enable correctness checks - ::testing::Values(Hits_Usecase{true, false}, Hits_Usecase{true, true}), + ::testing::Values(Hits_Usecase{false, true}, Hits_Usecase{true, true}), ::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"), + cugraph::test::File_Usecase("test/datasets/web-Google.mtx"), + cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"), + cugraph::test::File_Usecase("test/datasets/webbase-1M.mtx"), cugraph::test::File_Usecase("test/datasets/dolphins.mtx")))); INSTANTIATE_TEST_SUITE_P(rmat_small_test, Tests_Hits_Rmat, // enable correctness checks - ::testing::Combine(::testing::Values(Hits_Usecase{true, false}, + ::testing::Combine(::testing::Values(Hits_Usecase{false, true}, Hits_Usecase{true, true}), ::testing::Values(cugraph::test::Rmat_Usecase( 10, 16, 0.57, 0.19, 0.19, 0, false, false)))); @@ -315,7 +319,7 @@ INSTANTIATE_TEST_SUITE_P( Tests_Hits_File, ::testing::Combine( // disable correctness checks - ::testing::Values(Hits_Usecase{false, false}, Hits_Usecase{false, true}), + ::testing::Values(Hits_Usecase{false, false}, Hits_Usecase{true, false}), ::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx")))); INSTANTIATE_TEST_SUITE_P( @@ -327,7 +331,7 @@ INSTANTIATE_TEST_SUITE_P( Tests_Hits_Rmat, // disable correctness checks for large graphs ::testing::Combine( - 
::testing::Values(Hits_Usecase{false, false}, Hits_Usecase{false, true}), + ::testing::Values(Hits_Usecase{false, false}, Hits_Usecase{true, false}), ::testing::Values(cugraph::test::Rmat_Usecase(10, 16, 0.57, 0.19, 0.19, 0, false, false)))); CUGRAPH_TEST_PROGRAM_MAIN() diff --git a/cpp/tests/link_analysis/mg_hits_test.cpp b/cpp/tests/link_analysis/mg_hits_test.cpp index cf95d03681d..5c89bafd08e 100644 --- a/cpp/tests/link_analysis/mg_hits_test.cpp +++ b/cpp/tests/link_analysis/mg_hits_test.cpp @@ -33,8 +33,8 @@ #include struct Hits_Usecase { - bool check_correctness{true}; bool check_initial_input{false}; + bool check_correctness{true}; }; template @@ -81,7 +81,7 @@ class Tests_MGHits : public ::testing::TestWithParam d_mg_hubs(mg_graph_view.local_vertex_partition_range_size(), handle_->get_stream()); @@ -110,7 +110,7 @@ class Tests_MGHits : public ::testing::TestWithParam(mg_graph_view.number_of_vertices())) * - threshold_ratio; // skip comparison for low Hits verties (lowly ranked - // vertices) + 1e-6; // skip comparison for low Hits verties (lowly ranked vertices) auto nearly_equal = [threshold_ratio, threshold_magnitude](auto lhs, auto rhs) { return std::abs(lhs - rhs) < std::max(std::max(lhs, rhs) * threshold_ratio, threshold_magnitude); @@ -274,7 +272,7 @@ INSTANTIATE_TEST_SUITE_P( Tests_MGHits_File, ::testing::Combine( // enable correctness checks - ::testing::Values(Hits_Usecase{true, false}, Hits_Usecase{true, true}), + ::testing::Values(Hits_Usecase{false, true}, Hits_Usecase{true, true}), ::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"), cugraph::test::File_Usecase("test/datasets/web-Google.mtx"), cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"), @@ -285,7 +283,7 @@ INSTANTIATE_TEST_SUITE_P( Tests_MGHits_Rmat, ::testing::Combine( // enable correctness checks - ::testing::Values(Hits_Usecase{true, false}, Hits_Usecase{true, true}), + ::testing::Values(Hits_Usecase{false, true}, Hits_Usecase{true, true}), ::testing::Values(cugraph::test::Rmat_Usecase(10, 16, 0.57, 0.19, 0.19, 0, false, false)))); INSTANTIATE_TEST_SUITE_P( @@ -297,7 +295,7 @@ INSTANTIATE_TEST_SUITE_P( Tests_MGHits_Rmat, ::testing::Combine( // disable correctness checks for large graphs - ::testing::Values(Hits_Usecase{false, false}), + ::testing::Values(Hits_Usecase{false, false}, Hits_Usecase{true, false}), ::testing::Values(cugraph::test::Rmat_Usecase(20, 32, 0.57, 0.19, 0.19, 0, false, false)))); CUGRAPH_MG_TEST_PROGRAM_MAIN() diff --git a/cpp/tests/mtmg/multi_node_threaded_test.cu b/cpp/tests/mtmg/multi_node_threaded_test.cu index e5a7de07781..17aed4fdecf 100644 --- a/cpp/tests/mtmg/multi_node_threaded_test.cu +++ b/cpp/tests/mtmg/multi_node_threaded_test.cu @@ -311,7 +311,8 @@ class Tests_Multithreaded auto d_my_pageranks = pageranks_view.gather( thread_handle, raft::device_span{d_my_vertex_list.data(), d_my_vertex_list.size()}, - graph_view, + graph_view.get_vertex_partition_range_lasts(thread_handle), + graph_view.get_vertex_partition_view(thread_handle), renumber_map_view); std::vector my_pageranks(d_my_pageranks.size()); diff --git a/cpp/tests/mtmg/threaded_test.cu b/cpp/tests/mtmg/threaded_test.cu index bc4d8cfef6a..a5df0199cac 100644 --- a/cpp/tests/mtmg/threaded_test.cu +++ b/cpp/tests/mtmg/threaded_test.cu @@ -155,10 +155,25 @@ class Tests_Multithreaded input_usecase.template construct_edgelist( handle, multithreaded_usecase.test_weighted, false, false); + rmm::device_uvector d_unique_vertices(2 * d_src_v.size(), handle.get_stream()); + thrust::copy( + 
handle.get_thrust_policy(), d_src_v.begin(), d_src_v.end(), d_unique_vertices.begin()); + thrust::copy(handle.get_thrust_policy(), + d_dst_v.begin(), + d_dst_v.end(), + d_unique_vertices.begin() + d_src_v.size()); + thrust::sort(handle.get_thrust_policy(), d_unique_vertices.begin(), d_unique_vertices.end()); + + d_unique_vertices.resize(thrust::distance(d_unique_vertices.begin(), + thrust::unique(handle.get_thrust_policy(), + d_unique_vertices.begin(), + d_unique_vertices.end())), + handle.get_stream()); + auto h_src_v = cugraph::test::to_host(handle, d_src_v); auto h_dst_v = cugraph::test::to_host(handle, d_dst_v); auto h_weights_v = cugraph::test::to_host(handle, d_weights_v); - auto unique_vertices = cugraph::test::to_host(handle, d_vertices_v); + auto unique_vertices = cugraph::test::to_host(handle, d_unique_vertices); // Load edgelist from different threads. We'll use more threads than GPUs here for (int i = 0; i < num_threads; ++i) { @@ -293,13 +308,13 @@ class Tests_Multithreaded num_threads]() { auto thread_handle = instance_manager->get_handle(); - auto number_of_vertices = unique_vertices->size(); + auto number_of_vertices = unique_vertices.size(); std::vector my_vertex_list; my_vertex_list.reserve((number_of_vertices + num_threads - 1) / num_threads); for (size_t j = i; j < number_of_vertices; j += num_threads) { - my_vertex_list.push_back((*unique_vertices)[j]); + my_vertex_list.push_back(unique_vertices[j]); } rmm::device_uvector d_my_vertex_list(my_vertex_list.size(), @@ -312,7 +327,8 @@ class Tests_Multithreaded auto d_my_pageranks = pageranks_view.gather( thread_handle, raft::device_span{d_my_vertex_list.data(), d_my_vertex_list.size()}, - graph_view, + graph_view.get_vertex_partition_range_lasts(thread_handle), + graph_view.get_vertex_partition_view(thread_handle), renumber_map_view); std::vector my_pageranks(d_my_pageranks.size()); diff --git a/cpp/tests/mtmg/threaded_test_louvain.cu b/cpp/tests/mtmg/threaded_test_louvain.cu new file mode 100644 index 00000000000..c1395037646 --- /dev/null +++ b/cpp/tests/mtmg/threaded_test_louvain.cu @@ -0,0 +1,511 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include + +#include + +#include + +#include +#include + +struct Multithreaded_Usecase { + bool test_weighted{false}; + bool check_correctness{true}; +}; + +template +class Tests_Multithreaded + : public ::testing::TestWithParam> { + public: + Tests_Multithreaded() {} + + static void SetUpTestCase() {} + static void TearDownTestCase() {} + + virtual void SetUp() {} + virtual void TearDown() {} + + std::vector get_gpu_list() + { + int num_gpus_per_node{1}; + RAFT_CUDA_TRY(cudaGetDeviceCount(&num_gpus_per_node)); + + std::vector gpu_list(num_gpus_per_node); + std::iota(gpu_list.begin(), gpu_list.end(), 0); + + return gpu_list; + } + + template + void run_current_test( + std::tuple const& param, + std::vector gpu_list) + { + using edge_type_t = int32_t; + + constexpr bool renumber = true; + constexpr bool do_expensive_check = false; + + auto [multithreaded_usecase, input_usecase] = param; + + raft::handle_t handle{}; + + size_t max_level{1}; // Louvain is non-deterministic in MG if max_leve > 1 + weight_t threshold{1e-6}; + weight_t resolution{1}; + + size_t device_buffer_size{64 * 1024 * 1024}; + size_t thread_buffer_size{4 * 1024 * 1024}; + + int num_gpus = gpu_list.size(); + int num_threads = num_gpus * 4; + + cugraph::mtmg::resource_manager_t resource_manager; + + std::for_each(gpu_list.begin(), gpu_list.end(), [&resource_manager](int gpu_id) { + resource_manager.register_local_gpu(gpu_id, rmm::cuda_device_id{gpu_id}); + }); + + ncclUniqueId instance_manager_id; + ncclGetUniqueId(&instance_manager_id); + + auto instance_manager = resource_manager.create_instance_manager( + resource_manager.registered_ranks(), instance_manager_id); + + cugraph::mtmg::edgelist_t edgelist; + cugraph::mtmg::graph_t graph; + cugraph::mtmg::graph_view_t graph_view; + cugraph::mtmg::vertex_result_t louvain_clusters; + std::optional> renumber_map = + std::make_optional>(); + + auto edge_weights = multithreaded_usecase.test_weighted + ? 
std::make_optional, + weight_t>>() + : std::nullopt; + + // + // Simulate graph creation by spawning threads to walk through the + // local COO and add edges + // + std::vector running_threads; + + // Initialize shared edgelist object, one per GPU + for (int i = 0; i < num_gpus; ++i) { + running_threads.emplace_back([&instance_manager, + &edgelist, + device_buffer_size, + use_weight = true, + use_edge_id = false, + use_edge_type = false]() { + auto thread_handle = instance_manager->get_handle(); + + edgelist.set(thread_handle, device_buffer_size, use_weight, use_edge_id, use_edge_type); + }); + } + + // Wait for CPU threads to complete + std::for_each(running_threads.begin(), running_threads.end(), [](auto& t) { t.join(); }); + running_threads.resize(0); + instance_manager->reset_threads(); + + // Load SG edge list + auto [d_src_v, d_dst_v, d_weights_v, d_vertices_v, is_symmetric] = + input_usecase.template construct_edgelist( + handle, multithreaded_usecase.test_weighted, false, false); + + rmm::device_uvector d_unique_vertices(2 * d_src_v.size(), handle.get_stream()); + thrust::copy( + handle.get_thrust_policy(), d_src_v.begin(), d_src_v.end(), d_unique_vertices.begin()); + thrust::copy(handle.get_thrust_policy(), + d_dst_v.begin(), + d_dst_v.end(), + d_unique_vertices.begin() + d_src_v.size()); + thrust::sort(handle.get_thrust_policy(), d_unique_vertices.begin(), d_unique_vertices.end()); + + d_unique_vertices.resize(thrust::distance(d_unique_vertices.begin(), + thrust::unique(handle.get_thrust_policy(), + d_unique_vertices.begin(), + d_unique_vertices.end())), + handle.get_stream()); + + auto h_src_v = cugraph::test::to_host(handle, d_src_v); + auto h_dst_v = cugraph::test::to_host(handle, d_dst_v); + auto h_weights_v = cugraph::test::to_host(handle, d_weights_v); + auto unique_vertices = cugraph::test::to_host(handle, d_unique_vertices); + + // Load edgelist from different threads. We'll use more threads than GPUs here + for (int i = 0; i < num_threads; ++i) { + running_threads.emplace_back([&instance_manager, + thread_buffer_size, + &edgelist, + &h_src_v, + &h_dst_v, + &h_weights_v, + i, + num_threads]() { + auto thread_handle = instance_manager->get_handle(); + cugraph::mtmg::per_thread_edgelist_t + per_thread_edgelist(edgelist.get(thread_handle), thread_buffer_size); + + for (size_t j = i; j < h_src_v.size(); j += num_threads) { + per_thread_edgelist.append( + thread_handle, + h_src_v[j], + h_dst_v[j], + h_weights_v ? 
std::make_optional((*h_weights_v)[j]) : std::nullopt, + std::nullopt, + std::nullopt); + } + + per_thread_edgelist.flush(thread_handle); + }); + } + + // Wait for CPU threads to complete + std::for_each(running_threads.begin(), running_threads.end(), [](auto& t) { t.join(); }); + running_threads.resize(0); + instance_manager->reset_threads(); + + for (int i = 0; i < num_gpus; ++i) { + running_threads.emplace_back([&instance_manager, + &graph, + &edge_weights, + &edgelist, + &renumber_map, + is_symmetric = is_symmetric, + renumber, + do_expensive_check]() { + auto thread_handle = instance_manager->get_handle(); + + if (thread_handle.get_thread_rank() > 0) return; + + std::optional, + edge_t>> + edge_ids{std::nullopt}; + std::optional, + int32_t>> + edge_types{std::nullopt}; + + edgelist.finalize_buffer(thread_handle); + edgelist.consolidate_and_shuffle(thread_handle, false); + + cugraph::mtmg:: + create_graph_from_edgelist( + thread_handle, + edgelist, + cugraph::graph_properties_t{is_symmetric, true}, + renumber, + graph, + edge_weights, + edge_ids, + edge_types, + renumber_map, + do_expensive_check); + }); + } + + // Wait for CPU threads to complete + std::for_each(running_threads.begin(), running_threads.end(), [](auto& t) { t.join(); }); + running_threads.resize(0); + instance_manager->reset_threads(); + + graph_view = graph.view(); + auto renumber_map_view = renumber_map ? std::make_optional(renumber_map->view()) : std::nullopt; + + weight_t modularity{0}; + + for (int i = 0; i < num_threads; ++i) { + running_threads.emplace_back([&instance_manager, + &graph_view, + &edge_weights, + &louvain_clusters, + &modularity, + &renumber_map, + max_level, + threshold, + resolution]() { + auto thread_handle = instance_manager->get_handle(); + + if (thread_handle.get_thread_rank() > 0) return; + + rmm::device_uvector local_louvain_clusters( + graph_view.get(thread_handle).local_vertex_partition_range_size(), + thread_handle.get_stream()); + + std::tie(std::ignore, modularity) = cugraph::louvain( + thread_handle.raft_handle(), + graph_view.get(thread_handle), + edge_weights ? std::make_optional(edge_weights->get(thread_handle).view()) : std::nullopt, + local_louvain_clusters.data(), + max_level, + threshold, + resolution); + + louvain_clusters.set(thread_handle, std::move(local_louvain_clusters)); + }); + } + + // Wait for CPU threads to complete + std::for_each(running_threads.begin(), running_threads.end(), [](auto& t) { t.join(); }); + running_threads.resize(0); + instance_manager->reset_threads(); + + std::vector, std::vector>> computed_clusters_v; + std::mutex computed_clusters_lock{}; + + auto louvain_clusters_view = louvain_clusters.view(); + std::vector h_renumber_map; + + // Load computed_clusters_v from different threads. 
+ for (int i = 0; i < num_gpus; ++i) { + running_threads.emplace_back([&instance_manager, + &graph_view, + &renumber_map_view, + &louvain_clusters_view, + &computed_clusters_lock, + &computed_clusters_v, + &h_src_v, + &h_dst_v, + &h_weights_v, + &h_renumber_map, + &unique_vertices, + i, + num_threads]() { + auto thread_handle = instance_manager->get_handle(); + + auto number_of_vertices = unique_vertices.size(); + + std::vector my_vertex_list; + my_vertex_list.reserve((number_of_vertices + num_threads - 1) / num_threads); + + for (size_t j = i; j < number_of_vertices; j += num_threads) { + my_vertex_list.push_back(unique_vertices[j]); + } + + rmm::device_uvector d_my_vertex_list(my_vertex_list.size(), + thread_handle.raft_handle().get_stream()); + raft::update_device(d_my_vertex_list.data(), + my_vertex_list.data(), + my_vertex_list.size(), + thread_handle.raft_handle().get_stream()); + + auto d_my_clusters = louvain_clusters_view.gather( + thread_handle, + raft::device_span{d_my_vertex_list.data(), d_my_vertex_list.size()}, + graph_view.get_vertex_partition_range_lasts(thread_handle), + graph_view.get_vertex_partition_view(thread_handle), + renumber_map_view); + + std::vector my_clusters(d_my_clusters.size()); + raft::update_host(my_clusters.data(), + d_my_clusters.data(), + d_my_clusters.size(), + thread_handle.raft_handle().get_stream()); + + { + std::lock_guard lock(computed_clusters_lock); + computed_clusters_v.push_back( + std::make_tuple(std::move(my_vertex_list), std::move(my_clusters))); + } + + h_renumber_map = cugraph::test::to_host( + thread_handle.raft_handle(), + cugraph::test::device_allgatherv(thread_handle.raft_handle(), + renumber_map_view->get(thread_handle))); + }); + } + + // Wait for CPU threads to complete + std::for_each(running_threads.begin(), running_threads.end(), [](auto& t) { t.join(); }); + running_threads.resize(0); + instance_manager->reset_threads(); + + if (multithreaded_usecase.check_correctness) { + // Want to compare the results in computed_clusters_v with SG results + cugraph::graph_t sg_graph(handle); + std::optional< + cugraph::edge_property_t, weight_t>> + sg_edge_weights{std::nullopt}; + + for (int i = 0; i < num_gpus; ++i) { + running_threads.emplace_back( + [&instance_manager, &graph_view, &edge_weights, &sg_graph, &sg_edge_weights]() { + auto thread_handle = instance_manager->get_handle(); + + if (thread_handle.get_rank() == 0) { + std::tie(sg_graph, sg_edge_weights, std::ignore) = + cugraph::test::mg_graph_to_sg_graph( + thread_handle.raft_handle(), + graph_view.get(thread_handle), + edge_weights ? std::make_optional(edge_weights->get(thread_handle).view()) + : std::nullopt, + std::optional>{std::nullopt}, + false); // create an SG graph with MG graph vertex IDs + } else { + cugraph::test::mg_graph_to_sg_graph( + thread_handle.raft_handle(), + graph_view.get(thread_handle), + edge_weights ? std::make_optional(edge_weights->get(thread_handle).view()) + : std::nullopt, + std::optional>{std::nullopt}, + false); // create an SG graph with MG graph vertex IDs + } + }); + } + + // Wait for CPU threads to complete + std::for_each(running_threads.begin(), running_threads.end(), [](auto& t) { t.join(); }); + running_threads.resize(0); + instance_manager->reset_threads(); + + rmm::device_uvector sg_clusters(sg_graph.number_of_vertices(), handle.get_stream()); + weight_t modularity; + + std::tie(std::ignore, modularity) = cugraph::louvain( + handle, + sg_graph.view(), + sg_edge_weights ? 
std::make_optional(sg_edge_weights->view()) : std::nullopt, + sg_clusters.data(), + max_level, + threshold, + resolution); + + auto h_sg_clusters = cugraph::test::to_host(handle, sg_clusters); + std::map h_cluster_map; + std::map h_cluster_reverse_map; + + std::for_each( + computed_clusters_v.begin(), + computed_clusters_v.end(), + [&h_sg_clusters, &h_cluster_map, &h_renumber_map, &h_cluster_reverse_map](auto t1) { + std::for_each( + thrust::make_zip_iterator(std::get<0>(t1).begin(), std::get<1>(t1).begin()), + thrust::make_zip_iterator(std::get<0>(t1).end(), std::get<1>(t1).end()), + [&h_sg_clusters, &h_cluster_map, &h_renumber_map, &h_cluster_reverse_map](auto t2) { + vertex_t v = thrust::get<0>(t2); + vertex_t c = thrust::get<1>(t2); + + auto pos = std::find(h_renumber_map.begin(), h_renumber_map.end(), v); + auto offset = std::distance(h_renumber_map.begin(), pos); + + auto cluster_pos = h_cluster_map.find(c); + if (cluster_pos == h_cluster_map.end()) { + auto reverse_pos = h_cluster_reverse_map.find(h_sg_clusters[offset]); + + ASSERT_TRUE(reverse_pos != h_cluster_map.end()) << "two different cluster mappings"; + + h_cluster_map.insert(std::make_pair(c, h_sg_clusters[offset])); + h_cluster_reverse_map.insert(std::make_pair(h_sg_clusters[offset], c)); + } else { + ASSERT_EQ(cluster_pos->second, h_sg_clusters[offset]) + << "vertex " << v << ", offset = " << offset + << ", SG cluster = " << h_sg_clusters[offset] << ", mtmg cluster = " << c + << ", mapped value = " << cluster_pos->second; + } + }); + }); + } + } +}; + +using Tests_Multithreaded_File = Tests_Multithreaded; +using Tests_Multithreaded_Rmat = Tests_Multithreaded; + +// FIXME: add tests for type combinations +TEST_P(Tests_Multithreaded_File, CheckInt32Int32FloatFloat) +{ + run_current_test( + override_File_Usecase_with_cmd_line_arguments(GetParam()), std::vector{{0, 1}}); +} + +TEST_P(Tests_Multithreaded_Rmat, CheckInt32Int32FloatFloat) +{ + run_current_test( + override_Rmat_Usecase_with_cmd_line_arguments(GetParam()), std::vector{{0, 1}}); +} + +INSTANTIATE_TEST_SUITE_P(file_test, + Tests_Multithreaded_File, + ::testing::Combine( + // enable correctness checks + ::testing::Values(Multithreaded_Usecase{true, true}), + ::testing::Values(cugraph::test::File_Usecase("karate.csv"), + cugraph::test::File_Usecase("dolphins.csv")))); + +INSTANTIATE_TEST_SUITE_P( + rmat_small_test, + Tests_Multithreaded_Rmat, + ::testing::Combine( + // enable correctness checks + ::testing::Values(Multithreaded_Usecase{true, true}), + //::testing::Values(cugraph::test::Rmat_Usecase(10, 16, 0.57, 0.19, 0.19, 0, false, false)))); + ::testing::Values(cugraph::test::Rmat_Usecase(5, 8, 0.57, 0.19, 0.19, 0, false, false)))); + +INSTANTIATE_TEST_SUITE_P( + file_benchmark_test, /* note that the test filename can be overridden in benchmarking (with + --gtest_filter to select only the file_benchmark_test with a specific + vertex & edge type combination) by command line arguments and do not + include more than one File_Usecase that differ only in filename + (to avoid running same benchmarks more than once) */ + Tests_Multithreaded_File, + ::testing::Combine( + // disable correctness checks + ::testing::Values(Multithreaded_Usecase{true, false}), + ::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx")))); + +INSTANTIATE_TEST_SUITE_P( + rmat_benchmark_test, /* note that scale & edge factor can be overridden in benchmarking (with + --gtest_filter to select only the rmat_benchmark_test with a specific + vertex & edge type combination) by command 
line arguments and do not + include more than one Rmat_Usecase that differ only in scale or edge + factor (to avoid running same benchmarks more than once) */ + Tests_Multithreaded_Rmat, + ::testing::Combine( + // disable correctness checks for large graphs + ::testing::Values(Multithreaded_Usecase{true, false}), + ::testing::Values(cugraph::test::Rmat_Usecase(10, 16, 0.57, 0.19, 0.19, 0, false, false)))); + +CUGRAPH_TEST_PROGRAM_MAIN() diff --git a/cpp/tests/prims/mg_count_if_e.cu b/cpp/tests/prims/mg_count_if_e.cu index 449aa728d87..03bf8ae0ae5 100644 --- a/cpp/tests/prims/mg_count_if_e.cu +++ b/cpp/tests/prims/mg_count_if_e.cu @@ -53,8 +53,9 @@ #include struct Prims_Usecase { - bool check_correctness{true}; bool test_weighted{false}; + bool edge_masking{false}; + bool check_correctness{true}; }; template @@ -102,6 +103,13 @@ class Tests_MGCountIfE auto mg_graph_view = mg_graph.view(); + std::optional> edge_mask{std::nullopt}; + if (prims_usecase.edge_masking) { + edge_mask = + cugraph::test::generate::edge_property(*handle_, mg_graph_view, 2); + mg_graph_view.attach_edge_mask((*edge_mask).view()); + } + // 2. run MG count_if_e const int hash_bin_count = 5; @@ -148,19 +156,19 @@ class Tests_MGCountIfE (*mg_renumber_map).size()), false); - auto sg_graph_view = sg_graph.view(); + if (handle_->get_comms().get_rank() == 0) { + auto sg_graph_view = sg_graph.view(); - auto sg_vertex_prop = cugraph::test::generate::vertex_property( - *handle_, - thrust::make_counting_iterator(sg_graph_view.local_vertex_partition_range_first()), - thrust::make_counting_iterator(sg_graph_view.local_vertex_partition_range_last()), - hash_bin_count); - auto sg_src_prop = cugraph::test::generate::src_property( - *handle_, sg_graph_view, sg_vertex_prop); - auto sg_dst_prop = cugraph::test::generate::dst_property( - *handle_, sg_graph_view, sg_vertex_prop); + auto sg_vertex_prop = cugraph::test::generate::vertex_property( + *handle_, + thrust::make_counting_iterator(sg_graph_view.local_vertex_partition_range_first()), + thrust::make_counting_iterator(sg_graph_view.local_vertex_partition_range_last()), + hash_bin_count); + auto sg_src_prop = cugraph::test::generate::src_property( + *handle_, sg_graph_view, sg_vertex_prop); + auto sg_dst_prop = cugraph::test::generate::dst_property( + *handle_, sg_graph_view, sg_vertex_prop); - if (handle_->get_comms().get_rank() == 0) { auto expected_result = count_if_e( *handle_, sg_graph_view, @@ -312,7 +320,10 @@ INSTANTIATE_TEST_SUITE_P( file_test, Tests_MGCountIfE_File, ::testing::Combine( - ::testing::Values(Prims_Usecase{true}), + ::testing::Values(Prims_Usecase{false, false, true}, + Prims_Usecase{false, true, true}, + Prims_Usecase{true, false, true}, + Prims_Usecase{true, true, true}), ::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"), cugraph::test::File_Usecase("test/datasets/web-Google.mtx"), cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"), @@ -320,7 +331,10 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P(rmat_small_test, Tests_MGCountIfE_Rmat, - ::testing::Combine(::testing::Values(Prims_Usecase{true}), + ::testing::Combine(::testing::Values(Prims_Usecase{false, false, true}, + Prims_Usecase{false, true, true}, + Prims_Usecase{true, false, true}, + Prims_Usecase{true, true, true}), ::testing::Values(cugraph::test::Rmat_Usecase( 10, 16, 0.57, 0.19, 0.19, 0, false, false)))); @@ -332,7 +346,10 @@ INSTANTIATE_TEST_SUITE_P( factor (to avoid running same benchmarks more than once) */ Tests_MGCountIfE_Rmat, ::testing::Combine( - 
::testing::Values(Prims_Usecase{false}), + ::testing::Values(Prims_Usecase{false, false, false}, + Prims_Usecase{false, true, false}, + Prims_Usecase{true, false, false}, + Prims_Usecase{true, true, false}), ::testing::Values(cugraph::test::Rmat_Usecase(20, 32, 0.57, 0.19, 0.19, 0, false, false)))); CUGRAPH_MG_TEST_PROGRAM_MAIN() diff --git a/cpp/tests/prims/mg_per_v_pair_transform_dst_nbr_intersection.cu b/cpp/tests/prims/mg_per_v_pair_transform_dst_nbr_intersection.cu index a3edb1f6372..ac73c446d89 100644 --- a/cpp/tests/prims/mg_per_v_pair_transform_dst_nbr_intersection.cu +++ b/cpp/tests/prims/mg_per_v_pair_transform_dst_nbr_intersection.cu @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "property_generator.cuh" + #include #include #include @@ -116,29 +118,8 @@ class Tests_MGPerVPairTransformDstNbrIntersection std::optional> edge_mask{std::nullopt}; if (prims_usecase.edge_masking) { - cugraph::edge_src_property_t edge_src_renumber_map( - *handle_, mg_graph_view); - cugraph::edge_dst_property_t edge_dst_renumber_map( - *handle_, mg_graph_view); - cugraph::update_edge_src_property( - *handle_, mg_graph_view, (*mg_renumber_map).begin(), edge_src_renumber_map); - cugraph::update_edge_dst_property( - *handle_, mg_graph_view, (*mg_renumber_map).begin(), edge_dst_renumber_map); - - edge_mask = cugraph::edge_property_t(*handle_, mg_graph_view); - - cugraph::transform_e( - *handle_, - mg_graph_view, - edge_src_renumber_map.view(), - edge_dst_renumber_map.view(), - cugraph::edge_dummy_property_t{}.view(), - [] __device__(auto src, auto dst, auto src_property, auto dst_property, thrust::nullopt_t) { - return ((src_property % 2 == 0) && (dst_property % 2 == 0)) - ? false - : true; // mask out the edges with even unrenumbered src & dst vertex IDs - }, - (*edge_mask).mutable_view()); + edge_mask = + cugraph::test::generate::edge_property(*handle_, mg_graph_view, 2); mg_graph_view.attach_edge_mask((*edge_mask).view()); } @@ -257,42 +238,6 @@ class Tests_MGPerVPairTransformDstNbrIntersection if (handle_->get_comms().get_rank() == 0) { auto sg_graph_view = sg_graph.view(); - if (prims_usecase.edge_masking) { - rmm::device_uvector srcs(0, handle_->get_stream()); - rmm::device_uvector dsts(0, handle_->get_stream()); - std::tie(srcs, dsts, std::ignore, std::ignore) = - cugraph::decompress_to_edgelist( - *handle_, sg_graph_view, std::nullopt, std::nullopt, std::nullopt); - auto edge_first = thrust::make_zip_iterator(srcs.begin(), dsts.begin()); - srcs.resize(thrust::distance(edge_first, - thrust::remove_if(handle_->get_thrust_policy(), - edge_first, - edge_first + srcs.size(), - [] __device__(auto pair) { - return (thrust::get<0>(pair) % 2 == 0) && - (thrust::get<1>(pair) % 2 == 0); - })), - handle_->get_stream()); - dsts.resize(srcs.size(), handle_->get_stream()); - rmm::device_uvector vertices(sg_graph_view.number_of_vertices(), - handle_->get_stream()); - thrust::sequence( - handle_->get_thrust_policy(), vertices.begin(), vertices.end(), vertex_t{0}); - std::tie(sg_graph, std::ignore, std::ignore, std::ignore, std::ignore) = cugraph:: - create_graph_from_edgelist( - *handle_, - std::move(vertices), - std::move(srcs), - std::move(dsts), - std::nullopt, - std::nullopt, - std::nullopt, - cugraph::graph_properties_t{sg_graph_view.is_symmetric(), - sg_graph_view.is_multigraph()}, - false); - sg_graph_view = sg_graph.view(); - } - auto sg_result_buffer = cugraph::allocate_dataframe_buffer>( cugraph::size_dataframe_buffer(mg_aggregate_vertex_pair_buffer), handle_->get_stream()); auto 
sg_out_degrees = sg_graph_view.compute_out_degrees(*handle_); diff --git a/cpp/tests/prims/mg_per_v_random_select_transform_outgoing_e.cu b/cpp/tests/prims/mg_per_v_random_select_transform_outgoing_e.cu index eb6a8fd5cb6..2b9e9aafa3f 100644 --- a/cpp/tests/prims/mg_per_v_random_select_transform_outgoing_e.cu +++ b/cpp/tests/prims/mg_per_v_random_select_transform_outgoing_e.cu @@ -324,8 +324,9 @@ class Tests_MGPerVRandomSelectTransformOutgoingE with_replacement = prims_usecase.with_replacement, invalid_value = invalid_value ? thrust::make_optional(*invalid_value) : thrust::nullopt, - property_transform = cugraph::test::detail::property_transform{ - hash_bin_count}] __device__(size_t i) { + property_transform = + cugraph::test::detail::vertex_property_transform{ + hash_bin_count}] __device__(size_t i) { auto v = *(frontier_vertex_first + i); // check sample_offsets diff --git a/cpp/tests/prims/mg_transform_e.cu b/cpp/tests/prims/mg_transform_e.cu index 24deaad810a..e9be80f1f7d 100644 --- a/cpp/tests/prims/mg_transform_e.cu +++ b/cpp/tests/prims/mg_transform_e.cu @@ -52,6 +52,7 @@ struct Prims_Usecase { bool use_edgelist{false}; + bool edge_masking{false}; bool check_correctness{true}; }; @@ -100,6 +101,13 @@ class Tests_MGTransformE auto mg_graph_view = mg_graph.view(); + std::optional> edge_mask{std::nullopt}; + if (prims_usecase.edge_masking) { + edge_mask = + cugraph::test::generate::edge_property(*handle_, mg_graph_view, 2); + mg_graph_view.attach_edge_mask((*edge_mask).view()); + } + // 2. run MG transform_e const int hash_bin_count = 5; @@ -439,7 +447,10 @@ INSTANTIATE_TEST_SUITE_P( file_test, Tests_MGTransformE_File, ::testing::Combine( - ::testing::Values(Prims_Usecase{false, true}, Prims_Usecase{true, true}), + ::testing::Values(Prims_Usecase{false, false, true}, + Prims_Usecase{false, true, true}, + Prims_Usecase{true, false, true}, + Prims_Usecase{true, true, true}), ::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"), cugraph::test::File_Usecase("test/datasets/web-Google.mtx"), cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"), @@ -447,8 +458,10 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P(rmat_small_test, Tests_MGTransformE_Rmat, - ::testing::Combine(::testing::Values(Prims_Usecase{false, true}, - Prims_Usecase{true, true}), + ::testing::Combine(::testing::Values(Prims_Usecase{false, false, true}, + Prims_Usecase{false, true, true}, + Prims_Usecase{true, false, true}, + Prims_Usecase{true, true, true}), ::testing::Values(cugraph::test::Rmat_Usecase( 10, 16, 0.57, 0.19, 0.19, 0, false, false)))); @@ -460,7 +473,10 @@ INSTANTIATE_TEST_SUITE_P( factor (to avoid running same benchmarks more than once) */ Tests_MGTransformE_Rmat, ::testing::Combine( - ::testing::Values(Prims_Usecase{false, false}, Prims_Usecase{true, false}), + ::testing::Values(Prims_Usecase{false, false, false}, + Prims_Usecase{false, true, false}, + Prims_Usecase{true, false, false}, + Prims_Usecase{true, true, false}), ::testing::Values(cugraph::test::Rmat_Usecase(20, 32, 0.57, 0.19, 0.19, 0, false, false)))); CUGRAPH_MG_TEST_PROGRAM_MAIN() diff --git a/cpp/tests/prims/mg_transform_reduce_e.cu b/cpp/tests/prims/mg_transform_reduce_e.cu index 79aa3da54df..c4ae11ab7c9 100644 --- a/cpp/tests/prims/mg_transform_reduce_e.cu +++ b/cpp/tests/prims/mg_transform_reduce_e.cu @@ -91,8 +91,9 @@ struct result_compare> { }; struct Prims_Usecase { - bool check_correctness{true}; bool test_weighted{false}; + bool edge_masking{false}; + bool check_correctness{true}; }; template @@ 
-141,6 +142,13 @@ class Tests_MGTransformReduceE auto mg_graph_view = mg_graph.view(); + std::optional> edge_mask{std::nullopt}; + if (prims_usecase.edge_masking) { + edge_mask = + cugraph::test::generate::edge_property(*handle_, mg_graph_view, 2); + mg_graph_view.attach_edge_mask((*edge_mask).view()); + } + // 2. run MG transform reduce const int hash_bin_count = 5; @@ -365,7 +373,10 @@ INSTANTIATE_TEST_SUITE_P( file_test, Tests_MGTransformReduceE_File, ::testing::Combine( - ::testing::Values(Prims_Usecase{true}), + ::testing::Values(Prims_Usecase{false, false, true}, + Prims_Usecase{false, true, true}, + Prims_Usecase{true, false, true}, + Prims_Usecase{true, true, true}), ::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"), cugraph::test::File_Usecase("test/datasets/web-Google.mtx"), cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"), @@ -373,7 +384,10 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P(rmat_small_test, Tests_MGTransformReduceE_Rmat, - ::testing::Combine(::testing::Values(Prims_Usecase{true}), + ::testing::Combine(::testing::Values(Prims_Usecase{false, false, true}, + Prims_Usecase{false, true, true}, + Prims_Usecase{true, false, true}, + Prims_Usecase{true, true, true}), ::testing::Values(cugraph::test::Rmat_Usecase( 10, 16, 0.57, 0.19, 0.19, 0, false, false)))); @@ -385,7 +399,10 @@ INSTANTIATE_TEST_SUITE_P( factor (to avoid running same benchmarks more than once) */ Tests_MGTransformReduceE_Rmat, ::testing::Combine( - ::testing::Values(Prims_Usecase{false}), + ::testing::Values(Prims_Usecase{false, false, false}, + Prims_Usecase{false, true, false}, + Prims_Usecase{true, false, false}, + Prims_Usecase{true, true, false}), ::testing::Values(cugraph::test::Rmat_Usecase(20, 32, 0.57, 0.19, 0.19, 0, false, false)))); CUGRAPH_MG_TEST_PROGRAM_MAIN() diff --git a/cpp/tests/prims/property_generator.cuh b/cpp/tests/prims/property_generator.cuh index e7264cd276f..680455eda79 100644 --- a/cpp/tests/prims/property_generator.cuh +++ b/cpp/tests/prims/property_generator.cuh @@ -15,6 +15,7 @@ */ #pragma once +#include #include #include @@ -61,7 +62,7 @@ __host__ __device__ auto make_property_value(T val) } template -struct property_transform { +struct vertex_property_transform { int32_t mod{}; constexpr __device__ property_t operator()(vertex_t v) const @@ -73,6 +74,20 @@ struct property_transform { } }; +template +struct edge_property_transform { + int32_t mod{}; + + constexpr __device__ property_t operator()( + vertex_t src, vertex_t dst, thrust::nullopt_t, thrust::nullopt_t, thrust::nullopt_t) const + { + static_assert(cugraph::is_thrust_tuple_of_arithmetic::value || + std::is_arithmetic_v); + cuco::detail::MurmurHash3_32 hash_func{}; + return make_property_value(hash_func(src + dst) % mod); + } +}; + } // namespace detail template @@ -96,7 +111,7 @@ struct generate { labels.begin(), labels.end(), cugraph::get_dataframe_buffer_begin(data), - detail::property_transform{hash_bin_count}); + detail::vertex_property_transform{hash_bin_count}); return data; } @@ -111,7 +126,7 @@ struct generate { begin, end, cugraph::get_dataframe_buffer_begin(data), - detail::property_transform{hash_bin_count}); + detail::vertex_property_transform{hash_bin_count}); return data; } @@ -138,6 +153,22 @@ struct generate { handle, graph_view, cugraph::get_dataframe_buffer_begin(property), output_property); return output_property; } + + template + static auto edge_property(raft::handle_t const& handle, + graph_view_type const& graph_view, + int32_t hash_bin_count) + { + 
auto output_property = cugraph::edge_property_t(handle, graph_view); + cugraph::transform_e(handle, + graph_view, + cugraph::edge_src_dummy_property_t{}.view(), + cugraph::edge_dst_dummy_property_t{}.view(), + cugraph::edge_dummy_property_t{}.view(), + detail::edge_property_transform{hash_bin_count}, + output_property.mutable_view()); + return output_property; + } }; } // namespace test diff --git a/cpp/tests/sampling/sampling_post_processing_test.cu b/cpp/tests/sampling/sampling_post_processing_test.cu index e5267d75ac2..6be735c3482 100644 --- a/cpp/tests/sampling/sampling_post_processing_test.cu +++ b/cpp/tests/sampling/sampling_post_processing_test.cu @@ -38,6 +38,8 @@ #include #include +#include + struct SamplingPostProcessing_Usecase { size_t num_labels{}; size_t num_seeds_per_label{}; @@ -318,15 +320,16 @@ bool check_renumber_map_invariants( auto renumbered_merged_vertex_first = thrust::make_transform_iterator( merged_vertices.begin(), - [sorted_org_vertices = - raft::device_span(sorted_org_vertices.data(), sorted_org_vertices.size()), - matching_renumbered_vertices = raft::device_span( - matching_renumbered_vertices.data(), - matching_renumbered_vertices.size())] __device__(vertex_t major) { - auto it = thrust::lower_bound( - thrust::seq, sorted_org_vertices.begin(), sorted_org_vertices.end(), major); - return matching_renumbered_vertices[thrust::distance(sorted_org_vertices.begin(), it)]; - }); + cuda::proclaim_return_type( + [sorted_org_vertices = raft::device_span(sorted_org_vertices.data(), + sorted_org_vertices.size()), + matching_renumbered_vertices = raft::device_span( + matching_renumbered_vertices.data(), + matching_renumbered_vertices.size())] __device__(vertex_t major) { + auto it = thrust::lower_bound( + thrust::seq, sorted_org_vertices.begin(), sorted_org_vertices.end(), major); + return matching_renumbered_vertices[thrust::distance(sorted_org_vertices.begin(), it)]; + })); thrust::reduce_by_key(handle.get_thrust_policy(), sort_key_first, @@ -1020,23 +1023,24 @@ class Tests_SamplingPostProcessing ? this_label_output_edgelist_srcs.begin() : this_label_output_edgelist_dsts.begin()) + old_size, - [offsets = raft::device_span(d_offsets.data(), d_offsets.size()), - nzd_vertices = - renumbered_and_compressed_nzd_vertices - ? thrust::make_optional>( - (*renumbered_and_compressed_nzd_vertices).data() + offset_start_offset, - (offset_end_offset - offset_start_offset) - 1) - : thrust::nullopt, - base_v] __device__(size_t i) { - auto idx = static_cast(thrust::distance( - offsets.begin() + 1, - thrust::upper_bound(thrust::seq, offsets.begin() + 1, offsets.end(), i))); - if (nzd_vertices) { - return (*nzd_vertices)[idx]; - } else { - return base_v + static_cast(idx); - } - }); + cuda::proclaim_return_type( + [offsets = raft::device_span(d_offsets.data(), d_offsets.size()), + nzd_vertices = + renumbered_and_compressed_nzd_vertices + ? 
thrust::make_optional>( + (*renumbered_and_compressed_nzd_vertices).data() + offset_start_offset, + (offset_end_offset - offset_start_offset) - 1) + : thrust::nullopt, + base_v] __device__(size_t i) { + auto idx = static_cast(thrust::distance( + offsets.begin() + 1, + thrust::upper_bound(thrust::seq, offsets.begin() + 1, offsets.end(), i))); + if (nzd_vertices) { + return (*nzd_vertices)[idx]; + } else { + return base_v + static_cast(idx); + } + })); thrust::copy(handle.get_thrust_policy(), renumbered_and_compressed_edgelist_minors.begin() + h_offsets[0], renumbered_and_compressed_edgelist_minors.begin() + h_offsets.back(), diff --git a/cpp/tests/structure/mg_select_random_vertices_test.cpp b/cpp/tests/structure/mg_select_random_vertices_test.cpp index 79c50301922..e49e1ebcb99 100644 --- a/cpp/tests/structure/mg_select_random_vertices_test.cpp +++ b/cpp/tests/structure/mg_select_random_vertices_test.cpp @@ -90,7 +90,7 @@ class Tests_MGSelectRandomVertices std::iota( h_given_set.begin(), h_given_set.end(), mg_graph_view.local_vertex_partition_range_first()); std::shuffle(h_given_set.begin(), h_given_set.end(), std::mt19937{std::random_device{}()}); - h_given_set.resize(std::rand() % mg_graph_view.local_vertex_partition_range_size() + 1); + h_given_set.resize(std::rand() % (mg_graph_view.local_vertex_partition_range_size() + 1)); // Compute size of the distributed vertex set int num_of_elements_in_given_set = static_cast(h_given_set.size()); @@ -105,7 +105,7 @@ class Tests_MGSelectRandomVertices size_t select_count = num_of_elements_in_given_set > select_random_vertices_usecase.select_count ? select_random_vertices_usecase.select_count - : std::rand() % num_of_elements_in_given_set + 1; + : std::rand() % (num_of_elements_in_given_set + 1); for (int idx = 0; idx < with_replacement_flags.size(); idx++) { bool with_replacement = with_replacement_flags[idx]; diff --git a/cpp/tests/utilities/test_graphs.hpp b/cpp/tests/utilities/test_graphs.hpp index 16c9d3ed145..8cc87b26f1d 100644 --- a/cpp/tests/utilities/test_graphs.hpp +++ b/cpp/tests/utilities/test_graphs.hpp @@ -621,9 +621,25 @@ construct_graph(raft::handle_t const& handle, CUGRAPH_EXPECTS(d_src_v.size() <= static_cast(std::numeric_limits::max()), "Invalid template parameter: edge_t overflow."); - if (drop_self_loops) { remove_self_loops(handle, d_src_v, d_dst_v, d_weights_v); } + if (drop_self_loops) { + std::tie(d_src_v, d_dst_v, d_weights_v, std::ignore, std::ignore) = + cugraph::remove_self_loops(handle, + std::move(d_src_v), + std::move(d_dst_v), + std::move(d_weights_v), + std::nullopt, + std::nullopt); + } - if (drop_multi_edges) { sort_and_remove_multi_edges(handle, d_src_v, d_dst_v, d_weights_v); } + if (drop_multi_edges) { + std::tie(d_src_v, d_dst_v, d_weights_v, std::ignore, std::ignore) = + cugraph::remove_multi_edges(handle, + std::move(d_src_v), + std::move(d_dst_v), + std::move(d_weights_v), + std::nullopt, + std::nullopt); + } graph_t graph(handle); std::optional< diff --git a/cpp/tests/utilities/thrust_wrapper.cu b/cpp/tests/utilities/thrust_wrapper.cu index cb7e6f1bd66..2daf250b4a2 100644 --- a/cpp/tests/utilities/thrust_wrapper.cu +++ b/cpp/tests/utilities/thrust_wrapper.cu @@ -206,131 +206,5 @@ template void populate_vertex_ids(raft::handle_t const& handle, rmm::device_uvector& d_vertices_v, int64_t vertex_id_offset); -template -void remove_self_loops(raft::handle_t const& handle, - rmm::device_uvector& d_src_v /* [INOUT] */, - rmm::device_uvector& d_dst_v /* [INOUT] */, - std::optional>& d_weight_v /* [INOUT] */) -{ - 
if (d_weight_v) { - auto edge_first = thrust::make_zip_iterator( - thrust::make_tuple(d_src_v.begin(), d_dst_v.begin(), (*d_weight_v).begin())); - d_src_v.resize( - thrust::distance(edge_first, - thrust::remove_if( - handle.get_thrust_policy(), - edge_first, - edge_first + d_src_v.size(), - [] __device__(auto e) { return thrust::get<0>(e) == thrust::get<1>(e); })), - handle.get_stream()); - d_dst_v.resize(d_src_v.size(), handle.get_stream()); - (*d_weight_v).resize(d_src_v.size(), handle.get_stream()); - } else { - auto edge_first = - thrust::make_zip_iterator(thrust::make_tuple(d_src_v.begin(), d_dst_v.begin())); - d_src_v.resize( - thrust::distance(edge_first, - thrust::remove_if( - handle.get_thrust_policy(), - edge_first, - edge_first + d_src_v.size(), - [] __device__(auto e) { return thrust::get<0>(e) == thrust::get<1>(e); })), - handle.get_stream()); - d_dst_v.resize(d_src_v.size(), handle.get_stream()); - } - - d_src_v.shrink_to_fit(handle.get_stream()); - d_dst_v.shrink_to_fit(handle.get_stream()); - if (d_weight_v) { (*d_weight_v).shrink_to_fit(handle.get_stream()); } -} - -template void remove_self_loops( - raft::handle_t const& handle, - rmm::device_uvector& d_src_v /* [INOUT] */, - rmm::device_uvector& d_dst_v /* [INOUT] */, - std::optional>& d_weight_v /* [INOUT] */); - -template void remove_self_loops( - raft::handle_t const& handle, - rmm::device_uvector& d_src_v /* [INOUT] */, - rmm::device_uvector& d_dst_v /* [INOUT] */, - std::optional>& d_weight_v /* [INOUT] */); - -template void remove_self_loops( - raft::handle_t const& handle, - rmm::device_uvector& d_src_v /* [INOUT] */, - rmm::device_uvector& d_dst_v /* [INOUT] */, - std::optional>& d_weight_v /* [INOUT] */); - -template void remove_self_loops( - raft::handle_t const& handle, - rmm::device_uvector& d_src_v /* [INOUT] */, - rmm::device_uvector& d_dst_v /* [INOUT] */, - std::optional>& d_weight_v /* [INOUT] */); - -template -void sort_and_remove_multi_edges( - raft::handle_t const& handle, - rmm::device_uvector& d_src_v /* [INOUT] */, - rmm::device_uvector& d_dst_v /* [INOUT] */, - std::optional>& d_weight_v /* [INOUT] */) -{ - if (d_weight_v) { - auto edge_first = thrust::make_zip_iterator( - thrust::make_tuple(d_src_v.begin(), d_dst_v.begin(), (*d_weight_v).begin())); - thrust::sort(handle.get_thrust_policy(), edge_first, edge_first + d_src_v.size()); - d_src_v.resize( - thrust::distance(edge_first, - thrust::unique(handle.get_thrust_policy(), - edge_first, - edge_first + d_src_v.size(), - [] __device__(auto lhs, auto rhs) { - return (thrust::get<0>(lhs) == thrust::get<0>(rhs)) && - (thrust::get<1>(lhs) == thrust::get<1>(rhs)); - })), - handle.get_stream()); - d_dst_v.resize(d_src_v.size(), handle.get_stream()); - (*d_weight_v).resize(d_src_v.size(), handle.get_stream()); - } else { - auto edge_first = - thrust::make_zip_iterator(thrust::make_tuple(d_src_v.begin(), d_dst_v.begin())); - thrust::sort(handle.get_thrust_policy(), edge_first, edge_first + d_src_v.size()); - d_src_v.resize( - thrust::distance( - edge_first, - thrust::unique(handle.get_thrust_policy(), edge_first, edge_first + d_src_v.size())), - handle.get_stream()); - d_dst_v.resize(d_src_v.size(), handle.get_stream()); - } - - d_src_v.shrink_to_fit(handle.get_stream()); - d_dst_v.shrink_to_fit(handle.get_stream()); - if (d_weight_v) { (*d_weight_v).shrink_to_fit(handle.get_stream()); } -} - -template void sort_and_remove_multi_edges( - raft::handle_t const& handle, - rmm::device_uvector& d_src_v /* [INOUT] */, - rmm::device_uvector& d_dst_v /* [INOUT] 
*/, - std::optional>& d_weight_v /* [INOUT] */); - -template void sort_and_remove_multi_edges( - raft::handle_t const& handle, - rmm::device_uvector& d_src_v /* [INOUT] */, - rmm::device_uvector& d_dst_v /* [INOUT] */, - std::optional>& d_weight_v /* [INOUT] */); - -template void sort_and_remove_multi_edges( - raft::handle_t const& handle, - rmm::device_uvector& d_src_v /* [INOUT] */, - rmm::device_uvector& d_dst_v /* [INOUT] */, - std::optional>& d_weight_v /* [INOUT] */); - -template void sort_and_remove_multi_edges( - raft::handle_t const& handle, - rmm::device_uvector& d_src_v /* [INOUT] */, - rmm::device_uvector& d_dst_v /* [INOUT] */, - std::optional>& d_weight_v /* [INOUT] */); - } // namespace test } // namespace cugraph diff --git a/cpp/tests/utilities/thrust_wrapper.hpp b/cpp/tests/utilities/thrust_wrapper.hpp index eead4dc268f..fb82d781198 100644 --- a/cpp/tests/utilities/thrust_wrapper.hpp +++ b/cpp/tests/utilities/thrust_wrapper.hpp @@ -46,18 +46,5 @@ void populate_vertex_ids(raft::handle_t const& handle, rmm::device_uvector& d_vertices_v /* [INOUT] */, vertex_t vertex_id_offset); -template -void remove_self_loops(raft::handle_t const& handle, - rmm::device_uvector& d_src_v /* [INOUT] */, - rmm::device_uvector& d_dst_v /* [INOUT] */, - std::optional>& d_weight_v /* [INOUT] */); - -template -void sort_and_remove_multi_edges( - raft::handle_t const& handle, - rmm::device_uvector& d_src_v /* [INOUT] */, - rmm::device_uvector& d_dst_v /* [INOUT] */, - std::optional>& d_weight_v /* [INOUT] */); - } // namespace test } // namespace cugraph diff --git a/dependencies.yaml b/dependencies.yaml index baa08c37413..b5c1fb2fa2d 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -511,6 +511,7 @@ dependencies: packages: # pip recognizes the index as a global option for the requirements.txt file - --extra-index-url=https://pypi.nvidia.com + - --extra-index-url=https://pypi.anaconda.org/rapidsai-wheels-nightly/simple specific: - output_types: [requirements, pyproject] matrices: @@ -536,6 +537,7 @@ dependencies: packages: # pip recognizes the index as a global option for the requirements.txt file - --extra-index-url=https://pypi.nvidia.com + - --extra-index-url=https://pypi.anaconda.org/rapidsai-wheels-nightly/simple specific: - output_types: [requirements, pyproject] matrices: @@ -561,6 +563,7 @@ dependencies: packages: # pip recognizes the index as a global option for the requirements.txt file - --extra-index-url=https://pypi.nvidia.com + - --extra-index-url=https://pypi.anaconda.org/rapidsai-wheels-nightly/simple specific: - output_types: [requirements, pyproject] matrices: @@ -586,6 +589,7 @@ dependencies: packages: # pip recognizes the index as a global option for the requirements.txt file - --extra-index-url=https://pypi.nvidia.com + - --extra-index-url=https://pypi.anaconda.org/rapidsai-wheels-nightly/simple specific: - output_types: [requirements, pyproject] matrices: @@ -611,6 +615,7 @@ dependencies: packages: # pip recognizes the index as a global option for the requirements.txt file - --extra-index-url=https://pypi.nvidia.com + - --extra-index-url=https://pypi.anaconda.org/rapidsai-wheels-nightly/simple specific: - output_types: [requirements, pyproject] matrices: @@ -636,6 +641,7 @@ dependencies: packages: # pip recognizes the index as a global option for the requirements.txt file - --extra-index-url=https://pypi.nvidia.com + - --extra-index-url=https://pypi.anaconda.org/rapidsai-wheels-nightly/simple specific: - output_types: [requirements, pyproject] matrices: @@ -661,6 
+667,7 @@ dependencies: packages: # pip recognizes the index as a global option for the requirements.txt file - --extra-index-url=https://pypi.nvidia.com + - --extra-index-url=https://pypi.anaconda.org/rapidsai-wheels-nightly/simple specific: - output_types: [requirements, pyproject] matrices: diff --git a/docs/cugraph/source/installation/source_build.md b/docs/cugraph/source/installation/source_build.md index f5ee0741da6..1a129d45295 100644 --- a/docs/cugraph/source/installation/source_build.md +++ b/docs/cugraph/source/installation/source_build.md @@ -1,53 +1,46 @@ # Building from Source -The following instructions are for users wishing to build cuGraph from source code. These instructions are tested on supported distributions of Linux, CUDA, and Python - See [RAPIDS Getting Started](https://rapids.ai/start.html) for list of supported environments. Other operating systems _might be_ compatible, but are not currently tested. - -The cuGraph package include both a C/C++ CUDA portion and a python portion. Both libraries need to be installed in order for cuGraph to operate correctly. +These instructions are tested on supported versions/distributions of Linux, +CUDA, and Python - See [RAPIDS Getting Started](https://rapids.ai/start.html) +for the list of supported environments. Other environments _might be_ +compatible, but are not currently tested. ## Prerequisites -__Compiler:__ +__Compilers:__ * `gcc` version 9.3+ -* `nvcc` version 11.0+ -* `cmake` version 3.20.1+ +* `nvcc` version 11.5+ __CUDA:__ -* CUDA 11.0+ +* CUDA 11.2+ * NVIDIA driver 450.80.02+ * Pascal architecture or better -You can obtain CUDA from [https://developer.nvidia.com/cuda-downloads](https://developer.nvidia.com/cuda-downloads). - -__Packages:__ -* `cmake` version 3.20.1+ -* `libcugraphops` (version matching source branch version, eg. `23.10`) - -You can obtain `libcugraphops` using `conda`/`mamba` from the `nvidia` channel, or using `pip` with the `--extra-index-url=https://pypi.nvidia.com` option. See the [RAPIDS docs](https://docs.rapids.ai/install#environment) for more details. - -## Building cuGraph -To install cuGraph from source, ensure the dependencies are met. +Further details and download links for these prerequisites are available on the +[RAPIDS System Requirements page](https://docs.rapids.ai/install#system-req). +## Setting up the development environment -### Clone Repo and Configure Conda Environment -__GIT clone a version of the repository__ - - ```bash - # Set the localtion to cuGraph in an environment variable CUGRAPH_HOME - export CUGRAPH_HOME=$(pwd)/cugraph - - # Download the cuGraph repo - if you have a folked version, use that path here instead - git clone https://github.com/rapidsai/cugraph.git $CUGRAPH_HOME +### Clone the repository: +```bash +CUGRAPH_HOME=$(pwd)/cugraph +git clone https://github.com/rapidsai/cugraph.git $CUGRAPH_HOME +cd $CUGRAPH_HOME +``` - cd $CUGRAPH_HOME - ``` +### Create the conda environment -__Create the conda development environment__ +Using conda is the easiest way to install both the build and runtime +dependencies for cugraph. While it is possible to build and run cugraph without +conda, the required packages occasionally change, making it difficult to +document here. The best way to see the current dependencies needed for a build +and run environment is to examine the list of packages in the [conda +environment YAML +files](https://github.com/rapidsai/cugraph/blob/main/conda/environments). 
```bash -# create the conda environment (assuming in base `cugraph` directory) - # for CUDA 11.x -conda env create --name cugraph_dev --file conda/environments/all_cuda-118_arch-x86_64.yaml +conda env create --name cugraph_dev --file $CUGRAPH_HOME/conda/environments/all_cuda-118_arch-x86_64.yaml # activate the environment conda activate cugraph_dev @@ -56,101 +49,53 @@ conda activate cugraph_dev conda deactivate ``` - - The environment can be updated as development includes/changes the dependencies. To do so, run: - +The environment can be updated as cugraph adds/removes/updates its dependencies. To do so, run: ```bash - -# Where XXX is the CUDA 11 version -conda env update --name cugraph_dev --file conda/environments/cugraph_dev_cuda11.XXX.yml - +# for CUDA 11.x +conda env update --name cugraph_dev --file $CUGRAPH_HOME/conda/environments/all_cuda-118_arch-x86_64.yaml conda activate cugraph_dev ``` +### Build and Install -### Build and Install Using the `build.sh` Script -Using the `build.sh` script make compiling and installing cuGraph a breeze. To build and install, simply do: +#### Build and install using `build.sh` +Using the `build.sh` script, located in the `$CUGRAPH_HOME` directory, is the +recommended way to build and install the cugraph libraries. By default, +`build.sh` will build and install a predefined set of targets +(packages/libraries), but can also accept a list of targets to build. -```bash -$ cd $CUGRAPH_HOME -$ ./build.sh clean -$ ./build.sh libcugraph -$ ./build.sh cugraph -``` +For example, to build only the cugraph C++ library (`libcugraph`) and the +high-level python library (`cugraph`) without building the C++ test binaries, +run this command: -There are several other options available on the build script for advanced users. -`build.sh` options: ```bash -build.sh [ ...] [ ...] - where is: - clean - remove all existing build artifacts and configuration (start over) - uninstall - uninstall libcugraph and cugraph from a prior build/install (see also -n) - libcugraph - build libcugraph.so and SG test binaries - libcugraph_etl - build libcugraph_etl.so and SG test binaries - pylibcugraph - build the pylibcugraph Python package - cugraph - build the cugraph Python package - nx-cugraph - build the nx-cugraph Python package - cugraph-service - build the cugraph-service_client and cugraph-service_server Python package - cpp-mgtests - build libcugraph and libcugraph_etl MG tests. Builds MPI communicator, adding MPI as a dependency. 
- cugraph-dgl - build the cugraph-dgl extensions for DGL - cugraph-pyg - build the cugraph-dgl extensions for PyG - docs - build the docs - and is: - -v - verbose build mode - -g - build for debug - -n - do not install after a successful build - --pydevelop - use setup.py develop instead of install - --allgpuarch - build for all supported GPU architectures - --skip_cpp_tests - do not build the SG test binaries as part of the libcugraph and libcugraph_etl targets - --without_cugraphops - do not build algos that require cugraph-ops - --cmake_default_generator - use the default cmake generator instead of ninja - --clean - clean an individual target (note: to do a complete rebuild, use the clean target described above) - -h - print this text - - default action (no args) is to build and install 'libcugraph' then 'libcugraph_etl' then 'pylibcugraph' then 'cugraph' then 'cugraph-service' targets - -examples: -$ ./build.sh clean # remove prior build artifacts (start over) -$ ./build.sh libcugraph -v # compile and install libcugraph with verbose output -$ ./build.sh libcugraph -g # compile and install libcugraph for debug -$ ./build.sh libcugraph -n # compile libcugraph but do not install - -# make parallelism options can also be defined: Example build jobs using 4 threads (make -j4) -$ PARALLEL_LEVEL=4 ./build.sh libcugraph - -Note that the libraries will be installed to the location set in `$PREFIX` if set (i.e. `export PREFIX=/install/path`), otherwise to `$CONDA_PREFIX`. +$ cd $CUGRAPH_HOME +$ ./build.sh libcugraph pylibcugraph cugraph --skip_cpp_tests ``` +There are several other options available on the build script for advanced +users. Refer to the output of `--help` for details. -## Building each section independently -#### Build and Install the C++/CUDA `libcugraph` Library -CMake depends on the `nvcc` executable being on your path or defined in `$CUDACXX`. - -This project uses cmake for building the C/C++ library. To configure cmake, run: - - ```bash - # Set the localtion to cuGraph in an environment variable CUGRAPH_HOME - export CUGRAPH_HOME=$(pwd)/cugraph - - cd $CUGRAPH_HOME - cd cpp # enter cpp directory - mkdir build # create build directory - cd build # enter the build directory - cmake .. -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX - - # now build the code - make -j # "-j" starts multiple threads - make install # install the libraries - ``` -The default installation locations are `$CMAKE_INSTALL_PREFIX/lib` and `$CMAKE_INSTALL_PREFIX/include/cugraph` respectively. +Note that libraries will be installed to the location set in `$PREFIX` if set +(i.e. `export PREFIX=/install/path`), otherwise to `$CONDA_PREFIX`. #### Updating the RAFT branch -`libcugraph` uses the [RAFT](https://github.com/rapidsai/raft) library and there are times when it might be desirable to build against a different RAFT branch, such as when working on new features that might span both RAFT and cuGraph. +`libcugraph` uses the [RAFT](https://github.com/rapidsai/raft) library and +there are times when it might be desirable to build against a different RAFT +branch, such as when working on new features that might span both RAFT and +cuGraph. -For local development, the `CPM_raft_SOURCE=` option can be passed to the `cmake` command to enable `libcugraph` to use the local RAFT branch. +For local development, the `CPM_raft_SOURCE=` option can +be passed to the `cmake` command to enable `libcugraph` to use the local RAFT +branch. 
The `build.sh` script calls `cmake` to build the C/C++ targets, but +developers can call `cmake` directly in order to pass it options like those +described here. Refer to the `build.sh` script to see how to call `cmake` and +other commands directly. -To have CI test a `cugraph` pull request against a different RAFT branch, modify the bottom of the `cpp/cmake/thirdparty/get_raft.cmake` file as follows: +To have CI test a `cugraph` pull request against a different RAFT branch, +modify the bottom of the `cpp/cmake/thirdparty/get_raft.cmake` file as follows: ```cmake # Change pinned tag and fork here to test a commit in CI @@ -167,24 +112,10 @@ find_and_configure_raft(VERSION ${CUGRAPH_MIN_VERSION_raft} ) ``` -When the above change is pushed to a pull request, the continuous integration servers will use the specified RAFT branch to run the cuGraph tests. After the changes in the RAFT branch are merged to the release branch, remember to revert the `get_raft.cmake` file back to the original cuGraph branch. - -### Building and installing the Python package - -2) Install the Python packages to your Python path: - -```bash -cd $CUGRAPH_HOME -cd python -cd pylibcugraph -python setup.py build_ext --inplace -python setup.py install # install pylibcugraph -cd ../cugraph -python setup.py build_ext --inplace -python setup.py install # install cugraph python bindings - -``` - +When the above change is pushed to a pull request, the continuous integration +servers will use the specified RAFT branch to run the cuGraph tests. After the +changes in the RAFT branch are merged to the release branch, remember to revert +the `get_raft.cmake` file back to the original cuGraph branch. ## Run tests @@ -240,7 +171,10 @@ Note: This conda installation only applies to Linux and Python versions 3.8/3.10 ### (OPTIONAL) Set environment variable on activation -It is possible to configure the conda environment to set environmental variables on activation. Providing instructions to set PATH to include the CUDA toolkit bin directory and LD_LIBRARY_PATH to include the CUDA lib64 directory will be helpful. +It is possible to configure the conda environment to set environment variables +on activation. Providing instructions to set PATH to include the CUDA toolkit +bin directory and LD_LIBRARY_PATH to include the CUDA lib64 directory will be +helpful. ```bash cd ~/anaconda3/envs/cugraph_dev @@ -271,7 +205,8 @@ unset LD_LIBRARY_PATH ## Creating documentation -Python API documentation can be generated from _./docs/cugraph directory_. Or through using "./build.sh docs" +Python API documentation can be generated from _./docs/cugraph directory_. 
Or +through using "./build.sh docs" ## Attribution Portions adopted from https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md diff --git a/python/cugraph-pyg/cugraph_pyg/nn/conv/__init__.py b/python/cugraph-pyg/cugraph_pyg/nn/conv/__init__.py index 9c9dcdb43bb..bef3a023b93 100644 --- a/python/cugraph-pyg/cugraph_pyg/nn/conv/__init__.py +++ b/python/cugraph-pyg/cugraph_pyg/nn/conv/__init__.py @@ -13,6 +13,7 @@ from .gat_conv import GATConv from .gatv2_conv import GATv2Conv +from .hetero_gat_conv import HeteroGATConv from .rgcn_conv import RGCNConv from .sage_conv import SAGEConv from .transformer_conv import TransformerConv @@ -20,6 +21,7 @@ __all__ = [ "GATConv", "GATv2Conv", + "HeteroGATConv", "RGCNConv", "SAGEConv", "TransformerConv", diff --git a/python/cugraph-pyg/cugraph_pyg/nn/conv/hetero_gat_conv.py b/python/cugraph-pyg/cugraph_pyg/nn/conv/hetero_gat_conv.py new file mode 100644 index 00000000000..3b717552a96 --- /dev/null +++ b/python/cugraph-pyg/cugraph_pyg/nn/conv/hetero_gat_conv.py @@ -0,0 +1,265 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional, Union +from collections import defaultdict + +from cugraph.utilities.utils import import_optional +from pylibcugraphops.pytorch.operators import mha_gat_n2n + +from .base import BaseConv + +torch = import_optional("torch") +torch_geometric = import_optional("torch_geometric") + + +class HeteroGATConv(BaseConv): + r"""The graph attentional operator on heterogeneous graphs, where a separate + `GATConv` is applied on the homogeneous graph for each edge type. Compared + with directly wrapping `GATConv`s with `HeteroConv`, `HeteroGATConv` fuses + all the linear transformation associated with each node type together into 1 + GEMM call, to improve the performance on GPUs. + + Parameters + ---------- + in_channels : int or Dict[str, int]) + Size of each input sample of every node type. + + out_channels : int + Size of each output sample. + + node_types : List[str] + List of Node types. + + edge_types : List[Tuple[str, str, str]] + List of Edge types. + + heads : int, optional (default=1) + Number of multi-head-attentions. + + concat : bool, optional (default=True): + If set to :obj:`False`, the multi-head attentions are averaged instead + of concatenated. + + negative_slope : float, optional (default=0.2) + LeakyReLU angle of the negative slope. + + bias : bool, optional (default=True) + If set to :obj:`False`, the layer will not learn an additive bias. + + aggr : str, optional (default="sum") + The aggregation scheme to use for grouping node embeddings generated by + different relations. Choose from "sum", "mean", "min", "max". 
+ """ + + def __init__( + self, + in_channels: Union[int, dict[str, int]], + out_channels: int, + node_types: list[str], + edge_types: list[tuple[str, str, str]], + heads: int = 1, + concat: bool = True, + negative_slope: float = 0.2, + bias: bool = True, + aggr: str = "sum", + ): + major, minor, patch = torch_geometric.__version__.split(".")[:3] + pyg_version = tuple(map(int, [major, minor, patch])) + if pyg_version < (2, 4, 0): + raise RuntimeError(f"{self.__class__.__name__} requires pyg >= 2.4.0.") + + super().__init__() + + if isinstance(in_channels, int): + in_channels = dict.fromkeys(node_types, in_channels) + self.in_channels = in_channels + self.out_channels = out_channels + + self.node_types = node_types + self.edge_types = edge_types + self.num_heads = heads + self.concat_heads = concat + + self.negative_slope = negative_slope + self.aggr = aggr + + self.relations_per_ntype = defaultdict(lambda: ([], [])) + + lin_weights = dict.fromkeys(self.node_types) + attn_weights = dict.fromkeys(self.edge_types) + biases = dict.fromkeys(self.edge_types) + + ParameterDict = torch_geometric.nn.parameter_dict.ParameterDict + + for edge_type in self.edge_types: + src_type, _, dst_type = edge_type + self.relations_per_ntype[src_type][0].append(edge_type) + if src_type != dst_type: + self.relations_per_ntype[dst_type][1].append(edge_type) + + attn_weights[edge_type] = torch.empty( + 2 * self.num_heads * self.out_channels + ) + + if bias and concat: + biases[edge_type] = torch.empty(self.num_heads * out_channels) + elif bias: + biases[edge_type] = torch.empty(out_channels) + else: + biases[edge_type] = None + + for ntype in self.node_types: + n_src_rel = len(self.relations_per_ntype[ntype][0]) + n_dst_rel = len(self.relations_per_ntype[ntype][1]) + n_rel = n_src_rel + n_dst_rel + + lin_weights[ntype] = torch.empty( + (n_rel * self.num_heads * self.out_channels, self.in_channels[ntype]) + ) + + self.lin_weights = ParameterDict(lin_weights) + self.attn_weights = ParameterDict(attn_weights) + + if bias: + self.bias = ParameterDict(biases) + else: + self.register_parameter("bias", None) + + self.reset_parameters() + + def split_tensors( + self, x_fused_dict: dict[str, torch.Tensor], dim: int + ) -> tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]: + """Split fused tensors into chunks based on edge types. + + Parameters + ---------- + x_fused_dict : dict[str, torch.Tensor] + A dictionary to hold node feature for each node type. The key is + node type; the value is a fused tensor that account for all + relations for that node type. + + dim : int + Dimension along which to split the fused tensor. + + Returns + ------- + x_src_dict : dict[str, torch.Tensor] + A dictionary to hold source node feature for each relation graph. + + x_dst_dict : dict[str, torch.Tensor] + A dictionary to hold destination node feature for each relation graph. 
+ """ + x_src_dict = dict.fromkeys(self.edge_types) + x_dst_dict = dict.fromkeys(self.edge_types) + + for ntype, t in x_fused_dict.items(): + n_src_rel = len(self.relations_per_ntype[ntype][0]) + n_dst_rel = len(self.relations_per_ntype[ntype][1]) + n_rel = n_src_rel + n_dst_rel + t_list = torch.chunk(t, chunks=n_rel, dim=dim) + + for i, src_rel in enumerate(self.relations_per_ntype[ntype][0]): + x_src_dict[src_rel] = t_list[i] + + for i, dst_rel in enumerate(self.relations_per_ntype[ntype][1]): + x_dst_dict[dst_rel] = t_list[i + n_src_rel] + + return x_src_dict, x_dst_dict + + def reset_parameters(self, seed: Optional[int] = None): + if seed is not None: + torch.manual_seed(seed) + + w_src, w_dst = self.split_tensors(self.lin_weights, dim=0) + + for edge_type in self.edge_types: + src_type, _, dst_type = edge_type + + # lin_src + torch_geometric.nn.inits.glorot(w_src[edge_type]) + + # lin_dst + if src_type != dst_type: + torch_geometric.nn.inits.glorot(w_dst[edge_type]) + + # attn_weights + torch_geometric.nn.inits.glorot( + self.attn_weights[edge_type].view(-1, self.num_heads, self.out_channels) + ) + + # bias + if self.bias is not None: + torch_geometric.nn.inits.zeros(self.bias[edge_type]) + + def forward( + self, + x_dict: dict[str, torch.Tensor], + edge_index_dict: dict[tuple[str, str, str], torch.Tensor], + ) -> dict[str, torch.Tensor]: + feat_dict = dict.fromkeys(x_dict.keys()) + + for ntype, x in x_dict.items(): + feat_dict[ntype] = x @ self.lin_weights[ntype].T + + x_src_dict, x_dst_dict = self.split_tensors(feat_dict, dim=1) + + out_dict = defaultdict(list) + + for edge_type, edge_index in edge_index_dict.items(): + src_type, _, dst_type = edge_type + + csc = BaseConv.to_csc( + edge_index, (x_dict[src_type].size(0), x_dict[dst_type].size(0)) + ) + + if src_type == dst_type: + graph = self.get_cugraph( + csc, + bipartite=False, + ) + out = mha_gat_n2n( + x_src_dict[edge_type], + self.attn_weights[edge_type], + graph, + num_heads=self.num_heads, + activation="LeakyReLU", + negative_slope=self.negative_slope, + concat_heads=self.concat_heads, + ) + + else: + graph = self.get_cugraph( + csc, + bipartite=True, + ) + out = mha_gat_n2n( + (x_src_dict[edge_type], x_dst_dict[edge_type]), + self.attn_weights[edge_type], + graph, + num_heads=self.num_heads, + activation="LeakyReLU", + negative_slope=self.negative_slope, + concat_heads=self.concat_heads, + ) + + if self.bias is not None: + out = out + self.bias[edge_type] + + out_dict[dst_type].append(out) + + for key, value in out_dict.items(): + out_dict[key] = torch_geometric.nn.conv.hetero_conv.group(value, self.aggr) + + return out_dict diff --git a/python/cugraph-pyg/cugraph_pyg/tests/conftest.py b/python/cugraph-pyg/cugraph_pyg/tests/conftest.py index 1512901822a..30994289f9c 100644 --- a/python/cugraph-pyg/cugraph_pyg/tests/conftest.py +++ b/python/cugraph-pyg/cugraph_pyg/tests/conftest.py @@ -284,3 +284,32 @@ def basic_pyg_graph_2(): ) size = (10, 10) return edge_index, size + + +@pytest.fixture +def sample_pyg_hetero_data(): + torch.manual_seed(12345) + raw_data_dict = { + "v0": torch.randn(6, 3), + "v1": torch.randn(7, 2), + "v2": torch.randn(5, 4), + ("v2", "e0", "v1"): torch.tensor([[0, 2, 2, 4, 4], [4, 3, 6, 0, 1]]), + ("v1", "e1", "v1"): torch.tensor( + [[0, 2, 2, 2, 3, 5, 5], [4, 0, 4, 5, 3, 0, 1]] + ), + ("v0", "e2", "v0"): torch.tensor([[0, 2, 2, 3, 5, 5], [1, 1, 5, 1, 1, 2]]), + ("v1", "e3", "v2"): torch.tensor( + [[0, 1, 1, 2, 4, 5, 6], [1, 2, 3, 1, 2, 2, 2]] + ), + ("v0", "e4", "v2"): torch.tensor([[1, 1, 3, 3, 4, 4], 
[1, 4, 1, 4, 0, 3]]), + } + + # create a nested dictionary to facilitate PyG's HeteroData construction + hetero_data_dict = {} + for key, value in raw_data_dict.items(): + if isinstance(key, tuple): + hetero_data_dict[key] = {"edge_index": value} + else: + hetero_data_dict[key] = {"x": value} + + return hetero_data_dict diff --git a/python/cugraph-pyg/cugraph_pyg/tests/nn/test_hetero_gat_conv.py b/python/cugraph-pyg/cugraph_pyg/tests/nn/test_hetero_gat_conv.py new file mode 100644 index 00000000000..1c841a17df7 --- /dev/null +++ b/python/cugraph-pyg/cugraph_pyg/tests/nn/test_hetero_gat_conv.py @@ -0,0 +1,132 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + +from cugraph_pyg.nn import HeteroGATConv as CuGraphHeteroGATConv +from cugraph.utilities.utils import import_optional, MissingModule + +torch = import_optional("torch") +torch_geometric = import_optional("torch_geometric") + +ATOL = 1e-6 + + +@pytest.mark.cugraph_ops +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") +@pytest.mark.skipif( + isinstance(torch_geometric, MissingModule), reason="torch_geometric not available" +) +@pytest.mark.parametrize("heads", [1, 3, 10]) +@pytest.mark.parametrize("aggr", ["sum", "mean"]) +def test_hetero_gat_conv_equality(sample_pyg_hetero_data, aggr, heads): + major, minor, patch = torch_geometric.__version__.split(".")[:3] + pyg_version = tuple(map(int, [major, minor, patch])) + if pyg_version < (2, 4, 0): + pytest.skip("Skipping HeteroGATConv test") + + from torch_geometric.data import HeteroData + from torch_geometric.nn import HeteroConv, GATConv + + device = torch.device("cuda:0") + data = HeteroData(sample_pyg_hetero_data).to(device) + + in_channels_dict = {k: v.size(1) for k, v in data.x_dict.items()} + out_channels = 2 + + convs_dict = {} + kwargs1 = dict(heads=heads, add_self_loops=False, bias=False) + for edge_type in data.edge_types: + src_t, _, dst_t = edge_type + in_channels_src, in_channels_dst = data.x_dict[src_t].size(-1), data.x_dict[ + dst_t + ].size(-1) + if src_t == dst_t: + convs_dict[edge_type] = GATConv(in_channels_src, out_channels, **kwargs1) + else: + convs_dict[edge_type] = GATConv( + (in_channels_src, in_channels_dst), out_channels, **kwargs1 + ) + + conv1 = HeteroConv(convs_dict, aggr=aggr).to(device) + kwargs2 = dict( + heads=heads, + aggr=aggr, + node_types=data.node_types, + edge_types=data.edge_types, + bias=False, + ) + conv2 = CuGraphHeteroGATConv(in_channels_dict, out_channels, **kwargs2).to(device) + + # copy over linear and attention weights + w_src, w_dst = conv2.split_tensors(conv2.lin_weights, dim=0) + with torch.no_grad(): + for edge_type in conv2.edge_types: + src_t, _, dst_t = edge_type + w_src[edge_type][:, :] = conv1.convs[edge_type].lin_src.weight[:, :] + if w_dst[edge_type] is not None: + w_dst[edge_type][:, :] = conv1.convs[edge_type].lin_dst.weight[:, :] + + conv2.attn_weights[edge_type][: heads * out_channels] = conv1.convs[ + edge_type + ].att_src.data.flatten() 
+ conv2.attn_weights[edge_type][heads * out_channels :] = conv1.convs[ + edge_type + ].att_dst.data.flatten() + + out1 = conv1(data.x_dict, data.edge_index_dict) + out2 = conv2(data.x_dict, data.edge_index_dict) + + for node_type in data.node_types: + assert torch.allclose(out1[node_type], out2[node_type], atol=ATOL) + + loss1 = 0 + loss2 = 0 + for node_type in data.node_types: + loss1 += out1[node_type].mean() + loss2 += out2[node_type].mean() + + loss1.backward() + loss2.backward() + + # check gradient w.r.t attention weights + out_dim = heads * out_channels + for edge_type in conv2.edge_types: + assert torch.allclose( + conv1.convs[edge_type].att_src.grad.flatten(), + conv2.attn_weights[edge_type].grad[:out_dim], + atol=ATOL, + ) + assert torch.allclose( + conv1.convs[edge_type].att_dst.grad.flatten(), + conv2.attn_weights[edge_type].grad[out_dim:], + atol=ATOL, + ) + + # check gradient w.r.t linear weights + grad_lin_weights_ref = dict.fromkeys(out1.keys()) + for node_t, (rels_as_src, rels_as_dst) in conv2.relations_per_ntype.items(): + grad_list = [] + for rel_t in rels_as_src: + grad_list.append(conv1.convs[rel_t].lin_src.weight.grad.clone()) + for rel_t in rels_as_dst: + grad_list.append(conv1.convs[rel_t].lin_dst.weight.grad.clone()) + assert len(grad_list) > 0 + grad_lin_weights_ref[node_t] = torch.vstack(grad_list) + + for node_type in conv2.lin_weights: + assert torch.allclose( + grad_lin_weights_ref[node_type], + conv2.lin_weights[node_type].grad, + atol=ATOL, + ) diff --git a/python/cugraph/CMakeLists.txt b/python/cugraph/CMakeLists.txt index a1ec12c6e07..99936b23a8c 100644 --- a/python/cugraph/CMakeLists.txt +++ b/python/cugraph/CMakeLists.txt @@ -38,7 +38,6 @@ project( option(FIND_CUGRAPH_CPP "Search for existing CUGRAPH C++ installations before defaulting to local files" OFF ) -option(CUGRAPH_BUILD_WHEELS "Whether this build is generating a Python wheel." OFF) option(USE_CUGRAPH_OPS "Enable all functions that call cugraph-ops" ON) if(NOT USE_CUGRAPH_OPS) @@ -59,22 +58,14 @@ if(NOT cugraph_FOUND) set(BUILD_TESTS OFF) set(BUILD_CUGRAPH_MG_TESTS OFF) set(BUILD_CUGRAPH_OPS_CPP_TESTS OFF) - - set(_exclude_from_all "") - if(CUGRAPH_BUILD_WHEELS) - # Statically link dependencies if building wheels - set(CUDA_STATIC_RUNTIME ON) - set(USE_RAFT_STATIC ON) - set(CUGRAPH_COMPILE_RAFT_LIB ON) - set(CUGRAPH_USE_CUGRAPH_OPS_STATIC ON) - set(CUGRAPH_EXCLUDE_CUGRAPH_OPS_FROM_ALL ON) - set(ALLOW_CLONE_CUGRAPH_OPS ON) - - # Don't install the cuML C++ targets into wheels - set(_exclude_from_all EXCLUDE_FROM_ALL) - endif() - - add_subdirectory(../../cpp cugraph-cpp ${_exclude_from_all}) + set(CUDA_STATIC_RUNTIME ON) + set(USE_RAFT_STATIC ON) + set(CUGRAPH_COMPILE_RAFT_LIB ON) + set(CUGRAPH_USE_CUGRAPH_OPS_STATIC ON) + set(CUGRAPH_EXCLUDE_CUGRAPH_OPS_FROM_ALL ON) + set(ALLOW_CLONE_CUGRAPH_OPS ON) + + add_subdirectory(../../cpp cugraph-cpp EXCLUDE_FROM_ALL) set(cython_lib_dir cugraph) install(TARGETS cugraph DESTINATION ${cython_lib_dir}) diff --git a/python/cugraph/cugraph/dask/common/mg_utils.py b/python/cugraph/cugraph/dask/common/mg_utils.py index 6acda48c9da..b04f293dc0e 100644 --- a/python/cugraph/cugraph/dask/common/mg_utils.py +++ b/python/cugraph/cugraph/dask/common/mg_utils.py @@ -12,7 +12,7 @@ # limitations under the License. 
import os - +import gc import numba.cuda @@ -68,3 +68,8 @@ def get_visible_devices(): else: visible_devices = _visible_devices.strip().split(",") return visible_devices + + +def run_gc_on_dask_cluster(client): + gc.collect() + client.run(gc.collect) diff --git a/python/cugraph/cugraph/structure/graph_implementation/simpleDistributedGraph.py b/python/cugraph/cugraph/structure/graph_implementation/simpleDistributedGraph.py index f666900b226..319435575cc 100644 --- a/python/cugraph/cugraph/structure/graph_implementation/simpleDistributedGraph.py +++ b/python/cugraph/cugraph/structure/graph_implementation/simpleDistributedGraph.py @@ -37,8 +37,8 @@ from cugraph.dask.common.part_utils import ( get_persisted_df_worker_map, persist_dask_df_equal_parts_per_worker, - _chunk_lst, ) +from cugraph.dask.common.mg_utils import run_gc_on_dask_cluster from cugraph.dask import get_n_workers import cugraph.dask.comms.comms as Comms @@ -171,7 +171,6 @@ def __from_edgelist( store_transposed=False, legacy_renum_only=False, ): - if not isinstance(input_ddf, dask_cudf.DataFrame): raise TypeError("input should be a dask_cudf dataFrame") @@ -275,7 +274,6 @@ def __from_edgelist( ) value_col = None else: - source_col, dest_col, value_col = symmetrize( input_ddf, source, @@ -350,9 +348,11 @@ def __from_edgelist( is_symmetric=not self.properties.directed, ) ddf = ddf.repartition(npartitions=len(workers) * 2) - ddf_keys = ddf.to_delayed() workers = _client.scheduler_info()["workers"].keys() - ddf_keys_ls = _chunk_lst(ddf_keys, len(workers)) + persisted_keys_d = persist_dask_df_equal_parts_per_worker( + ddf, _client, return_type="dict" + ) + del ddf delayed_tasks_d = { w: delayed(simpleDistributedGraphImpl._make_plc_graph)( @@ -367,19 +367,19 @@ def __from_edgelist( self.edge_id_type, self.edge_type_id_type, ) - for w, edata in zip(workers, ddf_keys_ls) + for w, edata in persisted_keys_d.items() } + del persisted_keys_d self._plc_graph = { w: _client.compute( delayed_task, workers=w, allow_other_workers=False, pure=False ) for w, delayed_task in delayed_tasks_d.items() } - wait(list(self._plc_graph.values())) - del ddf_keys del delayed_tasks_d - gc.collect() - _client.run(gc.collect) + run_gc_on_dask_cluster(_client) + wait(list(self._plc_graph.values())) + run_gc_on_dask_cluster(_client) @property def renumbered(self): @@ -945,7 +945,6 @@ def convert_to_cudf(cp_arrays: cp.ndarray) -> cudf.Series: def _call_plc_select_random_vertices( mg_graph_x, sID: bytes, random_state: int, num_vertices: int ) -> cudf.Series: - cp_arrays = pylibcugraph_select_random_vertices( graph=mg_graph_x, resource_handle=ResourceHandle(Comms.get_handle(sID).getHandle()), @@ -961,7 +960,6 @@ def _mg_call_plc_select_random_vertices( random_state: int, num_vertices: int, ) -> dask_cudf.Series: - result = [ client.submit( _call_plc_select_random_vertices, diff --git a/python/pylibcugraph/CMakeLists.txt b/python/pylibcugraph/CMakeLists.txt index 7d5dc790ad0..e1250cb2edb 100644 --- a/python/pylibcugraph/CMakeLists.txt +++ b/python/pylibcugraph/CMakeLists.txt @@ -38,7 +38,6 @@ project( option(FIND_CUGRAPH_CPP "Search for existing CUGRAPH C++ installations before defaulting to local files" OFF ) -option(CUGRAPH_BUILD_WHEELS "Whether we're building a wheel for pypi" OFF) option(USE_CUGRAPH_OPS "Enable all functions that call cugraph-ops" ON) if(NOT USE_CUGRAPH_OPS) @@ -59,22 +58,14 @@ if (NOT cugraph_FOUND) set(BUILD_TESTS OFF) set(BUILD_CUGRAPH_MG_TESTS OFF) set(BUILD_CUGRAPH_OPS_CPP_TESTS OFF) - - set(_exclude_from_all "") - if(CUGRAPH_BUILD_WHEELS) - 
# Statically link dependencies if building wheels - set(CUDA_STATIC_RUNTIME ON) - set(USE_RAFT_STATIC ON) - set(CUGRAPH_COMPILE_RAFT_LIB ON) - set(CUGRAPH_USE_CUGRAPH_OPS_STATIC ON) - set(CUGRAPH_EXCLUDE_CUGRAPH_OPS_FROM_ALL ON) - set(ALLOW_CLONE_CUGRAPH_OPS ON) - - # Don't install the cuML C++ targets into wheels - set(_exclude_from_all EXCLUDE_FROM_ALL) - endif() - - add_subdirectory(../../cpp cugraph-cpp ${_exclude_from_all}) + set(CUDA_STATIC_RUNTIME ON) + set(USE_RAFT_STATIC ON) + set(CUGRAPH_COMPILE_RAFT_LIB ON) + set(CUGRAPH_USE_CUGRAPH_OPS_STATIC ON) + set(CUGRAPH_EXCLUDE_CUGRAPH_OPS_FROM_ALL ON) + set(ALLOW_CLONE_CUGRAPH_OPS ON) + + add_subdirectory(../../cpp cugraph-cpp EXCLUDE_FROM_ALL) set(cython_lib_dir pylibcugraph) install(TARGETS cugraph DESTINATION ${cython_lib_dir})
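
The `sample_pyg_hetero_data` fixture above returns a plain nested dictionary that PyG's `HeteroData` constructor can consume directly. A minimal sketch of that pattern, using made-up node/edge types and tiny random tensors rather than the fixture's actual data:

```python
# Sketch only: the nested-dict -> HeteroData pattern used by the fixture above.
# Node/edge types and sizes here are invented for illustration.
import torch
from torch_geometric.data import HeteroData

raw = {
    # node type -> feature matrix
    "author": torch.randn(3, 4),
    "paper": torch.randn(5, 4),
    # (src, relation, dst) -> edge_index of shape [2, num_edges]
    ("author", "writes", "paper"): torch.tensor([[0, 1, 2], [0, 2, 4]]),
}

nested = {}
for key, value in raw.items():
    if isinstance(key, tuple):
        nested[key] = {"edge_index": value}
    else:
        nested[key] = {"x": value}

data = HeteroData(nested)  # HeteroData accepts this nested mapping directly
print(data.node_types, data.edge_types)
```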
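
The weight-copying block in `test_hetero_gat_conv_equality` implies that `CuGraphHeteroGATConv` keeps one flat attention vector per edge type: the first `heads * out_channels` entries hold the source attention coefficients and the remainder the destination attention coefficients. A short illustrative sketch of that packing (the tensors below are stand-ins, not the module's real parameters):

```python
# Illustrative packing of PyG GATConv attention parameters into the flat
# per-edge-type layout the test copies into conv2.attn_weights.
import torch

heads, out_channels = 3, 2
out_dim = heads * out_channels

# PyG's GATConv stores att_src / att_dst with shape [1, heads, out_channels].
att_src = torch.randn(1, heads, out_channels)
att_dst = torch.randn(1, heads, out_channels)

# Assumed flat layout: [att_src | att_dst], length 2 * heads * out_channels.
attn_flat = torch.empty(2 * out_dim)
attn_flat[:out_dim] = att_src.flatten()
attn_flat[out_dim:] = att_dst.flatten()

# Gradients are later compared slice-by-slice the same way:
#   attn_flat.grad[:out_dim]  <->  att_src.grad.flatten()
#   attn_flat.grad[out_dim:]  <->  att_dst.grad.flatten()
```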
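
On the MG side, `mg_utils.py` gains `run_gc_on_dask_cluster`, which bundles the driver-side and worker-side `gc.collect()` calls, and `simpleDistributedGraph.py` replaces the `to_delayed()`/`_chunk_lst` bookkeeping with `persist_dask_df_equal_parts_per_worker` returning a per-worker dict. A rough usage sketch, assuming a GPU-enabled Dask cluster; the tiny edge list and the task-submission placeholder are illustrative only:

```python
# Rough sketch of the new per-worker persistence + cluster-wide GC pattern.
import cudf
import dask_cudf
from dask.distributed import Client

from cugraph.dask.common.mg_utils import run_gc_on_dask_cluster
from cugraph.dask.common.part_utils import persist_dask_df_equal_parts_per_worker

client = Client()  # in practice a dask-cuda / LocalCUDACluster client

edge_ddf = dask_cudf.from_cudf(
    cudf.DataFrame({"src": [0, 1, 2], "dst": [1, 2, 0]}), npartitions=2
)

# One dict entry per worker, each holding that worker's persisted partitions.
persisted_parts = persist_dask_df_equal_parts_per_worker(
    edge_ddf, client, return_type="dict"
)
for worker, parts in persisted_parts.items():
    ...  # submit per-worker graph-construction tasks pinned to `worker`

# Single helper now covers both driver-side and worker-side gc.collect().
run_gc_on_dask_cluster(client)
```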