Merge branch 'branch-24.12' into branch-24.12-update-bench-algos
nv-rliu authored Oct 15, 2024
2 parents 8f0e39f + 21fe9bf commit 4fc10b7
Showing 20 changed files with 75 additions and 79 deletions.
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -68,7 +68,7 @@ repos:
types: [python]
language: python
pass_filenames: false
additional_dependencies: ["networkx>=3.3"]
additional_dependencies: ["networkx>=3.4"]
- repo: local
hooks:
- id: nx-cugraph-readme-update
@@ -78,4 +78,4 @@ repos:
types_or: [python, markdown]
language: python
pass_filenames: false
additional_dependencies: ["networkx>=3.3"]
additional_dependencies: ["networkx>=3.4"]
1 change: 0 additions & 1 deletion ci/test_wheel.sh
@@ -4,7 +4,6 @@
set -eoxu pipefail

package_name=$1
package_dir=$2

python_package_name=$(echo ${package_name}|sed 's/-/_/g')

25 changes: 12 additions & 13 deletions ci/test_wheel_cugraph-dgl.sh
@@ -4,24 +4,16 @@
set -eoxu pipefail

package_name="cugraph-dgl"
package_dir="python/cugraph-dgl"

python_package_name=$(echo ${package_name}|sed 's/-/_/g')

mkdir -p ./dist
RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"

# Download wheels built during this job.
# Download the pylibcugraph, cugraph, and cugraph-dgl built in the previous step
RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./local-deps
RAPIDS_PY_WHEEL_NAME="cugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./local-deps
python -m pip install ./local-deps/*.whl

# use 'ls' to expand wildcard before adding `[extra]` requires for pip
RAPIDS_PY_WHEEL_NAME="${package_name}_${RAPIDS_PY_CUDA_SUFFIX}" RAPIDS_PY_WHEEL_PURE="1" rapids-download-wheels-from-s3 ./dist
# pip creates wheels using python package names
python -m pip install $(ls ./dist/${python_package_name}*.whl)[test]


# determine pytorch and DGL sources
PKG_CUDA_VER="$(echo ${CUDA_VERSION} | cut -d '.' -f1,2 | tr -d '.')"
PKG_CUDA_VER_MAJOR=${PKG_CUDA_VER:0:2}
if [[ "${PKG_CUDA_VER_MAJOR}" == "12" ]]; then
@@ -32,8 +24,15 @@ fi
PYTORCH_URL="https://download.pytorch.org/whl/cu${PYTORCH_CUDA_VER}"
DGL_URL="https://data.dgl.ai/wheels/torch-2.3/cu${PYTORCH_CUDA_VER}/repo.html"

rapids-logger "Installing PyTorch and DGL"
rapids-retry python -m pip install torch==2.3.0 --index-url ${PYTORCH_URL}
rapids-retry python -m pip install dgl==2.4.0 --find-links ${DGL_URL}
# echo to expand wildcard before adding `[extra]` requires for pip
python -m pip install \
-v \
--extra-index-url "${PYTORCH_URL}" \
--find-links "${DGL_URL}" \
"$(echo ./local-deps/pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}*.whl)" \
"$(echo ./local-deps/cugraph_${RAPIDS_PY_CUDA_SUFFIX}*.whl)" \
"$(echo ./dist/cugraph_dgl_${RAPIDS_PY_CUDA_SUFFIX}*.whl)[test]" \
'dgl==2.4.0' \
'torch>=2.3.0,<2.4'

python -m pytest python/cugraph-dgl/tests
19 changes: 9 additions & 10 deletions ci/test_wheel_cugraph-equivariant.sh
@@ -4,19 +4,14 @@
set -eoxu pipefail

package_name="cugraph-equivariant"
package_dir="python/cugraph-equivariant"

python_package_name=$(echo ${package_name}|sed 's/-/_/g')

mkdir -p ./dist
RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"

# use 'ls' to expand wildcard before adding `[extra]` requires for pip
# Download the cugraph-equivariant built in the previous step
RAPIDS_PY_WHEEL_NAME="${package_name}_${RAPIDS_PY_CUDA_SUFFIX}" RAPIDS_PY_WHEEL_PURE="1" rapids-download-wheels-from-s3 ./dist
# pip creates wheels using python package names
python -m pip install $(ls ./dist/${python_package_name}*.whl)[test]


# determine pytorch source
PKG_CUDA_VER="$(echo ${CUDA_VERSION} | cut -d '.' -f1,2 | tr -d '.')"
PKG_CUDA_VER_MAJOR=${PKG_CUDA_VER:0:2}
if [[ "${PKG_CUDA_VER_MAJOR}" == "12" ]]; then
@@ -26,8 +21,12 @@ else
fi
PYTORCH_URL="https://download.pytorch.org/whl/cu${PYTORCH_CUDA_VER}"

rapids-logger "Installing PyTorch and e3nn"
rapids-retry python -m pip install torch --index-url ${PYTORCH_URL}
rapids-retry python -m pip install e3nn
# echo to expand wildcard before adding `[extra]` requires for pip
python -m pip install \
-v \
--extra-index-url "${PYTORCH_URL}" \
"$(echo ./dist/cugraph_equivariant_${RAPIDS_PY_CUDA_SUFFIX}*.whl)[test]" \
'e3nn' \
'torch>=2.3.0,<2.4'

python -m pytest python/cugraph-equivariant/cugraph_equivariant/tests
47 changes: 23 additions & 24 deletions ci/test_wheel_cugraph-pyg.sh
@@ -4,45 +4,44 @@
set -eoxu pipefail

package_name="cugraph-pyg"
package_dir="python/cugraph-pyg"

python_package_name=$(echo ${package_name}|sed 's/-/_/g')

mkdir -p ./dist
RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"

# Download wheels built during this job.
# Download the pylibcugraph, cugraph, and cugraph-pyg built in the previous step
RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./local-deps
RAPIDS_PY_WHEEL_NAME="cugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./local-deps
python -m pip install ./local-deps/*.whl

# use 'ls' to expand wildcard before adding `[extra]` requires for pip
RAPIDS_PY_WHEEL_NAME="${package_name}_${RAPIDS_PY_CUDA_SUFFIX}" RAPIDS_PY_WHEEL_PURE="1" rapids-download-wheels-from-s3 ./dist
# pip creates wheels using python package names
python -m pip install $(ls ./dist/${python_package_name}*.whl)[test]

# RAPIDS_DATASET_ROOT_DIR is used by test scripts
export RAPIDS_DATASET_ROOT_DIR="$(realpath datasets)"

# Used to skip certain examples in CI due to memory limitations
export CI_RUN=1

# determine pytorch and pyg sources
if [[ "${CUDA_VERSION}" == "11.8.0" ]]; then
PYTORCH_URL="https://download.pytorch.org/whl/cu118"
PYG_URL="https://data.pyg.org/whl/torch-2.3.0+cu118.html"
else
PYTORCH_URL="https://download.pytorch.org/whl/cu121"
PYG_URL="https://data.pyg.org/whl/torch-2.3.0+cu121.html"
fi
rapids-logger "Installing PyTorch and PyG dependencies"
rapids-retry python -m pip install torch==2.3.0 --index-url ${PYTORCH_URL}
rapids-retry python -m pip install "torch-geometric>=2.5,<2.6"
rapids-retry python -m pip install \
ogb \
pyg_lib \
torch_scatter \
torch_sparse \
-f ${PYG_URL}

# echo to expand wildcard before adding `[extra]` requires for pip
python -m pip install \
-v \
--extra-index-url "${PYTORCH_URL}" \
--find-links "${PYG_URL}" \
"$(echo ./local-deps/pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}*.whl)" \
"$(echo ./local-deps/cugraph_${RAPIDS_PY_CUDA_SUFFIX}*.whl)" \
"$(echo ./dist/cugraph_pyg_${RAPIDS_PY_CUDA_SUFFIX}*.whl)[test]" \
'ogb' \
'pyg_lib' \
'torch>=2.3.0,<2.4' \
'torch-geometric>=2.5,<2.6' \
'torch_scatter' \
'torch_sparse'

# RAPIDS_DATASET_ROOT_DIR is used by test scripts
export RAPIDS_DATASET_ROOT_DIR="$(realpath datasets)"

# Used to skip certain examples in CI due to memory limitations
export CI_RUN=1

rapids-logger "pytest cugraph-pyg (single GPU)"
pushd python/cugraph-pyg/cugraph_pyg
4 changes: 2 additions & 2 deletions ci/test_wheel_cugraph.sh
@@ -1,5 +1,5 @@
#!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
# Copyright (c) 2023-2024, NVIDIA CORPORATION.

set -eoxu pipefail

@@ -8,4 +8,4 @@ RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"
RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./local-pylibcugraph-dep
python -m pip install --no-deps ./local-pylibcugraph-dep/pylibcugraph*.whl

./ci/test_wheel.sh cugraph python/cugraph
./ci/test_wheel.sh cugraph
2 changes: 1 addition & 1 deletion ci/test_wheel_nx-cugraph.sh
@@ -8,4 +8,4 @@ RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"
RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./local-deps
python -m pip install ./local-deps/*.whl

./ci/test_wheel.sh nx-cugraph python/nx-cugraph
./ci/test_wheel.sh nx-cugraph
4 changes: 2 additions & 2 deletions ci/test_wheel_pylibcugraph.sh
@@ -1,6 +1,6 @@
#!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
# Copyright (c) 2023-2024, NVIDIA CORPORATION.

set -eoxu pipefail

./ci/test_wheel.sh pylibcugraph python/pylibcugraph
./ci/test_wheel.sh pylibcugraph
14 changes: 7 additions & 7 deletions cpp/src/detail/graph_partition_utils.cuh
@@ -43,7 +43,7 @@ struct compute_gpu_id_from_ext_vertex_t {

__host__ __device__ int operator()(vertex_t v) const
{
cuco::detail::MurmurHash3_32<vertex_t> hash_func{};
cuco::murmurhash3_32<vertex_t> hash_func{};
auto vertex_partition_id = static_cast<int>(hash_func(v) % comm_size);
return partition_manager::compute_global_comm_rank_from_vertex_partition_id(
major_comm_size, minor_comm_size, vertex_partition_id);
@@ -58,7 +58,7 @@ struct compute_gpu_id_from_ext_edge_id_t {

__host__ __device__ int operator()(edge_t e) const
{
cuco::detail::MurmurHash3_32<edge_t> hash_func{};
cuco::murmurhash3_32<edge_t> hash_func{};
auto vertex_partition_id = static_cast<int>(hash_func(e) % comm_size);
return partition_manager::compute_global_comm_rank_from_vertex_partition_id(
major_comm_size, minor_comm_size, vertex_partition_id);
@@ -88,7 +88,7 @@ struct compute_vertex_partition_id_from_ext_vertex_t {

__host__ __device__ int operator()(vertex_t v) const
{
cuco::detail::MurmurHash3_32<vertex_t> hash_func{};
cuco::murmurhash3_32<vertex_t> hash_func{};
return hash_func(v) % comm_size;
}
};
@@ -114,7 +114,7 @@ struct compute_gpu_id_from_ext_edge_endpoints_t {

__host__ __device__ int operator()(vertex_t major, vertex_t minor) const
{
cuco::detail::MurmurHash3_32<vertex_t> hash_func{};
cuco::murmurhash3_32<vertex_t> hash_func{};
auto major_vertex_partition_id = static_cast<int>(hash_func(major) % comm_size);
auto minor_vertex_partition_id = static_cast<int>(hash_func(minor) % comm_size);
auto major_comm_rank = major_vertex_partition_id % major_comm_size;
@@ -126,7 +126,7 @@ struct compute_gpu_id_from_ext_edge_endpoints_t {
__host__ __device__ int operator()(
thrust::tuple<vertex_t, vertex_t> pair /* major, minor */) const
{
cuco::detail::MurmurHash3_32<vertex_t> hash_func{};
cuco::murmurhash3_32<vertex_t> hash_func{};
auto major_vertex_partition_id = static_cast<int>(hash_func(thrust::get<0>(pair)) % comm_size);
auto minor_vertex_partition_id = static_cast<int>(hash_func(thrust::get<1>(pair)) % comm_size);
auto major_comm_rank = major_vertex_partition_id % major_comm_size;
@@ -192,15 +192,15 @@ struct compute_edge_partition_id_from_ext_edge_endpoints_t {

__host__ __device__ int operator()(vertex_t major, vertex_t minor) const
{
cuco::detail::MurmurHash3_32<vertex_t> hash_func{};
cuco::murmurhash3_32<vertex_t> hash_func{};
return (hash_func(major) % comm_size) * minor_comm_size +
(hash_func(minor) % comm_size) / major_comm_size;
}

__host__ __device__ int operator()(
thrust::tuple<vertex_t, vertex_t> pair /* major, minor */) const
{
cuco::detail::MurmurHash3_32<vertex_t> hash_func{};
cuco::murmurhash3_32<vertex_t> hash_func{};
return (hash_func(thrust::get<0>(pair)) % comm_size) * minor_comm_size +
(hash_func(thrust::get<1>(pair)) % comm_size) / major_comm_size;
}
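The hunks above migrate cuGraph's partitioning helpers from the internal cuco::detail::MurmurHash3_32 hasher to the public cuco::murmurhash3_32 API; the call pattern (construct the hasher, invoke operator(), take the result modulo a partition count) is unchanged. The following is an illustrative sketch only, not part of the commit: it mirrors compute_vertex_partition_id_from_ext_vertex_t on the host side and assumes the hasher is reachable via <cuco/hash_functions.cuh>; the exact header path may differ between cuco releases.

// Sketch only (assumption: cuco headers on the include path; not part of this commit).
#include <cuco/hash_functions.cuh>

#include <cstdint>
#include <iostream>

template <typename vertex_t>
int compute_vertex_partition_id(vertex_t v, int comm_size)
{
  // Same call pattern as the code above, with the renamed public hasher.
  cuco::murmurhash3_32<vertex_t> hash_func{};
  return static_cast<int>(hash_func(v) % comm_size);
}

int main()
{
  std::int64_t const v = 123456;
  std::cout << compute_vertex_partition_id(v, 8) << '\n';  // prints a partition id in [0, 8)
  return 0;
}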
5 changes: 3 additions & 2 deletions cpp/src/structure/remove_multi_edges_impl.cuh
@@ -28,6 +28,7 @@

#include <rmm/device_uvector.hpp>

#include <cuda/std/cstddef>
#include <thrust/binary_search.h>
#include <thrust/distance.h>
#include <thrust/iterator/zip_iterator.h>
@@ -53,8 +54,8 @@ struct hash_src_dst_pair {
vertex_t pair[2];
pair[0] = thrust::get<0>(t);
pair[1] = thrust::get<1>(t);
cuco::detail::MurmurHash3_32<vertex_t*> hash_func{};
return hash_func.compute_hash(reinterpret_cast<std::byte*>(pair), 2 * sizeof(vertex_t)) %
cuco::murmurhash3_32<vertex_t*> hash_func{};
return hash_func.compute_hash(reinterpret_cast<cuda::std::byte*>(pair), 2 * sizeof(vertex_t)) %
num_groups;
}
};
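Besides the hasher rename, this hunk adds <cuda/std/cstddef> and casts the byte pointer passed to compute_hash to cuda::std::byte instead of std::byte. A minimal sketch of the resulting pattern, hashing a (src, dst) pair into one of num_groups buckets as hash_src_dst_pair does, might look as follows; it is not part of the commit and assumes a CUDA toolchain with the cuco and libcu++ headers on the include path.

// Sketch only (assumptions noted above; not part of this commit).
#include <cuco/hash_functions.cuh>

#include <cuda/std/cstddef>

#include <cstddef>
#include <cstdint>
#include <iostream>

template <typename vertex_t>
std::size_t hash_edge_to_group(vertex_t src, vertex_t dst, std::size_t num_groups)
{
  vertex_t pair[2] = {src, dst};
  cuco::murmurhash3_32<vertex_t*> hash_func{};
  // compute_hash hashes a raw byte span; the cast now uses cuda::std::byte.
  return hash_func.compute_hash(reinterpret_cast<cuda::std::byte*>(pair), 2 * sizeof(vertex_t)) %
         num_groups;
}

int main()
{
  std::cout << hash_edge_to_group<std::int64_t>(0, 1, 16) << '\n';  // prints a group id in [0, 16)
  return 0;
}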
2 changes: 1 addition & 1 deletion cpp/tests/prims/mg_count_if_v.cu
@@ -48,7 +48,7 @@ struct test_predicate {
test_predicate(int mod_count) : mod(mod_count) {}
__device__ bool operator()(vertex_t, const vertex_t& val)
{
cuco::detail::MurmurHash3_32<vertex_t> hash_func{};
cuco::murmurhash3_32<vertex_t> hash_func{};
return (0 == (hash_func(val) % mod));
}
};
@@ -137,7 +137,7 @@ class Tests_MGPerVPairTransformDstNbrIntersection
cugraph::get_dataframe_buffer_begin(mg_vertex_pair_buffer),
cugraph::get_dataframe_buffer_end(mg_vertex_pair_buffer),
[comm_rank, num_vertices = mg_graph_view.number_of_vertices()] __device__(size_t i) {
cuco::detail::MurmurHash3_32<size_t>
cuco::murmurhash3_32<size_t>
hash_func{}; // use hash_func to generate arbitrary vertex pairs
auto v0 = static_cast<vertex_t>(hash_func(i + comm_rank) % num_vertices);
auto v1 = static_cast<vertex_t>(hash_func(i + num_vertices + comm_rank) % num_vertices);
@@ -163,7 +163,7 @@ class Tests_MGPerVPairTransformDstNbrIntersection
cugraph::get_dataframe_buffer_begin(mg_vertex_pair_buffer),
cugraph::get_dataframe_buffer_end(mg_vertex_pair_buffer),
[comm_rank, num_vertices = mg_graph_view.number_of_vertices()] __device__(size_t i) {
cuco::detail::MurmurHash3_32<size_t>
cuco::murmurhash3_32<size_t>
hash_func{}; // use hash_func to generate arbitrary vertex pairs
auto v0 = static_cast<vertex_t>(hash_func(i + comm_rank) % num_vertices);
auto v1 = static_cast<vertex_t>(hash_func(i + num_vertices + comm_rank) % num_vertices);
2 changes: 1 addition & 1 deletion cpp/tests/prims/mg_transform_reduce_v.cu
@@ -53,7 +53,7 @@ struct v_op_t {

__device__ auto operator()(vertex_t, vertex_t val) const
{
cuco::detail::MurmurHash3_32<vertex_t> hash_func{};
cuco::murmurhash3_32<vertex_t> hash_func{};
return cugraph::test::detail::make_property_value<property_t>(hash_func(val) % mod);
}
};
4 changes: 2 additions & 2 deletions cpp/tests/utilities/property_generator_kernels.cuh
@@ -60,7 +60,7 @@ struct vertex_property_transform
{
static_assert(cugraph::is_thrust_tuple_of_arithmetic<property_t>::value ||
std::is_arithmetic_v<property_t>);
cuco::detail::MurmurHash3_32<vertex_t> hash_func{};
cuco::murmurhash3_32<vertex_t> hash_func{};
return make_property_value<property_t>(hash_func(v) % mod);
}
};
@@ -74,7 +74,7 @@ struct edge_property_transform
{
static_assert(cugraph::is_thrust_tuple_of_arithmetic<property_t>::value ||
std::is_arithmetic_v<property_t>);
cuco::detail::MurmurHash3_32<vertex_t> hash_func{};
cuco::murmurhash3_32<vertex_t> hash_func{};
return make_property_value<property_t>(hash_func(src + dst) % mod);
}
};
4 changes: 2 additions & 2 deletions python/cugraph/cugraph/structure/graph_primtypes.pxd
@@ -1,4 +1,4 @@
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Copyright (c) 2019-2024, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -21,7 +21,7 @@ from libcpp.memory cimport unique_ptr
from libcpp.utility cimport pair
from libcpp.vector cimport vector
from pylibraft.common.handle cimport *
from rmm._lib.device_buffer cimport device_buffer
from rmm.librmm.device_buffer cimport device_buffer

cdef extern from "cugraph/legacy/graph.hpp" namespace "cugraph::legacy":

4 changes: 2 additions & 2 deletions python/cugraph/cugraph/structure/graph_primtypes.pyx
@@ -1,4 +1,4 @@
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Copyright (c) 2020-2024, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -20,7 +20,7 @@ import numpy as np
from libc.stdint cimport uintptr_t
from libcpp.utility cimport move

from rmm._lib.device_buffer cimport DeviceBuffer
from rmm.pylibrmm.device_buffer cimport DeviceBuffer
from cudf.core.buffer import as_buffer
import cudf

2 changes: 1 addition & 1 deletion python/cugraph/cugraph/structure/graph_utilities.pxd
@@ -21,7 +21,7 @@ from libcpp.memory cimport unique_ptr
from libcpp.utility cimport pair
from libcpp.vector cimport vector

from rmm._lib.device_buffer cimport device_buffer
from rmm.librmm.device_buffer cimport device_buffer

from pylibraft.common.handle cimport handle_t

4 changes: 2 additions & 2 deletions python/nx-cugraph/_nx_cugraph/__init__.py
@@ -36,7 +36,7 @@
"backend_name": "cugraph",
"project": "nx-cugraph",
"package": "nx_cugraph",
"url": f"https://rapids.ai/nx-cugraph",
"url": "https://rapids.ai/nx-cugraph",
"short_summary": "GPU-accelerated backend.",
# "description": "TODO",
"functions": {
@@ -180,7 +180,7 @@
"ego_graph": "Weighted ego_graph with negative cycles is not yet supported. `NotImplementedError` will be raised if there are negative `distance` edge weights.",
"eigenvector_centrality": "`nstart` parameter is not used, but it is checked for validity.",
"from_pandas_edgelist": "cudf.DataFrame inputs also supported; value columns with str is unsuppported.",
"generic_bfs_edges": "`neighbors` and `sort_neighbors` parameters are not yet supported.",
"generic_bfs_edges": "`neighbors` parameter is not yet supported.",
"katz_centrality": "`nstart` isn't used (but is checked), and `normalized=False` is not supported.",
"louvain_communities": "`seed` parameter is currently ignored, and self-loops are not yet supported.",
"pagerank": "`dangling` parameter is not supported, but it is checked for validity.",