
Forward Merge branch-24.06 into branch-24.08 #4454

Merged — 27 commits merged into branch-24.08 from branch-24.06 on Jun 4, 2024.

Commits (27)
0056302
Update operators.rst (#4339)
raybellwaves May 20, 2024
ed5f27f
Update pip devcontainers to UCX v1.15.0 (#4360)
trxcllnt May 20, 2024
b9f6e8c
add --rm and --name to devcontainer run args (#4361)
trxcllnt May 20, 2024
624e961
nx-cugraph: add `ego_graph` (#4395)
eriknw May 21, 2024
6bd08d2
test sphinx mapping to networkx (#4323)
raybellwaves May 21, 2024
2779f32
MNMG Approximation Algorithm for the Weighted Matching Problem (#4315)
naimnv May 21, 2024
5b72af3
Fix a bug in kv_store_t implementation (#4434)
naimnv May 21, 2024
8d8b4fd
Update DGL_support.md (#4327)
raybellwaves May 21, 2024
ddfaacf
DOC: doc-update-link-for-cugraphops (#4279)
raybellwaves May 22, 2024
0d64b72
Fix stream synchronization in MTMG graph construction (#4275)
ChuckHastings May 23, 2024
e6c842f
Biased sampling primitive (#4430)
seunghwak May 24, 2024
6c333d3
Call New `replicate_edgelist` Function (#4441)
nv-rliu May 24, 2024
30465c2
Fix bug in kv_store_t's insertion methods (#4444)
naimnv May 25, 2024
1c3f3a8
Move edge triangle count to the stable API (#4382)
jnke2016 May 28, 2024
3156569
Enable expression-based Dask Dataframe support (#4325)
rjzamora May 28, 2024
562b5a5
Pin torch version in `cugraph-dgl` wheel test (#4447)
tingyu66 May 28, 2024
169d162
adding notebook to demo nx_cugraph (#4366)
acostadon May 29, 2024
4c797bf
Fix building cugraph with CCCL main (#4404)
trxcllnt May 29, 2024
04e8000
Fixed links and added c++ docs per issue 4431 (#4435)
acostadon May 29, 2024
507f732
first copy of general cugraph tutorial. (#4396)
acostadon May 29, 2024
563c06e
Adds benchmark for `nx_cugraph.ego_graph` (#4451)
rlratzel May 30, 2024
797a036
Distributed Sampling in cuGraph-PyG (#4384)
alexbarghi-nv May 30, 2024
1667f7a
[FEA] New WholeGraph Feature Store for PyG (#4432)
alexbarghi-nv May 30, 2024
30954bb
Merge branch-24.06 into branch-24.08
nv-rliu May 31, 2024
d540ea8
Updates pylibwholegraph version from 24.06 to 24.08
rlratzel May 31, 2024
d9b24e1
Updates pylibwholegraph version from 24.6 to 24.8 in generated files.
rlratzel May 31, 2024
08534cd
apply devcontainer.json changes from https://github.com/rapidsai/cugr…
trxcllnt Jun 3, 2024
5 changes: 5 additions & 0 deletions .devcontainer/Dockerfile
@@ -7,6 +7,11 @@ FROM ${BASE} as pip-base

ENV DEFAULT_VIRTUAL_ENV=rapids

RUN apt update -y \
&& DEBIAN_FRONTEND=noninteractive apt install -y \
libblas-dev liblapack-dev \
&& rm -rf /tmp/* /var/tmp/* /var/cache/apt/* /var/lib/apt/lists/*;

FROM ${BASE} as conda-base

ENV DEFAULT_CONDA_ENV=rapids
5 changes: 5 additions & 0 deletions .devcontainer/cuda11.8-conda/devcontainer.json
@@ -8,6 +8,11 @@
"BASE": "rapidsai/devcontainers:24.08-cpp-cuda11.8-mambaforge-ubuntu22.04"
}
},
"runArgs": [
"--rm",
"--name",
"${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-24.08-cuda11.8-conda"
],
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.8": {}
11 changes: 6 additions & 5 deletions .devcontainer/cuda11.8-pip/devcontainer.json
@@ -5,14 +5,16 @@
"args": {
"CUDA": "11.8",
"PYTHON_PACKAGE_MANAGER": "pip",
"BASE": "rapidsai/devcontainers:24.08-cpp-cuda11.8-ubuntu22.04"
"BASE": "rapidsai/devcontainers:24.08-cpp-cuda11.8-ucx1.15.0-openmpi-ubuntu22.04"
}
},
"runArgs": [
"--rm",
"--name",
"${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-24.08-cuda11.8-pip"
],
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/ucx:24.8": {
"version": "1.14.1"
},
"ghcr.io/rapidsai/devcontainers/features/cuda:24.8": {
"version": "11.8",
"installcuBLAS": true,
@@ -23,7 +25,6 @@
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.8": {}
},
"overrideFeatureInstallOrder": [
"ghcr.io/rapidsai/devcontainers/features/ucx",
"ghcr.io/rapidsai/devcontainers/features/cuda",
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils"
],
5 changes: 5 additions & 0 deletions .devcontainer/cuda12.2-conda/devcontainer.json
@@ -8,6 +8,11 @@
"BASE": "rapidsai/devcontainers:24.08-cpp-mambaforge-ubuntu22.04"
}
},
"runArgs": [
"--rm",
"--name",
"${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-24.08-cuda12.2-conda"
],
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.8": {}
11 changes: 6 additions & 5 deletions .devcontainer/cuda12.2-pip/devcontainer.json
@@ -5,14 +5,16 @@
"args": {
"CUDA": "12.2",
"PYTHON_PACKAGE_MANAGER": "pip",
"BASE": "rapidsai/devcontainers:24.08-cpp-cuda12.2-ubuntu22.04"
"BASE": "rapidsai/devcontainers:24.08-cpp-cuda12.2-ucx1.15.0-openmpi-ubuntu22.04"
}
},
"runArgs": [
"--rm",
"--name",
"${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-24.08-cuda12.2-pip"
],
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/ucx:24.8": {
"version": "1.14.1"
},
"ghcr.io/rapidsai/devcontainers/features/cuda:24.8": {
"version": "12.2",
"installcuBLAS": true,
@@ -23,7 +25,6 @@
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.8": {}
},
"overrideFeatureInstallOrder": [
"ghcr.io/rapidsai/devcontainers/features/ucx",
"ghcr.io/rapidsai/devcontainers/features/cuda",
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils"
],
2 changes: 1 addition & 1 deletion .github/workflows/pr.yaml
@@ -196,5 +196,5 @@ jobs:
extra-repo-deploy-key: CUGRAPH_OPS_SSH_PRIVATE_DEPLOY_KEY
build_command: |
sccache -z;
build-all --verbose -j$(nproc --ignore=1);
build-all --verbose -j$(nproc --ignore=1) -DBUILD_CUGRAPH_MG_TESTS=ON;
sccache -s;
@@ -344,7 +344,7 @@ def generate_rmat_dataset(
del label_df
gc.collect()

dask_label_df = dask_cudf.from_dask_dataframe(dask_label_df)
dask_label_df = dask_label_df.to_backend("cudf")

node_offsets = {"paper": 0}
edge_offsets = {("paper", "cites", "paper"): 0}
17 changes: 17 additions & 0 deletions benchmarks/nx-cugraph/pytest-based/bench_algos.py
@@ -848,6 +848,23 @@ def bench_weakly_connected_components(benchmark, graph_obj, backend_wrapper):
assert type(result) is list


def bench_ego_graph(benchmark, graph_obj, backend_wrapper):
G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)
node = get_highest_degree_node(graph_obj)
result = benchmark.pedantic(
target=backend_wrapper(nx.ego_graph),
args=(G,),
kwargs=dict(
n=node,
radius=100,
),
rounds=rounds,
iterations=iterations,
warmup_rounds=warmup_rounds,
)
assert isinstance(result, (nx.Graph, nxcg.Graph))


@pytest.mark.skip(reason="benchmark not implemented")
def bench_complete_bipartite_graph(benchmark, graph_obj, backend_wrapper):
pass
2 changes: 1 addition & 1 deletion ci/build_wheel.sh
@@ -40,7 +40,7 @@ if ! rapids-is-release-build; then
alpha_spec=',>=0.0.0a0'
fi

for dep in rmm cudf cugraph raft-dask pylibcugraph pylibcugraphops pylibraft ucx-py; do
for dep in rmm cudf cugraph raft-dask pylibcugraph pylibcugraphops pylibwholegraph pylibraft ucx-py; do
sed -r -i "s/${dep}==(.*)\"/${dep}${PACKAGE_CUDA_SUFFIX}==\1${alpha_spec}\"/g" ${pyproject_file}
done

1 change: 1 addition & 0 deletions ci/release/update-version.sh
@@ -105,6 +105,7 @@ find .devcontainer/ -type f -name devcontainer.json -print0 | while IFS= read -r
sed_runner "s@rapidsai/devcontainers/features/ucx:[0-9.]*@rapidsai/devcontainers/features/ucx:${NEXT_SHORT_TAG_PEP440}@" "${filename}"
sed_runner "s@rapidsai/devcontainers/features/cuda:[0-9.]*@rapidsai/devcontainers/features/cuda:${NEXT_SHORT_TAG_PEP440}@" "${filename}"
sed_runner "s@rapidsai/devcontainers/features/rapids-build-utils:[0-9.]*@rapidsai/devcontainers/features/rapids-build-utils:${NEXT_SHORT_TAG_PEP440}@" "${filename}"
sed_runner "s@rapids-\${localWorkspaceFolderBasename}-[0-9.]*@rapids-\${localWorkspaceFolderBasename}-${NEXT_SHORT_TAG}@g" "${filename}"
done

sed_runner "s/:[0-9][0-9]\.[0-9][0-9]/:${NEXT_SHORT_TAG}/" ./notebooks/README.md
5 changes: 4 additions & 1 deletion ci/run_cugraph_pyg_pytests.sh
@@ -6,7 +6,10 @@ set -euo pipefail
# Support invoking run_cugraph_pyg_pytests.sh outside the script directory
cd "$(dirname "$(realpath "${BASH_SOURCE[0]}")")"/../python/cugraph-pyg/cugraph_pyg

pytest --cache-clear --ignore=tests/mg "$@" .
pytest --cache-clear --benchmark-disable "$@" .

# Used to skip certain examples in CI due to memory limitations
export CI_RUN=1

# Test examples
for e in "$(pwd)"/examples/*.py; do
2 changes: 1 addition & 1 deletion ci/test.sh
@@ -103,7 +103,7 @@ if hasArg "--run-python-tests"; then
conda list
cd ${CUGRAPH_ROOT}/python/cugraph-pyg/cugraph_pyg
# rmat is not tested because of MG testing
pytest --cache-clear --junitxml=${CUGRAPH_ROOT}/junit-cugraph-pytests.xml -v --cov-config=.coveragerc --cov=cugraph_pyg --cov-report=xml:${WORKSPACE}/python/cugraph_pyg/cugraph-coverage.xml --cov-report term --ignore=raft --ignore=tests/mg --ignore=tests/int --ignore=tests/generators --benchmark-disable
pytest -sv -m sg --cache-clear --junitxml=${CUGRAPH_ROOT}/junit-cugraph-pytests.xml -v --cov-config=.coveragerc --cov=cugraph_pyg --cov-report=xml:${WORKSPACE}/python/cugraph_pyg/cugraph-coverage.xml --cov-report term --ignore=raft --benchmark-disable
echo "Ran Python pytest for cugraph_pyg : return code was: $?, test script exit code is now: $EXITCODE"

echo "Python pytest for cugraph-service (single-GPU only)..."
11 changes: 4 additions & 7 deletions ci/test_python.sh
@@ -3,10 +3,6 @@

set -euo pipefail

# TODO: Enable dask query planning (by default) once some bugs are fixed.
# xref: https://github.com/rapidsai/cudf/issues/15027
export DASK_DATAFRAME__QUERY_PLANNING=False

# Support invoking test_python.sh outside the script directory
cd "$(dirname "$(realpath "${BASH_SOURCE[0]}")")"/../

@@ -217,13 +213,14 @@ if [[ "${RAPIDS_CUDA_VERSION}" == "11.8.0" ]]; then

# Install pyg dependencies (which requires pip)

pip install ogb
pip install \
ogb \
tensordict

pip install \
pyg_lib \
torch_scatter \
torch_sparse \
torch_cluster \
torch_spline_conv \
-f ${PYG_URL}

rapids-print-env
4 changes: 0 additions & 4 deletions ci/test_wheel.sh
@@ -3,10 +3,6 @@

set -eoxu pipefail

# TODO: Enable dask query planning (by default) once some bugs are fixed.
# xref: https://github.com/rapidsai/cudf/issues/15027
export DASK_DATAFRAME__QUERY_PLANNING=False

package_name=$1
package_dir=$2

12 changes: 11 additions & 1 deletion ci/test_wheel_cugraph-dgl.sh
@@ -32,8 +32,18 @@ fi
PYTORCH_URL="https://download.pytorch.org/whl/cu${PYTORCH_CUDA_VER}"
DGL_URL="https://data.dgl.ai/wheels/cu${PYTORCH_CUDA_VER}/repo.html"

# Starting from 2.2, PyTorch wheels depend on nvidia-nccl-cuxx>=2.19 wheel and
# dynamically link to NCCL. RAPIDS CUDA 11 CI images have an older NCCL version that
# might shadow the newer NCCL required by PyTorch during import (when importing
# `cupy` before `torch`).
if [[ "${NCCL_VERSION}" < "2.19" ]]; then
PYTORCH_VER="2.1.0"
else
PYTORCH_VER="2.3.0"
fi

rapids-logger "Installing PyTorch and DGL"
rapids-retry python -m pip install torch --index-url ${PYTORCH_URL}
rapids-retry python -m pip install "torch==${PYTORCH_VER}" --index-url ${PYTORCH_URL}
rapids-retry python -m pip install dgl==2.0.0 --find-links ${DGL_URL}

python -m pytest python/cugraph-dgl/tests
8 changes: 5 additions & 3 deletions ci/test_wheel_cugraph-pyg.sh
@@ -24,6 +24,9 @@ python -m pip install $(ls ./dist/${python_package_name}*.whl)[test]
# RAPIDS_DATASET_ROOT_DIR is used by test scripts
export RAPIDS_DATASET_ROOT_DIR="$(realpath datasets)"

# Used to skip certain examples in CI due to memory limitations
export CI_RUN=1

if [[ "${CUDA_VERSION}" == "11.8.0" ]]; then
PYTORCH_URL="https://download.pytorch.org/whl/cu118"
PYG_URL="https://data.pyg.org/whl/torch-2.1.0+cu118.html"
@@ -39,15 +42,14 @@ rapids-retry python -m pip install \
pyg_lib \
torch_scatter \
torch_sparse \
torch_cluster \
torch_spline_conv \
tensordict \
-f ${PYG_URL}

rapids-logger "pytest cugraph-pyg (single GPU)"
pushd python/cugraph-pyg/cugraph_pyg
python -m pytest \
--cache-clear \
--ignore=tests/mg \
--benchmark-disable \
tests
# Test examples
for e in "$(pwd)"/examples/*.py; do
1 change: 1 addition & 0 deletions conda/recipes/cugraph-pyg/meta.yaml
@@ -34,6 +34,7 @@ requirements:
- cupy >=12.0.0
- cugraph ={{ version }}
- pylibcugraphops ={{ minor_version }}
- tensordict >=0.1.2
- pyg >=2.5,<2.6

tests:
7 changes: 5 additions & 2 deletions cpp/CMakeLists.txt
@@ -92,14 +92,14 @@ set(CUGRAPH_CXX_FLAGS "")
set(CUGRAPH_CUDA_FLAGS "")

if(CMAKE_COMPILER_IS_GNUCXX)
list(APPEND CUGRAPH_CXX_FLAGS -Werror -Wno-error=deprecated-declarations)
list(APPEND CUGRAPH_CXX_FLAGS -Werror -Wno-error=deprecated-declarations -Wno-deprecated-declarations -DRAFT_HIDE_DEPRECATION_WARNINGS)
endif(CMAKE_COMPILER_IS_GNUCXX)


message("-- Building for GPU_ARCHS = ${CMAKE_CUDA_ARCHITECTURES}")

list(APPEND CUGRAPH_CUDA_FLAGS --expt-extended-lambda --expt-relaxed-constexpr)
list(APPEND CUGRAPH_CUDA_FLAGS -Werror=cross-execution-space-call -Wno-deprecated-declarations -Xptxas=--disable-warnings)
list(APPEND CUGRAPH_CUDA_FLAGS -Werror=cross-execution-space-call -Wno-deprecated-declarations -DRAFT_HIDE_DEPRECATION_WARNINGS -Xptxas=--disable-warnings)
list(APPEND CUGRAPH_CUDA_FLAGS -Xcompiler=-Wall,-Wno-error=sign-compare,-Wno-error=unused-but-set-variable)
list(APPEND CUGRAPH_CUDA_FLAGS -Xfatbin=-compress-all)

@@ -180,6 +180,7 @@ set(CUGRAPH_SOURCES
src/community/detail/refine_sg.cu
src/community/detail/refine_mg.cu
src/community/edge_triangle_count_sg.cu
src/community/edge_triangle_count_mg.cu
src/community/detail/maximal_independent_moves_sg.cu
src/community/detail/maximal_independent_moves_mg.cu
src/detail/utility_wrappers.cu
@@ -288,6 +289,8 @@ set(CUGRAPH_SOURCES
src/structure/symmetrize_edgelist_mg.cu
src/community/triangle_count_sg.cu
src/community/triangle_count_mg.cu
src/community/approx_weighted_matching_sg.cu
src/community/approx_weighted_matching_mg.cu
src/traversal/k_hop_nbrs_sg.cu
src/traversal/k_hop_nbrs_mg.cu
src/mtmg/vertex_result.cu
44 changes: 44 additions & 0 deletions cpp/include/cugraph/algorithms.hpp
@@ -2007,6 +2007,24 @@ void triangle_count(raft::handle_t const& handle,
raft::device_span<edge_t> counts,
bool do_expensive_check = false);

/*
* @brief Compute edge triangle counts.
*
* Compute edge triangle counts for the entire set of edges.
*
* @tparam vertex_t Type of vertex identifiers. Needs to be an integral type.
* @tparam edge_t Type of edge identifiers. Needs to be an integral type.
* @tparam multi_gpu Flag indicating whether template instantiation should target single-GPU (false)
* or multi-GPU (true).
* @param handle RAFT handle object to encapsulate resources (e.g. CUDA stream, communicator, and
* handles to various CUDA libraries) to run graph algorithms.
* @param graph_view Graph view object.
*
* @return edge_property_t containing the edge triangle count
*/
template <typename vertex_t, typename edge_t, bool multi_gpu>
edge_property_t<graph_view_t<vertex_t, edge_t, false, multi_gpu>, edge_t> edge_triangle_count(
raft::handle_t const& handle, graph_view_t<vertex_t, edge_t, false, multi_gpu> const& graph_view);

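To make the new stable-API declaration above concrete, here is a minimal single-GPU usage sketch (not part of this PR's diff). It assumes a `cugraph::graph_view_t<int32_t, int32_t, false, false>` built elsewhere and only illustrates the call shape; the helper function name is hypothetical.

```cpp
#include <cugraph/algorithms.hpp>
#include <raft/core/handle.hpp>

// Sketch only: `graph_view` is assumed to have been constructed elsewhere
// (e.g. from an edge list); vertex_t = int32_t, edge_t = int32_t, multi_gpu = false.
void example_edge_triangle_count(
  raft::handle_t const& handle,
  cugraph::graph_view_t<int32_t, int32_t, false, false> const& graph_view)
{
  // Per the declaration above, the result is an edge property holding one
  // triangle count per edge of `graph_view`.
  auto edge_counts =
    cugraph::edge_triangle_count<int32_t, int32_t, false>(handle, graph_view);
  (void)edge_counts;
}
```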
/*
* @brief Compute K-Truss.
*
@@ -2368,6 +2386,32 @@ rmm::device_uvector<vertex_t> vertex_coloring(
graph_view_t<vertex_t, edge_t, false, multi_gpu> const& graph_view,
raft::random::RngState& rng_state);

/*
* @brief Approximate Weighted Matching
*
* A matching in an undirected graph G = (V, E) is a pairing of adjacent vertices
* such that each vertex is matched with at most one other vertex, the objective
* being to match as many vertices as possible or to maximise the sum of the
* weights of the matched edges. Here we provide an implementation of an
* approximation algorithm to the weighted Maximum matching. See
* https://web.archive.org/web/20081031230449id_/http://www.ii.uib.no/~fredrikm/fredrik/papers/CP75.pdf
* for further information.
*
* @tparam vertex_t Type of vertex identifiers. Needs to be an integral type.
* @tparam edge_t Type of edge identifiers. Needs to be an integral type.
* @tparam multi_gpu Flag indicating whether template instantiation should target single-GPU (false)
* or multi-GPU (true).
* @param[in] handle RAFT handle object to encapsulate resources (e.g. CUDA stream, communicator,
* and handles to various CUDA libraries) to run graph algorithms.
* @param[in] graph_view Graph view object.
* @param[in] edge_weight_view View object holding edge weights for @p graph_view.
* @return A tuple of device vector of matched vertex ids and sum of the weights of the matched
* edges.
*/
template <typename vertex_t, typename edge_t, typename weight_t, bool multi_gpu>
std::tuple<rmm::device_uvector<vertex_t>, weight_t> approximate_weighted_matching(
raft::handle_t const& handle,
graph_view_t<vertex_t, edge_t, false, multi_gpu> const& graph_view,
edge_property_view_t<edge_t, weight_t const*> edge_weight_view);
} // namespace cugraph
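As a rough illustration of the new approximate weighted matching API (not part of this PR's diff), the sketch below assumes a single-GPU graph with `float` edge weights produced by an earlier construction step; the wrapper function and parameter names are hypothetical.

```cpp
#include <cugraph/algorithms.hpp>
#include <raft/core/handle.hpp>

// Sketch only: `graph_view` and `edge_weight_view` are assumed to come from an
// earlier graph-construction step (e.g. create_graph_from_edgelist with weights).
void example_weighted_matching(
  raft::handle_t const& handle,
  cugraph::graph_view_t<int32_t, int32_t, false, false> const& graph_view,
  cugraph::edge_property_view_t<int32_t, float const*> edge_weight_view)
{
  // Per the @return description above: a device vector of matched vertex ids
  // plus the sum of the weights of the matched edges.
  auto [matched_vertices, matching_weight] =
    cugraph::approximate_weighted_matching<int32_t, int32_t, float, false>(
      handle, graph_view, edge_weight_view);
  (void)matched_vertices;
  (void)matching_weight;
}
```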

/**