Merge branch 'branch-24.04' into fix-cuco-merge-conflicts
PointKernel authored Jan 30, 2024
2 parents 384e7bd + 00f906f commit 7e7d685
Showing 69 changed files with 2,611 additions and 483 deletions.
19 changes: 19 additions & 0 deletions .github/workflows/build.yaml
@@ -173,3 +173,22 @@ jobs:
      sha: ${{ inputs.sha }}
      date: ${{ inputs.date }}
      package-name: cugraph-pyg
  wheel-build-cugraph-equivariant:
    secrets: inherit
    uses: rapidsai/shared-workflows/.github/workflows/[email protected]
    with:
      build_type: ${{ inputs.build_type || 'branch' }}
      branch: ${{ inputs.branch }}
      sha: ${{ inputs.sha }}
      date: ${{ inputs.date }}
      script: ci/build_wheel_cugraph-equivariant.sh
  wheel-publish-cugraph-equivariant:
    needs: wheel-build-cugraph-equivariant
    secrets: inherit
    uses: rapidsai/shared-workflows/.github/workflows/[email protected]
    with:
      build_type: ${{ inputs.build_type || 'branch' }}
      branch: ${{ inputs.branch }}
      sha: ${{ inputs.sha }}
      date: ${{ inputs.date }}
      package-name: cugraph-equivariant
16 changes: 16 additions & 0 deletions .github/workflows/pr.yaml
@@ -29,6 +29,8 @@ jobs:
      - wheel-tests-cugraph-dgl
      - wheel-build-cugraph-pyg
      - wheel-tests-cugraph-pyg
      - wheel-build-cugraph-equivariant
      - wheel-tests-cugraph-equivariant
      - devcontainer
    secrets: inherit
    uses: rapidsai/shared-workflows/.github/workflows/[email protected]
@@ -161,6 +163,20 @@ jobs:
      build_type: pull-request
      script: ci/test_wheel_cugraph-pyg.sh
      matrix_filter: map(select(.ARCH == "amd64" and .CUDA_VER == "11.8.0"))
  wheel-build-cugraph-equivariant:
    secrets: inherit
    uses: rapidsai/shared-workflows/.github/workflows/[email protected]
    with:
      build_type: pull-request
      script: ci/build_wheel_cugraph-equivariant.sh
  wheel-tests-cugraph-equivariant:
    needs: wheel-build-cugraph-equivariant
    secrets: inherit
    uses: rapidsai/shared-workflows/.github/workflows/[email protected]
    with:
      build_type: pull-request
      script: ci/test_wheel_cugraph-equivariant.sh
      matrix_filter: map(select(.ARCH == "amd64"))
  devcontainer:
    secrets: inherit
    uses: rapidsai/shared-workflows/.github/workflows/[email protected]
9 changes: 9 additions & 0 deletions .github/workflows/test.yaml
@@ -75,3 +75,12 @@ jobs:
      date: ${{ inputs.date }}
      sha: ${{ inputs.sha }}
      script: ci/test_wheel_cugraph-pyg.sh
  wheel-tests-cugraph-equivariant:
    secrets: inherit
    uses: rapidsai/shared-workflows/.github/workflows/[email protected]
    with:
      build_type: nightly
      branch: ${{ inputs.branch }}
      date: ${{ inputs.date }}
      sha: ${{ inputs.sha }}
      script: ci/test_wheel_cugraph-equivariant.sh
15 changes: 13 additions & 2 deletions build.sh
@@ -1,6 +1,6 @@
#!/bin/bash

# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Copyright (c) 2019-2024, NVIDIA CORPORATION.

# cugraph build script

@@ -31,6 +31,7 @@ VALIDARGS="
cugraph-service
cugraph-pyg
cugraph-dgl
cugraph-equivariant
nx-cugraph
cpp-mgtests
cpp-mtmgtests
@@ -60,6 +61,7 @@ HELP="$0 [<target> ...] [<flag> ...]
cugraph-service - build the cugraph-service_client and cugraph-service_server Python package
cugraph-pyg - build the cugraph-pyg Python package
cugraph-dgl - build the cugraph-dgl extensions for DGL
cugraph-equivariant - build the cugraph-equivariant Python package
nx-cugraph - build the nx-cugraph Python package
cpp-mgtests - build libcugraph and libcugraph_etl MG tests. Builds MPI communicator, adding MPI as a dependency.
cpp-mtmgtests - build libcugraph MTMG tests. Adds UCX as a dependency (temporary).
@@ -222,7 +224,7 @@ if hasArg uninstall; then
# removes the latest one and leaves the others installed. build.sh uninstall
# can be run multiple times to remove all of them, but that is not obvious.
pip uninstall -y pylibcugraph cugraph cugraph-service-client cugraph-service-server \
cugraph-dgl cugraph-pyg nx-cugraph
cugraph-dgl cugraph-pyg cugraph-equivariant nx-cugraph
fi

if hasArg clean; then
@@ -359,6 +361,15 @@ if hasArg cugraph-dgl || hasArg all; then
    fi
fi

# Build and install the cugraph-equivariant Python package
if hasArg cugraph-equivariant || hasArg all; then
    if hasArg --clean; then
        cleanPythonDir ${REPODIR}/python/cugraph-equivariant
    else
        python ${PYTHON_ARGS_FOR_INSTALL} ${REPODIR}/python/cugraph-equivariant
    fi
fi

# Build and install the nx-cugraph Python package
if hasArg nx-cugraph || hasArg all; then
if hasArg --clean; then
5 changes: 5 additions & 0 deletions ci/build_python.sh
@@ -89,4 +89,9 @@ if [[ ${RAPIDS_CUDA_MAJOR} == "11" ]]; then
conda/recipes/cugraph-dgl
fi

rapids-conda-retry mambabuild \
  --no-test \
  --channel "${RAPIDS_CONDA_BLD_OUTPUT_DIR}" \
  conda/recipes/cugraph-equivariant

rapids-upload-conda-to-s3 python
3 changes: 2 additions & 1 deletion ci/build_wheel.sh
@@ -57,7 +57,8 @@ python -m pip wheel . -w dist -vvv --no-deps --disable-pip-version-check
# pure-python packages should not have auditwheel run on them.
if [[ ${package_name} == "nx-cugraph" ]] || \
   [[ ${package_name} == "cugraph-dgl" ]] || \
   [[ ${package_name} == "cugraph-pyg" ]] || \
   [[ ${package_name} == "cugraph-equivariant" ]]; then
    RAPIDS_PY_WHEEL_NAME="${package_name}_${RAPIDS_PY_CUDA_SUFFIX}" rapids-upload-wheels-to-s3 dist
else
    mkdir -p final_dist
6 changes: 6 additions & 0 deletions ci/build_wheel_cugraph-equivariant.sh
@@ -0,0 +1,6 @@
#!/bin/bash
# Copyright (c) 2024, NVIDIA CORPORATION.

set -euo pipefail

./ci/build_wheel.sh cugraph-equivariant python/cugraph-equivariant
41 changes: 41 additions & 0 deletions ci/test_python.sh
@@ -247,5 +247,46 @@ else
rapids-logger "skipping cugraph_pyg pytest on CUDA != 11.8"
fi

# test cugraph-equivariant
if [[ "${RAPIDS_CUDA_VERSION}" == "11.8.0" ]]; then
  if [[ "${RUNNER_ARCH}" != "ARM64" ]]; then
    # Reuse cugraph-dgl's test env for cugraph-equivariant
    set +u
    conda activate test_cugraph_dgl
    set -u
    rapids-mamba-retry install \
      --channel "${CPP_CHANNEL}" \
      --channel "${PYTHON_CHANNEL}" \
      --channel pytorch \
      --channel nvidia \
      cugraph-equivariant
    pip install e3nn==0.5.1

    rapids-print-env

    rapids-logger "pytest cugraph-equivariant"
    pushd python/cugraph-equivariant/cugraph_equivariant
    pytest \
      --cache-clear \
      --junitxml="${RAPIDS_TESTS_DIR}/junit-cugraph-equivariant.xml" \
      --cov-config=../../.coveragerc \
      --cov=cugraph_equivariant \
      --cov-report=xml:"${RAPIDS_COVERAGE_DIR}/cugraph-equivariant-coverage.xml" \
      --cov-report=term \
      .
    popd

    # Switch back to the main test environment
    set +u
    conda deactivate
    conda activate test
    set -u
  else
    rapids-logger "skipping cugraph-equivariant pytest on ARM64"
  fi
else
  rapids-logger "skipping cugraph-equivariant pytest on CUDA != 11.8"
fi

rapids-logger "Test script exiting with value: $EXITCODE"
exit ${EXITCODE}
33 changes: 33 additions & 0 deletions ci/test_wheel_cugraph-equivariant.sh
@@ -0,0 +1,33 @@
#!/bin/bash
# Copyright (c) 2024, NVIDIA CORPORATION.

set -eoxu pipefail

package_name="cugraph-equivariant"
package_dir="python/cugraph-equivariant"

python_package_name=$(echo ${package_name}|sed 's/-/_/g')

mkdir -p ./dist
RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"

# use 'ls' to expand wildcard before adding `[extra]` requires for pip
RAPIDS_PY_WHEEL_NAME="${package_name}_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./dist
# pip creates wheels using python package names
python -m pip install $(ls ./dist/${python_package_name}*.whl)[test]


PKG_CUDA_VER="$(echo ${CUDA_VERSION} | cut -d '.' -f1,2 | tr -d '.')"
PKG_CUDA_VER_MAJOR=${PKG_CUDA_VER:0:2}
if [[ "${PKG_CUDA_VER_MAJOR}" == "12" ]]; then
PYTORCH_CUDA_VER="121"
else
PYTORCH_CUDA_VER=$PKG_CUDA_VER
fi
PYTORCH_URL="https://download.pytorch.org/whl/cu${PYTORCH_CUDA_VER}"

rapids-logger "Installing PyTorch and e3nn"
rapids-retry python -m pip install torch --index-url ${PYTORCH_URL}
rapids-retry python -m pip install e3nn

python -m pytest python/cugraph-equivariant/cugraph_equivariant/tests
7 changes: 7 additions & 0 deletions conda/recipes/cugraph-equivariant/build.sh
@@ -0,0 +1,7 @@
#!/usr/bin/env bash

# Copyright (c) 2024, NVIDIA CORPORATION.

# This assumes the script is executed from the root of the repo directory

./build.sh cugraph-equivariant
37 changes: 37 additions & 0 deletions conda/recipes/cugraph-equivariant/meta.yaml
@@ -0,0 +1,37 @@
# Copyright (c) 2024, NVIDIA CORPORATION.

{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') + environ.get('VERSION_SUFFIX', '') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set py_version = environ['CONDA_PY'] %}
{% set date_string = environ['RAPIDS_DATE_STRING'] %}

package:
  name: cugraph-equivariant
  version: {{ version }}

source:
  path: ../../..

build:
  number: {{ GIT_DESCRIBE_NUMBER }}
  string: py{{ py_version }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}

requirements:
  host:
    - python
  run:
    - pylibcugraphops ={{ minor_version }}
    - python

test:
  imports:
    - cugraph_equivariant

about:
  home: https://rapids.ai/
  dev_url: https://github.com/rapidsai/cugraph
  license: Apache-2.0
  license_file: ../../../LICENSE
  summary: GPU-accelerated equivariant convolutional layers.
22 changes: 21 additions & 1 deletion cpp/include/cugraph/edge_partition_device_view.cuh
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
* Copyright (c) 2020-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -298,6 +298,20 @@ class edge_partition_device_view_t<vertex_t, edge_t, multi_gpu, std::enable_if_t
    return major_range_first_ + major_offset;
  }

  __device__ thrust::optional<vertex_t> major_idx_from_major_nocheck(vertex_t major) const noexcept
  {
    if (major_hypersparse_first_ && (major >= *major_hypersparse_first_)) {
      auto major_hypersparse_idx =
        detail::major_hypersparse_idx_from_major_nocheck_impl(*dcs_nzd_vertices_, major);
      return major_hypersparse_idx
               ? thrust::make_optional((*major_hypersparse_first_ - major_range_first_) +
                                       *major_hypersparse_idx)
               : thrust::nullopt;
    } else {
      return major - major_range_first_;
    }
  }

__device__ vertex_t major_from_major_idx_nocheck(vertex_t major_idx) const noexcept
{
if (major_hypersparse_first_) {
@@ -339,6 +353,7 @@ class edge_partition_device_view_t<vertex_t, edge_t, multi_gpu, std::enable_if_t
return dcs_nzd_vertices_ ? thrust::optional<vertex_t const*>{(*dcs_nzd_vertices_).data()}
: thrust::nullopt;
}

__host__ __device__ thrust::optional<vertex_t> dcs_nzd_vertex_count() const
{
return dcs_nzd_vertices_
@@ -460,6 +475,11 @@ class edge_partition_device_view_t<vertex_t, edge_t, multi_gpu, std::enable_if_t
    return major_offset;
  }

  __device__ thrust::optional<vertex_t> major_idx_from_major_nocheck(vertex_t major) const noexcept
  {
    return major_offset_from_major_nocheck(major);
  }

  __device__ vertex_t major_from_major_idx_nocheck(vertex_t major_idx) const noexcept
  {
    return major_from_major_offset_nocheck(major_idx);
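The new major_idx_from_major_nocheck helper complements major_from_major_idx_nocheck: outside the hypersparse range it simply returns major - major_range_first_, while inside it it resolves the position through the DCS non-zero-degree vertex list and yields thrust::nullopt when the major has no stored entry. A minimal device-side sketch of consuming the optional result (the wrapper name and template parameter are illustrative assumptions, not part of this commit):

    // Sketch only; `EdgePartitionView` stands for whatever concrete
    // edge_partition_device_view_t instantiation the caller already holds.
    template <typename EdgePartitionView, typename vertex_t>
    __device__ bool has_stored_entry(EdgePartitionView const& edge_partition, vertex_t major)
    {
      // nullopt is returned only for majors in the hypersparse range
      // that have no non-zero-degree (DCS) entry.
      auto major_idx = edge_partition.major_idx_from_major_nocheck(major);
      return major_idx.has_value();
    }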
25 changes: 24 additions & 1 deletion cpp/include/cugraph/graph_view.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
* Copyright (c) 2020-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -631,6 +631,19 @@ class graph_view_t<vertex_t, edge_t, store_transposed, multi_gpu, std::enable_if
  edge_t count_self_loops(raft::handle_t const& handle) const;
  edge_t count_multi_edges(raft::handle_t const& handle) const;

  rmm::device_uvector<bool> has_edge(raft::handle_t const& handle,
                                     /* (edge_srcs, edge_dsts) should be pre-shuffled */
                                     raft::device_span<vertex_t const> edge_srcs,
                                     raft::device_span<vertex_t const> edge_dsts,
                                     bool do_expensive_check = false);

  rmm::device_uvector<edge_t> compute_multiplicity(
    raft::handle_t const& handle,
    /* (edge_srcs, edge_dsts) should be pre-shuffled */
    raft::device_span<vertex_t const> edge_srcs,
    raft::device_span<vertex_t const> edge_dsts,
    bool do_expensive_check = false);

template <bool transposed = is_storage_transposed>
std::enable_if_t<transposed, std::optional<raft::device_span<vertex_t const>>>
local_sorted_unique_edge_srcs() const
@@ -928,6 +941,16 @@ class graph_view_t<vertex_t, edge_t, store_transposed, multi_gpu, std::enable_if
  edge_t count_self_loops(raft::handle_t const& handle) const;
  edge_t count_multi_edges(raft::handle_t const& handle) const;

  rmm::device_uvector<bool> has_edge(raft::handle_t const& handle,
                                     raft::device_span<vertex_t const> edge_srcs,
                                     raft::device_span<vertex_t const> edge_dsts,
                                     bool do_expensive_check = false);

  rmm::device_uvector<edge_t> compute_multiplicity(raft::handle_t const& handle,
                                                   raft::device_span<vertex_t const> edge_srcs,
                                                   raft::device_span<vertex_t const> edge_dsts,
                                                   bool do_expensive_check = false);

template <bool transposed = is_storage_transposed>
std::enable_if_t<transposed, std::optional<raft::device_span<vertex_t const>>>
local_sorted_unique_edge_srcs() const
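Both graph_view_t specializations gain has_edge and compute_multiplicity, each returning one device-side result per queried (src, dst) pair; per the inline comments, the multi-GPU overloads expect the query pairs to be pre-shuffled to their owning ranks. A hedged single-GPU usage sketch (the handle, an already-constructed graph_view with vertex_t = int32_t, and num_queries are assumptions, not shown in the commit):

    // Sketch only: assumes raft::handle_t handle, a single-GPU cugraph::graph_view_t
    // graph_view (vertex_t = int32_t), and a known query count num_queries.
    rmm::device_uvector<int32_t> srcs(num_queries, handle.get_stream());
    rmm::device_uvector<int32_t> dsts(num_queries, handle.get_stream());
    // ... fill srcs/dsts with the endpoints to query ...

    auto edge_exists = graph_view.has_edge(
      handle,
      raft::device_span<int32_t const>(srcs.data(), srcs.size()),
      raft::device_span<int32_t const>(dsts.data(), dsts.size()));

    auto multiplicities = graph_view.compute_multiplicity(
      handle,
      raft::device_span<int32_t const>(srcs.data(), srcs.size()),
      raft::device_span<int32_t const>(dsts.data(), dsts.size()));
    // the i-th element of edge_exists is true iff (srcs[i], dsts[i]) is present;
    // the i-th element of multiplicities counts parallel edges for that pair.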
4 changes: 2 additions & 2 deletions cpp/src/centrality/eigenvector_centrality_impl.cuh
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
* Copyright (c) 2022-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -117,7 +117,7 @@ rmm::device_uvector<weight_t> eigenvector_centrality(
edge_src_centralities.view(),
edge_dst_dummy_property_t{}.view(),
edge_dummy_property_t{}.view(),
[] __device__(vertex_t, vertex_t, auto src_val, auto, auto) { return src_val * 1.0; },
[] __device__(vertex_t, vertex_t, auto src_val, auto, auto) { return src_val; },
weight_t{0},
reduce_op::plus<weight_t>{},
centralities.begin());
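This hunk, and the matching PageRank hunk below, drop a multiplication by the double literal 1.0 inside the per-edge operator. The factor is numerically a no-op, but when weight_t (or result_t) is float it promotes the intermediate product to double before it is converted back, so the simplified form keeps the arithmetic in the intended precision. A standalone illustration of that promotion (not code from the commit):

    #include <type_traits>

    int main()
    {
      float src_val = 0.25f;
      float alpha   = 0.85f;

      auto promoted = src_val * 1.0 * alpha;  // 1.0 is a double literal, so the result is double
      auto kept     = src_val * alpha;        // stays float

      static_assert(std::is_same_v<decltype(promoted), double>);
      static_assert(std::is_same_v<decltype(kept), float>);
      return 0;
    }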
4 changes: 2 additions & 2 deletions cpp/src/link_analysis/pagerank_impl.cuh
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
* Copyright (c) 2020-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -288,7 +288,7 @@ centrality_algorithm_metadata_t pagerank(
edge_dst_dummy_property_t{}.view(),
edge_dummy_property_t{}.view(),
[alpha] __device__(vertex_t, vertex_t, auto src_val, auto, auto) {
return src_val * 1.0 * alpha;
return src_val * alpha;
},
unvarying_part,
reduce_op::plus<result_t>{},