Merge branch 'branch-24.02' into perf-testing-dgl
alexbarghi-nv authored Jan 30, 2024
2 parents 041bc7c + 3ff2abd commit 1ca7615
Showing 37 changed files with 1,310 additions and 194 deletions.
19 changes: 19 additions & 0 deletions .github/workflows/build.yaml
@@ -173,3 +173,22 @@ jobs:
sha: ${{ inputs.sha }}
date: ${{ inputs.date }}
package-name: cugraph-pyg
wheel-build-cugraph-equivariant:
secrets: inherit
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
with:
build_type: ${{ inputs.build_type || 'branch' }}
branch: ${{ inputs.branch }}
sha: ${{ inputs.sha }}
date: ${{ inputs.date }}
script: ci/build_wheel_cugraph-equivariant.sh
wheel-publish-cugraph-equivariant:
needs: wheel-build-cugraph-equivariant
secrets: inherit
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
with:
build_type: ${{ inputs.build_type || 'branch' }}
branch: ${{ inputs.branch }}
sha: ${{ inputs.sha }}
date: ${{ inputs.date }}
package-name: cugraph-equivariant
16 changes: 16 additions & 0 deletions .github/workflows/pr.yaml
@@ -29,6 +29,8 @@ jobs:
- wheel-tests-cugraph-dgl
- wheel-build-cugraph-pyg
- wheel-tests-cugraph-pyg
- wheel-build-cugraph-equivariant
- wheel-tests-cugraph-equivariant
- devcontainer
secrets: inherit
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
@@ -161,6 +163,20 @@ jobs:
build_type: pull-request
script: ci/test_wheel_cugraph-pyg.sh
matrix_filter: map(select(.ARCH == "amd64" and .CUDA_VER == "11.8.0"))
wheel-build-cugraph-equivariant:
secrets: inherit
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
with:
build_type: pull-request
script: ci/build_wheel_cugraph-equivariant.sh
wheel-tests-cugraph-equivariant:
needs: wheel-build-cugraph-equivariant
secrets: inherit
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
with:
build_type: pull-request
script: ci/test_wheel_cugraph-equivariant.sh
matrix_filter: map(select(.ARCH == "amd64"))
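The matrix_filter entries above are jq expressions that the shared test workflow applies to its CUDA/architecture test matrix. A minimal sketch of the effect, using a hypothetical two-field matrix (the real matrix is generated by the shared workflow and carries more fields):

echo '[{"ARCH":"amd64","CUDA_VER":"11.8.0"},{"ARCH":"arm64","CUDA_VER":"11.8.0"},{"ARCH":"amd64","CUDA_VER":"12.0.1"}]' \
  | jq 'map(select(.ARCH == "amd64"))'
# keeps the two amd64 entries; the cugraph-pyg filter additionally requires
# .CUDA_VER == "11.8.0", which keeps only the amd64 / 11.8.0 entry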
devcontainer:
secrets: inherit
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
9 changes: 9 additions & 0 deletions .github/workflows/test.yaml
@@ -75,3 +75,12 @@ jobs:
date: ${{ inputs.date }}
sha: ${{ inputs.sha }}
script: ci/test_wheel_cugraph-pyg.sh
wheel-tests-cugraph-equivariant:
secrets: inherit
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
with:
build_type: nightly
branch: ${{ inputs.branch }}
date: ${{ inputs.date }}
sha: ${{ inputs.sha }}
script: ci/test_wheel_cugraph-equivariant.sh
15 changes: 13 additions & 2 deletions build.sh
@@ -1,6 +1,6 @@
#!/bin/bash

# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Copyright (c) 2019-2024, NVIDIA CORPORATION.

# cugraph build script

@@ -31,6 +31,7 @@ VALIDARGS="
cugraph-service
cugraph-pyg
cugraph-dgl
cugraph-equivariant
nx-cugraph
cpp-mgtests
cpp-mtmgtests
@@ -60,6 +61,7 @@ HELP="$0 [<target> ...] [<flag> ...]
cugraph-service - build the cugraph-service_client and cugraph-service_server Python package
cugraph-pyg - build the cugraph-pyg Python package
cugraph-dgl - build the cugraph-dgl extensions for DGL
cugraph-equivariant - build the cugraph-equivariant Python package
nx-cugraph - build the nx-cugraph Python package
cpp-mgtests - build libcugraph and libcugraph_etl MG tests. Builds MPI communicator, adding MPI as a dependency.
cpp-mtmgtests - build libcugraph MTMG tests. Adds UCX as a dependency (temporary).
@@ -222,7 +224,7 @@ if hasArg uninstall; then
# removes the latest one and leaves the others installed. build.sh uninstall
# can be run multiple times to remove all of them, but that is not obvious.
pip uninstall -y pylibcugraph cugraph cugraph-service-client cugraph-service-server \
cugraph-dgl cugraph-pyg nx-cugraph
cugraph-dgl cugraph-pyg cugraph-equivariant nx-cugraph
fi

if hasArg clean; then
@@ -359,6 +361,15 @@ if hasArg cugraph-dgl || hasArg all; then
fi
fi

# Build and install the cugraph-equivariant Python package
if hasArg cugraph-equivariant || hasArg all; then
if hasArg --clean; then
cleanPythonDir ${REPODIR}/python/cugraph-equivariant
else
python ${PYTHON_ARGS_FOR_INSTALL} ${REPODIR}/python/cugraph-equivariant
fi
fi

# Build and install the nx-cugraph Python package
if hasArg nx-cugraph || hasArg all; then
if hasArg --clean; then
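With the hunks above applied, the new target slots into the existing build.sh flow. A minimal usage sketch, run from the repository root, using only the invocations the script itself advertises:

# build and install the cugraph-equivariant Python package
./build.sh cugraph-equivariant

# remove its build artifacts instead of building
./build.sh cugraph-equivariant --clean

# uninstall it together with the other cugraph Python packages
./build.sh uninstall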
5 changes: 5 additions & 0 deletions ci/build_python.sh
@@ -89,4 +89,9 @@ if [[ ${RAPIDS_CUDA_MAJOR} == "11" ]]; then
conda/recipes/cugraph-dgl
fi

rapids-conda-retry mambabuild \
--no-test \
--channel "${RAPIDS_CONDA_BLD_OUTPUT_DIR}" \
conda/recipes/cugraph-equivariant

rapids-upload-conda-to-s3 python
3 changes: 2 additions & 1 deletion ci/build_wheel.sh
@@ -57,7 +57,8 @@ python -m pip wheel . -w dist -vvv --no-deps --disable-pip-version-check
# pure-python packages should not have auditwheel run on them.
if [[ ${package_name} == "nx-cugraph" ]] || \
[[ ${package_name} == "cugraph-dgl" ]] || \
[[ ${package_name} == "cugraph-pyg" ]]; then
[[ ${package_name} == "cugraph-pyg" ]] || \
[[ ${package_name} == "cugraph-equivariant" ]]; then
RAPIDS_PY_WHEEL_NAME="${package_name}_${RAPIDS_PY_CUDA_SUFFIX}" rapids-upload-wheels-to-s3 dist
else
mkdir -p final_dist
6 changes: 6 additions & 0 deletions ci/build_wheel_cugraph-equivariant.sh
@@ -0,0 +1,6 @@
#!/bin/bash
# Copyright (c) 2024, NVIDIA CORPORATION.

set -euo pipefail

./ci/build_wheel.sh cugraph-equivariant python/cugraph-equivariant
41 changes: 41 additions & 0 deletions ci/test_python.sh
@@ -247,5 +247,46 @@ else
rapids-logger "skipping cugraph_pyg pytest on CUDA != 11.8"
fi

# test cugraph-equivariant
if [[ "${RAPIDS_CUDA_VERSION}" == "11.8.0" ]]; then
if [[ "${RUNNER_ARCH}" != "ARM64" ]]; then
# Reuse cugraph-dgl's test env for cugraph-equivariant
set +u
conda activate test_cugraph_dgl
set -u
rapids-mamba-retry install \
--channel "${CPP_CHANNEL}" \
--channel "${PYTHON_CHANNEL}" \
--channel pytorch \
--channel nvidia \
cugraph-equivariant
pip install e3nn==0.5.1

rapids-print-env

rapids-logger "pytest cugraph-equivariant"
pushd python/cugraph-equivariant/cugraph_equivariant
pytest \
--cache-clear \
--junitxml="${RAPIDS_TESTS_DIR}/junit-cugraph-equivariant.xml" \
--cov-config=../../.coveragerc \
--cov=cugraph_equivariant \
--cov-report=xml:"${RAPIDS_COVERAGE_DIR}/cugraph-equivariant-coverage.xml" \
--cov-report=term \
.
popd

# Switch back to the original test environment
set +u
conda deactivate
conda activate test
set -u
else
rapids-logger "skipping cugraph-equivariant pytest on ARM64"
fi
else
rapids-logger "skipping cugraph-equivariant pytest on CUDA!=11.8"
fi

rapids-logger "Test script exiting with value: $EXITCODE"
exit ${EXITCODE}
33 changes: 33 additions & 0 deletions ci/test_wheel_cugraph-equivariant.sh
@@ -0,0 +1,33 @@
#!/bin/bash
# Copyright (c) 2024, NVIDIA CORPORATION.

set -eoxu pipefail

package_name="cugraph-equivariant"
package_dir="python/cugraph-equivariant"

python_package_name=$(echo ${package_name}|sed 's/-/_/g')

mkdir -p ./dist
RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"

# use 'ls' to expand wildcard before adding `[extra]` requires for pip
RAPIDS_PY_WHEEL_NAME="${package_name}_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./dist
# pip creates wheels using python package names
python -m pip install $(ls ./dist/${python_package_name}*.whl)[test]


PKG_CUDA_VER="$(echo ${CUDA_VERSION} | cut -d '.' -f1,2 | tr -d '.')"
PKG_CUDA_VER_MAJOR=${PKG_CUDA_VER:0:2}
if [[ "${PKG_CUDA_VER_MAJOR}" == "12" ]]; then
PYTORCH_CUDA_VER="121"
else
PYTORCH_CUDA_VER=$PKG_CUDA_VER
fi
PYTORCH_URL="https://download.pytorch.org/whl/cu${PYTORCH_CUDA_VER}"

rapids-logger "Installing PyTorch and e3nn"
rapids-retry python -m pip install torch --index-url ${PYTORCH_URL}
rapids-retry python -m pip install e3nn

python -m pytest python/cugraph-equivariant/cugraph_equivariant/tests
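A worked example of the PyTorch index-URL selection above, with illustrative CUDA_VERSION values (any CUDA 12.x toolkit is collapsed onto PyTorch's cu121 wheel index):

# CUDA_VERSION=11.8.0 -> PKG_CUDA_VER=118 -> major 11 -> https://download.pytorch.org/whl/cu118
# CUDA_VERSION=12.0.1 -> PKG_CUDA_VER=120 -> major 12 -> https://download.pytorch.org/whl/cu121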
7 changes: 7 additions & 0 deletions conda/recipes/cugraph-equivariant/build.sh
@@ -0,0 +1,7 @@
#!/usr/bin/env bash

# Copyright (c) 2024, NVIDIA CORPORATION.

# This assumes the script is executed from the root of the repo directory

./build.sh cugraph-equivariant
37 changes: 37 additions & 0 deletions conda/recipes/cugraph-equivariant/meta.yaml
@@ -0,0 +1,37 @@
# Copyright (c) 2024, NVIDIA CORPORATION.

{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') + environ.get('VERSION_SUFFIX', '') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set py_version = environ['CONDA_PY'] %}
{% set date_string = environ['RAPIDS_DATE_STRING'] %}

package:
name: cugraph-equivariant
version: {{ version }}

source:
path: ../../..

build:
  number: {{ GIT_DESCRIBE_NUMBER }}
  string: py{{ py_version }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}

requirements:
host:
- python
run:
- pylibcugraphops ={{ minor_version }}
- python

test:
imports:
- cugraph_equivariant

about:
home: https://rapids.ai/
dev_url: https://github.com/rapidsai/cugraph
license: Apache-2.0
license_file: ../../../LICENSE
summary: GPU-accelerated equivariant convolutional layers.
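A worked example of the Jinja version handling at the top of this recipe, assuming a placeholder RAPIDS_PACKAGE_VERSION of v24.02.00 and no VERSION_SUFFIX:

# version       = 'v24.02.00'.lstrip('v') + ''  -> '24.02.00'
# minor_version = '24' + '.' + '02'             -> '24.02'
# so the run requirement resolves to: pylibcugraphops =24.02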
4 changes: 2 additions & 2 deletions cpp/src/centrality/eigenvector_centrality_impl.cuh
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
* Copyright (c) 2022-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -117,7 +117,7 @@ rmm::device_uvector<weight_t> eigenvector_centrality(
edge_src_centralities.view(),
edge_dst_dummy_property_t{}.view(),
edge_dummy_property_t{}.view(),
[] __device__(vertex_t, vertex_t, auto src_val, auto, auto) { return src_val * 1.0; },
[] __device__(vertex_t, vertex_t, auto src_val, auto, auto) { return src_val; },
weight_t{0},
reduce_op::plus<weight_t>{},
centralities.begin());
4 changes: 2 additions & 2 deletions cpp/src/link_analysis/pagerank_impl.cuh
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
* Copyright (c) 2020-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -288,7 +288,7 @@ centrality_algorithm_metadata_t pagerank(
edge_dst_dummy_property_t{}.view(),
edge_dummy_property_t{}.view(),
[alpha] __device__(vertex_t, vertex_t, auto src_val, auto, auto) {
return src_val * 1.0 * alpha;
return src_val * alpha;
},
unvarying_part,
reduce_op::plus<result_t>{},
60 changes: 60 additions & 0 deletions cpp/src/prims/detail/prim_functors.cuh
@@ -0,0 +1,60 @@
/*
* Copyright (c) 2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once

#include <cugraph/edge_partition_device_view.cuh>

namespace cugraph {

namespace detail {
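
// call_e_op_t: given the local edge index i within an edge partition, reads the minor vertex
// from indices[i], maps major/minor to (src, dst) and their property offsets according to
// GraphViewType::is_storage_transposed, and invokes the user-provided e_op with the
// corresponding source, destination, and edge property values.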

template <typename GraphViewType,
typename EdgePartitionSrcValueInputWrapper,
typename EdgePartitionDstValueInputWrapper,
typename EdgePartitionEdgeValueInputWrapper,
typename EdgeOp>
struct call_e_op_t {
edge_partition_device_view_t<typename GraphViewType::vertex_type,
typename GraphViewType::edge_type,
GraphViewType::is_multi_gpu> const& edge_partition{};
EdgePartitionSrcValueInputWrapper const& edge_partition_src_value_input{};
EdgePartitionDstValueInputWrapper const& edge_partition_dst_value_input{};
EdgePartitionEdgeValueInputWrapper const& edge_partition_e_value_input{};
EdgeOp const& e_op{};
typename GraphViewType::vertex_type major{};
typename GraphViewType::vertex_type major_offset{};
typename GraphViewType::vertex_type const* indices{nullptr};
typename GraphViewType::edge_type edge_offset{};

__device__ auto operator()(typename GraphViewType::edge_type i) const
{
auto minor = indices[i];
auto minor_offset = edge_partition.minor_offset_from_minor_nocheck(minor);
auto src = GraphViewType::is_storage_transposed ? minor : major;
auto dst = GraphViewType::is_storage_transposed ? major : minor;
auto src_offset = GraphViewType::is_storage_transposed ? minor_offset : major_offset;
auto dst_offset = GraphViewType::is_storage_transposed ? major_offset : minor_offset;
return e_op(src,
dst,
edge_partition_src_value_input.get(src_offset),
edge_partition_dst_value_input.get(dst_offset),
edge_partition_e_value_input.get(edge_offset + i));
}
};

} // namespace detail

} // namespace cugraph
