Forward-merge branch-23.12 to branch-24.02 #3999

Merged: 9 commits merged into branch-24.02 from branch-23.12 on Nov 20, 2023
2 changes: 1 addition & 1 deletion .github/workflows/build.yaml
@@ -22,7 +22,7 @@ on:
default: nightly

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event_name }}
cancel-in-progress: true

jobs:
7 changes: 5 additions & 2 deletions ci/build_wheel.sh
@@ -40,8 +40,11 @@ for dep in rmm cudf raft-dask pylibcugraph pylibraft ucx-py; do
sed -r -i "s/${dep}==(.*)\"/${dep}${PACKAGE_CUDA_SUFFIX}==\1${alpha_spec}\"/g" ${pyproject_file}
done

# dask-cuda doesn't get a suffix, but it does get an alpha spec.
sed -r -i "s/dask-cuda==(.*)\"/dask-cuda==\1${alpha_spec}\"/g" ${pyproject_file}
# dask-cuda & rapids-dask-dependency don't get a suffix, but they do get an alpha spec.
for dep in dask-cuda rapids-dask-dependency; do
sed -r -i "s/${dep}==(.*)\"/${dep}==\1${alpha_spec}\"/g" ${pyproject_file}
done


if [[ $PACKAGE_CUDA_SUFFIX == "-cu12" ]]; then
sed -i "s/cupy-cuda11x/cupy-cuda12x/g" ${pyproject_file}
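For context, here is a minimal sketch of what the substitution above does to a single pyproject.toml requirement line; the alpha spec value and the requirement line are made up for illustration.

    # Hypothetical stand-ins for values build_wheel.sh derives from the CI environment.
    alpha_spec=',>=0.0.0a0'

    # A made-up pyproject.toml dependency entry, piped through the same sed expression.
    echo '    "dask-cuda==23.12.*",' \
      | sed -r "s/dask-cuda==(.*)\"/dask-cuda==\1${alpha_spec}\"/g"
    # Prints:    "dask-cuda==23.12.*,>=0.0.0a0",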
1 change: 1 addition & 0 deletions ci/release/update-version.sh
@@ -88,6 +88,7 @@ DEPENDENCIES=(
raft-dask
rmm
ucx-py
rapids-dask-dependency
)
for DEP in "${DEPENDENCIES[@]}"; do
for FILE in dependencies.yaml conda/environments/*.yaml python/cugraph-{pyg,dgl}/conda/*.yaml; do
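The rest of this loop body is truncated above. As an optional, hedged spot-check after running the version-bump script (not part of the PR itself), the rewritten rapids-dask-dependency pins can be grepped across the same files the loop visits:

    # Hypothetical sanity check: list the rewritten pins in the files the loop iterates over.
    grep -n "rapids-dask-dependency==" \
      dependencies.yaml \
      conda/environments/*.yaml \
      python/cugraph-{pyg,dgl}/conda/*.yaml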
31 changes: 15 additions & 16 deletions ci/test_python.sh
@@ -197,27 +197,26 @@ if [[ "${RAPIDS_CUDA_VERSION}" == "11.8.0" ]]; then
conda activate test_cugraph_pyg
set -u

# Install pytorch
# Will automatically install built dependencies of cuGraph-PyG
rapids-mamba-retry install \
--force-reinstall \
--channel pyg \
--channel "${CPP_CHANNEL}" \
--channel "${PYTHON_CHANNEL}" \
--channel pytorch \
--channel nvidia \
'pyg=2.3' \
'pytorch=2.0.0' \
'pytorch-cuda=11.8'
--channel pyg \
--channel rapidsai-nightly \
"cugraph-pyg" \
"pytorch>=2.0,<2.1" \
"pytorch-cuda=11.8"

# Install pyg dependencies (which requires pip)
pip install pyg_lib torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://data.pyg.org/whl/torch-2.0.0+cu118.html

rapids-mamba-retry install \
--channel "${CPP_CHANNEL}" \
--channel "${PYTHON_CHANNEL}" \
libcugraph \
pylibcugraph \
pylibcugraphops \
cugraph \
cugraph-pyg
pip install \
pyg_lib \
torch_scatter \
torch_sparse \
torch_cluster \
torch_spline_conv \
-f https://data.pyg.org/whl/torch-2.0.0+cu118.html

rapids-print-env

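A small, optional sanity check after the consolidated install above (not part of this script; the module names are the usual import names and the check is only a hedged illustration):

    # Hypothetical post-install check: confirm the solver honored the requested ranges.
    python -c "import torch; print('torch', torch.__version__)"
    python -c "import cugraph_pyg; print('cugraph_pyg imported OK')"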
3 changes: 0 additions & 3 deletions ci/test_wheel_cugraph.sh
@@ -8,7 +8,4 @@ RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"
RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./local-pylibcugraph-dep
python -m pip install --no-deps ./local-pylibcugraph-dep/pylibcugraph*.whl

# Always install latest dask for testing
python -m pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main

./ci/test_wheel.sh cugraph python/cugraph
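With the git-main installs removed, dask and distributed now come from whatever the cugraph wheel itself pins, presumably via the new rapids-dask-dependency requirement. A hedged way to confirm which versions were actually resolved during the wheel test:

    # Hypothetical check: report the dask/distributed versions pulled in by the wheel's pins.
    python -m pip show dask distributed | grep -E "^(Name|Version):"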
4 changes: 1 addition & 3 deletions conda/environments/all_cuda-118_arch-x86_64.yaml
@@ -20,11 +20,8 @@ dependencies:
- cupy>=12.0.0
- cxx-compiler
- cython>=3.0.0
- dask-core>=2023.9.2
- dask-cuda==23.12.*
- dask-cudf==23.12.*
- dask>=2023.7.1
- distributed>=2023.7.1
- doxygen
- fsspec>=0.6.0
- gcc_linux-64=11.*
@@ -62,6 +59,7 @@ dependencies:
- pytest-xdist
- python-louvain
- raft-dask==23.12.*
- rapids-dask-dependency==23.12.*
- recommonmark
- requests
- rmm==23.12.*
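For anyone recreating a dev environment against the updated file, a typical invocation looks like the following (the environment name is arbitrary, and conda works in place of mamba):

    # Rebuild the CUDA 11.8 dev environment from the updated environment file.
    mamba env create --name cugraph_dev --file conda/environments/all_cuda-118_arch-x86_64.yaml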
4 changes: 1 addition & 3 deletions conda/environments/all_cuda-120_arch-x86_64.yaml
@@ -20,11 +20,8 @@ dependencies:
- cupy>=12.0.0
- cxx-compiler
- cython>=3.0.0
- dask-core>=2023.9.2
- dask-cuda==23.12.*
- dask-cudf==23.12.*
- dask>=2023.7.1
- distributed>=2023.7.1
- doxygen
- fsspec>=0.6.0
- gcc_linux-64=11.*
@@ -61,6 +58,7 @@ dependencies:
- pytest-xdist
- python-louvain
- raft-dask==23.12.*
- rapids-dask-dependency==23.12.*
- recommonmark
- requests
- rmm==23.12.*
2 changes: 1 addition & 1 deletion conda/recipes/cugraph-dgl/meta.yaml
@@ -26,7 +26,7 @@ requirements:
- dgl >=1.1.0.cu*
- numba >=0.57
- numpy >=1.21
- pylibcugraphops ={{ version }}
- pylibcugraphops ={{ minor_version }}
- python
- pytorch

6 changes: 3 additions & 3 deletions conda/recipes/cugraph-pyg/meta.yaml
@@ -26,15 +26,15 @@ requirements:
- python
- scikit-build >=0.13.1
run:
- distributed >=2023.9.2
- rapids-dask-dependency ={{ minor_version }}
- numba >=0.57
- numpy >=1.21
- python
- pytorch >=2.0
- cupy >=12.0.0
- cugraph ={{ version }}
- pylibcugraphops ={{ version }}
- pyg >=2.3,<2.4
- pylibcugraphops ={{ minor_version }}
- pyg >=2.3,<2.5

tests:
imports:
2 changes: 1 addition & 1 deletion conda/recipes/cugraph-service/meta.yaml
@@ -59,10 +59,10 @@ outputs:
- cupy >=12.0.0
- dask-cuda ={{ minor_version }}
- dask-cudf ={{ minor_version }}
- distributed >=2023.9.2
- numba >=0.57
- numpy >=1.21
- python
- rapids-dask-dependency ={{ minor_version }}
- thriftpy2 >=0.4.15
- ucx-py {{ ucx_py_version }}

4 changes: 1 addition & 3 deletions conda/recipes/cugraph/meta.yaml
@@ -76,15 +76,13 @@ requirements:
- cupy >=12.0.0
- dask-cuda ={{ minor_version }}
- dask-cudf ={{ minor_version }}
- dask >=2023.9.2
- dask-core >=2023.9.2
- distributed >=2023.9.2
- fsspec>=0.6.0
- libcugraph ={{ version }}
- pylibcugraph ={{ version }}
- pylibraft ={{ minor_version }}
- python
- raft-dask ={{ minor_version }}
- rapids-dask-dependency ={{ minor_version }}
- requests
- ucx-proc=*=gpu
- ucx-py {{ ucx_py_version }}
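Across these recipes, ={{ minor_version }} typically renders to a branch-level pin such as =23.12, which conda treats as a prefix match (23.12.*), whereas ={{ version }} pins one exact version. A hedged way to see which nightly builds such a branch-level pin would match:

    # Hypothetical: list the nightly builds a branch-level pin like "pylibcugraphops=23.12" matches.
    conda search "pylibcugraphops=23.12" -c rapidsai-nightly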
5 changes: 5 additions & 0 deletions cpp/CMakeLists.txt
@@ -153,6 +153,11 @@ rapids_cpm_init()
# lags behind.
###

# Need to make sure rmm is found before cuco so that rmm patches the libcudacxx
# directory to be found by cuco.
include(${rapids-cmake-dir}/cpm/rmm.cmake)
rapids_cpm_rmm(BUILD_EXPORT_SET cugraph-exports
INSTALL_EXPORT_SET cugraph-exports)
# Putting this before raft to override RAFT from pulling them in.
include(cmake/thirdparty/get_libcudacxx.cmake)
include(${rapids-cmake-dir}/cpm/cuco.cmake)
45 changes: 0 additions & 45 deletions cpp/include/cugraph/algorithms.hpp
@@ -464,51 +464,6 @@ k_truss_subgraph(raft::handle_t const& handle,
size_t number_of_vertices,
int k);

// FIXME: Internally distances is of int (signed 32-bit) data type, but current
// template uses data from VT, ET, WT from the legacy::GraphCSR View even if weights
// are not considered
/**
* @Synopsis Performs a breadth first search traversal of a graph starting from a vertex.
*
* @throws cugraph::logic_error with a custom message when an error occurs.
*
* @tparam VT Type of vertex identifiers. Supported value : int (signed,
* 32-bit)
* @tparam ET Type of edge identifiers. Supported value : int (signed,
* 32-bit)
* @tparam WT Type of edge weights. Supported values : int (signed, 32-bit)
*
* @param[in] handle Library handle (RAFT). If a communicator is set in the handle,
the multi GPU version will be selected.
* @param[in] graph cuGraph graph descriptor, should contain the connectivity
* information as a CSR
*
* @param[out] distances If set to a valid pointer, this is populated by distance of
* every vertex in the graph from the starting vertex
*
* @param[out] predecessors If set to a valid pointer, this is populated by bfs traversal
* predecessor of every vertex
*
* @param[out] sp_counters If set to a valid pointer, this is populated by bfs traversal
* shortest_path counter of every vertex
*
* @param[in] start_vertex The starting vertex for breadth first search traversal
*
* @param[in] directed Treat the input graph as directed
*
* @param[in] mg_batch If set to true use SG BFS path when comms are initialized.
*
*/
template <typename VT, typename ET, typename WT>
void bfs(raft::handle_t const& handle,
legacy::GraphCSRView<VT, ET, WT> const& graph,
VT* distances,
VT* predecessors,
double* sp_counters,
const VT start_vertex,
bool directed = true,
bool mg_batch = false);

/**
* @brief Compute Hungarian algorithm on a weighted bipartite graph
*
8 changes: 1 addition & 7 deletions cpp/include/cugraph/utilities/device_comm.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -806,9 +806,6 @@ device_sendrecv(raft::comms::comms_t const& comm,
size_t constexpr tuple_size =
thrust::tuple_size<typename thrust::iterator_traits<InputIterator>::value_type>::value;

// FIXME: NCCL 2.7 supports only one ncclSend and one ncclRecv for a source rank and destination
// rank inside ncclGroupStart/ncclGroupEnd, so we cannot place this inside
// ncclGroupStart/ncclGroupEnd, this restriction will be lifted in NCCL 2.8
detail::device_sendrecv_tuple_iterator_element_impl<InputIterator,
OutputIterator,
size_t{0},
@@ -866,9 +863,6 @@ device_multicast_sendrecv(raft::comms::comms_t const& comm,
size_t constexpr tuple_size =
thrust::tuple_size<typename thrust::iterator_traits<InputIterator>::value_type>::value;

// FIXME: NCCL 2.7 supports only one ncclSend and one ncclRecv for a source rank and destination
// rank inside ncclGroupStart/ncclGroupEnd, so we cannot place this inside
// ncclGroupStart/ncclGroupEnd, this restriction will be lifted in NCCL 2.8
detail::device_multicast_sendrecv_tuple_iterator_element_impl<InputIterator,
OutputIterator,
size_t{0},