Skip to content

Commit

Permalink
Merge branch 'branch-23.10' into cugraph-pyg-loader-improvements
Browse files Browse the repository at this point in the history
  • Loading branch information
alexbarghi-nv authored Sep 21, 2023
2 parents 2a799a6 + db5073d commit 6581f47
Show file tree
Hide file tree
Showing 103 changed files with 8,377 additions and 4,113 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/build.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ jobs:
arch: "amd64"
branch: ${{ inputs.branch }}
build_type: ${{ inputs.build_type || 'branch' }}
container_image: "rapidsai/ci:cuda11.8.0-ubuntu22.04-py3.10"
container_image: "rapidsai/ci-conda:cuda11.8.0-ubuntu22.04-py3.10"
date: ${{ inputs.date }}
node_type: "gpu-v100-latest-1"
run_script: "ci/build_docs.sh"
Expand Down
4 changes: 2 additions & 2 deletions .github/workflows/pr.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ jobs:
build_type: pull-request
node_type: "gpu-v100-latest-1"
arch: "amd64"
container_image: "rapidsai/ci:cuda11.8.0-ubuntu22.04-py3.10"
container_image: "rapidsai/ci-conda:cuda11.8.0-ubuntu22.04-py3.10"
run_script: "ci/test_notebooks.sh"
docs-build:
needs: conda-python-build
Expand All @@ -73,7 +73,7 @@ jobs:
build_type: pull-request
node_type: "gpu-v100-latest-1"
arch: "amd64"
container_image: "rapidsai/ci:cuda11.8.0-ubuntu22.04-py3.10"
container_image: "rapidsai/ci-conda:cuda11.8.0-ubuntu22.04-py3.10"
run_script: "ci/build_docs.sh"
wheel-build-pylibcugraph:
needs: checks
Expand Down
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ repos:
additional_dependencies:
- flake8==6.0.0
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: v16.0.1
rev: v16.0.6
hooks:
- id: clang-format
exclude: |
Expand Down
1 change: 1 addition & 0 deletions conda/recipes/cugraph-dgl/meta.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ requirements:
- dgl >=1.1.0.cu*
- numba >=0.57
- numpy >=1.21
- pylibcugraphops ={{ version }}
- python
- pytorch

Expand Down
5 changes: 3 additions & 2 deletions cpp/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -166,6 +166,7 @@ endif()

include(cmake/thirdparty/get_nccl.cmake)
include(cmake/thirdparty/get_cuhornet.cmake)
include(cmake/thirdparty/get_ucp.cmake)

if(BUILD_TESTS)
include(cmake/thirdparty/get_gtest.cmake)
Expand Down Expand Up @@ -195,8 +196,6 @@ set(CUGRAPH_SOURCES
src/utilities/path_retrieval.cu
src/structure/legacy/graph.cu
src/linear_assignment/legacy/hungarian.cu
src/link_prediction/legacy/jaccard.cu
src/link_prediction/legacy/overlap.cu
src/link_prediction/jaccard_sg.cu
src/link_prediction/sorensen_sg.cu
src/link_prediction/overlap_sg.cu
Expand Down Expand Up @@ -228,6 +227,7 @@ set(CUGRAPH_SOURCES
src/sampling/uniform_neighbor_sampling_mg.cpp
src/sampling/uniform_neighbor_sampling_sg.cpp
src/sampling/renumber_sampled_edgelist_sg.cu
src/sampling/sampling_post_processing_sg.cu
src/cores/core_number_sg.cu
src/cores/core_number_mg.cu
src/cores/k_core_sg.cu
Expand Down Expand Up @@ -291,6 +291,7 @@ set(CUGRAPH_SOURCES
src/community/triangle_count_mg.cu
src/traversal/k_hop_nbrs_sg.cu
src/traversal/k_hop_nbrs_mg.cu
src/mtmg/vertex_result.cu
)

if(USE_CUGRAPH_OPS)
Expand Down
35 changes: 35 additions & 0 deletions cpp/cmake/thirdparty/get_ucp.cmake
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
#=============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================

# Locate UCP (the UCX point-to-point communication API) on the host and
# expose it as the imported target UCP::UCP.
function(find_and_configure_ucp)
  # Already configured on a previous call -- nothing to do.
  if(TARGET UCP::UCP)
    return()
  endif()

  # Generate a FindUCP.cmake module that searches for the ucp header/library.
  rapids_find_generate_module(
    UCP
    HEADER_NAMES ucp.h
    LIBRARY_NAMES ucp
    INCLUDE_SUFFIXES ucp/api
  )

  # Currently UCP has no CMake build-system so we require
  # it built and installed on the machine already
  rapids_find_package(UCP REQUIRED)
endfunction()

find_and_configure_ucp()
4 changes: 2 additions & 2 deletions cpp/include/cugraph/detail/utility_wrappers.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -37,8 +37,8 @@ namespace detail {
* @param[in] stream_view stream view
* @param[out] d_value device array to fill
* @param[in] size number of elements in array
* @param[in] min_value minimum value
* @param[in] max_value maximum value
* @param[in] min_value minimum value (inclusive)
* @param[in] max_value maximum value (exclusive)
* @param[in] rng_state The RngState instance holding pseudo-random number generator state.
*
*/
Expand Down
4 changes: 4 additions & 0 deletions cpp/include/cugraph/graph_functions.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -919,6 +919,10 @@ rmm::device_uvector<vertex_t> select_random_vertices(
/**
* @brief renumber sampling output
*
* @deprecated This API will be deprecated and will be replaced by the
* renumber_and_compress_sampled_edgelist and renumber_and_sort_sampled_edgelist functions in
* sampling_functions.hpp.
*
* This function renumbers sampling function (e.g. uniform_neighbor_sample) outputs satisfying the
* following requirements.
*
Expand Down
39 changes: 39 additions & 0 deletions cpp/include/cugraph/mtmg/detail/device_shared_device_span.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

#include <cugraph/mtmg/detail/device_shared_wrapper.hpp>
#include <raft/core/device_span.hpp>

namespace cugraph {
namespace mtmg {
namespace detail {

/**
 * @brief Manage one non-owning device span per GPU
 *
 * Alias of device_shared_wrapper_t specialized on raft::device_span<T>:
 * stores one raft::device_span<T> (a non-owning view of device memory)
 * per GPU and lets an arbitrary host thread fetch the span for its GPU
 * through the wrapper's set()/get() interface. The spans do not own the
 * underlying memory, so the producing container must outlive this view.
 */
template <typename T>
using device_shared_device_span_t = device_shared_wrapper_t<raft::device_span<T>>;

} // namespace detail
} // namespace mtmg
} // namespace cugraph
58 changes: 58 additions & 0 deletions cpp/include/cugraph/mtmg/detail/device_shared_device_vector.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

#include <cugraph/mtmg/detail/device_shared_device_span.hpp>
#include <rmm/device_uvector.hpp>

namespace cugraph {
namespace mtmg {
namespace detail {

/**
 * @brief Manage one owning device vector per GPU
 *
 * In the MTMG environment we need the ability to manage a collection of objects
 * that are associated with a particular GPU, and fetch the objects from an
 * arbitrary GPU thread. This class wraps one rmm::device_uvector<T> per GPU
 * and can produce a matching collection of read-only spans over them.
 */
template <typename T>
class device_shared_device_vector_t : public device_shared_wrapper_t<rmm::device_uvector<T>> {
  using parent_t = detail::device_shared_wrapper_t<rmm::device_uvector<T>>;

 public:
  /**
   * @brief Create a device_shared_device_span (read only view)
   *
   * For each GPU's vector, builds a non-owning raft::device_span<T const>
   * over that vector's memory. The spans are only valid while this object
   * (and the vectors it holds) remain alive.
   */
  auto view()
  {
    std::lock_guard<std::mutex> guard(parent_t::lock_);

    device_shared_device_span_t<T const> spans;

    for (auto& [rank, vec] : parent_t::objects_) {
      spans.set(rank, raft::device_span<T const>{vec.data(), vec.size()});
    }

    return spans;
  }
};

}  // namespace detail
}  // namespace mtmg
}  // namespace cugraph
123 changes: 123 additions & 0 deletions cpp/include/cugraph/mtmg/detail/device_shared_wrapper.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,123 @@
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

#include <cugraph/mtmg/handle.hpp>
#include <cugraph/utilities/error.hpp>

#include <map>
#include <mutex>

namespace cugraph {
namespace mtmg {
namespace detail {

/**
 * @brief Wrap an object to be available for each GPU
 *
 * In the MTMG environment we need the ability to manage a collection of objects
 * that are associated with a particular GPU, and fetch the objects from an
 * arbitrary GPU thread. This object will wrap any object and allow it to be
 * accessed from different threads.
 *
 * Objects are keyed by the GPU's local rank; an internal mutex serializes
 * access, so set()/get() may be called concurrently from multiple host
 * threads.
 */
template <typename T>
class device_shared_wrapper_t {
 public:
  using wrapped_t = T;

  device_shared_wrapper_t() = default;

  // Per-GPU resources have unique ownership (and std::mutex is not copyable),
  // so the copy operations are deleted — made explicit here for readers.
  device_shared_wrapper_t(device_shared_wrapper_t const&)            = delete;
  device_shared_wrapper_t& operator=(device_shared_wrapper_t const&) = delete;

  // Moves take the source's lock so that a concurrent set()/get() on `other`
  // cannot race with the transfer.  The mutex itself is never moved; each
  // wrapper keeps its own.
  device_shared_wrapper_t(device_shared_wrapper_t&& other)
  {
    std::lock_guard<std::mutex> lock(other.lock_);
    objects_ = std::move(other.objects_);
  }

  device_shared_wrapper_t& operator=(device_shared_wrapper_t&& other)
  {
    if (this != &other) {
      // Lock both objects deadlock-free before transferring.
      std::scoped_lock lock(lock_, other.lock_);
      objects_ = std::move(other.objects_);
    }
    return *this;
  }

  /**
   * @brief Move a wrapped object into the wrapper for this thread
   *
   * @param handle Handle is used to identify the GPU we associated this object with
   * @param obj    Wrapped object
   *
   * Throws (via CUGRAPH_EXPECTS) if an object is already stored for this GPU.
   */
  void set(cugraph::mtmg::handle_t const& handle, wrapped_t&& obj)
  {
    // Delegate to the rank-based overload so the insert logic lives in one place.
    set(handle.get_local_rank(), std::move(obj));
  }

  /**
   * @brief Move a wrapped object into the wrapper for this thread
   *
   * @param local_rank Identify which GPU to associate this object with
   * @param obj        Wrapped object
   *
   * Throws (via CUGRAPH_EXPECTS) if an object is already stored for this GPU.
   */
  void set(int local_rank, wrapped_t&& obj)
  {
    std::lock_guard<std::mutex> lock(lock_);

    auto pos = objects_.find(local_rank);
    CUGRAPH_EXPECTS(pos == objects_.end(), "Cannot overwrite wrapped object");

    objects_.insert(std::make_pair(local_rank, std::move(obj)));
  }

  /**
   * @brief Get reference to the object for a particular GPU
   *
   * @param handle Handle is used to identify the GPU we associated this object with
   * @return Reference to the wrapped object
   *
   * Throws (via CUGRAPH_EXPECTS) if no object was stored for this GPU.
   */
  wrapped_t& get(cugraph::mtmg::handle_t const& handle)
  {
    std::lock_guard<std::mutex> lock(lock_);

    auto pos = objects_.find(handle.get_local_rank());
    CUGRAPH_EXPECTS(pos != objects_.end(), "Uninitialized wrapped object");

    return pos->second;
  }

  /**
   * @brief Get const reference to the object for a particular GPU
   *
   * @param handle Handle is used to identify the GPU we associated this object with
   * @return Const reference to the wrapped object
   *
   * Throws (via CUGRAPH_EXPECTS) if no object was stored for this GPU.
   */
  wrapped_t const& get(cugraph::mtmg::handle_t const& handle) const
  {
    std::lock_guard<std::mutex> lock(lock_);

    auto pos = objects_.find(handle.get_local_rank());
    CUGRAPH_EXPECTS(pos != objects_.end(), "Uninitialized wrapped object");

    return pos->second;
  }

 protected:
  mutable std::mutex lock_{};           // guards objects_ against concurrent host threads
  std::map<int, wrapped_t> objects_{};  // one wrapped object per GPU, keyed by local rank
};

}  // namespace detail
}  // namespace mtmg
}  // namespace cugraph
Loading

0 comments on commit 6581f47

Please sign in to comment.