diff --git a/python/cuvs/cuvs/common/CMakeLists.txt b/python/cuvs/cuvs/common/CMakeLists.txt
index b477fdb32..066a73d35 100644
--- a/python/cuvs/cuvs/common/CMakeLists.txt
+++ b/python/cuvs/cuvs/common/CMakeLists.txt
@@ -13,7 +13,7 @@
 # =============================================================================
 
 # Set the list of Cython files to build
-set(cython_sources cydlpack.pyx exceptions.pyx)
+set(cython_sources cydlpack.pyx exceptions.pyx resources.pyx)
 set(linked_libraries cuvs::cuvs cuvs_c)
 
 # Build all of the Cython targets
diff --git a/python/cuvs/cuvs/common/__init__.py b/python/cuvs/cuvs/common/__init__.py
index eb5666659..e66418306 100644
--- a/python/cuvs/cuvs/common/__init__.py
+++ b/python/cuvs/cuvs/common/__init__.py
@@ -13,6 +13,6 @@
 # limitations under the License.
 
-from .temp_raft import auto_sync_resources
+from .resources import Resources, auto_sync_resources
 
-__all__ = ["auto_sync_resources"]
+__all__ = ["auto_sync_resources", "Resources"]
diff --git a/python/cuvs/cuvs/common/c_api.pxd b/python/cuvs/cuvs/common/c_api.pxd
index e4e62bf94..f99fd5348 100644
--- a/python/cuvs/cuvs/common/c_api.pxd
+++ b/python/cuvs/cuvs/common/c_api.pxd
@@ -30,4 +30,5 @@ cdef extern from "cuvs/core/c_api.h":
     cuvsError_t cuvsResourcesCreate(cuvsResources_t* res)
     cuvsError_t cuvsResourcesDestroy(cuvsResources_t res)
     cuvsError_t cuvsStreamSet(cuvsResources_t res, cudaStream_t stream)
+    cuvsError_t cuvsStreamSync(cuvsResources_t res)
     const char * cuvsGetLastErrorText()
diff --git a/python/cuvs/cuvs/common/resources.pxd b/python/cuvs/cuvs/common/resources.pxd
new file mode 100644
index 000000000..69a5c894f
--- /dev/null
+++ b/python/cuvs/cuvs/common/resources.pxd
@@ -0,0 +1,22 @@
+#
+# Copyright (c) 2024, NVIDIA CORPORATION.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# cython: language_level=3
+
+from cuvs.common.c_api cimport cuvsResources_t
+
+
+cdef class Resources:
+    cdef cuvsResources_t c_obj
diff --git a/python/cuvs/cuvs/common/resources.pyx b/python/cuvs/cuvs/common/resources.pyx
new file mode 100644
index 000000000..c0b72ae34
--- /dev/null
+++ b/python/cuvs/cuvs/common/resources.pyx
@@ -0,0 +1,120 @@
+#
+# Copyright (c) 2024, NVIDIA CORPORATION.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# cython: language_level=3
+
+import functools
+
+from cuda.ccudart cimport cudaStream_t
+
+from cuvs.common.c_api cimport (
+    cuvsResources_t,
+    cuvsResourcesCreate,
+    cuvsResourcesDestroy,
+    cuvsStreamSet,
+    cuvsStreamSync,
+)
+
+from cuvs.common.exceptions import check_cuvs
+
+
+cdef class Resources:
+    """
+    Resources is a lightweight python wrapper around the corresponding
+    C++ class of resources exposed by RAFT's C++ interface. Refer to
+    the header file raft/core/resources.hpp for interface level
+    details of this struct.
+
+    Parameters
+    ----------
+    stream : Optional stream to use for ordering CUDA instructions
+
+    Examples
+    --------
+
+    Basic usage:
+
+    >>> from cuvs.common import Resources
+    >>> handle = Resources()
+    >>>
+    >>> # call algos here
+    >>>
+    >>> # final sync of all work launched in the stream of this handle
+    >>> handle.sync()
+
+    Using a cuPy stream with cuVS Resources:
+
+    >>> import cupy
+    >>> from cuvs.common import Resources
+    >>>
+    >>> cupy_stream = cupy.cuda.Stream()
+    >>> handle = Resources(stream=cupy_stream.ptr)
+    """
+
+    def __cinit__(self, stream=None):
+        check_cuvs(cuvsResourcesCreate(&self.c_obj))
+        if stream:
+            check_cuvs(cuvsStreamSet(self.c_obj, stream))
+
+    def sync(self):
+        check_cuvs(cuvsStreamSync(self.c_obj))
+
+    def get_c_obj(self):
+        """
+        Return the pointer to the underlying c_obj as a size_t
+        """
+        return self.c_obj
+
+    def __dealloc__(self):
+        check_cuvs(cuvsResourcesDestroy(self.c_obj))
+
+
+_resources_param_string = """
+    resources : Optional cuVS Resource handle for reusing CUDA resources.
+        If Resources aren't supplied, CUDA resources will be
+        allocated inside this function and synchronized before the
+        function exits. If resources are supplied, you will need to
+        explicitly synchronize yourself by calling `resources.sync()`
+        before accessing the output.
+""".strip()
+
+
+def auto_sync_resources(f):
+    """Decorator to automatically call sync on a cuVS Resources object when
+    it isn't passed to a function.
+
+    When a resources=None is passed to the wrapped function, this decorator
+    will automatically create a default resources for the function, and
+    call sync on that resources when the function exits.
+
+    This will also insert the appropriate docstring for the resources parameter
+    """
+
+    @functools.wraps(f)
+    def wrapper(*args, resources=None, **kwargs):
+        sync_resources = resources is None
+        resources = resources if resources is not None else Resources()
+
+        ret_value = f(*args, resources=resources, **kwargs)
+
+        if sync_resources:
+            resources.sync()
+
+        return ret_value
+
+    wrapper.__doc__ = wrapper.__doc__.format(
+        resources_docstring=_resources_param_string
+    )
+    return wrapper
diff --git a/python/cuvs/cuvs/neighbors/cagra/cagra.pyx b/python/cuvs/cuvs/neighbors/cagra/cagra.pyx
index d64ac72ee..818c93940 100644
--- a/python/cuvs/cuvs/neighbors/cagra/cagra.pyx
+++ b/python/cuvs/cuvs/neighbors/cagra/cagra.pyx
@@ -19,20 +19,14 @@ import numpy as np
 
 cimport cuvs.common.cydlpack
 
-from cuvs.common.temp_raft import auto_sync_resources
+from cuvs.common.resources import auto_sync_resources
 
 from cython.operator cimport dereference as deref
 from libcpp cimport bool, cast
 
 from cuvs.common cimport cydlpack
 
-from pylibraft.common import (
-    DeviceResources,
-    Stream,
-    auto_convert_output,
-    cai_wrapper,
-    device_ndarray,
-)
+from pylibraft.common import auto_convert_output, cai_wrapper, device_ndarray
 from pylibraft.common.cai_wrapper import wrap_array
 from pylibraft.common.interruptible import cuda_interruptible
 from pylibraft.neighbors.common import _check_input_array
@@ -46,12 +40,6 @@ from libc.stdint cimport (
     uintptr_t,
 )
 
-from cuvs.common.c_api cimport (
-    cuvsError_t,
-    cuvsResources_t,
-    cuvsResourcesCreate,
-)
-
 from cuvs.common.exceptions import check_cuvs
 
 
@@ -86,7 +74,6 @@
                  graph_degree=64,
                  build_algo="ivf_pq",
                  nn_descent_niter=20):
-
         cuvsCagraIndexParamsCreate(&self.params)
 
         # todo (dgd): enable once other metrics are present
@@ -136,7 +123,6 @@
         check_cuvs(cuvsCagraIndexCreate(&self.index))
 
     def __dealloc__(self):
-        cdef cuvsError_t index_destroy_status
         if self.index is not NULL:
             check_cuvs(cuvsCagraIndexDestroy(self.index))
 
@@ -202,20 +188,17 @@ def build_index(IndexParams index_params, dataset, resources=None):
     _check_input_array(dataset_ai, [np.dtype('float32'), np.dtype('byte'),
                                     np.dtype('ubyte')])
 
-    cdef cuvsResources_t res_
-    cdef cuvsError_t cstat
-
-    check_cuvs(cuvsResourcesCreate(&res_))
-
     cdef Index idx = Index()
     cdef cuvsError_t build_status
    cdef cydlpack.DLManagedTensor* dataset_dlpack = \
        cydlpack.dlpack_c(dataset_ai)
     cdef cuvsCagraIndexParams* params = index_params.params
 
+    cdef cuvsResources_t res = resources.get_c_obj()
+
     with cuda_interruptible():
         check_cuvs(cuvsCagraBuild(
-            res_,
+            res,
             params,
             dataset_dlpack,
             idx.index
@@ -445,11 +428,6 @@ def search(SearchParams search_params,
     if not index.trained:
         raise ValueError("Index needs to be built before calling search.")
 
-    cdef cuvsResources_t res_
-    cdef cuvsError_t cstat
-
-    check_cuvs(cuvsResourcesCreate(&res_))
-
     # todo(dgd): we can make the check of dtype a parameter of wrap_array
     # in RAFT to make this a single call
     queries_cai = wrap_array(queries)
@@ -480,10 +458,11 @@ def search(SearchParams search_params,
         cydlpack.dlpack_c(neighbors_cai)
     cdef cydlpack.DLManagedTensor* distances_dlpack = \
         cydlpack.dlpack_c(distances_cai)
+    cdef cuvsResources_t res = resources.get_c_obj()
 
     with cuda_interruptible():
         check_cuvs(cuvsCagraSearch(
-            res_,
+            res,
             params,
             index.index,
             queries_dlpack,
diff --git a/python/cuvs/cuvs/test/test_doctests.py b/python/cuvs/cuvs/test/test_doctests.py
index 6d56ffaa2..d334aa322 100644
--- a/python/cuvs/cuvs/test/test_doctests.py
+++ b/python/cuvs/cuvs/test/test_doctests.py
@@ -90,6 +90,7 @@ def _find_doctests_in_obj(obj, finder=None, criteria=None):
 # doctests for here
 DOC_STRINGS = list(_find_doctests_in_obj(cuvs.neighbors))
 DOC_STRINGS.extend(_find_doctests_in_obj(cuvs.neighbors.cagra))
+DOC_STRINGS.extend(_find_doctests_in_obj(cuvs.common))
 
 
 @pytest.mark.parametrize(
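For context, a minimal usage sketch of the Resources / auto_sync_resources API added by this diff. The decorated function dummy_algo below is hypothetical and only illustrates the pattern that cagra.build_index and cagra.search follow above; the imports are the ones this change introduces.

# Hypothetical example (not part of this change): a cuVS-style function
# decorated with auto_sync_resources, and the two ways callers can use it.
from cuvs.common import Resources, auto_sync_resources


@auto_sync_resources
def dummy_algo(dataset, resources=None):
    """Run some GPU work on `dataset`.

    Parameters
    ----------
    {resources_docstring}
    """
    # GPU work ordered on the stream owned by `resources` would go here.
    return dataset


# Default path: the decorator creates a Resources handle and syncs it on exit.
out = dummy_algo([1, 2, 3])

# Explicit path: reuse one handle across calls and synchronize manually.
handle = Resources()
out = dummy_algo([1, 2, 3], resources=handle)
handle.sync()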