diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml
index e7fa6d4ee42..c045db32c65 100644
--- a/conda/environments/all_cuda-118_arch-x86_64.yaml
+++ b/conda/environments/all_cuda-118_arch-x86_64.yaml
@@ -53,7 +53,7 @@ dependencies:
 - pytest-cov
 - pytest-xdist
 - python-louvain
-- pytorch>=2.3
+- pytorch>=2.3,<2.5a0
 - raft-dask==24.12.*,>=0.0.0a0
 - rapids-build-backend>=0.3.1,<0.4.0.dev0
 - rapids-dask-dependency==24.12.*,>=0.0.0a0
diff --git a/conda/environments/all_cuda-125_arch-x86_64.yaml b/conda/environments/all_cuda-125_arch-x86_64.yaml
index 7b7ac92b59b..333d9dc8c2f 100644
--- a/conda/environments/all_cuda-125_arch-x86_64.yaml
+++ b/conda/environments/all_cuda-125_arch-x86_64.yaml
@@ -58,7 +58,7 @@ dependencies:
 - pytest-cov
 - pytest-xdist
 - python-louvain
-- pytorch>=2.3
+- pytorch>=2.3,<2.5a0
 - raft-dask==24.12.*,>=0.0.0a0
 - rapids-build-backend>=0.3.1,<0.4.0.dev0
 - rapids-dask-dependency==24.12.*,>=0.0.0a0
diff --git a/dependencies.yaml b/dependencies.yaml
index 7b25ac05d62..d081655f8a9 100644
--- a/dependencies.yaml
+++ b/dependencies.yaml
@@ -507,7 +507,7 @@ dependencies:
     common:
       - output_types: [conda]
         packages:
-          - &pytorch_conda pytorch>=2.3
+          - &pytorch_conda pytorch>=2.3,<2.5a0
           - torchdata
           - pydantic
           - ogb
diff --git a/python/cugraph/cugraph/tests/data_store/test_gnn_feat_storage_wholegraph.py b/python/cugraph/cugraph/tests/data_store/test_gnn_feat_storage_wholegraph.py
index 30336490312..f760ef3e1ba 100644
--- a/python/cugraph/cugraph/tests/data_store/test_gnn_feat_storage_wholegraph.py
+++ b/python/cugraph/cugraph/tests/data_store/test_gnn_feat_storage_wholegraph.py
@@ -15,6 +15,8 @@
 import numpy as np
 import os
 
+import numba.cuda
+
 from cugraph.gnn import FeatureStore
 from cugraph.utilities.utils import import_optional, MissingModule
 
@@ -25,6 +27,11 @@
 wgth = import_optional("pylibwholegraph.torch")
 
 
+def get_cudart_version():
+    major, minor = numba.cuda.runtime.get_version()
+    return major * 1000 + minor * 10
+
+
 def runtest(rank: int, world_size: int):
     torch.cuda.set_device(rank)
 
@@ -66,6 +73,9 @@ def runtest(rank: int, world_size: int):
 @pytest.mark.skipif(
     isinstance(pylibwholegraph, MissingModule), reason="wholegraph not available"
 )
+@pytest.mark.skipif(
+    get_cudart_version() < 11080, reason="not compatible with CUDA < 11.8"
+)
 def test_feature_storage_wholegraph_backend():
     world_size = torch.cuda.device_count()
     print("gpu count:", world_size)
@@ -81,6 +91,9 @@ def test_feature_storage_wholegraph_backend():
 @pytest.mark.skipif(
     isinstance(pylibwholegraph, MissingModule), reason="wholegraph not available"
 )
+@pytest.mark.skipif(
+    get_cudart_version() < 11080, reason="not compatible with CUDA < 11.8"
+)
 def test_feature_storage_wholegraph_backend_mg():
     world_size = torch.cuda.device_count()
     print("gpu count:", world_size)
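
Reviewer note on the version check added above: numba.cuda.runtime.get_version() returns the CUDA runtime version as a (major, minor) tuple, and major * 1000 + minor * 10 reproduces the integer encoding used by CUDA's own cudaRuntimeGetVersion (11.8 becomes 11080), which is why the new skipif markers compare against the literal 11080. The snippet below is a minimal sketch of that encoding only; encode_cudart_version is a hypothetical helper name for illustration, not part of the patch.

def encode_cudart_version(major: int, minor: int) -> int:
    # Same arithmetic as get_cudart_version() in the patched test module,
    # matching the cudaRuntimeGetVersion encoding: major * 1000 + minor * 10.
    return major * 1000 + minor * 10

# CUDA 11.8 is the minimum the WholeGraph tests support after this change.
assert encode_cudart_version(11, 8) == 11080
# CUDA 12.5 (the other CI matrix entry in this PR) sits well above the threshold.
assert encode_cudart_version(12, 5) == 12050
# Anything older, e.g. CUDA 11.7, now causes the tests to be skipped.
assert encode_cudart_version(11, 7) < 11080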