set max pt version, skip 11.4 tests using wg
alexbarghi-nv committed Dec 5, 2024
1 parent 18505a8 commit 7e4a15e
Showing 4 changed files with 11 additions and 3 deletions.
conda/environments/all_cuda-118_arch-x86_64.yaml (1 addition, 1 deletion)
@@ -53,7 +53,7 @@ dependencies:
 - pytest-cov
 - pytest-xdist
 - python-louvain
-- pytorch>=2.3
+- pytorch>=2.3,<2.5
 - raft-dask==24.12.*,>=0.0.0a0
 - rapids-build-backend>=0.3.1,<0.4.0.dev0
 - rapids-dask-dependency==24.12.*,>=0.0.0a0

conda/environments/all_cuda-125_arch-x86_64.yaml (1 addition, 1 deletion)
@@ -58,7 +58,7 @@ dependencies:
 - pytest-cov
 - pytest-xdist
 - python-louvain
-- pytorch>=2.3
+- pytorch>=2.3,<2.5
 - raft-dask==24.12.*,>=0.0.0a0
 - rapids-build-backend>=0.3.1,<0.4.0.dev0
 - rapids-dask-dependency==24.12.*,>=0.0.0a0

dependencies.yaml (1 addition, 1 deletion)
@@ -507,7 +507,7 @@ dependencies:
     common:
       - output_types: [conda]
         packages:
-          - &pytorch_conda pytorch>=2.3
+          - &pytorch_conda pytorch>=2.3,<2.5
           - torchdata
           - pydantic
           - ogb
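
For reference, the new bound keeps the 2.3.x and 2.4.x series and excludes 2.5 and later. A minimal sketch of what the constraint admits, using PEP 440 specifiers from the packaging library (conda's version matching is a separate implementation, but it treats simple bounds like these the same way):

# Editor's sketch, not part of the commit: check which PyTorch versions
# satisfy the new pin "pytorch>=2.3,<2.5".
from packaging.specifiers import SpecifierSet
from packaging.version import Version

pin = SpecifierSet(">=2.3,<2.5")
for candidate in ["2.2.2", "2.3.0", "2.4.1", "2.5.0"]:
    status = "allowed" if Version(candidate) in pin else "excluded"
    print(candidate, status)
# 2.2.2 excluded, 2.3.0 allowed, 2.4.1 allowed, 2.5.0 excluded
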
Test module for the WholeGraph FeatureStore backend (8 additions)

@@ -15,6 +15,8 @@
 import numpy as np
 import os
 
+from cuda import cudart
+
 from cugraph.gnn import FeatureStore
 
 from cugraph.utilities.utils import import_optional, MissingModule

@@ -66,6 +68,9 @@ def runtest(rank: int, world_size: int):
 @pytest.mark.skipif(
     isinstance(pylibwholegraph, MissingModule), reason="wholegraph not available"
 )
+@pytest.mark.skipif(
+    cudart.cudaRuntimeGetVersion()[1] < 11080, reason="not compatible with CUDA < 11.8"
+)
 def test_feature_storage_wholegraph_backend():
     world_size = torch.cuda.device_count()
     print("gpu count:", world_size)

@@ -81,6 +86,9 @@ def test_feature_storage_wholegraph_backend():
 @pytest.mark.skipif(
     isinstance(pylibwholegraph, MissingModule), reason="wholegraph not available"
 )
+@pytest.mark.skipif(
+    cudart.cudaRuntimeGetVersion()[1] < 11080, reason="not compatible with CUDA < 11.8"
+)
 def test_feature_storage_wholegraph_backend_mg():
     world_size = torch.cuda.device_count()
     print("gpu count:", world_size)
