From 4d61f15953fa35ca634ac8999c8a3c908ea4e850 Mon Sep 17 00:00:00 2001 From: Thomas Klijnsma Date: Tue, 12 Dec 2023 20:12:03 +0000 Subject: [PATCH] Fixing CPU-only workflow --- setup.py | 31 +++++++--------- tests/test_extensions.py | 12 +++++- tests/test_knn.py | 29 +++++++++++++-- torch_cmspepr/__init__.py | 6 ++- torch_cmspepr/select_knn.py | 74 ++++++++++++++++++++++++++++++++++++- 5 files changed, 127 insertions(+), 25 deletions(-) diff --git a/setup.py b/setup.py index 0c10eea..134abb4 100644 --- a/setup.py +++ b/setup.py @@ -35,25 +35,22 @@ extra_compile_args={'cxx': ['-O2']}, extra_link_args=['-s'] ) -extensions_cpu = [ - CppExtension('select_knn_cpu', ['extensions/select_knn_cpu.cpp'], **cpu_kwargs) +extensions = [ + CppExtension('select_knn_cpu', ['extensions/select_knn_cpu.cpp'], **cpu_kwargs), ] -cuda_kwargs = dict( - include_dirs=[extensions_dir], - extra_compile_args={'cxx': ['-O2'], 'nvcc': ['--expt-relaxed-constexpr', '-O2']}, - extra_link_args=['-s'] - ) -extensions_cuda = [ - CUDAExtension( - 'select_knn_cuda', - ['extensions/select_knn_cuda.cpp', 'extensions/select_knn_cuda_kernel.cu'], - **cuda_kwargs +if DO_CUDA: + cuda_kwargs = dict( + include_dirs=[extensions_dir], + extra_compile_args={'cxx': ['-O2'], 'nvcc': ['--expt-relaxed-constexpr', '-O2']}, + extra_link_args=['-s'] ) - ] - -extensions = [] -if DO_CPU: extensions.extend(extensions_cpu) -if DO_CUDA: extensions.extend(extensions_cuda) + extensions.extend([ + CUDAExtension( + 'select_knn_cuda', + ['extensions/select_knn_cuda.cpp', 'extensions/select_knn_cuda_kernel.cu'], + **cuda_kwargs + ), + ]) # Print extensions diff --git a/tests/test_extensions.py b/tests/test_extensions.py index 51f5a59..d904742 100644 --- a/tests/test_extensions.py +++ b/tests/test_extensions.py @@ -1,5 +1,6 @@ import os.path as osp import torch +import pytest # 4 points on a diagonal line with d^2 = 0.1^2+0.1^2 = 0.02 between them. # 1 point very far away. @@ -54,8 +55,14 @@ ) SO_DIR = osp.dirname(osp.dirname(osp.abspath(__file__))) +CPU_INSTALLED = osp.isfile(osp.join(SO_DIR, 'select_knn_cpu.so')) +CUDA_INSTALLED = osp.isfile(osp.join(SO_DIR, 'select_knn_cuda.so')) +@pytest.mark.skipif( + not CPU_INSTALLED, + reason='CPU extension for select_knn not installed', +) def test_select_knn_op_cpu(): torch.ops.load_library(osp.join(SO_DIR, 'select_knn_cpu.so')) neigh_indices, neigh_dist_sq = torch.ops.select_knn_cpu.select_knn_cpu( @@ -77,7 +84,10 @@ def test_select_knn_op_cpu(): assert torch.allclose(neigh_indices, expected_neigh_indices) assert torch.allclose(neigh_dist_sq, expected_neigh_dist_sq) - +@pytest.mark.skipif( + not CUDA_INSTALLED, + reason='CUDA extension for select_knn not installed', +) def test_select_knn_op_cuda(): gpu = torch.device('cuda') torch.ops.load_library(osp.join(SO_DIR, 'select_knn_cuda.so')) diff --git a/tests/test_knn.py b/tests/test_knn.py index c619e2e..46a6870 100644 --- a/tests/test_knn.py +++ b/tests/test_knn.py @@ -1,3 +1,5 @@ +import os.path as osp +import pytest import torch # 4 points on a diagonal line with d^2 = 0.1^2+0.1^2 = 0.02 between them. 
@@ -57,7 +59,14 @@ ] ) +SO_DIR = osp.dirname(osp.dirname(osp.abspath(__file__))) +CPU_INSTALLED = osp.isfile(osp.join(SO_DIR, 'select_knn_cpu.so')) +CUDA_INSTALLED = osp.isfile(osp.join(SO_DIR, 'select_knn_cuda.so')) +@pytest.mark.skipif( + not CPU_INSTALLED, + reason='CPU extension for select_knn not installed', +) def test_knn_graph_cpu(): from torch_cmspepr import knn_graph @@ -86,7 +95,10 @@ def test_knn_graph_cpu(): print(expected) assert torch.allclose(edge_index, expected) - +@pytest.mark.skipif( + not CPU_INSTALLED, + reason='CPU extension for select_knn not installed', +) def test_knn_graph_cpu_1dim(): from torch_cmspepr import knn_graph @@ -104,7 +116,10 @@ def test_knn_graph_cpu_1dim(): print(expected) assert torch.allclose(edge_index, expected) - +@pytest.mark.skipif( + not CUDA_INSTALLED, + reason='CUDA extension for select_knn not installed', +) def test_knn_graph_cuda(): from torch_cmspepr import knn_graph @@ -125,7 +140,10 @@ def test_knn_graph_cuda(): print(expected_edge_index_loop) assert torch.allclose(edge_index, expected_edge_index_loop.to(gpu)) - +@pytest.mark.skipif( + not CPU_INSTALLED, + reason='CPU extension for select_knn not installed', +) def test_select_knn_cpu(): from torch_cmspepr import select_knn @@ -141,7 +159,10 @@ def test_select_knn_cpu(): assert torch.allclose(neigh_indices, expected_neigh_indices) assert torch.allclose(neigh_dist_sq, expected_neigh_dist_sq) - +@pytest.mark.skipif( + not CUDA_INSTALLED, + reason='CUDA extension for select_knn not installed', +) def test_select_knn_cuda(): from torch_cmspepr import select_knn diff --git a/torch_cmspepr/__init__.py b/torch_cmspepr/__init__.py index 6a5ffa0..31cc9de 100644 --- a/torch_cmspepr/__init__.py +++ b/torch_cmspepr/__init__.py @@ -3,7 +3,7 @@ import logging import torch -__version__ = '1.0.0' +__version__ = '1.0.1' def setup_logger(name: str = "cmspepr") -> logging.Logger: @@ -37,6 +37,9 @@ def setup_logger(name: str = "cmspepr") -> logging.Logger: logger = setup_logger() +# Keep track of which ops were successfully loaded +_loaded_ops = set() + # Load the extensions as ops def load_ops(so_file): @@ -44,6 +47,7 @@ def load_ops(so_file): logger.error(f'Could not load op: No file {so_file}') else: torch.ops.load_library(so_file) + _loaded_ops.add(osp.basename(so_file)) THISDIR = osp.dirname(osp.abspath(__file__)) diff --git a/torch_cmspepr/select_knn.py b/torch_cmspepr/select_knn.py index bf33d5a..8d2d8b9 100644 --- a/torch_cmspepr/select_knn.py +++ b/torch_cmspepr/select_knn.py @@ -1,5 +1,75 @@ from typing import Optional, Tuple import torch +from torch_cmspepr import _loaded_ops + +# JIT compile the interface to the extensions. +# Do not try to compile torch.ops.select_knn_* if those ops aren't actually loaded! 
+if 'select_knn_cpu.so' in _loaded_ops: + + @torch.jit.script + def select_knn_cpu( + x: torch.Tensor, + row_splits: torch.Tensor, + mask: torch.Tensor, + k: int, + max_radius: float, + mask_mode: int, + ) -> Tuple[torch.Tensor, torch.Tensor]: + return torch.ops.select_knn_cpu.select_knn_cpu( + x, + row_splits, + mask, + k, + max_radius, + mask_mode, + ) + +else: + + @torch.jit.script + def select_knn_cpu( + x: torch.Tensor, + row_splits: torch.Tensor, + mask: torch.Tensor, + k: int, + max_radius: float, + mask_mode: int, + ) -> Tuple[torch.Tensor, torch.Tensor]: + raise Exception('CPU extension for select_knn not installed') + + +if 'select_knn_cuda.so' in _loaded_ops: + + @torch.jit.script + def select_knn_cuda( + x: torch.Tensor, + row_splits: torch.Tensor, + mask: torch.Tensor, + k: int, + max_radius: float, + mask_mode: int, + ) -> Tuple[torch.Tensor, torch.Tensor]: + return torch.ops.select_knn_cuda.select_knn_cuda( + x, + row_splits, + mask, + k, + max_radius, + mask_mode, + ) + +else: + + @torch.jit.script + def select_knn_cuda( + x: torch.Tensor, + row_splits: torch.Tensor, + mask: torch.Tensor, + k: int, + max_radius: float, + mask_mode: int, + ) -> Tuple[torch.Tensor, torch.Tensor]: + raise Exception('CUDA extension for select_knn not installed') @torch.jit.script @@ -64,7 +134,7 @@ def select_knn( torch.cumsum(counts, 0, out=row_splits[1:]) if x.device == torch.device('cpu'): - return torch.ops.select_knn_cpu.select_knn_cpu( + return select_knn_cpu( x, row_splits, mask, @@ -73,7 +143,7 @@ def select_knn( mask_mode, ) else: - return torch.ops.select_knn_cuda.select_knn_cuda( + return select_knn_cuda( x, row_splits, mask,
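
Note: below is a minimal, hypothetical smoke test for the CPU-only workflow this patch fixes (i.e. an install where only select_knn_cpu.so was built). The public signature of knn_graph is not shown in this patch; a torch_cluster-style knn_graph(x, k) call and a (2, num_edges) edge_index return value are assumed purely for illustration.

    # Hypothetical CPU-only smoke test; assumes a knn_graph(x, k) call signature.
    import torch
    from torch_cmspepr import knn_graph

    x = torch.rand(20, 3)         # 20 points in 3D, kept on the CPU
    edge_index = knn_graph(x, 4)  # k = 4 neighbours; dispatches to the select_knn_cpu op
    print(edge_index.shape)       # expected to be a (2, num_edges) tensor of graph edges

With this patch, the call above should succeed without select_knn_cuda.so present, since the CUDA wrapper is only JIT-scripted when its op was actually loaded.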