Merge branch 'master' into add_weighted_graph
thunderock authored Jul 8, 2024
2 parents ce19d0f + 616704a commit 0176da8
Showing 31 changed files with 122 additions and 82 deletions.
19 changes: 6 additions & 13 deletions .github/workflows/building-conda.yml
@@ -11,21 +11,14 @@ jobs:
fail-fast: false
matrix:
# We have trouble building for Windows - drop for now.
os: [ubuntu-20.04, macos-11] # windows-2019
python-version: ['3.8', '3.9', '3.10', '3.11']
torch-version: [2.0.0, 2.1.0]
cuda-version: ['cpu', 'cu117', 'cu118', 'cu121']
os: [ubuntu-20.04] # windows-2019
python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
torch-version: [2.3.0] # [2.1.0, 2.2.0, 2.3.0]
cuda-version: ['cpu', 'cu118', 'cu121']
exclude:
- torch-version: 2.0.0
cuda-version: 'cu121'
- python-version: '3.12' # Python 3.12 not yet supported in `conda-build`.
- torch-version: 2.1.0
cuda-version: 'cu117'
- os: macos-11
cuda-version: 'cu117'
- os: macos-11
cuda-version: 'cu118'
- os: macos-11
cuda-version: 'cu121'
python-version: '3.12'

steps:
- uses: actions/checkout@v2
25 changes: 12 additions & 13 deletions .github/workflows/building.yml
@@ -10,20 +10,16 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [ubuntu-20.04, macos-11, windows-2019]
python-version: ['3.8', '3.9', '3.10', '3.11']
torch-version: [2.0.0, 2.1.0]
cuda-version: ['cpu', 'cu117', 'cu118', 'cu121']
os: [ubuntu-20.04, macos-14, windows-2019]
python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
torch-version: [2.3.0] # [2.1.0, 2.2.0, 2.3.0]
cuda-version: ['cpu', 'cu118', 'cu121']
exclude:
- torch-version: 2.0.0
cuda-version: 'cu121'
- torch-version: 2.1.0
cuda-version: 'cu117'
- os: macos-11
cuda-version: 'cu117'
- os: macos-11
python-version: '3.12'
- os: macos-14
cuda-version: 'cu118'
- os: macos-11
- os: macos-14
cuda-version: 'cu121'

steps:
@@ -36,8 +32,11 @@ jobs:
- name: Upgrade pip
run: |
pip install --upgrade setuptools
pip install scipy==1.10.1 # Python 3.8 support
pip list
- name: Install scipy
if: ${{ matrix.python-version == '3.8' }}
run: |
pip install scipy==1.10.1
- name: Free Disk Space (Ubuntu)
if: ${{ runner.os == 'Linux' }}
10 changes: 7 additions & 3 deletions .github/workflows/testing.yml
@@ -16,7 +16,7 @@ jobs:
matrix:
os: [ubuntu-latest, windows-latest]
python-version: [3.8]
torch-version: [2.0.0, 2.1.0]
torch-version: [2.2.0, 2.3.0]

steps:
- uses: actions/checkout@v2
@@ -29,9 +29,13 @@ jobs:
run: |
pip install torch==${{ matrix.torch-version }} --extra-index-url https://download.pytorch.org/whl/cpu
- name: Install scipy
if: ${{ matrix.python-version == '3.8' }}
run: |
pip install scipy==1.10.1
- name: Install main package
run: |
pip install scipy==1.10.1 # Python 3.8 support
python setup.py develop
- name: Run test-suite
@@ -40,7 +44,7 @@
pytest --cov --cov-report=xml
- name: Upload coverage
uses: codecov/codecov-action@v1
uses: codecov/codecov-action@v4
if: success()
with:
fail_ci_if_error: false
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -1,7 +1,7 @@
cmake_minimum_required(VERSION 3.0)
project(torchcluster)
set(CMAKE_CXX_STANDARD 14)
set(TORCHCLUSTER_VERSION 1.6.2)
set(TORCHCLUSTER_VERSION 1.6.3)

option(WITH_CUDA "Enable CUDA support" OFF)
option(WITH_PYTHON "Link to Python when building" ON)
18 changes: 9 additions & 9 deletions README.md
@@ -43,12 +43,12 @@ conda install pytorch-cluster -c pyg

We alternatively provide pip wheels for all major OS/PyTorch/CUDA combinations, see [here](https://data.pyg.org/whl).

#### PyTorch 2.1
#### PyTorch 2.3

To install the binaries for PyTorch 2.1.0, simply run
To install the binaries for PyTorch 2.3.0, simply run

```
pip install torch-cluster -f https://data.pyg.org/whl/torch-2.1.0+${CUDA}.html
pip install torch-cluster -f https://data.pyg.org/whl/torch-2.3.0+${CUDA}.html
```

where `${CUDA}` should be replaced by either `cpu`, `cu118`, or `cu121` depending on your PyTorch installation.
@@ -59,23 +59,23 @@ where `${CUDA}` should be replaced by either `cpu`, `cu118`, or `cu121` depending on your PyTorch installation.
| **Windows** | ✅ | ✅ | ✅ |
| **macOS** | ✅ | | |
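
If it is unclear which `${CUDA}` value matches a local setup, a small helper along these lines can derive it from the installed PyTorch build (a sketch only; `wheel_suffix` is not part of the package):

```
# Sketch: derive the ${CUDA} suffix for the wheel index URL from the
# locally installed PyTorch build. Assumes PyTorch is already installed.
import torch

def wheel_suffix() -> str:
    cuda = torch.version.cuda  # e.g. '12.1', or None for CPU-only builds
    return 'cpu' if cuda is None else 'cu' + cuda.replace('.', '')

print('pip install torch-cluster -f '
      f'https://data.pyg.org/whl/torch-2.3.0+{wheel_suffix()}.html')
```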

#### PyTorch 2.0
#### PyTorch 2.2

To install the binaries for PyTorch 2.0.0, simply run
To install the binaries for PyTorch 2.2.0, simply run

```
pip install torch-cluster -f https://data.pyg.org/whl/torch-2.0.0+${CUDA}.html
pip install torch-cluster -f https://data.pyg.org/whl/torch-2.2.0+${CUDA}.html
```

where `${CUDA}` should be replaced by either `cpu`, `cu117`, or `cu118` depending on your PyTorch installation.
where `${CUDA}` should be replaced by either `cpu`, `cu118`, or `cu121` depending on your PyTorch installation.

| | `cpu` | `cu117` | `cu118` |
| | `cpu` | `cu118` | `cu121` |
|-------------|-------|---------|---------|
| **Linux** | ✅ | ✅ | ✅ |
| **Windows** | ✅ | ✅ | ✅ |
| **macOS** | ✅ | | |

**Note:** Binaries of older versions are also provided for PyTorch 1.4.0, PyTorch 1.5.0, PyTorch 1.6.0, PyTorch 1.7.0/1.7.1, PyTorch 1.8.0/1.8.1, PyTorch 1.9.0, PyTorch 1.10.0/1.10.1/1.10.2, PyTorch 1.11.0, PyTorch 1.12.0/1.12.1 and PyTorch 1.13.0/1.13.1 (following the same procedure).
**Note:** Binaries of older versions are also provided for PyTorch 1.4.0, PyTorch 1.5.0, PyTorch 1.6.0, PyTorch 1.7.0/1.7.1, PyTorch 1.8.0/1.8.1, PyTorch 1.9.0, PyTorch 1.10.0/1.10.1/1.10.2, PyTorch 1.11.0, PyTorch 1.12.0/1.12.1, PyTorch 1.13.0/1.13.1, PyTorch 2.0.0/2.0.1, and PyTorch 2.1.0/2.1.1/2.1.2 (following the same procedure).
For older versions, you need to explicitly specify the latest supported version number or install via `pip install --no-index` in order to prevent a manual installation from source.
You can look up the latest supported version number [here](https://data.pyg.org/whl).

2 changes: 1 addition & 1 deletion conda/pytorch-cluster/README.md
@@ -1,3 +1,3 @@
```
./build_conda.sh 3.9 2.1.0 cu118 # python, pytorch and cuda version
./build_conda.sh 3.11 2.3.0 cu118 # python, pytorch and cuda version
```
2 changes: 1 addition & 1 deletion conda/pytorch-cluster/meta.yaml
@@ -1,6 +1,6 @@
package:
name: pytorch-cluster
version: 1.6.2
version: 1.6.3

source:
path: ../..
2 changes: 1 addition & 1 deletion csrc/cuda/fps_cuda.cu
@@ -71,7 +71,7 @@ torch::Tensor fps_cuda(torch::Tensor src, torch::Tensor ptr,
CHECK_CUDA(ptr);
CHECK_CUDA(ratio);
CHECK_INPUT(ptr.dim() == 1);
cudaSetDevice(src.get_device());
c10::cuda::MaybeSetDevice(src.get_device());

src = src.view({src.size(0), -1}).contiguous();
ptr = ptr.contiguous();
2 changes: 1 addition & 1 deletion csrc/cuda/graclus_cuda.cu
@@ -223,7 +223,7 @@ torch::Tensor graclus_cuda(torch::Tensor rowptr, torch::Tensor col,
CHECK_INPUT(optional_weight.value().dim() == 1);
CHECK_INPUT(optional_weight.value().numel() == col.numel());
}
cudaSetDevice(rowptr.get_device());
c10::cuda::MaybeSetDevice(rowptr.get_device());

int64_t num_nodes = rowptr.numel() - 1;
auto out = torch::full(num_nodes, -1, rowptr.options());
2 changes: 1 addition & 1 deletion csrc/cuda/grid_cuda.cu
@@ -29,7 +29,7 @@ torch::Tensor grid_cuda(torch::Tensor pos, torch::Tensor size,
torch::optional<torch::Tensor> optional_end) {
CHECK_CUDA(pos);
CHECK_CUDA(size);
cudaSetDevice(pos.get_device());
c10::cuda::MaybeSetDevice(pos.get_device());

if (optional_start.has_value())
CHECK_CUDA(optional_start.value());
2 changes: 1 addition & 1 deletion csrc/cuda/knn_cuda.cu
@@ -113,7 +113,7 @@ torch::Tensor knn_cuda(const torch::Tensor x, const torch::Tensor y,

CHECK_INPUT(ptr_x.value().numel() == ptr_y.value().numel());

cudaSetDevice(x.get_device());
c10::cuda::MaybeSetDevice(x.get_device());

auto row = torch::empty({y.size(0) * k}, ptr_y.value().options());
auto col = torch::full(y.size(0) * k, -1, ptr_y.value().options());
2 changes: 1 addition & 1 deletion csrc/cuda/nearest_cuda.cu
@@ -71,7 +71,7 @@ torch::Tensor nearest_cuda(torch::Tensor x, torch::Tensor y,
CHECK_CUDA(y);
CHECK_CUDA(ptr_x);
CHECK_CUDA(ptr_y);
cudaSetDevice(x.get_device());
c10::cuda::MaybeSetDevice(x.get_device());

x = x.view({x.size(0), -1}).contiguous();
y = y.view({y.size(0), -1}).contiguous();
20 changes: 10 additions & 10 deletions csrc/cuda/radius_cuda.cu
@@ -52,7 +52,7 @@ torch::Tensor radius_cuda(const torch::Tensor x, const torch::Tensor y,
CHECK_INPUT(y.dim() == 2);
CHECK_INPUT(x.size(1) == y.size(1));

cudaSetDevice(x.get_device());
c10::cuda::MaybeSetDevice(x.get_device());

if (ptr_x.has_value()) {
CHECK_CUDA(ptr_x.value());
@@ -70,8 +70,6 @@ torch::Tensor radius_cuda(const torch::Tensor x, const torch::Tensor y,

CHECK_INPUT(ptr_x.value().numel() == ptr_y.value().numel());

cudaSetDevice(x.get_device());

auto row =
torch::full(y.size(0) * max_num_neighbors, -1, ptr_y.value().options());
auto col =
@@ -81,13 +79,15 @@

auto stream = at::cuda::getCurrentCUDAStream();
auto scalar_type = x.scalar_type();
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, scalar_type, "_", [&] {
radius_kernel<scalar_t><<<BLOCKS, THREADS, 0, stream>>>(
x.data_ptr<scalar_t>(), y.data_ptr<scalar_t>(),
ptr_x.value().data_ptr<int64_t>(), ptr_y.value().data_ptr<int64_t>(),
row.data_ptr<int64_t>(), col.data_ptr<int64_t>(), r * r, x.size(0),
y.size(0), x.size(1), ptr_x.value().numel() - 1, max_num_neighbors);
});
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "_", [&] {
radius_kernel<scalar_t><<<BLOCKS, THREADS, 0, stream>>>(
x.data_ptr<scalar_t>(), y.data_ptr<scalar_t>(),
ptr_x.value().data_ptr<int64_t>(),
ptr_y.value().data_ptr<int64_t>(), row.data_ptr<int64_t>(),
col.data_ptr<int64_t>(), r * r, x.size(0), y.size(0), x.size(1),
ptr_x.value().numel() - 1, max_num_neighbors);
});

auto mask = row != -1;
return torch::stack({row.masked_select(mask), col.masked_select(mask)}, 0);
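The `AT_DISPATCH_FLOATING_TYPES_AND2` change above extends the `radius` CUDA kernel dispatch to `BFloat16` in addition to `Half`. A quick illustration of what this is meant to enable on the Python side (a sketch; it assumes a CUDA build of torch-cluster from this revision):

```
# Sketch: calling radius with bfloat16 inputs on the GPU, which the new
# dispatch macro is intended to cover. Assumes a CUDA-enabled build.
import torch
from torch_cluster import radius

x = torch.randn(10, 3, dtype=torch.bfloat16, device='cuda')
y = torch.randn(4, 3, dtype=torch.bfloat16, device='cuda')
edge_index = radius(x, y, r=1.5)  # shape [2, num_edges]
```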
2 changes: 1 addition & 1 deletion csrc/cuda/rw_cuda.cu
@@ -285,7 +285,7 @@ random_walk_cuda(torch::Tensor rowptr, torch::Tensor col, torch::Tensor start,
CHECK_CUDA(rowptr);
CHECK_CUDA(col);
CHECK_CUDA(start);
cudaSetDevice(rowptr.get_device());
c10::cuda::MaybeSetDevice(rowptr.get_device());

CHECK_INPUT(rowptr.dim() == 1);
CHECK_INPUT(col.dim() == 1);
3 changes: 3 additions & 0 deletions pyproject.toml
@@ -0,0 +1,3 @@
[build-system]
requires = ["setuptools", "torch"]
build-backend = "setuptools.build_meta"
1 change: 1 addition & 0 deletions setup.cfg
@@ -10,6 +10,7 @@ classifiers =
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: 3.11
Programming Language :: Python :: 3.12
Programming Language :: Python :: 3 :: Only

[aliases]
10 changes: 6 additions & 4 deletions setup.py
@@ -11,7 +11,7 @@
from torch.utils.cpp_extension import (CUDA_HOME, BuildExtension, CppExtension,
CUDAExtension)

__version__ = '1.6.2'
__version__ = '1.6.3'
URL = 'https://github.com/rusty1s/pytorch_cluster'

WITH_CUDA = False
@@ -61,9 +61,11 @@ def get_extensions():
print('Compiling without OpenMP...')

# Compile for mac arm64
if (sys.platform == 'darwin' and platform.machine() == 'arm64'):
extra_compile_args['cxx'] += ['-arch', 'arm64']
extra_link_args += ['-arch', 'arm64']
if sys.platform == 'darwin':
extra_compile_args['cxx'] += ['-D_LIBCPP_DISABLE_AVAILABILITY']
if platform.machine == 'arm64':
extra_compile_args['cxx'] += ['-arch', 'arm64']
extra_link_args += ['-arch', 'arm64']

if suffix == 'cuda':
define_macros += [('WITH_CUDA', None)]
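One caveat worth flagging in the new macOS branch of `setup.py` above: `platform.machine == 'arm64'` compares the function object itself to a string, so the condition is always false and the `-arch arm64` flags would never be appended. A minimal sketch of the presumably intended check (an observation only, not part of this commit):

```
# Sketch of the arm64 detection as presumably intended; note the call
# parentheses on platform.machine().
import platform
import sys

extra_compile_args = {'cxx': []}
extra_link_args = []

if sys.platform == 'darwin':
    extra_compile_args['cxx'] += ['-D_LIBCPP_DISABLE_AVAILABILITY']
    if platform.machine() == 'arm64':
        extra_compile_args['cxx'] += ['-arch', 'arm64']
        extra_link_args += ['-arch', 'arm64']
```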
4 changes: 4 additions & 0 deletions test/test_graclus.py
@@ -50,3 +50,7 @@ def test_graclus_cluster(test, dtype, device):

cluster = graclus_cluster(row, col, weight)
assert_correct(row, col, cluster)

jit = torch.jit.script(graclus_cluster)
cluster = jit(row, col, weight)
assert_correct(row, col, cluster)
3 changes: 3 additions & 0 deletions test/test_grid.py
@@ -38,3 +38,6 @@ def test_grid_cluster(test, dtype, device):

cluster = grid_cluster(pos, size, start, end)
assert cluster.tolist() == test['cluster']

jit = torch.jit.script(grid_cluster)
assert torch.equal(jit(pos, size, start, end), cluster)
9 changes: 9 additions & 0 deletions test/test_knn.py
@@ -34,6 +34,10 @@ def test_knn(dtype, device):
edge_index = knn(x, y, 2)
assert to_set(edge_index) == set([(0, 2), (0, 3), (1, 0), (1, 1)])

jit = torch.jit.script(knn)
edge_index = jit(x, y, 2)
assert to_set(edge_index) == set([(0, 2), (0, 3), (1, 0), (1, 1)])

edge_index = knn(x, y, 2, batch_x, batch_y)
assert to_set(edge_index) == set([(0, 2), (0, 3), (1, 4), (1, 5)])

@@ -65,6 +69,11 @@ def test_knn_graph(dtype, device):
assert to_set(edge_index) == set([(1, 0), (3, 0), (0, 1), (2, 1), (1, 2),
(3, 2), (0, 3), (2, 3)])

jit = torch.jit.script(knn_graph)
edge_index = jit(x, k=2, flow='source_to_target')
assert to_set(edge_index) == set([(1, 0), (3, 0), (0, 1), (2, 1), (1, 2),
(3, 2), (0, 3), (2, 3)])


@pytest.mark.parametrize('dtype,device', product([torch.float], devices))
def test_knn_graph_large(dtype, device):
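The test additions above script the public ops with TorchScript. For reference, a standalone sketch mirroring what the new tests exercise (it assumes `torch` and `torch-cluster` are installed):

```
# Sketch: compiling knn_graph with TorchScript, as the new tests do.
import torch
from torch_cluster import knn_graph

x = torch.randn(8, 3)
scripted = torch.jit.script(knn_graph)
edge_index = scripted(x, k=2, flow='source_to_target')
print(edge_index.shape)  # [2, 16]: k=2 neighbors for each of the 8 points
```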
21 changes: 17 additions & 4 deletions test/test_radius.py
@@ -4,14 +4,14 @@
import scipy.spatial
import torch
from torch_cluster import radius, radius_graph
from torch_cluster.testing import devices, grad_dtypes, tensor
from torch_cluster.testing import devices, floating_dtypes, tensor


def to_set(edge_index):
return set([(i, j) for i, j in edge_index.t().tolist()])


@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
@pytest.mark.parametrize('dtype,device', product(floating_dtypes, devices))
def test_radius(dtype, device):
x = tensor([
[-1, -1],
@@ -35,6 +35,11 @@ def test_radius(dtype, device):
assert to_set(edge_index) == set([(0, 0), (0, 1), (0, 2), (0, 3), (1, 1),
(1, 2), (1, 5), (1, 6)])

jit = torch.jit.script(radius)
edge_index = jit(x, y, 2, max_num_neighbors=4)
assert to_set(edge_index) == set([(0, 0), (0, 1), (0, 2), (0, 3), (1, 1),
(1, 2), (1, 5), (1, 6)])

edge_index = radius(x, y, 2, batch_x, batch_y, max_num_neighbors=4)
assert to_set(edge_index) == set([(0, 0), (0, 1), (0, 2), (0, 3), (1, 5),
(1, 6)])
@@ -47,7 +52,7 @@ def test_radius(dtype, device):
(1, 6)])


@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
@pytest.mark.parametrize('dtype,device', product(floating_dtypes, devices))
def test_radius_graph(dtype, device):
x = tensor([
[-1, -1],
@@ -64,12 +69,20 @@ def test_radius_graph(dtype, device):
assert to_set(edge_index) == set([(1, 0), (3, 0), (0, 1), (2, 1), (1, 2),
(3, 2), (0, 3), (2, 3)])

jit = torch.jit.script(radius_graph)
edge_index = jit(x, r=2.5, flow='source_to_target')
assert to_set(edge_index) == set([(1, 0), (3, 0), (0, 1), (2, 1), (1, 2),
(3, 2), (0, 3), (2, 3)])


@pytest.mark.parametrize('dtype,device', product([torch.float], devices))
def test_radius_graph_large(dtype, device):
x = torch.randn(1000, 3, dtype=dtype, device=device)

edge_index = radius_graph(x, r=0.5, flow='target_to_source', loop=True,
edge_index = radius_graph(x,
r=0.5,
flow='target_to_source',
loop=True,
max_num_neighbors=2000)

tree = scipy.spatial.cKDTree(x.cpu().numpy())
