Commit

Merge branch 'main' into l1-solver

sgsellan authored Oct 29, 2024
2 parents b40d5ab + db3bcb0 commit 0f945e8
Showing 25 changed files with 69 additions and 58 deletions.
4 changes: 1 addition & 3 deletions .github/workflows/ciwheels.yml
@@ -34,11 +34,9 @@ jobs:
strategy:
fail-fast: false
matrix:
cpversion: ["cp36", "cp37", "cp38", "cp39", "cp310", "cp311", "cp312"]
cpversion: ["cp37", "cp38", "cp39", "cp310", "cp311", "cp312"]
os: [ { runs-on: ubuntu-latest, cibw-arch: manylinux_x86_64}, { runs-on: macos-latest, cibw-arch: macosx_x86_64}, { runs-on: macos-latest, cibw-arch: macosx_arm64}, { runs-on: windows-latest, cibw-arch: win_amd64} ]
exclude:
-- os: { runs-on: macos-latest, cibw-arch: macosx_arm64}
-cpversion: "cp36"
- os: { runs-on: macos-latest, cibw-arch: macosx_arm64}
cpversion: "cp37"

4 changes: 2 additions & 2 deletions .github/workflows/linux_build.yml
@@ -29,8 +29,8 @@ jobs:
- name: Install dependencies
shell: bash -l {0}
run: |
python -m pip install "numpy<=1.96.0"
python -m pip install scipy
python -m pip install "numpy>=1.16.5,<2.2"
python -m pip install "scipy>=1.6,<1.15"
python -m pip install scikit-image
python -m pip install scs
sudo apt-get install \
4 changes: 2 additions & 2 deletions .github/workflows/macos_build.yml
@@ -25,8 +25,8 @@ jobs:
- name: Install dependencies
shell: bash -l {0}
run: |
-${{ steps.installpython.outputs.python-path }} -m pip install "numpy<=1.96.0"
-${{ steps.installpython.outputs.python-path }} -m pip install scipy
+${{ steps.installpython.outputs.python-path }} -m pip install "numpy>=1.16.5,<2.2"
+${{ steps.installpython.outputs.python-path }} -m pip install "scipy>=1.6,<1.15"
${{ steps.installpython.outputs.python-path }} -m pip install scikit-image
${{ steps.installpython.outputs.python-path }} -m pip install scs
4 changes: 2 additions & 2 deletions .github/workflows/windows_build.yml
@@ -29,8 +29,8 @@ jobs:
- name: Install dependencies (Windows)
shell: bash -l {0}
run: |
python -m pip install "numpy<=1.96.0"
python -m pip install scipy
python -m pip install "numpy>=1.16.5,<2.2"
python -m pip install "scipy>=1.6,<1.15"
python -m pip install scikit-image
python -m pip install scs
8 changes: 8 additions & 0 deletions CMakeLists.txt
@@ -1,10 +1,18 @@
cmake_minimum_required(VERSION 3.10)

+# https://discourse.cmake.org/t/msvc-runtime-library-completely-ignored/10004
+cmake_policy(SET CMP0091 NEW)
+
project(Bindings
DESCRIPTION
"Python bindings"
)

+# MSVC needs explicit configuration for multithreading
+# Select a multi-threaded statically-linked runtime library
+# with or without debug information depending on the configuration
+set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>")
+
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
3 changes: 3 additions & 0 deletions README.md
@@ -158,9 +158,12 @@ library. We are thankful to them:
- [Otman Benchekroun](https://github.com/otmanon) ([PR #59](https://github.com/sgsellan/gpytoolbox/pull/59))
- [Abhishek Madan](https://github.com/abhimadan) ([PR #103](https://github.com/sgsellan/gpytoolbox/pull/103))
- [Lukas Hermann](https://github.com/lsh) ([PR #135](https://github.com/sgsellan/gpytoolbox/pull/135))
+- [Haoyang Wu](https://github.com/H-YWu) ([PR #142](https://github.com/sgsellan/gpytoolbox/pull/142))
+- [Dylan Rowe](https://github.com/d-r-o-w-e) ([PR #144](https://github.com/sgsellan/gpytoolbox/pull/144))
+



<!-- Most of the functionality in this library is python-only, and it requires no
installation. To use it, simply clone this repository
```bash
2 changes: 1 addition & 1 deletion ext/pybind11
Submodule pybind11 updated 196 files
4 changes: 2 additions & 2 deletions pyproject.toml
@@ -2,8 +2,8 @@
build-verbosity = "3"
[build-system]
requires = [
"numpy<=1.96.0",
"scipy",
"numpy>=1.16.5,<2.2",
"scipy>=1.6,<1.15",
"setuptools>=42",
"wheel",
"cmake>=3.16",
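For readers who want to confirm whether their installed versions fall inside the new build pins, here is a minimal sketch; it assumes the third-party `packaging` module and is not part of this commit:

```python
# Minimal sketch, not part of this commit: check installed numpy/scipy
# against the pins above. Assumes the third-party "packaging" package.
from importlib.metadata import version
from packaging.specifiers import SpecifierSet

pins = {"numpy": ">=1.16.5,<2.2", "scipy": ">=1.6,<1.15"}
for pkg, spec in pins.items():
    installed = version(pkg)
    print(f"{pkg} {installed} satisfies '{spec}':", installed in SpecifierSet(spec))
```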
4 changes: 2 additions & 2 deletions src/gpytoolbox/approximate_hausdorff_distance.py
@@ -69,7 +69,7 @@ def approximate_hausdorff_distance(v1,f1,v2,f2,use_cpp=True):
current_best_guess_hd = 0.0
for i in range(v1.shape[0]):
# print("Vertex %d of %d" % (i+1,v1.shape[0]))
-current_best_guess_dviB = np.Inf # current best guess for d(vi,B)
+current_best_guess_dviB = np.inf # current best guess for d(vi,B)

queue = [0]
while (len(queue)>0 and current_best_guess_dviB>current_best_guess_hd):
@@ -108,7 +108,7 @@ def approximate_hausdorff_distance(v1,f1,v2,f2,use_cpp=True):
# Now we do the other side, i.e., max(d(vB,A))
for i in range(v2.shape[0]):
# print("Vertex %d of %d" % (i+1,v2.shape[0]))
-current_best_guess_dviA = np.Inf
+current_best_guess_dviA = np.inf
queue = [0]
while (len(queue)>0 and current_best_guess_dviA>current_best_guess_hd):
q2 = queue.pop()
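Context for the `np.Inf` → `np.inf` renames in this and the following files: NumPy 2.0 removed the capitalized aliases (`np.Inf`, `np.NaN`, `np.NAN`), so only the lowercase constants work across both NumPy 1.x and 2.x. A quick illustration:

```python
import numpy as np

# np.Inf and np.NAN were removed in NumPy 2.0; the lowercase spellings
# below are valid on both NumPy 1.x and 2.x.
no_hit_distance = np.inf
undefined_weight = np.nan
print(np.isinf(no_hit_distance), np.isnan(undefined_weight))  # True True
```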
2 changes: 2 additions & 0 deletions src/gpytoolbox/biharmonic_energy_intrinsic.py
@@ -87,6 +87,8 @@ def _curved_hessian_energy(l_sq, F, n):
Q = sp.sparse.block_diag([
Q, sp.sparse.csr_matrix((n-Q.shape[0], n-Q.shape[0]))],
format='csr')
+else:
+    Q = sp.sparse.csr_matrix(Q)

return Q
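The added `else` branch appears to keep the return format consistent: `Q` comes back as CSR whether or not it needs padding. A standalone sketch of the same pattern, with a hypothetical helper name that is not the library's code:

```python
import scipy.sparse as sp

def pad_to_csr(Q, n):
    """Hypothetical helper mirroring the pattern above: pad Q to n x n and
    always return CSR, even when no padding is needed."""
    if Q.shape[0] < n:
        Q = sp.block_diag(
            [Q, sp.csr_matrix((n - Q.shape[0], n - Q.shape[0]))], format="csr")
    else:
        Q = sp.csr_matrix(Q)
    return Q

print(pad_to_csr(sp.eye(3, format="coo"), 5).format)  # "csr"
```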

2 changes: 1 addition & 1 deletion src/gpytoolbox/minimum_distance.py
@@ -43,7 +43,7 @@ def minimum_distance(v1,f1,v2,f2):

first_queue_pair = [0,0]
queue = [first_queue_pair]
-current_best_guess = np.Inf
+current_best_guess = np.inf
while len(queue)>0:
q1, q2 = queue.pop()
# print("-----------")
2 changes: 1 addition & 1 deletion src/gpytoolbox/ray_box_intersect.py
@@ -49,7 +49,7 @@ def ray_box_intersect(origin,dir,center,width):
whichPlane = 0
minB = center - 0.5*width
maxB = center + 0.5*width
-hit_coord = np.Inf*np.ones(dim)
+hit_coord = np.inf*np.ones(dim)

for i in range(dim):
if (origin[i]<minB[i]):
4 changes: 2 additions & 2 deletions src/gpytoolbox/ray_mesh_intersect.py
@@ -14,7 +14,7 @@ def __init__(self,cam_pos,cam_dir,V,F):
self.V = V
self.F = F
self.dim = V.shape[1]
-self.t = np.Inf
+self.t = np.inf
self.id = -1
self.lmbd = np.array([0,0,0])
def traversal_function(self,q,C,W,CH,tri_indices,split_dim,is_leaf):
@@ -100,7 +100,7 @@ def ray_mesh_intersect(cam_pos,cam_dir,V,F,use_embree=True,C=None,W=None,CH=None

ts, ids, lambdas = _ray_mesh_intersect_cpp_impl(cam_pos.astype(np.float64),cam_dir.astype(np.float64),V.astype(np.float64),F.astype(np.int32))
else:
-ts = np.Inf*np.ones(cam_pos.shape[0])
+ts = np.inf*np.ones(cam_pos.shape[0])
ids = -np.ones(cam_pos.shape[0],dtype=int)
lambdas = np.zeros((cam_pos.shape[0],3))
# print("building tree")
4 changes: 2 additions & 2 deletions src/gpytoolbox/ray_polyline_intersect.py
@@ -23,7 +23,7 @@ def ray_polyline_intersect(position, direction, polyline_vertices, max_distance
Returns
-------
x : numpy double array
-Vector of intersection point coordinates (np.Inf if no intersection)
+Vector of intersection point coordinates (np.inf if no intersection)
n : numpy double array
Vector of polyline normal at intersection (zeros if no intersection)
ind : int
@@ -53,7 +53,7 @@ def ray_polyline_intersect(position, direction, polyline_vertices, max_distance
if (EC is None):
EC = edge_indices(polyline_vertices.shape[0],closed=True)
ind = -1
-x = np.array([np.Inf, np.Inf])
+x = np.array([np.inf, np.inf])
n = np.array([0.0, 0.0])
distance_to_hit = max_distance
for i in range(0,EC.shape[0]):
16 changes: 8 additions & 8 deletions src/gpytoolbox/ray_triangle_intersect.py
@@ -60,8 +60,8 @@ def ray_triangle_intersect(origin,dir, v0, v1, v2, return_negative=False):
det = np.dot(v0v1,pvec)

if (np.abs(det) < 1e-6):
-t = np.Inf
-hit = np.Inf*np.ones(3)
+t = np.inf
+hit = np.inf*np.ones(3)
is_hit = False
return t,is_hit, hit

@@ -70,17 +70,17 @@ def ray_triangle_intersect(origin,dir, v0, v1, v2, return_negative=False):
u = np.dot(tvec,pvec) * invDet

if (u < 0 or u > 1):
-t = np.Inf
-hit = np.Inf*np.ones(3)
+t = np.inf
+hit = np.inf*np.ones(3)
is_hit = False
return t,is_hit, hit

qvec = np.cross(tvec,v0v1)
v = np.dot(dir,qvec)*invDet

if ((v < 0) or ((u + v) > 1)):
-t = np.Inf
-hit = np.Inf*np.ones(3)
+t = np.inf
+hit = np.inf*np.ones(3)
is_hit = False
return t,is_hit, hit

@@ -89,8 +89,8 @@ def ray_triangle_intersect(origin,dir, v0, v1, v2, return_negative=False):
is_hit = True
if (not return_negative):
if(t<0):
-t = np.Inf
-hit = np.Inf*np.ones(3)
+t = np.inf
+hit = np.inf*np.ones(3)
is_hit = False
return t,is_hit, hit

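As the hunks above suggest, a miss is reported as `t = np.inf`, an all-inf hit point, and `is_hit = False`. A hedged usage sketch based on the signature shown in the hunk headers (the top-level import path is an assumption):

```python
import numpy as np
from gpytoolbox import ray_triangle_intersect  # import path assumed

origin = np.array([0.0, 0.0, 1.0])
direction = np.array([0.0, 0.0, -1.0])
v0 = np.array([0.0, 0.0, 0.0])
v1 = np.array([1.0, 0.0, 0.0])
v2 = np.array([0.0, 1.0, 0.0])

t, is_hit, hit = ray_triangle_intersect(origin, direction, v0, v1, v2)
if is_hit:
    print("hit at", hit, "with ray parameter", t)
else:
    print("no intersection (t is np.inf)")
```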
16 changes: 8 additions & 8 deletions src/gpytoolbox/reach_for_the_arcs.py
@@ -21,7 +21,7 @@ def reach_for_the_arcs(U, S,
local_search_iters=20,
local_search_t=0.01,
tol=1e-4,
-clamp_value=np.Inf,
+clamp_value=np.inf,
force_cpu=False,
parallel=False,
verbose=False):
@@ -68,7 +68,7 @@ def reach_for_the_arcs(U, S,
locally make feasible step
tol : float, optional (default 1e-4)
tolerance for determining whether a point is inside a sphere
-clamp_value : float, optional (default np.Inf)
+clamp_value : float, optional (default np.inf)
value to which the SDF is clamped for clamped SDF reconstruction
force_cpu : bool, optional (default False)
whether to force rasterization onto the CPU
@@ -259,7 +259,7 @@ def _sdf_to_point_cloud(U, S,
batch_size=10000,
num_rasterization_spheres=0,
tol=1e-4,
-clamp_value=np.Inf,
+clamp_value=np.inf,
force_cpu=False,
parallel=False,
verbose=False):
@@ -291,7 +291,7 @@ def _sdf_to_point_cloud(U, S,
Set to zero to use all spheres.
tol : float, optional (default 1e-4)
tolerance for determining whether a point is inside a sphere
-clamp_value : float, optional (default np.Inf)
+clamp_value : float, optional (default np.inf)
value to which the SDF is clamped for clamped SDF reconstruction
parallel : bool, optional (default False)
whether to parallelize the algorithm or not
@@ -461,7 +461,7 @@ def _locally_make_feasible(U, S, P,
local_search_iters=20,
batch_size=10000,
tol=1e-4,
-clamp_value=np.Inf,
+clamp_value=np.inf,
parallel=False,
verbose=False):
"""Given a number of SDF samples and points, tries to make each point
@@ -488,7 +488,7 @@ def _locally_make_feasible(U, S, P,
how many points in one batch. Set to 0 to disable batching.
tol : float, optional (default 1e-4)
tolerance for determining whether a point is inside a sphere
-clamp_value : float, optional (default np.Inf)
+clamp_value : float, optional (default np.inf)
value to which the SDF is clamped for clamped SDF reconstruction
parallel : bool, optional (default False)
whether to parallelize the algorithm or not
@@ -557,7 +557,7 @@ def _fine_tune_point_cloud(U, S, P, N, f,
local_search_iters=20,
local_search_t=0.01,
tol=1e-4,
-clamp_value=np.Inf,
+clamp_value=np.inf,
parallel=False,
verbose=False):
"""Improve the point cloud with respect to the SDF such that the
@@ -602,7 +602,7 @@ def _fine_tune_point_cloud(U, S, P, N, f,
locally make feasible step
tol : float, optional (default 1e-4)
tolerance for determining whether a point is inside a sphere
-clamp_value : float, optional (default np.Inf)
+clamp_value : float, optional (default np.inf)
value to which the SDF is clamped for clamped SDF reconstruction
parallel : bool, optional (default False)
whether to parallelize the algorithm or not
14 changes: 7 additions & 7 deletions src/gpytoolbox/reach_for_the_spheres.py
@@ -496,7 +496,7 @@ def reach_for_the_spheres_iteration(state,
'remesh_iterations':1,
'batch_size':20000,
'fix_boundary':False,
-'clamp':np.Inf, 'pseudosdf_interior':False},
+'clamp':np.inf, 'pseudosdf_interior':False},
3: {'max_iter':20000, 'tol':1e-2, 'h':0.2,
'linesearch':True, 'min_t':1e-6, 'max_t':50.,
'dt':10.,
@@ -507,7 +507,7 @@ def reach_for_the_spheres_iteration(state,
'visualize':False,
'batch_size':20000,
'fix_boundary':False,
-'clamp':np.Inf, 'pseudosdf_interior':False}
+'clamp':np.inf, 'pseudosdf_interior':False}
}
if max_iter is None:
max_iter = default_params[dim]['max_iter']
@@ -566,11 +566,11 @@ def reach_for_the_spheres_iteration(state,
if state.its is None:
state.its = 0
if state.best_performance is None:
-state.best_performance = np.Inf
+state.best_performance = np.inf
if state.convergence_counter is None:
state.convergence_counter = 0
if state.best_avg_error is None:
-state.best_avg_error = np.Inf
+state.best_avg_error = np.inf
# if state.use_features is None:
# state.use_features = False
if state.V_last_converged is None:
@@ -699,7 +699,7 @@ def reach_for_the_spheres_iteration(state,
if state.h>state.min_h:
state.V_last_converged = state.V.copy()
state.F_last_converged = state.F.copy()
-state.best_avg_error = np.Inf
+state.best_avg_error = np.inf
state.convergence_counter = 0
state.h = np.maximum(state.h/2,state.min_h)
if state.convergence_counter > 100 or F_invalid.shape[0] == 0:
@@ -713,9 +713,9 @@ def reach_for_the_spheres_iteration(state,
state.U_batch = state.U.copy()
state.S_batch = state.S.copy()
state.resample_counter += 1
-state.best_performance = np.Inf
+state.best_performance = np.inf
state.convergence_counter = 0
-state.best_avg_error = np.Inf
+state.best_avg_error = np.inf
if verbose:
print(f"Resampled, I now have {state.U.shape[0]} sample points.")
else:
6 changes: 3 additions & 3 deletions src/gpytoolbox/squared_distance.py
@@ -11,15 +11,15 @@ def __init__(self,V,F,ptest):
self.F = F
self.dim = V.shape[1]
self.ptest = ptest
-self.current_best_guess = np.Inf
+self.current_best_guess = np.inf
self.current_best_element = -1
self.others = []
self.num_traversal = 0
self.traversed = []
# Auxiliary function which finds the distance of point to rectangle
def sdBox(self,p,center,width):
q = np.abs(p - center) - 0.5*width
-maxval = -np.Inf
+maxval = -np.inf
for i in range(self.dim):
maxval = np.maximum(maxval,q[i])
return np.linalg.norm((np.maximum(q,0.0))) + np.minimum(maxval,0.0)
@@ -183,7 +183,7 @@ def squared_distance(P,V,F=None,use_cpp=False,use_aabb=False,C=None,W=None,CH=No
# Loop over every element
t = None
for j in range(P.shape[0]):
-min_sqrd_dist = np.Inf
+min_sqrd_dist = np.inf
ind = -1
best_lmb = []
for i in range(F.shape[0]):
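The `sdBox` helper visible in this hunk computes a signed distance from a point to an axis-aligned box (positive outside, negative inside). A standalone, vectorized sketch of the same formula, under a hypothetical name that is not the library's API:

```python
import numpy as np

def signed_distance_to_box(p, center, width):
    # Same formula as the sdBox helper above, without the per-axis loop.
    q = np.abs(p - center) - 0.5 * width
    return np.linalg.norm(np.maximum(q, 0.0)) + min(float(np.max(q)), 0.0)

# A point one unit outside a unit cube centered at the origin:
print(signed_distance_to_box(np.array([1.5, 0.0, 0.0]),
                             np.zeros(3), np.ones(3)))  # 1.0
```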
2 changes: 1 addition & 1 deletion src/gpytoolbox/subdivide.py
@@ -212,7 +212,7 @@ def _loop_triangle_mesh(V,F,return_matrix):
# Compute, for each vertex, the β needed for Loop.
n_adj = np.bincount(E.ravel(), minlength=n)
n_adj[n_adj==0] = -1
-β = np.where(n_adj<3, np.NAN, np.where(n_adj==3, 3./16., (3./8.)/n_adj))
+β = np.where(n_adj<3, np.nan, np.where(n_adj==3, 3./16., (3./8.)/n_adj))

# We always compute the matrix S since we need it to construct Vu
i = np.concatenate((
2 changes: 1 addition & 1 deletion src/gpytoolbox/traverse_aabbtree.py
@@ -40,7 +40,7 @@ class test_closest_point_traversal:
def __init__(self,P,ptest):
self.P = P
self.ptest = ptest
-self.current_best_guess = np.Inf
+self.current_best_guess = np.inf
self.current_best_element = -1
self.others = []
# Auxiliary function which finds the distance of point to rectangle
2 changes: 1 addition & 1 deletion test/test_minimum_distance.py
@@ -17,7 +17,7 @@ def test_cube(self):
U2[:,j] += random_displacements[i]
dist = gpytoolbox.minimum_distance(V,F,U2,G)
# self.assertTrue(np.isclose(dist,0.0,atol=1e-2))
-dist_gt = np.clip(random_displacements[i]-1,0,np.Inf)
+dist_gt = np.clip(random_displacements[i]-1,0,np.inf)
# print(dist_gt,dist)
self.assertTrue(np.isclose(dist,dist_gt,atol=1e-4))
def test_bunny_faces(self):