
Commit 3fc3b98
updates
cgcgcg committed Jul 18, 2023
1 parent 4c5e61b commit 3fc3b98
Showing 89 changed files with 2,795 additions and 1,025 deletions.
2 changes: 2 additions & 0 deletions base/PyNucleus_base/CSR_LinearOperator_decl_{SCALAR}.pxi
@@ -26,3 +26,5 @@ cdef class {SCALAR_label}CSR_LinearOperator({SCALAR_label}LinearOperator):
cdef {SCALAR}_t getEntry(self, INDEX_t I, INDEX_t J)
cdef void setEntry(self, INDEX_t I, INDEX_t J, {SCALAR}_t val)
cpdef {SCALAR_label}CSR_LinearOperator getBlockDiagonal(self, sparseGraph blocks)


2 changes: 2 additions & 0 deletions base/PyNucleus_base/CSR_LinearOperator_{SCALAR}.pxi
@@ -472,3 +472,5 @@ cdef BOOL_t sort_indices{SCALAR_label}(INDEX_t[::1] indptr,
kk -= 1
indices[kk] = j
return wasSorted


11 changes: 11 additions & 0 deletions base/PyNucleus_base/DenseLinearOperator_decl_{SCALAR}.pxi
@@ -26,3 +26,14 @@ cdef class {SCALAR_label}Dense_SubBlock_LinearOperator({SCALAR_label}LinearOpera
cdef:
dict lookupI, lookupJ
public {SCALAR}_t[:, :] data


cdef class {SCALAR_label}Dense_VectorLinearOperator({SCALAR_label}VectorLinearOperator):
cdef:
public {SCALAR}_t[:, :, ::1] data

cdef INDEX_t matvec(self,
{SCALAR}_t[::1] x,
{SCALAR}_t[:, ::1] y) except -1
cdef void getEntry(self, INDEX_t I, INDEX_t J, {SCALAR}_t[::1] val)
cdef void setEntry(self, INDEX_t I, INDEX_t J, {SCALAR}_t[::1] val)
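
The new vector-valued dense operator stores a small vector per matrix entry in a (num_rows, num_columns, vectorSize) array; its matvec maps a vector x to a matrix y of shape (num_rows, vectorSize). A minimal pure-NumPy sketch of that semantics (illustrative only, not code from this commit; the function name is made up):

import numpy as np

# Reference semantics of the vector-valued matvec declared above:
# y[i, k] = sum_j data[i, j, k] * x[j]
def dense_vector_matvec(data, x):
    # data: (num_rows, num_columns, vectorSize), x: (num_columns,)
    return np.einsum('ijk,j->ik', data, x)

data = np.arange(2 * 3 * 4, dtype=np.float64).reshape(2, 3, 4)
x = np.array([1.0, 2.0, 3.0])
y = dense_vector_matvec(data, x)
assert y.shape == (2, 4)
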
87 changes: 87 additions & 0 deletions base/PyNucleus_base/DenseLinearOperator_{SCALAR}.pxi
@@ -156,3 +156,90 @@ cdef class {SCALAR_label}Dense_SubBlock_LinearOperator({SCALAR_label}LinearOpera
j = self.lookupJ.get(J, -1)
if i >= 0 and j >= 0:
self.data[i, j] += val


cdef class {SCALAR_label}Dense_VectorLinearOperator({SCALAR_label}VectorLinearOperator):
def __init__(self,
{SCALAR}_t[:, :, ::1] data):
{SCALAR_label}VectorLinearOperator.__init__(self,
data.shape[0],
data.shape[1],
data.shape[2])
self.data = data

cdef INDEX_t matvec(self,
{SCALAR}_t[::1] x,
{SCALAR}_t[:, ::1] y) except -1:
cdef:
INDEX_t i, j, k
for i in range(self.num_rows):
for k in range(self.vectorSize):
y[i, k] = 0.
for j in range(self.num_columns):
for k in range(self.vectorSize):
y[i, k] += self.data[i, j, k]*x[j]
return 0

cdef INDEX_t matvec_no_overwrite(self,
{SCALAR}_t[::1] x,
{SCALAR}_t[:, ::1] y) except -1:
cdef:
INDEX_t i, j, k
for i in range(self.num_rows):
for j in range(self.num_columns):
for k in range(self.vectorSize):
y[i, k] += self.data[i, j, k]*x[j]
return 0

def isSparse(self):
return False

def toarray(self):
return np.array(self.data, copy=False, dtype={SCALAR})

cdef void getEntry(self, INDEX_t I, INDEX_t J, {SCALAR}_t[::1] val):
cdef:
INDEX_t k
for k in range(self.vectorSize):
val[k] = self.data[I, J, k]

cdef void setEntry(self, INDEX_t I, INDEX_t J, {SCALAR}_t[::1] val):
cdef:
INDEX_t k
for k in range(self.vectorSize):
self.data[I, J, k] = val[k]

cdef void addToEntry(self, INDEX_t I, INDEX_t J, {SCALAR}_t[::1] val):
cdef:
INDEX_t k
for k in range(self.vectorSize):
self.data[I, J, k] += val[k]

@staticmethod
def zeros(INDEX_t num_rows, INDEX_t num_columns, INDEX_t vectorSize):
return Dense_LinearOperator(np.zeros((num_rows, num_columns, vectorSize), dtype={SCALAR}))

@staticmethod
def ones(INDEX_t num_rows, INDEX_t num_columns, INDEX_t vectorSize):
return Dense_LinearOperator(np.ones((num_rows, num_columns, vectorSize), dtype={SCALAR}))

@staticmethod
def empty(INDEX_t num_rows, INDEX_t num_columns, INDEX_t vectorSize):
return Dense_LinearOperator(uninitialized((num_rows, num_columns, vectorSize), dtype={SCALAR}))

def getMemorySize(self):
return self.data.shape[0]*self.data.shape[1]*self.data.shape[2]*sizeof({SCALAR}_t)

def __repr__(self):
sizeInMB = self.getMemorySize() >> 20
if sizeInMB > 100:
return '<%dx%dx%d %s, %d MB>' % (self.num_rows,
self.num_columns,
self.vectorSize,
self.__class__.__name__,
sizeInMB)
else:
return '<%dx%dx%d %s>' % (self.num_rows,
self.num_columns,
self.vectorSize,
self.__class__.__name__)
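
A hedged usage sketch of the class above, assuming the REAL instantiation drops the {SCALAR_label} prefix and is exported as Dense_VectorLinearOperator from PyNucleus_base.linear_operators (module path and class name are assumptions, not confirmed by this diff):

import numpy as np
from PyNucleus_base.linear_operators import Dense_VectorLinearOperator  # assumed export

A = Dense_VectorLinearOperator(np.ones((3, 3, 2)))   # 3x3 operator with vectorSize 2
x = np.ones(3)
y = np.zeros((3, 2))
A(x, y)                      # matvec: y is overwritten, every entry becomes 3.0
A(x, y, no_overwrite=True)   # matvec_no_overwrite: accumulates, entries become 6.0
print(A.shape)               # (3, 3, 2)
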
9 changes: 9 additions & 0 deletions base/PyNucleus_base/DiagonalLinearOperator_{SCALAR}.pxi
@@ -22,6 +22,15 @@ cdef class {SCALAR_label}diagonalOperator({SCALAR_label}LinearOperator):
y[i] = self.data[i]*x[i]
return 0

cdef INDEX_t matvec_no_overwrite(self,
{SCALAR}_t[::1] x,
{SCALAR}_t[::1] y) except -1:
cdef:
INDEX_t i
for i in range(self.num_rows):
y[i] += self.data[i]*x[i]
return 0

cdef {SCALAR}_t getEntry(self, INDEX_t I, INDEX_t J):
if I == J:
return self.data[I]
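
The added matvec_no_overwrite differs from the existing matvec only in accumulating into y instead of overwriting it; in plain NumPy terms (illustrative only):

import numpy as np

d = np.array([1.0, 2.0, 3.0])   # diagonal entries
x = np.ones(3)
y = np.ones(3)

y_new = d * x   # matvec:              y = D x
y += d * x      # matvec_no_overwrite: y += D x
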
15 changes: 15 additions & 0 deletions base/PyNucleus_base/LinearOperator_decl_{SCALAR}.pxi
@@ -92,3 +92,18 @@ cdef class {SCALAR_label}Product_Linear_Operator({SCALAR_label}LinearOperator):
{SCALAR}_t[::1] rhs,
{SCALAR}_t[::1] result,
BOOL_t simpleResidual=*)


cdef class {SCALAR_label}VectorLinearOperator:
cdef:
public INDEX_t num_rows, num_columns, vectorSize
{SCALAR}_t[::1] _diagonal
cdef INDEX_t matvec(self,
{SCALAR}_t[::1] x,
{SCALAR}_t[:, ::1] y) except -1
cdef INDEX_t matvec_no_overwrite(self,
{SCALAR}_t[::1] x,
{SCALAR}_t[:, ::1] y) except -1
cdef void addToEntry(self, INDEX_t I, INDEX_t J, {SCALAR}_t[::1] val)
cdef void getEntry(self, INDEX_t I, INDEX_t J, {SCALAR}_t[::1] val)
cdef void setEntry(self, INDEX_t I, INDEX_t J, {SCALAR}_t[::1] val)
101 changes: 101 additions & 0 deletions base/PyNucleus_base/LinearOperator_{SCALAR}.pxi
@@ -494,3 +494,104 @@ cdef class {SCALAR_label}Product_Linear_Operator({SCALAR_label}LinearOperator):

def __repr__(self):
return '{}*{}'.format(self.A, self.B)


cdef class {SCALAR_label}VectorLinearOperator:
def __init__(self, int num_rows, int num_columns, int vectorSize):
self.num_rows = num_rows
self.num_columns = num_columns
self.vectorSize = vectorSize

cdef INDEX_t matvec(self,
{SCALAR}_t[::1] x,
{SCALAR}_t[:, ::1] y) except -1:
return -1

cdef INDEX_t matvec_no_overwrite(self,
{SCALAR}_t[::1] x,
{SCALAR}_t[:, ::1] y) except -1:
return -1

def __call__(self,
{SCALAR}_t[::1] x,
{SCALAR}_t[:, ::1] y,
BOOL_t no_overwrite=False):
if no_overwrite:
self.matvec_no_overwrite(x, y)
else:
self.matvec(x, y)

property shape:
def __get__(self):
return (self.num_rows, self.num_columns, self.vectorSize)

def isSparse(self):
raise NotImplementedError()

def to_csr(self):
raise NotImplementedError()

def to_dense(self):
return Dense_LinearOperator(self.toarray())

def toarray(self):
return self.to_csr().toarray()

def toLinearOperator(self):
def matvec(x):
if x.ndim == 1:
return self*x
elif x.ndim == 2 and x.shape[1] == 1:
if x.flags.c_contiguous:
return self*x[:, 0]
else:
y = np.zeros((x.shape[0]), dtype=x.dtype)
y[:] = x[:, 0]
return self*y
else:
raise NotImplementedError()

from scipy.sparse.linalg import LinearOperator as ScipyLinearOperator
return ScipyLinearOperator(shape=self.shape, matvec=matvec)

# def getDenseOpFromApply(self):
# cdef:
# INDEX_t i
# {SCALAR}_t[::1] x = np.zeros((self.shape[1]), dtype={SCALAR})
# {SCALAR}_t[::1, :] B = np.zeros(self.shape, dtype={SCALAR}, order='F')
# for i in range(self.shape[1]):
# if i > 0:
# x[i-1] = 0.
# x[i] = 1.
# self.matvec(x, B[:, i])
# return np.ascontiguousarray(B)

def __getstate__(self):
return

def __setstate__(self, state):
pass

def __repr__(self):
return '<%dx%dx%d %s>' % (self.num_rows,
self.num_columns,
self.vectorSize,
self.__class__.__name__)

cdef void setEntry(self, INDEX_t I, INDEX_t J, {SCALAR}_t[::1] val):
raise NotImplementedError()

def setEntry_py(self, INDEX_t I, INDEX_t J, {SCALAR}_t[::1] val):
self.setEntry(I, J, val)

cdef void addToEntry(self, INDEX_t I, INDEX_t J, {SCALAR}_t[::1] val):
raise NotImplementedError()

def addToEntry_py(self, INDEX_t I, INDEX_t J, {SCALAR}_t[::1] val):
self.addToEntry(I, J, val)

cdef void getEntry(self, INDEX_t I, INDEX_t J, {SCALAR}_t[::1] val):
raise NotImplementedError()

def getEntry_py(self, INDEX_t I, INDEX_t J, {SCALAR}_t[::1] val):
return self.getEntry(I, J, val)
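
The cdef entry hooks are mirrored by Python-level *_py wrappers; a sketch of exercising them through the dense subclass shown earlier (same assumed class and module names as above):

import numpy as np
from PyNucleus_base.linear_operators import Dense_VectorLinearOperator  # assumed export

A = Dense_VectorLinearOperator(np.zeros((2, 2, 3)))
val = np.array([1.0, 2.0, 3.0])
A.setEntry_py(0, 1, val)     # entry (0, 1) becomes [1., 2., 3.]
A.addToEntry_py(0, 1, val)   # accumulates to [2., 4., 6.]
out = np.zeros(3)
A.getEntry_py(0, 1, out)     # copies entry (0, 1) into out
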
2 changes: 2 additions & 0 deletions base/PyNucleus_base/SSS_LinearOperator_decl_{SCALAR}.pxi
@@ -22,3 +22,5 @@ cdef class {SCALAR_label}SSS_LinearOperator({SCALAR_label}LinearOperator):
{SCALAR}_t[::1] y) except -1
cdef void setEntry(self, INDEX_t I, INDEX_t J, {SCALAR}_t val)
cdef {SCALAR}_t getEntry({SCALAR_label}SSS_LinearOperator self, INDEX_t I, INDEX_t J)


2 changes: 2 additions & 0 deletions base/PyNucleus_base/SSS_LinearOperator_{SCALAR}.pxi
Original file line number Diff line number Diff line change
Expand Up @@ -326,3 +326,5 @@ cdef class {SCALAR_label}SSS_LinearOperator({SCALAR_label}LinearOperator):
self.diagonal[i] *= scaling
for i in range(self.data.shape[0]):
self.data[i] *= scaling


1 change: 1 addition & 0 deletions base/PyNucleus_base/utilsFem.py
@@ -1479,6 +1479,7 @@ def __call__(self):
if (newValue != oldValue).any():
needToBuild = True
elif newValue != oldValue:
+dependencyLogger.log(self.logLevel, 'Values for {} differ: \'{}\' != \'{}\', calling \'{}\''.format(prop, oldValue, newValue, self.fun.__name__))
needToBuild = True
except Exception as e:
dependencyLogger.log(logging.WARN, 'Cannot compare values {}, {} for property \'{}\', exception {}, force call \'{}\''.format(oldValue, newValue, prop, e, self.fun.__name__))
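
For context, a self-contained sketch of the comparison pattern in this hunk (an illustration, not code from utilsFem.py): array-valued properties are compared elementwise, scalars directly, and values that cannot be compared log a warning and force the rebuild.

import logging
import numpy as np

dependencyLogger = logging.getLogger('dependencies')

def value_changed(prop, oldValue, newValue, funName, logLevel=logging.DEBUG):
    try:
        if isinstance(newValue, np.ndarray):
            changed = bool((newValue != oldValue).any())
        else:
            changed = newValue != oldValue
        if changed:
            dependencyLogger.log(logLevel, 'Values for {} differ: \'{}\' != \'{}\', calling \'{}\''.format(prop, oldValue, newValue, funName))
        return changed
    except Exception as e:
        dependencyLogger.log(logging.WARN, 'Cannot compare values {}, {} for property \'{}\', exception {}, force call \'{}\''.format(oldValue, newValue, prop, e, funName))
        return True
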
3 changes: 2 additions & 1 deletion drivers/runNonlocal.py
@@ -6,6 +6,7 @@
# If you want to use this code, please refer to the README.rst and LICENSE files. #
###################################################################################

+from mpi4py import MPI
import numpy as np
from PyNucleus import driver, DIRICHLET, NEUMANN
from PyNucleus.nl import (nonlocalPoissonProblem,
@@ -15,7 +16,7 @@

description = """Solves a nonlocal Poisson problem with finite horizon."""

-d = driver(description=description)
+d = driver(MPI.COMM_WORLD, description=description)
p = nonlocalPoissonProblem(d)
discrProblem = discretizedNonlocalProblem(d, p)

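
The driver now receives the MPI communicator explicitly; a minimal sketch of the updated call pattern (the mpiexec launch line is an assumption):

# Launch with e.g.:  mpiexec -n 4 python runNonlocal.py
from mpi4py import MPI
from PyNucleus import driver

d = driver(MPI.COMM_WORLD, description='Solves a nonlocal Poisson problem with finite horizon.')
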
2 changes: 1 addition & 1 deletion drivers/runParallelGMG.py
@@ -174,7 +174,7 @@
A.residual_py(x, rhs, r)
resNorm = r.norm(False)
rate.add('Rate of convergence P'+label, (resNorm/r0)**(1/numIter), tested=False if label == 'BICGSTAB' else None)
-its.add('Number of iterations P'+label, numIter, aTol=1 if label == 'BICGSTAB' else None)
+its.add('Number of iterations P'+label, numIter, aTol=2 if label == 'BICGSTAB' else None)
res.add('Residual norm P'+label, resNorm)
resHist.add('P'+label, residuals, tested=False if label == 'BICGSTAB' else None)

33 changes: 0 additions & 33 deletions fem/PyNucleus_fem/DoFMaps.pyx
@@ -2295,39 +2295,6 @@ def generateLocalMassMatrix(DoFMap dm, DoFMap dm2=None):
return generic_matrix(entries)


cdef class lookupFunction(function):
cdef:
meshBase mesh
public DoFMap dm
public REAL_t[::1] u
public cellFinder2 cellFinder

def __init__(self, meshBase mesh, DoFMap dm, REAL_t[::1] u, cellFinder2 cF=None):
self.mesh = mesh
self.dm = dm
self.u = u
if cF is None:
self.cellFinder = cellFinder2(self.mesh)
else:
self.cellFinder = cF

cdef REAL_t eval(self, REAL_t[::1] x):
cdef:
shapeFunction shapeFun
REAL_t val
INDEX_t cellNo, dof, k
cellNo = self.cellFinder.findCell(x)
if cellNo == -1:
return 0.
val = 0.
for k in range(self.dm.dofs_per_element):
dof = self.dm.cell2dof(cellNo, k)
if dof >= 0:
shapeFun = self.dm.localShapeFunctions[k]
val += shapeFun.eval(self.cellFinder.bary)*self.u[dof]
return val


cdef class elementSizeFunction(function):
cdef:
meshBase mesh
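
The lookupFunction class removed here reappears from its own module in the factories.py change below, so this is a move rather than a deletion. Its eval locates the cell containing x and sums local shape function values times the corresponding DoF coefficients. A self-contained NumPy illustration of that evaluation step for P1 elements on a single triangle (purely illustrative, not repository code):

import numpy as np

def eval_p1_on_triangle(vertices, u_local, x):
    # vertices: (3, 2) triangle vertex coordinates
    # u_local:  (3,) coefficients attached to the three vertices
    # x:        (2,) evaluation point, assumed to lie inside the triangle
    # For P1 elements the local shape functions are the barycentric
    # coordinates, so the value is lam . u_local.
    A = np.vstack([vertices.T, np.ones(3)])   # lam @ vertices = x and sum(lam) = 1
    b = np.append(x, 1.0)
    lam = np.linalg.solve(A, b)
    return lam @ u_local

verts = np.array([[0., 0.], [1., 0.], [0., 1.]])
print(eval_p1_on_triangle(verts, np.array([1., 2., 3.]), np.array([0.25, 0.25])))  # 1.75
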
2 changes: 1 addition & 1 deletion fem/PyNucleus_fem/factories.py
@@ -34,7 +34,7 @@
radialIndicator,
fractalDiffusivity, expDiffusivity,
componentVectorFunction)
-from . DoFMaps import lookupFunction
+from . lookupFunction import lookupFunction


rhsFunSin1D = _rhsFunSin1D()
(The remaining changed files of this commit are not shown here.)