updates #22

Merged: 1 commit, merged on Oct 19, 2023
11 changes: 10 additions & 1 deletion base/PyNucleus_base/CSR_LinearOperator_{SCALAR}.pxi
@@ -520,7 +520,16 @@ cdef class {SCALAR_label}CSR_VectorLinearOperator({SCALAR_label}VectorLinearOper
return self

def toarray(self):
return self.to_csr().toarray()
cdef:
{SCALAR}_t[:, :, ::1] data
INDEX_t i, jj, j, k
data = np.zeros((self.num_rows, self.num_columns, self.vectorSize), dtype={SCALAR})
for i in range(self.indptr.shape[0]-1):
for jj in range(self.indptr[i], self.indptr[i+1]):
j = self.indices[jj]
for k in range(self.data.shape[1]):
data[i, j, k] = self.data[jj, k]
return np.array(data, copy=False)

cdef void setEntry({SCALAR_label}CSR_VectorLinearOperator self, INDEX_t I, INDEX_t J, {SCALAR}_t[::1] val):
cdef:
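The new `toarray` above densifies the vector-valued CSR storage by scattering each stored block into a `(num_rows, num_columns, vectorSize)` array. A minimal NumPy sketch of the same scatter logic, independent of PyNucleus (the function and argument names are illustrative, not the library API):

```python
import numpy as np

def csr_vector_toarray(indptr, indices, data, num_rows, num_cols):
    """Densify vector-valued CSR storage: data[jj, k] holds component k
    of the entry in row i, column indices[jj]."""
    vector_size = data.shape[1]
    dense = np.zeros((num_rows, num_cols, vector_size), dtype=data.dtype)
    for i in range(num_rows):
        for jj in range(indptr[i], indptr[i+1]):
            dense[i, indices[jj], :] = data[jj, :]
    return dense

# toy example: 2x2 operator with 2-vector entries, nonzeros at (0,0) and (1,1)
indptr = np.array([0, 1, 2])
indices = np.array([0, 1])
data = np.array([[1.0, 2.0], [3.0, 4.0]])
print(csr_vector_toarray(indptr, indices, data, 2, 2).shape)  # (2, 2, 2)
```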
19 changes: 19 additions & 0 deletions base/PyNucleus_base/LinearOperator_{SCALAR}.pxi
@@ -220,6 +220,13 @@ cdef class {SCALAR_label}LinearOperator:
return Dense_LinearOperator.HDF5read(node)
elif node.attrs['type'] == 'diagonal':
return diagonalOperator.HDF5read(node)
elif node.attrs['type'] == 'interpolationOperator':
return interpolationOperator.HDF5read(node)
elif node.attrs['type'] == 'multiIntervalInterpolationOperator':
return multiIntervalInterpolationOperator.HDF5read(node)
elif node.attrs['type'] == 'h2':
from PyNucleus_nl.clusterMethodCy import H2Matrix
return H2Matrix.HDF5read(node)
else:
raise NotImplementedError(node.attrs['type'])

@@ -533,6 +540,18 @@ cdef class {SCALAR_label}VectorLinearOperator:
else:
self.matvec(x, y)

def __mul__(self, x):
cdef:
np.ndarray[{SCALAR}_t, ndim=2] y
{SCALAR}_t[::1] x_mv
try:
x_mv = x
y = np.zeros((self.num_rows, self.vectorSize), dtype={SCALAR})
self(x, y)
return y
except Exception as e:
raise NotImplementedError('Cannot multiply {} with {}:\n{}'.format(self, x, e))

property shape:
def __get__(self):
return (self.num_rows, self.num_columns, self.vectorSize)
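The added `__mul__` allocates a `(num_rows, vectorSize)` result and delegates to `self(x, y)`, so vector-valued operators can be applied with the usual `A * x` syntax. A self-contained sketch of that contract using a mock class (not the PyNucleus type; the dense-array backing is purely illustrative):

```python
import numpy as np

class MockVectorOperator:
    """Minimal stand-in mirroring the vector-valued operator contract:
    __call__(x, y) fills y with shape (num_rows, vectorSize)."""
    def __init__(self, dense):            # dense: (num_rows, num_cols, vectorSize)
        self.dense = dense
        self.shape = dense.shape

    def __call__(self, x, y):
        # y[i, k] = sum_j dense[i, j, k] * x[j]
        np.einsum('ijk,j->ik', self.dense, x, out=y)

    def __mul__(self, x):                 # same pattern as the PR's __mul__
        y = np.zeros((self.shape[0], self.shape[2]))
        self(x, y)
        return y

A = MockVectorOperator(np.random.rand(3, 4, 2))
x = np.ones(4)
print((A * x).shape)   # (3, 2)
```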
1 change: 1 addition & 0 deletions base/PyNucleus_base/SSS_LinearOperator_decl_{SCALAR}.pxi
@@ -28,6 +28,7 @@ cdef class {SCALAR_label}SSS_VectorLinearOperator({SCALAR_label}VectorLinearOper
cdef:
public INDEX_t[::1] indptr, indices
public {SCALAR}_t[:, ::1] data, diagonal
{SCALAR}_t[::1] temp
public BOOL_t indices_sorted
cdef INDEX_t matvec(self,
{SCALAR}_t[::1] x,
15 changes: 15 additions & 0 deletions base/PyNucleus_base/SSS_LinearOperator_{SCALAR}.pxi
@@ -538,3 +538,18 @@ cdef class {SCALAR_label}SSS_VectorLinearOperator({SCALAR_label}VectorLinearOper
diagonal = np.array(self.diagonal, copy=True)
other = {SCALAR_label}SSS_VectorLinearOperator(self.indices, self.indptr, data, diagonal)
return other

def toarray(self):
cdef:
{SCALAR}_t[:, :, ::1] data
INDEX_t i, k, jj, j
data = np.zeros((self.num_rows, self.num_columns, self.vectorSize), dtype={SCALAR})
for i in range(self.indptr.shape[0]-1):
for k in range(self.diagonal.shape[1]):
data[i, i, k] = self.diagonal[i, k]
for jj in range(self.indptr[i], self.indptr[i+1]):
j = self.indices[jj]
for k in range(self.data.shape[1]):
data[i, j, k] = self.data[jj, k]
data[j, i, k] = self.data[jj, k]
return np.array(data, copy=False)
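The SSS variant stores each off-diagonal block once plus a separate diagonal, so its `toarray` mirrors every stored block into both `(i, j)` and `(j, i)`. A short NumPy sketch of that symmetric fill (names illustrative, not the PyNucleus API):

```python
import numpy as np

def sss_vector_toarray(indptr, indices, data, diagonal):
    """Densify symmetric vector-valued storage: off-diagonal blocks are
    stored once and mirrored, the diagonal is kept separately."""
    n, vector_size = diagonal.shape
    dense = np.zeros((n, n, vector_size), dtype=data.dtype)
    for i in range(n):
        dense[i, i, :] = diagonal[i, :]
        for jj in range(indptr[i], indptr[i+1]):
            j = indices[jj]
            dense[i, j, :] = data[jj, :]
            dense[j, i, :] = data[jj, :]   # symmetric mirror
    return dense

dense = sss_vector_toarray(np.array([0, 0, 1]), np.array([0]),
                           np.array([[5.0, 6.0]]), np.array([[1.0, 1.0], [2.0, 2.0]]))
assert np.allclose(dense[1, 0], dense[0, 1])
```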
2 changes: 1 addition & 1 deletion base/PyNucleus_base/io.pyx
@@ -61,7 +61,7 @@ cdef class Map:
return -1

def getPIDs(self, INDEX_t gid):
return self.GID_PID[np.where(np.array(self.GID_PID[:, 0], copy=False) == gid), 1]
return np.array(self.GID_PID)[np.where(np.array(self.GID_PID[:, 0], copy=False) == gid), 1]

cpdef INDEX_t getLocalNumElements(self, INDEX_t pid):
return self.localNumElements[pid]
60 changes: 59 additions & 1 deletion base/PyNucleus_base/linear_operators.pyx
@@ -1351,12 +1351,37 @@ cdef class interpolationOperator(sumMultiplyOperator):
def __repr__(self):
return '<%dx%d %s with %d interpolation nodes>' % (self.num_rows, self.num_columns, self.__class__.__name__, self.numInterpolationNodes)

def HDF5write(self, node):
node.attrs['type'] = 'interpolationOperator'
node.attrs['left'] = self.left
node.attrs['right'] = self.right
node.create_dataset('nodes', data=np.array(self.nodes, copy=False))
for i in range(len(self.ops)):
grp = node.create_group(str(i))
self.ops[i].HDF5write(grp)

@staticmethod
def HDF5read(node):
left = node.attrs['left']
right = node.attrs['right']
nodes = np.array(node['nodes'], dtype=REAL)
ops = []
for i in range(nodes.shape[0]):
ops.append(LinearOperator.HDF5read(node[str(i)]))
return interpolationOperator(ops, nodes, left, right)

cpdef void assure_constructed(self):
for i in range(len(self.ops)):
if isinstance(self.ops[i], delayedConstructionOperator):
self.ops[i].assure_constructed()


cdef class multiIntervalInterpolationOperator(LinearOperator):
cdef:
public list ops
INDEX_t selected
REAL_t left, right
readonly REAL_t left
readonly REAL_t right

def __init__(self, list intervals, list nodes, list ops):
shape = ops[0][0].shape
@@ -1371,6 +1396,12 @@ cdef class multiIntervalInterpolationOperator(LinearOperator):
self.ops.append(interpolationOperator(ops[k], nodes[k], left, right))
self.selected = -1

def get(self):
if self.selected != -1:
return self.ops[self.selected].val
else:
return np.nan

def set(self, REAL_t val, BOOL_t derivative=False):
cdef:
interpolationOperator op
@@ -1434,6 +1465,29 @@ cdef class multiIntervalInterpolationOperator(LinearOperator):
def isSparse(self):
return self.getSelectedOp().isSparse()

def HDF5write(self, node):
node.attrs['type'] = 'multiIntervalInterpolationOperator'
for i in range(len(self.ops)):
grp = node.create_group(str(i))
self.ops[i].HDF5write(grp)

@staticmethod
def HDF5read(node):
numOps = len(node)
ops = []
nodes = []
intervals = []
for i in range(numOps):
op = LinearOperator.HDF5read(node[str(i)])
ops.append(op.ops)
nodes.append(op.nodes)
intervals.append((op.left, op.right))
return multiIntervalInterpolationOperator(intervals, nodes, ops)

cpdef void assure_constructed(self):
for i in range(len(self.ops)):
self.ops[i].assure_constructed()


cdef class delayedConstructionOperator(LinearOperator):
def __init__(self, INDEX_t numRows, INDEX_t numCols):
@@ -1490,3 +1544,7 @@ cdef class delayedConstructionOperator(LinearOperator):
def isSparse(self):
self.assure_constructed()
return self.A.isSparse()

def HDF5write(self, node):
self.assure_constructed()
self.A.HDF5write(node)
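The new `HDF5write`/`HDF5read` pairs let interpolation operators participate in the generic `LinearOperator.HDF5read` dispatch added earlier in this PR: attributes carry the type tag and interval bounds, `nodes` is a dataset, and each sub-operator lives in a numbered subgroup. A standalone h5py sketch of that layout, not invoking PyNucleus (file name and values are placeholders):

```python
import h5py
import numpy as np

# Mirror the layout HDF5write produces: attrs carry 'type', 'left', 'right';
# 'nodes' is a dataset; each sub-operator goes into a numbered group ('0', '1', ...).
nodes = np.array([0.25, 0.5, 0.75])
with h5py.File('interp_sketch.h5', 'w') as f:
    grp = f.create_group('interp')
    grp.attrs['type'] = 'interpolationOperator'
    grp.attrs['left'] = 0.0
    grp.attrs['right'] = 1.0
    grp.create_dataset('nodes', data=nodes)
    for i in range(nodes.shape[0]):
        grp.create_group(str(i))   # sub-operator data would be written here

with h5py.File('interp_sketch.h5', 'r') as f:
    grp = f['interp']
    # HDF5read dispatches on this type tag and rebuilds the operator
    assert grp.attrs['type'] == 'interpolationOperator'
    print(np.array(grp['nodes']), grp.attrs['left'], grp.attrs['right'])
```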
10 changes: 8 additions & 2 deletions base/PyNucleus_base/performanceLogger.pyx
@@ -30,7 +30,7 @@ cpdef void endMemRegion(str key):


cdef class FakeTimer:
def __init__(self):
def __init__(self, str key=''):
pass

cdef void start(self):
@@ -176,7 +176,13 @@ cdef class PLogger(FakePLogger):
s = ''
for key in sorted(self.values.keys()):
if totalsOnly:
s += '{}: {} ({} calls)\n'.format(str(key), sum(self.values[key]), len(self.values[key]))
try:
s += '{}: {} ({} calls)\n'.format(str(key), sum(self.values[key]), len(self.values[key]))
except TypeError:
if len(self.values[key]):
s += str(key) +': ' + self.values[key][0].__repr__() + '\n'
else:
s += str(key) +': ' + self.values[key].__repr__() + '\n'
else:
s += str(key) +': ' + self.values[key].__repr__() + '\n'
return s
10 changes: 10 additions & 0 deletions base/PyNucleus_base/tupleDict_{VALUE}.pxi
@@ -264,3 +264,13 @@ cdef class tupleDict{VALUE}:
print(i, j, self.indexL[i][j], self.indexL[i][j+1])
return False
return True

def toDict(self):
cdef:
INDEX_t e[2]
{VALUE_t} val
dict d = {}
self.startIter()
while self.next(e, &val):
d[(e[0], e[1])] = val
return d
13 changes: 11 additions & 2 deletions base/PyNucleus_base/utilsFem.py
@@ -102,7 +102,10 @@ def mergeOrdered(a_list, b_list):
val = data[key]
# (number of calls, min over calls, mean over calls, med over calls, max over calls)
# on rank
data2[key] = (len(val), np.min(val), np.mean(val), np.median(val), np.max(val))
try:
data2[key] = (len(val), np.min(val), np.mean(val), np.median(val), np.max(val))
except:
pass
data = data2
# gather data for all ranks
if self.comm is not None:
@@ -1475,15 +1478,21 @@ def __call__(self):
newValue = getattr(self.baseObj, prop)
oldValue = self.cached_args.get(prop, None)
args.append(newValue)
cached_args[prop] = newValue
# TODO: keep hash?
try:
if isinstance(newValue, np.ndarray):
cached_args[prop] = newValue.copy()
if (newValue != oldValue).any():
dependencyLogger.log(self.logLevel, 'Values for {} differ: \'{}\' != \'{}\', calling \'{}\''.format(prop, oldValue, newValue, self.fun.__name__))
needToBuild = True
else:
dependencyLogger.log(self.logLevel, 'Values for {} are identical: \'{}\' == \'{}\''.format(prop, oldValue, newValue))
elif newValue != oldValue:
cached_args[prop] = newValue
dependencyLogger.log(self.logLevel, 'Values for {} differ: \'{}\' != \'{}\', calling \'{}\''.format(prop, oldValue, newValue, self.fun.__name__))
needToBuild = True
else:
dependencyLogger.log(self.logLevel, 'Values for {} are identical: \'{}\' == \'{}\''.format(prop, oldValue, newValue))
except Exception as e:
dependencyLogger.log(logging.WARN, 'Cannot compare values {}, {} for property \'{}\', exception {}, force call \'{}\''.format(oldValue, newValue, prop, e, self.fun.__name__))
needToBuild = True
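The try/except and the `isinstance(newValue, np.ndarray)` branch above work around the fact that `!=` on a NumPy array is elementwise, so a plain `if newValue != oldValue:` raises for arrays with more than one element. A tiny sketch of the failure mode and the reduction the PR uses:

```python
import numpy as np

old = np.array([1.0, 2.0])
new = np.array([1.0, 3.0])

try:
    if new != old:            # elementwise result -> ambiguous truth value
        pass
except ValueError as e:
    print('plain comparison fails:', e)

# the pattern used in the PR: reduce the elementwise comparison explicitly
if (new != old).any():
    print('values differ, rebuild needed')
```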
2 changes: 1 addition & 1 deletion docs/example2.rst
@@ -23,7 +23,7 @@ The singularity :math:`\beta` of the kernel depends on the family of kernels:

- fractional type: :math:`\beta(x,y)=d+2s(x,y)`, where :math:`d` is the spatial dimension and :math:`s(x,y)` is the fractional order.
- constant type :math:`\beta(x,y)=0`
- peridynamic type :math:`\beta(x,y)=-1`
- peridynamic type :math:`\beta(x,y)=1`

At present, the only implemented interaction regions are balls in the 2-norm:

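Note on the corrected value: writing the kernel singularity as :math:`|x-y|^{-\beta}` (the convention the surrounding text appears to use), a fractional kernel in dimension :math:`d=2` with constant order :math:`s=0.5` gives :math:`\beta = d + 2s = 3`, and the peridynamic value :math:`\beta=1` then corresponds to a kernel scaling like :math:`1/|x-y|`.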
8 changes: 6 additions & 2 deletions drivers/runFractional.py
@@ -16,6 +16,7 @@

d = driver(MPI.COMM_WORLD)
d.add('saveOperators', False)
d.add('vtkOutput', "")
p = fractionalLaplacianProblem(d, False)
discrProblem = discretizedNonlocalProblem(d, p)

@@ -60,9 +61,12 @@
if p.element != 'P0':
plotDefaults['shading'] = 'gouraud'

if d.startPlot('solution'):
if p.dim < 3 and d.startPlot('solution'):
mS.plotSolution()
if mS.error is not None and d.startPlot('error'):
if p.dim < 3 and mS.error is not None and d.startPlot('error'):
mS.error.plot(**plotDefaults)

if d.vtkOutput != "":
mS.exportVTK(d.vtkOutput)

d.finish()
4 changes: 2 additions & 2 deletions drivers/runFractionalHeat.py
@@ -53,9 +53,9 @@
if p.element != 'P0':
plotDefaults['shading'] = 'gouraud'

if d.startPlot('solution'):
if p.dim < 3 and d.startPlot('solution'):
mS.plotSolution()
if mS.error is not None and d.startPlot('error'):
if p.dim < 3 and mS.error is not None and d.startPlot('error'):
mS.error.plot(**plotDefaults)

d.finish()
7 changes: 3 additions & 4 deletions drivers/testDistOp.py
@@ -207,8 +207,7 @@
if d.buildDistributedH2Bcast:
with d.timer('distributed, bcast build'):
A_distributedH2Bcast = dm.assembleNonlocal(nPP.kernel, matrixFormat='H2', comm=d.comm,
params={'assembleOnRoot': False,
'forceUnsymmetric': True})
params={'assembleOnRoot': False})
with d.timer('distributed, bcast matvec'):
print('Distributed: ', A_distributedH2Bcast)
y_distributedH2Bcast = A_distributedH2Bcast*x
@@ -220,7 +219,6 @@
with d.timer('distributed, halo build'):
A_distributedH2 = dm.assembleNonlocal(nPP.kernel, matrixFormat='H2', comm=d.comm,
params={'assembleOnRoot': False,
'forceUnsymmetric': True,
'localFarFieldIndexing': True},
PLogger=tm.PLogger)
t = d.addOutputGroup('TimersH2', timerOutputGroup(driver=d))
@@ -345,11 +343,12 @@
cg.maxIter = 1000
u = lcl_dm.zeros()
with d.timer('CG solve'):
cg(b, u)
iterCG = cg(b, u)

residuals = cg.residuals
solveGroup = d.addOutputGroup('solve', tested=True, rTol=1e-1)
solveGroup.add('residual norm', residuals[-1])
solveGroup.add('CG iterations', iterCG)

# pure Neumann condition -> add nullspace components to match analytic solution
if nPP.boundaryCondition in (NEUMANN, HOMOGENEOUS_NEUMANN) and nPP.analyticSolution is not None:
6 changes: 5 additions & 1 deletion fem/PyNucleus_fem/DoFMaps.pxd
@@ -19,7 +19,9 @@ cdef class DoFMap:
public meshBase mesh #: The underlying mesh
readonly INDEX_t dim #: The spatial dimension of the underlying mesh
BOOL_t reordered
public list localShapeFunctions #: List of local shape functions
public list _localShapeFunctions #: List of local shape functions
void** _localShapeFunctionsPtr
BOOL_t vectorValued
public REAL_t[:, ::1] nodes #: The barycentric coordinates of the DoFs
public REAL_t[:, ::1] dof_dual
public INDEX_t num_dofs #: The number of DoFs of the finite element space
@@ -46,6 +48,8 @@ cdef class DoFMap:
cpdef void getVertexDoFs(self, INDEX_t[:, ::1] v2d)
cpdef void resetUsingIndicator(self, function indicator)
cpdef void resetUsingFEVector(self, REAL_t[::1] ind)
cdef shapeFunction getLocalShapeFunction(self, INDEX_t dofNo)
cdef vectorShapeFunction getLocalVectorShapeFunction(self, INDEX_t dofNo)


cdef class P1_DoFMap(DoFMap):