Commit 6c563b1

Merge branch 'devel' into none_dealloc
yguclu authored Sep 7, 2023
2 parents 476555f + 56c9d4e commit 6c563b1
Showing 12 changed files with 1,027 additions and 561 deletions.
2 changes: 1 addition & 1 deletion psydac/api/tests/test_equation.py
@@ -58,4 +58,4 @@ def test_field_and_constant(backend):
xh = equation_h.solve(c=c_value, f=fh)

# Verify that solution is equal to c_value
- assert np.allclose(xh.coeffs.toarray(), c_value, rtol=1e-10, atol=1e-16)
+ assert np.allclose(xh.coeffs.toarray(), c_value, rtol=1e-9, atol=1e-16)
33 changes: 33 additions & 0 deletions psydac/linalg/basic.py
@@ -52,6 +52,25 @@ def dot(self, a, b):
"""

@abstractmethod
def axpy(self, a, x, y):
"""
Increment the vector y with the a-scaled vector x, i.e. y = a * x + y,
provided that x and y belong to the same vector space V (self).
The scalar value a may be real or complex, depending on the field of V.
Parameters
----------
a : scalar
The scaling coefficient needed for the operation.
x : Vector
The vector which is not modified by this function.
y : Vector
The vector modified by this function (incremented by a * x).
"""

#===============================================================================
class Vector(ABC):
"""
@@ -76,6 +95,20 @@ def dot(self, other):
assert self.space is other.space
return self.space.dot(self, other)

def mul_iadd(self, a, x):
"""
Compute self += a * x, where x is another vector of the same space.
Parameters
----------
a : scalar
Rescaling coefficient, which can be cast to the correct dtype.
x : Vector
Vector belonging to the same space as self.
"""
self.space.axpy(a, x, self)

#-------------------------------------
# Deferred methods
#-------------------------------------
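
The two additions above define the in-place update y = a * x + y at the interface level: the space knows how to combine its own vectors (axpy), and a vector simply delegates to its space (mul_iadd). As a minimal, self-contained sketch of that pattern (toy NumPy-backed classes, not psydac's actual Vector/VectorSpace subclasses), the interplay looks like this:

import numpy as np

class ToySpace:
    # Stand-in for a concrete VectorSpace: it owns the axpy update.
    def axpy(self, a, x, y):
        y.data += a * x.data          # in-place: y <- a * x + y

class ToyVector:
    # Stand-in for a concrete Vector: it stores data and knows its space.
    def __init__(self, space, data):
        self.space = space
        self.data  = np.asarray(data, dtype=float)

    def mul_iadd(self, a, x):
        # Same delegation as Vector.mul_iadd above.
        self.space.axpy(a, x, self)

V = ToySpace()
x = ToyVector(V, [1.0, 2.0, 3.0])
y = ToyVector(V, [10.0, 10.0, 10.0])
y.mul_iadd(0.5, x)                    # y becomes [10.5, 11.0, 11.5]
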
29 changes: 29 additions & 0 deletions psydac/linalg/block.py
@@ -85,6 +85,35 @@ def zeros(self):
"""
return BlockVector(self, [Vi.zeros() for Vi in self._spaces])

#...
def axpy(self, a, x, y):
"""
Increment the vector y with the a-scaled vector x, i.e. y = a * x + y,
provided that x and y belong to the same vector space V (self).
The scalar value a may be real or complex, depending on the field of V.
Parameters
----------
a : scalar
The scaling coefficient needed for the operation.
x : BlockVector
The vector which is not modified by this function.
y : BlockVector
The vector modified by this function (incremented by a * x).
"""

assert isinstance(x, BlockVector)
assert isinstance(y, BlockVector)
assert x.space is self
assert y.space is self

for Vi, xi, yi in zip(self.spaces, x.blocks, y.blocks):
Vi.axpy(a, xi, yi)

x._sync = x._sync and y._sync

#--------------------------------------
# Other properties/methods
#--------------------------------------
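
BlockVectorSpace.axpy above is pure bookkeeping: it forwards the same scalar a to each component space and lets that space update its own block in place. A hedged, psydac-free sketch of the idea, with plain NumPy arrays standing in for the blocks:

import numpy as np

def block_axpy(a, x_blocks, y_blocks):
    # Mirror of the loop in BlockVectorSpace.axpy: each component is updated
    # by its own space; here every "space" is just NumPy's in-place +=.
    for xi, yi in zip(x_blocks, y_blocks):
        yi += a * xi

x = [np.array([1.0, 2.0]), np.array([3.0])]
y = [np.array([0.0, 0.0]), np.array([1.0])]
block_axpy(2.0, x, y)                 # y becomes [array([2., 4.]), array([7.])]
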
61 changes: 61 additions & 0 deletions psydac/linalg/kernels/axpy_kernels.py
@@ -0,0 +1,61 @@
from pyccel.decorators import template

#========================================================================================================
@template(name='Tarray', types=['float[:]', 'complex[:]'])
@template(name='T', types=['float', 'complex'])
def axpy_1d(alpha: 'T', x: "Tarray", y: "Tarray"):
"""
Kernel for computing y = alpha * x + y.
Parameters
----------
alpha : float | complex
Scaling coefficient.
x, y : 1D Numpy arrays of (float | complex) data
Data of the vectors.
"""
n1, = x.shape
for i1 in range(n1):
y[i1] += alpha * x[i1]

#========================================================================================================
@template(name='Tarray', types=['float[:,:]', 'complex[:,:]'])
@template(name='T', types=['float', 'complex'])
def axpy_2d(alpha: 'T', x: "Tarray", y: "Tarray"):
"""
Kernel for computing y = alpha * x + y.
Parameters
----------
alpha : float | complex
Scaling coefficient.
x, y : 2D Numpy arrays of (float | complex) data
Data of the vectors.
"""
n1, n2 = x.shape
for i1 in range(n1):
for i2 in range(n2):
y[i1, i2] += alpha * x[i1, i2]

#========================================================================================================
@template(name='Tarray', types=['float[:,:,:]', 'complex[:,:,:]'])
@template(name='T', types=['float', 'complex'])
def axpy_3d(alpha: 'T', x: "Tarray", y: "Tarray"):
"""
Kernel for computing y = alpha * x + y.
Parameters
----------
alpha : float | complex
Scaling coefficient.
x, y : 3D Numpy arrays of (float | complex) data
Data of the vectors.
"""
n1, n2, n3 = x.shape
for i1 in range(n1):
for i2 in range(n2):
for i3 in range(n3):
y[i1, i2, i3] += alpha * x[i1, i2, i3]
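
These kernels are written for pyccel compilation, but their logic is just an explicit loop over the array entries. A quick NumPy cross-check of what axpy_1d leaves in y (the loop is restated here rather than imported, to keep the sketch self-contained):

import numpy as np

rng   = np.random.default_rng(0)
x     = rng.standard_normal(8)
y     = rng.standard_normal(8)
alpha = 0.25

y_ref  = y + alpha * x            # vectorized reference result
y_loop = y.copy()
n1, = x.shape
for i1 in range(n1):              # exactly the loop in axpy_1d
    y_loop[i1] += alpha * x[i1]

assert np.allclose(y_loop, y_ref)
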
100 changes: 100 additions & 0 deletions psydac/linalg/kernels/inner_kernels.py
@@ -0,0 +1,100 @@
from pyccel.decorators import template

#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!! WARNING !!!!!!!!!!!!!!!!!!!#
#!!!!!!! Conjugate on the first argument !!!!!!!#
#!!!!!!!!!! This will need an update !!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#

#==============================================================================
@template(name='T', types=['float[:]', 'complex[:]'])
def inner_1d(v1: 'T', v2: 'T', nghost0: 'int64'):
"""
Kernel for computing the inner product (case of two 1D vectors).
Parameters
----------
v1, v2 : 1D NumPy array
Data of the vectors from which we are computing the inner product.
nghost0 : int
Number of ghost cells of the arrays along the index 0.
Returns
-------
res : scalar
Scalar (real or complex) containing the result of the inner product.
"""
shape0, = v1.shape

res = v1[0] - v1[0]
for i0 in range(nghost0, shape0 - nghost0):
res += v1[i0].conjugate() * v2[i0]

return res

#==============================================================================
@template(name='T', types=['float[:,:]', 'complex[:,:]'])
def inner_2d(v1: 'T', v2: 'T', nghost0: 'int64', nghost1: 'int64'):
"""
Kernel for computing the inner product (case of two 2D vectors).
Parameters
----------
v1, v2 : 2D NumPy array
Data of the vectors from which we are computing the inner product.
nghost0 : int
Number of ghost cells of the arrays along the index 0.
nghost1 : int
Number of ghost cells of the arrays along the index 1.
Returns
-------
res : scalar
Scalar (real or complex) containing the result of the inner product.
"""
shape0, shape1 = v1.shape

res = v1[0, 0] - v1[0, 0]
for i0 in range(nghost0, shape0 - nghost0):
for i1 in range(nghost1, shape1 - nghost1):
res += v1[i0, i1].conjugate() * v2[i0, i1]

return res

#==============================================================================
@template(name='T', types=['float[:,:,:]', 'complex[:,:,:]'])
def inner_3d(v1: 'T', v2: 'T', nghost0: 'int64', nghost1: 'int64', nghost2: 'int64'):
"""
Kernel for computing the inner product (case of two 3D vectors).
Parameters
----------
v1, v2 : 3D NumPy array
Data of the vectors from which we are computing the inner product.
nghost0 : int
Number of ghost cells of the arrays along the index 0.
nghost1 : int
Number of ghost cells of the arrays along the index 1.
nghost2 : int
Number of ghost cells of the arrays along the index 2.
Returns
-------
res : scalar
Scalar (real or complex) containing the result of the inner product.
"""
shape0, shape1, shape2 = v1.shape

res = v1[0, 0, 0] - v1[0, 0, 0]
for i0 in range(nghost0, shape0 - nghost0):
for i1 in range(nghost1, shape1 - nghost1):
for i2 in range(nghost2, shape2 - nghost2):
res += v1[i0, i1, i2].conjugate() * v2[i0, i1, i2]

return res
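
As the warning notes, these kernels conjugate their first argument and skip the ghost layers on every axis. For a 2D complex example (illustrative sizes and ghost widths, not psydac's actual decomposition), the nested loop agrees with np.vdot restricted to the interior, since np.vdot also conjugates its first argument:

import numpy as np

nghost0, nghost1 = 2, 1
rng = np.random.default_rng(0)
v1  = rng.standard_normal((7, 6)) + 1j * rng.standard_normal((7, 6))
v2  = rng.standard_normal((7, 6)) + 1j * rng.standard_normal((7, 6))

interior = (slice(nghost0, 7 - nghost0), slice(nghost1, 6 - nghost1))
res_ref  = np.vdot(v1[interior], v2[interior])   # conj(v1) . v2 on the interior

res_loop = 0.0 + 0.0j                            # same nested loop as inner_2d
for i0 in range(nghost0, 7 - nghost0):
    for i1 in range(nghost1, 6 - nghost1):
        res_loop += v1[i0, i1].conjugate() * v2[i0, i1]

assert np.allclose(res_loop, res_ref)
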