diff --git a/examples/plot_cgls.py b/examples/plot_cgls.py
index e30e4122..713faf71 100644
--- a/examples/plot_cgls.py
+++ b/examples/plot_cgls.py
@@ -45,7 +45,11 @@
 # are then obtained in a :py:class:`pylops_mpi.DistributedArray`. To obtain the
 # overall inversion of the entire MPIBlockDiag, you can utilize the ``asarray()``
 # function of the DistributedArray as shown below.
-xinv, istop, niter, r1norm, r2norm, cost = pylops_mpi.cgls(BDiag, y, niter=15, tol=1e-10, show=True)
+
+# Set initial guess `x0` to zero
+x0 = pylops_mpi.DistributedArray(BDiag.shape[1], dtype=np.float128)
+x0[:] = 0
+xinv, istop, niter, r1norm, r2norm, cost = pylops_mpi.cgls(BDiag, y, x0=x0, niter=15, tol=1e-10, show=True)
 xinv_array = xinv.asarray()
 
 if rank == 0:
diff --git a/pylops_mpi/DistributedArray.py b/pylops_mpi/DistributedArray.py
index 476f0bc9..f6b367db 100644
--- a/pylops_mpi/DistributedArray.py
+++ b/pylops_mpi/DistributedArray.py
@@ -593,12 +593,17 @@ class StackedDistributedArray:
     ----------
     distarrays : :obj:`list`
         List of :class:`pylops_mpi.DistributedArray` objects.
-
+    base_comm : :obj:`mpi4py.MPI.Comm`, optional
+        Base MPI Communicator.
+        Defaults to ``mpi4py.MPI.COMM_WORLD``.
     """
 
-    def __init__(self, distarrays: List):
+    def __init__(self, distarrays: List, base_comm: MPI.Comm = MPI.COMM_WORLD):
         self.distarrays = distarrays
         self.narrays = len(distarrays)
+        self.base_comm = base_comm
+        self.rank = base_comm.Get_rank()
+        self.size = base_comm.Get_size()
 
     def __getitem__(self, index):
         return self.distarrays[index]
@@ -674,6 +679,8 @@ def iadd(self, stacked_array):
         return self
 
     def multiply(self, stacked_array):
+        """Element-wise multiplication of stacked distributed arrays
+        """
         if isinstance(stacked_array, StackedDistributedArray):
             self._check_stacked_size(stacked_array)
         ProductArray = self.copy()
@@ -689,6 +696,8 @@ def multiply(self, stacked_array):
         return ProductArray
 
     def dot(self, stacked_array):
+        """Dot product of stacked distributed arrays
+        """
         self._check_stacked_size(stacked_array)
         dotprod = 0.
         for iarr in range(self.narrays):
diff --git a/pylops_mpi/optimization/basic.py b/pylops_mpi/optimization/basic.py
index b37f64f1..1f168d59 100644
--- a/pylops_mpi/optimization/basic.py
+++ b/pylops_mpi/optimization/basic.py
@@ -1,14 +1,19 @@
 from typing import Callable, Optional, Tuple, Union
 
 from pylops.utils import NDArray
-from pylops_mpi import MPILinearOperator, DistributedArray, StackedDistributedArray
+from pylops_mpi import (
+    MPILinearOperator,
+    DistributedArray,
+    StackedDistributedArray,
+    MPIStackedLinearOperator
+)
 from pylops_mpi.optimization.cls_basic import CG, CGLS
 
 
 def cg(
-    Op: MPILinearOperator,
-    y: Union[DistributedArray, StackedDistributedArray] ,
-    x0: Optional[DistributedArray] = None,
+    Op: Union[MPILinearOperator, MPIStackedLinearOperator],
+    y: Union[DistributedArray, StackedDistributedArray],
+    x0: Union[DistributedArray, StackedDistributedArray],
     niter: int = 10,
     tol: float = 1e-4,
     show: bool = False,
@@ -17,16 +22,16 @@ def cg(
 ) -> Tuple[Union[DistributedArray, StackedDistributedArray], int, NDArray]:
     r"""Conjugate gradient
 
-    Solve a square system of equations given an MPILinearOperator ``Op`` and
+    Solve a square system of equations given either an MPILinearOperator or an MPIStackedLinearOperator ``Op`` and
     distributed data ``y`` using conjugate gradient iterations.
 
     Parameters
     ----------
-    Op : :obj:`pylops_mpi.MPILinearOperator`
+    Op : :obj:`pylops_mpi.MPILinearOperator` or :obj:`pylops_mpi.MPIStackedLinearOperator`
         Operator to invert of size :math:`[N \times N]`
     y : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`
         DistributedArray of size (N,)
-    x0 : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`, optional
+    x0 : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`
         Initial guess
     niter : :obj:`int`, optional
         Number of iterations
@@ -66,9 +71,9 @@
 
 
 def cgls(
-    Op: MPILinearOperator,
+    Op: Union[MPILinearOperator, MPIStackedLinearOperator],
     y: Union[DistributedArray, StackedDistributedArray],
-    x0: Optional[Union[DistributedArray, StackedDistributedArray]] = None,
+    x0: Union[DistributedArray, StackedDistributedArray],
     niter: int = 10,
     damp: float = 0.0,
     tol: float = 1e-4,
@@ -78,16 +83,16 @@ def cgls(
 ) -> Tuple[DistributedArray, int, int, float, float, NDArray]:
     r"""Conjugate gradient least squares
 
-    Solve an overdetermined system of equations given a MPILinearOperator ``Op`` and
+    Solve an overdetermined system of equations given either an MPILinearOperator or an MPIStackedLinearOperator ``Op`` and
     distributed data ``y`` using conjugate gradient iterations.
 
     Parameters
     ----------
-    Op : :obj:`pylops_mpi.MPILinearOperator`
+    Op : :obj:`pylops_mpi.MPILinearOperator` or :obj:`pylops_mpi.MPIStackedLinearOperator`
         MPI Linear Operator to invert of size :math:`[N \times M]`
     y : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`
         DistributedArray of size (N,)
-    x0 : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`, optional
+    x0 : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`
         Initial guess
     niter : :obj:`int`, optional
         Number of iterations
diff --git a/pylops_mpi/optimization/cls_basic.py b/pylops_mpi/optimization/cls_basic.py
index ebadd8c2..b71617c9 100644
--- a/pylops_mpi/optimization/cls_basic.py
+++ b/pylops_mpi/optimization/cls_basic.py
@@ -11,12 +11,12 @@
 class CG(Solver):
     r"""Conjugate gradient
 
-    Solve a square system of equations given an MPILinearOperator ``Op`` and
+    Solve a square system of equations given either an MPILinearOperator or an MPIStackedLinearOperator ``Op`` and
     distributed data ``y`` using conjugate gradient iterations.
 
     Parameters
     ----------
-    Op : :obj:`pylops_mpi.MPILinearOperator`
+    Op : :obj:`pylops_mpi.MPILinearOperator` or :obj:`pylops_mpi.MPIStackedLinearOperator`
         Operator to invert of size :math:`[N \times N]`
 
     Notes
@@ -42,6 +42,8 @@ def _print_setup(self, xcomplex: bool = False) -> None:
         print(head1)
 
     def _print_step(self, x: Union[DistributedArray, StackedDistributedArray]) -> None:
+        if isinstance(x, StackedDistributedArray):
+            x = x.distarrays[0]
         strx = f"{x[0]:1.2e} " if np.iscomplexobj(x.local_array) else f"{x[0]:11.4e} "
         msg = f"{self.iiter:6g} " + strx + f"{self.cost[self.iiter]:11.4e}"
         print(msg)
@@ -49,7 +51,7 @@
     def setup(
         self,
         y: Union[DistributedArray, StackedDistributedArray],
-        x0: Optional[Union[DistributedArray, StackedDistributedArray]] = None,
+        x0: Union[DistributedArray, StackedDistributedArray],
         niter: Optional[int] = None,
         tol: float = 1e-4,
         show: bool = False,
@@ -60,9 +62,8 @@
         ----------
         y : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`
             Data of size (N,)
-        x0 : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`, optional
-            Initial guess of size (N,). If ``None``, initialize
-            internally as zero vector (will always default to :obj:`pylops_mpi.DistributedArray`)
+        x0 : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`
+            Initial guess of size (N,).
         niter : :obj:`int`, optional
             Number of iterations (default to ``None`` in case a user wants to manually step over the solver)
         tol : :obj:`float`, optional
@@ -81,16 +82,8 @@
         self.niter = niter
         self.tol = tol
 
-        if x0 is None:
-            self.r = self.y.copy()
-            x = DistributedArray(global_shape=self.Op.shape[1],
-                                 partition=self.r.partition,
-                                 local_shapes=self.r.local_shapes,
-                                 dtype=y.dtype)
-            x[:] = 0
-        else:
-            x = x0.copy()
-            self.r = self.y - self.Op.matvec(x)
+        x = x0.copy()
+        self.r = self.y - self.Op.matvec(x)
         self.rank = x.rank
         self.c = self.r.copy()
         self.kold = np.abs(self.r.dot(self.r.conj()))
@@ -101,7 +94,10 @@
         self.iiter = 0
 
         if show and self.rank == 0:
-            self._print_setup(np.iscomplexobj(x.local_array))
+            if isinstance(x, StackedDistributedArray):
+                self._print_setup(any(np.iscomplexobj(x1.local_array) for x1 in x.distarrays))
+            else:
+                self._print_setup(np.iscomplexobj(x.local_array))
         return x
 
     def step(self, x: Union[DistributedArray, StackedDistributedArray],
@@ -204,21 +200,20 @@ def finalize(self, show: bool = False) -> None:
     def solve(
         self,
         y: Union[DistributedArray, StackedDistributedArray],
-        x0: Optional[Union[DistributedArray, StackedDistributedArray]] = None,
+        x0: Union[DistributedArray, StackedDistributedArray],
         niter: int = 10,
         tol: float = 1e-4,
         show: bool = False,
         itershow: Tuple[int, int, int] = (10, 10, 10),
-    ) -> Tuple[DistributedArray, int, NDArray]:
+    ) -> Tuple[Union[DistributedArray, StackedDistributedArray], int, NDArray]:
         r"""Run entire solver
 
         Parameters
         ----------
         y : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`
             Data of size (N,)
-        x0 : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`, optional
-            Initial guess of size (N,). If ``None``, initialize
-            internally as zero vector (will always default to :obj:`pylops_mpi.DistributedArray`)
+        x0 : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`
+            Initial guess of size (N,).
         niter : :obj:`int`, optional
             Number of iterations
         tol : :obj:`float`, optional
@@ -250,12 +245,12 @@
 class CGLS(Solver):
     r"""Conjugate gradient least squares
 
-    Solve an overdetermined system of equations given a MPILinearOperator ``Op``
+    Solve an overdetermined system of equations given either an MPILinearOperator or an MPIStackedLinearOperator ``Op``
     and distributed data ``y`` using conjugate gradient iterations.
 
     Parameters
     ----------
-    Op : :obj:`pylops_mpi.MPILinearOperator`
+    Op : :obj:`pylops_mpi.MPILinearOperator` or :obj:`pylops_mpi.MPIStackedLinearOperator`
         Operator to invert of size :math:`[N \times M]`
 
     Notes
@@ -288,6 +283,8 @@ def _print_setup(self, xcomplex: bool = False) -> None:
         print(head1)
 
     def _print_step(self, x: Union[DistributedArray, StackedDistributedArray]) -> None:
+        if isinstance(x, StackedDistributedArray):
+            x = x.distarrays[0]
         strx = f"{x[0]:1.2e} " if np.iscomplexobj(x.local_array) else f"{x[0]:11.4e} "
         msg = (
             f"{self.iiter:6g} "
@@ -298,21 +295,20 @@
 
     def setup(self,
               y: Union[DistributedArray, StackedDistributedArray],
-              x0: Optional[DistributedArray] = None,
+              x0: Union[DistributedArray, StackedDistributedArray],
              niter: Optional[int] = None,
              damp: float = 0.0,
              tol: float = 1e-4,
              show: bool = False,
-              ) -> DistributedArray:
+              ) -> Union[DistributedArray, StackedDistributedArray]:
         r"""Setup solver
 
         Parameters
         ----------
         y : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`
             Data of size :math:`[N \times 1]`
-        x0 : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`, optional
-            Initial guess of size (M,). If ``None``, Defaults to a zero vector
-            (will always default to :obj:`pylops_mpi.DistributedArray`)
+        x0 : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`
+            Initial guess of size (M,).
         niter : :obj:`int`, optional
             Number of iterations (default to ``None`` in case a user wants to
             manually step over the solver)
@@ -334,19 +330,10 @@
         self.tol = tol
         self.niter = niter
 
-        # initialize solver
-        if x0 is None:
-            self.s = y.copy()
-            r = self.Op.rmatvec(self.s)
-            x = DistributedArray(global_shape=self.Op.shape[1], dtype=y.dtype,
-                                 local_shapes=r.local_shapes,
-                                 partition=r.partition)
-            x[:] = 0
-        else:
-            x = x0.copy()
-            self.s = self.y - self.Op.matvec(x)
-            damped_x = damp * x
-            r = self.Op.rmatvec(self.s) - damped_x
+        x = x0.copy()
+        self.s = self.y - self.Op.matvec(x)
+        damped_x = x * damp
+        r = self.Op.rmatvec(self.s) - damped_x
         self.rank = x.rank
         self.c = r.copy()
         self.q = self.Op.matvec(self.c)
@@ -361,7 +348,10 @@
 
         # print setup
         if show and self.rank == 0:
-            self._print_setup(np.iscomplexobj(x.local_array))
+            if isinstance(x, StackedDistributedArray):
+                self._print_setup(any(np.iscomplexobj(x1.local_array) for x1 in x.distarrays))
+            else:
+                self._print_setup(np.iscomplexobj(x.local_array))
         return x
 
     def step(self, x: Union[DistributedArray, StackedDistributedArray],
@@ -423,7 +413,7 @@
 
         Returns
         -------
-        x : :obj:`pylops_mpi.DistributedArray`
+        x : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`
             Estimated model of size (M, ).
 
""" @@ -467,7 +457,7 @@ def finalize(self, show: bool = False, **kwargs) -> None: def solve(self, y: Union[DistributedArray, StackedDistributedArray], - x0: Optional[Union[DistributedArray, StackedDistributedArray]] = None, + x0: Union[DistributedArray, StackedDistributedArray], niter: int = 10, damp: float = 0.0, tol: float = 1e-4, @@ -481,8 +471,7 @@ def solve(self, y : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` Data of size (N, ) x0 : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` - Initial guess of size (M, ). If ``None``, initialize internally as zero vector - (will always default to :obj:`pylops_mpi.DistributedArray`) + Initial guess of size (M, ). niter : :obj:`int`, optional Number of iterations (default to ``None`` in case a user wants to manually step over the solver) diff --git a/tests/test_solver.py b/tests/test_solver.py index 91ba0596..81efb524 100644 --- a/tests/test_solver.py +++ b/tests/test_solver.py @@ -16,8 +16,10 @@ DistributedArray, MPIBlockDiag, MPIHStack, + MPIStackedBlockDiag, MPIVStack, - Partition + Partition, + StackedDistributedArray ) np.random.seed(42) @@ -104,7 +106,10 @@ def test_cg(par): ) x0_global = x0.asarray() else: - x0 = None + # Set TO 0s if x0 = False + x0 = DistributedArray(global_shape=size * par['nx'], dtype=par['dtype']) + x0[:] = 0 + x0_global = x0.asarray() y = BDiag_MPI * x xinv = cg(BDiag_MPI, y, x0=x0, niter=par["nx"], tol=1e-5, show=True)[0] assert isinstance(xinv, DistributedArray) @@ -147,7 +152,10 @@ def test_cgls(par): ) x0_global = x0.asarray() else: - x0 = None + # Set TO 0s if x0 = False + x0 = DistributedArray(global_shape=size * par['nx'], dtype=par['dtype']) + x0[:] = 0 + x0_global = x0.asarray() y = BDiag_MPI * x xinv = cgls(BDiag_MPI, y, x0=x0, niter=par["nx"], tol=1e-5, show=True)[0] assert isinstance(xinv, DistributedArray) @@ -190,8 +198,10 @@ def test_cgls_broadcastdata(par): ) x0_global = x0.asarray() else: - x0 = None - + # Set TO 0s if x0 = False + x0 = DistributedArray(global_shape=size * par['nx'], dtype=par['dtype']) + x0[:] = 0 + x0_global = x0.asarray() y = HStack_MPI @ x assert y.partition is Partition.BROADCAST @@ -235,8 +245,10 @@ def test_cgls_broadcastmodel(par): ) x0_global = x0.asarray() else: - x0 = None - + # Set TO 0s if x0 = False + x0 = DistributedArray(global_shape=par['nx'], dtype=par['dtype'], partition=Partition.BROADCAST) + x0[:] = 0 + x0_global = x0.asarray() y = VStack_MPI @ x assert y.partition is Partition.SCATTER @@ -257,3 +269,131 @@ def test_cgls_broadcastmodel(par): y1 = Vstack @ x_global xinv1 = pylops.cgls(Vstack, y1, x0=x0, niter=par["nx"], tol=1e-5, show=True)[0] assert_allclose(xinv_array, xinv1, rtol=1e-14) + + +@pytest.mark.mpi(min_size=2) +@pytest.mark.parametrize( + "par", [(par1), (par1j), (par2), (par2j), (par3), (par3j), (par4), (par4j)] +) +def test_cg_stacked(par): + """CG with MPIStackedBlockDiag""" + A = np.ones((par["ny"], par["nx"])) + par[ + "imag"] * np.ones((par["ny"], par["nx"])) + Aop = MatrixMult(np.conj(A.T) @ A + 1e-5 * np.eye(par["nx"], dtype=par['dtype']), + dtype=par['dtype']) + # To make positive definite matrix + BDiag_MPI = MPIBlockDiag(ops=[Aop, ]) + StackedBDiag_MPI = MPIStackedBlockDiag(ops=[BDiag_MPI, BDiag_MPI]) + + dist1 = DistributedArray(global_shape=size * par['nx'], dtype=par['dtype']) + dist1[:] = np.random.normal(1, 10, par["nx"]) + par["imag"] * np.random.normal(10, 10, par["nx"]) + dist2 = DistributedArray(global_shape=size * par['nx'], dtype=par['dtype']) + dist2[:] = 
+    x = StackedDistributedArray([dist1, dist2])
+    x_global = x.asarray()
+
+    if par["x0"]:
+        dist1_0 = DistributedArray(global_shape=size * par['nx'], dtype=par['dtype'])
+        dist1_0[:] = np.random.normal(0, 10, par["nx"]) + par["imag"] * np.random.normal(
+            10, 10, par["nx"]
+        )
+        dist2_0 = DistributedArray(global_shape=size * par['nx'], dtype=par['dtype'])
+        dist2_0[:] = np.random.normal(10, 10, par["nx"]) + par["imag"] * np.random.normal(
+            0, 10, par["nx"]
+        )
+        x0 = StackedDistributedArray([dist1_0, dist2_0])
+        x0_global = x0.asarray()
+    else:
+        # Set x0 to zero vectors when par["x0"] is False
+        dist1_0 = DistributedArray(global_shape=size * par['nx'], dtype=par['dtype'])
+        dist1_0[:] = 0
+        dist2_0 = DistributedArray(global_shape=size * par['nx'], dtype=par['dtype'])
+        dist2_0[:] = 0
+        x0 = StackedDistributedArray([dist1_0, dist2_0])
+        x0_global = x0.asarray()
+
+    y = StackedBDiag_MPI * x
+    xinv = cg(StackedBDiag_MPI, y, x0=x0, niter=par["nx"], tol=1e-5, show=True)[0]
+    assert isinstance(xinv, StackedDistributedArray)
+    xinv_array = xinv.asarray()
+
+    if rank == 0:
+        mats = [np.ones(shape=(par["ny"], par["nx"])) + par[
+            "imag"] * np.ones(shape=(par["ny"], par["nx"])) for i in range(size)]
+        ops = [MatrixMult(np.conj(mats[i].T) @ mats[i] + 1e-5 * np.eye(par["nx"], dtype=par['dtype']),
+                          dtype=par['dtype']) for i in range(size)]
+        # To make positive definite matrix
+        BDiag = BlockDiag(ops=ops, forceflat=True)
+        StackedBDiag = BlockDiag(ops=[BDiag, BDiag], forceflat=True)
+        if par["x0"]:
+            x0 = x0_global
+        else:
+            x0 = None
+        y1 = StackedBDiag * x_global
+        xinv1 = pylops.cg(StackedBDiag, y1, x0=x0, niter=par["nx"], tol=1e-5, show=True)[0]
+        assert_allclose(xinv_array, xinv1, rtol=1e-14)
+
+
+@pytest.mark.mpi(min_size=2)
+@pytest.mark.parametrize(
+    "par", [(par1), (par1j), (par2), (par2j), (par3), (par3j), (par4), (par4j)]
+)
+def test_cgls_stacked(par):
+    """CGLS with MPIStackedBlockDiag"""
+    A = np.ones((par["ny"], par["nx"])) + par[
+        "imag"] * np.ones((par["ny"], par["nx"]))
+    Aop = MatrixMult(np.conj(A.T) @ A + 1e-5 * np.eye(par["nx"], dtype=par['dtype']),
+                     dtype=par['dtype'])
+    # To make positive definite matrix
+    BDiag_MPI = MPIBlockDiag(ops=[Aop, ])
+    VStack_MPI = MPIVStack(ops=[Aop, ])
+    StackedBDiag_MPI = MPIStackedBlockDiag(ops=[BDiag_MPI, VStack_MPI])
+
+    dist1 = DistributedArray(global_shape=size * par['nx'], dtype=par['dtype'])
+    dist1[:] = np.random.normal(1, 10, par["nx"]) + par["imag"] * np.random.normal(10, 10, par["nx"])
+    dist2 = DistributedArray(global_shape=par['nx'], partition=Partition.BROADCAST, dtype=par['dtype'])
+    dist2[:] = np.random.normal(5, 10, dist2.local_shape) + par["imag"] * np.random.normal(50, 10, dist2.local_shape)
+    x = StackedDistributedArray([dist1, dist2])
+    x_global = x.asarray()
+
+    if par["x0"]:
+        dist1_0 = DistributedArray(global_shape=size * par['nx'], dtype=par['dtype'])
+        dist1_0[:] = np.random.normal(0, 10, par["nx"]) + par["imag"] * np.random.normal(
+            10, 10, par["nx"]
+        )
+        dist2_0 = DistributedArray(global_shape=par['nx'], partition=Partition.BROADCAST, dtype=par['dtype'])
+        dist2_0[:] = np.random.normal(10, 10, dist2_0.local_shape) + par["imag"] * np.random.normal(
+            0, 10, dist2_0.local_shape
+        )
+        x0 = StackedDistributedArray([dist1_0, dist2_0])
+        x0_global = x0.asarray()
+    else:
+        # Set x0 to zero vectors when par["x0"] is False
+        dist1_0 = DistributedArray(global_shape=size * par['nx'], dtype=par['dtype'])
+        dist1_0[:] = 0
+        dist2_0 = DistributedArray(global_shape=par['nx'], partition=Partition.BROADCAST, dtype=par['dtype'])
+        dist2_0[:] = 0
+        x0 = StackedDistributedArray([dist1_0, dist2_0])
+        x0_global = x0.asarray()
+
+    y = StackedBDiag_MPI * x
+    xinv = cgls(StackedBDiag_MPI, y, x0=x0, niter=par["nx"], tol=1e-5, show=True)[0]
+    assert isinstance(xinv, StackedDistributedArray)
+    xinv_array = xinv.asarray()
+
+    if rank == 0:
+        mats = [np.ones(shape=(par["ny"], par["nx"])) + par[
+            "imag"] * np.ones(shape=(par["ny"], par["nx"])) for i in range(size)]
+        ops = [MatrixMult(np.conj(mats[i].T) @ mats[i] + 1e-5 * np.eye(par["nx"], dtype=par['dtype']),
+                          dtype=par['dtype']) for i in range(size)]
+        # To make positive definite matrix
+        BDiag = BlockDiag(ops=ops, forceflat=True)
+        V_Stack = VStack(ops=ops, forceflat=True)
+        StackedBDiag = BlockDiag(ops=[BDiag, V_Stack], forceflat=True)
+        if par["x0"]:
+            x0 = x0_global
+        else:
+            x0 = None
+        y1 = StackedBDiag * x_global
+        xinv1 = pylops.cgls(StackedBDiag, y1, x0=x0, niter=par["nx"], tol=1e-5, show=True)[0]
+        assert_allclose(xinv_array, xinv1, rtol=1e-14)
diff --git a/tutorials/lsm.py b/tutorials/lsm.py
index 8793358a..9db25795 100644
--- a/tutorials/lsm.py
+++ b/tutorials/lsm.py
@@ -164,7 +164,10 @@
 # solver.
 
 # Inverse
-minv_dist = pylops_mpi.cgls(VStack, d_dist, niter=100, show=True)[0]
+# Initialize x0 to zero
+x0 = pylops_mpi.DistributedArray(VStack.shape[1], partition=pylops_mpi.Partition.BROADCAST)
+x0[:] = 0
+minv_dist = pylops_mpi.cgls(VStack, d_dist, x0=x0, niter=100, show=True)[0]
 minv = minv_dist.asarray().reshape((nx, nz))
 d_inv_dist = VStack @ minv_dist
 d_inv = d_inv_dist.asarray().reshape(nstot, nr, nt)
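
Reviewer note — usage sketch, not part of the diff. Since ``x0`` is now a required argument of ``cg``/``cgls``, every call site has to build and fill the initial guess explicitly, as the example, tutorial, and tests above do. The sketch below condenses the two call patterns this PR enables: a plain MPILinearOperator and the new MPIStackedLinearOperator path. It is a minimal sketch assuming a 2-rank run; the file name ``sketch_x0.py``, the operator sizes, and the random test matrix are illustrative assumptions, not taken from the repository:

    # Run with: mpiexec -n 2 python sketch_x0.py  (file name is hypothetical)
    import numpy as np
    from mpi4py import MPI

    import pylops
    import pylops_mpi

    size = MPI.COMM_WORLD.Get_size()
    nx = 5

    # One SPD nx-by-nx block per rank -> overall block-diagonal operator
    A = np.random.normal(0.0, 1.0, (nx, nx))
    Aop = pylops.MatrixMult(np.conj(A.T) @ A + 1e-5 * np.eye(nx))
    BDiag = pylops_mpi.MPIBlockDiag(ops=[Aop, ])

    x = pylops_mpi.DistributedArray(global_shape=size * nx)
    x[:] = np.random.normal(0.0, 1.0, nx)
    y = BDiag * x

    # Pattern 1: plain MPILinearOperator. x0 must now be built and zero-filled
    # by the caller; its global shape is the model size of the operator.
    x0 = pylops_mpi.DistributedArray(global_shape=BDiag.shape[1])
    x0[:] = 0
    xinv = pylops_mpi.cgls(BDiag, y, x0=x0, niter=10, show=False)[0]

    # Pattern 2: MPIStackedLinearOperator. x0 is a StackedDistributedArray
    # whose components match the model partition of each stacked operator.
    SBDiag = pylops_mpi.MPIStackedBlockDiag(ops=[BDiag, BDiag])
    z1 = pylops_mpi.DistributedArray(global_shape=size * nx)
    z1[:] = 0
    z2 = pylops_mpi.DistributedArray(global_shape=size * nx)
    z2[:] = 0
    x0stack = pylops_mpi.StackedDistributedArray([z1, z2])
    ystack = SBDiag * pylops_mpi.StackedDistributedArray([x.copy(), x.copy()])
    xinvstack = pylops_mpi.cg(SBDiag, ystack, x0=x0stack, niter=10, show=False)[0]

On the design choice: the internal zero-filling branch (``x0=None``) was removed presumably because, for a stacked operator, the solver cannot infer how each component of a default initial guess should be partitioned — the removed docstrings noted the internal default "will always default to :obj:`pylops_mpi.DistributedArray`". Making ``x0`` explicit pushes that knowledge back to the caller.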