
Commit

deploy: 1351e7a
mrava87 committed Nov 23, 2024
1 parent 46b13b4 commit 98c48eb
Showing 64 changed files with 697 additions and 159 deletions.
Binary file modified .doctrees/api/generated/pylops_mpi.DistributedArray.doctree
Binary file not shown.
Binary file modified .doctrees/api/generated/pylops_mpi.Partition.doctree
Binary file not shown.
Binary file not shown.
Binary file modified .doctrees/api/generated/pylops_mpi.waveeqprocessing.MPIMDC.doctree
Binary file not shown.
Binary file modified .doctrees/api/index.doctree
Binary file not shown.
Binary file modified .doctrees/environment.pickle
Binary file not shown.
Binary file modified .doctrees/gallery/plot_cgls.doctree
Binary file not shown.
Binary file modified .doctrees/gallery/plot_derivative.doctree
Binary file not shown.
Binary file modified .doctrees/gallery/plot_distributed_array.doctree
Binary file not shown.
Binary file modified .doctrees/gallery/plot_mpilinop.doctree
Binary file not shown.
Binary file modified .doctrees/gallery/plot_stacked_array.doctree
Binary file not shown.
Binary file modified .doctrees/gallery/plot_stacking.doctree
Binary file not shown.
Binary file modified .doctrees/gallery/sg_execution_times.doctree
Binary file not shown.
Binary file modified .doctrees/sg_execution_times.doctree
Binary file not shown.
Binary file modified .doctrees/tutorials/lsm.doctree
Binary file not shown.
Binary file modified .doctrees/tutorials/mdd.doctree
Binary file not shown.
Binary file modified .doctrees/tutorials/poststack.doctree
Binary file not shown.
Binary file modified .doctrees/tutorials/sg_execution_times.doctree
Binary file not shown.
Binary file modified _downloads/26ee88e64bc1c66a295ff418b6764986/mdd.zip
Binary file not shown.
106 changes: 103 additions & 3 deletions _downloads/475cd62e887dac3f4cecf3ef64657324/plot_distributed_array.py
@@ -16,13 +16,18 @@
plt.close("all")
np.random.seed(42)

# MPI parameters
size = MPI.COMM_WORLD.Get_size() # number of processes
rank = MPI.COMM_WORLD.Get_rank() # rank of current process


# Defining the global shape of the distributed array
global_shape = (10, 5)
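
# Note: to actually distribute the array over several processes, this script is
# typically launched with an MPI launcher, e.g. (assuming mpi4py and an MPI
# implementation providing the ``mpiexec`` command are installed):
#
#   mpiexec -n 4 python plot_distributed_array.py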

###############################################################################
# Let's start by defining the class with the input parameters ``global_shape``,
# ``partition``, and ``axis``. Here's an example implementation of the class
# with ``axis=0``.
arr = pylops_mpi.DistributedArray(global_shape=global_shape,
partition=pylops_mpi.Partition.SCATTER,
axis=0)
@@ -72,6 +77,9 @@
pylops_mpi.plot_local_arrays(arr2, "Distributed Array - 2", vmin=0, vmax=1)

###############################################################################
# Let's now move on to the various operations that one can perform on
# :py:class:`pylops_mpi.DistributedArray` objects.
#
# **Scaling** - Each process operates on its local portion of
# the array and scales the corresponding elements by a given scalar.
scale_arr = .5 * arr1
@@ -101,3 +109,95 @@
# of the array and multiplies the corresponding elements together.
mult_arr = arr1 * arr2
pylops_mpi.plot_local_arrays(mult_arr, "Multiplication", vmin=0, vmax=1)

###############################################################################
# Finally, let's look at the case where parallelism could be applied over
# multiple axes - and more specifically one belonging to the model/data and one
# to the operator. This kind of "2D"-parallelism requires repeating parts of
# the model/data over groups of ranks. However, when global operations such as
# ``dot`` or ``norm`` are applied to a ``pylops_mpi.DistributedArray`` of
# this kind, we need to ensure that the repeated portions do not all contribute
# to the computation. This can be achieved via the ``mask`` input parameter:
# a list of size equal to the number of ranks, whose elements contain the index
# of the subgroup/subcommunicator each rank belongs to (partial arrays in
# different subgroups are identical to each other).
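#
# As a concrete illustration (assuming, for this sketch only, 4 ranks and
# ``nsub=2`` subgroups as used below), the mask would be:
#
#    >>> np.repeat(np.arange(4 // 2), 2)
#    array([0, 0, 1, 1])
#
# i.e., ranks 0-1 form subgroup 0 and ranks 2-3 form subgroup 1, with the two
# subgroups holding identical copies of the data so that reductions count each
# copy only once.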

# Defining the local and global shape of the distributed array
local_shape = 5
global_shape = local_shape * size

# Create mask
nsub = 2
subsize = max(1, size // nsub)
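# (the max(1, ...) guard keeps subsize at least 1 when the example is run on
# fewer ranks than nsub, e.g. on a single process)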
mask = np.repeat(np.arange(size // subsize), subsize)
if rank == 0:
print("1D masked arrays")
print(f"Mask: {mask}")

# Create and fill the distributed array
x = pylops_mpi.DistributedArray(global_shape=global_shape,
partition=Partition.SCATTER,
mask=mask)
x[:] = (MPI.COMM_WORLD.Get_rank() % subsize + 1.) * np.ones(local_shape)
xloc = x.asarray()

# Dot product
dot = x.dot(x)
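# The reference value is computed on the slice of the global array owned by
# this rank's subgroup (ranks sharing a mask value hold an identical copy),
# so it should match the masked distributed dot product.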
dotloc = np.dot(xloc[local_shape * subsize * (rank // subsize):local_shape * subsize * (rank // subsize + 1)],
xloc[local_shape * subsize * (rank // subsize):local_shape * subsize * (rank // subsize + 1)])
print(f"Dot check (Rank {rank}): {np.allclose(dot, dotloc)}")

# Norm
norm = x.norm(ord=2)
normloc = np.linalg.norm(xloc[local_shape * subsize * (rank // subsize):local_shape * subsize * (rank // subsize + 1)],
ord=2)
print(f"Norm check (Rank {rank}): {np.allclose(norm, normloc)}")

###############################################################################
# And with 2d-arrays distributed over axis=1
extra_dim_shape = 2
if rank == 0:
print("2D masked arrays (over axis=1)")

# Create and fill the distributed array
x = pylops_mpi.DistributedArray(global_shape=(extra_dim_shape, global_shape),
partition=Partition.SCATTER,
axis=1, mask=mask)
x[:] = (MPI.COMM_WORLD.Get_rank() % subsize + 1.) * np.ones((extra_dim_shape, local_shape))
xloc = x.asarray()

# Dot product
dot = x.dot(x)
dotloc = np.dot(xloc[:, local_shape * subsize * (rank // subsize):local_shape * subsize * (rank // subsize + 1)].ravel(),
xloc[:, local_shape * subsize * (rank // subsize):local_shape * subsize * (rank // subsize + 1)].ravel())
print(f"Dot check (Rank {rank}): {np.allclose(dot, dotloc)}")

# Norm
norm = x.norm(ord=2, axis=1)
normloc = np.linalg.norm(xloc[:, local_shape * subsize * (rank // subsize):local_shape * subsize * (rank // subsize + 1)],
ord=2, axis=1)
print(f"Norm check (Rank {rank}): {np.allclose(norm, normloc)}")

###############################################################################
# And finally with 2d-arrays distributed over axis=0
if rank == 0:
print("2D masked arrays (over axis=0)")

# Create and fill the distributed array
x = pylops_mpi.DistributedArray(global_shape=(global_shape, extra_dim_shape),
partition=Partition.SCATTER,
axis=0, mask=mask)
x[:] = (MPI.COMM_WORLD.Get_rank() % subsize + 1.) * np.ones((local_shape, extra_dim_shape))
xloc = x.asarray()

# Dot product
dot = x.dot(x)
dotloc = np.dot(xloc[local_shape * subsize * (rank // subsize):local_shape * subsize * (rank // subsize + 1)].ravel(),
xloc[local_shape * subsize * (rank // subsize):local_shape * subsize * (rank // subsize + 1)].ravel())
print(f"Dot check (Rank {rank}): {np.allclose(dot, dotloc)}")

# Norm
norm = x.norm(ord=2, axis=0)
normloc = np.linalg.norm(xloc[local_shape * subsize * (rank // subsize):local_shape * subsize * (rank // subsize + 1)],
ord=2, axis=0)
print(f"Norm check (Rank {rank}): {np.allclose(norm, normloc)}")
Binary file not shown.
Binary file modified _downloads/5efa6748c84dc8238e5f246c1d96dfdd/plot_mdc.zip
Binary file not shown.
Binary file not shown.
@@ -15,14 +15,14 @@
},
"outputs": [],
"source": [
"from matplotlib import pyplot as plt\nimport numpy as np\nfrom mpi4py import MPI\n\nfrom pylops_mpi.DistributedArray import local_split, Partition\nimport pylops_mpi\n\nplt.close(\"all\")\nnp.random.seed(42)\n\n# Defining the global shape of the distributed array\nglobal_shape = (10, 5)"
"from matplotlib import pyplot as plt\nimport numpy as np\nfrom mpi4py import MPI\n\nfrom pylops_mpi.DistributedArray import local_split, Partition\nimport pylops_mpi\n\nplt.close(\"all\")\nnp.random.seed(42)\n\n# MPI parameters\nsize = MPI.COMM_WORLD.Get_size() # number of nodes\nrank = MPI.COMM_WORLD.Get_rank() # rank of current node\n\n\n# Defining the global shape of the distributed array\nglobal_shape = (10, 5)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's start by defining the\nclass with the input parameters ``global_shape``,\n``partition``, and ``axis``. Here's an example implementation of the class with ``axis=0``.\n\n"
"Let's start by defining the class with the input parameters ``global_shape``,\n``partition``, and ``axis``. Here's an example implementation of the class\nwith ``axis=0``.\n\n"
]
},
{
@@ -94,7 +94,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"**Scaling** - Each process operates on its local portion of\nthe array and scales the corresponding elements by a given scalar.\n\n"
"Let's move now to consider various operations that one can perform on\n:py:class:`pylops_mpi.DistributedArray` objects.\n\n**Scaling** - Each process operates on its local portion of\nthe array and scales the corresponding elements by a given scalar.\n\n"
]
},
{
@@ -179,6 +179,60 @@
"source": [
"mult_arr = arr1 * arr2\npylops_mpi.plot_local_arrays(mult_arr, \"Multiplication\", vmin=0, vmax=1)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, let's look at the case where parallelism could be applied over\nmultiple axes - and more specifically one belonging to the model/data and one\nto the operator. This kind of \"2D\"-parallelism requires repeating parts of\nthe model/data over groups of ranks. However, when global operations such as\n``dot`` or ``norm`` are applied on a ``pylops_mpi.DistributedArray`` of\nthis kind, we need to ensure that the repeated portions to do all contribute\nto the computation. This can be achieved via the ``mask`` input parameter:\na list of size equal to the number of ranks, whose elements contain the index\nof the subgroup/subcommunicator (with partial arrays in different groups\nare identical to each other).\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Defining the local and global shape of the distributed array\nlocal_shape = 5\nglobal_shape = local_shape * size\n\n# Create mask\nnsub = 2\nsubsize = max(1, size // nsub)\nmask = np.repeat(np.arange(size // subsize), subsize)\nif rank == 0:\n print(\"1D masked arrays\")\n print(f\"Mask: {mask}\")\n\n# Create and fill the distributed array\nx = pylops_mpi.DistributedArray(global_shape=global_shape,\n partition=Partition.SCATTER,\n mask=mask)\nx[:] = (MPI.COMM_WORLD.Get_rank() % subsize + 1.) * np.ones(local_shape)\nxloc = x.asarray()\n\n# Dot product\ndot = x.dot(x)\ndotloc = np.dot(xloc[local_shape * subsize * (rank // subsize):local_shape * subsize * (rank // subsize + 1)],\n xloc[local_shape * subsize * (rank // subsize):local_shape * subsize * (rank // subsize + 1)])\nprint(f\"Dot check (Rank {rank}): {np.allclose(dot, dotloc)}\")\n\n# Norm\nnorm = x.norm(ord=2)\nnormloc = np.linalg.norm(xloc[local_shape * subsize * (rank // subsize):local_shape * subsize * (rank // subsize + 1)],\n ord=2)\nprint(f\"Norm check (Rank {rank}): {np.allclose(norm, normloc)}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"And with 2d-arrays distributed over axis=1\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"extra_dim_shape = 2\nif rank == 0:\n print(\"2D masked arrays (over axis=1)\")\n\n# Create and fill the distributed array\nx = pylops_mpi.DistributedArray(global_shape=(extra_dim_shape, global_shape),\n partition=Partition.SCATTER,\n axis=1, mask=mask)\nx[:] = (MPI.COMM_WORLD.Get_rank() % subsize + 1.) * np.ones((extra_dim_shape, local_shape))\nxloc = x.asarray()\n\n# Dot product\ndot = x.dot(x)\ndotloc = np.dot(xloc[:, local_shape * subsize * (rank // subsize):local_shape * subsize * (rank // subsize + 1)].ravel(),\n xloc[:, local_shape * subsize * (rank // subsize):local_shape * subsize * (rank // subsize + 1)].ravel())\nprint(f\"Dot check (Rank {rank}): {np.allclose(dot, dotloc)}\")\n\n# Norm\nnorm = x.norm(ord=2, axis=1)\nnormloc = np.linalg.norm(xloc[:, local_shape * subsize * (rank // subsize):local_shape * subsize * (rank // subsize + 1)],\n ord=2, axis=1)\nprint(f\"Norm check (Rank {rank}): {np.allclose(norm, normloc)}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"And finally with 2d-arrays distributed over axis=0\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"if rank == 0:\n print(\"2D masked arrays (over axis=0)\")\n\n# Create and fill the distributed array\nx = pylops_mpi.DistributedArray(global_shape=(global_shape, extra_dim_shape),\n partition=Partition.SCATTER,\n axis=0, mask=mask)\nx[:] = (MPI.COMM_WORLD.Get_rank() % subsize + 1.) * np.ones((local_shape, extra_dim_shape))\nxloc = x.asarray()\n\n# Dot product\ndot = x.dot(x)\ndotloc = np.dot(xloc[local_shape * subsize * (rank // subsize):local_shape * subsize * (rank // subsize + 1)].ravel(),\n xloc[local_shape * subsize * (rank // subsize):local_shape * subsize * (rank // subsize + 1)].ravel())\nprint(f\"Dot check (Rank {rank}): {np.allclose(dot, dotloc)}\")\n\n# Norm\nnorm = x.norm(ord=2, axis=0)\nnormloc = np.linalg.norm(xloc[local_shape * subsize * (rank // subsize):local_shape * subsize * (rank // subsize + 1)],\n ord=2, axis=0)\nprint(f\"Norm check (Rank {rank}): {np.allclose(norm, normloc)}\")"
]
}
],
"metadata": {
Binary file modified _downloads/9bee2a20ef32623544866c2a2f77dd81/poststack.zip
Binary file not shown.
Binary file modified _downloads/9eba87999dd4935450bd636517fb09a6/lsm.zip
Binary file not shown.
Binary file modified _downloads/ab9fd39eabb992c3726b64c0cafc595c/plot_derivative.zip
Binary file not shown.
Binary file modified _downloads/bd4c14cdaa8a180a55f5c10415c135ce/plot_stacking.zip
Binary file not shown.
Binary file modified _downloads/c0f8f58e22d7f69d4f9f8913b8c63b4b/plot_mpilinop.zip
Binary file not shown.
Binary file modified _downloads/d80673d4defd97e7322148f937b4a88d/plot_cgls.zip
Binary file not shown.
