From 7a7352f290e8bcdf94b4fc87b968ce2bea83214f Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Wed, 11 Dec 2024 12:47:37 -0800 Subject: [PATCH 1/6] AMReX/pyAMReX/PICSAR: weekly update (#5501) Anticipating the weekly update due to upcoming retreat. This makes https://github.com/AMReX-Codes/amrex/pull/4255 visible to WarpX. - Weekly update to latest AMReX: ```console ./Tools/Release/updateAMReX.py ``` - Weekly update to latest pyAMReX (no changes since 24.12): ```console ./Tools/Release/updatepyAMReX.py ``` - Weekly update to latest PICSAR (no changes since 24.09): ```console ./Tools/Release/updatePICSAR.py ``` --- .github/workflows/cuda.yml | 2 +- cmake/dependencies/AMReX.cmake | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 0a68d850e7e..a7cd884039b 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -126,7 +126,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 24.12 && cd - + cd ../amrex && git checkout --detach 96db0a665ff1e6bbe638490fd02d3aafb9188f6b && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 152a2618dee..8a7d11d5b1d 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -294,7 +294,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "24.12" +set(WarpX_amrex_branch "96db0a665ff1e6bbe638490fd02d3aafb9188f6b" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") From 66204feae34ef4039b0ea787849486c1b04ecbf2 Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Wed, 11 Dec 2024 12:49:06 -0800 Subject: [PATCH 2/6] Add Dan Barnes' effective potential (semi-implicit) Poisson solver (#5079) This PR adds a new electrostatic solver based on the semi-implicit scheme developed by [Barnes, Journal of Comp. Phys., 424 (2021), 109852](https://www.sciencedirect.com/science/article/pii/S0021999120306264?via%3Dihub) (see Appendix A of reference). The implementation was tested through the simulation of a Langmuir probe placed inside a uniform plasma. To this end a 2d simulation was performed with an initial uniform plasma with a conducting disk inserted in the center of the domain. The disk is kept at 0 V while Neumann boundary conditions are used for the domain boundary. Electrons and hydrogen ions were injected from all the domain boundaries using the `UniformFluxDistribution` in order to simulate the particle flux from a uniform plasma. Thermal boundaries were used for the particles with the same temperatures as the initial plasma particles. The expected outcome of this simulation is that a sheath develops around the "Langmuir probe" with value $V_s = 0.5T_e\ln\left(2\pi\frac{m_e}{2m_p}(1 + \frac{T_i}{T_e})\right) $. A pre-sheath of roughly $0.7T_e$ is also expected to form but the exact value of this depends on the domain size since the pre-sheath extends for many Debye lengths. The figure below shows an example outcome from a simulation as described above in which the semi-implicit factor was set to $C_{SI} = 10$. 
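For reference, the expected sheath potential can be evaluated directly from the expression above. A minimal sketch (the temperatures here are illustrative placeholders, not the parameters of the run shown below; hydrogen ions are assumed, as in the test described above):

```python
import numpy as np
from scipy import constants

# Illustrative temperatures in eV -- assumed values for demonstration only.
T_e = 2.0   # electron temperature (eV)
T_i = 0.1   # ion temperature (eV)

# V_s = 0.5 * T_e * ln( 2*pi * m_e/(2*m_p) * (1 + T_i/T_e) )
V_s = 0.5 * T_e * np.log(
    2.0 * np.pi * (constants.m_e / (2.0 * constants.m_p)) * (1.0 + T_i / T_e)
)
print(f"expected sheath potential: {V_s:.2f} V")  # negative, as expected
```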
![image](https://github.com/user-attachments/assets/de4c4f44-3eb1-4e4b-8297-4c047d1c0d98) Required before merging: - [x] Merging of https://github.com/AMReX-Codes/amrex/pull/3968 - [x] Add example to CI tests - [x] Update documentation --------- Signed-off-by: roelof-groenewald Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Thomas Marks --- Docs/source/refs.bib | 12 + Docs/source/usage/parameters.rst | 17 ++ Examples/Tests/CMakeLists.txt | 1 + .../CMakeLists.txt | 12 + .../analysis.py | 90 ++++++ ...effective_potential_electrostatic_picmi.py | 236 +++++++++++++++ GNUmakefile | 2 +- Python/pywarpx/picmi.py | 19 ++ ...fective_potential_electrostatic_picmi.json | 15 + Source/Diagnostics/Diagnostics.cpp | 5 +- .../ElectrostaticSolvers/CMakeLists.txt | 1 + .../EffectivePotentialES.H | 71 +++++ .../EffectivePotentialES.cpp | 258 +++++++++++++++++ .../ElectrostaticSolvers/Make.package | 1 + Source/Initialization/WarpXInitData.cpp | 4 + Source/Utils/WarpXAlgorithmSelection.H | 1 + Source/WarpX.cpp | 10 +- .../fields/EffectivePotentialPoissonSolver.H | 274 ++++++++++++++++++ Source/ablastr/fields/PoissonSolver.H | 4 +- cmake/dependencies/AMReX.cmake | 2 +- 20 files changed, 1028 insertions(+), 7 deletions(-) create mode 100644 Examples/Tests/effective_potential_electrostatic/CMakeLists.txt create mode 100755 Examples/Tests/effective_potential_electrostatic/analysis.py create mode 100644 Examples/Tests/effective_potential_electrostatic/inputs_test_3d_effective_potential_electrostatic_picmi.py create mode 100644 Regression/Checksum/benchmarks_json/test_3d_effective_potential_electrostatic_picmi.json create mode 100644 Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.H create mode 100644 Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.cpp create mode 100644 Source/ablastr/fields/EffectivePotentialPoissonSolver.H diff --git a/Docs/source/refs.bib b/Docs/source/refs.bib index 70b88a0abf8..02251c433d5 100644 --- a/Docs/source/refs.bib +++ b/Docs/source/refs.bib @@ -480,6 +480,18 @@ @article{VayFELB2009 url = {https://doi.org/10.1063/1.3080930}, } +@article{Barnes2021, + title = {Improved C1 shape functions for simplex meshes}, + author = {D.C. Barnes}, + journal = {Journal of Computational Physics}, + volume = {424}, + pages = {109852}, + year = {2021}, + issn = {0021-9991}, + doi = {https://doi.org/10.1016/j.jcp.2020.109852}, + url = {https://www.sciencedirect.com/science/article/pii/S0021999120306264}, +} + @article{Rhee1987, author = {Rhee, M. J. and Schneider, R. F. and Weidman, D. J.}, title = "{Simple time‐resolving Thomson spectrometer}", diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index c3756d44d96..3787acbd639 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -236,6 +236,23 @@ Overall simulation parameters \boldsymbol{\nabla}^2 \phi = - \rho/\epsilon_0 \qquad \boldsymbol{E} = - \boldsymbol{\nabla}\phi \\ \boldsymbol{\nabla}^2 \boldsymbol{A} = - \mu_0 \boldsymbol{j} \qquad \boldsymbol{B} = \boldsymbol{\nabla}\times\boldsymbol{A} + * ``labframe-effective-potential``: Poisson's equation is solved with a modified dielectric function + (resulting in an "effective potential") to create a semi-implicit scheme which is robust to the + numerical instability seen in explicit electrostatic PIC when :math:`\Delta t \omega_{pe} > 2`. 
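+    For example, this solver is selected in the input file via:
+
+    .. code-block:: text
+
+       warpx.do_electrostatic = labframe-effective-potential
+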
+ If this option is used the additional parameter ``warpx.effective_potential_factor`` can also be + specified to set the value of :math:`C_{EP}` (default 4). The method is stable for :math:`C_{EP} \geq 1` + regardless of :math:`\Delta t`, however, the larger :math:`C_{EP}` is set, the lower the numerical plasma + frequency will be and therefore care must be taken to not set it so high that the plasma mode + hybridizes with other modes of interest. + Details of the method can be found in Appendix A of :cite:t:`param-Barnes2021` (note that in that paper + the method is referred to as "semi-implicit electrostatic" but here it has been renamed to "effective potential" + to avoid confusion with the semi-implicit method of Chen et al.). + In short, the code solves: + + .. math:: + + \boldsymbol{\nabla}\cdot\left(1+\frac{C_{EP}}{4}\sum_{s \in \text{species}}(\omega_{ps}\Delta t)^2 \right)\boldsymbol{\nabla} \phi = - \rho/\epsilon_0 \qquad \boldsymbol{E} = - \boldsymbol{\nabla}\phi + * ``relativistic``: Poisson's equation is solved **for each species** in their respective rest frame. The corresponding field is mapped back to the simulation frame and will produce both E and B diff --git a/Examples/Tests/CMakeLists.txt b/Examples/Tests/CMakeLists.txt index 6fea9368e78..c4713123ae6 100644 --- a/Examples/Tests/CMakeLists.txt +++ b/Examples/Tests/CMakeLists.txt @@ -71,6 +71,7 @@ add_subdirectory(restart) add_subdirectory(restart_eb) add_subdirectory(rigid_injection) add_subdirectory(scraping) +add_subdirectory(effective_potential_electrostatic) add_subdirectory(silver_mueller) add_subdirectory(single_particle) add_subdirectory(space_charge_initialization) diff --git a/Examples/Tests/effective_potential_electrostatic/CMakeLists.txt b/Examples/Tests/effective_potential_electrostatic/CMakeLists.txt new file mode 100644 index 00000000000..a6545e8c5f3 --- /dev/null +++ b/Examples/Tests/effective_potential_electrostatic/CMakeLists.txt @@ -0,0 +1,12 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_3d_effective_potential_electrostatic_picmi # name + 3 # dims + 2 # nprocs + inputs_test_3d_effective_potential_electrostatic_picmi.py # inputs + analysis.py # analysis + diags/field_diag/ # output + OFF # dependency +) diff --git a/Examples/Tests/effective_potential_electrostatic/analysis.py b/Examples/Tests/effective_potential_electrostatic/analysis.py new file mode 100755 index 00000000000..5aa9b045af0 --- /dev/null +++ b/Examples/Tests/effective_potential_electrostatic/analysis.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 +# +# --- Analysis script for the effective potential Poisson solver test. This test +# --- is based on the adiabatic plasma expansion benchmark from Connor et al. (2021) +# --- doi.org/10.1109/TPS.2021.3072353. +# --- The electron density distribution (as a function of radius) is compared +# --- with the analytically calculated density based on the input parameters +# --- of the test simulation at each output timestep. 
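+# --- For reference, the analytic density used in the comparison below is
+# ---     n_e(r, t) = n_0 * (T(t) / T_e)**1.5 * exp(-r**2 / (2 * sigma(t)**2))
+# --- with T(t) = T_e / (1 + t**2 / tau**2) and
+# --- sigma(t) = sigma_0 * sqrt(1 + t**2 / tau**2),
+# --- as implemented in get_analytic_density() below.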
+ +import os +import sys + +import dill +import matplotlib.pyplot as plt +import numpy as np +from openpmd_viewer import OpenPMDTimeSeries +from scipy.interpolate import RegularGridInterpolator + +from pywarpx import picmi + +constants = picmi.constants + +# load simulation parameters +with open("sim_parameters.dpkl", "rb") as f: + sim = dill.load(f) + +# characteristic expansion time +tau = sim["sigma_0"] * np.sqrt(sim["M"] / (constants.kb * (sim["T_e"] + sim["T_i"]))) + + +def get_analytic_density(r, t): + expansion_factor = 1.0 + t**2 / tau**2 + T = sim["T_e"] / expansion_factor + sigma = sim["sigma_0"] * np.sqrt(expansion_factor) + return ( + sim["n_plasma"] * (T / sim["T_e"]) ** 1.5 * np.exp(-(r**2) / (2.0 * sigma**2)) + ) + + +def get_radial_function(field, info): + """Helper function to transform Cartesian data to polar data and average + over theta and phi.""" + r_grid = np.linspace(0, np.max(info.z) - 1e-3, 30) + theta_grid = np.linspace(0, 2 * np.pi, 40, endpoint=False) + phi_grid = np.linspace(0, np.pi, 20) + + r, t, p = np.meshgrid(r_grid, theta_grid, phi_grid, indexing="ij") + x_sp = r * np.sin(p) * np.cos(t) + y_sp = r * np.sin(p) * np.sin(t) + z_sp = r * np.cos(p) + + interp = RegularGridInterpolator((info.x, info.y, info.z), field) + field_sp = interp((x_sp, y_sp, z_sp)) + return r_grid, np.mean(field_sp, axis=(1, 2)) + + +diag_dir = "diags/field_diag" +ts = OpenPMDTimeSeries(diag_dir, check_all_files=True) + +rms_errors = np.zeros(len(ts.iterations)) + +for ii, it in enumerate(ts.iterations): + rho_e, info = ts.get_field(field="rho_electrons", iteration=it) + r_grid, n_e = get_radial_function(-rho_e / constants.q_e, info) + + n_e_analytic = get_analytic_density(r_grid, ts.t[ii]) + rms_errors[ii] = ( + np.sqrt(np.mean(np.sum((n_e - n_e_analytic) ** 2))) / n_e_analytic[0] + ) + + plt.plot(r_grid, n_e_analytic, "k--", alpha=0.6) + plt.plot(r_grid, n_e, label=f"t = {ts.t[ii]*1e6:.2f} $\mu$s") + +print("RMS error (%) in density: ", rms_errors) +assert np.all(rms_errors < 0.05) + +plt.ylabel("$n_e$ (m$^{-3}$)") +plt.xlabel("r (m)") +plt.grid() +plt.legend() +plt.show() + +if len(sys.argv) > 1: + sys.path.insert(1, "../../../../warpx/Regression/Checksum/") + import checksumAPI + + filename = sys.argv[1] + + test_name = os.path.split(os.getcwd())[1] + checksumAPI.evaluate_checksum(test_name, filename, output_format="openpmd") diff --git a/Examples/Tests/effective_potential_electrostatic/inputs_test_3d_effective_potential_electrostatic_picmi.py b/Examples/Tests/effective_potential_electrostatic/inputs_test_3d_effective_potential_electrostatic_picmi.py new file mode 100644 index 00000000000..27b1728b7b2 --- /dev/null +++ b/Examples/Tests/effective_potential_electrostatic/inputs_test_3d_effective_potential_electrostatic_picmi.py @@ -0,0 +1,236 @@ +#!/usr/bin/env python3 +# +# --- Test script for the effective potential Poisson solver. This test is based +# --- on the adiabatic plasma expansion benchmark from Connor et al. (2021) +# --- doi.org/10.1109/TPS.2021.3072353. +# --- In the benchmark an expanding plasma ball with Gaussian density distribution +# --- in the radial direction is simulated and the time evolution of the +# --- density of the electron species is compared to an approximate analytic solution. +# --- The example is modified slightly in the following ways: +# --- 1) The original example used an electromagnetic solver with absorbing +# --- boundaries while the present case encloses the plasma in a conducting +# --- sphere. 
+# --- 2) The domain and plasma parameters for this case has been modified to +# --- set the cell-size and time step such that the explicit electrostatic +# --- solver is unstable. + +import dill +import numpy as np +from mpi4py import MPI as mpi +from scipy.special import erf + +from pywarpx import picmi + +constants = picmi.constants + +comm = mpi.COMM_WORLD + +simulation = picmi.Simulation(warpx_serialize_initial_conditions=True, verbose=0) + +m_ion = 25 # Ion mass (electron masses) + +# Plasma parameters +n_plasma = 5e12 # Plasma density (m^-3) +sigma_0 = 22 # Initial Gaussian distribution standard deviation (Debye lengths) +T_e = 100.0 # Electron temperature (K) +T_i = 10.0 # Ion temperature (K) + +# Spatial domain +R = 86 # Radius of metallic sphere (Debye lengths) +NZ = 72 # Number of cells in each direction + +# Temporal domain (if not run as a CI test) +LT = 0.6e-6 # Simulation temporal length (s) + +# Numerical parameters +NPARTS = 500000 # Seed number of particles +DT = 0.8 # Time step (electron streaming) + +# Solver parameter +C_EP = 1.0 # Effective potential factor + +####################################################################### +# Calculate various plasma parameters based on the simulation input # +####################################################################### + +# Ion mass (kg) +M = m_ion * constants.m_e + +# Electron plasma frequency (Hz) +f_pe = np.sqrt(constants.q_e**2 * n_plasma / (constants.m_e * constants.ep0)) / ( + 2.0 * np.pi +) + +# Debye length (m) +lambda_e = np.sqrt(constants.ep0 * constants.kb * T_e / (n_plasma * constants.q_e**2)) + +# Thermal velocities (m/s) from v_th = np.sqrt(kT / m) +v_ti = np.sqrt(T_i * constants.kb / M) +v_te = np.sqrt(T_e * constants.kb / constants.m_e) + +R *= lambda_e +sigma_0 *= lambda_e + +dz = 2.0 * R / (NZ - 4) +Lz = dz * NZ +dt = DT * dz / v_te + +total_steps = int(LT / dt) +diag_steps = total_steps // 3 +total_steps = diag_steps * 3 + +# dump attributes needed for analysis to a dill pickle file +if comm.rank == 0: + parameter_dict = { + "sigma_0": sigma_0, + "M": M, + "T_i": T_i, + "T_e": T_e, + "n_plasma": n_plasma, + } + with open("sim_parameters.dpkl", "wb") as f: + dill.dump(parameter_dict, f) + +# print out plasma parameters +if comm.rank == 0: + print( + f"Initializing simulation with input parameters:\n" + f"\tT_e = {T_e:.1f} K\n" + f"\tT_i = {T_i:.1f} K\n" + f"\tn = {n_plasma:.1e} m^-3\n" + ) + print( + f"Plasma parameters:\n" + f"\tlambda_e = {lambda_e:.1e} m\n" + f"\tt_pe = {1.0/f_pe:.1e} s\n" + f"\tv_ti = {v_ti:.1e} m/s\n" + ) + print( + f"Numerical parameters:\n" + f"\tdz/lambda_e = {dz/lambda_e:.2f}\n" + f"\tdt*w_pe = {dt*f_pe*2.0*np.pi:.2f}\n" + f"\tdiag steps = {diag_steps:d}\n" + f"\ttotal steps = {total_steps:d}\n" + ) + + +####################################################################### +# Set geometry and boundary conditions # +####################################################################### + +grid = picmi.Cartesian3DGrid( + number_of_cells=[NZ] * 3, + lower_bound=[-Lz / 2.0] * 3, + upper_bound=[Lz / 2.0] * 3, + lower_boundary_conditions=["neumann"] * 3, + upper_boundary_conditions=["neumann"] * 3, + lower_boundary_conditions_particles=["absorbing"] * 3, + upper_boundary_conditions_particles=["absorbing"] * 3, + warpx_max_grid_size=NZ // 2, +) +simulation.time_step_size = dt +simulation.max_steps = total_steps +simulation.current_deposition_algo = "direct" +simulation.particle_shape = 1 +simulation.verbose = 1 + 
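+# The conducting sphere inserted below (radius R, held at 0 V) encloses the
+# plasma, replacing the absorbing boundaries of the original benchmark as
+# described in modification 1) at the top of this file.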
+####################################################################### +# Insert spherical boundary as EB # +####################################################################### + +embedded_boundary = picmi.EmbeddedBoundary( + implicit_function=f"(x**2+y**2+z**2-{R**2})", + potential=0.0, +) +simulation.embedded_boundary = embedded_boundary + +####################################################################### +# Field solver # +####################################################################### + +solver = picmi.ElectrostaticSolver( + grid=grid, + method="Multigrid", + warpx_effective_potential=True, + warpx_effective_potential_factor=C_EP, + warpx_self_fields_verbosity=0, +) +simulation.solver = solver + +####################################################################### +# Particle types setup # +####################################################################### + +total_parts = ( + n_plasma + * sigma_0**2 + * ( + (2.0 * np.pi) ** 1.5 * sigma_0 * erf(R / (np.sqrt(2) * sigma_0)) + + 4.0 * np.pi * R * np.exp(-(R**2) / (2.0 * sigma_0**2)) + ) +) + +electrons = picmi.Species( + name="electrons", + particle_type="electron", + initial_distribution=picmi.GaussianBunchDistribution( + n_physical_particles=total_parts, + rms_bunch_size=[sigma_0] * 3, + rms_velocity=[v_te] * 3, + ), +) +simulation.add_species( + electrons, + layout=picmi.PseudoRandomLayout(grid=grid, n_macroparticles=NPARTS), +) + +ions = picmi.Species( + name="ions", + charge="q_e", + mass=M, + initial_distribution=picmi.GaussianBunchDistribution( + n_physical_particles=total_parts, + rms_bunch_size=[sigma_0] * 3, + rms_velocity=[v_ti] * 3, + ), +) +simulation.add_species( + ions, + layout=picmi.PseudoRandomLayout(grid=grid, n_macroparticles=NPARTS), +) + +####################################################################### +# Add diagnostics # +####################################################################### + +field_diag = picmi.FieldDiagnostic( + name="field_diag", + grid=grid, + period=diag_steps, + data_list=[ + "E", + "J", + "T_electrons", + "T_ions", + "phi", + "rho_electrons", + "rho_ions", + ], + write_dir="diags", + warpx_format="openpmd", + warpx_openpmd_backend="h5", +) +simulation.add_diagnostic(field_diag) + +####################################################################### +# Initialize simulation # +####################################################################### + +simulation.initialize_inputs() +simulation.initialize_warpx() + +####################################################################### +# Execute simulation # +####################################################################### + +simulation.step() diff --git a/GNUmakefile b/GNUmakefile index 6298dd83369..ef3239e249d 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -43,7 +43,7 @@ USE_RZ = FALSE USE_EB = FALSE USE_LINEAR_SOLVERS_EM = TRUE -USE_LINEAR_SOLVERS_INCFLO = FALSE +USE_LINEAR_SOLVERS_INCFLO = TRUE WARPX_HOME := . 
include $(WARPX_HOME)/Source/Make.WarpX diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index c5946376d52..de3b26955b7 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1894,6 +1894,16 @@ class ElectrostaticSolver(picmistandard.PICMI_ElectrostaticSolver): warpx_self_fields_verbosity: integer, default=2 Level of verbosity for the lab frame solver + warpx_magnetostatic: bool, default=False + Whether to use the magnetostatic solver + + warpx_effective_potential: bool, default=False + Whether to use the effective potential Poisson solver (EP-PIC) + + warpx_effective_potential_factor: float, default=4 + If the effective potential Poisson solver is used, this sets the value + of C_EP (the method is marginally stable at C_EP = 1) + warpx_dt_update_interval: integer, optional (default = -1) How frequently the timestep is updated. Adaptive timestepping is disabled when this is <= 0. @@ -1910,6 +1920,10 @@ def init(self, kw): self.absolute_tolerance = kw.pop("warpx_absolute_tolerance", None) self.self_fields_verbosity = kw.pop("warpx_self_fields_verbosity", None) self.magnetostatic = kw.pop("warpx_magnetostatic", False) + self.effective_potential = kw.pop("warpx_effective_potential", False) + self.effective_potential_factor = kw.pop( + "warpx_effective_potential_factor", None + ) self.cfl = kw.pop("warpx_cfl", None) self.dt_update_interval = kw.pop("warpx_dt_update_interval", None) self.max_dt = kw.pop("warpx_max_dt", None) @@ -1930,6 +1944,11 @@ def solver_initialize_inputs(self): else: if self.magnetostatic: pywarpx.warpx.do_electrostatic = "labframe-electromagnetostatic" + elif self.effective_potential: + pywarpx.warpx.do_electrostatic = "labframe-effective-potential" + pywarpx.warpx.effective_potential_factor = ( + self.effective_potential_factor + ) else: pywarpx.warpx.do_electrostatic = "labframe" pywarpx.warpx.self_fields_required_precision = self.required_precision diff --git a/Regression/Checksum/benchmarks_json/test_3d_effective_potential_electrostatic_picmi.json b/Regression/Checksum/benchmarks_json/test_3d_effective_potential_electrostatic_picmi.json new file mode 100644 index 00000000000..b61da644114 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_effective_potential_electrostatic_picmi.json @@ -0,0 +1,15 @@ +{ + "lev=0": { + "Ex": 148992.08170563323, + "Ey": 148964.62980386653, + "Ez": 149085.76473996745, + "T_electrons": 279.2796849134268, + "T_ions": 32.09677271761641, + "jx": 31.077856013948246, + "jy": 31.425549493321245, + "jz": 31.424168300110658, + "phi": 2002.2518068289028, + "rho_electrons": 0.007909027251581852, + "rho_ions": 0.008266762306092332 + } +} diff --git a/Source/Diagnostics/Diagnostics.cpp b/Source/Diagnostics/Diagnostics.cpp index 0f659065185..a60aea40242 100644 --- a/Source/Diagnostics/Diagnostics.cpp +++ b/Source/Diagnostics/Diagnostics.cpp @@ -77,8 +77,9 @@ Diagnostics::BaseReadParameters () if (utils::algorithms::is_in(m_varnames_fields, "phi")){ WARPX_ALWAYS_ASSERT_WITH_MESSAGE( warpx.electrostatic_solver_id==ElectrostaticSolverAlgo::LabFrame || - warpx.electrostatic_solver_id==ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic, - "plot phi only works if do_electrostatic = labframe or do_electrostatic = labframe-electromagnetostatic"); + warpx.electrostatic_solver_id==ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic || + warpx.electrostatic_solver_id==ElectrostaticSolverAlgo::LabFrameEffectivePotential, + "plot phi only works if do_electrostatic = labframe, do_electrostatic = 
labframe-electromagnetostatic or do_electrostatic = labframe-effective-potential"); } // Sanity check if user requests to plot A diff --git a/Source/FieldSolver/ElectrostaticSolvers/CMakeLists.txt b/Source/FieldSolver/ElectrostaticSolvers/CMakeLists.txt index 39c4478c110..db728f6aaba 100644 --- a/Source/FieldSolver/ElectrostaticSolvers/CMakeLists.txt +++ b/Source/FieldSolver/ElectrostaticSolvers/CMakeLists.txt @@ -2,6 +2,7 @@ foreach(D IN LISTS WarpX_DIMS) warpx_set_suffix_dims(SD ${D}) target_sources(lib_${SD} PRIVATE + EffectivePotentialES.cpp ElectrostaticSolver.cpp LabFrameExplicitES.cpp PoissonBoundaryHandler.cpp diff --git a/Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.H b/Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.H new file mode 100644 index 00000000000..2ade923211c --- /dev/null +++ b/Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.H @@ -0,0 +1,71 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: Roelof Groenewald (TAE Technologies) + * + * License: BSD-3-Clause-LBNL + */ + +#ifndef WARPX_EFFECTIVEPOTENTIALES_H_ +#define WARPX_EFFECTIVEPOTENTIALES_H_ + +#include "ElectrostaticSolver.H" + +#include +#include + +class EffectivePotentialES final : public ElectrostaticSolver +{ +public: + + EffectivePotentialES (int nlevs_max) : ElectrostaticSolver (nlevs_max) { + ReadParameters(); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + (nlevs_max == 1), + "Effective potential electrostatic solver only supports one level at present" + ); + } + + void InitData () override; + + void ComputeSpaceChargeField ( + ablastr::fields::MultiFabRegister& fields, + MultiParticleContainer& mpc, + MultiFluidContainer* mfl, + int max_level) override; + + void ComputeSigma ( amrex::MultiFab& sigma ) const; + + /** + * Compute the potential `phi` by solving the semi-implicit Poisson equation using the Effective Potential method + * with `rho` as the source. + * More specifically, this solves the equation + * \f[ + * \vec{\nabla}\cdot(\sigma\vec{\nabla}) \phi = -\frac{\rho}{\epsilon_0} + * \f] + * \param[out] phi The potential to be computed by this function + * \param[in] rho The total charge density + * \param[in] sigma Represents the modified dielectric + * \param[in] required_precision The relative convergence threshold for the MLMG solver + * \param[in] absolute_tolerance The absolute convergence threshold for the MLMG solver + * \param[in] max_iters The maximum number of iterations allowed for the MLMG solver + * \param[in] verbosity The verbosity setting for the MLMG solver + */ + void computePhi ( + ablastr::fields::MultiLevelScalarField const& rho, + ablastr::fields::MultiLevelScalarField const& phi + ) const; + void computePhi ( + ablastr::fields::MultiLevelScalarField const& rho, + ablastr::fields::MultiLevelScalarField const& phi, + amrex::MultiFab const& sigma, + amrex::Real required_precision, + amrex::Real absolute_tolerance, + int max_iters, + int verbosity + ) const; + +}; + +#endif // WARPX_EFFECTIVEPOTENTIALES_H_ diff --git a/Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.cpp b/Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.cpp new file mode 100644 index 00000000000..0a5330b049d --- /dev/null +++ b/Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.cpp @@ -0,0 +1,258 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. 
+ * + * Authors: Roelof Groenewald (TAE Technologies) + * + * License: BSD-3-Clause-LBNL + */ + +#include "EffectivePotentialES.H" +#include "Fluids/MultiFluidContainer_fwd.H" +#include "EmbeddedBoundary/Enabled.H" +#include "Fields.H" +#include "Particles/MultiParticleContainer_fwd.H" +#include "Utils/Parser/ParserUtils.H" +#include "WarpX.H" + +using namespace amrex; + +void EffectivePotentialES::InitData() { + auto & warpx = WarpX::GetInstance(); + m_poisson_boundary_handler->DefinePhiBCs(warpx.Geom(0)); +} + +void EffectivePotentialES::ComputeSpaceChargeField ( + ablastr::fields::MultiFabRegister& fields, + MultiParticleContainer& mpc, + [[maybe_unused]] MultiFluidContainer* mfl, + int max_level) +{ + WARPX_PROFILE("EffectivePotentialES::ComputeSpaceChargeField"); + + using ablastr::fields::MultiLevelScalarField; + using ablastr::fields::MultiLevelVectorField; + using warpx::fields::FieldType; + + // grab the simulation fields + const MultiLevelScalarField rho_fp = fields.get_mr_levels(FieldType::rho_fp, max_level); + const MultiLevelScalarField rho_cp = fields.get_mr_levels(FieldType::rho_cp, max_level); + const MultiLevelScalarField phi_fp = fields.get_mr_levels(FieldType::phi_fp, max_level); + const MultiLevelVectorField Efield_fp = fields.get_mr_levels_alldirs(FieldType::Efield_fp, max_level); + + mpc.DepositCharge(rho_fp, 0.0_rt); + if (mfl) { + const int lev = 0; + mfl->DepositCharge(fields, *rho_fp[lev], lev); + } + + // Apply filter, perform MPI exchange, interpolate across levels + const Vector > rho_buf(num_levels); + auto & warpx = WarpX::GetInstance(); + warpx.SyncRho( rho_fp, rho_cp, amrex::GetVecOfPtrs(rho_buf) ); + +#ifndef WARPX_DIM_RZ + for (int lev = 0; lev < num_levels; lev++) { + // Reflect density over PEC boundaries, if needed. + warpx.ApplyRhofieldBoundary(lev, rho_fp[lev], PatchType::fine); + } +#endif + + // set the boundary potentials appropriately + setPhiBC(phi_fp, warpx.gett_new(0)); + + // perform phi calculation + computePhi(rho_fp, phi_fp); + + // Compute the electric field. Note that if an EB is used the electric + // field will be calculated in the computePhi call. + const std::array beta = {0._rt}; + if (!EB::enabled()) { computeE( Efield_fp, phi_fp, beta ); } +} + +void EffectivePotentialES::computePhi ( + ablastr::fields::MultiLevelScalarField const& rho, + ablastr::fields::MultiLevelScalarField const& phi) const +{ + // Calculate the mass enhancement factor - see Appendix A of + // Barnes, Journal of Comp. Phys., 424 (2021), 109852. + // The "sigma" multifab stores the dressing of the Poisson equation. It + // is a cell-centered multifab. + auto const& ba = convert(rho[0]->boxArray(), IntVect(AMREX_D_DECL(0,0,0))); + MultiFab sigma(ba, rho[0]->DistributionMap(), 1, rho[0]->nGrowVect()); + ComputeSigma(sigma); + + // Use the AMREX MLMG solver + computePhi(rho, phi, sigma, self_fields_required_precision, + self_fields_absolute_tolerance, self_fields_max_iters, + self_fields_verbosity); +} + +void EffectivePotentialES::ComputeSigma (MultiFab& sigma) const +{ + // Reset sigma to 1 + sigma.setVal(1.0_rt); + + // Get the user set value for C_SI (defaults to 4) + amrex::Real C_SI = 4.0; + const ParmParse pp_warpx("warpx"); + utils::parser::queryWithParser(pp_warpx, "effective_potential_factor", C_SI); + + int const lev = 0; + + // sigma is a cell-centered array + amrex::GpuArray const cell_centered = {0, 0, 0}; + // The "coarsening is just 1 i.e. 
no coarsening" + amrex::GpuArray const coarsen = {1, 1, 1}; + + // GetChargeDensity returns a nodal multifab + // Below we set all the unused dimensions to have cell-centered values for + // rho since these values will be interpolated onto a cell-centered grid + // - if this is not done the Interp function returns nonsense values. +#if defined(WARPX_DIM_3D) + amrex::GpuArray const nodal = {1, 1, 1}; +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + amrex::GpuArray const nodal = {1, 1, 0}; +#elif defined(WARPX_DIM_1D_Z) + amrex::GpuArray const nodal = {1, 0, 0}; +#endif + + auto& warpx = WarpX::GetInstance(); + auto& mypc = warpx.GetPartContainer(); + + // The effective potential dielectric function is given by + // \varepsilon_{SI} = \varepsilon * (1 + \sum_{i in species} C_{SI}*(w_pi * dt)^2/4) + // Note the use of the plasma frequency in rad/s (not Hz) and the factor of 1/4, + // these choices make it so that C_SI = 1 is the marginal stability threshold. + auto mult_factor = ( + C_SI * warpx.getdt(lev) * warpx.getdt(lev) / (4._rt * PhysConst::ep0) + ); + + // Loop over each species to calculate the Poisson equation dressing + for (auto const& pc : mypc) { + // grab the charge density for this species + auto rho = pc->GetChargeDensity(lev, false); + + // Handle the parallel transfer of guard cells and apply filtering + warpx.ApplyFilterandSumBoundaryRho(lev, lev, *rho, 0, rho->nComp()); + + // get multiplication factor for this species + auto const mult_factor_pc = mult_factor * pc->getCharge() / pc->getMass(); + + // update sigma +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + for ( MFIter mfi(sigma, TilingIfNotGPU()); mfi.isValid(); ++mfi ) { + Array4 const& sigma_arr = sigma.array(mfi); + Array4 const& rho_arr = rho->const_array(mfi); + + // Loop over the cells and update the sigma field + amrex::ParallelFor(mfi.tilebox(), [=] AMREX_GPU_DEVICE (int i, int j, int k){ + // Interpolate rho to cell-centered value + auto const rho_cc = ablastr::coarsen::sample::Interp( + rho_arr, nodal, cell_centered, coarsen, i, j, k, 0 + ); + // add species term to sigma: + // C_SI * w_p^2 * dt^2 / 4 = C_SI / 4 * q*rho/(m*eps0) * dt^2 + sigma_arr(i, j, k, 0) += mult_factor_pc * rho_cc; + }); + + } + } +} + + +void EffectivePotentialES::computePhi ( + ablastr::fields::MultiLevelScalarField const& rho, + ablastr::fields::MultiLevelScalarField const& phi, + amrex::MultiFab const& sigma, + amrex::Real required_precision, + amrex::Real absolute_tolerance, + int max_iters, + int verbosity +) const +{ + using ablastr::fields::Direction; + using warpx::fields::FieldType; + + // create a vector to our fields, sorted by level + amrex::Vector sorted_rho; + amrex::Vector sorted_phi; + for (int lev = 0; lev < num_levels; ++lev) { + sorted_rho.emplace_back(rho[lev]); + sorted_phi.emplace_back(phi[lev]); + } + + std::optional post_phi_calculation; +#ifdef AMREX_USE_EB + // TODO: double check no overhead occurs on "m_eb_enabled == false" + std::optional > eb_farray_box_factory; +#else + std::optional > const eb_farray_box_factory; +#endif + auto & warpx = WarpX::GetInstance(); + if (EB::enabled()) + { + // EB: use AMReX to directly calculate the electric field since with EB's the + // simple finite difference scheme in WarpX::computeE sometimes fails + + // TODO: maybe make this a helper function or pass Efield_fp directly + amrex::Vector< + amrex::Array + > e_field; + for (int lev = 0; lev < num_levels; ++lev) { + e_field.push_back( +#if defined(WARPX_DIM_1D_Z) + 
amrex::Array{ + warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev) + } +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + amrex::Array{ + warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, lev), + warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev) + } +#elif defined(WARPX_DIM_3D) + amrex::Array{ + warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, lev), + warpx.m_fields.get(FieldType::Efield_fp, Direction{1}, lev), + warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev) + } +#endif + ); + } + post_phi_calculation = EBCalcEfromPhiPerLevel(e_field); + +#ifdef AMREX_USE_EB + amrex::Vector< + amrex::EBFArrayBoxFactory const * + > factories; + for (int lev = 0; lev < num_levels; ++lev) { + factories.push_back(&warpx.fieldEBFactory(lev)); + } + eb_farray_box_factory = factories; +#endif + } + + ablastr::fields::computeEffectivePotentialPhi( + sorted_rho, + sorted_phi, + sigma, + required_precision, + absolute_tolerance, + max_iters, + verbosity, + warpx.Geom(), + warpx.DistributionMap(), + warpx.boxArray(), + WarpX::grid_type, + false, + EB::enabled(), + WarpX::do_single_precision_comms, + warpx.refRatio(), + post_phi_calculation, + *m_poisson_boundary_handler, + warpx.gett_new(0), + eb_farray_box_factory + ); +} diff --git a/Source/FieldSolver/ElectrostaticSolvers/Make.package b/Source/FieldSolver/ElectrostaticSolvers/Make.package index a1d2d78dbb0..673f1c8aa7a 100644 --- a/Source/FieldSolver/ElectrostaticSolvers/Make.package +++ b/Source/FieldSolver/ElectrostaticSolvers/Make.package @@ -1,6 +1,7 @@ CEXE_sources += PoissonBoundaryHandler.cpp CEXE_sources += LabFrameExplicitES.cpp CEXE_sources += RelativisticExplicitES.cpp +CEXE_sources += EffectivePotentialES.cpp CEXE_sources += ElectrostaticSolver.cpp VPATH_LOCATIONS += $(WARPX_HOME)/Source/FieldSolver/ElectrostaticSolvers diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 14d189d0dd5..daecfac8bed 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -278,6 +278,10 @@ WarpX::PrintMainPICparameters () amrex::Print() << "Operation mode: | Electrostatic" << "\n"; amrex::Print() << " | - laboratory frame, electrostatic + magnetostatic" << "\n"; } + else if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameEffectivePotential){ + amrex::Print() << "Operation mode: | Electrostatic" << "\n"; + amrex::Print() << " | - laboratory frame, effective potential scheme" << "\n"; + } else{ amrex::Print() << "Operation mode: | Electromagnetic" << "\n"; } diff --git a/Source/Utils/WarpXAlgorithmSelection.H b/Source/Utils/WarpXAlgorithmSelection.H index 98d2430afc3..187be924666 100644 --- a/Source/Utils/WarpXAlgorithmSelection.H +++ b/Source/Utils/WarpXAlgorithmSelection.H @@ -62,6 +62,7 @@ AMREX_ENUM(ElectrostaticSolverAlgo, Relativistic, LabFrameElectroMagnetostatic, LabFrame, + LabFrameEffectivePotential, Default = None); AMREX_ENUM(PoissonSolverAlgo, diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 772131ea0e7..176a6f63ddf 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -20,6 +20,7 @@ #include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H" #include "FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H" #include "FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H" +#include "FieldSolver/ElectrostaticSolvers/EffectivePotentialES.H" #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H" #include 
"FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.H" #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H" @@ -332,6 +333,11 @@ WarpX::WarpX () { m_electrostatic_solver = std::make_unique(nlevs_max); } + // Initialize the effective potential electrostatic solver if required + else if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameEffectivePotential) + { + m_electrostatic_solver = std::make_unique(nlevs_max); + } else { m_electrostatic_solver = std::make_unique(nlevs_max); @@ -2369,6 +2375,7 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm int rho_ncomps = 0; if( (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame) || (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) || + (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameEffectivePotential) || (electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) ) { rho_ncomps = ncomps; } @@ -2389,7 +2396,8 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm } if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame || - electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) + electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic || + electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameEffectivePotential ) { const IntVect ngPhi = IntVect( AMREX_D_DECL(1,1,1) ); m_fields.alloc_init(FieldType::phi_fp, lev, amrex::convert(ba, phi_nodal_flag), dm, diff --git a/Source/ablastr/fields/EffectivePotentialPoissonSolver.H b/Source/ablastr/fields/EffectivePotentialPoissonSolver.H new file mode 100644 index 00000000000..c6b5d2c5bcc --- /dev/null +++ b/Source/ablastr/fields/EffectivePotentialPoissonSolver.H @@ -0,0 +1,274 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: Roelof Groenewald (TAE Technologies) + * + * License: BSD-3-Clause-LBNL + */ +/* + * This file was copied and edited from PoissonSolver.H in the same directory. + */ +#ifndef ABLASTR_EFFECTIVE_POTENTIAL_POISSON_SOLVER_H +#define ABLASTR_EFFECTIVE_POTENTIAL_POISSON_SOLVER_H + +#include +#include +#include +#include +#include +#include +#include +#include "PoissonSolver.H" + +#if defined(WARPX_USE_FFT) && defined(WARPX_DIM_3D) +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef AMREX_USE_EB +# include +#endif + +#include +#include + + +namespace ablastr::fields { + +/** Compute the potential `phi` by solving the Poisson equation with a modifed dielectric function + * + * Uses `rho` as a source. This uses the AMReX solver. 
+ * + * More specifically, this solves the equation + * \f[ + * \nabla \cdot \sigma \nabla \phi = - \rho/\epsilon_0 + * \f] + * + * \tparam T_PostPhiCalculationFunctor a calculation per level directly after phi was calculated + * \tparam T_BoundaryHandler handler for boundary conditions, for example @see ElectrostaticSolver::PoissonBoundaryHandler + * \tparam T_FArrayBoxFactory usually nothing or an amrex::EBFArrayBoxFactory (EB ONLY) + * \param[in] rho The charge density a given species + * \param[out] phi The potential to be computed by this function + * \param[in] sigma The matrix representing the mass operator used to lower the local plasma frequency + * \param[in] relative_tolerance The relative convergence threshold for the MLMG solver + * \param[in] absolute_tolerance The absolute convergence threshold for the MLMG solver + * \param[in] max_iters The maximum number of iterations allowed for the MLMG solver + * \param[in] verbosity The verbosity setting for the MLMG solver + * \param[in] geom the geometry per level (e.g., from AmrMesh) + * \param[in] dmap the distribution mapping per level (e.g., from AmrMesh) + * \param[in] grids the grids per level (e.g., from AmrMesh) + * \param[in] is_solver_igf_on_lev0 boolean to select the Poisson solver: 1 for FFT on level 0 & Multigrid on other levels, 0 for Multigrid on all levels + * \param[in] do_single_precision_comms perform communications in single precision + * \param[in] rel_ref_ratio mesh refinement ratio between levels (default: 1) + * \param[in] post_phi_calculation perform a calculation per level directly after phi was calculated; required for embedded boundaries (default: none) + * \param[in] boundary_handler a handler for boundary conditions, for example @see ElectrostaticSolver::PoissonBoundaryHandler + * \param[in] current_time the current time; required for embedded boundaries (default: none) + * \param[in] eb_farray_box_factory a factory for field data, @see amrex::EBFArrayBoxFactory; required for embedded boundaries (default: none) + */ +template< + typename T_PostPhiCalculationFunctor = std::nullopt_t, + typename T_BoundaryHandler = std::nullopt_t, + typename T_FArrayBoxFactory = void +> +void +computeEffectivePotentialPhi ( + ablastr::fields::MultiLevelScalarField const& rho, + ablastr::fields::MultiLevelScalarField const& phi, + amrex::MultiFab const & sigma, + amrex::Real relative_tolerance, + amrex::Real absolute_tolerance, + int max_iters, + int verbosity, + amrex::Vector const& geom, + amrex::Vector const& dmap, + amrex::Vector const& grids, + [[maybe_unused]] utils::enums::GridType grid_type, + bool is_solver_igf_on_lev0, + bool eb_enabled = false, + bool do_single_precision_comms = false, + std::optional > rel_ref_ratio = std::nullopt, + [[maybe_unused]] T_PostPhiCalculationFunctor post_phi_calculation = std::nullopt, + [[maybe_unused]] T_BoundaryHandler const boundary_handler = std::nullopt, + [[maybe_unused]] std::optional current_time = std::nullopt, // only used for EB + [[maybe_unused]] std::optional > eb_farray_box_factory = std::nullopt // only used for EB +) { + using namespace amrex::literals; + + ABLASTR_PROFILE("computeEffectivePotentialPhi"); + + if (!rel_ref_ratio.has_value()) { + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE(rho.size() == 1u, + "rel_ref_ratio must be set if mesh-refinement is used"); + rel_ref_ratio = amrex::Vector{{amrex::IntVect(AMREX_D_DECL(1, 1, 1))}}; + } + +#if !defined(AMREX_USE_EB) + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE(!eb_enabled, + "Embedded boundary solve requested but not compiled 
in"); +#endif + if (eb_enabled && std::is_same_v) { + throw std::runtime_error("EB requested by eb_farray_box_factory not provided!"); + } + + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( !is_solver_igf_on_lev0, + "FFT solver cannot be used with effective potential Poisson solve"); + +#ifdef WARPX_DIM_RZ + constexpr bool is_rz = true; +#else + constexpr bool is_rz = false; +#endif + + auto const finest_level = static_cast(rho.size() - 1); + + // determine if rho is zero everywhere + const amrex::Real max_norm_b = getMaxNormRho( + amrex::GetVecOfConstPtrs(rho), finest_level, absolute_tolerance); + + const amrex::LPInfo info; + + for (int lev=0; lev<=finest_level; lev++) { + + // Use the Multigrid (MLMG) solver but first scale rho appropriately + using namespace ablastr::constant::SI; + rho[lev]->mult(-1._rt/ep0); + + std::unique_ptr linop; + // In the presence of EB or RZ the EB enabled linear solver is used + if (eb_enabled) + { +#if defined(AMREX_USE_EB) + auto linop_nodelap = std::make_unique(); + linop_nodelap->define( + amrex::Vector{geom[lev]}, + amrex::Vector{grids[lev]}, + amrex::Vector{dmap[lev]}, + info, + amrex::Vector{eb_farray_box_factory.value()[lev]} + ); + if constexpr (!std::is_same_v) { + // if the EB potential only depends on time, the potential can be passed + // as a float instead of a callable + if (boundary_handler.phi_EB_only_t) { + linop_nodelap->setEBDirichlet(boundary_handler.potential_eb_t(current_time.value())); + } else { + linop_nodelap->setEBDirichlet(boundary_handler.getPhiEB(current_time.value())); + } + } + linop_nodelap->setSigma(lev, sigma); + linop = std::move(linop_nodelap); +#endif + } + else if (is_rz) + { + auto linop_nodelap = std::make_unique(); + linop_nodelap->define( + amrex::Vector{geom[lev]}, + amrex::Vector{grids[lev]}, + amrex::Vector{dmap[lev]}, + info + ); + linop_nodelap->setRZ(true); + linop_nodelap->setSigma(lev, sigma); + linop = std::move(linop_nodelap); + } + else + { + auto linop_nodelap = std::make_unique(); + linop_nodelap->define( + amrex::Vector{geom[lev]}, + amrex::Vector{grids[lev]}, + amrex::Vector{dmap[lev]}, + info + ); + linop_nodelap->setSigma(lev, sigma); + linop = std::move(linop_nodelap); + } + + // Set domain boundary conditions + if constexpr (std::is_same_v) { + amrex::Array const lobc = {AMREX_D_DECL( + amrex::LinOpBCType::Dirichlet, + amrex::LinOpBCType::Dirichlet, + amrex::LinOpBCType::Dirichlet + )}; + amrex::Array const hibc = lobc; + linop->setDomainBC(lobc, hibc); + } else { + linop->setDomainBC(boundary_handler.lobc, boundary_handler.hibc); + } + + // Solve the Poisson equation + amrex::MLMG mlmg(*linop); // actual solver defined here + mlmg.setVerbose(verbosity); + mlmg.setMaxIter(max_iters); + mlmg.setAlwaysUseBNorm((max_norm_b > 0)); + + const int ng = int(grid_type == utils::enums::GridType::Collocated); // ghost cells + if (ng) { + // In this case, computeE needs to use ghost nodes data. So we + // ask MLMG to fill BC for us after it solves the problem. 
+ mlmg.setFinalFillBC(true); + } + + // Solve Poisson equation at lev + mlmg.solve( {phi[lev]}, {rho[lev]}, + relative_tolerance, absolute_tolerance ); + + // needed for solving the levels by levels: + // - coarser level is initial guess for finer level + // - coarser level provides boundary values for finer level patch + // Interpolation from phi[lev] to phi[lev+1] + // (This provides both the boundary conditions and initial guess for phi[lev+1]) + if (lev < finest_level) { + const amrex::IntVect& refratio = rel_ref_ratio.value()[lev]; + const int ncomp = linop->getNComp(); + interpolatePhiBetweenLevels(phi[lev], + phi[lev+1], + geom[lev], + do_single_precision_comms, + refratio, + ncomp, + ng); + } + + // Run additional operations, such as calculation of the E field for embedded boundaries + if constexpr (!std::is_same::value) { + if (post_phi_calculation.has_value()) { + post_phi_calculation.value()(mlmg, lev); + } + } + rho[lev]->mult(-ep0); // Multiply rho by epsilon again + } // loop over lev(els) +} + +} // namespace ablastr::fields + +#endif // ABLASTR_EFFECTIVE_POTENTIAL_POISSON_SOLVER_H diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H index d7eeecead1b..aa9288fe950 100644 --- a/Source/ablastr/fields/PoissonSolver.H +++ b/Source/ablastr/fields/PoissonSolver.H @@ -178,12 +178,12 @@ inline void interpolatePhiBetweenLevels ( * \param[in] dmap the distribution mapping per level (e.g., from AmrMesh) * \param[in] grids the grids per level (e.g., from AmrMesh) * \param[in] grid_type Integer that corresponds to the type of grid used in the simulation (collocated, staggered, hybrid) - * \param[in] boundary_handler a handler for boundary conditions, for example @see ElectrostaticSolver::PoissonBoundaryHandler * \param[in] is_solver_igf_on_lev0 boolean to select the Poisson solver: 1 for FFT on level 0 & Multigrid on other levels, 0 for Multigrid on all levels * \param[in] eb_enabled solve with embedded boundaries * \param[in] do_single_precision_comms perform communications in single precision * \param[in] rel_ref_ratio mesh refinement ratio between levels (default: 1) * \param[in] post_phi_calculation perform a calculation per level directly after phi was calculated; required for embedded boundaries (default: none) + * \param[in] boundary_handler a handler for boundary conditions, for example @see ElectrostaticSolver::PoissonBoundaryHandler * \param[in] current_time the current time; required for embedded boundaries (default: none) * \param[in] eb_farray_box_factory a factory for field data, @see amrex::EBFArrayBoxFactory; required for embedded boundaries (default: none) */ @@ -210,7 +210,7 @@ computePhi ( bool do_single_precision_comms = false, std::optional > rel_ref_ratio = std::nullopt, [[maybe_unused]] T_PostPhiCalculationFunctor post_phi_calculation = std::nullopt, - [[maybe_unused]] T_BoundaryHandler const boundary_handler = std::nullopt, // only used for EB + [[maybe_unused]] T_BoundaryHandler const boundary_handler = std::nullopt, [[maybe_unused]] std::optional current_time = std::nullopt, // only used for EB [[maybe_unused]] std::optional > eb_farray_box_factory = std::nullopt // only used for EB ) diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 8a7d11d5b1d..3733b729004 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -99,7 +99,7 @@ macro(find_amrex) set(AMReX_PROBINIT OFF CACHE INTERNAL "") set(AMReX_TINY_PROFILE ON CACHE BOOL "") set(AMReX_LINEAR_SOLVERS_EM ON CACHE 
INTERNAL "") - set(AMReX_LINEAR_SOLVERS_INCFLO OFF CACHE INTERNAL "") + set(AMReX_LINEAR_SOLVERS_INCFLO ON CACHE INTERNAL "") if(WarpX_ASCENT OR WarpX_SENSEI) set(AMReX_GPU_RDC ON CACHE BOOL "") From 540268f2c05d46b90ac8f33718efa63b47687cae Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Wed, 11 Dec 2024 13:55:18 -0800 Subject: [PATCH 3/6] Add `verbosity` flag for diagnostic output print statements (#5503) This PR passes the `warpx.verbose` value to diagnostics to set whether a message is output when the diagnostic is active. Specifically, this helps with the time-averaged diagnostic where currently a message is print at every step that averaging is done (commonly every step). --------- Signed-off-by: roelof-groenewald --- Source/Diagnostics/BTDiagnostics.cpp | 2 +- .../BoundaryScrapingDiagnostics.cpp | 2 +- Source/Diagnostics/Diagnostics.H | 2 ++ Source/Diagnostics/Diagnostics.cpp | 3 +++ Source/Diagnostics/FlushFormats/FlushFormat.H | 1 + .../FlushFormats/FlushFormatAscent.H | 3 ++- .../FlushFormats/FlushFormatAscent.cpp | 7 ++++-- .../FlushFormats/FlushFormatCatalyst.H | 1 + .../FlushFormats/FlushFormatCatalyst.cpp | 1 + .../FlushFormats/FlushFormatCheckpoint.H | 1 + .../FlushFormats/FlushFormatCheckpoint.cpp | 7 ++++-- .../FlushFormats/FlushFormatOpenPMD.H | 3 ++- .../FlushFormats/FlushFormatOpenPMD.cpp | 21 +++++++++------- .../FlushFormats/FlushFormatPlotfile.H | 1 + .../FlushFormats/FlushFormatPlotfile.cpp | 25 +++++++++++-------- .../FlushFormats/FlushFormatSensei.H | 1 + .../FlushFormats/FlushFormatSensei.cpp | 8 +++--- Source/Diagnostics/FullDiagnostics.cpp | 8 +++--- 18 files changed, 63 insertions(+), 34 deletions(-) diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index 4939e2fb207..09167452c1a 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -1091,7 +1091,7 @@ BTDiagnostics::Flush (int i_buffer, bool force_flush) m_varnames, m_mf_output.at(i_buffer), m_geom_output.at(i_buffer), warpx.getistep(), labtime, m_output_species.at(i_buffer), nlev_output, file_name, m_file_min_digits, - m_plot_raw_fields, m_plot_raw_fields_guards, + m_plot_raw_fields, m_plot_raw_fields_guards, m_verbose, use_pinned_pc, isBTD, i_buffer, m_buffer_flush_counter.at(i_buffer), m_max_buffer_multifabs.at(i_buffer), m_geom_snapshot.at(i_buffer).at(0), isLastBTDFlush); diff --git a/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp b/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp index 8df58b6fb28..bcccda48c18 100644 --- a/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp +++ b/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp @@ -153,7 +153,7 @@ BoundaryScrapingDiagnostics::Flush (int i_buffer, bool /* force_flush */) warpx.gett_new(0), m_output_species.at(i_buffer), nlev_output, file_prefix, - m_file_min_digits, false, false, use_pinned_pc, isBTD, + m_file_min_digits, false, false, m_verbose, use_pinned_pc, isBTD, warpx.getistep(0), bufferID, numBTDBuffers, geom, isLastBTD); diff --git a/Source/Diagnostics/Diagnostics.H b/Source/Diagnostics/Diagnostics.H index d0c70e76c1f..a078bab3597 100644 --- a/Source/Diagnostics/Diagnostics.H +++ b/Source/Diagnostics/Diagnostics.H @@ -190,6 +190,8 @@ public: protected: /** Read Parameters of the base Diagnostics class */ bool BaseReadParameters (); + /** Whether to output a message when diagnostics are output */ + int m_verbose = 2; /** Initialize member variables of the base Diagnostics class. 
*/ void InitBaseData (); /** Initialize m_mf_output vectors and data required to construct the buffers diff --git a/Source/Diagnostics/Diagnostics.cpp b/Source/Diagnostics/Diagnostics.cpp index a60aea40242..2b3b6d7df61 100644 --- a/Source/Diagnostics/Diagnostics.cpp +++ b/Source/Diagnostics/Diagnostics.cpp @@ -62,6 +62,9 @@ Diagnostics::BaseReadParameters () std::string dims; pp_geometry.get("dims", dims); + const amrex::ParmParse pp_warpx("warpx"); + pp_warpx.query("verbose", m_verbose); + // Query list of grid fields to write to output const bool varnames_specified = pp_diag_name.queryarr("fields_to_plot", m_varnames_fields); if (!varnames_specified){ diff --git a/Source/Diagnostics/FlushFormats/FlushFormat.H b/Source/Diagnostics/FlushFormats/FlushFormat.H index be1322bd61b..f0a83f7a24b 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormat.H +++ b/Source/Diagnostics/FlushFormats/FlushFormat.H @@ -19,6 +19,7 @@ public: std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose = 2, bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, int bufferID = 1, int numBuffers = 1, diff --git a/Source/Diagnostics/FlushFormats/FlushFormatAscent.H b/Source/Diagnostics/FlushFormats/FlushFormatAscent.H index a5a10d77bdc..86ad4b09cfb 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatAscent.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatAscent.H @@ -37,11 +37,12 @@ public: std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose = 2, bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, int bufferID = 1, int numBuffers = 1, const amrex::Geometry& full_BTD_snapshot = amrex::Geometry(), - bool isLastBTDFlush = false ) const override; + bool isLastBTDFlush = false) const override; FlushFormatAscent () = default; ~FlushFormatAscent() override = default; diff --git a/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp b/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp index 36f0ef05faa..5a405568aa7 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp @@ -18,6 +18,7 @@ FlushFormatAscent::WriteToFile ( const amrex::Vector<ParticleDiag>& particle_diags, int nlev, const std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose, const bool /*use_pinned_pc*/, bool isBTD, int /*snapshotID*/, int /*bufferID*/, int /*numBuffers*/, const amrex::Geometry& /*full_BTD_snapshot*/, @@ -32,7 +33,9 @@ FlushFormatAscent::WriteToFile ( "In-situ visualization is not currently supported for back-transformed diagnostics."); const std::string& filename = amrex::Concatenate(prefix, iteration[0], file_min_digits); - amrex::Print() << Utils::TextMsg::Info("Writing Ascent file " + filename); + if (verbose > 0) { + amrex::Print() << Utils::TextMsg::Info("Writing Ascent file " + filename); + } // wrap mesh data WARPX_PROFILE_VAR("FlushFormatAscent::WriteToFile::MultiLevelToBlueprint", prof_ascent_mesh_blueprint); @@ -67,7 +70,7 @@ FlushFormatAscent::WriteToFile ( #else amrex::ignore_unused(varnames, mf, geom, iteration, time, - particle_diags, nlev, file_min_digits, isBTD); + particle_diags, nlev, file_min_digits, verbose, isBTD); #endif // AMREX_USE_ASCENT amrex::ignore_unused(prefix, plot_raw_fields, plot_raw_fields_guards); } diff --git a/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.H b/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.H index 6974bf731a6..c5f3b9148c1 100644 
--- a/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.H @@ -43,6 +43,7 @@ public: std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose = 2, bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, int bufferID = 1, int numBuffers = 1, diff --git a/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.cpp b/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.cpp index 425c13de6a4..3e542f9f871 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.cpp @@ -127,6 +127,7 @@ FlushFormatCatalyst::WriteToFile ( const amrex::Vector<ParticleDiag>& particle_diags, int nlev, const std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int /*verbose*/, bool /*use_pinned_pc*/, bool isBTD, int /*snapshotID*/, int /*bufferID*/, int /*numBuffers*/, const amrex::Geometry& /*full_BTD_snapshot*/, diff --git a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.H b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.H index 5c26ac97f61..cb0a6c4b6c7 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.H @@ -24,6 +24,7 @@ class FlushFormatCheckpoint final : public FlushFormatPlotfile std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose = 2, bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, int bufferID = 1, int numBuffers = 1, diff --git a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp index 4d721dd6abe..788e040b0ee 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp @@ -39,6 +39,7 @@ FlushFormatCheckpoint::WriteToFile ( const std::string prefix, int file_min_digits, bool /*plot_raw_fields*/, bool /*plot_raw_fields_guards*/, + int verbose, const bool /*use_pinned_pc*/, bool /*isBTD*/, int /*snapshotID*/, int /*bufferID*/, int /*numBuffers*/, @@ -56,8 +57,10 @@ FlushFormatCheckpoint::WriteToFile ( const std::string& checkpointname = amrex::Concatenate(prefix, iteration[0], file_min_digits); - amrex::Print() << Utils::TextMsg::Info( - "Writing checkpoint " + checkpointname); + if (verbose > 0) { + amrex::Print() << Utils::TextMsg::Info( + "Writing checkpoint " + checkpointname); + } // const int nlevels = finestLevel()+1; amrex::PreBuildDirectorHierarchy(checkpointname, default_level_prefix, nlev, true); diff --git a/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.H b/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.H index 141760ac2a3..5666d85bf3a 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.H @@ -36,11 +36,12 @@ public: std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose, bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, int bufferID = 1, int numBuffers = 1, const amrex::Geometry& full_BTD_snapshot = amrex::Geometry(), - bool isLastBTDFlush = false ) const override; + bool isLastBTDFlush = false) const override; ~FlushFormatOpenPMD () override = default; diff --git a/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp b/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp index e0c8c4ef2d6..aeb26656b46 100644 --- 
a/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp @@ -123,6 +123,7 @@ FlushFormatOpenPMD::WriteToFile ( const amrex::Vector<ParticleDiag>& particle_diags, int output_levels, const std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose, const bool use_pinned_pc, bool isBTD, int snapshotID, int bufferID, int numBuffers, const amrex::Geometry& full_BTD_snapshot, @@ -130,16 +131,18 @@ FlushFormatOpenPMD::WriteToFile ( { WARPX_PROFILE("FlushFormatOpenPMD::WriteToFile()"); const std::string& filename = amrex::Concatenate(prefix, iteration[0], file_min_digits); - if (!isBTD) - { - amrex::Print() << Utils::TextMsg::Info("Writing openPMD file " + filename); - } else - { - amrex::Print() << Utils::TextMsg::Info("Writing buffer " + std::to_string(bufferID+1) + " of " + std::to_string(numBuffers) - + " to snapshot " + std::to_string(snapshotID) + " to openPMD BTD " + prefix); - if (isLastBTDFlush) + if (verbose > 0) { + if (!isBTD) + { + amrex::Print() << Utils::TextMsg::Info("Writing openPMD file " + filename); + } else { - amrex::Print() << Utils::TextMsg::Info("Finished writing snapshot " + std::to_string(snapshotID) + " in openPMD BTD " + prefix); + amrex::Print() << Utils::TextMsg::Info("Writing buffer " + std::to_string(bufferID+1) + " of " + std::to_string(numBuffers) + + " to snapshot " + std::to_string(snapshotID) + " to openPMD BTD " + prefix); + if (isLastBTDFlush) + { + amrex::Print() << Utils::TextMsg::Info("Finished writing snapshot " + std::to_string(snapshotID) + " in openPMD BTD " + prefix); + } } } diff --git a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.H b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.H index c62056b8907..18648c07e69 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.H @@ -31,6 +31,7 @@ public: std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose = 2, bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, int bufferID = 1, int numBuffers = 1, diff --git a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp index 0f05496e4c0..879a5986434 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp @@ -66,6 +66,7 @@ FlushFormatPlotfile::WriteToFile ( const amrex::Vector<ParticleDiag>& particle_diags, int nlev, const std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose, const bool /*use_pinned_pc*/, bool isBTD, int snapshotID, int bufferID, int numBuffers, const amrex::Geometry& /*full_BTD_snapshot*/, @@ -74,17 +75,19 @@ FlushFormatPlotfile::WriteToFile ( WARPX_PROFILE("FlushFormatPlotfile::WriteToFile()"); auto & warpx = WarpX::GetInstance(); const std::string& filename = amrex::Concatenate(prefix, iteration[0], file_min_digits); - if (!isBTD) - { - amrex::Print() << Utils::TextMsg::Info("Writing plotfile " + filename); - } else - { - amrex::Print() << Utils::TextMsg::Info("Writing buffer " + std::to_string(bufferID+1) + " of " + std::to_string(numBuffers) - + " to snapshot " + std::to_string(snapshotID) + " in plotfile BTD " + prefix ); - if (isLastBTDFlush) - { - amrex::Print() << Utils::TextMsg::Info("Finished writing snapshot " + std::to_string(snapshotID) + " in plotfile BTD " + filename); - } + if (verbose > 0) { + if (!isBTD) + { + 
amrex::Print() << Utils::TextMsg::Info("Writing plotfile " + filename); + } else + { + amrex::Print() << Utils::TextMsg::Info("Writing buffer " + std::to_string(bufferID+1) + " of " + std::to_string(numBuffers) + + " to snapshot " + std::to_string(snapshotID) + " in plotfile BTD " + prefix ); + if (isLastBTDFlush) + { + amrex::Print() << Utils::TextMsg::Info("Finished writing snapshot " + std::to_string(snapshotID) + " in plotfile BTD " + filename); + } + } } Vector<std::string> rfs; diff --git a/Source/Diagnostics/FlushFormats/FlushFormatSensei.H b/Source/Diagnostics/FlushFormats/FlushFormatSensei.H index 45ea40077e4..87ed00e539e 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatSensei.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatSensei.H @@ -57,6 +57,7 @@ public: std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose = 2, bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, int bufferID = 1, int numBuffers = 1, diff --git a/Source/Diagnostics/FlushFormats/FlushFormatSensei.cpp b/Source/Diagnostics/FlushFormats/FlushFormatSensei.cpp index 468ed81ce18..b96c6d76f91 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatSensei.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatSensei.cpp @@ -51,7 +51,7 @@ FlushFormatSensei::WriteToFile ( const amrex::Vector<ParticleDiag>& particle_diags, int nlev, const std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, - const bool use_pinned_pc, + int verbose, const bool use_pinned_pc, bool isBTD, int /*snapshotID*/, int /*bufferID*/, int /*numBuffers*/, const amrex::Geometry& /*full_BTD_snapshot*/, bool /*isLastBTDFlush*/) const { @@ -63,7 +63,7 @@ FlushFormatSensei::WriteToFile ( #ifndef AMREX_USE_SENSEI_INSITU amrex::ignore_unused(varnames, mf, iteration, time, particle_diags, - isBTD); + verbose, isBTD); #else WARPX_ALWAYS_ASSERT_WITH_MESSAGE( !isBTD, WARPX_PROFILE("FlushFormatSensei::WriteToFile()"); const std::string& filename = amrex::Concatenate(prefix, iteration[0], file_min_digits); - amrex::Print() << Utils::TextMsg::Info("Writing Sensei file " + filename); + if (verbose > 0) { + amrex::Print() << Utils::TextMsg::Info("Writing Sensei file " + filename); + } amrex::Vector<amrex::MultiFab> *mf_ptr = const_cast<amrex::Vector<amrex::MultiFab>*>(&mf); diff --git a/Source/Diagnostics/FullDiagnostics.cpp b/Source/Diagnostics/FullDiagnostics.cpp index 7a8f376cd21..946178fd1b5 100644 --- a/Source/Diagnostics/FullDiagnostics.cpp +++ b/Source/Diagnostics/FullDiagnostics.cpp @@ -261,7 +261,8 @@ FullDiagnostics::Flush ( int i_buffer, bool /* force_flush */ ) m_varnames, m_sum_mf_output.at(i_buffer), m_geom_output.at(i_buffer), warpx.getistep(), warpx.gett_new(0), m_output_species.at(i_buffer), nlev_output, m_file_prefix, - m_file_min_digits, m_plot_raw_fields, m_plot_raw_fields_guards); + m_file_min_digits, m_plot_raw_fields, m_plot_raw_fields_guards, + m_verbose); // Reset the values in the dynamic start time-averaged diagnostics after flush if (m_time_average_mode == TimeAverageType::Dynamic) { @@ -281,7 +282,8 @@ FullDiagnostics::Flush ( int i_buffer, bool /* force_flush */ ) m_varnames, m_mf_output.at(i_buffer), m_geom_output.at(i_buffer), warpx.getistep(), warpx.gett_new(0), m_output_species.at(i_buffer), nlev_output, m_file_prefix, - m_file_min_digits, m_plot_raw_fields, m_plot_raw_fields_guards); + m_file_min_digits, m_plot_raw_fields, m_plot_raw_fields_guards, + m_verbose); } FlushRaw(); @@ -340,7 +342,7 @@ FullDiagnostics::DoComputeAndPack (int step, 
bool force_flush) } } // Print information on when time-averaging is active - if (in_averaging_period) { + if ((m_verbose > 1) && in_averaging_period) { if (step == m_average_start_step) { amrex::Print() << Utils::TextMsg::Info( "Begin time averaging for " + m_diag_name + " and output at step " From f68415ae71dedbdd0758e5afbc66f355ae30eddb Mon Sep 17 00:00:00 2001 From: Olga Shapoval <30510597+oshapoval@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:57:53 -0800 Subject: [PATCH 4/6] Added abort message if 1D PSATD is used (#5500) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Added an abort message for cases where 1D PSATD is used, as it’s not implemented for this geometry. --- Source/WarpX.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 176a6f63ddf..965235e1078 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -1086,7 +1086,10 @@ WarpX::ReadParameters () WARPX_ALWAYS_ASSERT_WITH_MESSAGE( electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD, "algo.maxwell_solver = psatd is not supported because WarpX was built without spectral solvers"); #endif - +#if defined(WARPX_DIM_1D_Z) && defined(WARPX_USE_FFT) + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD, + "algo.maxwell_solver = psatd is not available for 1D geometry"); +#endif #ifdef WARPX_DIM_RZ WARPX_ALWAYS_ASSERT_WITH_MESSAGE(Geom(0).isPeriodic(0) == 0, "The problem must not be periodic in the radial direction"); From 5bc47901c512406493ed9eaf56f9be0a81feec62 Mon Sep 17 00:00:00 2001 From: Olga Shapoval <30510597+oshapoval@users.noreply.github.com> Date: Thu, 12 Dec 2024 13:51:21 -0800 Subject: [PATCH 5/6] Added AnalyticFluxDistribution class (#5422) Added an AnalyticFluxDistribution class with a parsed `flux_expression`. Depends on https://github.com/picmi-standard/picmi/pull/121 --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: David Grote --- Docs/requirements.txt | 2 +- Docs/source/usage/python.rst | 4 + Python/pywarpx/picmi.py | 93 +++++++++++++++---- Python/setup.py | 2 +- .../karolina-it4i/install_dependencies.sh | 2 +- requirements.txt | 2 +- 6 files changed, 84 insertions(+), 21 deletions(-) diff --git a/Docs/requirements.txt b/Docs/requirements.txt index 7581638551e..14fafe406fb 100644 --- a/Docs/requirements.txt +++ b/Docs/requirements.txt @@ -13,7 +13,7 @@ openpmd-viewer # for checksumAPI # PICMI API docs # note: keep in sync with version in ../requirements.txt -picmistandard==0.31.0 +picmistandard==0.33.0 # for development against an unreleased PICMI version, use: # picmistandard @ git+https://github.com/picmi-standard/picmi.git#subdirectory=PICMI_Python diff --git a/Docs/source/usage/python.rst b/Docs/source/usage/python.rst index 8b40684feb9..1af884b40e7 100644 --- a/Docs/source/usage/python.rst +++ b/Docs/source/usage/python.rst @@ -146,6 +146,10 @@ Particle distributions can be used for to initialize particles in a particle spe .. autoclass:: pywarpx.picmi.AnalyticDistribution +.. autoclass:: pywarpx.picmi.UniformFluxDistribution + +.. autoclass:: pywarpx.picmi.AnalyticFluxDistribution + .. autoclass:: pywarpx.picmi.ParticleListDistribution Particle layouts determine how to microscopically place macro particles in a grid cell. 
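The picmi.py changes below wire `AnalyticFluxDistribution` to WarpX's parsed flux profile: the wrapper sets `flux_profile = parse_flux_function` together with a `flux_function(x,y,z,t)` expression via `add_new_group_attr`. For orientation, the generated native input block looks roughly like the following sketch. The species name and expression are invented for illustration, and the `injection_style` and surface-placement keys are assumed to carry over unchanged from the existing uniform-flux injection rather than taken from this PR:

```console
# Illustrative native-input equivalent of an AnalyticFluxDistribution (sketch).
electrons.injection_style = NFluxPerCell           # assumed: same style as uniform flux injection
electrons.flux_profile = parse_flux_function       # set by the new PICMI class (see diff below)
electrons.flux_function(x,y,z,t) = 1.e22*exp(-(x*x+y*y)/2.e-6)
electrons.flux_normal_axis = z                     # assumed surface-placement keys
electrons.flux_direction = +1
electrons.surface_flux_pos = 0.
```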
diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index de3b26955b7..d464c44726f 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -688,14 +688,34 @@ def setup_parse_momentum_functions( ) -class UniformFluxDistribution( - picmistandard.PICMI_UniformFluxDistribution, DensityDistributionBase +class UniformDistribution( + picmistandard.PICMI_UniformDistribution, DensityDistributionBase ): + def distribution_initialize_inputs( + self, species_number, layout, species, density_scale, source_name + ): + self.set_mangle_dict() + self.set_species_attributes(species, layout, source_name) + + # --- Only constant density is supported by this class + species.add_new_group_attr(source_name, "profile", "constant") + species.add_new_group_attr(source_name, "density", self.density) + if density_scale is not None: + species.add_new_group_attr(source_name, "density", density_scale) + + +class FluxDistributionBase(object): + """This is a base class for both uniform and analytic flux distributions.""" + def init(self, kw): self.inject_from_embedded_boundary = kw.pop( "warpx_inject_from_embedded_boundary", False ) + def initialize_flux_profile_func(self, species, density_scale, source_name): + """Initialize the flux profile and flux function.""" + pass + def distribution_initialize_inputs( self, species_number, layout, species, density_scale, source_name ): @@ -703,10 +723,7 @@ def distribution_initialize_inputs( self.set_mangle_dict() self.set_species_attributes(species, layout, source_name) - species.add_new_group_attr(source_name, "flux_profile", "constant") - species.add_new_group_attr(source_name, "flux", self.flux) - if density_scale is not None: - species.add_new_group_attr(source_name, "flux", density_scale) + self.initialize_flux_profile_func(species, density_scale, source_name) if not self.inject_from_embedded_boundary: species.add_new_group_attr( @@ -737,20 +754,62 @@ def distribution_initialize_inputs( ) -class UniformDistribution( - picmistandard.PICMI_UniformDistribution, DensityDistributionBase +class AnalyticFluxDistribution( + picmistandard.PICMI_AnalyticFluxDistribution, + FluxDistributionBase, + DensityDistributionBase, ): - def distribution_initialize_inputs( - self, species_number, layout, species, density_scale, source_name - ): - self.set_mangle_dict() - self.set_species_attributes(species, layout, source_name) + """ + Parameters + ---------- - # --- Only constant density is supported by this class - species.add_new_group_attr(source_name, "profile", "constant") - species.add_new_group_attr(source_name, "density", self.density) + warpx_inject_from_embedded_boundary: bool + When true, the flux is injected from the embedded boundaries instead + of a plane. 
+ """ + + def init(self, kw): + FluxDistributionBase.init(self, kw) + + def initialize_flux_profile_func(self, species, density_scale, source_name): + species.add_new_group_attr(source_name, "flux_profile", "parse_flux_function") if density_scale is not None: - species.add_new_group_attr(source_name, "density", density_scale) + species.add_new_group_attr(source_name, "flux", density_scale) + expression = pywarpx.my_constants.mangle_expression(self.flux, self.mangle_dict) + if density_scale is None: + species.add_new_group_attr( + source_name, "flux_function(x,y,z,t)", expression + ) + else: + species.add_new_group_attr( + source_name, + "flux_function(x,y,z,t)", + "{}*({})".format(density_scale, expression), + ) + + +class UniformFluxDistribution( + picmistandard.PICMI_UniformFluxDistribution, + FluxDistributionBase, + DensityDistributionBase, +): + """ + Parameters + ---------- + + warpx_inject_from_embedded_boundary: bool + When true, the flux is injected from the embedded boundaries instead + of a plane. + """ + + def init(self, kw): + FluxDistributionBase.init(self, kw) + + def initialize_flux_profile_func(self, species, density_scale, source_name): + species.add_new_group_attr(source_name, "flux_profile", "constant") + species.add_new_group_attr(source_name, "flux", self.flux) + if density_scale is not None: + species.add_new_group_attr(source_name, "flux", density_scale) class AnalyticDistribution( diff --git a/Python/setup.py b/Python/setup.py index 8080b62acf4..fa38e14e7ce 100644 --- a/Python/setup.py +++ b/Python/setup.py @@ -70,7 +70,7 @@ package_dir={"pywarpx": "pywarpx"}, description="""Wrapper of WarpX""", package_data=package_data, - install_requires=["numpy", "picmistandard==0.31.0", "periodictable"], + install_requires=["numpy", "picmistandard==0.33.0", "periodictable"], python_requires=">=3.8", zip_safe=False, ) diff --git a/Tools/machines/karolina-it4i/install_dependencies.sh b/Tools/machines/karolina-it4i/install_dependencies.sh index 9cc4f1ee144..33d8462b55f 100755 --- a/Tools/machines/karolina-it4i/install_dependencies.sh +++ b/Tools/machines/karolina-it4i/install_dependencies.sh @@ -53,7 +53,7 @@ python -m pip install --user --upgrade matplotlib #python -m pip install --user --upgrade yt # install or update WarpX dependencies -python -m pip install --user --upgrade picmistandard==0.31.0 +python -m pip install --user --upgrade picmistandard==0.33.0 python -m pip install --user --upgrade lasy # optional: for optimas (based on libEnsemble & ax->botorch->gpytorch->pytorch) diff --git a/requirements.txt b/requirements.txt index 2c8b749abe0..e44273328de 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ periodictable~=1.5 # PICMI # note: don't forget to update the version in Docs/requirements.txt, too -picmistandard==0.31.0 +picmistandard==0.33.0 # for development against an unreleased PICMI version, use: #picmistandard @ git+https://github.com/picmi-standard/picmi.git#subdirectory=PICMI_Python From 7299895094cba9bd2170f7b52f985526255212ec Mon Sep 17 00:00:00 2001 From: David Grote Date: Fri, 13 Dec 2024 02:57:03 -0800 Subject: [PATCH 6/6] Replace quartz with dane (#5507) The Quartz system at LLNL was removed and replaced by Dane. This PR updates the scripts and install instructions for the new machine. 
--- Docs/source/install/hpc.rst | 2 +- .../install/hpc/{quartz.rst => dane.rst} | 78 +++++++++---------- .../quartz.sbatch => dane-llnl/dane.sbatch} | 11 ++- .../dane_warpx.profile.example} | 30 +++---- .../install_dependencies.sh | 46 +++++------ 5 files changed, 82 insertions(+), 85 deletions(-) rename Docs/source/install/hpc/{quartz.rst => dane.rst} (52%) rename Tools/machines/{quartz-llnl/quartz.sbatch => dane-llnl/dane.sbatch} (78%) rename Tools/machines/{quartz-llnl/quartz_warpx.profile.example => dane-llnl/dane_warpx.profile.example} (62%) rename Tools/machines/{quartz-llnl => dane-llnl}/install_dependencies.sh (58%) diff --git a/Docs/source/install/hpc.rst b/Docs/source/install/hpc.rst index 35884050a59..61e60359e59 100644 --- a/Docs/source/install/hpc.rst +++ b/Docs/source/install/hpc.rst @@ -50,7 +50,7 @@ This section documents quick-start guides for a selection of supercomputers that hpc/perlmutter hpc/pitzer hpc/polaris - hpc/quartz + hpc/dane hpc/summit hpc/taurus hpc/tioga diff --git a/Docs/source/install/hpc/quartz.rst b/Docs/source/install/hpc/dane.rst similarity index 52% rename from Docs/source/install/hpc/quartz.rst rename to Docs/source/install/hpc/dane.rst index a49327e8613..e9af32130f5 100644 --- a/Docs/source/install/hpc/quartz.rst +++ b/Docs/source/install/hpc/dane.rst @@ -1,9 +1,9 @@ -.. _building-quartz: +.. _building-dane: -Quartz (LLNL) +Dane (LLNL) ============= -The `Quartz Intel CPU cluster `_ is located at LLNL. +The `Dane Intel CPU cluster `_ is located at LLNL. Introduction @@ -11,9 +11,7 @@ Introduction If you are new to this system, **please see the following resources**: -* `LLNL user account `__ (login required) -* `Quartz user guide `_ -* Batch system: `Slurm `_ +* `LLNL user account `__ (`documentation `__, login required) * `Production directories `_: @@ -21,7 +19,7 @@ If you are new to this system, **please see the following resources**: * Note that the ``$HOME`` directory and the ``/usr/workspace/$(whoami)`` space are NFS mounted and *not* suitable for production quality data generation. -.. _building-quartz-preparation: +.. _building-dane-preparation: Preparation ----------- @@ -32,23 +30,23 @@ Use the following commands to download the WarpX source code: git clone https://github.com/ECP-WarpX/WarpX.git $HOME/src/warpx -We use system software modules, add environment hints and further dependencies via the file ``$HOME/quartz_warpx.profile``. +We use system software modules, add environment hints and further dependencies via the file ``$HOME/dane_warpx.profile``. Create it now: .. code-block:: bash - cp $HOME/src/warpx/Tools/machines/quartz-llnl/quartz_warpx.profile.example $HOME/quartz_warpx.profile + cp $HOME/src/warpx/Tools/machines/dane-llnl/dane_warpx.profile.example $HOME/dane_warpx.profile .. dropdown:: Script Details :color: light :icon: info :animate: fade-in-slide-down - .. literalinclude:: ../../../../Tools/machines/quartz-llnl/quartz_warpx.profile.example + .. literalinclude:: ../../../../Tools/machines/dane-llnl/dane_warpx.profile.example :language: bash Edit the 2nd line of this script, which sets the ``export proj=""`` variable. -For example, if you are member of the project ``tps``, then run ``vi $HOME/quartz_warpx.profile``. +For example, if you are member of the project ``tps``, then run ``vi $HOME/dane_warpx.profile``. Enter the edit mode by typing ``i`` and edit line 2 to read: .. code-block:: bash @@ -59,29 +57,29 @@ Exit the ``vi`` editor with ``Esc`` and then type ``:wq`` (write & quit). .. 
important:: - Now, and as the first step on future logins to Quartz, activate these environment settings: + Now, and as the first step on future logins to Dane, activate these environment settings: .. code-block:: bash - source $HOME/quartz_warpx.profile + source $HOME/dane_warpx.profile -Finally, since Quartz does not yet provide software modules for some of our dependencies, install them once: +Finally, since Dane does not yet provide software modules for some of our dependencies, install them once: .. code-block:: bash - bash $HOME/src/warpx/Tools/machines/quartz-llnl/install_dependencies.sh - source /usr/workspace/${USER}/quartz/venvs/warpx-quartz/bin/activate + bash $HOME/src/warpx/Tools/machines/dane-llnl/install_dependencies.sh + source /usr/workspace/${USER}/dane/venvs/warpx-dane/bin/activate .. dropdown:: Script Details :color: light :icon: info :animate: fade-in-slide-down - .. literalinclude:: ../../../../Tools/machines/quartz-llnl/install_dependencies.sh + .. literalinclude:: ../../../../Tools/machines/dane-llnl/install_dependencies.sh :language: bash -.. _building-quartz-compilation: +.. _building-dane-compilation: Compilation ----------- @@ -91,27 +89,27 @@ Use the following :ref:`cmake commands ` to compile the applicat .. code-block:: bash cd $HOME/src/warpx - rm -rf build_quartz + rm -rf build_dane - cmake -S . -B build_quartz -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" - cmake --build build_quartz -j 6 + cmake -S . -B build_dane -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" + cmake --build build_dane -j 6 -The WarpX application executables are now in ``$HOME/src/warpx/build_quartz/bin/``. +The WarpX application executables are now in ``$HOME/src/warpx/build_dane/bin/``. Additionally, the following commands will install WarpX as a Python module: .. code-block:: bash - rm -rf build_quartz_py + rm -rf build_dane_py - cmake -S . -B build_quartz_py -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" - cmake --build build_quartz_py -j 6 --target pip_install + cmake -S . -B build_dane_py -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" + cmake --build build_dane_py -j 6 --target pip_install -Now, you can :ref:`submit Quartz compute jobs ` for WarpX :ref:`Python (PICMI) scripts ` (:ref:`example scripts `). -Or, you can use the WarpX executables to submit Quartz jobs (:ref:`example inputs `). -For executables, you can reference their location in your :ref:`job script ` or copy them to a location in ``$PROJWORK/$proj/``. +Now, you can :ref:`submit Dane compute jobs ` for WarpX :ref:`Python (PICMI) scripts ` (:ref:`example scripts `). +Or, you can use the WarpX executables to submit Dane jobs (:ref:`example inputs `). +For executables, you can reference their location in your :ref:`job script ` or copy them to a location in ``$PROJWORK/$proj/``. -.. _building-quartz-update: +.. _building-dane-update: Update WarpX & Dependencies --------------------------- @@ -135,34 +133,34 @@ If you already installed WarpX in the past and want to update it, start by getti And, if needed, -- :ref:`update the quartz_warpx.profile file `, +- :ref:`update the dane_warpx.profile file `, - log out and into the system, activate the now updated environment profile as usual, -- :ref:`execute the dependency install scripts `. +- :ref:`execute the dependency install scripts `. -As a last step, clean the build directory ``rm -rf $HOME/src/warpx/build_quartz`` and rebuild WarpX. 
+As a last step, clean the build directory ``rm -rf $HOME/src/warpx/build_dane`` and rebuild WarpX. -.. _running-cpp-quartz: +.. _running-cpp-dane: Running ------- -.. _running-cpp-quartz-CPUs: +.. _running-cpp-dane-CPUs: -Intel Xeon E5-2695 v4 CPUs +Intel Sapphire Rapids CPUs ^^^^^^^^^^^^^^^^^^^^^^^^^^ -The batch script below can be used to run a WarpX simulation on 2 nodes on the supercomputer Quartz at LLNL. +The batch script below can be used to run a WarpX simulation on 2 nodes on the supercomputer Dane at LLNL. Replace descriptions between chevrons ``<>`` by relevant values, for instance ```` could be ``plasma_mirror_inputs``. -.. literalinclude:: ../../../../Tools/machines/quartz-llnl/quartz.sbatch +.. literalinclude:: ../../../../Tools/machines/dane-llnl/dane.sbatch :language: bash - :caption: You can copy this file from ``Tools/machines/quartz-llnl/quartz.sbatch``. + :caption: You can copy this file from ``Tools/machines/dane-llnl/dane.sbatch``. -To run a simulation, copy the lines above to a file ``quartz.sbatch`` and run +To run a simulation, copy the lines above to a file ``dane.sbatch`` and run .. code-block:: bash - sbatch quartz.sbatch + sbatch dane.sbatch to submit the job. diff --git a/Tools/machines/quartz-llnl/quartz.sbatch b/Tools/machines/dane-llnl/dane.sbatch similarity index 78% rename from Tools/machines/quartz-llnl/quartz.sbatch rename to Tools/machines/dane-llnl/dane.sbatch index 4c1a82ff8e9..b2a114b3f1b 100644 --- a/Tools/machines/quartz-llnl/quartz.sbatch +++ b/Tools/machines/dane-llnl/dane.sbatch @@ -15,15 +15,14 @@ # one MPI rank per half-socket (see below) #SBATCH --tasks-per-node=2 # request all logical (virtual) cores per half-socket -#SBATCH --cpus-per-task=18 +#SBATCH --cpus-per-task=112 -# each Quartz node has 1 socket of Intel Xeon E5-2695 v4 -# each Xeon CPU is divided into 2 bus rings that each have direct L3 access +# each Dane node has 2 sockets of Intel Sapphire Rapids with 56 cores each export WARPX_NMPI_PER_NODE=2 -# each MPI rank per half-socket has 9 physical cores -# or 18 logical (virtual) cores +# each MPI rank per half-socket has 56 physical cores +# or 112 logical (virtual) cores # over-subscribing each physical core with 2x # hyperthreading led to a slight (3.5%) speedup on Cori's Intel Xeon E5-2698 v3, # so we do the same here @@ -33,7 +32,7 @@ export WARPX_NMPI_PER_NODE=2 # for N>9, also equally over close-by logical cores export OMP_PROC_BIND=spread export OMP_PLACES=threads -export OMP_NUM_THREADS=18 +export OMP_NUM_THREADS=112 EXE="" # e.g. 
./warpx diff --git a/Tools/machines/quartz-llnl/quartz_warpx.profile.example b/Tools/machines/dane-llnl/dane_warpx.profile.example similarity index 62% rename from Tools/machines/quartz-llnl/quartz_warpx.profile.example rename to Tools/machines/dane-llnl/dane_warpx.profile.example index f296a0738ff..1d272979bd1 100644 --- a/Tools/machines/quartz-llnl/quartz_warpx.profile.example +++ b/Tools/machines/dane-llnl/dane_warpx.profile.example @@ -2,7 +2,7 @@ #export proj="" # edit this and comment in # required dependencies -module load cmake/3.23.1 +module load cmake/3.26.3 module load clang/14.0.6-magic module load mvapich2/2.3.7 @@ -15,38 +15,38 @@ module load boost/1.80.0 # optional: for openPMD support module load hdf5-parallel/1.14.0 -SW_DIR="/usr/workspace/${USER}/quartz" -export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-1.21.1:$CMAKE_PREFIX_PATH +SW_DIR="/usr/workspace/${USER}/dane" +export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-1.21.6:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.8.3:$CMAKE_PREFIX_PATH export PATH=${SW_DIR}/adios2-2.8.3/bin:${PATH} # optional: for PSATD in RZ geometry support -export CMAKE_PREFIX_PATH=${SW_DIR}/blaspp-2024.05.31:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${SW_DIR}/lapackpp-2024.05.31:$CMAKE_PREFIX_PATH -export LD_LIBRARY_PATH=${SW_DIR}/blaspp-2024.05.31/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${SW_DIR}/lapackpp-2024.05.31/lib64:$LD_LIBRARY_PATH +export CMAKE_PREFIX_PATH=${SW_DIR}/blaspp-2024.10.26:$CMAKE_PREFIX_PATH +export CMAKE_PREFIX_PATH=${SW_DIR}/lapackpp-2024.10.26:$CMAKE_PREFIX_PATH +export LD_LIBRARY_PATH=${SW_DIR}/blaspp-2024.10.26/lib64:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=${SW_DIR}/lapackpp-2024.10.26/lib64:$LD_LIBRARY_PATH # optional: for Python bindings -module load python/3.9.12 +module load python/3.12.2 -if [ -d "${SW_DIR}/venvs/warpx-quartz" ] +if [ -d "${SW_DIR}/venvs/warpx-dane" ] then - source ${SW_DIR}/venvs/warpx-quartz/bin/activate + source ${SW_DIR}/venvs/warpx-dane/bin/activate fi # optional: an alias to request an interactive node for two hours -alias getNode="srun --time=0:30:00 --nodes=1 --ntasks-per-node=2 --cpus-per-task=18 -p pdebug --pty bash" +alias getNode="srun --time=0:30:00 --nodes=1 --ntasks-per-node=2 --cpus-per-task=56 -p pdebug --pty bash" # an alias to run a command on a batch node for up to 30min # usage: runNode -alias runNode="srun --time=0:30:00 --nodes=1 --ntasks-per-node=2 --cpus-per-task=18 -p pdebug" +alias runNode="srun --time=0:30:00 --nodes=1 --ntasks-per-node=2 --cpus-per-task=56 -p pdebug" # fix system defaults: do not escape $ with a \ on tab completion shopt -s direxpand -# optimize CPU microarchitecture for Intel Xeon E5-2695 v4 +# optimize CPU microarchitecture for Intel Sapphire Rapids # note: the cc/CC/ftn wrappers below add those -export CXXFLAGS="-march=broadwell" -export CFLAGS="-march=broadwell" +export CXXFLAGS="-march=sapphirerapids" +export CFLAGS="-march=sapphirerapids" # compiler environment hints export CC=$(which clang) diff --git a/Tools/machines/quartz-llnl/install_dependencies.sh b/Tools/machines/dane-llnl/install_dependencies.sh similarity index 58% rename from Tools/machines/quartz-llnl/install_dependencies.sh rename to Tools/machines/dane-llnl/install_dependencies.sh index cfb01769384..0415d7fa8cc 100755 --- a/Tools/machines/quartz-llnl/install_dependencies.sh +++ b/Tools/machines/dane-llnl/install_dependencies.sh @@ -1,10 +1,10 @@ #!/bin/bash # -# Copyright 2023 The WarpX Community +# Copyright 2024 The WarpX Community # # This file is part of WarpX. 
# -# Author: Axel Huebl +# Author: Axel Huebl, David Grote # License: BSD-3-Clause-LBNL # Exit on first error encountered ############################################# @@ -14,13 +14,13 @@ set -eu -o pipefail # Check: ###################################################################### # -# Was quartz_warpx.profile sourced and configured correctly? -if [ -z ${proj-} ]; then echo "WARNING: The 'proj' variable is not yet set in your quartz_warpx.profile file! Please edit its line 2 to continue!"; exit 1; fi +# Was dane_warpx.profile sourced and configured correctly? +if [ -z ${proj-} ]; then echo "WARNING: The 'proj' variable is not yet set in your dane_warpx.profile file! Please edit its line 2 to continue!"; exit 1; fi # Remove old dependencies ##################################################### # -SW_DIR="/usr/workspace/${USER}/quartz" +SW_DIR="/usr/workspace/${USER}/dane" rm -rf ${SW_DIR} mkdir -p ${SW_DIR} @@ -41,13 +41,13 @@ if [ -d ${HOME}/src/c-blosc ] then cd ${HOME}/src/c-blosc git fetch --prune - git checkout v1.21.1 + git checkout v1.21.6 cd - else - git clone -b v1.21.1 https://github.com/Blosc/c-blosc.git ${HOME}/src/c-blosc + git clone -b v1.21.6 https://github.com/Blosc/c-blosc.git ${HOME}/src/c-blosc fi -cmake -S ${HOME}/src/c-blosc -B ${build_dir}/c-blosc-quartz-build -DBUILD_TESTS=OFF -DBUILD_BENCHMARKS=OFF -DDEACTIVATE_AVX2=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/c-blosc-1.21.1 -cmake --build ${build_dir}/c-blosc-quartz-build --target install --parallel 6 +cmake -S ${HOME}/src/c-blosc -B ${build_dir}/c-blosc-dane-build -DBUILD_TESTS=OFF -DBUILD_BENCHMARKS=OFF -DDEACTIVATE_AVX2=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/c-blosc-1.21.6 +cmake --build ${build_dir}/c-blosc-dane-build --target install --parallel 6 # ADIOS2 if [ -d ${HOME}/src/adios2 ] @@ -59,44 +59,44 @@ then else git clone -b v2.8.3 https://github.com/ornladios/ADIOS2.git ${HOME}/src/adios2 fi -cmake -S ${HOME}/src/adios2 -B ${build_dir}/adios2-quartz-build -DBUILD_TESTING=OFF -DADIOS2_BUILD_EXAMPLES=OFF -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_SST=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.8.3 -cmake --build ${build_dir}/adios2-quartz-build --target install -j 6 +cmake -S ${HOME}/src/adios2 -B ${build_dir}/adios2-dane-build -DBUILD_TESTING=OFF -DADIOS2_BUILD_EXAMPLES=OFF -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_SST=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.8.3 +cmake --build ${build_dir}/adios2-dane-build --target install -j 6 # BLAS++ (for PSATD+RZ) if [ -d ${HOME}/src/blaspp ] then cd ${HOME}/src/blaspp git fetch --prune - git checkout v2024.05.31 + git checkout v2024.10.26 cd - else - git clone -b v2024.05.31 https://github.com/icl-utk-edu/blaspp.git ${HOME}/src/blaspp + git clone -b v2024.10.26 https://github.com/icl-utk-edu/blaspp.git ${HOME}/src/blaspp fi -cmake -S ${HOME}/src/blaspp -B ${build_dir}/blaspp-quartz-build -Duse_openmp=ON -Duse_cmake_find_blas=ON -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=${SW_DIR}/blaspp-2024.05.31 -cmake --build ${build_dir}/blaspp-quartz-build --target install --parallel 6 +cmake -S ${HOME}/src/blaspp -B ${build_dir}/blaspp-dane-build -Duse_openmp=ON -Duse_cmake_find_blas=ON -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=${SW_DIR}/blaspp-2024.10.26 +cmake --build ${build_dir}/blaspp-dane-build --target install --parallel 6 # LAPACK++ (for PSATD+RZ) if [ -d ${HOME}/src/lapackpp ] then cd ${HOME}/src/lapackpp git fetch --prune - git 
checkout v2024.05.31 + git checkout v2024.10.26 cd - else - git clone -b v2024.05.31 https://github.com/icl-utk-edu/lapackpp.git ${HOME}/src/lapackpp + git clone -b v2024.10.26 https://github.com/icl-utk-edu/lapackpp.git ${HOME}/src/lapackpp fi -CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S ${HOME}/src/lapackpp -B ${build_dir}/lapackpp-quartz-build -Duse_cmake_find_lapack=ON -DCMAKE_CXX_STANDARD=17 -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=${SW_DIR}/lapackpp-2024.05.31 -cmake --build ${build_dir}/lapackpp-quartz-build --target install --parallel 6 +CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S ${HOME}/src/lapackpp -B ${build_dir}/lapackpp-dane-build -Duse_cmake_find_lapack=ON -DCMAKE_CXX_STANDARD=17 -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=${SW_DIR}/lapackpp-2024.10.26 +cmake --build ${build_dir}/lapackpp-dane-build --target install --parallel 6 # Python ###################################################################### # python3 -m pip install --upgrade --user virtualenv -rm -rf ${SW_DIR}/venvs/warpx-quartz -python3 -m venv ${SW_DIR}/venvs/warpx-quartz -source ${SW_DIR}/venvs/warpx-quartz/bin/activate +rm -rf ${SW_DIR}/venvs/warpx-dane +python3 -m venv ${SW_DIR}/venvs/warpx-dane +source ${SW_DIR}/venvs/warpx-dane/bin/activate python3 -m pip install --upgrade pip -python3 -m pip cache purge +#python3 -m pip cache purge python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel
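A usage note on the `verbosity` flag from patch 3/6: each diagnostic reads the standard `warpx.verbose` input at parse time and otherwise keeps the default `m_verbose = 2`. Per the guards in the diffs above, the per-flush "Writing ..." messages require `verbose > 0`, while the per-step time-averaging notices require `verbose > 1`. A minimal input sketch (the diagnostic name and interval are illustrative):

```console
warpx.verbose = 0                  # silences the "Writing ..." and time-averaging messages
diagnostics.diags_names = diag1    # illustrative full diagnostic
diag1.diag_type = Full
diag1.intervals = 100
```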