diff --git a/.github/workflows/source/ci_matrix.py b/.github/workflows/source/ci_matrix.py index 79f4d878c5d..ff9ea295013 100644 --- a/.github/workflows/source/ci_matrix.py +++ b/.github/workflows/source/ci_matrix.py @@ -1,14 +1,18 @@ #!/usr/bin/env python # Concatenation of tests in each of the 6 elements in CI matrix -f = open('./ci_matrix_elements.txt') ; matrix_elements = f.readlines() ; f.close() +f = open("./ci_matrix_elements.txt") +matrix_elements = f.readlines() +f.close() # All tests read by prepare_file_ci.py -f = open('./ci_all_tests.txt') ; all_tests = f.readlines() ; f.close() +f = open("./ci_all_tests.txt") +all_tests = f.readlines() +f.close() # Now let's make sure these two are equal # Remove these elements from both lists, as they are are not test names -elements_to_remove = ['[main]\n', '[AMReX]\n', '[source]\n', '[extra-PICSAR]\n'] +elements_to_remove = ["[main]\n", "[AMReX]\n", "[source]\n", "[extra-PICSAR]\n"] for element in elements_to_remove: for x in range(matrix_elements.count(element)): matrix_elements.remove(element) @@ -23,4 +27,4 @@ print("Tests in initial list but not in the matrix:") print(list(set(all_tests) - set(matrix_elements))) -assert( matrix_elements == all_tests ) +assert matrix_elements == all_tests diff --git a/.github/workflows/source/makeMakefileForClangTidy.py b/.github/workflows/source/makeMakefileForClangTidy.py index 07809187dbd..13460b9e548 100755 --- a/.github/workflows/source/makeMakefileForClangTidy.py +++ b/.github/workflows/source/makeMakefileForClangTidy.py @@ -14,40 +14,49 @@ def makeMakefileForClangTidy(argv): parser = argparse.ArgumentParser() - parser.add_argument("--input", - help="Ccache log file", - default="ccache.log.txt") - parser.add_argument("--identifier", - help="Unique identifier for finding compilation line in the log file", - default="WarpX/Source") + parser.add_argument("--input", help="Ccache log file", default="ccache.log.txt") + parser.add_argument( + "--identifier", + help="Unique 
identifier for finding compilation line in the log file", + default="WarpX/Source", + ) # We assume WarpX/Source can be used as an identifier to distinguish # WarpX code from amrex, openMPD, and cmake's temporary files like # build/CMakeFiles/CMakeScratch/TryCompile-hw3x4m/test_mpi.cpp - parser.add_argument("--output", - help="Make file for clang-tidy", - default="clang-tidy-ccache-misses.mak") + parser.add_argument( + "--output", + help="Make file for clang-tidy", + default="clang-tidy-ccache-misses.mak", + ) args = parser.parse_args() fin = open(args.input, "r") fout = open(args.output, "w") fout.write("CLANG_TIDY ?= clang-tidy\n") - fout.write("override CLANG_TIDY_ARGS += --extra-arg=-Wno-unknown-warning-option --extra-arg-before=--driver-mode=g++\n") + fout.write( + "override CLANG_TIDY_ARGS += --extra-arg=-Wno-unknown-warning-option --extra-arg-before=--driver-mode=g++\n" + ) fout.write("\n") fout.write(".SECONDEXPANSION:\n") fout.write("clang-tidy: $$(all_targets)\n") fout.write("\t@echo SUCCESS\n\n") - exe_re = re.compile(r" Executing .*? (-.*{}.*) -c .* -o .* (\S*)".format(args.identifier)) + exe_re = re.compile( + r" Executing .*? 
(-.*{}.*) -c .* -o .* (\S*)".format(args.identifier) + ) count = 0 for line in fin.readlines(): ret_exe_re = exe_re.search(line) - if (ret_exe_re): + if ret_exe_re: fout.write("target_{}: {}\n".format(count, ret_exe_re.group(2))) - fout.write("\t$(CLANG_TIDY) $(CLANG_TIDY_ARGS) $< -- {}\n".format - (ret_exe_re.group(1))) + fout.write( + "\t$(CLANG_TIDY) $(CLANG_TIDY_ARGS) $< -- {}\n".format( + ret_exe_re.group(1) + ) + ) fout.write("\ttouch target_{}\n\n".format(count)) count = count + 1 @@ -61,5 +70,6 @@ def makeMakefileForClangTidy(argv): fout.close() fin.close() + if __name__ == "__main__": makeMakefileForClangTidy(sys.argv) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index cef24aed2cc..a8c3bf5f77d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -66,44 +66,16 @@ repos: # C++ formatting # clang-format -# Autoremoves unused Python imports -- repo: https://github.com/hadialqattan/pycln - rev: v2.4.0 +# Python: Ruff linter & formatter +# https://docs.astral.sh/ruff/ +- repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.5.7 hooks: - - id: pycln - name: pycln (python) - -# Sorts Python imports according to PEP8 -# https://www.python.org/dev/peps/pep-0008/#imports -- repo: https://github.com/pycqa/isort - rev: 5.13.2 - hooks: - - id: isort - name: isort (python) - args: ['--profile black'] - -# Python: Flake8 (checks only, does this support auto-fixes?) -#- repo: https://github.com/PyCQA/flake8 -# rev: 4.0.1 -# hooks: -# - id: flake8 -# additional_dependencies: &flake8_dependencies -# - flake8-bugbear -# - pep8-naming -# exclude: ^(docs/.*|tools/.*)$ -# Alternatively: use autopep8? 
- -# Python Formatting -#- repo: https://github.com/psf/black -# rev: 21.10b0 # Keep in sync with blacken-docs -# hooks: -# - id: black -#- repo: https://github.com/asottile/blacken-docs -# rev: v1.11.0 -# hooks: -# - id: blacken-docs -# additional_dependencies: -# - black==21.10b0 # keep in sync with black hook + # Run the linter + - id: ruff + args: [--fix, --exit-non-zero-on-fix] + # Run the formatter + - id: ruff-format # Jupyter Notebooks: clean up all cell outputs - repo: https://github.com/roy-ht/pre-commit-jupyter diff --git a/Docs/source/conf.py b/Docs/source/conf.py index a053224de02..9dfda6346f9 100644 --- a/Docs/source/conf.py +++ b/Docs/source/conf.py @@ -34,7 +34,9 @@ import sphinx_rtd_theme # noqa from pybtex.style.formatting.unsrt import Style as UnsrtStyle -sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../Regression/Checksum')) +module_path = os.path.dirname(os.path.abspath(__file__)) +checksum_path = os.path.join(module_path, "../../Regression/Checksum") +sys.path.insert(0, checksum_path) # -- General configuration ------------------------------------------------ @@ -46,21 +48,22 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.mathjax', - 'sphinx.ext.napoleon', - 'sphinx.ext.viewcode', - 'sphinx_copybutton', - 'sphinx_design', - 'breathe', - 'sphinxcontrib.bibtex' - ] + "sphinx.ext.autodoc", + "sphinx.ext.mathjax", + "sphinx.ext.napoleon", + "sphinx.ext.viewcode", + "sphinx_copybutton", + "sphinx_design", + "breathe", + "sphinxcontrib.bibtex", +] # Add any paths that contain templates here, relative to this directory. 
-templates_path = ['_templates'] +templates_path = ["_templates"] # Relative path to bibliography file, bibliography style -bibtex_bibfiles = ['latex_theory/allbibs.bib', 'refs.bib'] +bibtex_bibfiles = ["latex_theory/allbibs.bib", "refs.bib"] + # An brief introduction to custom BibTex formatting can be found in the Sphinx documentation: # https://sphinxcontrib-bibtex.readthedocs.io/en/latest/usage.html#bibtex-custom-formatting @@ -77,42 +80,43 @@ def __init__(self, *args, **kwargs): # This option makes the given names of an author abbreviated to just initials. # Example: "Jean-Luc" becomes "J.-L." # Set 'abbreviate_names' to True before calling the superclass (BaseStyle class) initializer - kwargs['abbreviate_names'] = True + kwargs["abbreviate_names"] = True super().__init__(*args, **kwargs) -pybtex.plugin.register_plugin('pybtex.style.formatting', 'warpxbibstyle', WarpXBibStyle) -bibtex_default_style = 'warpxbibstyle' +pybtex.plugin.register_plugin("pybtex.style.formatting", "warpxbibstyle", WarpXBibStyle) + +bibtex_default_style = "warpxbibstyle" # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = 'WarpX' -copyright = '2017-2021, WarpX collaboration' -author = 'WarpX collaboration' +project = "WarpX" +copyright = "2017-2021, WarpX collaboration" +author = "WarpX collaboration" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = u'24.08' +version = "24.08" # The full version, including alpha/beta/rc tags. -release = u'24.08' +release = "24.08" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
# # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = 'en' +language = "en" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. @@ -120,7 +124,7 @@ def __init__(self, *args, **kwargs): exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False @@ -131,14 +135,16 @@ def __init__(self, *args, **kwargs): # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'sphinx_rtd_theme' +html_theme = "sphinx_rtd_theme" numfig = True math_eqref_format = "{number}" -numfig_format = {'figure': 'Fig. %s', - 'table': 'Table %s', - 'code-block': 'Listing %s', - 'section': 'Section %s'} +numfig_format = { + "figure": "Fig. %s", + "table": "Table %s", + "code-block": "Listing %s", + "section": "Section %s", +} # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the @@ -149,16 +155,16 @@ def __init__(self, *args, **kwargs): # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] html_css_files = [ - 'custom.css', + "custom.css", ] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. 
-htmlhelp_basename = 'WarpXdoc' +htmlhelp_basename = "WarpXdoc" # -- Options for LaTeX output --------------------------------------------- @@ -167,15 +173,12 @@ def __init__(self, *args, **kwargs): # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -185,8 +188,7 @@ def __init__(self, *args, **kwargs): # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'WarpX.tex', 'WarpX Documentation', - 'WarpX collaboration', 'manual'), + (master_doc, "WarpX.tex", "WarpX Documentation", "WarpX collaboration", "manual"), ] @@ -194,10 +196,7 @@ def __init__(self, *args, **kwargs): # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'warpx', 'WarpX Documentation', - [author], 1) -] +man_pages = [(master_doc, "warpx", "WarpX Documentation", [author], 1)] # -- Options for Texinfo output ------------------------------------------- @@ -206,40 +205,44 @@ def __init__(self, *args, **kwargs): # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'WarpX', 'WarpX Documentation', - author, 'WarpX', 'WarpX is an advanced electromagnetic Particle-In-Cell code.', - 'Miscellaneous'), + ( + master_doc, + "WarpX", + "WarpX Documentation", + author, + "WarpX", + "WarpX is an advanced electromagnetic Particle-In-Cell code.", + "Miscellaneous", + ), ] - - # Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = {'https://amrex-codes.github.io/': None} +intersphinx_mapping = {"https://amrex-codes.github.io/": None} # Setup the breathe extension -breathe_projects = { - "WarpX": "../doxyxml/" -} +breathe_projects = {"WarpX": "../doxyxml/"} breathe_default_project = "WarpX" # Tell sphinx what the primary language being documented is. -primary_domain = 'cpp' +primary_domain = "cpp" # Tell sphinx what the pygments highlight language should be. -highlight_language = 'cpp' +highlight_language = "cpp" # Download AMReX & openPMD-api Doxygen Tagfile to interlink Doxygen docs -url = 'https://amrex-codes.github.io/amrex/docs_xml/doxygen/amrex-doxygen-web.tag.xml' -urllib.request.urlretrieve(url, '../amrex-doxygen-web.tag.xml') +url = "https://amrex-codes.github.io/amrex/docs_xml/doxygen/amrex-doxygen-web.tag.xml" +urllib.request.urlretrieve(url, "../amrex-doxygen-web.tag.xml") -url = 'https://openpmd-api.readthedocs.io/en/latest/_static/doxyhtml/openpmd-api-doxygen-web.tag.xml' -urllib.request.urlretrieve(url, '../openpmd-api-doxygen-web.tag.xml') +url = "https://openpmd-api.readthedocs.io/en/latest/_static/doxyhtml/openpmd-api-doxygen-web.tag.xml" +urllib.request.urlretrieve(url, "../openpmd-api-doxygen-web.tag.xml") # Build Doxygen -subprocess.call('cd ../; doxygen;' - 'mkdir -p source/_static;' - 'cp -r doxyhtml source/_static/;' - 'cp warpx-doxygen-web.tag.xml source/_static/doxyhtml/', - shell=True) +subprocess.call( + "cd ../; doxygen;" + "mkdir -p source/_static;" + "cp -r doxyhtml source/_static/;" + "cp warpx-doxygen-web.tag.xml source/_static/doxyhtml/", + shell=True, +) suppress_warnings = ["bibtex.duplicate_label"] diff --git a/Docs/source/usage/workflows/ml_materials/create_dataset.py b/Docs/source/usage/workflows/ml_materials/create_dataset.py index ecd182c8802..aefae201617 100644 --- a/Docs/source/usage/workflows/ml_materials/create_dataset.py +++ b/Docs/source/usage/workflows/ml_materials/create_dataset.py @@ -19,28 +19,31 @@ c = 2.998e8 
############### + def sanitize_dir_strings(*dir_strings): - """append '/' to a string for concatenation in building up file tree descriptions - """ + """append '/' to a string for concatenation in building up file tree descriptions""" dir_strings = list(dir_strings) for ii, dir_string in enumerate(dir_strings): - if dir_string[-1] != '/': - dir_strings[ii] = dir_string + '/' + if dir_string[-1] != "/": + dir_strings[ii] = dir_string + "/" return dir_strings + def download_and_unzip(url, data_dir): request.urlretrieve(url, data_dir) - with zipfile.ZipFile(data_dir, 'r') as zip_dataset: + with zipfile.ZipFile(data_dir, "r") as zip_dataset: zip_dataset.extractall() -def create_source_target_data(data_dir, - species, - source_index=0, - target_index=-1, - survivor_select_index=-1, - particle_selection=None - ): + +def create_source_target_data( + data_dir, + species, + source_index=0, + target_index=-1, + survivor_select_index=-1, + particle_selection=None, +): """Create dataset from openPMD files Parameters @@ -60,35 +63,38 @@ def create_source_target_data(data_dir, target_stds: 6 element array of source particle coordinate standard deviations relevant times: 2 element array of source and target times """ - data_dir, = sanitize_dir_strings(data_dir) + (data_dir,) = sanitize_dir_strings(data_dir) data_path = data_dir - print('loading openPMD data from', data_path) + print("loading openPMD data from", data_path) ts = OpenPMDTimeSeries(data_path) relevant_times = [ts.t[source_index], ts.t[target_index]] # Manual: Particle tracking START iteration = ts.iterations[survivor_select_index] - pt = ParticleTracker( ts, - species=species, - iteration=iteration, - select=particle_selection) + pt = ParticleTracker( + ts, species=species, iteration=iteration, select=particle_selection + ) # Manual: Particle tracking END #### create normalized source, target data sets #### - print('creating data sets') + print("creating data sets") # Manual: Load openPMD START iteration = 
ts.iterations[source_index] - source_data = ts.get_particle(species=species, - iteration=iteration, - var_list=['x','y','z','ux','uy','uz'], - select=pt) + source_data = ts.get_particle( + species=species, + iteration=iteration, + var_list=["x", "y", "z", "ux", "uy", "uz"], + select=pt, + ) iteration = ts.iterations[target_index] - target_data = ts.get_particle(species=species, - iteration=iteration, - var_list=['x','y','z','ux','uy','uz'], - select=pt) + target_data = ts.get_particle( + species=species, + iteration=iteration, + var_list=["x", "y", "z", "ux", "uy", "uz"], + select=pt, + ) # Manual: Load openPMD END # Manual: Normalization START @@ -114,51 +120,75 @@ def create_source_target_data(data_dir, target_data = torch.tensor(np.column_stack(target_data)) # Manual: Format data END - return source_data, source_means, source_stds, target_data, target_means, target_stds, relevant_times + return ( + source_data, + source_means, + source_stds, + target_data, + target_means, + target_stds, + relevant_times, + ) -def save_warpx_surrogate_data(dataset_fullpath_filename, - diag_dir, - species, - training_frac, - batch_size, - source_index, - target_index, - survivor_select_index, - particle_selection=None - ): +def save_warpx_surrogate_data( + dataset_fullpath_filename, + diag_dir, + species, + training_frac, + batch_size, + source_index, + target_index, + survivor_select_index, + particle_selection=None, +): source_target_data = create_source_target_data( data_dir=diag_dir, species=species, source_index=source_index, target_index=target_index, survivor_select_index=survivor_select_index, - particle_selection=particle_selection + particle_selection=particle_selection, ) - source_data, source_means, source_stds, target_data, target_means, target_stds, times = source_target_data + ( + source_data, + source_means, + source_stds, + target_data, + target_means, + target_stds, + times, + ) = source_target_data # Manual: Save dataset START - full_dataset = 
torch.utils.data.TensorDataset(source_data.float(), target_data.float()) + full_dataset = torch.utils.data.TensorDataset( + source_data.float(), target_data.float() + ) n_samples = full_dataset.tensors[0].size(0) - n_train = int(training_frac*n_samples) + n_train = int(training_frac * n_samples) n_test = n_samples - n_train - train_data, test_data = torch.utils.data.random_split(full_dataset, [n_train, n_test]) - - torch.save({'dataset':full_dataset, - 'train_indices':train_data.indices, - 'test_indices':test_data.indices, - 'source_means':source_means, - 'source_stds':source_stds, - 'target_means':target_means, - 'target_stds':target_stds, - 'times':times, - }, - dataset_fullpath_filename - ) + train_data, test_data = torch.utils.data.random_split( + full_dataset, [n_train, n_test] + ) + + torch.save( + { + "dataset": full_dataset, + "train_indices": train_data.indices, + "test_indices": test_data.indices, + "source_means": source_means, + "source_stds": source_stds, + "target_means": target_means, + "target_stds": target_stds, + "times": times, + }, + dataset_fullpath_filename, + ) # Manual: Save dataset END + ######## end utility functions ############# ######## start dataset creation ############ @@ -171,38 +201,40 @@ def save_warpx_surrogate_data(dataset_fullpath_filename, source_index = 0 target_index = 1 survivor_select_index = 1 -batch_size=1200 +batch_size = 1200 training_frac = 0.7 -os.makedirs('datasets', exist_ok=True) +os.makedirs("datasets", exist_ok=True) # improve stage 0 dataset stage_i = 0 -select = {'z':[0.280025, None]} -species = f'beam_stage_{stage_i}' -dataset_filename = f'dataset_{species}.pt' -dataset_file = 'datasets/' + dataset_filename -save_warpx_surrogate_data(dataset_fullpath_filename=dataset_file, - diag_dir=data_dir, - species=species, - training_frac=training_frac, - batch_size=batch_size, - source_index=source_index, - target_index=target_index, - survivor_select_index=survivor_select_index, - particle_selection=select - ) - -for 
stage_i in range(1,15): - species = f'beam_stage_{stage_i}' - dataset_filename = f'dataset_{species}.pt' - dataset_file = 'datasets/' + dataset_filename - save_warpx_surrogate_data(dataset_fullpath_filename=dataset_file, - diag_dir=data_dir, - species=species, - training_frac=training_frac, - batch_size=batch_size, - source_index=source_index, - target_index=target_index, - survivor_select_index=survivor_select_index - ) +select = {"z": [0.280025, None]} +species = f"beam_stage_{stage_i}" +dataset_filename = f"dataset_{species}.pt" +dataset_file = "datasets/" + dataset_filename +save_warpx_surrogate_data( + dataset_fullpath_filename=dataset_file, + diag_dir=data_dir, + species=species, + training_frac=training_frac, + batch_size=batch_size, + source_index=source_index, + target_index=target_index, + survivor_select_index=survivor_select_index, + particle_selection=select, +) + +for stage_i in range(1, 15): + species = f"beam_stage_{stage_i}" + dataset_filename = f"dataset_{species}.pt" + dataset_file = "datasets/" + dataset_filename + save_warpx_surrogate_data( + dataset_fullpath_filename=dataset_file, + diag_dir=data_dir, + species=species, + training_frac=training_frac, + batch_size=batch_size, + source_index=source_index, + target_index=target_index, + survivor_select_index=survivor_select_index, + ) diff --git a/Docs/source/usage/workflows/ml_materials/neural_network_classes.py b/Docs/source/usage/workflows/ml_materials/neural_network_classes.py index 58b51a1d364..91090ffae3d 100644 --- a/Docs/source/usage/workflows/ml_materials/neural_network_classes.py +++ b/Docs/source/usage/workflows/ml_materials/neural_network_classes.py @@ -16,11 +16,13 @@ class ActivationType(Enum): """ Activation class provides an enumeration type for the supported activation layers """ + ReLU = 1 Tanh = 2 PReLU = 3 Sigmoid = 4 + def get_enum_type(type_to_test, EnumClass): """ Returns the enumeration type associated to type_to_test in EnumClass @@ -42,28 +44,25 @@ def 
get_enum_type(type_to_test, EnumClass): raise Exception("unsupported type entered") - class ConnectedNN(nn.Module): """ ConnectedNN is a class of fully connected neural networks """ + def __init__(self, layers): super().__init__() self.stack = nn.Sequential(*layers) + def forward(self, x): return self.stack(x) + class OneActNN(ConnectedNN): """ OneActNN is class of fully connected neural networks admitting only one activation function """ - def __init__(self, - n_in, - n_out, - n_hidden_nodes, - n_hidden_layers, - act): + def __init__(self, n_in, n_out, n_hidden_nodes, n_hidden_layers, act): self.n_in = n_in self.n_out = n_out self.n_hidden_layers = n_hidden_layers @@ -84,7 +83,7 @@ def __init__(self, layers += [nn.Sigmoid()] if ii < self.n_hidden_layers - 1: - layers += [nn.Linear(self.n_hidden_nodes,self.n_hidden_nodes)] + layers += [nn.Linear(self.n_hidden_nodes, self.n_hidden_nodes)] layers += [nn.Linear(self.n_hidden_nodes, self.n_out)] diff --git a/Docs/source/usage/workflows/ml_materials/run_warpx_training.py b/Docs/source/usage/workflows/ml_materials/run_warpx_training.py index 054c1b4cfc0..9e6b5682ec7 100644 --- a/Docs/source/usage/workflows/ml_materials/run_warpx_training.py +++ b/Docs/source/usage/workflows/ml_materials/run_warpx_training.py @@ -13,66 +13,68 @@ ep0 = picmi.constants.ep0 # Number of cells -dim = '3' +dim = "3" nx = ny = 128 -nz = 35328 #17664 #8832 -if dim == 'rz': - nr = nx//2 +nz = 35328 # 17664 #8832 +if dim == "rz": + nr = nx // 2 # Computational domain -rmin = 0. -rmax = 128e-6 +rmin = 0.0 +rmax = 128e-6 zmin = -180e-6 -zmax = 0. +zmax = 0.0 # Number of processes for static load balancing # Check with your submit script -num_procs = [1, 1, 64*4] -if dim == 'rz': +num_procs = [1, 1, 64 * 4] +if dim == "rz": num_procs = [1, 64] # Number of time steps -gamma_boost = 60. 
-beta_boost = np.sqrt(1.-gamma_boost**-2) +gamma_boost = 60.0 +beta_boost = np.sqrt(1.0 - gamma_boost**-2) # Create grid -if dim == 'rz': +if dim == "rz": grid = picmi.CylindricalGrid( number_of_cells=[nr, nz], guard_cells=[32, 32], n_azimuthal_modes=2, lower_bound=[rmin, zmin], upper_bound=[rmax, zmax], - lower_boundary_conditions=['none', 'damped'], - upper_boundary_conditions=['none', 'damped'], - lower_boundary_conditions_particles=['absorbing', 'absorbing'], - upper_boundary_conditions_particles=['absorbing', 'absorbing'], - moving_window_velocity=[0., c], + lower_boundary_conditions=["none", "damped"], + upper_boundary_conditions=["none", "damped"], + lower_boundary_conditions_particles=["absorbing", "absorbing"], + upper_boundary_conditions_particles=["absorbing", "absorbing"], + moving_window_velocity=[0.0, c], warpx_max_grid_size=256, - warpx_blocking_factor=64) + warpx_blocking_factor=64, + ) else: grid = picmi.Cartesian3DGrid( number_of_cells=[nx, ny, nz], guard_cells=[11, 11, 12], lower_bound=[-rmax, -rmax, zmin], upper_bound=[rmax, rmax, zmax], - lower_boundary_conditions=['periodic', 'periodic', 'damped'], - upper_boundary_conditions=['periodic', 'periodic', 'damped'], - lower_boundary_conditions_particles=['periodic', 'periodic', 'absorbing'], - upper_boundary_conditions_particles=['periodic', 'periodic', 'absorbing'], - moving_window_velocity=[0., 0., c], + lower_boundary_conditions=["periodic", "periodic", "damped"], + upper_boundary_conditions=["periodic", "periodic", "damped"], + lower_boundary_conditions_particles=["periodic", "periodic", "absorbing"], + upper_boundary_conditions_particles=["periodic", "periodic", "absorbing"], + moving_window_velocity=[0.0, 0.0, c], warpx_max_grid_size=256, - warpx_blocking_factor=32) + warpx_blocking_factor=32, + ) # plasma region -plasma_rlim = 100.e-6 +plasma_rlim = 100.0e-6 N_stage = 15 L_plasma_bulk = 0.28 -L_ramp = 1.e-9 +L_ramp = 1.0e-9 L_ramp_up = L_ramp L_ramp_down = L_ramp -L_stage = L_plasma_bulk + 
2*L_ramp +L_stage = L_plasma_bulk + 2 * L_ramp # focusing # lens external fields @@ -80,124 +82,144 @@ lens_focal_length = 0.015 lens_width = 0.003 -stage_spacing = L_plasma_bulk + 2*lens_focal_length - -def get_species_of_accelerator_stage(stage_idx, stage_zmin, stage_zmax, - stage_xmin=-plasma_rlim, stage_xmax=plasma_rlim, - stage_ymin=-plasma_rlim, stage_ymax=plasma_rlim, - Lplus = L_ramp_up, Lp = L_plasma_bulk, - Lminus = L_ramp_down): +stage_spacing = L_plasma_bulk + 2 * lens_focal_length + + +def get_species_of_accelerator_stage( + stage_idx, + stage_zmin, + stage_zmax, + stage_xmin=-plasma_rlim, + stage_xmax=plasma_rlim, + stage_ymin=-plasma_rlim, + stage_ymax=plasma_rlim, + Lplus=L_ramp_up, + Lp=L_plasma_bulk, + Lminus=L_ramp_down, +): # Parabolic density profile n0 = 1.7e23 - Rc = 40.e-6 + Rc = 40.0e-6 Lstage = Lplus + Lp + Lminus - if not np.isclose(stage_zmax-stage_zmin, Lstage): - print('Warning: zmax disagrees with stage length') + if not np.isclose(stage_zmax - stage_zmin, Lstage): + print("Warning: zmax disagrees with stage length") parabolic_distribution = picmi.AnalyticDistribution( - density_expression= - f'n0*(1.+4.*(x**2+y**2)/(kp**2*Rc**4))*(0.5*(1.-cos(pi*(z-{stage_zmin})/Lplus)))*((z-{stage_zmin})=Lplus)*((z-{stage_zmin})<(Lplus+Lp))' \ - + f'+n0*(1.+4.*(x**2+y**2)/(kp**2*Rc**4))*(0.5*(1.+cos(pi*((z-{stage_zmin})-Lplus-Lp)/Lminus)))*((z-{stage_zmin})>=(Lplus+Lp))*((z-{stage_zmin})<(Lplus+Lp+Lminus))', + density_expression=f"n0*(1.+4.*(x**2+y**2)/(kp**2*Rc**4))*(0.5*(1.-cos(pi*(z-{stage_zmin})/Lplus)))*((z-{stage_zmin})=Lplus)*((z-{stage_zmin})<(Lplus+Lp))" + + f"+n0*(1.+4.*(x**2+y**2)/(kp**2*Rc**4))*(0.5*(1.+cos(pi*((z-{stage_zmin})-Lplus-Lp)/Lminus)))*((z-{stage_zmin})>=(Lplus+Lp))*((z-{stage_zmin})<(Lplus+Lp+Lminus))", pi=3.141592653589793, n0=n0, - kp=q_e/c*math.sqrt(n0/(m_e*ep0)), + kp=q_e / c * math.sqrt(n0 / (m_e * ep0)), Rc=Rc, Lplus=Lplus, Lp=Lp, Lminus=Lminus, lower_bound=[stage_xmin, stage_ymin, stage_zmin], upper_bound=[stage_xmax, 
stage_ymax, stage_zmax], - fill_in=True) + fill_in=True, + ) electrons = picmi.Species( - particle_type='electron', - name=f'electrons{stage_idx}', - initial_distribution=parabolic_distribution) + particle_type="electron", + name=f"electrons{stage_idx}", + initial_distribution=parabolic_distribution, + ) ions = picmi.Species( - particle_type='proton', - name=f'ions{stage_idx}', - initial_distribution=parabolic_distribution) + particle_type="proton", + name=f"ions{stage_idx}", + initial_distribution=parabolic_distribution, + ) return electrons, ions + species_list = [] for i_stage in range(1): # Add plasma zmin_stage = i_stage * stage_spacing zmax_stage = zmin_stage + L_stage - electrons, ions = get_species_of_accelerator_stage(i_stage+1, zmin_stage, zmax_stage) + electrons, ions = get_species_of_accelerator_stage( + i_stage + 1, zmin_stage, zmax_stage + ) species_list.append(electrons) species_list.append(ions) # add beam to species_list -beam_charge = -10.e-15 # in Coulombs +beam_charge = -10.0e-15 # in Coulombs N_beam_particles = int(1e6) -beam_centroid_z = -107.e-6 -beam_rms_z = 2.e-6 +beam_centroid_z = -107.0e-6 +beam_rms_z = 2.0e-6 beam_gammas = [1960 + 13246 * i_stage for i_stage in range(N_stage)] -#beam_gammas = [1957, 15188, 28432, 41678, 54926, 68174, 81423,94672, 107922,121171] # From 3D run +# beam_gammas = [1957, 15188, 28432, 41678, 54926, 68174, 81423,94672, 107922,121171] # From 3D run beams = [] for i_stage in range(N_stage): beam_gamma = beam_gammas[i_stage] sigma_gamma = 0.06 * beam_gamma gaussian_distribution = picmi.GaussianBunchDistribution( - n_physical_particles= abs(beam_charge) / q_e, - rms_bunch_size=[2.e-6, 2.e-6, beam_rms_z], - rms_velocity=[8*c, 8*c, sigma_gamma*c], - centroid_position=[0., 0., beam_centroid_z], - centroid_velocity=[0., 0., beam_gamma*c], + n_physical_particles=abs(beam_charge) / q_e, + rms_bunch_size=[2.0e-6, 2.0e-6, beam_rms_z], + rms_velocity=[8 * c, 8 * c, sigma_gamma * c], + centroid_position=[0.0, 0.0, 
beam_centroid_z], + centroid_velocity=[0.0, 0.0, beam_gamma * c], ) beam = picmi.Species( - particle_type='electron', - name=f'beam_stage_{i_stage}', - initial_distribution= gaussian_distribution + particle_type="electron", + name=f"beam_stage_{i_stage}", + initial_distribution=gaussian_distribution, ) beams.append(beam) # Laser antenna_z = -1e-9 profile_t_peak = 1.46764864e-13 + + def get_laser(antenna_z, profile_t_peak, fill_in=True): - profile_focal_distance = 0. + profile_focal_distance = 0.0 laser = picmi.GaussianLaser( wavelength=0.8e-06, waist=36e-06, duration=7.33841e-14, - focal_position=[0., 0., profile_focal_distance + antenna_z], - centroid_position=[0., 0., antenna_z - c*profile_t_peak], - propagation_direction=[0., 0., 1.], - polarization_direction=[0., 1., 0.], + focal_position=[0.0, 0.0, profile_focal_distance + antenna_z], + centroid_position=[0.0, 0.0, antenna_z - c * profile_t_peak], + propagation_direction=[0.0, 0.0, 1.0], + polarization_direction=[0.0, 1.0, 0.0], a0=2.36, - fill_in=fill_in) + fill_in=fill_in, + ) laser_antenna = picmi.LaserAntenna( - position=[0., 0., antenna_z], - normal_vector=[0., 0., 1.]) + position=[0.0, 0.0, antenna_z], normal_vector=[0.0, 0.0, 1.0] + ) return (laser, laser_antenna) + + lasers = [] for i_stage in range(1): fill_in = True if i_stage == 0: fill_in = False lasers.append( - get_laser(antenna_z + i_stage*stage_spacing, - profile_t_peak + i_stage*stage_spacing/c, - fill_in) + get_laser( + antenna_z + i_stage * stage_spacing, + profile_t_peak + i_stage * stage_spacing / c, + fill_in, + ) ) # Electromagnetic solver -psatd_algo = 'multij' -if psatd_algo == 'galilean': - galilean_velocity = [0.,0.] if dim=='3' else [0.] 
- galilean_velocity += [-c*beta_boost] +psatd_algo = "multij" +if psatd_algo == "galilean": + galilean_velocity = [0.0, 0.0] if dim == "3" else [0.0] + galilean_velocity += [-c * beta_boost] n_pass_z = 1 do_multiJ = None - do_multi_J_n_depositions=None + do_multi_J_n_depositions = None J_in_time = None current_correction = True divE_cleaning = False -elif psatd_algo == 'multij': +elif psatd_algo == "multij": n_pass_z = 4 galilean_velocity = None do_multiJ = True @@ -206,21 +228,23 @@ def get_laser(antenna_z, profile_t_peak, fill_in=True): current_correction = False divE_cleaning = True else: - raise Exception(f'PSATD algorithm \'{psatd_algo}\' is not recognized!\n'\ - 'Valid options are \'multiJ\' or \'galilean\'.') -if dim == 'rz': + raise Exception( + f"PSATD algorithm '{psatd_algo}' is not recognized!\n" + "Valid options are 'multiJ' or 'galilean'." + ) +if dim == "rz": stencil_order = [8, 16] - smoother = picmi.BinomialSmoother(n_pass=[1,n_pass_z]) - grid_type = 'collocated' + smoother = picmi.BinomialSmoother(n_pass=[1, n_pass_z]) + grid_type = "collocated" else: stencil_order = [8, 8, 16] - smoother = picmi.BinomialSmoother(n_pass=[1,1,n_pass_z]) - grid_type = 'hybrid' + smoother = picmi.BinomialSmoother(n_pass=[1, 1, n_pass_z]) + grid_type = "hybrid" solver = picmi.ElectromagneticSolver( grid=grid, - method='PSATD', + method="PSATD", cfl=0.9999, source_smoother=smoother, stencil_order=stencil_order, @@ -228,63 +252,68 @@ def get_laser(antenna_z, profile_t_peak, fill_in=True): warpx_psatd_update_with_rho=True, warpx_current_correction=current_correction, divE_cleaning=divE_cleaning, - warpx_psatd_J_in_time=J_in_time - ) + warpx_psatd_J_in_time=J_in_time, +) # Diagnostics -diag_field_list = ['B', 'E', 'J', 'rho'] -diag_particle_list = ['weighting','position','momentum'] -coarse_btd_end = int((L_plasma_bulk+0.001+stage_spacing*(N_stage-1))*100000) 
-stage_end_snapshots=[f'{int((L_plasma_bulk+stage_spacing*ii)*100000)}:{int((L_plasma_bulk+stage_spacing*ii)*100000+50)}:5' for ii in range(1)] +diag_field_list = ["B", "E", "J", "rho"] +diag_particle_list = ["weighting", "position", "momentum"] +coarse_btd_end = int((L_plasma_bulk + 0.001 + stage_spacing * (N_stage - 1)) * 100000) +stage_end_snapshots = [ + f"{int((L_plasma_bulk+stage_spacing*ii)*100000)}:{int((L_plasma_bulk+stage_spacing*ii)*100000+50)}:5" + for ii in range(1) +] btd_particle_diag = picmi.LabFrameParticleDiagnostic( - name='lab_particle_diags', + name="lab_particle_diags", species=beams, grid=grid, - num_snapshots=25*N_stage, - #warpx_intervals=', '.join([f':{coarse_btd_end}:1000']+stage_end_snapshots), - warpx_intervals=', '.join(['0:0']+stage_end_snapshots), - dt_snapshots=0.00001/c, + num_snapshots=25 * N_stage, + # warpx_intervals=', '.join([f':{coarse_btd_end}:1000']+stage_end_snapshots), + warpx_intervals=", ".join(["0:0"] + stage_end_snapshots), + dt_snapshots=0.00001 / c, data_list=diag_particle_list, - write_dir='lab_particle_diags', - warpx_format='openpmd', - warpx_openpmd_backend='bp') + write_dir="lab_particle_diags", + warpx_format="openpmd", + warpx_openpmd_backend="bp", +) btd_field_diag = picmi.LabFrameFieldDiagnostic( - name='lab_field_diags', + name="lab_field_diags", grid=grid, - num_snapshots=25*N_stage, - dt_snapshots=stage_spacing/25/c, + num_snapshots=25 * N_stage, + dt_snapshots=stage_spacing / 25 / c, data_list=diag_field_list, - warpx_lower_bound=[-128.e-6, 0.e-6, -180.e-6], - warpx_upper_bound=[128.e-6, 0.e-6, 0.], - write_dir='lab_field_diags', - warpx_format='openpmd', - warpx_openpmd_backend='bp') + warpx_lower_bound=[-128.0e-6, 0.0e-6, -180.0e-6], + warpx_upper_bound=[128.0e-6, 0.0e-6, 0.0], + write_dir="lab_field_diags", + warpx_format="openpmd", + warpx_openpmd_backend="bp", +) field_diag = picmi.FieldDiagnostic( - name='field_diags', + name="field_diags", data_list=diag_field_list, grid=grid, period=100, - 
write_dir='field_diags', - lower_bound=[-128.e-6, 0.e-6, -180.e-6], - upper_bound=[128.e-6, 0.e-6, 0.], - warpx_format='openpmd', - warpx_openpmd_backend='h5') + write_dir="field_diags", + lower_bound=[-128.0e-6, 0.0e-6, -180.0e-6], + upper_bound=[128.0e-6, 0.0e-6, 0.0], + warpx_format="openpmd", + warpx_openpmd_backend="h5", +) particle_diag = picmi.ParticleDiagnostic( - name='particle_diags', + name="particle_diags", species=beams, period=100, - write_dir='particle_diags', - warpx_format='openpmd', - warpx_openpmd_backend='h5') + write_dir="particle_diags", + warpx_format="openpmd", + warpx_openpmd_backend="h5", +) beamrel_red_diag = picmi.ReducedDiagnostic( - diag_type='BeamRelevant', - name='beamrel', - species=beam, - period=1) + diag_type="BeamRelevant", name="beamrel", species=beam, period=1 +) # Set up simulation sim = picmi.Simulation( @@ -292,40 +321,42 @@ def get_laser(antenna_z, profile_t_peak, fill_in=True): warpx_numprocs=num_procs, warpx_compute_max_step_from_btd=True, verbose=2, - particle_shape='cubic', + particle_shape="cubic", gamma_boost=gamma_boost, - warpx_charge_deposition_algo='standard', - warpx_current_deposition_algo='direct', - warpx_field_gathering_algo='momentum-conserving', - warpx_particle_pusher_algo='vay', + warpx_charge_deposition_algo="standard", + warpx_current_deposition_algo="direct", + warpx_field_gathering_algo="momentum-conserving", + warpx_particle_pusher_algo="vay", warpx_amrex_the_arena_is_managed=False, warpx_amrex_use_gpu_aware_mpi=True, warpx_do_multi_J=do_multiJ, warpx_do_multi_J_n_depositions=do_multi_J_n_depositions, warpx_grid_type=grid_type, # default: 2 for staggered grids, 8 for hybrid grids - warpx_field_centering_order=[16,16,16], + warpx_field_centering_order=[16, 16, 16], # only for hybrid grids, default: 8 - warpx_current_centering_order=[16,16,16] - ) + warpx_current_centering_order=[16, 16, 16], +) for species in species_list: - if dim=='rz': - n_macroparticle_per_cell=[2,4,2] + if dim == "rz": + 
n_macroparticle_per_cell = [2, 4, 2] else: - n_macroparticle_per_cell=[2,2,2] + n_macroparticle_per_cell = [2, 2, 2] sim.add_species( species, - layout=picmi.GriddedLayout(grid=grid, - n_macroparticle_per_cell=n_macroparticle_per_cell) + layout=picmi.GriddedLayout( + grid=grid, n_macroparticle_per_cell=n_macroparticle_per_cell + ), ) for i_stage in range(N_stage): sim.add_species_through_plane( species=beams[i_stage], layout=picmi.PseudoRandomLayout(grid=grid, n_macroparticles=N_beam_particles), - injection_plane_position=0., - injection_plane_normal_vector=[0.,0.,1.]) + injection_plane_position=0.0, + injection_plane_normal_vector=[0.0, 0.0, 1.0], + ) for i_stage in range(1): # Add laser @@ -334,14 +365,14 @@ def get_laser(antenna_z, profile_t_peak, fill_in=True): # Add diagnostics sim.add_diagnostic(btd_particle_diag) -#sim.add_diagnostic(btd_field_diag) -#sim.add_diagnostic(field_diag) -#sim.add_diagnostic(particle_diag) +# sim.add_diagnostic(btd_field_diag) +# sim.add_diagnostic(field_diag) +# sim.add_diagnostic(particle_diag) # Add reduced diagnostic sim.add_diagnostic(beamrel_red_diag) -sim.write_input_file(f'inputs_training_{N_stage}_stages') +sim.write_input_file(f"inputs_training_{N_stage}_stages") # Advance simulation until last time step sim.step() diff --git a/Docs/source/usage/workflows/ml_materials/train.py b/Docs/source/usage/workflows/ml_materials/train.py index 23b1d0abcd4..957a652e0c4 100644 --- a/Docs/source/usage/workflows/ml_materials/train.py +++ b/Docs/source/usage/workflows/ml_materials/train.py @@ -18,7 +18,7 @@ ############# set model parameters ################# stage_i = 0 -species = f'beam_stage_{stage_i}' +species = f"beam_stage_{stage_i}" source_index = 0 target_index = 1 survivor_select_index = 1 @@ -35,36 +35,52 @@ n_hidden_nodes = 20 n_hidden_layers = 3 -activation_type = 'ReLU' +activation_type = "ReLU" -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -print(f'device={device}') +device = torch.device("cuda" 
if torch.cuda.is_available() else "cpu") +print(f"device={device}") #################### load dataset ################ -dataset_filename = f'dataset_{species}.pt' -dataset_file = 'datasets/' + dataset_filename +dataset_filename = f"dataset_{species}.pt" +dataset_file = "datasets/" + dataset_filename print(f"trying to load dataset+test-train split in {dataset_file}") dataset_with_indices = torch.load(dataset_file) -train_data = torch.utils.data.dataset.Subset(dataset_with_indices['dataset'], dataset_with_indices['train_indices']) -test_data = torch.utils.data.dataset.Subset(dataset_with_indices['dataset'], dataset_with_indices['test_indices']) -source_data = dataset_with_indices['dataset'] -source_means = dataset_with_indices['source_means'] -source_stds = dataset_with_indices['source_stds'] -target_means = dataset_with_indices['target_means'] -target_stds = dataset_with_indices['target_stds'] +train_data = torch.utils.data.dataset.Subset( + dataset_with_indices["dataset"], dataset_with_indices["train_indices"] +) +test_data = torch.utils.data.dataset.Subset( + dataset_with_indices["dataset"], dataset_with_indices["test_indices"] +) +source_data = dataset_with_indices["dataset"] +source_means = dataset_with_indices["source_means"] +source_stds = dataset_with_indices["source_stds"] +target_means = dataset_with_indices["target_means"] +target_stds = dataset_with_indices["target_stds"] print("able to load data and test/train split") ###### move data to device (GPU) if available ######## -source_device = train_data.dataset.tensors[0].to(device) # equivalently, test_data.tensors[0].to(device) +source_device = train_data.dataset.tensors[0].to( + device +) # equivalently, test_data.tensors[0].to(device) target_device = train_data.dataset.tensors[1].to(device) -full_dataset_device = torch.utils.data.TensorDataset(source_device.float(), target_device.float()) - -train_data_device = torch.utils.data.dataset.Subset(full_dataset_device, train_data.indices) -test_data_device = 
torch.utils.data.dataset.Subset(full_dataset_device, test_data.indices) - -train_loader_device = torch.utils.data.DataLoader(train_data_device, batch_size=batch_size, shuffle=True) -test_loader_device = torch.utils.data.DataLoader(test_data_device, batch_size=batch_size, shuffle=True) +full_dataset_device = torch.utils.data.TensorDataset( + source_device.float(), target_device.float() +) + +train_data_device = torch.utils.data.dataset.Subset( + full_dataset_device, train_data.indices +) +test_data_device = torch.utils.data.dataset.Subset( + full_dataset_device, test_data.indices +) + +train_loader_device = torch.utils.data.DataLoader( + train_data_device, batch_size=batch_size, shuffle=True +) +test_loader_device = torch.utils.data.DataLoader( + test_data_device, batch_size=batch_size, shuffle=True +) test_source_device = test_data_device.dataset.tensors[0] test_target_device = test_data_device.dataset.tensors[1] @@ -74,12 +90,13 @@ ###### create model ########### -model = mynn.OneActNN(n_in = n_in, - n_out = n_out, - n_hidden_nodes=n_hidden_nodes, - n_hidden_layers = n_hidden_layers, - act=activation_type - ) +model = mynn.OneActNN( + n_in=n_in, + n_out=n_out, + n_hidden_nodes=n_hidden_nodes, + n_hidden_layers=n_hidden_layers, + act=activation_type, +) training_time = 0 train_loss_list = [] @@ -87,40 +104,47 @@ model.to(device=device) + ########## train and test functions #### # Manual: Train function START def train(model, optimizer, train_loader, loss_fun): model.train() - total_loss = 0. 
+ total_loss = 0.0 for batch_idx, (data, target) in enumerate(train_loader): - #evaluate network with data + # evaluate network with data output = model(data) - #compute loss - # sum the differences squared, take mean afterward - loss = loss_fun(output, target,reduction='sum') - #backpropagation: step optimizer and reset gradients + # compute loss + # sum the differences squared, take mean afterward + loss = loss_fun(output, target, reduction="sum") + # backpropagation: step optimizer and reset gradients loss.backward() optimizer.step() optimizer.zero_grad() total_loss += loss.item() return total_loss + + # Manual: Train function END + def test(model, test_loader, loss_fun): model.eval() - total_loss = 0. + total_loss = 0.0 with torch.no_grad(): for batch_idx, (data, target) in enumerate(test_loader): output = model(data) - total_loss += loss_fun(output, target, reduction='sum').item() + total_loss += loss_fun(output, target, reduction="sum").item() return total_loss + # Manual: Test function START def test_dataset(model, test_source, test_target, loss_fun): model.eval() with torch.no_grad(): output = model(test_source) - return loss_fun(output, test_target, reduction='sum').item() + return loss_fun(output, test_target, reduction="sum").item() + + # Manual: Test function END ######## training loop ######## @@ -134,33 +158,47 @@ def test_dataset(model, test_source, test_target, loss_fun): for epoch in range(n_epochs): if do_print: t1 = time.time() - ave_train_loss = train(model, optimizer, train_loader_device, loss_fun) / data_dim / training_set_size - ave_test_loss = test_dataset(model, test_source_device, test_target_device, loss_fun) / data_dim / training_set_size + ave_train_loss = ( + train(model, optimizer, train_loader_device, loss_fun) + / data_dim + / training_set_size + ) + ave_test_loss = ( + test_dataset(model, test_source_device, test_target_device, loss_fun) + / data_dim + / training_set_size + ) train_loss_list.append(ave_train_loss) 
test_loss_list.append(ave_test_loss) if do_print: t2 = time.time() - print('Train Epoch: {:04d} \tTrain Loss: {:.6f} \tTest Loss: {:.6f}, this epoch: {:.3f} s'.format( - epoch + 1, ave_train_loss, ave_test_loss, t2-t1)) + print( + "Train Epoch: {:04d} \tTrain Loss: {:.6f} \tTest Loss: {:.6f}, this epoch: {:.3f} s".format( + epoch + 1, ave_train_loss, ave_test_loss, t2 - t1 + ) + ) # Manual: Training loop END t4 = time.time() -print(f'total training time: {t4-t3:.3f}s') +print(f"total training time: {t4-t3:.3f}s") ######### save model ######### -os.makedirs('models', exist_ok=True) +os.makedirs("models", exist_ok=True) # Manual: Save model START -model.to(device='cpu') -torch.save({ - 'n_hidden_layers':n_hidden_layers, - 'n_hidden_nodes':n_hidden_nodes, - 'activation':activation_type, - 'model_state_dict': model.state_dict(), - 'optimizer_state_dict': optimizer.state_dict(), - 'train_loss_list': train_loss_list, - 'test_loss_list': test_loss_list, - 'training_time': training_time, - }, f'models/{species}_model.pt') +model.to(device="cpu") +torch.save( + { + "n_hidden_layers": n_hidden_layers, + "n_hidden_nodes": n_hidden_nodes, + "activation": activation_type, + "model_state_dict": model.state_dict(), + "optimizer_state_dict": optimizer.state_dict(), + "train_loss_list": train_loss_list, + "test_loss_list": test_loss_list, + "training_time": training_time, + }, + f"models/{species}_model.pt", +) # Manual: Save model END diff --git a/Docs/source/usage/workflows/ml_materials/visualize.py b/Docs/source/usage/workflows/ml_materials/visualize.py index e9f6128b84d..38bce78a91d 100644 --- a/Docs/source/usage/workflows/ml_materials/visualize.py +++ b/Docs/source/usage/workflows/ml_materials/visualize.py @@ -18,67 +18,70 @@ # open model file stage_i = 0 -species = f'beam_stage_{stage_i}' -model_data = torch.load(f'models/{species}_model.pt',map_location=torch.device('cpu')) +species = f"beam_stage_{stage_i}" +model_data = torch.load(f"models/{species}_model.pt", 
map_location=torch.device("cpu")) data_dim = 6 n_in = data_dim n_out = data_dim -n_hidden_layers = model_data['n_hidden_layers'] -n_hidden_nodes = model_data['n_hidden_nodes'] -activation_type = model_data['activation'] -train_loss_list = model_data['train_loss_list'] -test_loss_list = model_data['test_loss_list'] -training_time = model_data['training_time'] +n_hidden_layers = model_data["n_hidden_layers"] +n_hidden_nodes = model_data["n_hidden_nodes"] +activation_type = model_data["activation"] +train_loss_list = model_data["train_loss_list"] +test_loss_list = model_data["test_loss_list"] +training_time = model_data["training_time"] loss_fun = F.mse_loss n_epochs = len(train_loss_list) -train_counter = np.arange(n_epochs)+1 +train_counter = np.arange(n_epochs) + 1 test_counter = train_counter do_log_plot = False fig, ax = plt.subplots() if do_log_plot: - ax.semilogy(train_counter, train_loss_list, '.-',color='blue',label='training loss') - ax.semilogy(test_counter, test_loss_list, color='green',label='testing loss') + ax.semilogy( + train_counter, train_loss_list, ".-", color="blue", label="training loss" + ) + ax.semilogy(test_counter, test_loss_list, color="green", label="testing loss") else: - ax.plot(train_counter, train_loss_list, '.-',color='blue',label='training loss') - ax.plot(test_counter, test_loss_list, color='green',label='testing loss') -ax.set_xlabel('number of epochs seen') -ax.set_ylabel(' loss') + ax.plot(train_counter, train_loss_list, ".-", color="blue", label="training loss") + ax.plot(test_counter, test_loss_list, color="green", label="testing loss") +ax.set_xlabel("number of epochs seen") +ax.set_ylabel(" loss") ax.legend() -fig_dir = 'figures/' -ax.set_title(f'final test error = {test_loss_list[-1]:.3e} ') +fig_dir = "figures/" +ax.set_title(f"final test error = {test_loss_list[-1]:.3e} ") ax.grid() plt.tight_layout() -plt.savefig(f'{species}_training_testing_error.png') +plt.savefig(f"{species}_training_testing_error.png") ######### plot 
phase space comparison ####### -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -print(f'device={device}') - -model = mynn.OneActNN(n_in = n_in, - n_out = n_out, - n_hidden_nodes=n_hidden_nodes, - n_hidden_layers = n_hidden_layers, - act = activation_type - ) -model.load_state_dict(model_data['model_state_dict']) +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +print(f"device={device}") + +model = mynn.OneActNN( + n_in=n_in, + n_out=n_out, + n_hidden_nodes=n_hidden_nodes, + n_hidden_layers=n_hidden_layers, + act=activation_type, +) +model.load_state_dict(model_data["model_state_dict"]) model.to(device=device) ###### load model data ############### -dataset_filename = f'dataset_{species}.pt' -dataset_dir = 'datasets/' +dataset_filename = f"dataset_{species}.pt" +dataset_dir = "datasets/" model_input_data = torch.load(dataset_dir + dataset_filename) -dataset = model_input_data['dataset'] -train_indices = model_input_data['train_indices'] -test_indices = model_input_data['test_indices'] -source_means = model_input_data['source_means'] -source_stds = model_input_data['source_stds'] -target_means = model_input_data['target_means'] -target_stds = model_input_data['target_stds'] -source_time, target_time = model_input_data['times'] +dataset = model_input_data["dataset"] +train_indices = model_input_data["train_indices"] +test_indices = model_input_data["test_indices"] +source_means = model_input_data["source_means"] +source_stds = model_input_data["source_stds"] +target_means = model_input_data["target_means"] +target_stds = model_input_data["target_stds"] +source_time, target_time = model_input_data["times"] source = dataset.tensors[0] @@ -86,7 +89,7 @@ test_source_device = test_source.to(device) with torch.no_grad(): evaluation_device = model(test_source_device.float()) -eval_cpu = evaluation_device.to('cpu') +eval_cpu = evaluation_device.to("cpu") target = dataset.tensors[1] test_target = target[test_indices] @@ -95,64 
+98,89 @@ eval_cpu_si = eval_cpu * target_stds + target_means target_mu = np.copy(target_si) eval_cpu_mu = np.copy(eval_cpu_si) -target_mu[:,2] -= c*target_time -eval_cpu_mu[:,2] -= c*target_time -target_mu[:,:3] *= 1e6 -eval_cpu_mu[:,:3] *= 1e6 +target_mu[:, 2] -= c * target_time +eval_cpu_mu[:, 2] -= c * target_time +target_mu[:, :3] *= 1e6 +eval_cpu_mu[:, :3] *= 1e6 - -loss_tensor = torch.sum(loss_fun(eval_cpu, - test_target, - reduction='none'), - axis=1)/6 +loss_tensor = torch.sum(loss_fun(eval_cpu, test_target, reduction="none"), axis=1) / 6 loss_array = loss_tensor.detach().numpy() tinds = np.nonzero(loss_array > 0.0)[0] skip = 10 plt.figure() -fig, axT = plt.subplots(3,3) -axes_label = {0:r'x [$\mu$m]', 1:r'y [$\mu$m]', 2:r'z - %.2f cm [$\mu$m]'%(c*target_time),3:r'$p_x$',4:r'$p_y$',5:r'$p_z$'} -xy_inds = [(0,1),(2,0),(2,1)] +fig, axT = plt.subplots(3, 3) +axes_label = { + 0: r"x [$\mu$m]", + 1: r"y [$\mu$m]", + 2: r"z - %.2f cm [$\mu$m]" % (c * target_time), + 3: r"$p_x$", + 4: r"$p_y$", + 5: r"$p_z$", +} +xy_inds = [(0, 1), (2, 0), (2, 1)] + + def set_axes(ax, indx, indy): - ax.scatter(target_mu[::skip,indx],target_mu[::skip,indy],s=8,c='k', label='simulation') - ax.scatter(eval_cpu_mu[::skip,indx],eval_cpu_mu[::skip,indy],marker='*',c=loss_array[::skip],s=0.02, label='surrogate',cmap='YlOrRd') + ax.scatter( + target_mu[::skip, indx], target_mu[::skip, indy], s=8, c="k", label="simulation" + ) + ax.scatter( + eval_cpu_mu[::skip, indx], + eval_cpu_mu[::skip, indy], + marker="*", + c=loss_array[::skip], + s=0.02, + label="surrogate", + cmap="YlOrRd", + ) ax.set_xlabel(axes_label[indx]) ax.set_ylabel(axes_label[indy]) # return for ii in range(3): - ax = axT[0,ii] - indx,indy = xy_inds[ii] - set_axes(ax,indx,indy) + ax = axT[0, ii] + indx, indy = xy_inds[ii] + set_axes(ax, indx, indy) for ii in range(2): - indx,indy = xy_inds[ii] - ax = axT[1,ii] - set_axes(ax,indx+3,indy+3) + indx, indy = xy_inds[ii] + ax = axT[1, ii] + set_axes(ax, indx + 3, indy + 3) for 
ii in range(3): - ax = axT[2,ii] + ax = axT[2, ii] indx = ii - indy = ii+3 + indy = ii + 3 set_axes(ax, indx, indy) -ax = axT[1,2] +ax = axT[1, 2] indx = 5 indy = 4 -ax.scatter(target_mu[::skip,indx],target_mu[::skip,indy],s=8,c='k', label='simulation') -evalplt = ax.scatter(eval_cpu_mu[::skip,indx],eval_cpu_mu[::skip,indy],marker='*',c=loss_array[::skip],s=2, label='surrogate',cmap='YlOrRd') +ax.scatter( + target_mu[::skip, indx], target_mu[::skip, indy], s=8, c="k", label="simulation" +) +evalplt = ax.scatter( + eval_cpu_mu[::skip, indx], + eval_cpu_mu[::skip, indy], + marker="*", + c=loss_array[::skip], + s=2, + label="surrogate", + cmap="YlOrRd", +) ax.set_xlabel(axes_label[indx]) ax.set_ylabel(axes_label[indy]) cb = plt.colorbar(evalplt, ax=ax) -cb.set_label('MSE loss') +cb.set_label("MSE loss") -fig.suptitle(f'stage {stage_i} prediction') +fig.suptitle(f"stage {stage_i} prediction") plt.tight_layout() -plt.savefig(f'{species}_model_evaluation.png') +plt.savefig(f"{species}_model_evaluation.png") diff --git a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py index 93f01e237ed..2477eaf68dd 100644 --- a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py +++ b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py @@ -19,8 +19,8 @@ class PoissonSolver1D(picmi.ElectrostaticSolver): """This solver is maintained as an example of the use of Python callbacks. - However, it is not necessarily needed since the 1D code has the direct tridiagonal - solver implemented.""" + However, it is not necessarily needed since the 1D code has the direct tridiagonal + solver implemented.""" def __init__(self, grid, **kwargs): """Direct solver for the Poisson equation using superLU. 
This solver is @@ -32,11 +32,13 @@ def __init__(self, grid, **kwargs): """ # Sanity check that this solver is appropriate to use if not isinstance(grid, picmi.Cartesian1DGrid): - raise RuntimeError('Direct solver can only be used on a 1D grid.') + raise RuntimeError("Direct solver can only be used on a 1D grid.") super(PoissonSolver1D, self).__init__( - grid=grid, method=kwargs.pop('method', 'Multigrid'), - required_precision=1, **kwargs + grid=grid, + method=kwargs.pop("method", "Multigrid"), + required_precision=1, + **kwargs, ) def solver_initialize_inputs(self): @@ -66,7 +68,7 @@ def solver_initialize_inputs(self): self.nxguardphi = 1 self.nzguardphi = 1 - self.phi = np.zeros(self.nz + 1 + 2*self.nzguardphi) + self.phi = np.zeros(self.nz + 1 + 2 * self.nzguardphi) self.decompose_matrix() @@ -108,10 +110,8 @@ def solve(self): left_voltage = 0.0 right_voltage = eval( - self.right_voltage, { - 't': self.sim.extension.warpx.gett_new(0), - 'sin': np.sin, 'pi': np.pi - } + self.right_voltage, + {"t": self.sim.extension.warpx.gett_new(0), "sin": np.sin, "pi": np.pi}, ) # Construct b vector @@ -124,31 +124,31 @@ def solve(self): phi = self.lu.solve(b) - self.phi[self.nzguardphi:-self.nzguardphi] = phi + self.phi[self.nzguardphi : -self.nzguardphi] = phi - self.phi[:self.nzguardphi] = left_voltage - self.phi[-self.nzguardphi:] = right_voltage + self.phi[: self.nzguardphi] = left_voltage + self.phi[-self.nzguardphi :] = right_voltage class CapacitiveDischargeExample(object): - '''The following runs a simulation of a parallel plate capacitor seeded + """The following runs a simulation of a parallel plate capacitor seeded with a plasma in the spacing between the plates. A time varying voltage is applied across the capacitor. The groups of 4 values below correspond to the 4 cases simulated by Turner et al. (2013) in their benchmarks of PIC-MCC codes. 
- ''' + """ - gap = 0.067 # m + gap = 0.067 # m - freq = 13.56e6 # Hz - voltage = [450.0, 200.0, 150.0, 120.0] # V + freq = 13.56e6 # Hz + voltage = [450.0, 200.0, 150.0, 120.0] # V - gas_density = [9.64e20, 32.1e20, 96.4e20, 321e20] # m^-3 - gas_temp = 300.0 # K - m_ion = 6.67e-27 # kg + gas_density = [9.64e20, 32.1e20, 96.4e20, 321e20] # m^-3 + gas_temp = 300.0 # K + m_ion = 6.67e-27 # kg - plasma_density = [2.56e14, 5.12e14, 5.12e14, 3.84e14] # m^-3 - elec_temp = 30000.0 # K + plasma_density = [2.56e14, 5.12e14, 5.12e14, 3.84e14] # m^-3 + elec_temp = 30000.0 # K seed_nppc = 16 * np.array([32, 16, 8, 4]) @@ -206,10 +206,10 @@ def setup_run(self): warpx_max_grid_size=128, lower_bound=[0], upper_bound=[self.gap], - lower_boundary_conditions=['dirichlet'], - upper_boundary_conditions=['dirichlet'], - lower_boundary_conditions_particles=['absorbing'], - upper_boundary_conditions_particles=['absorbing'], + lower_boundary_conditions=["dirichlet"], + upper_boundary_conditions=["dirichlet"], + lower_boundary_conditions_particles=["absorbing"], + upper_boundary_conditions_particles=["absorbing"], warpx_potential_hi_z=self.voltage, ) @@ -228,85 +228,93 @@ def setup_run(self): ####################################################################### self.electrons = picmi.Species( - particle_type='electron', name='electrons', + particle_type="electron", + name="electrons", initial_distribution=picmi.UniformDistribution( density=self.plasma_density, - rms_velocity=[np.sqrt(constants.kb * self.elec_temp / constants.m_e)]*3, - ) + rms_velocity=[np.sqrt(constants.kb * self.elec_temp / constants.m_e)] + * 3, + ), ) self.ions = picmi.Species( - particle_type='He', name='he_ions', - charge='q_e', mass=self.m_ion, + particle_type="He", + name="he_ions", + charge="q_e", + mass=self.m_ion, initial_distribution=picmi.UniformDistribution( density=self.plasma_density, - rms_velocity=[np.sqrt(constants.kb * self.gas_temp / self.m_ion)]*3, - ) + rms_velocity=[np.sqrt(constants.kb * 
self.gas_temp / self.m_ion)] * 3, + ), ) if self.dsmc: self.neutrals = picmi.Species( - particle_type='He', name='neutrals', - charge=0, mass=self.m_ion, + particle_type="He", + name="neutrals", + charge=0, + mass=self.m_ion, warpx_reflection_model_zlo=1.0, warpx_reflection_model_zhi=1.0, warpx_do_resampling=True, - warpx_resampling_trigger_max_avg_ppc=int(self.seed_nppc*1.5), + warpx_resampling_trigger_max_avg_ppc=int(self.seed_nppc * 1.5), initial_distribution=picmi.UniformDistribution( density=self.gas_density, - rms_velocity=[np.sqrt(constants.kb * self.gas_temp / self.m_ion)]*3, - ) + rms_velocity=[np.sqrt(constants.kb * self.gas_temp / self.m_ion)] + * 3, + ), ) ####################################################################### # Collision initialization # ####################################################################### - cross_sec_direc = '../../../../warpx-data/MCC_cross_sections/He/' + cross_sec_direc = "../../../../warpx-data/MCC_cross_sections/He/" electron_colls = picmi.MCCCollisions( - name='coll_elec', + name="coll_elec", species=self.electrons, background_density=self.gas_density, background_temperature=self.gas_temp, background_mass=self.ions.mass, ndt=self.mcc_subcycling_steps, scattering_processes={ - 'elastic' : { - 'cross_section' : cross_sec_direc+'electron_scattering.dat' + "elastic": { + "cross_section": cross_sec_direc + "electron_scattering.dat" }, - 'excitation1' : { - 'cross_section': cross_sec_direc+'excitation_1.dat', - 'energy' : 19.82 + "excitation1": { + "cross_section": cross_sec_direc + "excitation_1.dat", + "energy": 19.82, }, - 'excitation2' : { - 'cross_section': cross_sec_direc+'excitation_2.dat', - 'energy' : 20.61 + "excitation2": { + "cross_section": cross_sec_direc + "excitation_2.dat", + "energy": 20.61, }, - 'ionization' : { - 'cross_section' : cross_sec_direc+'ionization.dat', - 'energy' : 24.55, - 'species' : self.ions + "ionization": { + "cross_section": cross_sec_direc + "ionization.dat", + "energy": 
24.55, + "species": self.ions, }, - } + }, ) - ion_scattering_processes={ - 'elastic': {'cross_section': cross_sec_direc+'ion_scattering.dat'}, - 'back': {'cross_section': cross_sec_direc+'ion_back_scatter.dat'}, + ion_scattering_processes = { + "elastic": {"cross_section": cross_sec_direc + "ion_scattering.dat"}, + "back": {"cross_section": cross_sec_direc + "ion_back_scatter.dat"}, # 'charge_exchange': {'cross_section': cross_sec_direc+'charge_exchange.dat'} } if self.dsmc: ion_colls = picmi.DSMCCollisions( - name='coll_ion', + name="coll_ion", species=[self.ions, self.neutrals], - ndt=5, scattering_processes=ion_scattering_processes + ndt=5, + scattering_processes=ion_scattering_processes, ) else: ion_colls = picmi.MCCCollisions( - name='coll_ion', + name="coll_ion", species=self.ions, background_density=self.gas_density, background_temperature=self.gas_temp, ndt=self.mcc_subcycling_steps, - scattering_processes=ion_scattering_processes + scattering_processes=ion_scattering_processes, ) ####################################################################### @@ -318,28 +326,28 @@ def setup_run(self): time_step_size=self.dt, max_steps=self.max_steps, warpx_collisions=[electron_colls, ion_colls], - verbose=self.test + verbose=self.test, ) self.solver.sim = self.sim self.sim.add_species( self.electrons, - layout = picmi.GriddedLayout( + layout=picmi.GriddedLayout( n_macroparticle_per_cell=[self.seed_nppc], grid=self.grid - ) + ), ) self.sim.add_species( self.ions, - layout = picmi.GriddedLayout( + layout=picmi.GriddedLayout( n_macroparticle_per_cell=[self.seed_nppc], grid=self.grid - ) + ), ) if self.dsmc: self.sim.add_species( self.neutrals, - layout = picmi.GriddedLayout( - n_macroparticle_per_cell=[self.seed_nppc//2], grid=self.grid - ) + layout=picmi.GriddedLayout( + n_macroparticle_per_cell=[self.seed_nppc // 2], grid=self.grid + ), ) self.solver.sim_ext = self.sim.extension @@ -352,30 +360,30 @@ def setup_run(self): 
####################################################################### if self.dsmc: - file_prefix = 'Python_dsmc_1d_plt' + file_prefix = "Python_dsmc_1d_plt" else: if self.pythonsolver: - file_prefix = 'Python_background_mcc_1d_plt' + file_prefix = "Python_background_mcc_1d_plt" else: - file_prefix = 'Python_background_mcc_1d_tridiag_plt' + file_prefix = "Python_background_mcc_1d_tridiag_plt" species = [self.electrons, self.ions] if self.dsmc: species.append(self.neutrals) particle_diag = picmi.ParticleDiagnostic( species=species, - name='diag1', + name="diag1", period=0, - write_dir='.', - warpx_file_prefix=file_prefix + write_dir=".", + warpx_file_prefix=file_prefix, ) field_diag = picmi.FieldDiagnostic( - name='diag1', + name="diag1", grid=self.grid, period=0, - data_list=['rho_electrons', 'rho_he_ions'], - write_dir='.', - warpx_file_prefix=file_prefix + data_list=["rho_electrons", "rho_he_ions"], + write_dir=".", + warpx_file_prefix=file_prefix, ) self.sim.add_diagnostic(particle_diag) self.sim.add_diagnostic(field_diag) @@ -388,7 +396,7 @@ def rethermalize_neutrals(self): if step % 1000 != 10: return - if not hasattr(self, 'neutral_cont'): + if not hasattr(self, "neutral_cont"): self.neutral_cont = particle_containers.ParticleContainerWrapper( self.neutrals.name ) @@ -408,14 +416,13 @@ def rethermalize_neutrals(self): def _get_rho_ions(self): # deposit the ion density in rho_fp - he_ions_wrapper = particle_containers.ParticleContainerWrapper('he_ions') + he_ions_wrapper = particle_containers.ParticleContainerWrapper("he_ions") he_ions_wrapper.deposit_charge_density(level=0) rho_data = self.rho_wrapper[...] 
self.ion_density_array += rho_data / constants.q_e / self.diag_steps def run_sim(self): - self.sim.step(self.max_steps - self.diag_steps) self.rho_wrapper = fields.RhoFPWrapper(0, False) @@ -425,15 +432,15 @@ def run_sim(self): if self.pythonsolver: # confirm that the external solver was run - assert hasattr(self.solver, 'phi') + assert hasattr(self.solver, "phi") if libwarpx.amr.ParallelDescriptor.MyProc() == 0: - np.save(f'ion_density_case_{self.n+1}.npy', self.ion_density_array) + np.save(f"ion_density_case_{self.n+1}.npy", self.ion_density_array) # query the particle z-coordinates if this is run during CI testing # to cover that functionality if self.test: - he_ions_wrapper = particle_containers.ParticleContainerWrapper('he_ions') + he_ions_wrapper = particle_containers.ParticleContainerWrapper("he_ions") nparts = he_ions_wrapper.get_particle_count(local=True) z_coords = np.concatenate(he_ions_wrapper.zp) assert len(z_coords) == nparts @@ -446,28 +453,31 @@ def run_sim(self): parser = argparse.ArgumentParser() parser.add_argument( - '-t', '--test', help='toggle whether this script is run as a short CI test', - action='store_true', + "-t", + "--test", + help="toggle whether this script is run as a short CI test", + action="store_true", ) parser.add_argument( - '-n', help='Test number to run (1 to 4)', required=False, type=int, - default=1 + "-n", help="Test number to run (1 to 4)", required=False, type=int, default=1 ) parser.add_argument( - '--pythonsolver', help='toggle whether to use the Python level solver', - action='store_true' + "--pythonsolver", + help="toggle whether to use the Python level solver", + action="store_true", ) parser.add_argument( - '--dsmc', help='toggle whether to use DSMC for ions in place of MCC', - action='store_true' + "--dsmc", + help="toggle whether to use DSMC for ions in place of MCC", + action="store_true", ) args, left = parser.parse_known_args() -sys.argv = sys.argv[:1]+left +sys.argv = sys.argv[:1] + left if args.n < 1 or 
args.n > 4: - raise AttributeError('Test number must be an integer from 1 to 4.') + raise AttributeError("Test number must be an integer from 1 to 4.") run = CapacitiveDischargeExample( - n=args.n-1, test=args.test, pythonsolver=args.pythonsolver, dsmc=args.dsmc + n=args.n - 1, test=args.test, pythonsolver=args.pythonsolver, dsmc=args.dsmc ) run.run_sim() diff --git a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py index 65baabba605..094a9cc8881 100755 --- a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py +++ b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py @@ -17,19 +17,19 @@ # physics parameters ########################## -D_CA = 0.067 # m +D_CA = 0.067 # m -N_INERT = 9.64e20 # m^-3 -T_INERT = 300.0 # K +N_INERT = 9.64e20 # m^-3 +T_INERT = 300.0 # K -FREQ = 13.56e6 # Hz +FREQ = 13.56e6 # Hz VOLTAGE = 450.0 -M_ION = 6.67e-27 # kg +M_ION = 6.67e-27 # kg -PLASMA_DENSITY = 2.56e14 # m^-3 -T_ELEC = 30000.0 # K +PLASMA_DENSITY = 2.56e14 # m^-3 +T_ELEC = 30000.0 # K DT = 1.0 / (400 * FREQ) @@ -57,8 +57,8 @@ # using superLU decomposition ############################# -class PoissonSolverPseudo1D(picmi.ElectrostaticSolver): +class PoissonSolverPseudo1D(picmi.ElectrostaticSolver): def __init__(self, grid, **kwargs): """Direct solver for the Poisson equation using superLU. This solver is useful for pseudo 1D cases i.e. diode simulations with small x extent. @@ -68,16 +68,17 @@ def __init__(self, grid, **kwargs): solver will be installed. """ super(PoissonSolverPseudo1D, self).__init__( - grid=grid, method=kwargs.pop('method', 'Multigrid'), - required_precision=1, **kwargs + grid=grid, + method=kwargs.pop("method", "Multigrid"), + required_precision=1, + **kwargs, ) self.rho_wrapper = None self.phi_wrapper = None self.time_sum = 0.0 def solver_initialize_inputs(self): - """Grab geometrical quantities from the grid. 
- """ + """Grab geometrical quantities from the grid.""" self.right_voltage = self.grid.potential_xmax # set WarpX boundary potentials to None since we will handle it @@ -97,7 +98,7 @@ def solver_initialize_inputs(self): self.dz = (self.grid.upper_bound[1] - self.grid.lower_bound[1]) / self.nz if not np.isclose(self.dx, self.dz): - raise RuntimeError('Direct solver requires dx = dz.') + raise RuntimeError("Direct solver requires dx = dz.") self.nxguardrho = 2 self.nzguardrho = 2 @@ -105,8 +106,7 @@ def solver_initialize_inputs(self): self.nzguardphi = 1 self.phi = np.zeros( - (self.nx + 1 + 2*self.nxguardphi, - self.nz + 1 + 2*self.nzguardphi) + (self.nx + 1 + 2 * self.nxguardphi, self.nz + 1 + 2 * self.nzguardphi) ) self.decompose_matrix() @@ -120,24 +120,22 @@ def decompose_matrix(self): self.nzsolve = self.nz + 3 # Set up the computation matrix in order to solve A*phi = rho - A = np.zeros( - (self.nzsolve*self.nxsolve, self.nzsolve*self.nxsolve) - ) + A = np.zeros((self.nzsolve * self.nxsolve, self.nzsolve * self.nxsolve)) kk = 0 for ii in range(self.nxsolve): for jj in range(self.nzsolve): temp = np.zeros((self.nxsolve, self.nzsolve)) if ii == 0 or ii == self.nxsolve - 1: - temp[ii, jj] = 1. 
+ temp[ii, jj] = 1.0 elif ii == 1: temp[ii, jj] = -2.0 - temp[ii-1, jj] = 1.0 - temp[ii+1, jj] = 1.0 + temp[ii - 1, jj] = 1.0 + temp[ii + 1, jj] = 1.0 elif ii == self.nxsolve - 2: temp[ii, jj] = -2.0 - temp[ii+1, jj] = 1.0 - temp[ii-1, jj] = 1.0 + temp[ii + 1, jj] = 1.0 + temp[ii - 1, jj] = 1.0 elif jj == 0: temp[ii, jj] = 1.0 temp[ii, -3] = -1.0 @@ -146,10 +144,10 @@ def decompose_matrix(self): temp[ii, 2] = -1.0 else: temp[ii, jj] = -4.0 - temp[ii, jj+1] = 1.0 - temp[ii, jj-1] = 1.0 - temp[ii-1, jj] = 1.0 - temp[ii+1, jj] = 1.0 + temp[ii, jj + 1] = 1.0 + temp[ii, jj - 1] = 1.0 + temp[ii - 1, jj] = 1.0 + temp[ii + 1, jj] = 1.0 A[kk] = temp.flatten() kk += 1 @@ -177,18 +175,21 @@ def solve(self): calculating phi from rho.""" right_voltage = eval( self.right_voltage, - {'t': sim.extension.warpx.gett_new(0), 'sin': np.sin, 'pi': np.pi} + {"t": sim.extension.warpx.gett_new(0), "sin": np.sin, "pi": np.pi}, ) left_voltage = 0.0 - rho = -self.rho_data[ - self.nxguardrho:-self.nxguardrho, self.nzguardrho:-self.nzguardrho - ] / constants.ep0 + rho = ( + -self.rho_data[ + self.nxguardrho : -self.nxguardrho, self.nzguardrho : -self.nzguardrho + ] + / constants.ep0 + ) # Construct b vector nx, nz = np.shape(rho) - source = np.zeros((nx, nz+2), dtype=np.float32) - source[:,1:-1] = rho * self.dx**2 + source = np.zeros((nx, nz + 2), dtype=np.float32) + source[:, 1:-1] = rho * self.dx**2 source[0] = left_voltage source[-1] = right_voltage @@ -197,16 +198,17 @@ def solve(self): b = source.flatten() flat_phi = self.lu.solve(b) - self.phi[self.nxguardphi:-self.nxguardphi] = ( - flat_phi.reshape(np.shape(source)) + self.phi[self.nxguardphi : -self.nxguardphi] = flat_phi.reshape( + np.shape(source) ) - self.phi[:self.nxguardphi] = left_voltage - self.phi[-self.nxguardphi:] = right_voltage + self.phi[: self.nxguardphi] = left_voltage + self.phi[-self.nxguardphi :] = right_voltage # the electrostatic solver in WarpX keeps the ghost cell values as 0 - self.phi[:,:self.nzguardphi] = 0 - 
self.phi[:,-self.nzguardphi:] = 0 + self.phi[:, : self.nzguardphi] = 0 + self.phi[:, -self.nzguardphi :] = 0 + ########################## # physics components @@ -216,73 +218,67 @@ def solve(self): v_rms_ion = np.sqrt(constants.kb * T_INERT / M_ION) uniform_plasma_elec = picmi.UniformDistribution( - density = PLASMA_DENSITY, - upper_bound = [None] * 3, - rms_velocity = [v_rms_elec] * 3, - directed_velocity = [0.] * 3 + density=PLASMA_DENSITY, + upper_bound=[None] * 3, + rms_velocity=[v_rms_elec] * 3, + directed_velocity=[0.0] * 3, ) uniform_plasma_ion = picmi.UniformDistribution( - density = PLASMA_DENSITY, - upper_bound = [None] * 3, - rms_velocity = [v_rms_ion] * 3, - directed_velocity = [0.] * 3 + density=PLASMA_DENSITY, + upper_bound=[None] * 3, + rms_velocity=[v_rms_ion] * 3, + directed_velocity=[0.0] * 3, ) electrons = picmi.Species( - particle_type='electron', name='electrons', - initial_distribution=uniform_plasma_elec + particle_type="electron", name="electrons", initial_distribution=uniform_plasma_elec ) ions = picmi.Species( - particle_type='He', name='he_ions', - charge='q_e', - initial_distribution=uniform_plasma_ion + particle_type="He", + name="he_ions", + charge="q_e", + initial_distribution=uniform_plasma_ion, ) # MCC collisions -cross_sec_direc = '../../../../warpx-data/MCC_cross_sections/He/' +cross_sec_direc = "../../../../warpx-data/MCC_cross_sections/He/" mcc_electrons = picmi.MCCCollisions( - name='coll_elec', + name="coll_elec", species=electrons, background_density=N_INERT, background_temperature=T_INERT, background_mass=ions.mass, scattering_processes={ - 'elastic' : { - 'cross_section' : cross_sec_direc+'electron_scattering.dat' - }, - 'excitation1' : { - 'cross_section': cross_sec_direc+'excitation_1.dat', - 'energy' : 19.82 + "elastic": {"cross_section": cross_sec_direc + "electron_scattering.dat"}, + "excitation1": { + "cross_section": cross_sec_direc + "excitation_1.dat", + "energy": 19.82, }, - 'excitation2' : { - 'cross_section': 
cross_sec_direc+'excitation_2.dat', - 'energy' : 20.61 + "excitation2": { + "cross_section": cross_sec_direc + "excitation_2.dat", + "energy": 20.61, }, - 'ionization' : { - 'cross_section' : cross_sec_direc+'ionization.dat', - 'energy' : 24.55, - 'species' : ions + "ionization": { + "cross_section": cross_sec_direc + "ionization.dat", + "energy": 24.55, + "species": ions, }, - } + }, ) mcc_ions = picmi.MCCCollisions( - name='coll_ion', + name="coll_ion", species=ions, background_density=N_INERT, background_temperature=T_INERT, scattering_processes={ - 'elastic' : { - 'cross_section' : cross_sec_direc+'ion_scattering.dat' - }, - 'back' : { - 'cross_section' : cross_sec_direc+'ion_back_scatter.dat' - }, + "elastic": {"cross_section": cross_sec_direc + "ion_scattering.dat"}, + "back": {"cross_section": cross_sec_direc + "ion_back_scatter.dat"}, # 'charge_exchange' : { # 'cross_section' : cross_sec_direc+'charge_exchange.dat' # } - } + }, ) ########################## @@ -290,17 +286,17 @@ def solve(self): ########################## grid = picmi.Cartesian2DGrid( - number_of_cells = [nx, ny], + number_of_cells=[nx, ny], warpx_max_grid_size=128, - lower_bound = [xmin, ymin], - upper_bound = [xmax, ymax], - bc_xmin = 'dirichlet', - bc_xmax = 'dirichlet', - bc_ymin = 'periodic', - bc_ymax = 'periodic', - warpx_potential_hi_x = "%.1f*sin(2*pi*%.5e*t)" % (VOLTAGE, FREQ), - lower_boundary_conditions_particles=['absorbing', 'periodic'], - upper_boundary_conditions_particles=['absorbing', 'periodic'] + lower_bound=[xmin, ymin], + upper_bound=[xmax, ymax], + bc_xmin="dirichlet", + bc_xmax="dirichlet", + bc_ymin="periodic", + bc_ymax="periodic", + warpx_potential_hi_x="%.1f*sin(2*pi*%.5e*t)" % (VOLTAGE, FREQ), + lower_boundary_conditions_particles=["absorbing", "periodic"], + upper_boundary_conditions_particles=["absorbing", "periodic"], ) # solver = picmi.ElectrostaticSolver( @@ -313,18 +309,18 @@ def solve(self): ########################## particle_diag = 
picmi.ParticleDiagnostic( - name = 'diag1', - period = diagnostic_intervals, - write_dir = '.', - warpx_file_prefix = 'Python_background_mcc_plt' + name="diag1", + period=diagnostic_intervals, + write_dir=".", + warpx_file_prefix="Python_background_mcc_plt", ) field_diag = picmi.FieldDiagnostic( - name = 'diag1', - grid = grid, - period = diagnostic_intervals, - data_list = ['rho_electrons', 'rho_he_ions'], - write_dir = '.', - warpx_file_prefix = 'Python_background_mcc_plt' + name="diag1", + grid=grid, + period=diagnostic_intervals, + data_list=["rho_electrons", "rho_he_ions"], + write_dir=".", + warpx_file_prefix="Python_background_mcc_plt", ) ########################## @@ -332,23 +328,23 @@ def solve(self): ########################## sim = picmi.Simulation( - solver = solver, - time_step_size = DT, - max_steps = max_steps, - warpx_collisions=[mcc_electrons, mcc_ions] + solver=solver, + time_step_size=DT, + max_steps=max_steps, + warpx_collisions=[mcc_electrons, mcc_ions], ) sim.add_species( electrons, - layout = picmi.GriddedLayout( + layout=picmi.GriddedLayout( n_macroparticle_per_cell=number_per_cell_each_dim, grid=grid - ) + ), ) sim.add_species( ions, - layout = picmi.GriddedLayout( + layout=picmi.GriddedLayout( n_macroparticle_per_cell=number_per_cell_each_dim, grid=grid - ) + ), ) sim.add_diagnostic(particle_diag) @@ -361,4 +357,4 @@ def solve(self): sim.step(max_steps) # confirm that the external solver was run -assert hasattr(solver, 'phi') +assert hasattr(solver, "phi") diff --git a/Examples/Physics_applications/capacitive_discharge/analysis_1d.py b/Examples/Physics_applications/capacitive_discharge/analysis_1d.py index 29b5272d8b1..82d98c38210 100755 --- a/Examples/Physics_applications/capacitive_discharge/analysis_1d.py +++ b/Examples/Physics_applications/capacitive_discharge/analysis_1d.py @@ -4,6 +4,7 @@ import numpy as np +# fmt: off ref_density = np.array([ 1.27989677e+14, 2.23601330e+14, 2.55400265e+14, 2.55664972e+14, 2.55806841e+14, 
2.55806052e+14, 2.55815865e+14, 2.55755151e+14, @@ -39,7 +40,8 @@ 2.56041610e+14, 2.56041551e+14, 2.56088641e+14, 2.23853646e+14, 1.27580207e+14 ]) +# fmt: on -density_data = np.load( 'ion_density_case_1.npy' ) +density_data = np.load("ion_density_case_1.npy") print(repr(density_data)) assert np.allclose(density_data, ref_density) diff --git a/Examples/Physics_applications/capacitive_discharge/analysis_2d.py b/Examples/Physics_applications/capacitive_discharge/analysis_2d.py index 472758ec63b..21f5c7714c4 100755 --- a/Examples/Physics_applications/capacitive_discharge/analysis_2d.py +++ b/Examples/Physics_applications/capacitive_discharge/analysis_2d.py @@ -9,11 +9,10 @@ import sys -sys.path.append('../../../../warpx/Regression/Checksum/') +sys.path.append("../../../../warpx/Regression/Checksum/") import checksumAPI my_check = checksumAPI.evaluate_checksum( - 'background_mcc', 'Python_background_mcc_plt000050', - do_particles=True, rtol=5e-3 + "background_mcc", "Python_background_mcc_plt000050", do_particles=True, rtol=5e-3 ) diff --git a/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py b/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py index 88d55efa0c9..505521fc1ca 100755 --- a/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py +++ b/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py @@ -7,7 +7,7 @@ import numpy as np -sys.path.append('../../../../warpx/Regression/Checksum/') +sys.path.append("../../../../warpx/Regression/Checksum/") import checksumAPI @@ -17,6 +17,7 @@ my_check = checksumAPI.evaluate_checksum(test_name, fn, do_particles=True) +# fmt: off ref_density = np.array([ 1.27942709e+14, 2.23579371e+14, 2.55384387e+14, 2.55660663e+14, 2.55830911e+14, 2.55814337e+14, 2.55798906e+14, 2.55744891e+14, @@ -52,7 +53,8 @@ 2.56611124e+14, 2.56344324e+14, 2.56244156e+14, 2.24183727e+14, 1.27909856e+14 ]) +# fmt: on -density_data = np.load( 'ion_density_case_1.npy' ) +density_data = 
np.load("ion_density_case_1.npy") print(repr(density_data)) assert np.allclose(density_data, ref_density) diff --git a/Examples/Physics_applications/laser_acceleration/PICMI_inputs_1d.py b/Examples/Physics_applications/laser_acceleration/PICMI_inputs_1d.py index d8bdddfaca6..328817c7b49 100755 --- a/Examples/Physics_applications/laser_acceleration/PICMI_inputs_1d.py +++ b/Examples/Physics_applications/laser_acceleration/PICMI_inputs_1d.py @@ -14,7 +14,7 @@ # Physical domain zmin = -56e-06 -zmax = 12e-06 +zmax = 12e-06 # Domain decomposition max_grid_size = 64 @@ -22,16 +22,17 @@ # Create grid grid = picmi.Cartesian1DGrid( - number_of_cells = [nz], - lower_bound = [zmin], - upper_bound = [zmax], - lower_boundary_conditions = ['dirichlet'], - upper_boundary_conditions = ['dirichlet'], - lower_boundary_conditions_particles = ['absorbing'], - upper_boundary_conditions_particles = ['absorbing'], - moving_window_velocity = [c], - warpx_max_grid_size = max_grid_size, - warpx_blocking_factor = blocking_factor) + number_of_cells=[nz], + lower_bound=[zmin], + upper_bound=[zmax], + lower_boundary_conditions=["dirichlet"], + upper_boundary_conditions=["dirichlet"], + lower_boundary_conditions_particles=["absorbing"], + upper_boundary_conditions_particles=["absorbing"], + moving_window_velocity=[c], + warpx_max_grid_size=max_grid_size, + warpx_blocking_factor=blocking_factor, +) # Particles: plasma electrons plasma_density = 2e23 @@ -42,82 +43,82 @@ plasma_ymax = None plasma_zmax = None uniform_distribution = picmi.UniformDistribution( - density = plasma_density, - lower_bound = [plasma_xmin, plasma_ymin, plasma_zmin], - upper_bound = [plasma_xmax, plasma_ymax, plasma_zmax], - fill_in = True) + density=plasma_density, + lower_bound=[plasma_xmin, plasma_ymin, plasma_zmin], + upper_bound=[plasma_xmax, plasma_ymax, plasma_zmax], + fill_in=True, +) electrons = picmi.Species( - particle_type = 'electron', - name = 'electrons', - initial_distribution = uniform_distribution) + 
particle_type="electron", + name="electrons", + initial_distribution=uniform_distribution, +) # Laser e_max = 16e12 position_z = 9e-06 -profile_t_peak = 30.e-15 +profile_t_peak = 30.0e-15 profile_focal_distance = 100e-06 laser = picmi.GaussianLaser( - wavelength = 0.8e-06, - waist = 5e-06, - duration = 15e-15, - focal_position = [0, 0, profile_focal_distance + position_z], - centroid_position = [0, 0, position_z - c*profile_t_peak], - propagation_direction = [0, 0, 1], - polarization_direction = [0, 1, 0], - E0 = e_max, - fill_in = False) + wavelength=0.8e-06, + waist=5e-06, + duration=15e-15, + focal_position=[0, 0, profile_focal_distance + position_z], + centroid_position=[0, 0, position_z - c * profile_t_peak], + propagation_direction=[0, 0, 1], + polarization_direction=[0, 1, 0], + E0=e_max, + fill_in=False, +) laser_antenna = picmi.LaserAntenna( - position = [0., 0., position_z], - normal_vector = [0, 0, 1]) + position=[0.0, 0.0, position_z], normal_vector=[0, 0, 1] +) # Electromagnetic solver -solver = picmi.ElectromagneticSolver( - grid = grid, - method = 'Yee', - cfl = 0.9, - divE_cleaning = 0) +solver = picmi.ElectromagneticSolver(grid=grid, method="Yee", cfl=0.9, divE_cleaning=0) # Diagnostics -diag_field_list = ['B', 'E', 'J', 'rho'] +diag_field_list = ["B", "E", "J", "rho"] particle_diag = picmi.ParticleDiagnostic( - name = 'diag1', - period = 100, - write_dir = '.', - warpx_file_prefix = 'Python_LaserAcceleration_1d_plt') + name="diag1", + period=100, + write_dir=".", + warpx_file_prefix="Python_LaserAcceleration_1d_plt", +) field_diag = picmi.FieldDiagnostic( - name = 'diag1', - grid = grid, - period = 100, - data_list = diag_field_list, - write_dir = '.', - warpx_file_prefix = 'Python_LaserAcceleration_1d_plt') + name="diag1", + grid=grid, + period=100, + data_list=diag_field_list, + write_dir=".", + warpx_file_prefix="Python_LaserAcceleration_1d_plt", +) # Set up simulation sim = picmi.Simulation( - solver = solver, - max_steps = max_steps, - 
verbose = 1, - particle_shape = 'cubic', - warpx_use_filter = 1, - warpx_serialize_initial_conditions = 1, - warpx_do_dynamic_scheduling = 0) + solver=solver, + max_steps=max_steps, + verbose=1, + particle_shape="cubic", + warpx_use_filter=1, + warpx_serialize_initial_conditions=1, + warpx_do_dynamic_scheduling=0, +) # Add plasma electrons sim.add_species( - electrons, - layout = picmi.GriddedLayout(grid = grid, n_macroparticle_per_cell = [10])) + electrons, layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=[10]) +) # Add laser -sim.add_laser( - laser, - injection_method = laser_antenna) +sim.add_laser(laser, injection_method=laser_antenna) # Add diagnostics sim.add_diagnostic(particle_diag) sim.add_diagnostic(field_diag) # Write input file that can be used to run with the compiled version -sim.write_input_file(file_name = 'inputs_1d_picmi') +sim.write_input_file(file_name="inputs_1d_picmi") # Initialize inputs and WarpX instance sim.initialize_inputs() diff --git a/Examples/Physics_applications/laser_acceleration/PICMI_inputs_2d.py b/Examples/Physics_applications/laser_acceleration/PICMI_inputs_2d.py index b50e16bfc0a..5e961fea826 100755 --- a/Examples/Physics_applications/laser_acceleration/PICMI_inputs_2d.py +++ b/Examples/Physics_applications/laser_acceleration/PICMI_inputs_2d.py @@ -15,11 +15,11 @@ # Physical domain xmin = -30e-06 -xmax = 30e-06 +xmax = 30e-06 zmin = -56e-06 -zmax = 12e-06 +zmax = 12e-06 xmin_refined = -5e-06 -xmax_refined = 5e-06 +xmax_refined = 5e-06 zmin_refined = -35e-06 zmax_refined = -25e-06 @@ -29,17 +29,18 @@ # Create grid grid = picmi.Cartesian2DGrid( - number_of_cells = [nx, nz], - lower_bound = [xmin, zmin], - upper_bound = [xmax, zmax], - lower_boundary_conditions = ['open', 'open'], - upper_boundary_conditions = ['open', 'open'], - lower_boundary_conditions_particles = ['absorbing', 'absorbing'], - upper_boundary_conditions_particles = ['absorbing', 'absorbing'], - moving_window_velocity = [0., c], - 
warpx_max_grid_size = max_grid_size, - warpx_blocking_factor = blocking_factor, - refined_regions = [[1, [xmin_refined, zmin_refined], [xmax_refined, zmax_refined]]]) + number_of_cells=[nx, nz], + lower_bound=[xmin, zmin], + upper_bound=[xmax, zmax], + lower_boundary_conditions=["open", "open"], + upper_boundary_conditions=["open", "open"], + lower_boundary_conditions_particles=["absorbing", "absorbing"], + upper_boundary_conditions_particles=["absorbing", "absorbing"], + moving_window_velocity=[0.0, c], + warpx_max_grid_size=max_grid_size, + warpx_blocking_factor=blocking_factor, + refined_regions=[[1, [xmin_refined, zmin_refined], [xmax_refined, zmax_refined]]], +) # Particles: plasma electrons plasma_density = 2e23 @@ -50,111 +51,111 @@ plasma_ymax = None plasma_zmax = None uniform_distribution = picmi.UniformDistribution( - density = plasma_density, - lower_bound = [plasma_xmin, plasma_ymin, plasma_zmin], - upper_bound = [plasma_xmax, plasma_ymax, plasma_zmax], - fill_in = True) + density=plasma_density, + lower_bound=[plasma_xmin, plasma_ymin, plasma_zmin], + upper_bound=[plasma_xmax, plasma_ymax, plasma_zmax], + fill_in=True, +) electrons = picmi.Species( - particle_type = 'electron', - name = 'electrons', - initial_distribution = uniform_distribution) + particle_type="electron", + name="electrons", + initial_distribution=uniform_distribution, +) # Particles: beam electrons q_tot = 1e-12 -x_m = 0. -y_m = 0. +x_m = 0.0 +y_m = 0.0 z_m = -28e-06 x_rms = 0.5e-06 y_rms = 0.5e-06 z_rms = 0.5e-06 -ux_m = 0. -uy_m = 0. -uz_m = 500. -ux_th = 2. -uy_th = 2. -uz_th = 50. 
+ux_m = 0.0 +uy_m = 0.0 +uz_m = 500.0 +ux_th = 2.0 +uy_th = 2.0 +uz_th = 50.0 gaussian_bunch_distribution = picmi.GaussianBunchDistribution( - n_physical_particles = q_tot / q_e, - rms_bunch_size = [x_rms, y_rms, z_rms], - rms_velocity = [c*ux_th, c*uy_th, c*uz_th], - centroid_position = [x_m, y_m, z_m], - centroid_velocity = [c*ux_m, c*uy_m, c*uz_m]) + n_physical_particles=q_tot / q_e, + rms_bunch_size=[x_rms, y_rms, z_rms], + rms_velocity=[c * ux_th, c * uy_th, c * uz_th], + centroid_position=[x_m, y_m, z_m], + centroid_velocity=[c * ux_m, c * uy_m, c * uz_m], +) beam = picmi.Species( - particle_type = 'electron', - name = 'beam', - initial_distribution = gaussian_bunch_distribution) + particle_type="electron", + name="beam", + initial_distribution=gaussian_bunch_distribution, +) # Laser e_max = 16e12 position_z = 9e-06 -profile_t_peak = 30.e-15 +profile_t_peak = 30.0e-15 profile_focal_distance = 100e-06 laser = picmi.GaussianLaser( - wavelength = 0.8e-06, - waist = 5e-06, - duration = 15e-15, - focal_position = [0, 0, profile_focal_distance + position_z], - centroid_position = [0, 0, position_z - c*profile_t_peak], - propagation_direction = [0, 0, 1], - polarization_direction = [0, 1, 0], - E0 = e_max, - fill_in = False) + wavelength=0.8e-06, + waist=5e-06, + duration=15e-15, + focal_position=[0, 0, profile_focal_distance + position_z], + centroid_position=[0, 0, position_z - c * profile_t_peak], + propagation_direction=[0, 0, 1], + polarization_direction=[0, 1, 0], + E0=e_max, + fill_in=False, +) laser_antenna = picmi.LaserAntenna( - position = [0., 0., position_z], - normal_vector = [0, 0, 1]) + position=[0.0, 0.0, position_z], normal_vector=[0, 0, 1] +) # Electromagnetic solver -solver = picmi.ElectromagneticSolver( - grid = grid, - method = 'Yee', - cfl = 1., - divE_cleaning = 0) +solver = picmi.ElectromagneticSolver(grid=grid, method="Yee", cfl=1.0, divE_cleaning=0) # Diagnostics -diag_field_list = ['B', 'E', 'J', 'rho'] +diag_field_list = ["B", "E", "J", 
"rho"] particle_diag = picmi.ParticleDiagnostic( - name = 'diag1', - period = 200, - write_dir = '.', - warpx_file_prefix = 'Python_LaserAccelerationMR_plt') + name="diag1", + period=200, + write_dir=".", + warpx_file_prefix="Python_LaserAccelerationMR_plt", +) field_diag = picmi.FieldDiagnostic( - name = 'diag1', - grid = grid, - period = 200, - data_list = diag_field_list, - write_dir = '.', - warpx_file_prefix = 'Python_LaserAccelerationMR_plt') + name="diag1", + grid=grid, + period=200, + data_list=diag_field_list, + write_dir=".", + warpx_file_prefix="Python_LaserAccelerationMR_plt", +) # Set up simulation sim = picmi.Simulation( - solver = solver, - max_steps = max_steps, - verbose = 1, - particle_shape = 'cubic', - warpx_use_filter = 1, - warpx_serialize_initial_conditions = 1) + solver=solver, + max_steps=max_steps, + verbose=1, + particle_shape="cubic", + warpx_use_filter=1, + warpx_serialize_initial_conditions=1, +) # Add plasma electrons sim.add_species( - electrons, - layout = picmi.GriddedLayout(grid = grid, n_macroparticle_per_cell = [1, 1, 1])) + electrons, layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=[1, 1, 1]) +) # Add beam electrons -sim.add_species( - beam, - layout = picmi.PseudoRandomLayout(grid = grid, n_macroparticles = 100)) +sim.add_species(beam, layout=picmi.PseudoRandomLayout(grid=grid, n_macroparticles=100)) # Add laser -sim.add_laser( - laser, - injection_method = laser_antenna) +sim.add_laser(laser, injection_method=laser_antenna) # Add diagnostics sim.add_diagnostic(particle_diag) sim.add_diagnostic(field_diag) # Write input file that can be used to run with the compiled version -sim.write_input_file(file_name = 'inputs_2d_picmi') +sim.write_input_file(file_name="inputs_2d_picmi") # Initialize inputs and WarpX instance sim.initialize_inputs() diff --git a/Examples/Physics_applications/laser_acceleration/PICMI_inputs_3d.py b/Examples/Physics_applications/laser_acceleration/PICMI_inputs_3d.py index 
13bf492e203..4a736b7cc2b 100755 --- a/Examples/Physics_applications/laser_acceleration/PICMI_inputs_3d.py +++ b/Examples/Physics_applications/laser_acceleration/PICMI_inputs_3d.py @@ -16,11 +16,11 @@ # Physical domain xmin = -30e-06 -xmax = 30e-06 +xmax = 30e-06 ymin = -30e-06 -ymax = 30e-06 +ymax = 30e-06 zmin = -56e-06 -zmax = 12e-06 +zmax = 12e-06 # Domain decomposition max_grid_size = 64 @@ -28,16 +28,17 @@ # Create grid grid = picmi.Cartesian3DGrid( - number_of_cells = [nx, ny, nz], - lower_bound = [xmin, ymin, zmin], - upper_bound = [xmax, ymax, zmax], - lower_boundary_conditions = ['periodic', 'periodic', 'dirichlet'], - upper_boundary_conditions = ['periodic', 'periodic', 'dirichlet'], - lower_boundary_conditions_particles = ['periodic', 'periodic', 'absorbing'], - upper_boundary_conditions_particles = ['periodic', 'periodic', 'absorbing'], - moving_window_velocity = [0., 0., c], - warpx_max_grid_size = max_grid_size, - warpx_blocking_factor = blocking_factor) + number_of_cells=[nx, ny, nz], + lower_bound=[xmin, ymin, zmin], + upper_bound=[xmax, ymax, zmax], + lower_boundary_conditions=["periodic", "periodic", "dirichlet"], + upper_boundary_conditions=["periodic", "periodic", "dirichlet"], + lower_boundary_conditions_particles=["periodic", "periodic", "absorbing"], + upper_boundary_conditions_particles=["periodic", "periodic", "absorbing"], + moving_window_velocity=[0.0, 0.0, c], + warpx_max_grid_size=max_grid_size, + warpx_blocking_factor=blocking_factor, +) # Particles: plasma electrons plasma_density = 2e23 @@ -48,114 +49,114 @@ plasma_ymax = 20e-06 plasma_zmax = None uniform_distribution = picmi.UniformDistribution( - density = plasma_density, - lower_bound = [plasma_xmin, plasma_ymin, plasma_zmin], - upper_bound = [plasma_xmax, plasma_ymax, plasma_zmax], - fill_in = True) + density=plasma_density, + lower_bound=[plasma_xmin, plasma_ymin, plasma_zmin], + upper_bound=[plasma_xmax, plasma_ymax, plasma_zmax], + fill_in=True, +) electrons = picmi.Species( - 
particle_type = 'electron', - name = 'electrons', - initial_distribution = uniform_distribution, - warpx_add_int_attributes = {'regionofinterest': "(z>12.0e-6) * (z<13.0e-6)"}, - warpx_add_real_attributes = {'initialenergy': "ux*ux + uy*uy + uz*uz"}) + particle_type="electron", + name="electrons", + initial_distribution=uniform_distribution, + warpx_add_int_attributes={"regionofinterest": "(z>12.0e-6) * (z<13.0e-6)"}, + warpx_add_real_attributes={"initialenergy": "ux*ux + uy*uy + uz*uz"}, +) # Particles: beam electrons q_tot = 1e-12 -x_m = 0. -y_m = 0. +x_m = 0.0 +y_m = 0.0 z_m = -28e-06 x_rms = 0.5e-06 y_rms = 0.5e-06 z_rms = 0.5e-06 -ux_m = 0. -uy_m = 0. -uz_m = 500. -ux_th = 2. -uy_th = 2. -uz_th = 50. +ux_m = 0.0 +uy_m = 0.0 +uz_m = 500.0 +ux_th = 2.0 +uy_th = 2.0 +uz_th = 50.0 gaussian_bunch_distribution = picmi.GaussianBunchDistribution( - n_physical_particles = q_tot / q_e, - rms_bunch_size = [x_rms, y_rms, z_rms], - rms_velocity = [c*ux_th, c*uy_th, c*uz_th], - centroid_position = [x_m, y_m, z_m], - centroid_velocity = [c*ux_m, c*uy_m, c*uz_m]) + n_physical_particles=q_tot / q_e, + rms_bunch_size=[x_rms, y_rms, z_rms], + rms_velocity=[c * ux_th, c * uy_th, c * uz_th], + centroid_position=[x_m, y_m, z_m], + centroid_velocity=[c * ux_m, c * uy_m, c * uz_m], +) beam = picmi.Species( - particle_type = 'electron', - name = 'beam', - initial_distribution = gaussian_bunch_distribution) + particle_type="electron", + name="beam", + initial_distribution=gaussian_bunch_distribution, +) # Laser e_max = 16e12 position_z = 9e-06 -profile_t_peak = 30.e-15 +profile_t_peak = 30.0e-15 profile_focal_distance = 100e-06 laser = picmi.GaussianLaser( - wavelength = 0.8e-06, - waist = 5e-06, - duration = 15e-15, - focal_position = [0, 0, profile_focal_distance + position_z], - centroid_position = [0, 0, position_z - c*profile_t_peak], - propagation_direction = [0, 0, 1], - polarization_direction = [0, 1, 0], - E0 = e_max, - fill_in = False) + wavelength=0.8e-06, + waist=5e-06, + 
duration=15e-15, + focal_position=[0, 0, profile_focal_distance + position_z], + centroid_position=[0, 0, position_z - c * profile_t_peak], + propagation_direction=[0, 0, 1], + polarization_direction=[0, 1, 0], + E0=e_max, + fill_in=False, +) laser_antenna = picmi.LaserAntenna( - position = [0., 0., position_z], - normal_vector = [0, 0, 1]) + position=[0.0, 0.0, position_z], normal_vector=[0, 0, 1] +) # Electromagnetic solver -solver = picmi.ElectromagneticSolver( - grid = grid, - method = 'Yee', - cfl = 1., - divE_cleaning = 0) +solver = picmi.ElectromagneticSolver(grid=grid, method="Yee", cfl=1.0, divE_cleaning=0) # Diagnostics -diag_field_list = ['B', 'E', 'J', 'rho'] +diag_field_list = ["B", "E", "J", "rho"] particle_diag = picmi.ParticleDiagnostic( - name = 'diag1', - period = 100, - write_dir = '.', - warpx_file_prefix = 'Python_LaserAcceleration_plt') + name="diag1", + period=100, + write_dir=".", + warpx_file_prefix="Python_LaserAcceleration_plt", +) field_diag = picmi.FieldDiagnostic( - name = 'diag1', - grid = grid, - period = 100, - data_list = diag_field_list, - write_dir = '.', - warpx_file_prefix = 'Python_LaserAcceleration_plt') + name="diag1", + grid=grid, + period=100, + data_list=diag_field_list, + write_dir=".", + warpx_file_prefix="Python_LaserAcceleration_plt", +) # Set up simulation sim = picmi.Simulation( - solver = solver, - max_steps = max_steps, - verbose = 1, - particle_shape = 'cubic', - warpx_use_filter = 1, - warpx_serialize_initial_conditions = 1, - warpx_do_dynamic_scheduling = 0) + solver=solver, + max_steps=max_steps, + verbose=1, + particle_shape="cubic", + warpx_use_filter=1, + warpx_serialize_initial_conditions=1, + warpx_do_dynamic_scheduling=0, +) # Add plasma electrons sim.add_species( - electrons, - layout = picmi.GriddedLayout(grid = grid, n_macroparticle_per_cell = [1, 1, 1])) + electrons, layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=[1, 1, 1]) +) # Add beam electrons -sim.add_species( - beam, - layout = 
picmi.PseudoRandomLayout(grid = grid, n_macroparticles = 100)) +sim.add_species(beam, layout=picmi.PseudoRandomLayout(grid=grid, n_macroparticles=100)) # Add laser -sim.add_laser( - laser, - injection_method = laser_antenna) +sim.add_laser(laser, injection_method=laser_antenna) # Add diagnostics sim.add_diagnostic(particle_diag) sim.add_diagnostic(field_diag) # Write input file that can be used to run with the compiled version -sim.write_input_file(file_name = 'inputs_3d_picmi') +sim.write_input_file(file_name="inputs_3d_picmi") # Initialize inputs and WarpX instance sim.initialize_inputs() diff --git a/Examples/Physics_applications/laser_acceleration/PICMI_inputs_rz.py b/Examples/Physics_applications/laser_acceleration/PICMI_inputs_rz.py index 7f09db8d6b3..c19dc09dcb1 100755 --- a/Examples/Physics_applications/laser_acceleration/PICMI_inputs_rz.py +++ b/Examples/Physics_applications/laser_acceleration/PICMI_inputs_rz.py @@ -14,10 +14,10 @@ nz = 512 # Physical domain -rmin = 0 -rmax = 30e-06 +rmin = 0 +rmax = 30e-06 zmin = -56e-06 -zmax = 12e-06 +zmax = 12e-06 # Domain decomposition max_grid_size = 64 @@ -25,17 +25,18 @@ # Create grid grid = picmi.CylindricalGrid( - number_of_cells = [nr, nz], - n_azimuthal_modes = 2, - lower_bound = [rmin, zmin], - upper_bound = [rmax, zmax], - lower_boundary_conditions = ['none', 'dirichlet'], - upper_boundary_conditions = ['dirichlet', 'dirichlet'], - lower_boundary_conditions_particles = ['none', 'absorbing'], - upper_boundary_conditions_particles = ['absorbing', 'absorbing'], - moving_window_velocity = [0., c], - warpx_max_grid_size = max_grid_size, - warpx_blocking_factor = blocking_factor) + number_of_cells=[nr, nz], + n_azimuthal_modes=2, + lower_bound=[rmin, zmin], + upper_bound=[rmax, zmax], + lower_boundary_conditions=["none", "dirichlet"], + upper_boundary_conditions=["dirichlet", "dirichlet"], + lower_boundary_conditions_particles=["none", "absorbing"], + upper_boundary_conditions_particles=["absorbing", "absorbing"], 
+ moving_window_velocity=[0.0, c], + warpx_max_grid_size=max_grid_size, + warpx_blocking_factor=blocking_factor, +) # Particles: plasma electrons plasma_density = 2e23 @@ -46,114 +47,114 @@ plasma_ymax = None plasma_zmax = None uniform_distribution = picmi.UniformDistribution( - density = plasma_density, - lower_bound = [plasma_xmin, plasma_ymin, plasma_zmin], - upper_bound = [plasma_xmax, plasma_ymax, plasma_zmax], - fill_in = True) + density=plasma_density, + lower_bound=[plasma_xmin, plasma_ymin, plasma_zmin], + upper_bound=[plasma_xmax, plasma_ymax, plasma_zmax], + fill_in=True, +) electrons = picmi.Species( - particle_type = 'electron', - name = 'electrons', - initial_distribution = uniform_distribution) + particle_type="electron", + name="electrons", + initial_distribution=uniform_distribution, +) # Particles: beam electrons q_tot = 1e-12 -x_m = 0. -y_m = 0. +x_m = 0.0 +y_m = 0.0 z_m = -28e-06 x_rms = 0.5e-06 y_rms = 0.5e-06 z_rms = 0.5e-06 -ux_m = 0. -uy_m = 0. -uz_m = 500. -ux_th = 2. -uy_th = 2. -uz_th = 50. 
+ux_m = 0.0 +uy_m = 0.0 +uz_m = 500.0 +ux_th = 2.0 +uy_th = 2.0 +uz_th = 50.0 gaussian_bunch_distribution = picmi.GaussianBunchDistribution( - n_physical_particles = q_tot / q_e, - rms_bunch_size = [x_rms, y_rms, z_rms], - rms_velocity = [c*ux_th, c*uy_th, c*uz_th], - centroid_position = [x_m, y_m, z_m], - centroid_velocity = [c*ux_m, c*uy_m, c*uz_m]) + n_physical_particles=q_tot / q_e, + rms_bunch_size=[x_rms, y_rms, z_rms], + rms_velocity=[c * ux_th, c * uy_th, c * uz_th], + centroid_position=[x_m, y_m, z_m], + centroid_velocity=[c * ux_m, c * uy_m, c * uz_m], +) beam = picmi.Species( - particle_type = 'electron', - name = 'beam', - initial_distribution = gaussian_bunch_distribution) + particle_type="electron", + name="beam", + initial_distribution=gaussian_bunch_distribution, +) # Laser e_max = 16e12 position_z = 9e-06 -profile_t_peak = 30.e-15 +profile_t_peak = 30.0e-15 profile_focal_distance = 100e-06 laser = picmi.GaussianLaser( - wavelength = 0.8e-06, - waist = 5e-06, - duration = 15e-15, - focal_position = [0, 0, profile_focal_distance + position_z], - centroid_position = [0, 0, position_z - c*profile_t_peak], - propagation_direction = [0, 0, 1], - polarization_direction = [0, 1, 0], - E0 = e_max, - fill_in = False) + wavelength=0.8e-06, + waist=5e-06, + duration=15e-15, + focal_position=[0, 0, profile_focal_distance + position_z], + centroid_position=[0, 0, position_z - c * profile_t_peak], + propagation_direction=[0, 0, 1], + polarization_direction=[0, 1, 0], + E0=e_max, + fill_in=False, +) laser_antenna = picmi.LaserAntenna( - position = [0., 0., position_z], - normal_vector = [0, 0, 1]) + position=[0.0, 0.0, position_z], normal_vector=[0, 0, 1] +) # Electromagnetic solver -solver = picmi.ElectromagneticSolver( - grid = grid, - method = 'Yee', - cfl = 1., - divE_cleaning = 0) +solver = picmi.ElectromagneticSolver(grid=grid, method="Yee", cfl=1.0, divE_cleaning=0) # Diagnostics -diag_field_list = ['B', 'E', 'J', 'rho'] +diag_field_list = ["B", "E", "J", 
"rho"] field_diag = picmi.FieldDiagnostic( - name = 'diag1', - grid = grid, - period = 10, - data_list = diag_field_list, - warpx_dump_rz_modes = 1, - write_dir = '.', - warpx_file_prefix = 'Python_LaserAccelerationRZ_plt') -diag_particle_list = ['weighting', 'momentum'] + name="diag1", + grid=grid, + period=10, + data_list=diag_field_list, + warpx_dump_rz_modes=1, + write_dir=".", + warpx_file_prefix="Python_LaserAccelerationRZ_plt", +) +diag_particle_list = ["weighting", "momentum"] particle_diag = picmi.ParticleDiagnostic( - name = 'diag1', - period = 10, - species = [electrons, beam], - data_list = diag_particle_list, - write_dir = '.', - warpx_file_prefix = 'Python_LaserAccelerationRZ_plt') + name="diag1", + period=10, + species=[electrons, beam], + data_list=diag_particle_list, + write_dir=".", + warpx_file_prefix="Python_LaserAccelerationRZ_plt", +) # Set up simulation sim = picmi.Simulation( - solver = solver, - max_steps = max_steps, - verbose = 1, - particle_shape = 'cubic', - warpx_use_filter = 0) + solver=solver, + max_steps=max_steps, + verbose=1, + particle_shape="cubic", + warpx_use_filter=0, +) # Add plasma electrons sim.add_species( - electrons, - layout = picmi.GriddedLayout(grid = grid, n_macroparticle_per_cell = [1, 4, 1])) + electrons, layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=[1, 4, 1]) +) # Add beam electrons -sim.add_species( - beam, - layout = picmi.PseudoRandomLayout(grid = grid, n_macroparticles = 100)) +sim.add_species(beam, layout=picmi.PseudoRandomLayout(grid=grid, n_macroparticles=100)) # Add laser -sim.add_laser( - laser, - injection_method = laser_antenna) +sim.add_laser(laser, injection_method=laser_antenna) # Add diagnostics sim.add_diagnostic(field_diag) sim.add_diagnostic(particle_diag) # Write input file that can be used to run with the compiled version -sim.write_input_file(file_name = 'inputs_rz_picmi') +sim.write_input_file(file_name="inputs_rz_picmi") # Initialize inputs and WarpX instance 
sim.initialize_inputs() diff --git a/Examples/Physics_applications/laser_acceleration/analysis_1d_fluids.py b/Examples/Physics_applications/laser_acceleration/analysis_1d_fluids.py index a33b82ebc02..593036bc3f6 100755 --- a/Examples/Physics_applications/laser_acceleration/analysis_1d_fluids.py +++ b/Examples/Physics_applications/laser_acceleration/analysis_1d_fluids.py @@ -15,7 +15,7 @@ import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import yt @@ -24,23 +24,25 @@ import numpy as np from scipy.constants import c, e, epsilon_0, m_e -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file fn = sys.argv[1] # Parameters (these parameters must match the parameters in `inputs.multi.rt`) -n0 = 20.e23 +n0 = 20.0e23 # Plasma frequency -wp = np.sqrt((n0*e**2)/(m_e*epsilon_0)) -kp = wp/c -tau = 15.e-15 +wp = np.sqrt((n0 * e**2) / (m_e * epsilon_0)) +kp = wp / c +tau = 15.0e-15 a0 = 2.491668 -e = -e #Electrons +e = -e # Electrons lambda_laser = 0.8e-6 -zmin = -20e-6; zmax = 100.e-6; Nz = 10240 +zmin = -20e-6 +zmax = 100.0e-6 +Nz = 10240 # Compute the theory @@ -51,17 +53,22 @@ # ODE Function def odefcn(phi, xi, kp, a0, c, tau, xi_0, lambda_laser): phi1, phi2 = phi - a_sq = a0**2 * np.exp(-2 * (xi - xi_0)**2 / (c**2 * tau**2))*np.sin(2*np.pi*(xi - xi_0)/lambda_laser)**2 + a_sq = ( + a0**2 + * np.exp(-2 * (xi - xi_0) ** 2 / (c**2 * tau**2)) + * np.sin(2 * np.pi * (xi - xi_0) / lambda_laser) ** 2 + ) dphi1_dxi = phi2 - dphi2_dxi = kp**2 * ((1 + a_sq) / (2 * (1 + phi1)**2) - 0.5) + dphi2_dxi = kp**2 * ((1 + a_sq) / (2 * (1 + phi1) ** 2) - 0.5) return [dphi1_dxi, dphi2_dxi] + # Call odeint to solve the ODE xi_span = [-20e-6, 100e-6] xi_0 = 0e-6 phi0 = [0.0, 0.0] -dxi = (zmax-zmin)/Nz -xi = zmin + dxi*( 0.5 + np.arange(Nz) ) +dxi = (zmax - zmin) / Nz +xi = zmin + dxi * (0.5 + np.arange(Nz)) phi = odeint(odefcn, 
phi0, xi, args=(kp, a0, c, tau, xi_0, lambda_laser)) # Change array direction to match the simulations @@ -72,23 +79,27 @@ def odefcn(phi, xi, kp, a0, c, tau, xi_0, lambda_laser): Ez = -phi[:, 1] # Compute the derived quantities -a_sq = a0**2 * np.exp(-2 * (xi - xi_0)**2 / (c**2 * tau**2)) *np.sin(2*np.pi*(xi - xi_0)/lambda_laser)**2 +a_sq = ( + a0**2 + * np.exp(-2 * (xi - xi_0) ** 2 / (c**2 * tau**2)) + * np.sin(2 * np.pi * (xi - xi_0) / lambda_laser) ** 2 +) gamma_perp_sq = 1 + a_sq -n = n0 * (gamma_perp_sq + (1 + phi2)**2) / (2 * (1 + phi2)**2) -uz = (gamma_perp_sq - (1 + phi2)**2) / (2 * (1 + phi2)) -gamma = (gamma_perp_sq + (1 + phi2)**2) / (2 * (1 + phi2)) +n = n0 * (gamma_perp_sq + (1 + phi2) ** 2) / (2 * (1 + phi2) ** 2) +uz = (gamma_perp_sq - (1 + phi2) ** 2) / (2 * (1 + phi2)) +gamma = (gamma_perp_sq + (1 + phi2) ** 2) / (2 * (1 + phi2)) # Theory Components [convert to si] uz *= c -J_th = np.multiply( np.divide(uz,gamma), n ) +J_th = np.multiply(np.divide(uz, gamma), n) J_th *= e -rho_th = e*n +rho_th = e * n E_th = Ez -E_th *= ((m_e*c*c)/e) -V_th = np.divide(uz,gamma) +E_th *= (m_e * c * c) / e +V_th = np.divide(uz, gamma) V_th /= c # Remove the ions -rho_th = rho_th - e*n0 +rho_th = rho_th - e * n0 # Dictate which region to compare solutions over # (Currently this is the full domain) @@ -98,56 +109,64 @@ def odefcn(phi, xi, kp, a0, c, tau, xi_0, lambda_laser): # Read the file ds = yt.load(fn) t0 = ds.current_time.to_value() -data = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, - dims=ds.domain_dimensions) +data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) # Check the validity of the fields error_rel = 0 -for field in ['Ez']: - E_sim = data[('mesh',field)].to_ndarray()[:,0,0] - #E_th = get_theoretical_field(field, t0) - max_error = abs(E_sim[min_i:max_i]-E_th[min_i:max_i]).max()/abs(E_th[min_i:max_i]).max() - print('%s: Max error: %.2e' %(field,max_error)) - error_rel = max( error_rel, max_error ) 
+for field in ["Ez"]: + E_sim = data[("mesh", field)].to_ndarray()[:, 0, 0] + # E_th = get_theoretical_field(field, t0) + max_error = ( + abs(E_sim[min_i:max_i] - E_th[min_i:max_i]).max() / abs(E_th[min_i:max_i]).max() + ) + print("%s: Max error: %.2e" % (field, max_error)) + error_rel = max(error_rel, max_error) # Check the validity of the currents -for field in ['Jz']: - J_sim = data[('mesh',field)].to_ndarray()[:,0,0] - #J_th = get_theoretical_J_field(field, t0) - max_error = abs(J_sim[min_i:max_i]-J_th[min_i:max_i]).max()/abs(J_th[min_i:max_i]).max() - print('%s: Max error: %.2e' %(field,max_error)) - error_rel = max( error_rel, max_error ) +for field in ["Jz"]: + J_sim = data[("mesh", field)].to_ndarray()[:, 0, 0] + # J_th = get_theoretical_J_field(field, t0) + max_error = ( + abs(J_sim[min_i:max_i] - J_th[min_i:max_i]).max() / abs(J_th[min_i:max_i]).max() + ) + print("%s: Max error: %.2e" % (field, max_error)) + error_rel = max(error_rel, max_error) # Check the validity of the charge -for field in ['rho']: - rho_sim = data[('boxlib',field)].to_ndarray()[:,0,0] - #rho_th = get_theoretical_rho_field(field, t0) - max_error = abs(rho_sim[min_i:max_i]-rho_th[min_i:max_i]).max()/abs(rho_th[min_i:max_i]).max() - print('%s: Max error: %.2e' %(field,max_error)) - error_rel = max( error_rel, max_error ) - -V_sim = np.divide(J_sim,rho_sim) +for field in ["rho"]: + rho_sim = data[("boxlib", field)].to_ndarray()[:, 0, 0] + # rho_th = get_theoretical_rho_field(field, t0) + max_error = ( + abs(rho_sim[min_i:max_i] - rho_th[min_i:max_i]).max() + / abs(rho_th[min_i:max_i]).max() + ) + print("%s: Max error: %.2e" % (field, max_error)) + error_rel = max(error_rel, max_error) + +V_sim = np.divide(J_sim, rho_sim) V_sim /= c # Create a figure with 2 rows and 2 columns fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 8)) # Titles and labels -titles = ['Ez', 'rho', 'Jz', 'Vz/c'] -xlabel = r'Xi' -ylabel = ['Ez', 'rho', 'Jz', 'Vz/c'] +titles = ["Ez", "rho", "Jz", "Vz/c"] 
+xlabel = r"Xi" +ylabel = ["Ez", "rho", "Jz", "Vz/c"] # Plotting loop for i in range(3): ax = axes[i // 2, i % 2] # Get the current subplot # Plot theoretical data - ax.plot(xi, [E_th, rho_th, J_th, V_th][i], label='Theoretical') + ax.plot(xi, [E_th, rho_th, J_th, V_th][i], label="Theoretical") # Plot simulated data - ax.plot(xi, [E_sim, rho_sim, J_sim, V_sim][i], label='Simulated') + ax.plot(xi, [E_sim, rho_sim, J_sim, V_sim][i], label="Simulated") # Set titles and labels - ax.set_title(f'{titles[i]} vs Xi') + ax.set_title(f"{titles[i]} vs Xi") ax.set_xlabel(xlabel) ax.set_ylabel(ylabel[i]) @@ -158,7 +177,7 @@ def odefcn(phi, xi, kp, a0, c, tau, xi_0, lambda_laser): plt.tight_layout() # Save the figure -plt.savefig('wfa_fluid_nonlinear_1d_analysis.png') +plt.savefig("wfa_fluid_nonlinear_1d_analysis.png") plt.show() @@ -168,7 +187,7 @@ def odefcn(phi, xi, kp, a0, c, tau, xi_0, lambda_laser): print("error_rel : " + str(error_rel)) print("tolerance_rel: " + str(tolerance_rel)) -assert( error_rel < tolerance_rel ) +assert error_rel < tolerance_rel test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Physics_applications/laser_acceleration/analysis_1d_fluids_boosted.py b/Examples/Physics_applications/laser_acceleration/analysis_1d_fluids_boosted.py index 30301996921..934d298c6b7 100755 --- a/Examples/Physics_applications/laser_acceleration/analysis_1d_fluids_boosted.py +++ b/Examples/Physics_applications/laser_acceleration/analysis_1d_fluids_boosted.py @@ -15,7 +15,7 @@ import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import yt @@ -24,23 +24,25 @@ import numpy as np from scipy.constants import c, e, epsilon_0, m_e -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file fn = sys.argv[1] # Parameters (these parameters must match the parameters 
in `inputs.multi.rt`) -n0 = 20.e23 +n0 = 20.0e23 # Plasma frequency -wp = np.sqrt((n0*e**2)/(m_e*epsilon_0)) -kp = wp/c -tau = 15.e-15 +wp = np.sqrt((n0 * e**2) / (m_e * epsilon_0)) +kp = wp / c +tau = 15.0e-15 a0 = 2.491668 -e = -e #Electrons +e = -e # Electrons lambda_laser = 0.8e-6 -zmin = -20e-6; zmax = 100.e-6; Nz = 4864 +zmin = -20e-6 +zmax = 100.0e-6 +Nz = 4864 # Compute the theory @@ -51,17 +53,22 @@ # ODE Function def odefcn(phi, xi, kp, a0, c, tau, xi_0, lambda_laser): phi1, phi2 = phi - a_sq = a0**2 * np.exp(-2 * (xi - xi_0)**2 / (c**2 * tau**2))*np.sin(2*np.pi*(xi - xi_0)/lambda_laser)**2 + a_sq = ( + a0**2 + * np.exp(-2 * (xi - xi_0) ** 2 / (c**2 * tau**2)) + * np.sin(2 * np.pi * (xi - xi_0) / lambda_laser) ** 2 + ) dphi1_dxi = phi2 - dphi2_dxi = kp**2 * ((1 + a_sq) / (2 * (1 + phi1)**2) - 0.5) + dphi2_dxi = kp**2 * ((1 + a_sq) / (2 * (1 + phi1) ** 2) - 0.5) return [dphi1_dxi, dphi2_dxi] + # Call odeint to solve the ODE xi_span = [-20e-6, 100e-6] xi_0 = 0e-6 phi0 = [0.0, 0.0] -dxi = (zmax-zmin)/Nz -xi = zmin + dxi*( 0.5 + np.arange(Nz) ) +dxi = (zmax - zmin) / Nz +xi = zmin + dxi * (0.5 + np.arange(Nz)) phi = odeint(odefcn, phi0, xi, args=(kp, a0, c, tau, xi_0, lambda_laser)) # Change array direction to match the simulations @@ -72,23 +79,27 @@ def odefcn(phi, xi, kp, a0, c, tau, xi_0, lambda_laser): Ez = -phi[:, 1] # Compute the derived quantities -a_sq = a0**2 * np.exp(-2 * (xi - xi_0)**2 / (c**2 * tau**2)) *np.sin(2*np.pi*(xi - xi_0)/lambda_laser)**2 +a_sq = ( + a0**2 + * np.exp(-2 * (xi - xi_0) ** 2 / (c**2 * tau**2)) + * np.sin(2 * np.pi * (xi - xi_0) / lambda_laser) ** 2 +) gamma_perp_sq = 1 + a_sq -n = n0 * (gamma_perp_sq + (1 + phi2)**2) / (2 * (1 + phi2)**2) -uz = (gamma_perp_sq - (1 + phi2)**2) / (2 * (1 + phi2)) -gamma = (gamma_perp_sq + (1 + phi2)**2) / (2 * (1 + phi2)) +n = n0 * (gamma_perp_sq + (1 + phi2) ** 2) / (2 * (1 + phi2) ** 2) +uz = (gamma_perp_sq - (1 + phi2) ** 2) / (2 * (1 + phi2)) +gamma = (gamma_perp_sq + (1 + phi2) ** 2) / 
(2 * (1 + phi2)) # Theory Components [convert to si] uz *= c -J_th = np.multiply( np.divide(uz,gamma), n ) +J_th = np.multiply(np.divide(uz, gamma), n) J_th *= e -rho_th = e*n +rho_th = e * n E_th = Ez -E_th *= ((m_e*c*c)/e) -V_th = np.divide(uz,gamma) +E_th *= (m_e * c * c) / e +V_th = np.divide(uz, gamma) V_th /= c # Remove the ions -rho_th = rho_th - e*n0 +rho_th = rho_th - e * n0 # Dictate which region to compare solutions over (cuttoff 0's from BTD extra) min_i = 200 @@ -97,56 +108,64 @@ def odefcn(phi, xi, kp, a0, c, tau, xi_0, lambda_laser): # Read the file ds = yt.load(fn) t0 = ds.current_time.to_value() -data = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, - dims=ds.domain_dimensions) +data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) # Check the validity of the fields error_rel = 0 -for field in ['Ez']: - E_sim = data[('mesh',field)].to_ndarray()[:,0,0] - #E_th = get_theoretical_field(field, t0) - max_error = abs(E_sim[min_i:max_i]-E_th[min_i:max_i]).max()/abs(E_th[min_i:max_i]).max() - print('%s: Max error: %.2e' %(field,max_error)) - error_rel = max( error_rel, max_error ) +for field in ["Ez"]: + E_sim = data[("mesh", field)].to_ndarray()[:, 0, 0] + # E_th = get_theoretical_field(field, t0) + max_error = ( + abs(E_sim[min_i:max_i] - E_th[min_i:max_i]).max() / abs(E_th[min_i:max_i]).max() + ) + print("%s: Max error: %.2e" % (field, max_error)) + error_rel = max(error_rel, max_error) # Check the validity of the currents -for field in ['Jz']: - J_sim = data[('mesh',field)].to_ndarray()[:,0,0] - #J_th = get_theoretical_J_field(field, t0) - max_error = abs(J_sim[min_i:max_i]-J_th[min_i:max_i]).max()/abs(J_th[min_i:max_i]).max() - print('%s: Max error: %.2e' %(field,max_error)) - error_rel = max( error_rel, max_error ) +for field in ["Jz"]: + J_sim = data[("mesh", field)].to_ndarray()[:, 0, 0] + # J_th = get_theoretical_J_field(field, t0) + max_error = ( + abs(J_sim[min_i:max_i] - 
J_th[min_i:max_i]).max() / abs(J_th[min_i:max_i]).max() + ) + print("%s: Max error: %.2e" % (field, max_error)) + error_rel = max(error_rel, max_error) # Check the validity of the charge -for field in ['rho']: - rho_sim = data[('boxlib',field)].to_ndarray()[:,0,0] - #rho_th = get_theoretical_rho_field(field, t0) - max_error = abs(rho_sim[min_i:max_i]-rho_th[min_i:max_i]).max()/abs(rho_th[min_i:max_i]).max() - print('%s: Max error: %.2e' %(field,max_error)) - error_rel = max( error_rel, max_error ) - -V_sim = np.divide(J_sim,rho_sim) +for field in ["rho"]: + rho_sim = data[("boxlib", field)].to_ndarray()[:, 0, 0] + # rho_th = get_theoretical_rho_field(field, t0) + max_error = ( + abs(rho_sim[min_i:max_i] - rho_th[min_i:max_i]).max() + / abs(rho_th[min_i:max_i]).max() + ) + print("%s: Max error: %.2e" % (field, max_error)) + error_rel = max(error_rel, max_error) + +V_sim = np.divide(J_sim, rho_sim) V_sim /= c # Create a figure with 2 rows and 2 columns fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 8)) # Titles and labels -titles = ['Ez', 'rho', 'Jz', 'Vz/c'] -xlabel = r'Xi' -ylabel = ['Ez', 'rho', 'Jz', 'Vz/c'] +titles = ["Ez", "rho", "Jz", "Vz/c"] +xlabel = r"Xi" +ylabel = ["Ez", "rho", "Jz", "Vz/c"] # Plotting loop for i in range(3): ax = axes[i // 2, i % 2] # Get the current subplot # Plot theoretical data - ax.plot(xi, [E_th, rho_th, J_th, V_th][i], label='Theoretical') + ax.plot(xi, [E_th, rho_th, J_th, V_th][i], label="Theoretical") # Plot simulated data - ax.plot(xi, [E_sim, rho_sim, J_sim, V_sim][i], label='Simulated') + ax.plot(xi, [E_sim, rho_sim, J_sim, V_sim][i], label="Simulated") # Set titles and labels - ax.set_title(f'{titles[i]} vs Xi') + ax.set_title(f"{titles[i]} vs Xi") ax.set_xlabel(xlabel) ax.set_ylabel(ylabel[i]) @@ -157,7 +176,7 @@ def odefcn(phi, xi, kp, a0, c, tau, xi_0, lambda_laser): plt.tight_layout() # Save the figure -plt.savefig('wfa_fluid_nonlinear_1d_analysis.png') +plt.savefig("wfa_fluid_nonlinear_1d_analysis.png") 
plt.show() @@ -167,7 +186,7 @@ def odefcn(phi, xi, kp, a0, c, tau, xi_0, lambda_laser): print("error_rel : " + str(error_rel)) print("tolerance_rel: " + str(tolerance_rel)) -assert( error_rel < tolerance_rel ) +assert error_rel < tolerance_rel test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py b/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py index a66c838fe9d..bc7fac15247 100755 --- a/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py +++ b/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py @@ -15,9 +15,8 @@ import yt yt.funcs.mylog.setLevel(50) -import numpy as np -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file @@ -28,7 +27,7 @@ # count the number of particles ad = ds.all_data() -np = ad['electrons', 'particle_id'].size +ps = ad["electrons", "particle_id"].size # the number of coarse particle streams n_coarse = 10 @@ -46,19 +45,19 @@ # Refined only transversely. 
Longitudinal spacing between particles in each stream is the same in both coarse and fine regions rr_longitudinal = 1 -np_expected = (n_coarse + n_fine*rr_longitudinal)*(n_0 + n_move) +np_expected = (n_coarse + n_fine * rr_longitudinal) * (n_0 + n_move) -assert( np == np_expected ) +assert ps == np_expected # Test uniformity of rho, by taking a slice of rho that # crosses the edge of the refined injection region # (but is ahead of the mesh refinement patch) ds.force_periodicity() ad = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) -rho = ad['rho'].to_ndarray().squeeze() +rho = ad["rho"].to_ndarray().squeeze() rho_slice = rho[13:51, 475] # Test uniformity up to 0.5% relative variation -assert( rho_slice.std() < 0.005*abs(rho_slice.mean()) ) +assert rho_slice.std() < 0.005 * abs(rho_slice.mean()) test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Physics_applications/laser_acceleration/plot_3d.py b/Examples/Physics_applications/laser_acceleration/plot_3d.py index 00222ff43c8..34e3770726b 100755 --- a/Examples/Physics_applications/laser_acceleration/plot_3d.py +++ b/Examples/Physics_applications/laser_acceleration/plot_3d.py @@ -36,5 +36,6 @@ def plot_lwfa(): fig.tight_layout() plt.show() + if __name__ == "__main__": plot_lwfa() diff --git a/Examples/Physics_applications/laser_ion/PICMI_inputs_2d.py b/Examples/Physics_applications/laser_ion/PICMI_inputs_2d.py index 9f7a2aacfca..e268d1d6c69 100755 --- a/Examples/Physics_applications/laser_ion/PICMI_inputs_2d.py +++ b/Examples/Physics_applications/laser_ion/PICMI_inputs_2d.py @@ -15,8 +15,8 @@ # proper resolution for 30 n_c (dx<=3.33nm) incl. acc. 
length # (>=6x V100) # --> choose larger `max_grid_size` and `blocking_factor` for 1 to 8 grids per GPU accordingly -#nx = 7488 -#nz = 14720 +# nx = 7488 +# nz = 14720 # Number of cells nx = 384 @@ -37,12 +37,13 @@ number_of_cells=[nx, nz], lower_bound=[xmin, zmin], upper_bound=[xmax, zmax], - lower_boundary_conditions=['open', 'open'], - upper_boundary_conditions=['open', 'open'], - lower_boundary_conditions_particles=['absorbing', 'absorbing'], - upper_boundary_conditions_particles=['absorbing', 'absorbing'], + lower_boundary_conditions=["open", "open"], + upper_boundary_conditions=["open", "open"], + lower_boundary_conditions_particles=["absorbing", "absorbing"], + upper_boundary_conditions_particles=["absorbing", "absorbing"], warpx_max_grid_size=max_grid_size, - warpx_blocking_factor=blocking_factor) + warpx_blocking_factor=blocking_factor, +) # Particles: plasma parameters # critical plasma density @@ -61,7 +62,9 @@ preplasma_Lcut = 2.0e-6 # [m] hard cutoff from surface plasma_r0 = 2.5e-6 # [m] radius or half-thickness plasma_eps_z = 0.05e-6 # [m] small offset in z to make zmin, zmax interval larger than 2*(r0 + Lcut) -plasma_creation_limit_z = plasma_r0 + preplasma_Lcut + plasma_eps_z # [m] upper limit in z for particle creation +plasma_creation_limit_z = ( + plasma_r0 + preplasma_Lcut + plasma_eps_z +) # [m] upper limit in z for particle creation plasma_xmin = None plasma_ymin = None @@ -70,17 +73,17 @@ plasma_ymax = None plasma_zmax = plasma_creation_limit_z -density_expression_str = f'{plasma_density}*((abs(z)<={plasma_r0}) + (abs(z)<{plasma_r0}+{preplasma_Lcut}) * (abs(z)>{plasma_r0}) * exp(-(abs(z)-{plasma_r0})/{preplasma_L}))' +density_expression_str = f"{plasma_density}*((abs(z)<={plasma_r0}) + (abs(z)<{plasma_r0}+{preplasma_Lcut}) * (abs(z)>{plasma_r0}) * exp(-(abs(z)-{plasma_r0})/{preplasma_L}))" slab_with_ramp_dist_hydrogen = picmi.AnalyticDistribution( density_expression=density_expression_str, lower_bound=[plasma_xmin, plasma_ymin, plasma_zmin], 
- upper_bound=[plasma_xmax, plasma_ymax, plasma_zmax] + upper_bound=[plasma_xmax, plasma_ymax, plasma_zmax], ) # thermal velocity spread for electrons in gamma*beta -ux_th = .01 -uz_th = .01 +ux_th = 0.01 +uz_th = 0.01 slab_with_ramp_dist_electrons = picmi.AnalyticDistribution( density_expression=density_expression_str, @@ -88,20 +91,20 @@ upper_bound=[plasma_xmax, plasma_ymax, plasma_zmax], # if `momentum_expressions` and `momentum_spread_expressions` are unset, # a Gaussian momentum distribution is assumed given that `rms_velocity` has any non-zero elements - rms_velocity=[c*ux_th, 0., c*uz_th] # thermal velocity spread in m/s + rms_velocity=[c * ux_th, 0.0, c * uz_th], # thermal velocity spread in m/s ) electrons = picmi.Species( - particle_type='electron', - name='electrons', + particle_type="electron", + name="electrons", initial_distribution=slab_with_ramp_dist_electrons, ) hydrogen = picmi.Species( - particle_type='proton', - name='hydrogen', + particle_type="proton", + name="hydrogen", initial_distribution=slab_with_ramp_dist_hydrogen, - warpx_add_real_attributes = {"orig_x": "x", "orig_z": "z"} + warpx_add_real_attributes={"orig_x": "x", "orig_z": "z"}, ) # Laser @@ -109,182 +112,186 @@ # a0 = 16, lambda_0 = 0.8mu -> e_max = 64.22 TV/m e_max = 64.22e12 position_z = -4.0e-06 -profile_t_peak = 50.e-15 +profile_t_peak = 50.0e-15 profile_focal_distance = 4.0e-06 laser = picmi.GaussianLaser( wavelength=0.8e-06, - waist=4.e-06, - duration=30.e-15, + waist=4.0e-06, + duration=30.0e-15, focal_position=[0, 0, profile_focal_distance + position_z], centroid_position=[0, 0, position_z - c * profile_t_peak], propagation_direction=[0, 0, 1], polarization_direction=[1, 0, 0], E0=e_max, - fill_in=False) + fill_in=False, +) laser_antenna = picmi.LaserAntenna( - position=[0., 0., position_z], - normal_vector=[0, 0, 1]) + position=[0.0, 0.0, position_z], normal_vector=[0, 0, 1] +) # Electromagnetic solver solver = picmi.ElectromagneticSolver( grid=grid, - method='Yee', + 
method="Yee", cfl=0.999, divE_cleaning=0, - #warpx_pml_ncell=10 + # warpx_pml_ncell=10 ) # Diagnostics particle_diag = picmi.ParticleDiagnostic( - name='Python_LaserIonAcc2d_plt', + name="Python_LaserIonAcc2d_plt", period=100, - write_dir='./diags', - warpx_format='openpmd', - warpx_openpmd_backend='h5', + write_dir="./diags", + warpx_format="openpmd", + warpx_openpmd_backend="h5", # demonstration of a spatial and momentum filter - warpx_plot_filter_function='(uz>=0) * (x<1.0e-6) * (x>-1.0e-6)' + warpx_plot_filter_function="(uz>=0) * (x<1.0e-6) * (x>-1.0e-6)", ) # reduce resolution of output fields coarsening_ratio = [4, 4] ncell_field = [] -for (ncell_comp, cr) in zip([nx,nz], coarsening_ratio): - ncell_field.append(int(ncell_comp/cr)) +for ncell_comp, cr in zip([nx, nz], coarsening_ratio): + ncell_field.append(int(ncell_comp / cr)) field_diag = picmi.FieldDiagnostic( - name='Python_LaserIonAcc2d_plt', + name="Python_LaserIonAcc2d_plt", grid=grid, period=100, number_of_cells=ncell_field, - data_list=['B', 'E', 'J', 'rho', 'rho_electrons', 'rho_hydrogen'], - write_dir='./diags', - warpx_format='openpmd', - warpx_openpmd_backend='h5' + data_list=["B", "E", "J", "rho", "rho_electrons", "rho_hydrogen"], + write_dir="./diags", + warpx_format="openpmd", + warpx_openpmd_backend="h5", ) particle_fw_diag = picmi.ParticleDiagnostic( - name='openPMDfw', + name="openPMDfw", period=100, - write_dir='./diags', - warpx_format='openpmd', - warpx_openpmd_backend='h5', - warpx_plot_filter_function='(uz>=0) * (x<1.0e-6) * (x>-1.0e-6)' + write_dir="./diags", + warpx_format="openpmd", + warpx_openpmd_backend="h5", + warpx_plot_filter_function="(uz>=0) * (x<1.0e-6) * (x>-1.0e-6)", ) particle_bw_diag = picmi.ParticleDiagnostic( - name='openPMDbw', + name="openPMDbw", period=100, - write_dir='./diags', - warpx_format='openpmd', - warpx_openpmd_backend='h5', - warpx_plot_filter_function='(uz<0)' + write_dir="./diags", + warpx_format="openpmd", + warpx_openpmd_backend="h5", + 
warpx_plot_filter_function="(uz<0)", ) # histograms with 2.0 degree acceptance angle in fw direction # 2 deg * pi / 180 : 0.03490658503 rad # half-angle +/- : 0.017453292515 rad histuH_rdiag = picmi.ReducedDiagnostic( - diag_type='ParticleHistogram', - name='histuH', + diag_type="ParticleHistogram", + name="histuH", period=100, species=hydrogen, bin_number=1000, bin_min=0.0, bin_max=0.474, # 100 MeV protons - histogram_function='u2=ux*ux+uy*uy+uz*uz; if(u2>0, sqrt(u2), 0.0)', - filter_function='u2=ux*ux+uy*uy+uz*uz; if(u2>0, abs(acos(uz / sqrt(u2))) <= 0.017453, 0)') + histogram_function="u2=ux*ux+uy*uy+uz*uz; if(u2>0, sqrt(u2), 0.0)", + filter_function="u2=ux*ux+uy*uy+uz*uz; if(u2>0, abs(acos(uz / sqrt(u2))) <= 0.017453, 0)", +) histue_rdiag = picmi.ReducedDiagnostic( - diag_type='ParticleHistogram', - name='histue', + diag_type="ParticleHistogram", + name="histue", period=100, species=electrons, bin_number=1000, bin_min=0.0, bin_max=197.0, # 100 MeV electrons - histogram_function='u2=ux*ux+uy*uy+uz*uz; if(u2>0, sqrt(u2), 0.0)', - filter_function='u2=ux*ux+uy*uy+uz*uz; if(u2>0, abs(acos(uz / sqrt(u2))) <= 0.017453, 0)') + histogram_function="u2=ux*ux+uy*uy+uz*uz; if(u2>0, sqrt(u2), 0.0)", + filter_function="u2=ux*ux+uy*uy+uz*uz; if(u2>0, abs(acos(uz / sqrt(u2))) <= 0.017453, 0)", +) # just a test entry to make sure that the histogram filter is purely optional: # this one just records uz of all hydrogen ions, independent of their pointing histuzAll_rdiag = picmi.ReducedDiagnostic( - diag_type='ParticleHistogram', - name='histuzAll', + diag_type="ParticleHistogram", + name="histuzAll", period=100, species=hydrogen, bin_number=1000, bin_min=-0.474, bin_max=0.474, - histogram_function='uz') + histogram_function="uz", +) field_probe_z_rdiag = picmi.ReducedDiagnostic( - diag_type='FieldProbe', - name='FieldProbe_Z', + diag_type="FieldProbe", + name="FieldProbe_Z", period=100, integrate=0, - probe_geometry='Line', + probe_geometry="Line", x_probe=0.0, z_probe=-5.0e-6, 
x1_probe=0.0, z1_probe=25.0e-6, - resolution=3712) + resolution=3712, +) field_probe_scat_point_rdiag = picmi.ReducedDiagnostic( - diag_type='FieldProbe', - name='FieldProbe_ScatPoint', + diag_type="FieldProbe", + name="FieldProbe_ScatPoint", period=1, integrate=0, - probe_geometry='Point', + probe_geometry="Point", x_probe=0.0, - z_probe=15.0e-6) + z_probe=15.0e-6, +) field_probe_scat_line_rdiag = picmi.ReducedDiagnostic( - diag_type='FieldProbe', - name='FieldProbe_ScatLine', + diag_type="FieldProbe", + name="FieldProbe_ScatLine", period=100, integrate=1, - probe_geometry='Line', + probe_geometry="Line", x_probe=-2.5e-6, z_probe=15.0e-6, x1_probe=2.5e-6, z1_probe=15e-6, - resolution=201) + resolution=201, +) load_balance_costs_rdiag = picmi.ReducedDiagnostic( - diag_type='LoadBalanceCosts', - name='LBC', - period=100) + diag_type="LoadBalanceCosts", name="LBC", period=100 +) # Set up simulation sim = picmi.Simulation( solver=solver, max_time=stop_time, # need to remove `max_step` to run this far verbose=1, - particle_shape='cubic', + particle_shape="cubic", warpx_numprocs=[1, 2], # deactivate `numprocs` for dynamic load balancing warpx_use_filter=1, warpx_load_balance_intervals=100, - warpx_load_balance_costs_update='heuristic' + warpx_load_balance_costs_update="heuristic", ) # Add plasma electrons sim.add_species( electrons, - layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=[2,2]) + layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=[2, 2]), # for more realistic simulations, try to avoid that macro-particles represent more than 1 n_c - #layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=[4,8]) + # layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=[4,8]) ) # Add hydrogen ions sim.add_species( hydrogen, - layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=[2,2]) + layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=[2, 2]), # for more realistic simulations, try to avoid that macro-particles 
represent more than 1 n_c - #layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=[4,8]) + # layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=[4,8]) ) # Add laser -sim.add_laser( - laser, - injection_method=laser_antenna) +sim.add_laser(laser, injection_method=laser_antenna) # Add full diagnostics sim.add_diagnostic(particle_diag) @@ -302,7 +309,7 @@ # TODO: make ParticleHistogram2D available # Write input file that can be used to run with the compiled version -sim.write_input_file(file_name='inputs_2d_picmi') +sim.write_input_file(file_name="inputs_2d_picmi") # Initialize inputs and WarpX instance sim.initialize_inputs() diff --git a/Examples/Physics_applications/laser_ion/analysis_histogram_2D.py b/Examples/Physics_applications/laser_ion/analysis_histogram_2D.py index a262a2373e5..06d3bb42c8e 100644 --- a/Examples/Physics_applications/laser_ion/analysis_histogram_2D.py +++ b/Examples/Physics_applications/laser_ion/analysis_histogram_2D.py @@ -9,24 +9,29 @@ import numpy as np from openpmd_viewer import OpenPMDTimeSeries -parser = argparse.ArgumentParser(description='Process a 2D histogram name and an integer.') +parser = argparse.ArgumentParser( + description="Process a 2D histogram name and an integer." +) parser.add_argument("hist2D", help="Folder name of the reduced diagnostic.") -parser.add_argument("iter", help="Iteration number of the simulation that is plotted. Enter a number from the list of iterations or 'All' if you want all plots.") +parser.add_argument( + "iter", + help="Iteration number of the simulation that is plotted. 
Enter a number from the list of iterations or 'All' if you want all plots.", +) args = parser.parse_args() -path = 'diags/reducedfiles/' + args.hist2D +path = "diags/reducedfiles/" + args.hist2D ts = OpenPMDTimeSeries(path) it = ts.iterations data, info = ts.get_field(field="data", iteration=0, plot=True) -print('The available iterations of the simulation are:', it) -print('The axes of the histogram are (0: ordinate ; 1: abscissa):', info.axes) -print('The data shape is:', data.shape) +print("The available iterations of the simulation are:", it) +print("The axes of the histogram are (0: ordinate ; 1: abscissa):", info.axes) +print("The data shape is:", data.shape) # Add the simulation time to the title once this information # is available in the "info" FieldMetaInformation object. -if args.iter == 'All' : +if args.iter == "All": for it_idx, i in enumerate(it): plt.figure() data, info = ts.get_field(field="data", iteration=i, plot=False) @@ -35,14 +40,20 @@ ordinate_name = info.axes[0] # This might be 'z' or something else ordinate_values = getattr(info, ordinate_name, None) - plt.pcolormesh(abscissa_values/1e-6, ordinate_values, data, norm=colors.LogNorm(), rasterized=True) + plt.pcolormesh( + abscissa_values / 1e-6, + ordinate_values, + data, + norm=colors.LogNorm(), + rasterized=True, + ) plt.title(args.hist2D + f" Time: {ts.t[it_idx]:.2e} s (Iteration: {i:d})") - plt.xlabel(info.axes[1]+r' ($\mu$m)') - plt.ylabel(info.axes[0]+r' ($m_\mathrm{species} c$)') + plt.xlabel(info.axes[1] + r" ($\mu$m)") + plt.ylabel(info.axes[0] + r" ($m_\mathrm{species} c$)") plt.colorbar() plt.tight_layout() - plt.savefig('Histogram_2D_' + args.hist2D + '_iteration_' + str(i) + '.png') -else : + plt.savefig("Histogram_2D_" + args.hist2D + "_iteration_" + str(i) + ".png") +else: i = int(args.iter) it_idx = np.where(i == it)[0][0] plt.figure() @@ -52,10 +63,16 @@ ordinate_name = info.axes[0] # This might be 'z' or something else ordinate_values = getattr(info, ordinate_name, None) - 
plt.pcolormesh(abscissa_values/1e-6, ordinate_values, data, norm=colors.LogNorm(), rasterized=True) + plt.pcolormesh( + abscissa_values / 1e-6, + ordinate_values, + data, + norm=colors.LogNorm(), + rasterized=True, + ) plt.title(args.hist2D + f" Time: {ts.t[it_idx]:.2e} s (Iteration: {i:d})") - plt.xlabel(info.axes[1]+r' ($\mu$m)') - plt.ylabel(info.axes[0]+r' ($m_\mathrm{species} c$)') + plt.xlabel(info.axes[1] + r" ($\mu$m)") + plt.ylabel(info.axes[0] + r" ($m_\mathrm{species} c$)") plt.colorbar() plt.tight_layout() - plt.savefig('Histogram_2D_' + args.hist2D + '_iteration_' + str(i) + '.png') + plt.savefig("Histogram_2D_" + args.hist2D + "_iteration_" + str(i) + ".png") diff --git a/Examples/Physics_applications/laser_ion/plot_2d.py b/Examples/Physics_applications/laser_ion/plot_2d.py index 736203e85ea..f8a3b05d8a3 100644 --- a/Examples/Physics_applications/laser_ion/plot_2d.py +++ b/Examples/Physics_applications/laser_ion/plot_2d.py @@ -21,7 +21,8 @@ from matplotlib.colors import TwoSlopeNorm from openpmd_viewer import OpenPMDTimeSeries -plt.rcParams.update({'font.size':16}) +plt.rcParams.update({"font.size": 16}) + def create_analysis_dir(directory): if not os.path.exists(directory): @@ -40,7 +41,9 @@ def visualize_density_iteration(ts, iteration, out_dir): # Physics parameters lambda_L = 800e-9 # Laser wavelength in meters omega_L = 2 * np.pi * sc.c / lambda_L # Laser frequency in seconds - n_c = sc.m_e * sc.epsilon_0 * omega_L**2 / sc.elementary_charge**2 # Critical plasma density in meters^(-3) + n_c = ( + sc.m_e * sc.epsilon_0 * omega_L**2 / sc.elementary_charge**2 + ) # Critical plasma density in meters^(-3) micron = 1e-6 # Simulation parameters @@ -66,24 +69,49 @@ def visualize_density_iteration(ts, iteration, out_dir): # Plotting # Electron density - im0 = axs[0].pcolormesh(zax[::nr]/micron, xax[::nr]/micron, -rho_e.T[::nr, ::nr], - vmin=0, vmax=n_max, cmap="Reds", rasterized=True) + im0 = axs[0].pcolormesh( + zax[::nr] / micron, + xax[::nr] / micron, + 
-rho_e.T[::nr, ::nr], + vmin=0, + vmax=n_max, + cmap="Reds", + rasterized=True, + ) plt.colorbar(im0, ax=axs[0], label=r"$n_\mathrm{\,e}\ (n_\mathrm{c})$") # Hydrogen density - im1 = axs[1].pcolormesh(zax[::nr]/micron, xax[::nr]/micron, rho_d.T[::nr, ::nr], - vmin=0, vmax=n_max, cmap="Blues", rasterized=True) + im1 = axs[1].pcolormesh( + zax[::nr] / micron, + xax[::nr] / micron, + rho_d.T[::nr, ::nr], + vmin=0, + vmax=n_max, + cmap="Blues", + rasterized=True, + ) plt.colorbar(im1, ax=axs[1], label=r"$n_\mathrm{\,H}\ (n_\mathrm{c})$") # Masked electron density - divnorm = TwoSlopeNorm(vmin=-7., vcenter=0., vmax=2) + divnorm = TwoSlopeNorm(vmin=-7.0, vcenter=0.0, vmax=2) masked_data = np.ma.masked_where(rho_e.T == 0, rho_e.T) my_cmap = plt.cm.PiYG_r.copy() - my_cmap.set_bad(color='black') - im2 = axs[2].pcolormesh(zax[::nr]/micron, xax[::nr]/micron, np.log(-masked_data[::nr, ::nr]), - norm=divnorm, cmap=my_cmap, rasterized=True) - plt.colorbar(im2, ax=axs[2], ticks=[-6, -3, 0, 1, 2], extend='both', - label=r"$\log n_\mathrm{\,e}\ (n_\mathrm{c})$") + my_cmap.set_bad(color="black") + im2 = axs[2].pcolormesh( + zax[::nr] / micron, + xax[::nr] / micron, + np.log(-masked_data[::nr, ::nr]), + norm=divnorm, + cmap=my_cmap, + rasterized=True, + ) + plt.colorbar( + im2, + ax=axs[2], + ticks=[-6, -3, 0, 1, 2], + extend="both", + label=r"$\log n_\mathrm{\,e}\ (n_\mathrm{c})$", + ) # Axis labels and title for ax in axs: @@ -98,8 +126,8 @@ def visualize_density_iteration(ts, iteration, out_dir): plt.savefig(f"{out_dir}/densities_{it:06d}.png") -def visualize_field_iteration(ts, iteration, out_dir): +def visualize_field_iteration(ts, iteration, out_dir): # Additional parameters nr = 1 # Number to decrease resolution micron = 1e-6 @@ -110,35 +138,50 @@ def visualize_field_iteration(ts, iteration, out_dir): time = ts.t[ii] Ex, Ex_info = ts.get_field(field="E", coord="x", iteration=it) - Exmax = np.max(np.abs([np.min(Ex),np.max(Ex)])) + Exmax = np.max(np.abs([np.min(Ex), 
np.max(Ex)])) By, By_info = ts.get_field(field="B", coord="y", iteration=it) - Bymax = np.max(np.abs([np.min(By),np.max(By)])) + Bymax = np.max(np.abs([np.min(By), np.max(By)])) Ez, Ez_info = ts.get_field(field="E", coord="z", iteration=it) - Ezmax = np.max(np.abs([np.min(Ez),np.max(Ez)])) + Ezmax = np.max(np.abs([np.min(Ez), np.max(Ez)])) # Axes setup - fig,axs = plt.subplots(3, 1, figsize=(5, 8)) + fig, axs = plt.subplots(3, 1, figsize=(5, 8)) xax, zax = Ex_info.x, Ex_info.z # Plotting im0 = axs[0].pcolormesh( - zax[::nr]/micron,xax[::nr]/micron,Ex.T[::nr,::nr], - vmin=-Exmax, vmax=Exmax, - cmap="RdBu", rasterized=True) + zax[::nr] / micron, + xax[::nr] / micron, + Ex.T[::nr, ::nr], + vmin=-Exmax, + vmax=Exmax, + cmap="RdBu", + rasterized=True, + ) - plt.colorbar(im0,ax=axs[00], label=r"$E_x$ (V/m)") + plt.colorbar(im0, ax=axs[00], label=r"$E_x$ (V/m)") im1 = axs[1].pcolormesh( - zax[::nr]/micron,xax[::nr]/micron,By.T[::nr,::nr], - vmin=-Bymax, vmax=Bymax, - cmap="RdBu", rasterized=True) - plt.colorbar(im1,ax=axs[1], label=r"$B_y$ (T)") + zax[::nr] / micron, + xax[::nr] / micron, + By.T[::nr, ::nr], + vmin=-Bymax, + vmax=Bymax, + cmap="RdBu", + rasterized=True, + ) + plt.colorbar(im1, ax=axs[1], label=r"$B_y$ (T)") im2 = axs[2].pcolormesh( - zax[::nr]/micron,xax[::nr]/micron,Ez.T[::nr,::nr], - vmin=-Ezmax, vmax=Ezmax, - cmap="RdBu", rasterized=True) - plt.colorbar(im2,ax=axs[2],label=r"$E_z$ (V/m)") + zax[::nr] / micron, + xax[::nr] / micron, + Ez.T[::nr, ::nr], + vmin=-Ezmax, + vmax=Ezmax, + cmap="RdBu", + rasterized=True, + ) + plt.colorbar(im2, ax=axs[2], label=r"$E_z$ (V/m)") # Axis labels and title for ax in axs: @@ -153,42 +196,48 @@ def visualize_field_iteration(ts, iteration, out_dir): plt.savefig(f"{out_dir}/fields_{it:06d}.png") -def visualize_particle_histogram_iteration(diag_name="histuH", species="hydrogen", iteration=1000, out_dir="./analysis"): +def visualize_particle_histogram_iteration( + diag_name="histuH", species="hydrogen", iteration=1000, 
out_dir="./analysis" +): it = iteration if species == "hydrogen": # proton rest energy in eV - mc2 = sc.m_p/sc.electron_volt * sc.c**2 + mc2 = sc.m_p / sc.electron_volt * sc.c**2 elif species == "electron": - mc2 = sc.m_e/sc.electron_volt * sc.c**2 + mc2 = sc.m_e / sc.electron_volt * sc.c**2 else: - raise NotImplementedError("The only implemented presets for this analysis script are `electron` or `hydrogen`.") + raise NotImplementedError( + "The only implemented presets for this analysis script are `electron` or `hydrogen`." + ) - fs = 1.e-15 - MeV = 1.e6 + fs = 1.0e-15 + MeV = 1.0e6 - df = pd.read_csv(f"./diags/reducedfiles/{diag_name}.txt",delimiter=r'\s+') + df = pd.read_csv(f"./diags/reducedfiles/{diag_name}.txt", delimiter=r"\s+") # the columns look like this: # #[0]step() [1]time(s) [2]bin1=0.000220() [3]bin2=0.000660() [4]bin3=0.001100() # matches words, strings surrounded by " ' ", dots, minus signs and e for scientific notation in numbers - nested_list = [re.findall(r"[\w'\.]+",col) for col in df.columns] + nested_list = [re.findall(r"[\w'\.]+", col) for col in df.columns] - index = pd.MultiIndex.from_tuples(nested_list, names=('column#', 'name', 'bin value')) + index = pd.MultiIndex.from_tuples( + nested_list, names=("column#", "name", "bin value") + ) - df.columns = (index) + df.columns = index steps = df.values[:, 0].astype(int) ii = np.where(steps == it)[0][0] time = df.values[:, 1] data = df.values[:, 2:] edge_vals = np.array([float(row[2]) for row in df.columns[2:]]) - edges_MeV = (np.sqrt(edge_vals**2 + 1)-1) * mc2 / MeV + edges_MeV = (np.sqrt(edge_vals**2 + 1) - 1) * mc2 / MeV time_fs = time / fs - fig,ax = plt.subplots(1,1) + fig, ax = plt.subplots(1, 1) ax.plot(edges_MeV, data[ii, :]) ax.set_yscale("log") @@ -202,17 +251,42 @@ def visualize_particle_histogram_iteration(diag_name="histuH", species="hydrogen if __name__ == "__main__": - # Argument parsing - parser = argparse.ArgumentParser(description='Visualize Laser-Ion Accelerator Densities and 
Fields') - parser.add_argument('-d', '--diag_dir', type=str, default='./diags/diag1', help='Directory containing density and field diagnostics') - parser.add_argument('-i', '--iteration', type=int, default=None, help='Specific iteration to visualize') - parser.add_argument('-hn', '--histogram_name', type=str, default='histuH', help='Name of histogram diagnostic to visualize') - parser.add_argument('-hs', '--histogram_species', type=str, default='hydrogen', help='Particle species in the visualized histogram diagnostic') + parser = argparse.ArgumentParser( + description="Visualize Laser-Ion Accelerator Densities and Fields" + ) + parser.add_argument( + "-d", + "--diag_dir", + type=str, + default="./diags/diag1", + help="Directory containing density and field diagnostics", + ) + parser.add_argument( + "-i", + "--iteration", + type=int, + default=None, + help="Specific iteration to visualize", + ) + parser.add_argument( + "-hn", + "--histogram_name", + type=str, + default="histuH", + help="Name of histogram diagnostic to visualize", + ) + parser.add_argument( + "-hs", + "--histogram_species", + type=str, + default="hydrogen", + help="Particle species in the visualized histogram diagnostic", + ) args = parser.parse_args() # Create analysis directory - analysis_dir = 'analysis' + analysis_dir = "analysis" create_analysis_dir(analysis_dir) # Loading the time series @@ -221,9 +295,13 @@ def visualize_particle_histogram_iteration(diag_name="histuH", species="hydrogen if args.iteration is not None: visualize_density_iteration(ts, args.iteration, analysis_dir) visualize_field_iteration(ts, args.iteration, analysis_dir) - visualize_particle_histogram_iteration(args.histogram_name, args.histogram_species, args.iteration, analysis_dir) + visualize_particle_histogram_iteration( + args.histogram_name, args.histogram_species, args.iteration, analysis_dir + ) else: for it in ts.iterations: visualize_density_iteration(ts, it, analysis_dir) visualize_field_iteration(ts, it, 
analysis_dir) - visualize_particle_histogram_iteration(args.histogram_name, args.histogram_species, it, analysis_dir) + visualize_particle_histogram_iteration( + args.histogram_name, args.histogram_species, it, analysis_dir + ) diff --git a/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration.py b/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration.py index 296aea48b35..596f6962618 100755 --- a/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration.py +++ b/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration.py @@ -2,7 +2,7 @@ from pywarpx import picmi -#from warp import picmi +# from warp import picmi constants = picmi.constants @@ -10,71 +10,97 @@ ny = 64 nz = 64 -xmin = -200.e-6 -xmax = +200.e-6 -ymin = -200.e-6 -ymax = +200.e-6 -zmin = -200.e-6 -zmax = +200.e-6 +xmin = -200.0e-6 +xmax = +200.0e-6 +ymin = -200.0e-6 +ymax = +200.0e-6 +zmin = -200.0e-6 +zmax = +200.0e-6 -moving_window_velocity = [0., 0., constants.c] +moving_window_velocity = [0.0, 0.0, constants.c] number_per_cell_each_dim = [2, 2, 1] max_steps = 10 -grid = picmi.Cartesian3DGrid(number_of_cells = [nx, ny, nz], - lower_bound = [xmin, ymin, zmin], - upper_bound = [xmax, ymax, zmax], - lower_boundary_conditions = ['periodic', 'periodic', 'open'], - upper_boundary_conditions = ['periodic', 'periodic', 'open'], - lower_boundary_conditions_particles = ['periodic', 'periodic', 'absorbing'], - upper_boundary_conditions_particles = ['periodic', 'periodic', 'absorbing'], - moving_window_velocity = moving_window_velocity, - warpx_max_grid_size=32) +grid = picmi.Cartesian3DGrid( + number_of_cells=[nx, ny, nz], + lower_bound=[xmin, ymin, zmin], + upper_bound=[xmax, ymax, zmax], + lower_boundary_conditions=["periodic", "periodic", "open"], + upper_boundary_conditions=["periodic", "periodic", "open"], + lower_boundary_conditions_particles=["periodic", "periodic", "absorbing"], + 
upper_boundary_conditions_particles=["periodic", "periodic", "absorbing"], + moving_window_velocity=moving_window_velocity, + warpx_max_grid_size=32, +) solver = picmi.ElectromagneticSolver(grid=grid, cfl=1) -beam_distribution = picmi.UniformDistribution(density = 1.e23, - lower_bound = [-20.e-6, -20.e-6, -150.e-6], - upper_bound = [+20.e-6, +20.e-6, -100.e-6], - directed_velocity = [0., 0., 1.e9]) - -plasma_distribution = picmi.UniformDistribution(density = 1.e22, - lower_bound = [-200.e-6, -200.e-6, 0.], - upper_bound = [+200.e-6, +200.e-6, None], - fill_in = True) - -beam = picmi.Species(particle_type='electron', name='beam', initial_distribution=beam_distribution) -plasma = picmi.Species(particle_type='electron', name='plasma', initial_distribution=plasma_distribution) - -sim = picmi.Simulation(solver = solver, - max_steps = max_steps, - verbose = 1, - warpx_current_deposition_algo = 'esirkepov', - warpx_use_filter = 0) - -sim.add_species(beam, layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=number_per_cell_each_dim)) -sim.add_species(plasma, layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=number_per_cell_each_dim)) - -field_diag = picmi.FieldDiagnostic(name = 'diag1', - grid = grid, - period = max_steps, - data_list = ['Ex', 'Ey', 'Ez', 'Jx', 'Jy', 'Jz', 'part_per_cell'], - write_dir = '.', - warpx_file_prefix = 'Python_PlasmaAcceleration_plt') - -part_diag = picmi.ParticleDiagnostic(name = 'diag1', - period = max_steps, - species = [beam, plasma], - data_list = ['ux', 'uy', 'uz', 'weighting']) +beam_distribution = picmi.UniformDistribution( + density=1.0e23, + lower_bound=[-20.0e-6, -20.0e-6, -150.0e-6], + upper_bound=[+20.0e-6, +20.0e-6, -100.0e-6], + directed_velocity=[0.0, 0.0, 1.0e9], +) + +plasma_distribution = picmi.UniformDistribution( + density=1.0e22, + lower_bound=[-200.0e-6, -200.0e-6, 0.0], + upper_bound=[+200.0e-6, +200.0e-6, None], + fill_in=True, +) + +beam = picmi.Species( + particle_type="electron", name="beam", 
initial_distribution=beam_distribution +) +plasma = picmi.Species( + particle_type="electron", name="plasma", initial_distribution=plasma_distribution +) + +sim = picmi.Simulation( + solver=solver, + max_steps=max_steps, + verbose=1, + warpx_current_deposition_algo="esirkepov", + warpx_use_filter=0, +) + +sim.add_species( + beam, + layout=picmi.GriddedLayout( + grid=grid, n_macroparticle_per_cell=number_per_cell_each_dim + ), +) +sim.add_species( + plasma, + layout=picmi.GriddedLayout( + grid=grid, n_macroparticle_per_cell=number_per_cell_each_dim + ), +) + +field_diag = picmi.FieldDiagnostic( + name="diag1", + grid=grid, + period=max_steps, + data_list=["Ex", "Ey", "Ez", "Jx", "Jy", "Jz", "part_per_cell"], + write_dir=".", + warpx_file_prefix="Python_PlasmaAcceleration_plt", +) + +part_diag = picmi.ParticleDiagnostic( + name="diag1", + period=max_steps, + species=[beam, plasma], + data_list=["ux", "uy", "uz", "weighting"], +) sim.add_diagnostic(field_diag) sim.add_diagnostic(part_diag) # write_inputs will create an inputs file that can be used to run # with the compiled version. 
-#sim.write_input_file(file_name = 'inputs_from_PICMI') +# sim.write_input_file(file_name = 'inputs_from_PICMI') # Alternatively, sim.step will run WarpX, controlling it from Python sim.step() diff --git a/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_1d.py b/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_1d.py index 27f7236204e..7bb08bc2e8e 100755 --- a/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_1d.py +++ b/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_1d.py @@ -2,14 +2,14 @@ from pywarpx import picmi -#from warp import picmi +# from warp import picmi constants = picmi.constants nz = 64 -zmin = -200.e-6 -zmax = +200.e-6 +zmin = -200.0e-6 +zmax = +200.0e-6 moving_window_velocity = [constants.c] @@ -17,58 +17,84 @@ max_steps = 1000 -grid = picmi.Cartesian1DGrid(number_of_cells = [nz], - lower_bound = [zmin], - upper_bound = [zmax], - lower_boundary_conditions = ['dirichlet'], - upper_boundary_conditions = ['dirichlet'], - lower_boundary_conditions_particles = ['absorbing'], - upper_boundary_conditions_particles = ['absorbing'], - moving_window_velocity = moving_window_velocity, - warpx_max_grid_size=32) +grid = picmi.Cartesian1DGrid( + number_of_cells=[nz], + lower_bound=[zmin], + upper_bound=[zmax], + lower_boundary_conditions=["dirichlet"], + upper_boundary_conditions=["dirichlet"], + lower_boundary_conditions_particles=["absorbing"], + upper_boundary_conditions_particles=["absorbing"], + moving_window_velocity=moving_window_velocity, + warpx_max_grid_size=32, +) solver = picmi.ElectromagneticSolver(grid=grid, cfl=0.999) -beam_distribution = picmi.UniformDistribution(density = 1.e23, - lower_bound = [None, None, -150.e-6], - upper_bound = [None, None, -100.e-6], - directed_velocity = [0., 0., 1.e9]) - -plasma_distribution = picmi.UniformDistribution(density = 1.e22, - lower_bound = [None, None, 0.], - upper_bound = 
[None, None, None], - fill_in = True) - -beam = picmi.Species(particle_type='electron', name='beam', initial_distribution=beam_distribution) -plasma = picmi.Species(particle_type='electron', name='plasma', initial_distribution=plasma_distribution) - -sim = picmi.Simulation(solver = solver, - max_steps = max_steps, - verbose = 1, - warpx_current_deposition_algo = 'esirkepov', - warpx_use_filter = 0) - -sim.add_species(beam, layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=number_per_cell_each_dim)) -sim.add_species(plasma, layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=number_per_cell_each_dim)) - -field_diag = picmi.FieldDiagnostic(name = 'diag1', - grid = grid, - period = max_steps, - data_list = ['Ex', 'Ey', 'Ez', 'Jx', 'Jy', 'Jz', 'part_per_cell'], - write_dir = '.', - warpx_file_prefix = 'Python_PlasmaAcceleration1d_plt') - -part_diag = picmi.ParticleDiagnostic(name = 'diag1', - period = max_steps, - species = [beam, plasma], - data_list = ['ux', 'uy', 'uz', 'weighting']) +beam_distribution = picmi.UniformDistribution( + density=1.0e23, + lower_bound=[None, None, -150.0e-6], + upper_bound=[None, None, -100.0e-6], + directed_velocity=[0.0, 0.0, 1.0e9], +) + +plasma_distribution = picmi.UniformDistribution( + density=1.0e22, + lower_bound=[None, None, 0.0], + upper_bound=[None, None, None], + fill_in=True, +) + +beam = picmi.Species( + particle_type="electron", name="beam", initial_distribution=beam_distribution +) +plasma = picmi.Species( + particle_type="electron", name="plasma", initial_distribution=plasma_distribution +) + +sim = picmi.Simulation( + solver=solver, + max_steps=max_steps, + verbose=1, + warpx_current_deposition_algo="esirkepov", + warpx_use_filter=0, +) + +sim.add_species( + beam, + layout=picmi.GriddedLayout( + grid=grid, n_macroparticle_per_cell=number_per_cell_each_dim + ), +) +sim.add_species( + plasma, + layout=picmi.GriddedLayout( + grid=grid, n_macroparticle_per_cell=number_per_cell_each_dim + ), +) + 
+field_diag = picmi.FieldDiagnostic( + name="diag1", + grid=grid, + period=max_steps, + data_list=["Ex", "Ey", "Ez", "Jx", "Jy", "Jz", "part_per_cell"], + write_dir=".", + warpx_file_prefix="Python_PlasmaAcceleration1d_plt", +) + +part_diag = picmi.ParticleDiagnostic( + name="diag1", + period=max_steps, + species=[beam, plasma], + data_list=["ux", "uy", "uz", "weighting"], +) sim.add_diagnostic(field_diag) sim.add_diagnostic(part_diag) # write_inputs will create an inputs file that can be used to run # with the compiled version. -#sim.write_input_file(file_name = 'inputs_from_PICMI') +# sim.write_input_file(file_name = 'inputs_from_PICMI') # Alternatively, sim.step will run WarpX, controlling it from Python sim.step() diff --git a/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_mr.py b/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_mr.py index 52a9729a1fb..df5e9e9808c 100755 --- a/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_mr.py +++ b/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_mr.py @@ -2,7 +2,7 @@ from pywarpx import picmi -#from warp import picmi +# from warp import picmi constants = picmi.constants @@ -10,76 +10,102 @@ ny = 64 nz = 64 -xmin = -200.e-6 -xmax = +200.e-6 -ymin = -200.e-6 -ymax = +200.e-6 -zmin = -200.e-6 -zmax = +200.e-6 +xmin = -200.0e-6 +xmax = +200.0e-6 +ymin = -200.0e-6 +ymax = +200.0e-6 +zmin = -200.0e-6 +zmax = +200.0e-6 -moving_window_velocity = [0., 0., constants.c] +moving_window_velocity = [0.0, 0.0, constants.c] number_per_cell_each_dim = [4, 4, 4] -grid = picmi.Cartesian3DGrid(number_of_cells = [nx, ny, nz], - lower_bound = [xmin, ymin, zmin], - upper_bound = [xmax, ymax, zmax], - lower_boundary_conditions = ['periodic', 'periodic', 'open'], - upper_boundary_conditions = ['periodic', 'periodic', 'open'], - lower_boundary_conditions_particles = ['periodic', 'periodic', 'absorbing'], - 
upper_boundary_conditions_particles = ['periodic', 'periodic', 'absorbing'], - moving_window_velocity = moving_window_velocity, - #refined_regions = [[1, [-25e-6, -25e-6, -200.e-6], [25e-6, 25e-6, 200.e-6]]], # as argument - warpx_max_grid_size=128, warpx_blocking_factor=16) +grid = picmi.Cartesian3DGrid( + number_of_cells=[nx, ny, nz], + lower_bound=[xmin, ymin, zmin], + upper_bound=[xmax, ymax, zmax], + lower_boundary_conditions=["periodic", "periodic", "open"], + upper_boundary_conditions=["periodic", "periodic", "open"], + lower_boundary_conditions_particles=["periodic", "periodic", "absorbing"], + upper_boundary_conditions_particles=["periodic", "periodic", "absorbing"], + moving_window_velocity=moving_window_velocity, + # refined_regions = [[1, [-25e-6, -25e-6, -200.e-6], [25e-6, 25e-6, 200.e-6]]], # as argument + warpx_max_grid_size=128, + warpx_blocking_factor=16, +) # --- As a separate function call (instead of refined_regions argument) -grid.add_refined_region(level = 1, - lo = [-25e-6, -25e-6, -200.e-6], - hi = [25e-6, 25e-6, 200.e-6]) - -solver = picmi.ElectromagneticSolver(grid=grid, cfl=1, - warpx_pml_ncell = 10) - -beam_distribution = picmi.UniformDistribution(density = 1.e23, - lower_bound = [-20.e-6, -20.e-6, -150.e-6], - upper_bound = [+20.e-6, +20.e-6, -100.e-6], - directed_velocity = [0., 0., 1.e9]) - -plasma_distribution = picmi.UniformDistribution(density = 1.e22, - lower_bound = [-200.e-6, -200.e-6, 0.], - upper_bound = [+200.e-6, +200.e-6, None], - fill_in = True) - -beam = picmi.Species(particle_type='electron', name='beam', initial_distribution=beam_distribution) -plasma = picmi.Species(particle_type='electron', name='plasma', initial_distribution=plasma_distribution) - -sim = picmi.Simulation(solver = solver, - max_steps = 2, - verbose = 1, - warpx_current_deposition_algo = 'esirkepov', - warpx_use_filter = 0) - -sim.add_species(beam, layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=number_per_cell_each_dim)) 
-sim.add_species(plasma, layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=number_per_cell_each_dim)) - -field_diag = picmi.FieldDiagnostic(name = 'diag1', - grid = grid, - period = 2, - data_list = ['Ex', 'Ey', 'Ez', 'Jx', 'Jy', 'Jz', 'part_per_cell'], - write_dir = '.', - warpx_file_prefix = 'Python_PlasmaAccelerationMR_plt') - -part_diag = picmi.ParticleDiagnostic(name = 'diag1', - period = 2, - species = [beam, plasma], - data_list = ['ux', 'uy', 'uz', 'weighting']) +grid.add_refined_region( + level=1, lo=[-25e-6, -25e-6, -200.0e-6], hi=[25e-6, 25e-6, 200.0e-6] +) + +solver = picmi.ElectromagneticSolver(grid=grid, cfl=1, warpx_pml_ncell=10) + +beam_distribution = picmi.UniformDistribution( + density=1.0e23, + lower_bound=[-20.0e-6, -20.0e-6, -150.0e-6], + upper_bound=[+20.0e-6, +20.0e-6, -100.0e-6], + directed_velocity=[0.0, 0.0, 1.0e9], +) + +plasma_distribution = picmi.UniformDistribution( + density=1.0e22, + lower_bound=[-200.0e-6, -200.0e-6, 0.0], + upper_bound=[+200.0e-6, +200.0e-6, None], + fill_in=True, +) + +beam = picmi.Species( + particle_type="electron", name="beam", initial_distribution=beam_distribution +) +plasma = picmi.Species( + particle_type="electron", name="plasma", initial_distribution=plasma_distribution +) + +sim = picmi.Simulation( + solver=solver, + max_steps=2, + verbose=1, + warpx_current_deposition_algo="esirkepov", + warpx_use_filter=0, +) + +sim.add_species( + beam, + layout=picmi.GriddedLayout( + grid=grid, n_macroparticle_per_cell=number_per_cell_each_dim + ), +) +sim.add_species( + plasma, + layout=picmi.GriddedLayout( + grid=grid, n_macroparticle_per_cell=number_per_cell_each_dim + ), +) + +field_diag = picmi.FieldDiagnostic( + name="diag1", + grid=grid, + period=2, + data_list=["Ex", "Ey", "Ez", "Jx", "Jy", "Jz", "part_per_cell"], + write_dir=".", + warpx_file_prefix="Python_PlasmaAccelerationMR_plt", +) + +part_diag = picmi.ParticleDiagnostic( + name="diag1", + period=2, + species=[beam, plasma], + 
data_list=["ux", "uy", "uz", "weighting"], +) sim.add_diagnostic(field_diag) sim.add_diagnostic(part_diag) # write_inputs will create an inputs file that can be used to run # with the compiled version. -#sim.write_input_file(file_name = 'inputs_from_PICMI.mr') +# sim.write_input_file(file_name = 'inputs_from_PICMI.mr') # Alternatively, sim.step will run WarpX, controlling it from Python sim.step() diff --git a/Examples/Physics_applications/spacecraft_charging/PICMI_inputs_rz.py b/Examples/Physics_applications/spacecraft_charging/PICMI_inputs_rz.py index 5b57b59fbe3..b0f4b6e9b39 100644 --- a/Examples/Physics_applications/spacecraft_charging/PICMI_inputs_rz.py +++ b/Examples/Physics_applications/spacecraft_charging/PICMI_inputs_rz.py @@ -29,9 +29,10 @@ class SpaceChargeFieldCorrector(object): correct field around the spacecraft, at each timestep (taking into account the charge that has been collected on the spacecraft) """ + def __init__(self): self.saved_first_iteration_fields = False - self.spacecraft_potential = 1. # Initial voltage: 1V + self.spacecraft_potential = 1.0 # Initial voltage: 1V self.spacecraft_capacitance = None def correct_space_charge_fields(self, q=None): @@ -49,23 +50,25 @@ def correct_space_charge_fields(self, q=None): # Correct fields so as to recover the actual charge Er = ExWrapper(include_ghosts=True) - Er[...] += (q - q_v)*self.normalized_Er + Er[...] += (q - q_v) * self.normalized_Er Ez = EzWrapper(include_ghosts=True) - Ez[...] += (q - q_v)*self.normalized_Ez + Ez[...] += (q - q_v) * self.normalized_Ez phi = PhiFPWrapper(include_ghosts=True) - phi[...] += (q - q_v)*self.normalized_phi - self.spacecraft_potential += (q - q_v)*self.spacecraft_capacitance - sim.extension.warpx.set_potential_on_eb( "%f" %self.spacecraft_potential ) - print('Setting potential to %f' %self.spacecraft_potential) + phi[...] 
+= (q - q_v) * self.normalized_phi + self.spacecraft_potential += (q - q_v) * self.spacecraft_capacitance + sim.extension.warpx.set_potential_on_eb("%f" % self.spacecraft_potential) + print("Setting potential to %f" % self.spacecraft_potential) # Confirm that the charge on the spacecraft is now correct compute_virtual_charge_on_spacecraft() - def save_normalized_vacuum_Efields(self,): + def save_normalized_vacuum_Efields( + self, + ): # Compute the charge that WarpX thinks there is on the spacecraft # from phi and rho after the Poisson solver q_v = compute_virtual_charge_on_spacecraft() - self.spacecraft_capacitance = 1./q_v # the potential was set to 1V + self.spacecraft_capacitance = 1.0 / q_v # the potential was set to 1V # Check that this iteration corresponded to a vacuum solve rho = RhoFPWrapper(include_ghosts=False) @@ -73,15 +76,15 @@ def save_normalized_vacuum_Efields(self,): # In principle, we should check that `rho` is exactly 0 # However, due to machine precision errors when adding the charge # of ions and electrons, this can be slightly different than 0 - assert np.all( abs(rho[...]) < 1.e-11 ) + assert np.all(abs(rho[...]) < 1.0e-11) # Record fields - Er = ExWrapper(include_ghosts=True)[:,:] - self.normalized_Er = Er[...] /q_v - Ez = EzWrapper(include_ghosts=True)[:,:] - self.normalized_Ez = Ez[...] /q_v - phi = PhiFPWrapper(include_ghosts=True)[:,:] - self.normalized_phi = phi[...] /q_v + Er = ExWrapper(include_ghosts=True)[:, :] + self.normalized_Er = Er[...] / q_v + Ez = EzWrapper(include_ghosts=True)[:, :] + self.normalized_Ez = Ez[...] / q_v + phi = PhiFPWrapper(include_ghosts=True)[:, :] + self.normalized_phi = phi[...] / q_v self.saved_first_iteration_fields = True self.correct_space_charge_fields(q=0) @@ -95,33 +98,39 @@ def compute_virtual_charge_on_spacecraft(): that WarpX thinks there should be on the spacecraft. 
""" # Get global array for the whole domain (across MPI ranks) - phi = PhiFPWrapper(include_ghosts=False)[:,:] - rho = RhoFPWrapper(include_ghosts=False)[:,:] + phi = PhiFPWrapper(include_ghosts=False)[:, :] + rho = RhoFPWrapper(include_ghosts=False)[:, :] # Check that this codes correspond to the global size of the box - assert phi.shape == (nr+1, nz+1) - assert rho.shape == (nr+1, nz+1) + assert phi.shape == (nr + 1, nz + 1) + assert rho.shape == (nr + 1, nz + 1) dr, dz = sim.extension.warpx.Geom(lev=0).data().CellSize() # Compute integral of grad phi over surfaces of the domain - r = np.linspace(rmin, rmax, len(phi), endpoint=False) + (rmax - rmin) / (2 * len(phi)) #shift of the r points because the derivaties are calculated in the middle - face_z0 = 2 * np.pi * 1./dz * ( (phi[:,0]-phi[:,1]) * r ).sum() * dr #here I am assuming that phi is a numpy array that can handle elementwise mult - face_zend = 2 * np.pi * 1./dz * ( (phi[:,-1]-phi[:,-2]) * r ).sum() * dr - face_rend = 2 * np.pi * 1./dr*((phi[-1,:]-phi[-2,:]) * rmax).sum() * dz + r = np.linspace(rmin, rmax, len(phi), endpoint=False) + (rmax - rmin) / ( + 2 * len(phi) + ) # shift of the r points because the derivaties are calculated in the middle + face_z0 = ( + 2 * np.pi * 1.0 / dz * ((phi[:, 0] - phi[:, 1]) * r).sum() * dr + ) # here I am assuming that phi is a numpy array that can handle elementwise mult + face_zend = 2 * np.pi * 1.0 / dz * ((phi[:, -1] - phi[:, -2]) * r).sum() * dr + face_rend = 2 * np.pi * 1.0 / dr * ((phi[-1, :] - phi[-2, :]) * rmax).sum() * dz grad_phi_integral = face_z0 + face_zend + face_rend # Compute integral of rho over volume of the domain # (i.e. 
total charge of the plasma particles) - rho_integral = (rho[1:nr-1,1:nz-1] * r[1:nr-1,np.newaxis]).sum()*dr*dz + rho_integral = ( + (rho[1 : nr - 1, 1 : nz - 1] * r[1 : nr - 1, np.newaxis]).sum() * dr * dz + ) # Due to an oddity in WarpX (which will probably be solved later) # we need to multiply `rho` by `-epsilon_0` to get the correct charge - rho_integral *= 2 * np.pi * -scc.epsilon_0 #does this oddity still exist? + rho_integral *= 2 * np.pi * -scc.epsilon_0 # does this oddity still exist? # Compute charge of the spacecraft, based on Gauss theorem - q_spacecraft = - rho_integral - scc.epsilon_0 * grad_phi_integral - print('Virtual charge on the spacecraft: %e' %q_spacecraft) + q_spacecraft = -rho_integral - scc.epsilon_0 * grad_phi_integral + print("Virtual charge on the spacecraft: %e" % q_spacecraft) return q_spacecraft @@ -131,19 +140,19 @@ def compute_actual_charge_on_spacecraft(): by counting how many electrons and protons were collected by the WarpX embedded boundary (EB) """ - charge = {'electrons': -scc.e, 'protons': scc.e} + charge = {"electrons": -scc.e, "protons": scc.e} q_spacecraft = 0 particle_buffer = ParticleBoundaryBufferWrapper() for species in charge.keys(): - weights = particle_buffer.get_particle_boundary_buffer(species, 'eb', 'w', 0) + weights = particle_buffer.get_particle_boundary_buffer(species, "eb", "w", 0) sum_weights_over_tiles = sum([w.sum() for w in weights]) # Reduce across all MPI ranks ntot = float(mpi.COMM_WORLD.allreduce(sum_weights_over_tiles, op=mpi.SUM)) - print('Total number of %s collected on spacecraft: %e'%(species, ntot)) + print("Total number of %s collected on spacecraft: %e" % (species, ntot)) q_spacecraft += ntot * charge[species] - print('Actual charge on the spacecraft: %e' %q_spacecraft) + print("Actual charge on the spacecraft: %e" % q_spacecraft) return q_spacecraft @@ -151,7 +160,7 @@ def compute_actual_charge_on_spacecraft(): # numerics parameters ########################## -dt=1.27e-8 +dt = 1.27e-8 # --- 
Nb time steps max_steps = 1000 @@ -159,54 +168,62 @@ def compute_actual_charge_on_spacecraft(): # --- grid nr = 40 -nz= 80 +nz = 80 rmin = 0.0 rmax = 3 zmin = -3 zmax = 3 -number_per_cell =5 -number_per_cell_each_dim = [10,1, 1] +number_per_cell = 5 +number_per_cell_each_dim = [10, 1, 1] ########################## # physics components ########################## -n = 7.0e9 #plasma density #particles/m^3 -Te = 85 #Electron temp in eV -Ti = 0.05 * Te #Ion temp in eV -qe = picmi.constants.q_e #elementary charge -m_e = picmi.constants.m_e #electron mass -m_i = 1836.0 * m_e #mass of ion +n = 7.0e9 # plasma density #particles/m^3 +Te = 85 # Electron temp in eV +Ti = 0.05 * Te # Ion temp in eV +qe = picmi.constants.q_e # elementary charge +m_e = picmi.constants.m_e # electron mass +m_i = 1836.0 * m_e # mass of ion v_eth = (qe * Te / m_e) ** 0.5 v_pth = (qe * Ti / m_i) ** 0.5 # nothing to change in the distribution function? -e_dist = picmi.UniformDistribution(density = n, rms_velocity=[v_eth, v_eth, v_eth] ) +e_dist = picmi.UniformDistribution(density=n, rms_velocity=[v_eth, v_eth, v_eth]) e_dist2 = picmi.UniformFluxDistribution( - flux=n*v_eth/(2*np.pi)**.5, # Flux for Gaussian with vmean=0 + flux=n * v_eth / (2 * np.pi) ** 0.5, # Flux for Gaussian with vmean=0 surface_flux_position=3, - flux_direction=-1, flux_normal_axis='r', + flux_direction=-1, + flux_normal_axis="r", gaussian_flux_momentum_distribution=True, - rms_velocity=[v_eth, v_eth, v_eth] ) -electrons = picmi.Species(particle_type='electron', - name='electrons', - initial_distribution=[e_dist,e_dist2], - warpx_save_particles_at_eb=1) + rms_velocity=[v_eth, v_eth, v_eth], +) +electrons = picmi.Species( + particle_type="electron", + name="electrons", + initial_distribution=[e_dist, e_dist2], + warpx_save_particles_at_eb=1, +) -p_dist = picmi.UniformDistribution(density = n, rms_velocity=[v_pth, v_pth, v_pth] ) +p_dist = picmi.UniformDistribution(density=n, rms_velocity=[v_pth, v_pth, v_pth]) p_dist2 = 
picmi.UniformFluxDistribution( - flux=n*v_pth/(2*np.pi)**.5, # Flux for Gaussian with vmean=0 + flux=n * v_pth / (2 * np.pi) ** 0.5, # Flux for Gaussian with vmean=0 surface_flux_position=3, - flux_direction=-1, flux_normal_axis='r', + flux_direction=-1, + flux_normal_axis="r", gaussian_flux_momentum_distribution=True, - rms_velocity=[v_pth, v_pth, v_pth] ) -protons = picmi.Species(particle_type='proton', - name='protons', - initial_distribution=[p_dist,p_dist2], - warpx_save_particles_at_eb=1) + rms_velocity=[v_pth, v_pth, v_pth], +) +protons = picmi.Species( + particle_type="proton", + name="protons", + initial_distribution=[p_dist, p_dist2], + warpx_save_particles_at_eb=1, +) ########################## @@ -214,25 +231,24 @@ def compute_actual_charge_on_spacecraft(): ########################## grid = picmi.CylindricalGrid( - number_of_cells = [nr, nz], - n_azimuthal_modes = 1, - lower_bound = [rmin, zmin], - upper_bound = [rmax, zmax], - lower_boundary_conditions = ['none', 'dirichlet'], - upper_boundary_conditions = ['dirichlet', 'dirichlet'], - lower_boundary_conditions_particles = ['none', 'reflecting'], - upper_boundary_conditions_particles = ['absorbing', 'reflecting'] + number_of_cells=[nr, nz], + n_azimuthal_modes=1, + lower_bound=[rmin, zmin], + upper_bound=[rmax, zmax], + lower_boundary_conditions=["none", "dirichlet"], + upper_boundary_conditions=["dirichlet", "dirichlet"], + lower_boundary_conditions_particles=["none", "reflecting"], + upper_boundary_conditions_particles=["absorbing", "reflecting"], ) solver = picmi.ElectrostaticSolver( - grid=grid, method='Multigrid', - warpx_absolute_tolerance=1e-7 + grid=grid, method="Multigrid", warpx_absolute_tolerance=1e-7 ) embedded_boundary = picmi.EmbeddedBoundary( implicit_function="-(x**2+y**2+z**2-radius**2)", - potential=1., # arbitrary value ; this will be corrected by a callback function - radius = 0.3277 + potential=1.0, # arbitrary value ; this will be corrected by a callback function + radius=0.3277, 
) @@ -241,22 +257,22 @@ def compute_actual_charge_on_spacecraft(): ########################## field_diag = picmi.FieldDiagnostic( - name = 'diag1', - grid = grid, - period = diagnostic_interval, - data_list = ['Er', 'Ez', 'phi', 'rho', - 'rho_electrons', 'rho_protons'], - warpx_format = 'openpmd', - write_dir = '.', - warpx_file_prefix = 'spacecraft_charging_plt' + name="diag1", + grid=grid, + period=diagnostic_interval, + data_list=["Er", "Ez", "phi", "rho", "rho_electrons", "rho_protons"], + warpx_format="openpmd", + write_dir=".", + warpx_file_prefix="spacecraft_charging_plt", ) -part_diag = picmi.ParticleDiagnostic(name = 'diag1', - period = diagnostic_interval, - species = [electrons, protons], - warpx_format = 'openpmd', - write_dir = '.', - warpx_file_prefix = 'spacecraft_charging_plt' +part_diag = picmi.ParticleDiagnostic( + name="diag1", + period=diagnostic_interval, + species=[electrons, protons], + warpx_format="openpmd", + write_dir=".", + warpx_file_prefix="spacecraft_charging_plt", ) ########################## @@ -264,23 +280,21 @@ def compute_actual_charge_on_spacecraft(): ########################## sim = picmi.Simulation( - solver = solver, - time_step_size = dt, - max_steps = max_steps, + solver=solver, + time_step_size=dt, + max_steps=max_steps, warpx_embedded_boundary=embedded_boundary, warpx_amrex_the_arena_is_managed=1, - warpx_random_seed=1 + warpx_random_seed=1, ) -layout1=picmi.GriddedLayout(n_macroparticle_per_cell=number_per_cell_each_dim, - grid=grid) -layout2=picmi.PseudoRandomLayout(n_macroparticles_per_cell=number_per_cell, - grid=grid) -sim.add_species(electrons, - layout = [layout1,layout2]) +layout1 = picmi.GriddedLayout( + n_macroparticle_per_cell=number_per_cell_each_dim, grid=grid +) +layout2 = picmi.PseudoRandomLayout(n_macroparticles_per_cell=number_per_cell, grid=grid) +sim.add_species(electrons, layout=[layout1, layout2]) -sim.add_species(protons, - layout = [layout1,layout2]) +sim.add_species(protons, layout=[layout1, 
layout2]) sim.add_diagnostic(field_diag) sim.add_diagnostic(part_diag) @@ -291,7 +305,7 @@ def compute_actual_charge_on_spacecraft(): spc = SpaceChargeFieldCorrector() -installafterInitEsolve( spc.save_normalized_vacuum_Efields ) -installafterEsolve( spc.correct_space_charge_fields ) +installafterInitEsolve(spc.save_normalized_vacuum_Efields) +installafterEsolve(spc.correct_space_charge_fields) sim.step(max_steps) diff --git a/Examples/Physics_applications/spacecraft_charging/analysis.py b/Examples/Physics_applications/spacecraft_charging/analysis.py index ef75fd1a10a..11374d9fc95 100755 --- a/Examples/Physics_applications/spacecraft_charging/analysis.py +++ b/Examples/Physics_applications/spacecraft_charging/analysis.py @@ -22,57 +22,59 @@ from scipy.optimize import curve_fit yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # Open plotfile specified in command line filename = sys.argv[1] test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename, output_format='openpmd') +checksumAPI.evaluate_checksum(test_name, filename, output_format="openpmd") -ts = OpenPMDTimeSeries('./spacecraft_charging_plt') +ts = OpenPMDTimeSeries("./spacecraft_charging_plt") dt = 1.27e-8 -t=[] -phi=[] +t = [] +phi = [] it = ts.iterations for i in it: - phi_i = ts.get_field('phi',iteration=i,plot=False) + phi_i = ts.get_field("phi", iteration=i, plot=False) # Find the minimum value among all grids for this iteration phi_min = np.min(phi_i[0]) phi.append(phi_min) - t.append(dt*i) + t.append(dt * i) def func(x, v0, tau): - - return v0 * (1-np.exp(-np.array(x) / tau)) - + return v0 * (1 - np.exp(-np.array(x) / tau)) popt, pcov = curve_fit(func, t, phi) -plt.plot(t,phi, label='modelisation') -plt.plot(t, func(t, *popt), 'r-',label='fit: v0=%5.3f, tau=%5.9f' % (popt[0], popt[1])) +plt.plot(t, phi, label="modelisation") +plt.plot(t, func(t, 
*popt), "r-", label="fit: v0=%5.3f, tau=%5.9f" % (popt[0], popt[1])) plt.legend() -plt.savefig('min_phi_analysis.png') +plt.savefig("min_phi_analysis.png") -print('fit parameters between the min(phi) curve over the time and the function v0(1-exp(-t/tau)):') -print('v0=%5.3f, tau=%5.9f' % (popt[0], popt[1])) +print( + "fit parameters between the min(phi) curve over the time and the function v0(1-exp(-t/tau)):" +) +print("v0=%5.3f, tau=%5.9f" % (popt[0], popt[1])) -tolerance_v0=0.04 -tolerance_tau=0.04 -print("tolerance for v0 = "+ str(tolerance_v0 *100) + '%') -print("tolerance for tau = "+ str(tolerance_tau*100) + '%') +tolerance_v0 = 0.04 +tolerance_tau = 0.04 +print("tolerance for v0 = " + str(tolerance_v0 * 100) + "%") +print("tolerance for tau = " + str(tolerance_tau * 100) + "%") -mean_v0=-151.347 -mean_tau=0.000004351 +mean_v0 = -151.347 +mean_tau = 0.000004351 -diff_v0=np.abs((popt[0]-mean_v0)/mean_v0) -diff_tau=np.abs((popt[1]-mean_tau)/mean_tau) +diff_v0 = np.abs((popt[0] - mean_v0) / mean_v0) +diff_tau = np.abs((popt[1] - mean_tau) / mean_tau) -print("percentage error for v0 = "+ str(diff_v0 *100) + '%') -print("percentage error for tau = "+ str(diff_tau*100) + '%') +print("percentage error for v0 = " + str(diff_v0 * 100) + "%") +print("percentage error for tau = " + str(diff_tau * 100) + "%") -assert (diff_v0 < tolerance_v0) and (diff_tau < tolerance_tau), 'Test spacecraft_charging did not pass' +assert (diff_v0 < tolerance_v0) and ( + diff_tau < tolerance_tau +), "Test spacecraft_charging did not pass" diff --git a/Examples/Tests/AcceleratorLattice/analysis.py b/Examples/Tests/AcceleratorLattice/analysis.py index 9c6589825a1..6f76fd86855 100755 --- a/Examples/Tests/AcceleratorLattice/analysis.py +++ b/Examples/Tests/AcceleratorLattice/analysis.py @@ -23,94 +23,113 @@ from scipy.constants import c, e, m_e yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") 
import checksumAPI filename = sys.argv[1] -ds = yt.load( filename ) +ds = yt.load(filename) ad = ds.all_data() -gamma_boost = float(ds.parameters.get('warpx.gamma_boost', 1.)) -uz_boost = np.sqrt(gamma_boost*gamma_boost - 1.)*c +gamma_boost = float(ds.parameters.get("warpx.gamma_boost", 1.0)) +uz_boost = np.sqrt(gamma_boost * gamma_boost - 1.0) * c # Fetch the final particle position -xx_sim = ad['electron', 'particle_position_x'].v[0] -zz_sim = ad['electron', 'particle_position_z'].v[0] -ux_sim = ad['electron', 'particle_momentum_x'].v[0]/m_e +xx_sim = ad["electron", "particle_position_x"].v[0] +zz_sim = ad["electron", "particle_position_z"].v[0] +ux_sim = ad["electron", "particle_momentum_x"].v[0] / m_e -if gamma_boost > 1.: +if gamma_boost > 1.0: # The simulation data is in the boosted frame. # Transform the z position to the lab frame. time = ds.current_time.value - zz_sim = gamma_boost*zz_sim + uz_boost*time + zz_sim = gamma_boost * zz_sim + uz_boost * time # Fetch the quadrupole lattice data quad_starts = [] quad_lengths = [] quad_strengths_E = [] -z_location = 0. 
+z_location = 0.0 + + def read_lattice(rootname, z_location): - lattice_elements = ds.parameters.get(f'{rootname}.elements').split() + lattice_elements = ds.parameters.get(f"{rootname}.elements").split() for element in lattice_elements: - element_type = ds.parameters.get(f'{element}.type') - if element_type == 'drift': - length = float(ds.parameters.get(f'{element}.ds')) + element_type = ds.parameters.get(f"{element}.type") + if element_type == "drift": + length = float(ds.parameters.get(f"{element}.ds")) z_location += length - elif element_type == 'quad': - length = float(ds.parameters.get(f'{element}.ds')) + elif element_type == "quad": + length = float(ds.parameters.get(f"{element}.ds")) quad_starts.append(z_location) quad_lengths.append(length) - quad_strengths_E.append(float(ds.parameters.get(f'{element}.dEdx'))) + quad_strengths_E.append(float(ds.parameters.get(f"{element}.dEdx"))) z_location += length - elif element_type == 'line': + elif element_type == "line": z_location = read_lattice(element, z_location) return z_location -read_lattice('lattice', z_location) + +read_lattice("lattice", z_location) # Fetch the initial position of the particle -x0 = [float(x) for x in ds.parameters.get('electron.single_particle_pos').split()] -ux0 = [float(x)*c for x in ds.parameters.get('electron.single_particle_u').split()] +x0 = [float(x) for x in ds.parameters.get("electron.single_particle_pos").split()] +ux0 = [float(x) * c for x in ds.parameters.get("electron.single_particle_u").split()] xx = x0[0] zz = x0[2] ux = ux0[0] uz = ux0[2] -gamma = np.sqrt(uz**2/c**2 + 1.) 
-vz = uz/gamma +gamma = np.sqrt(uz**2 / c**2 + 1.0) +vz = uz / gamma + def applylens(x0, vx0, vz0, gamma, lens_length, lens_strength): """Use analytic solution of a particle with a transverse dependent field""" - kb0 = np.sqrt(e/(m_e*gamma*vz0**2)*abs(lens_strength)) - if lens_strength >= 0.: - x1 = x0*np.cos(kb0*lens_length) + (vx0/vz0)/kb0*np.sin(kb0*lens_length) - vx1 = vz0*(-kb0*x0*np.sin(kb0*lens_length) + (vx0/vz0)*np.cos(kb0*lens_length)) + kb0 = np.sqrt(e / (m_e * gamma * vz0**2) * abs(lens_strength)) + if lens_strength >= 0.0: + x1 = x0 * np.cos(kb0 * lens_length) + (vx0 / vz0) / kb0 * np.sin( + kb0 * lens_length + ) + vx1 = vz0 * ( + -kb0 * x0 * np.sin(kb0 * lens_length) + + (vx0 / vz0) * np.cos(kb0 * lens_length) + ) else: - x1 = x0*np.cosh(kb0*lens_length) + (vx0/vz0)/kb0*np.sinh(kb0*lens_length) - vx1 = vz0*(+kb0*x0*np.sinh(kb0*lens_length) + (vx0/vz0)*np.cosh(kb0*lens_length)) + x1 = x0 * np.cosh(kb0 * lens_length) + (vx0 / vz0) / kb0 * np.sinh( + kb0 * lens_length + ) + vx1 = vz0 * ( + +kb0 * x0 * np.sinh(kb0 * lens_length) + + (vx0 / vz0) * np.cosh(kb0 * lens_length) + ) return x1, vx1 + # Integrate the particle using the analytic solution for i in range(len(quad_starts)): z_lens = quad_starts[i] - vx = ux/gamma - dt = (z_lens - zz)/vz - xx = xx + dt*vx + vx = ux / gamma + dt = (z_lens - zz) / vz + xx = xx + dt * vx xx, vx = applylens(xx, vx, vz, gamma, quad_lengths[i], quad_strengths_E[i]) - ux = gamma*vx + ux = gamma * vx zz = z_lens + quad_lengths[i] -dt = (zz_sim - zz)/vz -vx = ux/gamma -xx = xx + dt*vx +dt = (zz_sim - zz) / vz +vx = ux / gamma +xx = xx + dt * vx # Compare the analytic to the simulated final values -print(f'Error in x position is {abs(np.abs((xx - xx_sim)/xx))}, which should be < 0.01') -print(f'Error in x velocity is {abs(np.abs((ux - ux_sim)/ux))}, which should be < 0.002') - -assert abs(np.abs((xx - xx_sim)/xx)) < 0.01, Exception('error in x particle position') -assert abs(np.abs((ux - ux_sim)/ux)) < 0.002, Exception('error 
in x particle velocity') +print(f"Error in x position is {abs(np.abs((xx - xx_sim)/xx))}, which should be < 0.01") +print( + f"Error in x velocity is {abs(np.abs((ux - ux_sim)/ux))}, which should be < 0.002" +) + +assert abs(np.abs((xx - xx_sim) / xx)) < 0.01, Exception("error in x particle position") +assert abs(np.abs((ux - ux_sim) / ux)) < 0.002, Exception( + "error in x particle velocity" +) test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/Implicit/PICMI_inputs_vandb_jfnk_2d.py b/Examples/Tests/Implicit/PICMI_inputs_vandb_jfnk_2d.py index 2f919124e13..a2ed607e873 100755 --- a/Examples/Tests/Implicit/PICMI_inputs_vandb_jfnk_2d.py +++ b/Examples/Tests/Implicit/PICMI_inputs_vandb_jfnk_2d.py @@ -12,16 +12,16 @@ # physics parameters ########################## -n0 = 1.e30 # m^-3 -Ti = 100. # eV -Te = 100. # eV -wpe = constants.q_e*np.sqrt(n0/(constants.m_e*constants.ep0)) -de0 = constants.c/wpe -nppcz = 10 # number of particles/cell in z -dt = 0.1/wpe # s +n0 = 1.0e30 # m^-3 +Ti = 100.0 # eV +Te = 100.0 # eV +wpe = constants.q_e * np.sqrt(n0 / (constants.m_e * constants.ep0)) +de0 = constants.c / wpe +nppcz = 10 # number of particles/cell in z +dt = 0.1 / wpe # s -vthe = np.sqrt(Te*constants.q_e/constants.m_e) -vthi = np.sqrt(Ti*constants.q_e/constants.m_p) +vthe = np.sqrt(Te * constants.q_e / constants.m_e) +vthi = np.sqrt(Ti * constants.q_e / constants.m_p) ########################## # numerics parameters @@ -35,10 +35,10 @@ nx = 40 ny = 40 -xmin = 0. -ymin = 0. 
-xmax = 10.0*de0 -ymax = 10.0*de0 +xmin = 0.0 +ymin = 0.0 +xmax = 10.0 * de0 +ymax = 10.0 * de0 number_per_cell_each_dim = [nppcz, nppcz] @@ -46,93 +46,121 @@ # physics components ########################## -electrons_uniform_plasma = picmi.UniformDistribution(density = n0, - rms_velocity = [vthe, vthe, vthe]) +electrons_uniform_plasma = picmi.UniformDistribution( + density=n0, rms_velocity=[vthe, vthe, vthe] +) -electrons = picmi.Species(particle_type='electron', name='electrons', initial_distribution=electrons_uniform_plasma) +electrons = picmi.Species( + particle_type="electron", + name="electrons", + initial_distribution=electrons_uniform_plasma, +) -protons_uniform_plasma = picmi.UniformDistribution(density = n0, - rms_velocity = [vthi, vthi, vthi]) +protons_uniform_plasma = picmi.UniformDistribution( + density=n0, rms_velocity=[vthi, vthi, vthi] +) -protons = picmi.Species(particle_type='proton', name='protons', initial_distribution=protons_uniform_plasma) +protons = picmi.Species( + particle_type="proton", name="protons", initial_distribution=protons_uniform_plasma +) ########################## # numerics components ########################## -grid = picmi.Cartesian2DGrid(number_of_cells = [nx, ny], - lower_bound = [xmin, ymin], - upper_bound = [xmax, ymax], - lower_boundary_conditions = ['periodic', 'periodic'], - upper_boundary_conditions = ['periodic', 'periodic'], - warpx_max_grid_size = 8, - warpx_blocking_factor = 8) - -solver = picmi.ElectromagneticSolver(grid = grid, - method = 'Yee') - -GMRES_solver = picmi.GMRESLinearSolver(verbose_int = 2, - max_iterations = 1000, - relative_tolerance = 1.0e-8, - absolute_tolerance = 0.0) - -newton_solver = picmi.NewtonNonlinearSolver(verbose = True, - max_iterations = 20, - relative_tolerance = 1.0e-12, - absolute_tolerance = 0.0, - require_convergence = False, - linear_solver = GMRES_solver, - max_particle_iterations = 21, - particle_tolerance = 1.0e-12) - -evolve_scheme = picmi.ThetaImplicitEMEvolveScheme(theta 
= 0.5, - nonlinear_solver = newton_solver) +grid = picmi.Cartesian2DGrid( + number_of_cells=[nx, ny], + lower_bound=[xmin, ymin], + upper_bound=[xmax, ymax], + lower_boundary_conditions=["periodic", "periodic"], + upper_boundary_conditions=["periodic", "periodic"], + warpx_max_grid_size=8, + warpx_blocking_factor=8, +) + +solver = picmi.ElectromagneticSolver(grid=grid, method="Yee") + +GMRES_solver = picmi.GMRESLinearSolver( + verbose_int=2, + max_iterations=1000, + relative_tolerance=1.0e-8, + absolute_tolerance=0.0, +) + +newton_solver = picmi.NewtonNonlinearSolver( + verbose=True, + max_iterations=20, + relative_tolerance=1.0e-12, + absolute_tolerance=0.0, + require_convergence=False, + linear_solver=GMRES_solver, + max_particle_iterations=21, + particle_tolerance=1.0e-12, +) + +evolve_scheme = picmi.ThetaImplicitEMEvolveScheme( + theta=0.5, nonlinear_solver=newton_solver +) ########################## # diagnostics ########################## -field_diag1 = picmi.FieldDiagnostic(name = 'diag1', - grid = grid, - period = diagnostic_intervals, - data_list = ['Ex', 'Ey', 'Ez', 'Bx', 'By', 'Bz', 'Jx', 'Jy', 'Jz', 'rho', 'divE'], - write_dir = '.', - warpx_file_prefix = 'ThetaImplicitJFNK_VandB_2d_PICMI_plt') - -part_diag1 = picmi.ParticleDiagnostic(name = 'diag1', - period = diagnostic_intervals, - species = [electrons, protons], - data_list = ['weighting', 'position', 'momentum'], - write_dir = '.', - warpx_file_prefix = 'ThetaImplicitJFNK_VandB_2d_PICMI_plt') - -particle_energy_diag = picmi.ReducedDiagnostic(diag_type = 'ParticleEnergy', - name = 'particle_energy', - period = 1) - -field_energy_diag = picmi.ReducedDiagnostic(diag_type = 'FieldEnergy', - name = 'field_energy', - period = 1) +field_diag1 = picmi.FieldDiagnostic( + name="diag1", + grid=grid, + period=diagnostic_intervals, + data_list=["Ex", "Ey", "Ez", "Bx", "By", "Bz", "Jx", "Jy", "Jz", "rho", "divE"], + write_dir=".", + warpx_file_prefix="ThetaImplicitJFNK_VandB_2d_PICMI_plt", +) + +part_diag1 = 
picmi.ParticleDiagnostic( + name="diag1", + period=diagnostic_intervals, + species=[electrons, protons], + data_list=["weighting", "position", "momentum"], + write_dir=".", + warpx_file_prefix="ThetaImplicitJFNK_VandB_2d_PICMI_plt", +) + +particle_energy_diag = picmi.ReducedDiagnostic( + diag_type="ParticleEnergy", name="particle_energy", period=1 +) + +field_energy_diag = picmi.ReducedDiagnostic( + diag_type="FieldEnergy", name="field_energy", period=1 +) ########################## # simulation setup ########################## -sim = picmi.Simulation(solver = solver, - particle_shape = 2, - time_step_size = dt, - max_steps = max_steps, - verbose = 1, - warpx_evolve_scheme = evolve_scheme, - warpx_current_deposition_algo = 'villasenor', - warpx_particle_pusher_algo = 'boris', - warpx_serialize_initial_conditions = 1, - warpx_use_filter = 0) - -sim.add_species(electrons, - layout = picmi.GriddedLayout(n_macroparticle_per_cell=number_per_cell_each_dim, grid=grid)) -sim.add_species(protons, - layout = picmi.GriddedLayout(n_macroparticle_per_cell=number_per_cell_each_dim, grid=grid)) +sim = picmi.Simulation( + solver=solver, + particle_shape=2, + time_step_size=dt, + max_steps=max_steps, + verbose=1, + warpx_evolve_scheme=evolve_scheme, + warpx_current_deposition_algo="villasenor", + warpx_particle_pusher_algo="boris", + warpx_serialize_initial_conditions=1, + warpx_use_filter=0, +) + +sim.add_species( + electrons, + layout=picmi.GriddedLayout( + n_macroparticle_per_cell=number_per_cell_each_dim, grid=grid + ), +) +sim.add_species( + protons, + layout=picmi.GriddedLayout( + n_macroparticle_per_cell=number_per_cell_each_dim, grid=grid + ), +) sim.add_diagnostic(field_diag1) sim.add_diagnostic(part_diag1) @@ -145,7 +173,7 @@ # write_inputs will create an inputs file that can be used to run # with the compiled version. 
-sim.write_input_file(file_name = 'inputs2d_from_PICMI') +sim.write_input_file(file_name="inputs2d_from_PICMI") # Alternatively, sim.step will run WarpX, controlling it from Python sim.step() diff --git a/Examples/Tests/Implicit/analysis_1d.py b/Examples/Tests/Implicit/analysis_1d.py index 0e20b925df5..af4515968f9 100755 --- a/Examples/Tests/Implicit/analysis_1d.py +++ b/Examples/Tests/Implicit/analysis_1d.py @@ -15,30 +15,30 @@ import numpy as np -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file fn = sys.argv[1] -field_energy = np.loadtxt('diags/reducedfiles/field_energy.txt', skiprows=1) -particle_energy = np.loadtxt('diags/reducedfiles/particle_energy.txt', skiprows=1) +field_energy = np.loadtxt("diags/reducedfiles/field_energy.txt", skiprows=1) +particle_energy = np.loadtxt("diags/reducedfiles/particle_energy.txt", skiprows=1) -total_energy = field_energy[:,2] + particle_energy[:,2] +total_energy = field_energy[:, 2] + particle_energy[:, 2] -delta_E = (total_energy - total_energy[0])/total_energy[0] +delta_E = (total_energy - total_energy[0]) / total_energy[0] max_delta_E = np.abs(delta_E).max() -if re.match('SemiImplicitPicard_1d', fn): +if re.match("SemiImplicitPicard_1d", fn): tolerance_rel = 2.5e-5 -elif re.match('ThetaImplicitPicard_1d', fn): +elif re.match("ThetaImplicitPicard_1d", fn): # This case should have near machine precision conservation of energy - tolerance_rel = 1.e-14 + tolerance_rel = 1.0e-14 print(f"max change in energy: {max_delta_E}") print(f"tolerance: {tolerance_rel}") -assert( max_delta_E < tolerance_rel ) +assert max_delta_E < tolerance_rel test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/Implicit/analysis_vandb_jfnk_2d.py b/Examples/Tests/Implicit/analysis_vandb_jfnk_2d.py index 85faab61fcc..3c962eb91ea 100755 --- 
a/Examples/Tests/Implicit/analysis_vandb_jfnk_2d.py +++ b/Examples/Tests/Implicit/analysis_vandb_jfnk_2d.py @@ -17,52 +17,54 @@ import yt from scipy.constants import e, epsilon_0 -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file fn = sys.argv[1] -field_energy = np.loadtxt('diags/reducedfiles/field_energy.txt', skiprows=1) -particle_energy = np.loadtxt('diags/reducedfiles/particle_energy.txt', skiprows=1) +field_energy = np.loadtxt("diags/reducedfiles/field_energy.txt", skiprows=1) +particle_energy = np.loadtxt("diags/reducedfiles/particle_energy.txt", skiprows=1) -total_energy = field_energy[:,2] + particle_energy[:,2] +total_energy = field_energy[:, 2] + particle_energy[:, 2] -delta_E = (total_energy - total_energy[0])/total_energy[0] +delta_E = (total_energy - total_energy[0]) / total_energy[0] max_delta_E = np.abs(delta_E).max() # This case should have near machine precision conservation of energy -tolerance_rel_energy = 2.e-14 -tolerance_rel_charge = 2.e-15 +tolerance_rel_energy = 2.0e-14 +tolerance_rel_charge = 2.0e-15 print(f"max change in energy: {max_delta_E}") print(f"tolerance: {tolerance_rel_energy}") -assert( max_delta_E < tolerance_rel_energy ) +assert max_delta_E < tolerance_rel_energy # check for machine precision conservation of charge density -n0 = 1.e30 +n0 = 1.0e30 pltdir = sys.argv[1] ds = yt.load(pltdir) -data = ds.covering_grid(level = 0, left_edge = ds.domain_left_edge, dims = ds.domain_dimensions) +data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) -divE = data['boxlib', 'divE'].value -rho = data['boxlib', 'rho'].value +divE = data["boxlib", "divE"].value +rho = data["boxlib", "rho"].value # compute local error in Gauss's law -drho = (rho - epsilon_0*divE)/e/n0 +drho = (rho - epsilon_0 * divE) / e / n0 # compute RMS on in error on the grid nX = drho.shape[0] nZ = 
drho.shape[1] -drho2_avg = (drho**2).sum()/(nX*nZ) +drho2_avg = (drho**2).sum() / (nX * nZ) drho_rms = np.sqrt(drho2_avg) print(f"rms error in charge conservation: {drho_rms}") print(f"tolerance: {tolerance_rel_charge}") -assert( drho_rms < tolerance_rel_charge ) +assert drho_rms < tolerance_rel_charge test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/LoadExternalField/PICMI_inputs_3d_grid_fields.py b/Examples/Tests/LoadExternalField/PICMI_inputs_3d_grid_fields.py index d128d9c10e0..f71bb171ee3 100644 --- a/Examples/Tests/LoadExternalField/PICMI_inputs_3d_grid_fields.py +++ b/Examples/Tests/LoadExternalField/PICMI_inputs_3d_grid_fields.py @@ -42,45 +42,47 @@ ################################# ion_dist = picmi.ParticleListDistribution( - x=0.0, - y=0.2, - z=2.5, - ux=9.5e-05*constants.c, - uy=0.0*constants.c, - uz=1.34e-4*constants.c, - weight=1.0 - ) + x=0.0, + y=0.2, + z=2.5, + ux=9.5e-05 * constants.c, + uy=0.0 * constants.c, + uz=1.34e-4 * constants.c, + weight=1.0, +) ions = picmi.Species( - particle_type='H', - name='proton', charge='q_e',mass="m_p", - warpx_do_not_deposit=1, - initial_distribution=ion_dist - ) + particle_type="H", + name="proton", + charge="q_e", + mass="m_p", + warpx_do_not_deposit=1, + initial_distribution=ion_dist, +) ################################# ######## INITIAL FIELD ########## ################################# initial_field = picmi.LoadInitialField( - read_fields_from_path="../../../../openPMD-example-datasets/example-femm-3d.h5", - load_E=False - ) + read_fields_from_path="../../../../openPMD-example-datasets/example-femm-3d.h5", + load_E=False, +) ################################# ###### GRID AND SOLVER ########## ################################# grid = picmi.Cartesian3DGrid( - number_of_cells=[nx, ny, nz], - warpx_max_grid_size=max_grid_size, - lower_bound=[xmin, ymin, zmin], - upper_bound=[xmax, ymax, zmax], - lower_boundary_conditions=['dirichlet', 'dirichlet', 
'dirichlet'], - upper_boundary_conditions=['dirichlet', 'dirichlet', 'dirichlet'], - lower_boundary_conditions_particles=['absorbing', 'absorbing', 'absorbing'], - upper_boundary_conditions_particles=['absorbing', 'absorbing', 'absorbing'] - ) + number_of_cells=[nx, ny, nz], + warpx_max_grid_size=max_grid_size, + lower_bound=[xmin, ymin, zmin], + upper_bound=[xmax, ymax, zmax], + lower_boundary_conditions=["dirichlet", "dirichlet", "dirichlet"], + upper_boundary_conditions=["dirichlet", "dirichlet", "dirichlet"], + lower_boundary_conditions_particles=["absorbing", "absorbing", "absorbing"], + upper_boundary_conditions_particles=["absorbing", "absorbing", "absorbing"], +) solver = picmi.ElectrostaticSolver(grid=grid) ################################# @@ -88,46 +90,46 @@ ################################# particle_diag = picmi.ParticleDiagnostic( - name='diag1', - period=300, - species=[ions], - data_list = ['ux', 'uy', 'uz', 'x', 'y', 'z', 'weighting'], - write_dir='.', - warpx_file_prefix='Python_LoadExternalGridField3D_plt' - ) + name="diag1", + period=300, + species=[ions], + data_list=["ux", "uy", "uz", "x", "y", "z", "weighting"], + write_dir=".", + warpx_file_prefix="Python_LoadExternalGridField3D_plt", +) field_diag = picmi.FieldDiagnostic( - name='diag1', - grid=grid, - period=300, - data_list = ['Bx', 'By', 'Bz', 'Ex', 'Ey', 'Ez', 'Jx', 'Jy', 'Jz'], - write_dir='.', - warpx_file_prefix='Python_LoadExternalGridField3D_plt' - ) + name="diag1", + grid=grid, + period=300, + data_list=["Bx", "By", "Bz", "Ex", "Ey", "Ez", "Jx", "Jy", "Jz"], + write_dir=".", + warpx_file_prefix="Python_LoadExternalGridField3D_plt", +) ################################# ####### SIMULATION SETUP ######## ################################# sim = picmi.Simulation( - solver=solver, - max_steps=max_steps, - verbose=verbose, - warpx_serialize_initial_conditions=False, - warpx_grid_type='collocated', - warpx_do_dynamic_scheduling=False, - warpx_use_filter=use_filter, - time_step_size=dt, - 
particle_shape=particle_shape - ) + solver=solver, + max_steps=max_steps, + verbose=verbose, + warpx_serialize_initial_conditions=False, + warpx_grid_type="collocated", + warpx_do_dynamic_scheduling=False, + warpx_use_filter=use_filter, + time_step_size=dt, + particle_shape=particle_shape, +) sim.add_applied_field(initial_field) sim.add_species( - ions, - layout=picmi.PseudoRandomLayout( - n_macroparticles_per_cell=number_per_cell, grid=grid - ) - ) + ions, + layout=picmi.PseudoRandomLayout( + n_macroparticles_per_cell=number_per_cell, grid=grid + ), +) sim.add_diagnostic(field_diag) sim.add_diagnostic(particle_diag) @@ -136,5 +138,5 @@ ##### SIMULATION EXECUTION ###### ################################# -#sim.write_input_file('PICMI_inputs_3d') +# sim.write_input_file('PICMI_inputs_3d') sim.step(max_steps) diff --git a/Examples/Tests/LoadExternalField/PICMI_inputs_3d_particle_fields.py b/Examples/Tests/LoadExternalField/PICMI_inputs_3d_particle_fields.py index 7bf7c5a084c..90b1ac78474 100644 --- a/Examples/Tests/LoadExternalField/PICMI_inputs_3d_particle_fields.py +++ b/Examples/Tests/LoadExternalField/PICMI_inputs_3d_particle_fields.py @@ -42,45 +42,47 @@ ################################# ion_dist = picmi.ParticleListDistribution( - x=0.0, - y=0.2, - z=2.5, - ux=9.5e-05*constants.c, - uy=0.0*constants.c, - uz=1.34e-4*constants.c, - weight=1.0 - ) + x=0.0, + y=0.2, + z=2.5, + ux=9.5e-05 * constants.c, + uy=0.0 * constants.c, + uz=1.34e-4 * constants.c, + weight=1.0, +) ions = picmi.Species( - particle_type='H', - name='proton', charge='q_e',mass="m_p", - warpx_do_not_deposit=1, - initial_distribution=ion_dist - ) + particle_type="H", + name="proton", + charge="q_e", + mass="m_p", + warpx_do_not_deposit=1, + initial_distribution=ion_dist, +) ################################# ######## INITIAL FIELD ########## ################################# applied_field = picmi.LoadAppliedField( - read_fields_from_path="../../../../openPMD-example-datasets/example-femm-3d.h5", - 
load_E=False - ) + read_fields_from_path="../../../../openPMD-example-datasets/example-femm-3d.h5", + load_E=False, +) ################################# ###### GRID AND SOLVER ########## ################################# grid = picmi.Cartesian3DGrid( - number_of_cells=[nx, ny, nz], - warpx_max_grid_size=max_grid_size, - lower_bound=[xmin, ymin, zmin], - upper_bound=[xmax, ymax, zmax], - lower_boundary_conditions=['dirichlet', 'dirichlet', 'dirichlet'], - upper_boundary_conditions=['dirichlet', 'dirichlet', 'dirichlet'], - lower_boundary_conditions_particles=['absorbing', 'absorbing', 'absorbing'], - upper_boundary_conditions_particles=['absorbing', 'absorbing', 'absorbing'] - ) + number_of_cells=[nx, ny, nz], + warpx_max_grid_size=max_grid_size, + lower_bound=[xmin, ymin, zmin], + upper_bound=[xmax, ymax, zmax], + lower_boundary_conditions=["dirichlet", "dirichlet", "dirichlet"], + upper_boundary_conditions=["dirichlet", "dirichlet", "dirichlet"], + lower_boundary_conditions_particles=["absorbing", "absorbing", "absorbing"], + upper_boundary_conditions_particles=["absorbing", "absorbing", "absorbing"], +) solver = picmi.ElectrostaticSolver(grid=grid) ################################# @@ -88,46 +90,46 @@ ################################# particle_diag = picmi.ParticleDiagnostic( - name='diag1', - period=300, - species=[ions], - data_list = ['ux', 'uy', 'uz', 'x', 'y', 'z', 'weighting'], - write_dir='.', - warpx_file_prefix='Python_LoadExternalParticleField3D_plt' - ) + name="diag1", + period=300, + species=[ions], + data_list=["ux", "uy", "uz", "x", "y", "z", "weighting"], + write_dir=".", + warpx_file_prefix="Python_LoadExternalParticleField3D_plt", +) field_diag = picmi.FieldDiagnostic( - name='diag1', - grid=grid, - period=300, - data_list = ['Bx', 'By', 'Bz', 'Ex', 'Ey', 'Ez', 'Jx', 'Jy', 'Jz'], - write_dir='.', - warpx_file_prefix='Python_LoadExternalParticleField3D_plt' - ) + name="diag1", + grid=grid, + period=300, + data_list=["Bx", "By", "Bz", "Ex", "Ey", 
"Ez", "Jx", "Jy", "Jz"], + write_dir=".", + warpx_file_prefix="Python_LoadExternalParticleField3D_plt", +) ################################# ####### SIMULATION SETUP ######## ################################# sim = picmi.Simulation( - solver=solver, - max_steps=max_steps, - verbose=verbose, - warpx_serialize_initial_conditions=False, - warpx_grid_type='collocated', - warpx_do_dynamic_scheduling=False, - warpx_use_filter=use_filter, - time_step_size=dt, - particle_shape=particle_shape - ) + solver=solver, + max_steps=max_steps, + verbose=verbose, + warpx_serialize_initial_conditions=False, + warpx_grid_type="collocated", + warpx_do_dynamic_scheduling=False, + warpx_use_filter=use_filter, + time_step_size=dt, + particle_shape=particle_shape, +) sim.add_applied_field(applied_field) sim.add_species( - ions, - layout=picmi.PseudoRandomLayout( - n_macroparticles_per_cell=number_per_cell, grid=grid - ) - ) + ions, + layout=picmi.PseudoRandomLayout( + n_macroparticles_per_cell=number_per_cell, grid=grid + ), +) sim.add_diagnostic(field_diag) sim.add_diagnostic(particle_diag) @@ -136,5 +138,5 @@ ##### SIMULATION EXECUTION ###### ################################# -#sim.write_input_file('PICMI_inputs_3d') +# sim.write_input_file('PICMI_inputs_3d') sim.step(max_steps) diff --git a/Examples/Tests/LoadExternalField/analysis_3d.py b/Examples/Tests/LoadExternalField/analysis_3d.py index 0539448f873..0865584d683 100755 --- a/Examples/Tests/LoadExternalField/analysis_3d.py +++ b/Examples/Tests/LoadExternalField/analysis_3d.py @@ -22,7 +22,7 @@ import numpy as np import yt -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI tolerance = 1.0e-8 @@ -32,17 +32,17 @@ filename = sys.argv[1] -ds = yt.load( filename ) +ds = yt.load(filename) ad = ds.all_data() -x = ad['proton','particle_position_x'].to_ndarray() -y = ad['proton','particle_position_y'].to_ndarray() -z = 
ad['proton','particle_position_z'].to_ndarray() +x = ad["proton", "particle_position_x"].to_ndarray() +y = ad["proton", "particle_position_y"].to_ndarray() +z = ad["proton", "particle_position_z"].to_ndarray() -error = np.min(np.sqrt((x-x0)**2+(y-y0)**2+(z-z0)**2)) +error = np.min(np.sqrt((x - x0) ** 2 + (y - y0) ** 2 + (z - z0) ** 2)) -print('error = ', error) -print('tolerance = ', tolerance) -assert(error < tolerance) +print("error = ", error) +print("tolerance = ", tolerance) +assert error < tolerance test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/LoadExternalField/analysis_rz.py b/Examples/Tests/LoadExternalField/analysis_rz.py index fd82fbbdac6..75d9c084718 100755 --- a/Examples/Tests/LoadExternalField/analysis_rz.py +++ b/Examples/Tests/LoadExternalField/analysis_rz.py @@ -22,7 +22,7 @@ import numpy as np import yt -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI tolerance = 1.0e-8 @@ -30,16 +30,16 @@ z0 = 4.3632492 filename = sys.argv[1] -ds = yt.load( filename ) +ds = yt.load(filename) ad = ds.all_data() -r = ad['proton','particle_position_x'].to_ndarray() -z = ad['proton','particle_position_y'].to_ndarray() +r = ad["proton", "particle_position_x"].to_ndarray() +z = ad["proton", "particle_position_y"].to_ndarray() -error = np.min(np.sqrt((r-r0)**2+(z-z0)**2)) +error = np.min(np.sqrt((r - r0) ** 2 + (z - z0) ** 2)) -print('error = ', error) -print('tolerance = ', tolerance) -assert(error < tolerance) +print("error = ", error) +print("tolerance = ", tolerance) +assert error < tolerance test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/boosted_diags/analysis.py b/Examples/Tests/boosted_diags/analysis.py index 2b21184dc3d..62956133af6 100755 --- a/Examples/Tests/boosted_diags/analysis.py +++ 
b/Examples/Tests/boosted_diags/analysis.py @@ -7,14 +7,14 @@ # License: BSD-3-Clause-LBNL -''' +""" Analysis script of a WarpX simulation in a boosted frame. The simulation runs in a boosted frame, and the analysis is done in the lab frame, i.e., on the back-transformed diagnostics for the full 3D simulation and an x-z slice at y=y_center. The field-data, Ez, along z, at (x_center,y_center,:) is compared between the full back-transformed diagnostic and the reduced diagnostic (i.e., x-z slice) . -''' +""" import os import sys @@ -26,7 +26,7 @@ yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI filename = sys.argv[1] @@ -38,23 +38,22 @@ # Read data from new back-transformed diagnostics (plotfile) ds_plotfile = yt.load(filename) data = ds_plotfile.covering_grid( - level=0, - left_edge=ds_plotfile.domain_left_edge, - dims=ds_plotfile.domain_dimensions) -Ez_plotfile = data[('mesh', 'Ez')].to_ndarray() + level=0, left_edge=ds_plotfile.domain_left_edge, dims=ds_plotfile.domain_dimensions +) +Ez_plotfile = data[("mesh", "Ez")].to_ndarray() # Read data from new back-transformed diagnostics (openPMD) series = io.Series("./diags/diag2/openpmd_%T.h5", io.Access.read_only) ds_openpmd = series.iterations[3] -Ez_openpmd = ds_openpmd.meshes['E']['z'].load_chunk() +Ez_openpmd = ds_openpmd.meshes["E"]["z"].load_chunk() Ez_openpmd = Ez_openpmd.transpose() series.flush() # Compare arrays to check consistency between new BTD formats (plotfile and openPMD) -assert(np.allclose(Ez_plotfile, Ez_openpmd, rtol=rtol, atol=atol)) +assert np.allclose(Ez_plotfile, Ez_openpmd, rtol=rtol, atol=atol) # Check that particle random sub-selection has been applied -ts = OpenPMDTimeSeries('./diags/diag2/') -w, = ts.get_particle(['w'], species='beam', iteration=3) +ts = OpenPMDTimeSeries("./diags/diag2/") +(w,) = ts.get_particle(["w"], species="beam", iteration=3) assert (400 < len(w)) & 
(len(w) < 600) test_name = os.path.split(os.getcwd())[1] diff --git a/Examples/Tests/boundaries/analysis.py b/Examples/Tests/boundaries/analysis.py index 9c108b16196..be76a728a1f 100755 --- a/Examples/Tests/boundaries/analysis.py +++ b/Examples/Tests/boundaries/analysis.py @@ -22,21 +22,21 @@ from scipy.constants import c, m_e yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # The min and max size of the box along the three axis. -dmin = -1. -dmax = +1. +dmin = -1.0 +dmax = +1.0 # Open plotfile specified in command line filename = sys.argv[1] -ds = yt.load( filename ) +ds = yt.load(filename) ad = ds.all_data() time = ds.current_time.to_value() -filename0 = filename[:-5] + '00000' -ds0 = yt.load( filename0 ) +filename0 = filename[:-5] + "00000" +ds0 = yt.load(filename0) ad0 = ds0.all_data() # Read in the particle initial values and the current values. @@ -44,42 +44,44 @@ # differently in the diagnostic files. # For the absorbing particles, an extra particle was added that won't be absorbed # so that there will be something to read in here. -r_id0 = ad0['reflecting_particles', 'particle_id'].v -a_id0 = ad0['absorbing_particles', 'particle_id'].v -p_id0 = ad0['periodic_particles', 'particle_id'].v - -xx0 = ad0['reflecting_particles', 'particle_position_x'].v[np.argsort(r_id0)] -zz0 = ad0['periodic_particles', 'particle_position_z'].v[np.argsort(p_id0)] - -ux0 = ad0['reflecting_particles', 'particle_momentum_x'].v[np.argsort(r_id0)]/m_e/c -uz0 = ad0['periodic_particles', 'particle_momentum_z'].v[np.argsort(p_id0)]/m_e/c -gx0 = np.sqrt(1. + ux0**2) -gz0 = np.sqrt(1. 
+ uz0**2) -vx0 = ux0/gx0*c -vz0 = uz0/gz0*c - -r_id = ad['reflecting_particles', 'particle_id'].v -a_id = ad['absorbing_particles', 'particle_id'].v -p_id = ad['periodic_particles', 'particle_id'].v - -xx = ad['reflecting_particles', 'particle_position_x'].v[np.argsort(r_id)] -zz = ad['periodic_particles', 'particle_position_z'].v[np.argsort(p_id)] - -ux = ad['reflecting_particles', 'particle_momentum_x'].v[np.argsort(r_id)]/m_e/c -uz = ad['periodic_particles', 'particle_momentum_z'].v[np.argsort(p_id)]/m_e/c -gx = np.sqrt(1. + ux**2) -gz = np.sqrt(1. + uz**2) -vx = ux/gx*c -vz = uz/gz*c +r_id0 = ad0["reflecting_particles", "particle_id"].v +a_id0 = ad0["absorbing_particles", "particle_id"].v +p_id0 = ad0["periodic_particles", "particle_id"].v + +xx0 = ad0["reflecting_particles", "particle_position_x"].v[np.argsort(r_id0)] +zz0 = ad0["periodic_particles", "particle_position_z"].v[np.argsort(p_id0)] + +ux0 = ad0["reflecting_particles", "particle_momentum_x"].v[np.argsort(r_id0)] / m_e / c +uz0 = ad0["periodic_particles", "particle_momentum_z"].v[np.argsort(p_id0)] / m_e / c +gx0 = np.sqrt(1.0 + ux0**2) +gz0 = np.sqrt(1.0 + uz0**2) +vx0 = ux0 / gx0 * c +vz0 = uz0 / gz0 * c + +r_id = ad["reflecting_particles", "particle_id"].v +a_id = ad["absorbing_particles", "particle_id"].v +p_id = ad["periodic_particles", "particle_id"].v + +xx = ad["reflecting_particles", "particle_position_x"].v[np.argsort(r_id)] +zz = ad["periodic_particles", "particle_position_z"].v[np.argsort(p_id)] + +ux = ad["reflecting_particles", "particle_momentum_x"].v[np.argsort(r_id)] / m_e / c +uz = ad["periodic_particles", "particle_momentum_z"].v[np.argsort(p_id)] / m_e / c +gx = np.sqrt(1.0 + ux**2) +gz = np.sqrt(1.0 + uz**2) +vx = ux / gx * c +vz = uz / gz * c + def do_reflect(x): if x < dmin: - return 2.*dmin - x + return 2.0 * dmin - x elif x > dmax: - return 2.*dmax - x + return 2.0 * dmax - x else: return x + def do_periodic(x): if x < dmin: return x + (dmax - dmin) @@ -88,21 +90,26 @@ def 
do_periodic(x): else: return x + # Calculate the analytic value of the current particle locations and # apply the appropriate boundary conditions. -xxa = xx0 + vx0*time +xxa = xx0 + vx0 * time xxa[0] = do_reflect(xxa[0]) xxa[1] = do_reflect(xxa[1]) -zza = zz0 + vz0*time +zza = zz0 + vz0 * time zza[0] = do_periodic(zza[0]) zza[1] = do_periodic(zza[1]) -assert (len(a_id) == 1), 'Absorbing particles not absorbed' -assert (np.all(vx == -vx0)), 'Reflecting particle velocity not correct' -assert (np.all(vz == +vz0)), 'Periodic particle velocity not correct' -assert (np.all(np.abs((xx - xxa)/xx) < 1.e-15)), 'Reflecting particle position not correct' -assert (np.all(np.abs((zz - zza)/zz) < 1.e-15)), 'Periodic particle position not correct' +assert len(a_id) == 1, "Absorbing particles not absorbed" +assert np.all(vx == -vx0), "Reflecting particle velocity not correct" +assert np.all(vz == +vz0), "Periodic particle velocity not correct" +assert np.all( + np.abs((xx - xxa) / xx) < 1.0e-15 +), "Reflecting particle position not correct" +assert np.all( + np.abs((zz - zza) / zz) < 1.0e-15 +), "Periodic particle position not correct" test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/btd_rz/analysis_BTD_laser_antenna.py b/Examples/Tests/btd_rz/analysis_BTD_laser_antenna.py index e5d35b2ba2e..5002b4c80b3 100755 --- a/Examples/Tests/btd_rz/analysis_BTD_laser_antenna.py +++ b/Examples/Tests/btd_rz/analysis_BTD_laser_antenna.py @@ -16,49 +16,47 @@ from scipy.constants import c, e, m_e from scipy.optimize import curve_fit -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI -def gaussian_laser( z, a0, z0_phase, z0_prop, ctau, lambda0 ): +def gaussian_laser(z, a0, z0_phase, z0_prop, ctau, lambda0): """ Returns a Gaussian laser profile """ - k0 = 2*np.pi/lambda0 - E0 = a0*m_e*c**2*k0/e - return( E0*np.exp( - 
(z-z0_prop)**2/ctau**2 ) \ - *np.cos( k0*(z-z0_phase) ) ) + k0 = 2 * np.pi / lambda0 + E0 = a0 * m_e * c**2 * k0 / e + return E0 * np.exp(-((z - z0_prop) ** 2) / ctau**2) * np.cos(k0 * (z - z0_phase)) + # Fit the on-axis profile to extract the phase (a.k.a. CEP) def fit_function(z, z0_phase): - return( gaussian_laser( z, a0, z0_phase, - z0_b+Lprop_b, ctau0, lambda0 ) ) + return gaussian_laser(z, a0, z0_phase, z0_b + Lprop_b, ctau0, lambda0) + plotfile = sys.argv[1] # The values must be consistent with the values provided in the simulation input -t_current = 80e-15 # Time of the snapshot1 -c = 299792458 -z0_antenna = -1.e-6 # position of laser -lambda0 = 0.8e-6 # wavelength of the signal -tau0 = 10e-15 # duration of the signal +t_current = 80e-15 # Time of the snapshot1 +z0_antenna = -1.0e-6 # position of laser +lambda0 = 0.8e-6 # wavelength of the signal +tau0 = 10e-15 # duration of the signal ctau0 = tau0 * c -a0 = 15 # amplitude -t_peak = 20e-15 # Time at which laser reaches its peak -Lprop_b = c*t_current +a0 = 15 # amplitude +t_peak = 20e-15 # Time at which laser reaches its peak +Lprop_b = c * t_current z0_b = z0_antenna - c * t_peak -ts = OpenPMDTimeSeries('./diags/back_rz') -Ex, info = ts.get_field('E', 'x', iteration=1, slice_across='r') +ts = OpenPMDTimeSeries("./diags/back_rz") +Ex, info = ts.get_field("E", "x", iteration=1, slice_across="r") -fit_result = curve_fit( fit_function, info.z, Ex, - p0=np.array([z0_b+Lprop_b]) ) +fit_result = curve_fit(fit_function, info.z, Ex, p0=np.array([z0_b + Lprop_b])) z0_fit = fit_result[0] -Ex_fit = gaussian_laser( info.z, a0, z0_fit, z0_b+Lprop_b, ctau0, lambda0) +Ex_fit = gaussian_laser(info.z, a0, z0_fit, z0_b + Lprop_b, ctau0, lambda0) ## Check that the a0 agrees within 5% of the predicted value -assert np.allclose( Ex, Ex_fit, atol=0.18*Ex.max() ) +assert np.allclose(Ex, Ex_fit, atol=0.18 * Ex.max()) # Checksum regression analysis test_name = os.path.split(os.getcwd())[1] diff --git 
a/Examples/Tests/collider_relevant_diags/analysis_multiple_particles.py b/Examples/Tests/collider_relevant_diags/analysis_multiple_particles.py index b23bb69d52c..ab624bdac7e 100755 --- a/Examples/Tests/collider_relevant_diags/analysis_multiple_particles.py +++ b/Examples/Tests/collider_relevant_diags/analysis_multiple_particles.py @@ -8,140 +8,176 @@ import pandas as pd from scipy.constants import c, e, hbar, m_e -sys.path.append('../../../../warpx/Regression/Checksum/') +sys.path.append("../../../../warpx/Regression/Checksum/") import checksumAPI -sys.path.append('../../../../warpx/Tools/Parser/') +sys.path.append("../../../../warpx/Tools/Parser/") from input_file_parser import parse_input_file -E_crit = m_e**2*c**3/(e*hbar) -B_crit = m_e**2*c**2/(e*hbar) +E_crit = m_e**2 * c**3 / (e * hbar) +B_crit = m_e**2 * c**2 / (e * hbar) + def chi(ux, uy, uz, Ex, Ey, Ez, Bx, By, Bz): - gamma = np.sqrt(1.+ux**2+uy**2+uz**2) + gamma = np.sqrt(1.0 + ux**2 + uy**2 + uz**2) vx = ux / gamma * c vy = uy / gamma * c vz = uz / gamma * c - tmp1x = Ex + vy*Bz - vz*By - tmp1y = Ey - vx*Bz + vz*Bx - tmp1z = Ez + vx*By - vy*Bx - tmp2 = (Ex*vx + Ey*vy + Ez*vz)/c - chi = gamma/E_crit*np.sqrt(tmp1x**2+tmp1y**2+tmp1z**2 - tmp2**2) + tmp1x = Ex + vy * Bz - vz * By + tmp1y = Ey - vx * Bz + vz * Bx + tmp1z = Ez + vx * By - vy * Bx + tmp2 = (Ex * vx + Ey * vy + Ez * vz) / c + chi = gamma / E_crit * np.sqrt(tmp1x**2 + tmp1y**2 + tmp1z**2 - tmp2**2) return chi + def dL_dt(): - series = io.Series("diags/diag2/openpmd_%T.h5",io.Access.read_only) + series = io.Series("diags/diag2/openpmd_%T.h5", io.Access.read_only) iterations = np.asarray(series.iterations) lumi = [] - for n,ts in enumerate(iterations): + for n, ts in enumerate(iterations): it = series.iterations[ts] rho1 = it.meshes["rho_beam_e"] dV = np.prod(rho1.grid_spacing) rho1 = it.meshes["rho_beam_e"][io.Mesh_Record_Component.SCALAR].load_chunk() rho2 = it.meshes["rho_beam_p"][io.Mesh_Record_Component.SCALAR].load_chunk() - beam_e_charge = 
it.particles["beam_e"]["charge"][io.Mesh_Record_Component.SCALAR].load_chunk() - beam_p_charge = it.particles["beam_p"]["charge"][io.Mesh_Record_Component.SCALAR].load_chunk() + beam_e_charge = it.particles["beam_e"]["charge"][ + io.Mesh_Record_Component.SCALAR + ].load_chunk() + beam_p_charge = it.particles["beam_p"]["charge"][ + io.Mesh_Record_Component.SCALAR + ].load_chunk() q1 = beam_e_charge[0] if not np.all(beam_e_charge == q1): - sys.exit('beam_e particles do not have the same charge') + sys.exit("beam_e particles do not have the same charge") q2 = beam_p_charge[0] if not np.all(beam_p_charge == q2): - sys.exit('beam_p particles do not have the same charge') + sys.exit("beam_p particles do not have the same charge") series.flush() - n1 = rho1/q1 - n2 = rho2/q2 - l = 2*np.sum(n1*n2)*dV*c - lumi.append(l) + n1 = rho1 / q1 + n2 = rho2 / q2 + ln = 2 * np.sum(n1 * n2) * dV * c + lumi.append(ln) return lumi -input_dict = parse_input_file('inputs_3d_multiple_particles') -Ex, Ey, Ez = [float(w) for w in input_dict['particles.E_external_particle']] -Bx, By, Bz = [float(w) for w in input_dict['particles.B_external_particle']] - -CollDiagFname='diags/reducedfiles/ColliderRelevant_beam_e_beam_p.txt' -df = pd.read_csv(CollDiagFname, sep=" ", header=0) -for species in ['beam_p', 'beam_e']: +input_dict = parse_input_file("inputs_3d_multiple_particles") +Ex, Ey, Ez = [float(w) for w in input_dict["particles.E_external_particle"]] +Bx, By, Bz = [float(w) for w in input_dict["particles.B_external_particle"]] - ux1, ux2, ux3 = [float(w) for w in input_dict[f'{species}.multiple_particles_ux']] - uy1, uy2, uy3 = [float(w) for w in input_dict[f'{species}.multiple_particles_uy']] - uz1, uz2, uz3 = [float(w) for w in input_dict[f'{species}.multiple_particles_uz']] - - x = np.array([float(w) for w in input_dict[f'{species}.multiple_particles_pos_x']]) - y = np.array([float(w) for w in input_dict[f'{species}.multiple_particles_pos_y']]) - - w = np.array([float(w) for w in 
input_dict[f'{species}.multiple_particles_weight']]) +CollDiagFname = "diags/reducedfiles/ColliderRelevant_beam_e_beam_p.txt" +df = pd.read_csv(CollDiagFname, sep=" ", header=0) - CHI_ANALYTICAL = np.array([chi(ux1, uy1, uz1, Ex, Ey, Ez, Bx, By, Bz), - chi(ux2, uy2, uz2, Ex, Ey, Ez, Bx, By, Bz), - chi(ux3, uy3, uz3, Ex, Ey, Ez, Bx, By, Bz)]) - THETAX = np.array([np.arctan2(ux1, uz1), np.arctan2(ux2, uz2), np.arctan2(ux3, uz3)]) - THETAY = np.array([np.arctan2(uy1, uz1), np.arctan2(uy2, uz2), np.arctan2(uy3, uz3)]) +for species in ["beam_p", "beam_e"]: + ux1, ux2, ux3 = [float(w) for w in input_dict[f"{species}.multiple_particles_ux"]] + uy1, uy2, uy3 = [float(w) for w in input_dict[f"{species}.multiple_particles_uy"]] + uz1, uz2, uz3 = [float(w) for w in input_dict[f"{species}.multiple_particles_uz"]] + + x = np.array([float(w) for w in input_dict[f"{species}.multiple_particles_pos_x"]]) + y = np.array([float(w) for w in input_dict[f"{species}.multiple_particles_pos_y"]]) + + w = np.array([float(w) for w in input_dict[f"{species}.multiple_particles_weight"]]) + + CHI_ANALYTICAL = np.array( + [ + chi(ux1, uy1, uz1, Ex, Ey, Ez, Bx, By, Bz), + chi(ux2, uy2, uz2, Ex, Ey, Ez, Bx, By, Bz), + chi(ux3, uy3, uz3, Ex, Ey, Ez, Bx, By, Bz), + ] + ) + THETAX = np.array( + [np.arctan2(ux1, uz1), np.arctan2(ux2, uz2), np.arctan2(ux3, uz3)] + ) + THETAY = np.array( + [np.arctan2(uy1, uz1), np.arctan2(uy2, uz2), np.arctan2(uy3, uz3)] + ) # CHI MAX - fname=f'diags/reducedfiles/ParticleExtrema_{species}.txt' - chimax_pe = np.loadtxt(fname)[:,19] - chimax_cr = df[[col for col in df.columns if f'chi_max_{species}' in col]].to_numpy() + fname = f"diags/reducedfiles/ParticleExtrema_{species}.txt" + chimax_pe = np.loadtxt(fname)[:, 19] + chimax_cr = df[ + [col for col in df.columns if f"chi_max_{species}" in col] + ].to_numpy() assert np.allclose(np.max(CHI_ANALYTICAL), chimax_cr, rtol=1e-8) assert np.allclose(chimax_pe, chimax_cr, rtol=1e-8) # CHI MIN - 
fname=f'diags/reducedfiles/ParticleExtrema_{species}.txt' - chimin_pe = np.loadtxt(fname)[:,18] - chimin_cr = df[[col for col in df.columns if f'chi_min_{species}' in col]].to_numpy() + fname = f"diags/reducedfiles/ParticleExtrema_{species}.txt" + chimin_pe = np.loadtxt(fname)[:, 18] + chimin_cr = df[ + [col for col in df.columns if f"chi_min_{species}" in col] + ].to_numpy() assert np.allclose(np.min(CHI_ANALYTICAL), chimin_cr, rtol=1e-8) assert np.allclose(chimin_pe, chimin_cr, rtol=1e-8) # CHI AVERAGE - chiave_cr = df[[col for col in df.columns if f'chi_ave_{species}' in col]].to_numpy() + chiave_cr = df[ + [col for col in df.columns if f"chi_ave_{species}" in col] + ].to_numpy() assert np.allclose(np.average(CHI_ANALYTICAL, weights=w), chiave_cr, rtol=1e-8) # X AVE STD - x_ave_cr = df[[col for col in df.columns if f']x_ave_{species}' in col]].to_numpy() - x_std_cr = df[[col for col in df.columns if f']x_std_{species}' in col]].to_numpy() + x_ave_cr = df[[col for col in df.columns if f"]x_ave_{species}" in col]].to_numpy() + x_std_cr = df[[col for col in df.columns if f"]x_std_{species}" in col]].to_numpy() x_ave = np.average(x, weights=w) - x_std = np.sqrt(np.average((x-x_ave)**2, weights=w)) + x_std = np.sqrt(np.average((x - x_ave) ** 2, weights=w)) assert np.allclose(x_ave, x_ave_cr, rtol=1e-8) assert np.allclose(x_std, x_std_cr, rtol=1e-8) # Y AVE STD - y_ave_cr = df[[col for col in df.columns if f']y_ave_{species}' in col]].to_numpy() - y_std_cr = df[[col for col in df.columns if f']y_std_{species}' in col]].to_numpy() + y_ave_cr = df[[col for col in df.columns if f"]y_ave_{species}" in col]].to_numpy() + y_std_cr = df[[col for col in df.columns if f"]y_std_{species}" in col]].to_numpy() y_ave = np.average(y, weights=w) - y_std = np.sqrt(np.average((y-y_ave)**2, weights=w)) + y_std = np.sqrt(np.average((y - y_ave) ** 2, weights=w)) assert np.allclose(y_ave, y_ave_cr, rtol=1e-8) assert np.allclose(y_std, y_std_cr, rtol=1e-8) # THETA X MIN AVE MAX STD - 
thetax_min_cr = df[[col for col in df.columns if f'theta_x_min_{species}' in col]].to_numpy() - thetax_ave_cr = df[[col for col in df.columns if f'theta_x_ave_{species}' in col]].to_numpy() - thetax_max_cr = df[[col for col in df.columns if f'theta_x_max_{species}' in col]].to_numpy() - thetax_std_cr = df[[col for col in df.columns if f'theta_x_std_{species}' in col]].to_numpy() + thetax_min_cr = df[ + [col for col in df.columns if f"theta_x_min_{species}" in col] + ].to_numpy() + thetax_ave_cr = df[ + [col for col in df.columns if f"theta_x_ave_{species}" in col] + ].to_numpy() + thetax_max_cr = df[ + [col for col in df.columns if f"theta_x_max_{species}" in col] + ].to_numpy() + thetax_std_cr = df[ + [col for col in df.columns if f"theta_x_std_{species}" in col] + ].to_numpy() thetax_min = np.min(THETAX) thetax_ave = np.average(THETAX, weights=w) thetax_max = np.max(THETAX) - thetax_std = np.sqrt(np.average((THETAX-thetax_ave)**2, weights=w)) + thetax_std = np.sqrt(np.average((THETAX - thetax_ave) ** 2, weights=w)) assert np.allclose(thetax_min, thetax_min_cr, rtol=1e-8) assert np.allclose(thetax_ave, thetax_ave_cr, rtol=1e-8) assert np.allclose(thetax_max, thetax_max_cr, rtol=1e-8) assert np.allclose(thetax_std, thetax_std_cr, rtol=1e-8) # THETA Y MIN AVE MAX STD - thetay_min_cr = df[[col for col in df.columns if f'theta_y_min_{species}' in col]].to_numpy() - thetay_ave_cr = df[[col for col in df.columns if f'theta_y_ave_{species}' in col]].to_numpy() - thetay_max_cr = df[[col for col in df.columns if f'theta_y_max_{species}' in col]].to_numpy() - thetay_std_cr = df[[col for col in df.columns if f'theta_y_std_{species}' in col]].to_numpy() + thetay_min_cr = df[ + [col for col in df.columns if f"theta_y_min_{species}" in col] + ].to_numpy() + thetay_ave_cr = df[ + [col for col in df.columns if f"theta_y_ave_{species}" in col] + ].to_numpy() + thetay_max_cr = df[ + [col for col in df.columns if f"theta_y_max_{species}" in col] + ].to_numpy() + thetay_std_cr = df[ 
+ [col for col in df.columns if f"theta_y_std_{species}" in col] + ].to_numpy() thetay_min = np.min(THETAY) thetay_ave = np.average(THETAY, weights=w) thetay_max = np.max(THETAY) - thetay_std = np.sqrt(np.average((THETAY-thetay_ave)**2, weights=w)) + thetay_std = np.sqrt(np.average((THETAY - thetay_ave) ** 2, weights=w)) assert np.allclose(thetay_min, thetay_min_cr, rtol=1e-8) assert np.allclose(thetay_ave, thetay_ave_cr, rtol=1e-8) assert np.allclose(thetay_max, thetay_max_cr, rtol=1e-8) assert np.allclose(thetay_std, thetay_std_cr, rtol=1e-8) # dL/dt - dL_dt_cr = df[[col for col in df.columns if 'dL_dt' in col]].to_numpy() + dL_dt_cr = df[[col for col in df.columns if "dL_dt" in col]].to_numpy() assert np.allclose(dL_dt_cr, dL_dt(), rtol=1e-8) # Checksum analysis diff --git a/Examples/Tests/collision/PICMI_inputs_2d.py b/Examples/Tests/collision/PICMI_inputs_2d.py index 99e217b0afc..2a66bea5046 100755 --- a/Examples/Tests/collision/PICMI_inputs_2d.py +++ b/Examples/Tests/collision/PICMI_inputs_2d.py @@ -25,8 +25,8 @@ ymax = xmax plasma_density = 1e21 -elec_rms_velocity = 0.044237441120300*constants.c -ion_rms_velocity = 0.006256118919701*constants.c +elec_rms_velocity = 0.044237441120300 * constants.c +ion_rms_velocity = 0.006256118919701 * constants.c number_per_cell = 200 ################################# @@ -45,26 +45,28 @@ elec_dist = picmi.UniformDistribution( density=plasma_density, - rms_velocity=[elec_rms_velocity]*3, - directed_velocity=[elec_rms_velocity, 0., 0.] 
+ rms_velocity=[elec_rms_velocity] * 3, + directed_velocity=[elec_rms_velocity, 0.0, 0.0], ) ion_dist = picmi.UniformDistribution( density=plasma_density, - rms_velocity=[ion_rms_velocity]*3, + rms_velocity=[ion_rms_velocity] * 3, ) electrons = picmi.Species( - particle_type='electron', name='electron', + particle_type="electron", + name="electron", warpx_do_not_deposit=1, initial_distribution=elec_dist, ) ions = picmi.Species( - particle_type='H', - name='ion', charge='q_e', + particle_type="H", + name="ion", + charge="q_e", mass="5*m_e", warpx_do_not_deposit=1, - initial_distribution=ion_dist + initial_distribution=ion_dist, ) ################################# @@ -72,19 +74,13 @@ ################################# collision1 = picmi.CoulombCollisions( - name='collisions1', - species=[electrons, ions], - CoulombLog=15.9 + name="collisions1", species=[electrons, ions], CoulombLog=15.9 ) collision2 = picmi.CoulombCollisions( - name='collisions2', - species=[electrons, electrons], - CoulombLog=15.9 + name="collisions2", species=[electrons, electrons], CoulombLog=15.9 ) collision3 = picmi.CoulombCollisions( - name='collisions3', - species=[ions, ions], - CoulombLog=15.9 + name="collisions3", species=[ions, ions], CoulombLog=15.9 ) ################################# @@ -97,8 +93,8 @@ warpx_blocking_factor=max_grid_size, lower_bound=[xmin, ymin], upper_bound=[xmax, ymax], - lower_boundary_conditions=['periodic', 'periodic'], - upper_boundary_conditions=['periodic', 'periodic'], + lower_boundary_conditions=["periodic", "periodic"], + upper_boundary_conditions=["periodic", "periodic"], ) solver = picmi.ElectromagneticSolver(grid=grid, cfl=cfl) @@ -107,18 +103,15 @@ ################################# particle_diag = picmi.ParticleDiagnostic( - name='diag1', - period=10, - write_dir='.', - warpx_file_prefix='Python_collisionXZ_plt' + name="diag1", period=10, write_dir=".", warpx_file_prefix="Python_collisionXZ_plt" ) field_diag = picmi.FieldDiagnostic( - name='diag1', + 
name="diag1", grid=grid, period=10, data_list=[], - write_dir='.', - warpx_file_prefix='Python_collisionXZ_plt' + write_dir=".", + warpx_file_prefix="Python_collisionXZ_plt", ) ################################# @@ -130,20 +123,20 @@ max_steps=max_steps, verbose=verbose, warpx_serialize_initial_conditions=serialize_initial_conditions, - warpx_collisions=[collision1, collision2, collision3] + warpx_collisions=[collision1, collision2, collision3], ) sim.add_species( electrons, layout=picmi.PseudoRandomLayout( n_macroparticles_per_cell=number_per_cell, grid=grid - ) + ), ) sim.add_species( ions, layout=picmi.PseudoRandomLayout( n_macroparticles_per_cell=number_per_cell, grid=grid - ) + ), ) sim.add_diagnostic(particle_diag) @@ -153,5 +146,5 @@ ##### SIMULATION EXECUTION ###### ################################# -#sim.write_input_file('PICMI_inputs_2d') +# sim.write_input_file('PICMI_inputs_2d') sim.step(max_steps) diff --git a/Examples/Tests/collision/analysis_collision_1d.py b/Examples/Tests/collision/analysis_collision_1d.py index 46f8160c88f..1888696953e 100755 --- a/Examples/Tests/collision/analysis_collision_1d.py +++ b/Examples/Tests/collision/analysis_collision_1d.py @@ -22,24 +22,26 @@ import yt from scipy.constants import e -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file last_fn = sys.argv[1] ds = yt.load(last_fn) -data = ds.covering_grid(level = 0, left_edge = ds.domain_left_edge, dims = ds.domain_dimensions) +data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) # carbon 12 ion (mass = 12*amu - 6*me) mass = 1.992100316897910e-26 # Separate macroparticles from group A (low weight) and group B (high weight) # by sorting based on weight -sorted_indices = data['ions','particle_weight'].argsort() -sorted_wp = data['ions', 'particle_weight'][sorted_indices].value -sorted_px = data['ions', 
'particle_momentum_x'][sorted_indices].value -sorted_py = data['ions', 'particle_momentum_y'][sorted_indices].value -sorted_pz = data['ions', 'particle_momentum_z'][sorted_indices].value +sorted_indices = data["ions", "particle_weight"].argsort() +sorted_wp = data["ions", "particle_weight"][sorted_indices].value +sorted_px = data["ions", "particle_momentum_x"][sorted_indices].value +sorted_py = data["ions", "particle_momentum_y"][sorted_indices].value +sorted_pz = data["ions", "particle_momentum_z"][sorted_indices].value # Find the index 'Npmin' that separates macroparticles from group A and group B Np = len(sorted_wp) @@ -67,59 +69,59 @@ sorted_wp_sum = np.abs(sorted_wp).sum() # compute mean velocities -wAtot = wpA*NpA -wBtot = wpB*NpB - -uBx = uBy = uBz = 0. -for i in range(NpBs,NpBe): - uBx += wpB*sorted_px[i] - uBy += wpB*sorted_py[i] - uBz += wpB*sorted_pz[i] -uBx /= (mass*wBtot) # [m/s] -uBy /= (mass*wBtot) # [m/s] -uBz /= (mass*wBtot) # [m/s] - -uAx = uAy = uAz = 0. -for i in range(NpAs,NpAe): - uAx += wpA*sorted_px[i] - uAy += wpA*sorted_py[i] - uAz += wpA*sorted_pz[i] -uAx /= (mass*wAtot) # [m/s] -uAy /= (mass*wAtot) # [m/s] -uAz /= (mass*wAtot) # [m/s] +wAtot = wpA * NpA +wBtot = wpB * NpB + +uBx = uBy = uBz = 0.0 +for i in range(NpBs, NpBe): + uBx += wpB * sorted_px[i] + uBy += wpB * sorted_py[i] + uBz += wpB * sorted_pz[i] +uBx /= mass * wBtot # [m/s] +uBy /= mass * wBtot # [m/s] +uBz /= mass * wBtot # [m/s] + +uAx = uAy = uAz = 0.0 +for i in range(NpAs, NpAe): + uAx += wpA * sorted_px[i] + uAy += wpA * sorted_py[i] + uAz += wpA * sorted_pz[i] +uAx /= mass * wAtot # [m/s] +uAy /= mass * wAtot # [m/s] +uAz /= mass * wAtot # [m/s] # compute temperatures -TBx = TBy = TBz = 0. -for i in range(NpBs,NpBe): - TBx += wpB*(sorted_px[i]/mass - uBx)**2 - TBy += wpB*(sorted_py[i]/mass - uBy)**2 - TBz += wpB*(sorted_pz[i]/mass - uBz)**2 -TBx *= mass/(e*wBtot) -TBy *= mass/(e*wBtot) -TBz *= mass/(e*wBtot) - -TAx = TAy = TAz = 0. 
-for i in range(NpAs,NpAe): - TAx += wpA*(sorted_px[i]/mass - uAx)**2 - TAy += wpA*(sorted_py[i]/mass - uAy)**2 - TAz += wpA*(sorted_pz[i]/mass - uAz)**2 -TAx *= mass/(e*wAtot) -TAy *= mass/(e*wAtot) -TAz *= mass/(e*wAtot) +TBx = TBy = TBz = 0.0 +for i in range(NpBs, NpBe): + TBx += wpB * (sorted_px[i] / mass - uBx) ** 2 + TBy += wpB * (sorted_py[i] / mass - uBy) ** 2 + TBz += wpB * (sorted_pz[i] / mass - uBz) ** 2 +TBx *= mass / (e * wBtot) +TBy *= mass / (e * wBtot) +TBz *= mass / (e * wBtot) + +TAx = TAy = TAz = 0.0 +for i in range(NpAs, NpAe): + TAx += wpA * (sorted_px[i] / mass - uAx) ** 2 + TAy += wpA * (sorted_py[i] / mass - uAy) ** 2 + TAz += wpA * (sorted_pz[i] / mass - uAz) ** 2 +TAx *= mass / (e * wAtot) +TAy *= mass / (e * wAtot) +TAz *= mass / (e * wAtot) TApar = TAz -TAperp = (TAx + TAy)/2.0 -TA = (TAx + TAy + TAz)/3.0 +TAperp = (TAx + TAy) / 2.0 +TA = (TAx + TAy + TAz) / 3.0 TBpar = TBz -TBperp = (TBx + TBy)/2.0 -TB = (TBx + TBy + TBz)/3.0 +TBperp = (TBx + TBy) / 2.0 +TB = (TBx + TBy + TBz) / 3.0 -TApar_30ps_soln = 6.15e3 # TA parallel solution at t = 30 ps -error = np.abs(TApar-TApar_30ps_soln)/TApar_30ps_soln +TApar_30ps_soln = 6.15e3 # TA parallel solution at t = 30 ps +error = np.abs(TApar - TApar_30ps_soln) / TApar_30ps_soln tolerance = 0.02 -print('TApar at 30ps error = ', error) -print('tolerance = ', tolerance) +print("TApar at 30ps error = ", error) +print("tolerance = ", tolerance) assert error < tolerance test_name = os.path.split(os.getcwd())[1] diff --git a/Examples/Tests/collision/analysis_collision_2d.py b/Examples/Tests/collision/analysis_collision_2d.py index 8ef251b0ace..92153f0870e 100755 --- a/Examples/Tests/collision/analysis_collision_2d.py +++ b/Examples/Tests/collision/analysis_collision_2d.py @@ -32,7 +32,7 @@ import post_processing_utils import yt -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI tolerance = 0.001 @@ -42,7 +42,7 @@ 
ni = ng * 200 np = ne + ni -c = 299792458.0 +c = 299792458.0 me = 9.10938356e-31 mi = me * 5.0 @@ -50,39 +50,40 @@ ## fit. # exponential fit coefficients -a = 0.04330638981264072 +a = 0.04330638981264072 b = -0.11588277796546632 last_fn = sys.argv[1] -if (last_fn[-1] == "/"): last_fn = last_fn[:-1] -last_it = last_fn[-6:] # i.e., 000150 +if last_fn[-1] == "/": + last_fn = last_fn[:-1] +last_it = last_fn[-6:] # i.e., 000150 prefix = last_fn[:-6] # i.e., diags/diag1 # Collect all output files in fn_list (names match pattern prefix + arbitrary number) -fn_list = glob.glob(prefix + '*[0-9]') +fn_list = glob.glob(prefix + "*[0-9]") error = 0.0 nt = 0 for fn in fn_list: # load file - ds = yt.load( fn ) - ad = ds.all_data() - px = ad[('all', 'particle_momentum_x')].to_ndarray() + ds = yt.load(fn) + ad = ds.all_data() + px = ad[("all", "particle_momentum_x")].to_ndarray() # get time index j j = int(fn[-5:]) # compute error - vxe = numpy.mean(px[ 0:ne])/me/c - vxi = numpy.mean(px[ne:np])/mi/c + vxe = numpy.mean(px[0:ne]) / me / c + vxi = numpy.mean(px[ne:np]) / mi / c vxd = vxe - vxi - fit = a*math.exp(b*j) - error = error + abs(fit-vxd) + fit = a * math.exp(b * j) + error = error + abs(fit - vxd) nt = nt + 1 error = error / nt -print('error = ', error) -print('tolerance = ', tolerance) -assert(error < tolerance) +print("error = ", error) +print("tolerance = ", tolerance) +assert error < tolerance # The second part of the analysis is not done for the Python test # since the particle filter function is not accessible from PICMI yet @@ -97,18 +98,21 @@ parser_filter_fn = "diags/diag_parser_filter" + last_it parser_filter_expression = "(x>200) * (z<200) * (px-3*pz>0)" -post_processing_utils.check_particle_filter(last_fn, parser_filter_fn, parser_filter_expression, - dim, species_name) +post_processing_utils.check_particle_filter( + last_fn, parser_filter_fn, parser_filter_expression, dim, species_name +) uniform_filter_fn = "diags/diag_uniform_filter" + last_it 
uniform_filter_expression = "ids%6 == 0" -post_processing_utils.check_particle_filter(last_fn, uniform_filter_fn, uniform_filter_expression, - dim, species_name) +post_processing_utils.check_particle_filter( + last_fn, uniform_filter_fn, uniform_filter_expression, dim, species_name +) random_filter_fn = "diags/diag_random_filter" + last_it random_fraction = 0.77 -post_processing_utils.check_random_filter(last_fn, random_filter_fn, random_fraction, - dim, species_name) +post_processing_utils.check_random_filter( + last_fn, random_filter_fn, random_fraction, dim, species_name +) test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, last_fn) diff --git a/Examples/Tests/collision/analysis_collision_3d.py b/Examples/Tests/collision/analysis_collision_3d.py index 335e94791ab..0a1b016a227 100755 --- a/Examples/Tests/collision/analysis_collision_3d.py +++ b/Examples/Tests/collision/analysis_collision_3d.py @@ -32,7 +32,7 @@ import post_processing_utils import yt -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI tolerance = 0.001 @@ -42,7 +42,7 @@ ni = ng * 200 np = ne + ni -c = 299792458.0 +c = 299792458.0 me = 9.10938356e-31 mi = me * 5.0 @@ -50,40 +50,41 @@ ## fit. 
# exponential fit coefficients -a = 0.041817463099883 +a = 0.041817463099883 b = -0.083851393560288 last_fn = sys.argv[1] -if (last_fn[-1] == "/"): last_fn = last_fn[:-1] -last_it = last_fn[-6:] # i.e., 000150 +if last_fn[-1] == "/": + last_fn = last_fn[:-1] +last_it = last_fn[-6:] # i.e., 000150 prefix = last_fn[:-6] # i.e., diags/diag1 # Collect all output files in fn_list (names match pattern prefix + arbitrary number) -fn_list = glob.glob(prefix + '*[0-9]') +fn_list = glob.glob(prefix + "*[0-9]") error = 0.0 nt = 0 for fn in fn_list: # load file - ds = yt.load( fn ) - ad = ds.all_data() - pxe = ad['electron', 'particle_momentum_x'].to_ndarray() - pxi = ad['ion', 'particle_momentum_x'].to_ndarray() + ds = yt.load(fn) + ad = ds.all_data() + pxe = ad["electron", "particle_momentum_x"].to_ndarray() + pxi = ad["ion", "particle_momentum_x"].to_ndarray() # get time index j j = int(fn[-5:]) # compute error - vxe = numpy.mean(pxe)/me/c - vxi = numpy.mean(pxi)/mi/c + vxe = numpy.mean(pxe) / me / c + vxi = numpy.mean(pxi) / mi / c vxd = vxe - vxi - fit = a*math.exp(b*j) - error = error + abs(fit-vxd) + fit = a * math.exp(b * j) + error = error + abs(fit - vxd) nt = nt + 1 error = error / nt -print('error = ', error) -print('tolerance = ', tolerance) -assert(error < tolerance) +print("error = ", error) +print("tolerance = ", tolerance) +assert error < tolerance ## In the second part of the test, we verify that the diagnostic particle filter function works as @@ -94,18 +95,21 @@ parser_filter_fn = "diags/diag_parser_filter" + last_it parser_filter_expression = "(px*py*pz < 0) * (np.sqrt(x**2+y**2+z**2)<100)" -post_processing_utils.check_particle_filter(last_fn, parser_filter_fn, parser_filter_expression, - dim, species_name) +post_processing_utils.check_particle_filter( + last_fn, parser_filter_fn, parser_filter_expression, dim, species_name +) uniform_filter_fn = "diags/diag_uniform_filter" + last_it uniform_filter_expression = "ids%11 == 0" 
-post_processing_utils.check_particle_filter(last_fn, uniform_filter_fn, uniform_filter_expression, - dim, species_name) +post_processing_utils.check_particle_filter( + last_fn, uniform_filter_fn, uniform_filter_expression, dim, species_name +) random_filter_fn = "diags/diag_random_filter" + last_it random_fraction = 0.88 -post_processing_utils.check_random_filter(last_fn, random_filter_fn, random_fraction, - dim, species_name) +post_processing_utils.check_random_filter( + last_fn, random_filter_fn, random_fraction, dim, species_name +) test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, last_fn) diff --git a/Examples/Tests/collision/analysis_collision_3d_isotropization.py b/Examples/Tests/collision/analysis_collision_3d_isotropization.py index ba029760e8b..6386ce74812 100755 --- a/Examples/Tests/collision/analysis_collision_3d_isotropization.py +++ b/Examples/Tests/collision/analysis_collision_3d_isotropization.py @@ -18,7 +18,7 @@ import scipy.constants as sc import yt -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI e = sc.e @@ -29,34 +29,40 @@ dt = 1.4e-17 ne = 1.116e28 log = 2.0 -T_par = 5.62*e -T_per = 5.1*e +T_par = 5.62 * e +T_per = 5.1 * e -A = 1.0 - T_per/T_par -mu = (e**4*ne*log/(8.0*pi**1.5*ep0**2*m**0.5*T_par**1.5) - *A**(-2)*(-3.0+(3.0-A)*np.arctanh(A**0.5)/A**0.5)) +A = 1.0 - T_per / T_par +mu = ( + e**4 + * ne + * log + / (8.0 * pi**1.5 * ep0**2 * m**0.5 * T_par**1.5) + * A ** (-2) + * (-3.0 + (3.0 - A) * np.arctanh(A**0.5) / A**0.5) +) fn = sys.argv[1] ds = yt.load(fn) ad = ds.all_data() -vx = ad['electron', 'particle_momentum_x'].to_ndarray()/m -vy = ad['electron', 'particle_momentum_y'].to_ndarray()/m -Tx = np.mean(vx**2)*m/e -Ty = np.mean(vy**2)*m/e +vx = ad["electron", "particle_momentum_x"].to_ndarray() / m +vy = ad["electron", "particle_momentum_y"].to_ndarray() / m +Tx = np.mean(vx**2) * m / e +Ty = np.mean(vy**2) * m 
/ e nt = 100 Tx0 = T_par Ty0 = T_per -for _ in range(nt-1): - Tx0 = Tx0 + dt*mu*(Ty0-Tx0)*2.0 - Ty0 = Ty0 + dt*mu*(Tx0-Ty0) +for _ in range(nt - 1): + Tx0 = Tx0 + dt * mu * (Ty0 - Tx0) * 2.0 + Ty0 = Ty0 + dt * mu * (Tx0 - Ty0) tolerance = 0.05 -error = np.maximum(abs(Tx-Tx0/e)/Tx, abs(Ty-Ty0/e)/Ty) +error = np.maximum(abs(Tx - Tx0 / e) / Tx, abs(Ty - Ty0 / e) / Ty) -print(f'error = {error}') -print(f'tolerance = {tolerance}') -assert(error < tolerance) +print(f"error = {error}") +print(f"tolerance = {tolerance}") +assert error < tolerance test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/collision/analysis_collision_rz.py b/Examples/Tests/collision/analysis_collision_rz.py index 8c275fcacd4..168d8a8a7cf 100755 --- a/Examples/Tests/collision/analysis_collision_rz.py +++ b/Examples/Tests/collision/analysis_collision_rz.py @@ -23,36 +23,37 @@ import numpy as np import yt -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI tolerance = 1.0e-15 last_fn = sys.argv[1] -if (last_fn[-1] == "/"): last_fn = last_fn[:-1] +if last_fn[-1] == "/": + last_fn = last_fn[:-1] fn_list = glob(last_fn[:-5] + "?????") for fn in fn_list: # get time index j j = int(fn[-5:]) - if j==0: + if j == 0: # load file - ds = yt.load( fn ) + ds = yt.load(fn) ad = ds.all_data() - px1 = ad['particle_momentum_x'].to_ndarray() - py1 = ad['particle_momentum_y'].to_ndarray() - if j==150: + px1 = ad["particle_momentum_x"].to_ndarray() + py1 = ad["particle_momentum_y"].to_ndarray() + if j == 150: # load file - ds = yt.load( fn ) + ds = yt.load(fn) ad = ds.all_data() - px2 = ad['particle_momentum_x'].to_ndarray() - py2 = ad['particle_momentum_y'].to_ndarray() + px2 = ad["particle_momentum_x"].to_ndarray() + py2 = ad["particle_momentum_y"].to_ndarray() -error = np.max( abs(px1-px2)+abs(py1-py2) ) +error = np.max(abs(px1 - px2) + abs(py1 - py2)) 
-print('error = ', error) -print('tolerance = ', tolerance) -assert(error < tolerance) +print("error = ", error) +print("tolerance = ", tolerance) +assert error < tolerance test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, last_fn, do_particles=False) diff --git a/Examples/Tests/divb_cleaning/analysis.py b/Examples/Tests/divb_cleaning/analysis.py index a3523218ca6..1692d14b632 100755 --- a/Examples/Tests/divb_cleaning/analysis.py +++ b/Examples/Tests/divb_cleaning/analysis.py @@ -8,7 +8,7 @@ import sys -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import os import numpy as np @@ -24,17 +24,23 @@ fn = sys.argv[1] # Load yt data -ds_old = yt.load('divb_cleaning_3d_plt000398') -ds_mid = yt.load('divb_cleaning_3d_plt000399') -ds_new = yt.load(fn) # this is the last plotfile - -ad_old = ds_old.covering_grid(level = 0, left_edge = ds_old.domain_left_edge, dims = ds_old.domain_dimensions) -ad_mid = ds_mid.covering_grid(level = 0, left_edge = ds_mid.domain_left_edge, dims = ds_mid.domain_dimensions) -ad_new = ds_new.covering_grid(level = 0, left_edge = ds_new.domain_left_edge, dims = ds_new.domain_dimensions) - -G_old = ad_old['boxlib', 'G'].v.squeeze() -G_new = ad_new['boxlib', 'G'].v.squeeze() -divB = ad_mid['boxlib', 'divB'].v.squeeze() +ds_old = yt.load("divb_cleaning_3d_plt000398") +ds_mid = yt.load("divb_cleaning_3d_plt000399") +ds_new = yt.load(fn) # this is the last plotfile + +ad_old = ds_old.covering_grid( + level=0, left_edge=ds_old.domain_left_edge, dims=ds_old.domain_dimensions +) +ad_mid = ds_mid.covering_grid( + level=0, left_edge=ds_mid.domain_left_edge, dims=ds_mid.domain_dimensions +) +ad_new = ds_new.covering_grid( + level=0, left_edge=ds_new.domain_left_edge, dims=ds_new.domain_dimensions +) + +G_old = ad_old["boxlib", "G"].v.squeeze() +G_new = ad_new["boxlib", "G"].v.squeeze() +divB = ad_mid["boxlib", "divB"].v.squeeze() # Check max norm of 
error on c2 * div(B) = dG/dt # (the time interval between old and new is 2*dt) @@ -45,11 +51,11 @@ rel_error = np.amax(abs(x - y)) / np.amax(abs(y)) tolerance = 1e-1 -assert(rel_error < tolerance) +assert rel_error < tolerance test_name = os.path.split(os.getcwd())[1] -if re.search('single_precision', fn): - checksumAPI.evaluate_checksum(test_name, fn, rtol=1.e-3) +if re.search("single_precision", fn): + checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3) else: checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/dive_cleaning/analysis.py b/Examples/Tests/dive_cleaning/analysis.py index a9b52455baa..9d92767fa05 100755 --- a/Examples/Tests/dive_cleaning/analysis.py +++ b/Examples/Tests/dive_cleaning/analysis.py @@ -14,11 +14,12 @@ This script verifies that the field at the end of the simulation corresponds to the theoretical field of a Gaussian beam. """ + import sys import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import numpy as np import scipy.constants as scc @@ -28,88 +29,101 @@ yt.funcs.mylog.setLevel(0) # Parameters from the Simulation -Qtot = -1.e-20 -r0 = 2.e-6 +Qtot = -1.0e-20 +r0 = 2.0e-6 # Open data file filename = sys.argv[1] -ds = yt.load( filename ) +ds = yt.load(filename) # yt 4.0+ has rounding issues with our domain data: # RuntimeError: yt attempted to read outside the boundaries # of a non-periodic domain along dimension 0. 
-if 'force_periodicity' in dir(ds): ds.force_periodicity() +if "force_periodicity" in dir(ds): + ds.force_periodicity() # Extract data -ad0 = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) +ad0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) Ex_array = ad0[("mesh", "Ex")].to_ndarray().squeeze() if ds.dimensionality == 2: # Rename the z dimension as y, so as to make this script work for 2d and 3d Ey_array = ad0[("mesh", "Ez")].to_ndarray().squeeze() - E_array = ( Ex_array**2 + Ey_array**2 )**.5 + E_array = (Ex_array**2 + Ey_array**2) ** 0.5 relative_tolerance = 0.1 elif ds.dimensionality == 3: Ey_array = ad0[("mesh", "Ey")].to_ndarray() Ez_array = ad0[("mesh", "Ez")].to_ndarray() - E_array = ( Ex_array**2 + Ey_array**2 + Ez_array**2 )**.5 + E_array = (Ex_array**2 + Ey_array**2 + Ez_array**2) ** 0.5 relative_tolerance = 0.165 # Extract grid coordinates -Nx, Ny, Nz = ds.domain_dimensions +Nx, Ny, Nz = ds.domain_dimensions xmin, ymin, zmin = ds.domain_left_edge.v Lx, Ly, Lz = ds.domain_width.v -x = xmin + Lx/Nx*(0.5+np.arange(Nx)) -y = ymin + Ly/Ny*(0.5+np.arange(Ny)) -z = zmin + Lz/Nz*(0.5+np.arange(Nz)) +x = xmin + Lx / Nx * (0.5 + np.arange(Nx)) +y = ymin + Ly / Ny * (0.5 + np.arange(Ny)) +z = zmin + Lz / Nz * (0.5 + np.arange(Nz)) # Compute theoretical field if ds.dimensionality == 2: - x_2d, y_2d = np.meshgrid(x, y, indexing='ij') + x_2d, y_2d = np.meshgrid(x, y, indexing="ij") r2 = x_2d**2 + y_2d**2 - factor = (Qtot/r0)/(2*np.pi*scc.epsilon_0*r2) * (1-np.exp(-r2/(2*r0**2))) + factor = ( + (Qtot / r0) / (2 * np.pi * scc.epsilon_0 * r2) * (1 - np.exp(-r2 / (2 * r0**2))) + ) Ex_th = x_2d * factor Ey_th = y_2d * factor - E_th = ( Ex_th**2 + Ey_th**2 )**.5 + E_th = (Ex_th**2 + Ey_th**2) ** 0.5 elif ds.dimensionality == 3: - x_2d, y_2d, z_2d = np.meshgrid(x, y, z, indexing='ij') + x_2d, y_2d, z_2d = np.meshgrid(x, y, z, indexing="ij") r2 = x_2d**2 + y_2d**2 + z_2d**2 - factor = 
Qtot/(4*np.pi*scc.epsilon_0*r2**1.5) * gammainc(3./2, r2/(2.*r0**2)) - Ex_th = factor*x_2d - Ey_th = factor*y_2d - Ez_th = factor*z_2d - E_th = ( Ex_th**2 + Ey_th**2 + Ez_th**2 )**.5 + factor = ( + Qtot + / (4 * np.pi * scc.epsilon_0 * r2**1.5) + * gammainc(3.0 / 2, r2 / (2.0 * r0**2)) + ) + Ex_th = factor * x_2d + Ey_th = factor * y_2d + Ez_th = factor * z_2d + E_th = (Ex_th**2 + Ey_th**2 + Ez_th**2) ** 0.5 + # Plot theory and data def make_2d(arr): if arr.ndim == 3: - return arr[:,:,Nz//2] + return arr[:, :, Nz // 2] else: return arr -plt.figure(figsize=(10,10)) + + +plt.figure(figsize=(10, 10)) plt.subplot(221) -plt.title('E: Theory') +plt.title("E: Theory") plt.imshow(make_2d(E_th)) plt.colorbar() plt.subplot(222) -plt.title('E: Simulation') +plt.title("E: Simulation") plt.imshow(make_2d(E_array)) plt.colorbar() plt.subplot(223) -plt.title('E: Diff') -plt.imshow(make_2d(E_th-E_array)) +plt.title("E: Diff") +plt.imshow(make_2d(E_th - E_array)) plt.colorbar() plt.subplot(224) -plt.title('E: Relative diff') -plt.imshow(make_2d((E_th-E_array)/E_th)) +plt.title("E: Relative diff") +plt.imshow(make_2d((E_th - E_array) / E_th)) plt.colorbar() -plt.savefig('Comparison.png') +plt.savefig("Comparison.png") + # Automatically check the results def check(E, E_th, label): - print( 'Relative error in %s: %.3f'%( - label, abs(E-E_th).max()/E_th.max())) - assert np.allclose( E, E_th, atol=relative_tolerance*E_th.max() ) + print("Relative error in %s: %.3f" % (label, abs(E - E_th).max() / E_th.max())) + assert np.allclose(E, E_th, atol=relative_tolerance * E_th.max()) + -check( Ex_array, Ex_th, 'Ex' ) -check( Ey_array, Ey_th, 'Ey' ) +check(Ex_array, Ex_th, "Ex") +check(Ey_array, Ey_th, "Ey") if ds.dimensionality == 3: - check( Ez_array, Ez_th, 'Ez' ) + check(Ez_array, Ez_th, "Ez") diff --git a/Examples/Tests/electrostatic_dirichlet_bc/PICMI_inputs_2d.py b/Examples/Tests/electrostatic_dirichlet_bc/PICMI_inputs_2d.py index e4dd530c3bc..5a1c531fe3a 100755 --- 
a/Examples/Tests/electrostatic_dirichlet_bc/PICMI_inputs_2d.py +++ b/Examples/Tests/electrostatic_dirichlet_bc/PICMI_inputs_2d.py @@ -36,21 +36,21 @@ ########################## grid = picmi.Cartesian2DGrid( - number_of_cells = [nx, ny], - lower_bound = [xmin, ymin], - upper_bound = [xmax, ymax], - lower_boundary_conditions = ['dirichlet', 'periodic'], - upper_boundary_conditions = ['dirichlet', 'periodic'], - lower_boundary_conditions_particles = ['absorbing', 'periodic'], - upper_boundary_conditions_particles = ['absorbing', 'periodic'], - warpx_potential_lo_x = V_xmin, - warpx_potential_hi_x = V_xmax, - moving_window_velocity = None, - warpx_max_grid_size = 32 + number_of_cells=[nx, ny], + lower_bound=[xmin, ymin], + upper_bound=[xmax, ymax], + lower_boundary_conditions=["dirichlet", "periodic"], + upper_boundary_conditions=["dirichlet", "periodic"], + lower_boundary_conditions_particles=["absorbing", "periodic"], + upper_boundary_conditions_particles=["absorbing", "periodic"], + warpx_potential_lo_x=V_xmin, + warpx_potential_hi_x=V_xmax, + moving_window_velocity=None, + warpx_max_grid_size=32, ) solver = picmi.ElectrostaticSolver( - grid=grid, method='Multigrid', required_precision=1e-6 + grid=grid, method="Multigrid", required_precision=1e-6 ) @@ -59,18 +59,15 @@ ########################## particle_diag = picmi.ParticleDiagnostic( - name = 'diag1', - period = 4, - write_dir = '.', - warpx_file_prefix = 'Python_dirichletbc_plt' + name="diag1", period=4, write_dir=".", warpx_file_prefix="Python_dirichletbc_plt" ) field_diag = picmi.FieldDiagnostic( - name = 'diag1', - grid = grid, - period = 4, - data_list = ['phi'], - write_dir = '.', - warpx_file_prefix = 'Python_dirichletbc_plt' + name="diag1", + grid=grid, + period=4, + data_list=["phi"], + write_dir=".", + warpx_file_prefix="Python_dirichletbc_plt", ) ########################## @@ -78,11 +75,11 @@ ########################## sim = picmi.Simulation( - solver = solver, - time_step_size = dt, - max_steps = 
max_steps, - particle_shape = None, - verbose = 0 + solver=solver, + time_step_size=dt, + max_steps=max_steps, + particle_shape=None, + verbose=0, ) sim.add_diagnostic(particle_diag) @@ -94,7 +91,7 @@ # write_inputs will create an inputs file that can be used to run # with the compiled version. -#sim.write_input_file(file_name = 'inputs_from_PICMI') +# sim.write_input_file(file_name = 'inputs_from_PICMI') # Alternatively, sim.step will run WarpX, controlling it from Python sim.step(max_steps) diff --git a/Examples/Tests/electrostatic_dirichlet_bc/analysis.py b/Examples/Tests/electrostatic_dirichlet_bc/analysis.py index eae2f17243c..91e84fd8864 100755 --- a/Examples/Tests/electrostatic_dirichlet_bc/analysis.py +++ b/Examples/Tests/electrostatic_dirichlet_bc/analysis.py @@ -18,9 +18,9 @@ import numpy as np import yt -files = sorted(glob.glob('dirichletbc_plt*'))[1:] +files = sorted(glob.glob("dirichletbc_plt*"))[1:] if len(files) == 0: - files = sorted(glob.glob('Python_dirichletbc_plt*'))[1:] + files = sorted(glob.glob("Python_dirichletbc_plt*"))[1:] assert len(files) > 0 times = np.ones(len(files)) @@ -28,15 +28,13 @@ potentials_hi = np.zeros(len(files)) for ii, file in enumerate(files): - ds = yt.load( file ) - times[ii] = ( - ds.current_time.item() - ) + ds = yt.load(file) + times[ii] = ds.current_time.item() data = ds.covering_grid( level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions ) - potentials_lo[ii] = np.mean(data['phi'].to_ndarray()[0]) - potentials_hi[ii] = np.mean(data['phi'].to_ndarray()[-1]) + potentials_lo[ii] = np.mean(data["phi"].to_ndarray()[0]) + potentials_hi[ii] = np.mean(data["phi"].to_ndarray()[-1]) expected_potentials_lo = 150.0 * np.sin(2.0 * np.pi * 6.78e6 * times) expected_potentials_hi = 450.0 * np.sin(2.0 * np.pi * 13.56e6 * times) diff --git a/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py b/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py index f25348f4c9c..4acd868a148 100755 --- 
a/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py +++ b/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py @@ -17,6 +17,7 @@ known analytic solution. While the radius r(t) is not analytically known, its inverse t(r) can be solved for exactly. """ + import os import re import sys @@ -27,53 +28,54 @@ from scipy.constants import c from scipy.optimize import fsolve -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI yt.funcs.mylog.setLevel(0) # Open plotfile specified in command line filename = sys.argv[1] -ds = yt.load( filename ) +ds = yt.load(filename) t_max = ds.current_time.item() # time of simulation # Parse test name and check if particle_shape = 4 is used -emass_10 = True if re.search('emass_10', filename) else False +emass_10 = True if re.search("emass_10", filename) else False if emass_10: l2_tolerance = 0.096 m_e = 10 else: l2_tolerance = 0.05 - m_e = 9.10938356e-31 #Electron mass in kg + m_e = 9.10938356e-31 # Electron mass in kg ndims = np.count_nonzero(ds.domain_dimensions > 1) if ndims == 2: - xmin, zmin = [float(x) for x in ds.parameters.get('geometry.prob_lo').split()] - xmax, zmax = [float(x) for x in ds.parameters.get('geometry.prob_hi').split()] - nx, nz = [int(n) for n in ds.parameters['amr.n_cell'].split()] + xmin, zmin = [float(x) for x in ds.parameters.get("geometry.prob_lo").split()] + xmax, zmax = [float(x) for x in ds.parameters.get("geometry.prob_hi").split()] + nx, nz = [int(n) for n in ds.parameters["amr.n_cell"].split()] ymin, ymax = xmin, xmax ny = nx else: - xmin, ymin, zmin = [float(x) for x in ds.parameters.get('geometry.prob_lo').split()] - xmax, ymax, zmax = [float(x) for x in ds.parameters.get('geometry.prob_hi').split()] - nx, ny, nz = [int(n) for n in ds.parameters['amr.n_cell'].split()] + xmin, ymin, zmin = [float(x) for x in ds.parameters.get("geometry.prob_lo").split()] + xmax, ymax, zmax = 
[float(x) for x in ds.parameters.get("geometry.prob_hi").split()] + nx, ny, nz = [int(n) for n in ds.parameters["amr.n_cell"].split()] -dx = (xmax - xmin)/nx -dy = (ymax - ymin)/ny -dz = (zmax - zmin)/nz +dx = (xmax - xmin) / nx +dy = (ymax - ymin) / ny +dz = (zmax - zmin) / nz # Grid location of the axis -ix0 = round((0. - xmin)/dx) -iy0 = round((0. - ymin)/dy) -iz0 = round((0. - zmin)/dz) +ix0 = round((0.0 - xmin) / dx) +iy0 = round((0.0 - ymin) / dy) +iz0 = round((0.0 - zmin) / dz) # Constants -eps_0 = 8.8541878128e-12 #Vacuum Permittivity in C/(V*m) -q_e = -1.60217662e-19 #Electron charge in C -pi = np.pi #Circular constant of the universe -r_0 = 0.1 #Initial radius of sphere -q_tot = -1e-15 #Total charge of sphere in C +eps_0 = 8.8541878128e-12 # Vacuum Permittivity in C/(V*m) +q_e = -1.60217662e-19 # Electron charge in C +pi = np.pi # Circular constant of the universe +r_0 = 0.1 # Initial radius of sphere +q_tot = -1e-15 # Total charge of sphere in C + # Define functions for exact forms of v(r), t(r), Er(r) with r as the radius of # the sphere. 
The sphere starts with initial radius r_0 and this radius expands @@ -86,43 +88,59 @@ # The E was calculated at the end of the last time step def v_exact(r): return np.sqrt(q_e * q_tot / (2 * pi * m_e * eps_0) * (1 / r_0 - 1 / r)) + + def t_exact(r): - return np.sqrt(r_0 ** 3 * 2 * pi * m_e * eps_0 / (q_e * q_tot)) * (np.sqrt(r / r_0 - 1) * np.sqrt(r / r_0) + np.log(np.sqrt(r / r_0 - 1) + np.sqrt(r / r_0))) + return np.sqrt(r_0**3 * 2 * pi * m_e * eps_0 / (q_e * q_tot)) * ( + np.sqrt(r / r_0 - 1) * np.sqrt(r / r_0) + + np.log(np.sqrt(r / r_0 - 1) + np.sqrt(r / r_0)) + ) + + def func(rho): - return t_exact(rho) - t_max #Objective function to find r(t_max) -r_end = fsolve(func,r_0)[0] #Numerically solve for r(t_max) + return t_exact(rho) - t_max # Objective function to find r(t_max) + + +r_end = fsolve(func, r_0)[0] # Numerically solve for r(t_max) + + def E_exact(r): - return np.sign(r) * (q_tot / (4 * pi * eps_0 * r ** 2) * (abs(r) >= r_end) + q_tot * abs(r) / (4 * pi * eps_0 * r_end ** 3) * (abs(r) < r_end)) + return np.sign(r) * ( + q_tot / (4 * pi * eps_0 * r**2) * (abs(r) >= r_end) + + q_tot * abs(r) / (4 * pi * eps_0 * r_end**3) * (abs(r) < r_end) + ) + # Load data pertaining to fields -data = ds.covering_grid(level=0, - left_edge=ds.domain_left_edge, - dims=ds.domain_dimensions) +data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) # Extract the E field along the axes # if ndims == 2: -if ds.parameters['geometry.dims'] == 'RZ': - Ex = data[('boxlib','Er')].to_ndarray() - Ex_axis = Ex[:,iz0,0] +if ds.parameters["geometry.dims"] == "RZ": + Ex = data[("boxlib", "Er")].to_ndarray() + Ex_axis = Ex[:, iz0, 0] Ey_axis = Ex_axis - Ez = data[('boxlib','Ez')].to_ndarray() - Ez_axis = Ez[ix0,:,0] + Ez = data[("boxlib", "Ez")].to_ndarray() + Ez_axis = Ez[ix0, :, 0] else: - Ex = data[('mesh','Ex')].to_ndarray() - Ex_axis = Ex[:,iy0,iz0] - Ey = data[('mesh','Ey')].to_ndarray() - Ey_axis = Ey[ix0,:,iz0] - Ez = 
data[('mesh','Ez')].to_ndarray() - Ez_axis = Ez[ix0,iy0,:] + Ex = data[("mesh", "Ex")].to_ndarray() + Ex_axis = Ex[:, iy0, iz0] + Ey = data[("mesh", "Ey")].to_ndarray() + Ey_axis = Ey[ix0, :, iz0] + Ez = data[("mesh", "Ez")].to_ndarray() + Ez_axis = Ez[ix0, iy0, :] + def calculate_error(E_axis, xmin, dx, nx): # Compute cell centers for grid - x_cell_centers = np.linspace(xmin+dx/2.,xmax-dx/2.,nx) + x_cell_centers = np.linspace(xmin + dx / 2.0, xmax - dx / 2.0, nx) # Extract subgrid away from boundary (exact solution assumes infinite/open # domain but WarpX solution assumes perfect conducting walls) - ix1 = round((xmin/2. - xmin)/dx) - ix2 = round((xmax/2. - xmin)/dx) + ix1 = round((xmin / 2.0 - xmin) / dx) + ix2 = round((xmax / 2.0 - xmin) / dx) x_sub_grid = x_cell_centers[ix1:ix2] # Exact solution of field along Cartesian axes @@ -132,36 +150,47 @@ def calculate_error(E_axis, xmin, dx, nx): E_grid = E_axis[ix1:ix2] # Define approximate L2 norm error between exact and numerical solutions - L2_error = (np.sqrt(sum((E_exact_grid - E_grid)**2)) - / np.sqrt(sum((E_exact_grid)**2))) + L2_error = np.sqrt(sum((E_exact_grid - E_grid) ** 2)) / np.sqrt( + sum((E_exact_grid) ** 2) + ) return L2_error + L2_error_x = calculate_error(Ex_axis, xmin, dx, nx) L2_error_y = calculate_error(Ey_axis, ymin, dy, ny) L2_error_z = calculate_error(Ez_axis, zmin, dz, nz) -print("L2 error along x-axis = %s" %L2_error_x) -print("L2 error along y-axis = %s" %L2_error_y) -print("L2 error along z-axis = %s" %L2_error_z) +print("L2 error along x-axis = %s" % L2_error_x) +print("L2 error along y-axis = %s" % L2_error_y) +print("L2 error along z-axis = %s" % L2_error_z) assert L2_error_x < l2_tolerance assert L2_error_y < l2_tolerance assert L2_error_z < l2_tolerance + # Check conservation of energy def return_energies(iteration): - ux, uy, uz, phi, m, q, w = ts.get_particle(['ux', 'uy', 'uz', 'phi', 'mass', 'charge', 'w'], iteration=iteration) - E_kinetic = (w*m*c**2 * (np.sqrt(1 + ux**2 + uy**2 + 
uz**2) - 1)).sum() - E_potential = 0.5*(w*q*phi).sum() # potential energy of particles in their own space-charge field: includes factor 1/2 + ux, uy, uz, phi, m, q, w = ts.get_particle( + ["ux", "uy", "uz", "phi", "mass", "charge", "w"], iteration=iteration + ) + E_kinetic = (w * m * c**2 * (np.sqrt(1 + ux**2 + uy**2 + uz**2) - 1)).sum() + E_potential = ( + 0.5 * (w * q * phi).sum() + ) # potential energy of particles in their own space-charge field: includes factor 1/2 return E_kinetic, E_potential -ts = OpenPMDTimeSeries('./diags/diag2') -if 'phi' in ts.avail_record_components['electron']: + + +ts = OpenPMDTimeSeries("./diags/diag2") +if "phi" in ts.avail_record_components["electron"]: # phi is only available when this script is run with the labframe poisson solver - print('Checking conservation of energy') + print("Checking conservation of energy") Ek_i, Ep_i = return_energies(0) Ek_f, Ep_f = return_energies(30) - assert Ep_f < 0.7*Ep_i # Check that potential energy changes significantly - assert abs( (Ek_i + Ep_i) - (Ek_f + Ep_f) ) < 0.003 * (Ek_i + Ep_i) # Check conservation of energy + assert Ep_f < 0.7 * Ep_i # Check that potential energy changes significantly + assert abs((Ek_i + Ep_i) - (Ek_f + Ep_f)) < 0.003 * ( + Ek_i + Ep_i + ) # Check conservation of energy # Checksum regression analysis test_name = os.path.split(os.getcwd())[1] diff --git a/Examples/Tests/electrostatic_sphere_eb/PICMI_inputs_3d.py b/Examples/Tests/electrostatic_sphere_eb/PICMI_inputs_3d.py index 55fbc87bd9e..97f52a69c72 100755 --- a/Examples/Tests/electrostatic_sphere_eb/PICMI_inputs_3d.py +++ b/Examples/Tests/electrostatic_sphere_eb/PICMI_inputs_3d.py @@ -39,31 +39,31 @@ ########################## grid = picmi.Cartesian3DGrid( - number_of_cells = [nx, ny, nz], - lower_bound = [xmin, ymin, zmin], - upper_bound = [xmax, ymax, zmax], - lower_boundary_conditions = ['dirichlet', 'dirichlet', 'dirichlet'], - upper_boundary_conditions = ['dirichlet', 'dirichlet', 'dirichlet'], - 
lower_boundary_conditions_particles = ['absorbing', 'absorbing', 'absorbing'], - upper_boundary_conditions_particles = ['absorbing', 'absorbing', 'absorbing'], - warpx_potential_lo_x = V_domain_boundary, - warpx_potential_hi_x = V_domain_boundary, - warpx_potential_lo_y = V_domain_boundary, - warpx_potential_hi_y = V_domain_boundary, - warpx_potential_lo_z = V_domain_boundary, - warpx_potential_hi_z = V_domain_boundary, + number_of_cells=[nx, ny, nz], + lower_bound=[xmin, ymin, zmin], + upper_bound=[xmax, ymax, zmax], + lower_boundary_conditions=["dirichlet", "dirichlet", "dirichlet"], + upper_boundary_conditions=["dirichlet", "dirichlet", "dirichlet"], + lower_boundary_conditions_particles=["absorbing", "absorbing", "absorbing"], + upper_boundary_conditions_particles=["absorbing", "absorbing", "absorbing"], + warpx_potential_lo_x=V_domain_boundary, + warpx_potential_hi_x=V_domain_boundary, + warpx_potential_lo_y=V_domain_boundary, + warpx_potential_hi_y=V_domain_boundary, + warpx_potential_lo_z=V_domain_boundary, + warpx_potential_hi_z=V_domain_boundary, warpx_blocking_factor=8, - warpx_max_grid_size = 128 + warpx_max_grid_size=128, ) solver = picmi.ElectrostaticSolver( - grid=grid, method='Multigrid', required_precision=1e-7 + grid=grid, method="Multigrid", required_precision=1e-7 ) embedded_boundary = picmi.EmbeddedBoundary( implicit_function="-(x**2+y**2+z**2-radius**2)", potential=V_embedded_boundary, - radius = 0.1 + radius=0.1, ) ########################## @@ -71,41 +71,41 @@ ########################## particle_diag = picmi.ParticleDiagnostic( - name = 'diag1', - period = 1, - write_dir = '.', - warpx_file_prefix = 'Python_ElectrostaticSphereEB_plt' + name="diag1", + period=1, + write_dir=".", + warpx_file_prefix="Python_ElectrostaticSphereEB_plt", ) field_diag = picmi.FieldDiagnostic( - name = 'diag1', - grid = grid, - period = 1, - data_list = ['Ex', 'Ey', 'Ez', 'phi', 'rho'], - write_dir = '.', - warpx_file_prefix = 'Python_ElectrostaticSphereEB_plt' + 
name="diag1", + grid=grid, + period=1, + data_list=["Ex", "Ey", "Ez", "phi", "rho"], + write_dir=".", + warpx_file_prefix="Python_ElectrostaticSphereEB_plt", ) reduced_diag = picmi.ReducedDiagnostic( - diag_type = 'ChargeOnEB', - name = 'eb_charge', - period = 1) + diag_type="ChargeOnEB", name="eb_charge", period=1 +) reduced_diag_one_eighth = picmi.ReducedDiagnostic( - diag_type = 'ChargeOnEB', - name = 'eb_charge_one_eighth', - weighting_function = '(x>0)*(y>0)*(z>0)', - period = 1) + diag_type="ChargeOnEB", + name="eb_charge_one_eighth", + weighting_function="(x>0)*(y>0)*(z>0)", + period=1, +) ########################## # simulation setup ########################## sim = picmi.Simulation( - solver = solver, - time_step_size = dt, - max_steps = max_steps, + solver=solver, + time_step_size=dt, + max_steps=max_steps, warpx_embedded_boundary=embedded_boundary, - warpx_field_gathering_algo='momentum-conserving' + warpx_field_gathering_algo="momentum-conserving", ) sim.add_diagnostic(particle_diag) diff --git a/Examples/Tests/electrostatic_sphere_eb/analysis.py b/Examples/Tests/electrostatic_sphere_eb/analysis.py index bf2725616b5..71b3bfa3aa5 100755 --- a/Examples/Tests/electrostatic_sphere_eb/analysis.py +++ b/Examples/Tests/electrostatic_sphere_eb/analysis.py @@ -7,7 +7,7 @@ import os import sys -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # Check reduced diagnostics for charge on EB @@ -15,19 +15,19 @@ from scipy.constants import epsilon_0 # Theoretical charge on the embedded boundary, for sphere at potential phi_0 -phi_0 = 1. 
# V -R = 0.1 # m -q_th = 4*np.pi*epsilon_0*phi_0*R -print('Theoretical charge: ', q_th) - -data = np.loadtxt('diags/reducedfiles/eb_charge.txt') -q_sim = data[1,2] -print('Simulation charge: ', q_sim) -assert abs((q_sim-q_th)/q_th) < 0.06 - -data_eighth = np.loadtxt('diags/reducedfiles/eb_charge_one_eighth.txt') -q_sim_eighth = data_eighth[1,2] -assert abs((q_sim_eighth-q_th/8)/(q_th/8)) < 0.06 +phi_0 = 1.0 # V +R = 0.1 # m +q_th = 4 * np.pi * epsilon_0 * phi_0 * R +print("Theoretical charge: ", q_th) + +data = np.loadtxt("diags/reducedfiles/eb_charge.txt") +q_sim = data[1, 2] +print("Simulation charge: ", q_sim) +assert abs((q_sim - q_th) / q_th) < 0.06 + +data_eighth = np.loadtxt("diags/reducedfiles/eb_charge_one_eighth.txt") +q_sim_eighth = data_eighth[1, 2] +assert abs((q_sim_eighth - q_th / 8) / (q_th / 8)) < 0.06 filename = sys.argv[1] test_name = os.path.split(os.getcwd())[1] diff --git a/Examples/Tests/electrostatic_sphere_eb/analysis_rz.py b/Examples/Tests/electrostatic_sphere_eb/analysis_rz.py index 18aba4322f0..b33f19488d0 100755 --- a/Examples/Tests/electrostatic_sphere_eb/analysis_rz.py +++ b/Examples/Tests/electrostatic_sphere_eb/analysis_rz.py @@ -23,47 +23,49 @@ import yt from unyt import m -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI tolerance = 0.0041 fn = sys.argv[1] -ds = yt.load( fn ) +ds = yt.load(fn) -all_data_level_0 = ds.covering_grid(level=0,left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) -phi = all_data_level_0['boxlib', 'phi'].v.squeeze() -Er = all_data_level_0['boxlib', 'Er'].v.squeeze() +all_data_level_0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +phi = all_data_level_0["boxlib", "phi"].v.squeeze() +Er = all_data_level_0["boxlib", "Er"].v.squeeze() -Dx = ds.domain_width/ds.domain_dimensions +Dx = ds.domain_width / ds.domain_dimensions dr = Dx[0] rmin = ds.domain_left_edge[0] rmax = 
ds.domain_right_edge[0] nr = phi.shape[0] -r = np.linspace(rmin+dr/2.,rmax-dr/2.,nr) -B = 1.0/np.log(0.1/0.5) -A = -B*np.log(0.5) +r = np.linspace(rmin + dr / 2.0, rmax - dr / 2.0, nr) +B = 1.0 / np.log(0.1 / 0.5) +A = -B * np.log(0.5) err = 0.0 errmax_phi = 0.0 errmax_Er = 0.0 for i in range(len(r)): # outside EB and last cutcell - if r[i] > 0.1*m + dr: - phi_theory = A+B*np.log(r[i]) - Er_theory = -B/float(r[i]) - err = abs( phi_theory - phi[i,:] ).max() / phi_theory - if err>errmax_phi: + if r[i] > 0.1 * m + dr: + phi_theory = A + B * np.log(r[i]) + Er_theory = -B / float(r[i]) + err = abs(phi_theory - phi[i, :]).max() / phi_theory + if err > errmax_phi: errmax_phi = err - err = abs( Er_theory - Er[i,:] ).max() / Er_theory + err = abs(Er_theory - Er[i, :]).max() / Er_theory # Exclude the last inaccurate interpolation. - if err>errmax_Er and i errmax_Er and i < len(r) - 1: errmax_Er = err -print('max error of phi = ', errmax_phi) -print('max error of Er = ', errmax_Er) -print('tolerance = ', tolerance) -assert(errmax_phi= (0.1+dr))) + rmin = np.min(np.argwhere(r >= (0.1 + dr))) rmax = -1 r = r[rmin:rmax] - phi_sim = phi_sim[:,rmin:rmax] - Er_sim = Er_sim[:,rmin:rmax] + phi_sim = phi_sim[:, rmin:rmax] + Er_sim = Er_sim[:, rmin:rmax] + + phi_theory = A + B * np.log(r) + phi_theory = np.tile(phi_theory, (phi_sim.shape[0], 1)) + phi_error = np.max(np.abs(phi_theory - phi_sim) / np.abs(phi_theory)) - phi_theory = A + B*np.log(r) - phi_theory = np.tile(phi_theory, (phi_sim.shape[0],1)) - phi_error = np.max(np.abs(phi_theory-phi_sim) / np.abs(phi_theory)) + Er_theory = -B / r + Er_theory = np.tile(Er_theory, (Er_sim.shape[0], 1)) + Er_error = np.max(np.abs(Er_theory - Er_sim) / np.abs(Er_theory)) - Er_theory = -B/r - Er_theory = np.tile(Er_theory, (Er_sim.shape[0],1)) - Er_error = np.max(np.abs(Er_theory-Er_sim) / np.abs(Er_theory)) + print(f"max error of phi[lev={level}]: {phi_error}") + print(f"max error of Er[lev={level}]: {Er_error}") + assert phi_error < tolerance 
+ assert Er_error < tolerance - print(f'max error of phi[lev={level}]: {phi_error}') - print(f'max error of Er[lev={level}]: {Er_error}') - assert(phi_error < tolerance) - assert(Er_error < tolerance) ts = OpenPMDTimeSeries(fn) -level_fields = [field for field in ts.avail_fields if 'lvl' in field] +level_fields = [field for field in ts.avail_fields if "lvl" in field] nlevels = 0 if level_fields == [] else int(level_fields[-1][-1]) -for level in range(nlevels+1): - get_error_per_lev(ts,level) +for level in range(nlevels + 1): + get_error_per_lev(ts, level) test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, fn, output_format="openpmd") diff --git a/Examples/Tests/embedded_boundary_cube/analysis_fields.py b/Examples/Tests/embedded_boundary_cube/analysis_fields.py index dc6af9d57d2..1890c1d9aea 100755 --- a/Examples/Tests/embedded_boundary_cube/analysis_fields.py +++ b/Examples/Tests/embedded_boundary_cube/analysis_fields.py @@ -8,7 +8,7 @@ import yt from scipy.constants import c, mu_0, pi -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # This is a script that analyses the simulation results from @@ -26,9 +26,9 @@ hi = [0.8, 0.8, 0.8] lo = [-0.8, -0.8, -0.8] ncells = [48, 48, 48] -dx = (hi[0] - lo[0])/ncells[0] -dy = (hi[1] - lo[1])/ncells[1] -dz = (hi[2] - lo[2])/ncells[2] +dx = (hi[0] - lo[0]) / ncells[0] +dy = (hi[1] - lo[1]) / ncells[1] +dz = (hi[2] - lo[2]) / ncells[2] m = 0 n = 1 p = 1 @@ -40,17 +40,19 @@ # Open the right plot file filename = sys.argv[1] ds = yt.load(filename) -data = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) +data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) # Parse test name and check whether this use the macroscopic solver # (i.e. 
solving the equation in a dielectric) -macroscopic = True if re.search( 'macroscopic', filename ) else False +macroscopic = True if re.search("macroscopic", filename) else False # Calculate frequency of the mode oscillation -omega = np.sqrt( h_2 ) * c +omega = np.sqrt(h_2) * c if macroscopic: # Relative permittivity used in this test: epsilon_r = 1.5 - omega *= 1./np.sqrt(1.5) + omega *= 1.0 / np.sqrt(1.5) t = ds.current_time.to_value() @@ -61,39 +63,50 @@ for i in range(ncells[0]): for j in range(ncells[1]): for k in range(ncells[2]): - x = i*dx + lo[0] - y = (j+0.5)*dy + lo[1] - z = k*dz + lo[2] - - By_th[i, j, k] = -2/h_2*mu_0*(n * pi/Ly)*(p * pi/Lz) * (np.cos(m * pi/Lx * (x - Lx/2)) * - np.sin(n * pi/Ly * (y - Ly/2)) * - np.cos(p * pi/Lz * (z - Lz/2)) * - (-Lx/2 <= x < Lx/2) * - (-Ly/2 <= y < Ly/2) * - (-Lz/2 <= z < Lz/2) * - np.cos(omega * t)) - - x = i*dx + lo[0] - y = j*dy + lo[1] - z = (k+0.5)*dz + lo[2] - Bz_th[i, j, k] = mu_0*(np.cos(m * pi/Lx * (x - Lx/2)) * - np.cos(n * pi/Ly * (y - Ly/2)) * - np.sin(p * pi/Lz * (z - Lz/2)) * - (-Lx/2 <= x < Lx/2) * - (-Ly/2 <= y < Ly/2) * - (-Lz/2 <= z < Lz/2) * - np.cos(omega * t)) + x = i * dx + lo[0] + y = (j + 0.5) * dy + lo[1] + z = k * dz + lo[2] + + By_th[i, j, k] = ( + -2 + / h_2 + * mu_0 + * (n * pi / Ly) + * (p * pi / Lz) + * ( + np.cos(m * pi / Lx * (x - Lx / 2)) + * np.sin(n * pi / Ly * (y - Ly / 2)) + * np.cos(p * pi / Lz * (z - Lz / 2)) + * (-Lx / 2 <= x < Lx / 2) + * (-Ly / 2 <= y < Ly / 2) + * (-Lz / 2 <= z < Lz / 2) + * np.cos(omega * t) + ) + ) + + x = i * dx + lo[0] + y = j * dy + lo[1] + z = (k + 0.5) * dz + lo[2] + Bz_th[i, j, k] = mu_0 * ( + np.cos(m * pi / Lx * (x - Lx / 2)) + * np.cos(n * pi / Ly * (y - Ly / 2)) + * np.sin(p * pi / Lz * (z - Lz / 2)) + * (-Lx / 2 <= x < Lx / 2) + * (-Ly / 2 <= y < Ly / 2) + * (-Lz / 2 <= z < Lz / 2) + * np.cos(omega * t) + ) rel_tol_err = 1e-1 # Compute relative l^2 error on By -By_sim = data[('mesh','By')].to_ndarray() -rel_err_y = np.sqrt( 
np.sum(np.square(By_sim - By_th)) / np.sum(np.square(By_th))) -assert(rel_err_y < rel_tol_err) +By_sim = data[("mesh", "By")].to_ndarray() +rel_err_y = np.sqrt(np.sum(np.square(By_sim - By_th)) / np.sum(np.square(By_th))) +assert rel_err_y < rel_tol_err # Compute relative l^2 error on Bz -Bz_sim = data[('mesh','Bz')].to_ndarray() -rel_err_z = np.sqrt( np.sum(np.square(Bz_sim - Bz_th)) / np.sum(np.square(Bz_th))) -assert(rel_err_z < rel_tol_err) +Bz_sim = data[("mesh", "Bz")].to_ndarray() +rel_err_z = np.sqrt(np.sum(np.square(Bz_sim - Bz_th)) / np.sum(np.square(Bz_th))) +assert rel_err_z < rel_tol_err test_name = os.path.split(os.getcwd())[1] diff --git a/Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py b/Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py index 8faa299025e..70a5b7d46c5 100755 --- a/Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py +++ b/Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py @@ -7,7 +7,7 @@ import yt from scipy.constants import c, mu_0, pi -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # This is a script that analyses the simulation results from @@ -32,7 +32,9 @@ # Open the right plot file filename = sys.argv[1] ds = yt.load(filename) -data = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) +data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) t = ds.current_time.to_value() @@ -40,24 +42,26 @@ By_th = np.zeros(ncells) for i in range(ncells[0]): for j in range(ncells[1]): - x = (i+0.5) * dx + lo[0] - z = (j+0.5) * dz + lo[1] + x = (i + 0.5) * dx + lo[0] + z = (j + 0.5) * dz + lo[1] - By_th[i, j, 0] = mu_0 * (np.cos(m * pi / Lx * (x - Lx / 2)) * - np.cos(n * pi / Lz * (z - Lz / 2)) * - (-Lx / 2 <= x < Lx / 2) * - (-Lz / 2 <= z < Lz / 2) * - np.cos(np.pi / Lx * c * t)) + By_th[i, j, 0] = mu_0 * ( + np.cos(m * pi / Lx * (x - Lx / 
2)) + * np.cos(n * pi / Lz * (z - Lz / 2)) + * (-Lx / 2 <= x < Lx / 2) + * (-Lz / 2 <= z < Lz / 2) + * np.cos(np.pi / Lx * c * t) + ) rel_tol_err = 1e-3 # Compute relative l^2 error on By -By_sim = data['By'].to_ndarray() +By_sim = data["By"].to_ndarray() rel_err_y = np.sqrt(np.sum(np.square(By_sim - By_th)) / np.sum(np.square(By_th))) -assert (rel_err_y < rel_tol_err) +assert rel_err_y < rel_tol_err # Compute relative l^2 error on Ey -Ey_sim = data['Ey'].to_ndarray() -rel_err_y = np.sqrt(np.sum(np.square(Ey_sim/c - By_th)) / np.sum(np.square(By_th))) +Ey_sim = data["Ey"].to_ndarray() +rel_err_y = np.sqrt(np.sum(np.square(Ey_sim / c - By_th)) / np.sum(np.square(By_th))) test_name = os.path.split(os.getcwd())[1] diff --git a/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py b/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py index da344f332a1..bef85259f17 100755 --- a/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py +++ b/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py @@ -6,6 +6,7 @@ occurs along the angle given by the theoretical Airy pattern, i.e. 
theta_diffraction = 1.22 * lambda / d """ + import os import sys @@ -13,29 +14,34 @@ from openpmd_viewer import OpenPMDTimeSeries from scipy.ndimage import gaussian_filter1d -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI -ts = OpenPMDTimeSeries('./EmbeddedBoundaryDiffraction_plt/') +ts = OpenPMDTimeSeries("./EmbeddedBoundaryDiffraction_plt/") # Extract the intensity as a function of r and z -Ex, info = ts.get_field('E', 'x', iteration=300) -I = gaussian_filter1d(Ex**2, sigma=5, axis=0) # Extract intensity by averaging E^2 over wavelength -irmax = np.argmax( I, axis=-1) +Ex, info = ts.get_field("E", "x", iteration=300) +In = gaussian_filter1d( + Ex**2, sigma=5, axis=0 +) # Extract intensity by averaging E^2 over wavelength +irmax = np.argmax(In, axis=-1) + # Find the radius of the first minimum, as a function of z def r_first_minimum(iz): - ir = len(info.r)//2 - while I[iz, ir+1] < I[iz, ir]: + ir = len(info.r) // 2 + while In[iz, ir + 1] < In[iz, ir]: ir += 1 return info.r[ir] -r = np.array([ r_first_minimum(iz) for iz in range(len(info.z)) ]) + + +r = np.array([r_first_minimum(iz) for iz in range(len(info.z))]) # Check that this corresponds to the prediction from the Airy pattern -theta_diffraction = np.arcsin(1.22*0.1/0.4)/2 -assert np.all( abs(r[50:] - theta_diffraction*info.z[50:]) < 0.03 ) +theta_diffraction = np.arcsin(1.22 * 0.1 / 0.4) / 2 +assert np.all(abs(r[50:] - theta_diffraction * info.z[50:]) < 0.03) # Open the right plot file filename = sys.argv[1] test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename, output_format='openpmd') +checksumAPI.evaluate_checksum(test_name, filename, output_format="openpmd") diff --git a/Examples/Tests/embedded_boundary_python_api/PICMI_inputs_EB_API.py b/Examples/Tests/embedded_boundary_python_api/PICMI_inputs_EB_API.py index faec3ed4668..45d57e606b4 100755 --- 
a/Examples/Tests/embedded_boundary_python_api/PICMI_inputs_EB_API.py +++ b/Examples/Tests/embedded_boundary_python_api/PICMI_inputs_EB_API.py @@ -15,42 +15,42 @@ nz = 64 # mesh bounds for domain -xmin = -32*unit -xmax = 32*unit -ymin = -32*unit -ymax = 32*unit -zmin = -32*unit -zmax = 32*unit +xmin = -32 * unit +xmax = 32 * unit +ymin = -32 * unit +ymax = 32 * unit +zmin = -32 * unit +zmax = 32 * unit ########################## # numerics components ########################## -lower_boundary_conditions = ['open', 'dirichlet', 'periodic'] -upper_boundary_conditions = ['open', 'dirichlet', 'periodic'] +lower_boundary_conditions = ["open", "dirichlet", "periodic"] +upper_boundary_conditions = ["open", "dirichlet", "periodic"] grid = picmi.Cartesian3DGrid( - number_of_cells = [nx, ny, nz], - lower_bound = [xmin, ymin, zmin], - upper_bound = [xmax, ymax, zmax], - lower_boundary_conditions = lower_boundary_conditions, - upper_boundary_conditions = upper_boundary_conditions, - lower_boundary_conditions_particles = ['absorbing', 'absorbing', 'periodic'], - upper_boundary_conditions_particles = ['absorbing', 'absorbing', 'periodic'], - moving_window_velocity = None, - warpx_max_grid_size = 64 + number_of_cells=[nx, ny, nz], + lower_bound=[xmin, ymin, zmin], + upper_bound=[xmax, ymax, zmax], + lower_boundary_conditions=lower_boundary_conditions, + upper_boundary_conditions=upper_boundary_conditions, + lower_boundary_conditions_particles=["absorbing", "absorbing", "periodic"], + upper_boundary_conditions_particles=["absorbing", "absorbing", "periodic"], + moving_window_velocity=None, + warpx_max_grid_size=64, ) flag_correct_div = False -solver = picmi.ElectromagneticSolver(grid=grid, method='Yee', cfl=1.) 
+solver = picmi.ElectromagneticSolver(grid=grid, method="Yee", cfl=1.0) -n_cavity=30 -L_cavity = n_cavity*unit +n_cavity = 30 +L_cavity = n_cavity * unit embedded_boundary = picmi.EmbeddedBoundary( implicit_function="max(max(max(x-L_cavity/2,-L_cavity/2-x),max(y-L_cavity/2,-L_cavity/2-y)),max(z-L_cavity/2,-L_cavity/2-z))", - L_cavity=L_cavity + L_cavity=L_cavity, ) @@ -59,18 +59,18 @@ ########################## particle_diag = picmi.ParticleDiagnostic( - name = 'diag1', - period = 1, - write_dir = '.', - warpx_file_prefix = "embedded_boundary_python_API_plt" + name="diag1", + period=1, + write_dir=".", + warpx_file_prefix="embedded_boundary_python_API_plt", ) field_diag = picmi.FieldDiagnostic( - name = 'diag1', - grid = grid, - period = 1, - data_list = ['Ex'], - write_dir = '.', - warpx_file_prefix = "embedded_boundary_python_API_plt" + name="diag1", + grid=grid, + period=1, + data_list=["Ex"], + write_dir=".", + warpx_file_prefix="embedded_boundary_python_API_plt", ) ########################## @@ -78,10 +78,10 @@ ########################## sim = picmi.Simulation( - solver = solver, - max_steps = max_steps, + solver=solver, + max_steps=max_steps, warpx_embedded_boundary=embedded_boundary, - verbose = 1 + verbose=1, ) sim.add_diagnostic(particle_diag) @@ -100,83 +100,89 @@ print("======== Testing the wrappers of m_edge_lengths =========") -ly_slice_x = edge_lengths_y[nx//2,:,:] -lz_slice_x = edge_lengths_z[nx//2,:,:] +ly_slice_x = edge_lengths_y[nx // 2, :, :] +lz_slice_x = edge_lengths_z[nx // 2, :, :] -n_edge_y_lo = (ny - 30)//2 -n_edge_y_hi = ny - (ny - 30)//2 -n_edge_z_lo = (nz - 30)//2 -n_edge_z_hi = nz - (nz - 30)//2 +n_edge_y_lo = (ny - 30) // 2 +n_edge_y_hi = ny - (ny - 30) // 2 +n_edge_z_lo = (nz - 30) // 2 +n_edge_z_hi = nz - (nz - 30) // 2 -perimeter_slice_x = (np.sum(ly_slice_x[n_edge_y_lo:n_edge_y_hi, n_edge_z_lo+1]) + - np.sum(ly_slice_x[n_edge_y_lo:n_edge_y_hi, n_edge_z_hi-1]) + - np.sum(lz_slice_x[n_edge_y_lo+1, n_edge_z_lo:n_edge_z_hi]) + - 
np.sum(lz_slice_x[n_edge_y_hi-1, n_edge_z_lo:n_edge_z_hi])) +perimeter_slice_x = ( + np.sum(ly_slice_x[n_edge_y_lo:n_edge_y_hi, n_edge_z_lo + 1]) + + np.sum(ly_slice_x[n_edge_y_lo:n_edge_y_hi, n_edge_z_hi - 1]) + + np.sum(lz_slice_x[n_edge_y_lo + 1, n_edge_z_lo:n_edge_z_hi]) + + np.sum(lz_slice_x[n_edge_y_hi - 1, n_edge_z_lo:n_edge_z_hi]) +) -perimeter_slice_x_true = L_cavity*4 +perimeter_slice_x_true = L_cavity * 4 print("Perimeter of the middle x-slice:", perimeter_slice_x) assert np.isclose(perimeter_slice_x, perimeter_slice_x_true, rtol=1e-05, atol=1e-08) -lx_slice_y = edge_lengths_x[:,ny//2,:] -lz_slice_y = edge_lengths_z[:,ny//2,:] +lx_slice_y = edge_lengths_x[:, ny // 2, :] +lz_slice_y = edge_lengths_z[:, ny // 2, :] -n_edge_x_lo = (nx - 30)//2 -n_edge_x_hi = nx - (nx - 30)//2 -n_edge_z_lo = (nz - 30)//2 -n_edge_z_hi = nz - (nz - 30)//2 +n_edge_x_lo = (nx - 30) // 2 +n_edge_x_hi = nx - (nx - 30) // 2 +n_edge_z_lo = (nz - 30) // 2 +n_edge_z_hi = nz - (nz - 30) // 2 -perimeter_slice_y = (np.sum(lx_slice_y[n_edge_x_lo:n_edge_x_hi, n_edge_z_lo+1]) + - np.sum(lx_slice_y[n_edge_x_lo:n_edge_x_hi, n_edge_z_hi-1]) + - np.sum(lz_slice_y[n_edge_x_lo+1, n_edge_z_lo:n_edge_z_hi]) + - np.sum(lz_slice_y[n_edge_x_hi-1, n_edge_z_lo:n_edge_z_hi])) +perimeter_slice_y = ( + np.sum(lx_slice_y[n_edge_x_lo:n_edge_x_hi, n_edge_z_lo + 1]) + + np.sum(lx_slice_y[n_edge_x_lo:n_edge_x_hi, n_edge_z_hi - 1]) + + np.sum(lz_slice_y[n_edge_x_lo + 1, n_edge_z_lo:n_edge_z_hi]) + + np.sum(lz_slice_y[n_edge_x_hi - 1, n_edge_z_lo:n_edge_z_hi]) +) -perimeter_slice_y_true = L_cavity*4 +perimeter_slice_y_true = L_cavity * 4 print("Perimeter of the middle y-slice:", perimeter_slice_y) assert np.isclose(perimeter_slice_y, perimeter_slice_y_true, rtol=1e-05, atol=1e-08) -lx_slice_z = edge_lengths_x[:,:,nz//2] -ly_slice_z = edge_lengths_y[:,:,nz//2] +lx_slice_z = edge_lengths_x[:, :, nz // 2] +ly_slice_z = edge_lengths_y[:, :, nz // 2] -n_edge_x_lo = (nx - 30)//2 -n_edge_x_hi = nx - (nx - 30)//2 
-n_edge_y_lo = (ny - 30)//2 -n_edge_y_hi = ny - (ny - 30)//2 +n_edge_x_lo = (nx - 30) // 2 +n_edge_x_hi = nx - (nx - 30) // 2 +n_edge_y_lo = (ny - 30) // 2 +n_edge_y_hi = ny - (ny - 30) // 2 -perimeter_slice_z = (np.sum(lx_slice_z[n_edge_x_lo:n_edge_x_hi, n_edge_y_lo+1]) + - np.sum(lx_slice_z[n_edge_x_lo:n_edge_x_hi, n_edge_y_hi-1]) + - np.sum(ly_slice_z[n_edge_x_lo+1, n_edge_y_lo:n_edge_y_hi]) + - np.sum(ly_slice_z[n_edge_x_hi-1, n_edge_y_lo:n_edge_y_hi])) +perimeter_slice_z = ( + np.sum(lx_slice_z[n_edge_x_lo:n_edge_x_hi, n_edge_y_lo + 1]) + + np.sum(lx_slice_z[n_edge_x_lo:n_edge_x_hi, n_edge_y_hi - 1]) + + np.sum(ly_slice_z[n_edge_x_lo + 1, n_edge_y_lo:n_edge_y_hi]) + + np.sum(ly_slice_z[n_edge_x_hi - 1, n_edge_y_lo:n_edge_y_hi]) +) -perimeter_slice_z_true = L_cavity*4 +perimeter_slice_z_true = L_cavity * 4 print("Perimeter of the middle z-slice:", perimeter_slice_z) assert np.isclose(perimeter_slice_z, perimeter_slice_z_true, rtol=1e-05, atol=1e-08) print("======== Testing the wrappers of m_face_areas =========") -Sx_slice = np.sum(face_areas_x[nx//2,:,:]) -dx = (xmax-xmin)/nx -Ax = dx*dx -Sx_slice_true = L_cavity*L_cavity - 2*Ax +Sx_slice = np.sum(face_areas_x[nx // 2, :, :]) +dx = (xmax - xmin) / nx +Ax = dx * dx +Sx_slice_true = L_cavity * L_cavity - 2 * Ax print("Area of the middle x-slice:", Sx_slice) assert np.isclose(Sx_slice, Sx_slice_true, rtol=1e-05, atol=1e-08) -Sy_slice = np.sum(face_areas_y[:,ny//2,:]) -dy = (ymax-ymin)/ny -Ay = dy*dy -Sy_slice_true = L_cavity*L_cavity - 2*Ay +Sy_slice = np.sum(face_areas_y[:, ny // 2, :]) +dy = (ymax - ymin) / ny +Ay = dy * dy +Sy_slice_true = L_cavity * L_cavity - 2 * Ay print("Area of the middle y-slice:", Sx_slice) assert np.isclose(Sy_slice, Sy_slice_true, rtol=1e-05, atol=1e-08) -Sz_slice = np.sum(face_areas_z[:,:,nz//2]) -dz = (zmax-zmin)/nz -Az = dz*dz -Sz_slice_true = L_cavity*L_cavity - 2*Az +Sz_slice = np.sum(face_areas_z[:, :, nz // 2]) +dz = (zmax - zmin) / nz +Az = dz * dz +Sz_slice_true = L_cavity * 
L_cavity - 2 * Az print("Area of the middle z-slice:", Sz_slice) assert np.isclose(Sz_slice, Sz_slice_true, rtol=1e-05, atol=1e-08) diff --git a/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields.py b/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields.py index e849958468f..968ebe395a5 100755 --- a/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields.py +++ b/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields.py @@ -14,7 +14,7 @@ import yt from scipy.constants import c, mu_0, pi -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # This is a script that analyses the simulation results from @@ -38,25 +38,27 @@ Ly = 1 Lz = 1 h_2 = (m * pi / Lx) ** 2 + (n * pi / Ly) ** 2 + (p * pi / Lz) ** 2 -theta = np.pi/6 +theta = np.pi / 6 # Open the right plot file filename = sys.argv[1] ds = yt.load(filename) -data = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) +data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) t = ds.current_time.to_value() rel_tol_err = 1e-2 my_grid = ds.index.grids[0] -By_sim = my_grid['raw', 'By_fp'].squeeze().v -Bz_sim = my_grid['raw', 'Bz_fp'].squeeze().v +By_sim = my_grid["raw", "By_fp"].squeeze().v +Bz_sim = my_grid["raw", "Bz_fp"].squeeze().v ncells = np.array(np.shape(By_sim[:, :, :, 0])) -dx = (hi[0] - lo[0])/ncells[0] -dy = (hi[1] - lo[1])/ncells[1] -dz = (hi[2] - lo[2])/ncells[2] +dx = (hi[0] - lo[0]) / ncells[0] +dy = (hi[1] - lo[1]) / ncells[1] +dz = (hi[2] - lo[2]) / ncells[2] # Compute the analytic solution Bx_th = np.zeros(ncells) @@ -65,54 +67,82 @@ for i in range(ncells[0]): for j in range(ncells[1]): for k in range(ncells[2]): - x0 = (i+0.5)*dx + lo[0] - y0 = j*dy + lo[1] - z0 = (k+0.5)*dz + lo[2] + x0 = (i + 0.5) * dx + lo[0] + y0 = j * dy + lo[1] + z0 = (k + 0.5) * dz + lo[2] x = x0 - y = y0*np.cos(-theta)-z0*np.sin(-theta) - 
z = y0*np.sin(-theta)+z0*np.cos(-theta) - By = -2/h_2*mu_0*(n * pi/Ly)*(p * pi/Lz) * (np.cos(m * pi/Lx * (x - Lx/2)) * - np.sin(n * pi/Ly * (y - Ly/2)) * - np.cos(p * pi/Lz * (z - Lz/2)) * - np.cos(np.sqrt(2) * - np.pi / Lx * c * t)) - - Bz = mu_0*(np.cos(m * pi/Lx * (x - Lx/2)) * - np.cos(n * pi/Ly * (y - Ly/2)) * - np.sin(p * pi/Lz * (z - Lz/2)) * - np.cos(np.sqrt(2) * np.pi / Lx * c * t)) - - By_th[i, j, k] = (By*np.cos(theta) - Bz*np.sin(theta))*(By_sim[i, j, k, 0] != 0) - - x0 = (i+0.5)*dx + lo[0] - y0 = (j+0.5)*dy + lo[1] - z0 = k*dz + lo[2] + y = y0 * np.cos(-theta) - z0 * np.sin(-theta) + z = y0 * np.sin(-theta) + z0 * np.cos(-theta) + By = ( + -2 + / h_2 + * mu_0 + * (n * pi / Ly) + * (p * pi / Lz) + * ( + np.cos(m * pi / Lx * (x - Lx / 2)) + * np.sin(n * pi / Ly * (y - Ly / 2)) + * np.cos(p * pi / Lz * (z - Lz / 2)) + * np.cos(np.sqrt(2) * np.pi / Lx * c * t) + ) + ) + + Bz = mu_0 * ( + np.cos(m * pi / Lx * (x - Lx / 2)) + * np.cos(n * pi / Ly * (y - Ly / 2)) + * np.sin(p * pi / Lz * (z - Lz / 2)) + * np.cos(np.sqrt(2) * np.pi / Lx * c * t) + ) + + By_th[i, j, k] = (By * np.cos(theta) - Bz * np.sin(theta)) * ( + By_sim[i, j, k, 0] != 0 + ) + + x0 = (i + 0.5) * dx + lo[0] + y0 = (j + 0.5) * dy + lo[1] + z0 = k * dz + lo[2] x = x0 - y = y0*np.cos(-theta)-z0*np.sin(-theta) - z = y0*np.sin(-theta)+z0*np.cos(-theta) - - By = -2/h_2*mu_0*(n * pi/Ly)*(p * pi/Lz) * (np.cos(m * pi/Lx * (x - Lx/2)) * - np.sin(n * pi/Ly * (y - Ly/2)) * - np.cos(p * pi/Lz * (z - Lz/2)) * - np.cos(np.sqrt(2) * - np.pi / Lx * c * t)) - - Bz = mu_0*(np.cos(m * pi/Lx * (x - Lx/2)) * - np.cos(n * pi/Ly * (y - Ly/2)) * - np.sin(p * pi/Lz * (z - Lz/2)) * - np.cos(np.sqrt(2) * np.pi / Lx * c * t)) - - Bz_th[i, j, k] = (By*np.sin(theta) + Bz*np.cos(theta))*(Bz_sim[i, j, k, 0] != 0) + y = y0 * np.cos(-theta) - z0 * np.sin(-theta) + z = y0 * np.sin(-theta) + z0 * np.cos(-theta) + + By = ( + -2 + / h_2 + * mu_0 + * (n * pi / Ly) + * (p * pi / Lz) + * ( + np.cos(m * pi / Lx * (x - Lx / 2)) + * 
np.sin(n * pi / Ly * (y - Ly / 2)) + * np.cos(p * pi / Lz * (z - Lz / 2)) + * np.cos(np.sqrt(2) * np.pi / Lx * c * t) + ) + ) + + Bz = mu_0 * ( + np.cos(m * pi / Lx * (x - Lx / 2)) + * np.cos(n * pi / Ly * (y - Ly / 2)) + * np.sin(p * pi / Lz * (z - Lz / 2)) + * np.cos(np.sqrt(2) * np.pi / Lx * c * t) + ) + + Bz_th[i, j, k] = (By * np.sin(theta) + Bz * np.cos(theta)) * ( + Bz_sim[i, j, k, 0] != 0 + ) # Compute relative l^2 error on By -rel_err_y = np.sqrt( np.sum(np.square(By_sim[:, :, :, 0] - By_th)) / np.sum(np.square(By_th))) -assert(rel_err_y < rel_tol_err) +rel_err_y = np.sqrt( + np.sum(np.square(By_sim[:, :, :, 0] - By_th)) / np.sum(np.square(By_th)) +) +assert rel_err_y < rel_tol_err # Compute relative l^2 error on Bz -rel_err_z = np.sqrt( np.sum(np.square(Bz_sim[:, :, :, 0] - Bz_th)) / np.sum(np.square(Bz_th))) -assert(rel_err_z < rel_tol_err) +rel_err_z = np.sqrt( + np.sum(np.square(Bz_sim[:, :, :, 0] - Bz_th)) / np.sum(np.square(Bz_th)) +) +assert rel_err_z < rel_tol_err test_name = os.path.split(os.getcwd())[1] diff --git a/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py b/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py index dcdbc83a729..6f3904e8764 100755 --- a/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py +++ b/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py @@ -7,7 +7,7 @@ import yt from scipy.constants import c, mu_0, pi -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # This is a script that analyses the simulation results from @@ -32,14 +32,16 @@ # Open the right plot file filename = sys.argv[1] ds = yt.load(filename) -data = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) +data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) my_grid = ds.index.grids[0] -By_sim = my_grid['By'].squeeze().v +By_sim 
= my_grid["By"].squeeze().v t = ds.current_time.to_value() -theta = np.pi/8 +theta = np.pi / 8 # Compute the analytic solution By_th = np.zeros(ncells) @@ -47,18 +49,24 @@ for j in range(ncells[1]): x = i * dx + lo[0] z = j * dz + lo[1] - xr = x*np.cos(-theta) + z*np.sin(-theta) - zr = -x*np.sin(-theta) + z*np.cos(-theta) + xr = x * np.cos(-theta) + z * np.sin(-theta) + zr = -x * np.sin(-theta) + z * np.cos(-theta) - By_th[i, j] = mu_0 * (np.cos(m * pi / Lx * (xr - Lx / 2)) * - np.cos(n * pi / Lz * (zr - Lz / 2)) * - np.cos(np.pi / Lx * c * t))*(By_sim[i, j] != 0) + By_th[i, j] = ( + mu_0 + * ( + np.cos(m * pi / Lx * (xr - Lx / 2)) + * np.cos(n * pi / Lz * (zr - Lz / 2)) + * np.cos(np.pi / Lx * c * t) + ) + * (By_sim[i, j] != 0) + ) rel_tol_err = 1e-1 # Compute relative l^2 error on By rel_err_y = np.sqrt(np.sum(np.square(By_sim - By_th)) / np.sum(np.square(By_th))) -assert (rel_err_y < rel_tol_err) +assert rel_err_y < rel_tol_err test_name = os.path.split(os.getcwd())[1] diff --git a/Examples/Tests/embedded_circle/analysis.py b/Examples/Tests/embedded_circle/analysis.py index 6401b47bb90..569ca40dce4 100755 --- a/Examples/Tests/embedded_circle/analysis.py +++ b/Examples/Tests/embedded_circle/analysis.py @@ -3,7 +3,7 @@ import os import sys -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file diff --git a/Examples/Tests/energy_conserving_thermal_plasma/analysis.py b/Examples/Tests/energy_conserving_thermal_plasma/analysis.py index 43e9b6d9822..4cf7b4ff4e6 100755 --- a/Examples/Tests/energy_conserving_thermal_plasma/analysis.py +++ b/Examples/Tests/energy_conserving_thermal_plasma/analysis.py @@ -17,21 +17,21 @@ import numpy as np -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file fn = sys.argv[1] # Get 
energy as a function of time, from reduced diagnostics -EFdata = np.genfromtxt('./diags/reducedfiles/EF.txt') # Field energy -EPdata = np.genfromtxt('./diags/reducedfiles/EP.txt') # Particle energy -field_energy = EFdata[:,2] -particle_energy = EPdata[:,2] +EFdata = np.genfromtxt("./diags/reducedfiles/EF.txt") # Field energy +EPdata = np.genfromtxt("./diags/reducedfiles/EP.txt") # Particle energy +field_energy = EFdata[:, 2] +particle_energy = EPdata[:, 2] E = field_energy + particle_energy -print(abs(E-E[0])/E[0]) +print(abs(E - E[0]) / E[0]) # Check that the energy is conserved to 0.3% -assert np.all( abs(E-E[0])/E[0] < 0.003 ) +assert np.all(abs(E - E[0]) / E[0] < 0.003) # Checksum test test_name = os.path.split(os.getcwd())[1] diff --git a/Examples/Tests/field_probe/analysis_field_probe.py b/Examples/Tests/field_probe/analysis_field_probe.py index e167942d77c..57085fb7cdc 100755 --- a/Examples/Tests/field_probe/analysis_field_probe.py +++ b/Examples/Tests/field_probe/analysis_field_probe.py @@ -17,27 +17,31 @@ test will check if the detected EM flux matches expected values, which can be solved analytically. 
""" + import numpy as np import pandas as pd filename = "diags/reducedfiles/FP_line.txt" # Open data file -df = pd.read_csv(filename, sep=' ') -df = df.sort_values(by=['[2]part_x_lev0-(m)']) +df = pd.read_csv(filename, sep=" ") +df = df.sort_values(by=["[2]part_x_lev0-(m)"]) # Select position and Intensity of timestep 500 -x = df.query('`[0]step()` == 500')['[2]part_x_lev0-(m)'] -S = df.query('`[0]step()` == 500')['[11]part_S_lev0-(W*s/m^2)'] +x = df.query("`[0]step()` == 500")["[2]part_x_lev0-(m)"] +S = df.query("`[0]step()` == 500")["[11]part_S_lev0-(W*s/m^2)"] xvals = x.to_numpy() svals = S.to_numpy() # Default intensity is highest measured value for plane # wave interacting with single slit I_0 = np.max(S) -def I_envelope (x, lam = 0.2e-6, a = 0.3e-6, D = 1.7e-6): + + +def I_envelope(x, lam=0.2e-6, a=0.3e-6, D=1.7e-6): arg = np.pi * a / lam * np.sin(np.arctan(x / D)) - return np.sinc( arg / np.pi )**2 + return np.sinc(arg / np.pi) ** 2 + # Count non-outlier values away from simulation boundaries counter = np.arange(60, 140, 2) @@ -47,11 +51,11 @@ def I_envelope (x, lam = 0.2e-6, a = 0.3e-6, D = 1.7e-6): for a in counter: b = I_0 * I_envelope(xvals[a]) c = svals[a] - error += abs((c-b)/b) * 100.0 + error += abs((c - b) / b) * 100.0 averror = error / (len(counter) - 1) # average error range set at 2.5% if averror > 2.5: - print('Average error greater than 2.5%') + print("Average error greater than 2.5%") assert averror < 2.5 diff --git a/Examples/Tests/flux_injection/analysis_flux_injection_3d.py b/Examples/Tests/flux_injection/analysis_flux_injection_3d.py index d0271f6aa94..3840bb72e74 100755 --- a/Examples/Tests/flux_injection/analysis_flux_injection_3d.py +++ b/Examples/Tests/flux_injection/analysis_flux_injection_3d.py @@ -20,6 +20,7 @@ velocity distribution (Gaussian or Gaussian-flux depending on the direction of space) """ + import os import re import sys @@ -30,21 +31,21 @@ from scipy.constants import c, m_e, m_p from scipy.special import erf 
-sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI yt.funcs.mylog.setLevel(0) # Open plotfile specified in command line fn = sys.argv[1] -ds = yt.load( fn ) +ds = yt.load(fn) ad = ds.all_data() t_max = ds.current_time.item() # time of simulation # Total number of electrons expected: # Simulation parameters determine the total number of particles emitted (Ntot) -flux = 1. # in m^-2.s^-1, from the input script -emission_surface = 8*8 # in m^2 +flux = 1.0 # in m^-2.s^-1, from the input script +emission_surface = 8 * 8 # in m^2 Ntot = flux * emission_surface * t_max # Parameters of the histogram @@ -53,92 +54,103 @@ # Define function that histogram and check the data + def gaussian_dist(u, u_th): - return 1./((2*np.pi)**.5*u_th) * np.exp(-u**2/(2*u_th**2) ) + return 1.0 / ((2 * np.pi) ** 0.5 * u_th) * np.exp(-(u**2) / (2 * u_th**2)) + def gaussian_flux_dist(u, u_th, u_m): - normalization_factor = u_th**2 * np.exp(-u_m**2/(2*u_th**2)) + (np.pi/2)**.5*u_m*u_th * (1 + erf(u_m/(2**.5*u_th))) - result = 1./normalization_factor * np.where( u>0, u * np.exp(-(u-u_m)**2/(2*u_th**2)), 0 ) + normalization_factor = u_th**2 * np.exp(-(u_m**2) / (2 * u_th**2)) + ( + np.pi / 2 + ) ** 0.5 * u_m * u_th * (1 + erf(u_m / (2**0.5 * u_th))) + result = ( + 1.0 + / normalization_factor + * np.where(u > 0, u * np.exp(-((u - u_m) ** 2) / (2 * u_th**2)), 0) + ) return result -def compare_gaussian(u, w, u_th, label=''): - du = (hist_range[1]-hist_range[0])/hist_bins - w_hist, u_hist = np.histogram(u, bins=hist_bins, weights=w/du, range=hist_range) - u_hist = 0.5*(u_hist[1:]+u_hist[:-1]) - w_th = Ntot*gaussian_dist(u_hist, u_th) - plt.plot( u_hist, w_hist, label=label+': simulation' ) - plt.plot( u_hist, w_th, '--', label=label+': theory' ) - assert np.allclose( w_hist, w_th, atol=0.07*w_th.max() ) - -def compare_gaussian_flux(u, w, u_th, u_m, label=''): - du = (hist_range[1]-hist_range[0])/hist_bins - 
w_hist, u_hist = np.histogram(u, bins=hist_bins, weights=w/du, range=hist_range) - u_hist = 0.5*(u_hist[1:]+u_hist[:-1]) - w_th = Ntot*gaussian_flux_dist(u_hist, u_th, u_m) - plt.plot( u_hist, w_hist, label=label+': simulation' ) - plt.plot( u_hist, w_th, '--', label=label+': theory' ) - assert np.allclose( w_hist, w_th, atol=0.05*w_th.max() ) + +def compare_gaussian(u, w, u_th, label=""): + du = (hist_range[1] - hist_range[0]) / hist_bins + w_hist, u_hist = np.histogram(u, bins=hist_bins, weights=w / du, range=hist_range) + u_hist = 0.5 * (u_hist[1:] + u_hist[:-1]) + w_th = Ntot * gaussian_dist(u_hist, u_th) + plt.plot(u_hist, w_hist, label=label + ": simulation") + plt.plot(u_hist, w_th, "--", label=label + ": theory") + assert np.allclose(w_hist, w_th, atol=0.07 * w_th.max()) + + +def compare_gaussian_flux(u, w, u_th, u_m, label=""): + du = (hist_range[1] - hist_range[0]) / hist_bins + w_hist, u_hist = np.histogram(u, bins=hist_bins, weights=w / du, range=hist_range) + u_hist = 0.5 * (u_hist[1:] + u_hist[:-1]) + w_th = Ntot * gaussian_flux_dist(u_hist, u_th, u_m) + plt.plot(u_hist, w_hist, label=label + ": simulation") + plt.plot(u_hist, w_th, "--", label=label + ": theory") + assert np.allclose(w_hist, w_th, atol=0.05 * w_th.max()) + # Load data and perform check -plt.figure(figsize=(8,7)) +plt.figure(figsize=(8, 7)) plt.subplot(221) -plt.title('Electrons u_m=0.07') +plt.title("Electrons u_m=0.07") -ux = ad['electron','particle_momentum_x'].to_ndarray()/(m_e*c) -uy = ad['electron','particle_momentum_y'].to_ndarray()/(m_e*c) -uz = ad['electron','particle_momentum_z'].to_ndarray()/(m_e*c) -w = ad['electron', 'particle_weight'].to_ndarray() +ux = ad["electron", "particle_momentum_x"].to_ndarray() / (m_e * c) +uy = ad["electron", "particle_momentum_y"].to_ndarray() / (m_e * c) +uz = ad["electron", "particle_momentum_z"].to_ndarray() / (m_e * c) +w = ad["electron", "particle_weight"].to_ndarray() -compare_gaussian(ux, w, u_th=0.1, label='u_x') 
-compare_gaussian_flux(uy, w, u_th=0.1, u_m=0.07, label='u_y') -compare_gaussian(uz, w, u_th=0.1, label='u_z') +compare_gaussian(ux, w, u_th=0.1, label="u_x") +compare_gaussian_flux(uy, w, u_th=0.1, u_m=0.07, label="u_y") +compare_gaussian(uz, w, u_th=0.1, label="u_z") plt.subplot(223) -plt.title('Protons u_m=0.05') +plt.title("Protons u_m=0.05") -ux = ad['proton','particle_momentum_x'].to_ndarray()/(m_p*c) -uy = ad['proton','particle_momentum_y'].to_ndarray()/(m_p*c) -uz = ad['proton','particle_momentum_z'].to_ndarray()/(m_p*c) -w = ad['proton', 'particle_weight'].to_ndarray() +ux = ad["proton", "particle_momentum_x"].to_ndarray() / (m_p * c) +uy = ad["proton", "particle_momentum_y"].to_ndarray() / (m_p * c) +uz = ad["proton", "particle_momentum_z"].to_ndarray() / (m_p * c) +w = ad["proton", "particle_weight"].to_ndarray() -compare_gaussian_flux(-ux, w, u_th=0.1, u_m=0.05, label='u_x') -compare_gaussian(uy, w, u_th=0.1, label='u_y') -compare_gaussian(uz, w, u_th=0.1, label='u_z') +compare_gaussian_flux(-ux, w, u_th=0.1, u_m=0.05, label="u_x") +compare_gaussian(uy, w, u_th=0.1, label="u_y") +compare_gaussian(uz, w, u_th=0.1, label="u_z") plt.subplot(222) -plt.title('Electrons u_m=-0.07') +plt.title("Electrons u_m=-0.07") -ux = ad['electron_negative','particle_momentum_x'].to_ndarray()/(m_e*c) -uy = ad['electron_negative','particle_momentum_y'].to_ndarray()/(m_e*c) -uz = ad['electron_negative','particle_momentum_z'].to_ndarray()/(m_e*c) -w = ad['electron_negative', 'particle_weight'].to_ndarray() +ux = ad["electron_negative", "particle_momentum_x"].to_ndarray() / (m_e * c) +uy = ad["electron_negative", "particle_momentum_y"].to_ndarray() / (m_e * c) +uz = ad["electron_negative", "particle_momentum_z"].to_ndarray() / (m_e * c) +w = ad["electron_negative", "particle_weight"].to_ndarray() -compare_gaussian(ux, w, u_th=0.1, label='u_x') -compare_gaussian(uy, w, u_th=0.1, label='u_y') -compare_gaussian_flux(uz, w, u_th=0.1, u_m=-0.07, label='u_z') +compare_gaussian(ux, 
w, u_th=0.1, label="u_x") +compare_gaussian(uy, w, u_th=0.1, label="u_y") +compare_gaussian_flux(uz, w, u_th=0.1, u_m=-0.07, label="u_z") plt.legend(loc=(1.02, 0.5)) plt.subplot(224) -plt.title('Protons u_m=-0.05') +plt.title("Protons u_m=-0.05") -ux = ad['proton_negative','particle_momentum_x'].to_ndarray()/(m_p*c) -uy = ad['proton_negative','particle_momentum_y'].to_ndarray()/(m_p*c) -uz = ad['proton_negative','particle_momentum_z'].to_ndarray()/(m_p*c) -w = ad['proton_negative', 'particle_weight'].to_ndarray() +ux = ad["proton_negative", "particle_momentum_x"].to_ndarray() / (m_p * c) +uy = ad["proton_negative", "particle_momentum_y"].to_ndarray() / (m_p * c) +uz = ad["proton_negative", "particle_momentum_z"].to_ndarray() / (m_p * c) +w = ad["proton_negative", "particle_weight"].to_ndarray() -compare_gaussian(ux, w, u_th=0.1, label='u_x') -compare_gaussian(uy, w, u_th=0.1, label='u_y') -compare_gaussian_flux(-uz, w, u_th=0.1, u_m=-0.05, label='u_z') -#plt.legend(loc=0) +compare_gaussian(ux, w, u_th=0.1, label="u_x") +compare_gaussian(uy, w, u_th=0.1, label="u_y") +compare_gaussian_flux(-uz, w, u_th=0.1, u_m=-0.05, label="u_z") +# plt.legend(loc=0) plt.tight_layout() -plt.savefig('Distribution.png') +plt.savefig("Distribution.png") # Verify checksum test_name = os.path.split(os.getcwd())[1] -if re.search( 'single_precision', fn ): - checksumAPI.evaluate_checksum(test_name, fn, rtol=1.e-3) +if re.search("single_precision", fn): + checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3) else: checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/flux_injection/analysis_flux_injection_rz.py b/Examples/Tests/flux_injection/analysis_flux_injection_rz.py index 8ec944c715d..ad73fdb47af 100755 --- a/Examples/Tests/flux_injection/analysis_flux_injection_rz.py +++ b/Examples/Tests/flux_injection/analysis_flux_injection_rz.py @@ -24,6 +24,7 @@ velocity was along the azimuthal direction.) - The total number of electrons corresponds to the expected flux. 
""" + import os import re import sys @@ -31,35 +32,35 @@ import numpy as np import yt -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI yt.funcs.mylog.setLevel(0) # Open plotfile specified in command line fn = sys.argv[1] -ds = yt.load( fn ) +ds = yt.load(fn) t_max = ds.current_time.item() # time of simulation # Total number of electrons expected: -flux = 1. # in m^-2.s^-1, from the input script -emission_surface = 0.8 # in m^2, +flux = 1.0 # in m^-2.s^-1, from the input script +emission_surface = 0.8 # in m^2, # given that xmin = 1.5, xmax = 1.9, zmin = -1.0, zmax = 1. n_tot = flux * emission_surface * t_max # Read particle data ad = ds.all_data() -r = ad['particle_position_x'].to_ndarray() # Corresponds to the radial coordinate in RZ -w = ad['particle_weight'].to_ndarray() +r = ad["particle_position_x"].to_ndarray() # Corresponds to the radial coordinate in RZ +w = ad["particle_weight"].to_ndarray() # Check that the number of particles matches the expected one -assert np.allclose( w.sum(), n_tot, rtol=0.05 ) +assert np.allclose(w.sum(), n_tot, rtol=0.05) # Check that the particles are at the right radius -assert np.all( (r >= 1.48) & (r <=1.92) ) +assert np.all((r >= 1.48) & (r <= 1.92)) test_name = os.path.split(os.getcwd())[1] -if re.search( 'single_precision', fn ): - checksumAPI.evaluate_checksum(test_name, fn, rtol=1.e-3) +if re.search("single_precision", fn): + checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3) else: checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/gaussian_beam/PICMI_inputs_gaussian_beam.py b/Examples/Tests/gaussian_beam/PICMI_inputs_gaussian_beam.py index b9d06034394..9ad2fd6b82b 100755 --- a/Examples/Tests/gaussian_beam/PICMI_inputs_gaussian_beam.py +++ b/Examples/Tests/gaussian_beam/PICMI_inputs_gaussian_beam.py @@ -1,19 +1,25 @@ #!/usr/bin/env python3 -#from warp import picmi +# from warp import picmi import 
argparse from pywarpx import picmi parser = argparse.ArgumentParser(description="Gaussian beam PICMI example") -parser.add_argument('--diagformat', type=str, - help='Format of the full diagnostics (plotfile, openpmd, ascent, sensei, ...)', - default='plotfile') -parser.add_argument('--fields_to_plot', type=str, - help='List of fields to write to diagnostics', - default=['E', 'B', 'J', 'part_per_cell'], - nargs = '*') +parser.add_argument( + "--diagformat", + type=str, + help="Format of the full diagnostics (plotfile, openpmd, ascent, sensei, ...)", + default="plotfile", +) +parser.add_argument( + "--fields_to_plot", + type=str, + help="List of fields to write to diagnostics", + default=["E", "B", "J", "part_per_cell"], + nargs="*", +) args = parser.parse_args() @@ -23,73 +29,97 @@ ny = 32 nz = 32 -xmin = -2. -xmax = +2. -ymin = -2. -ymax = +2. -zmin = -2. -zmax = +2. +xmin = -2.0 +xmax = +2.0 +ymin = -2.0 +ymax = +2.0 +zmin = -2.0 +zmax = +2.0 number_sim_particles = 32768 total_charge = 8.010883097437485e-07 beam_rms_size = 0.25 -electron_beam_divergence = -0.04*constants.c +electron_beam_divergence = -0.04 * constants.c em_order = 3 -grid = picmi.Cartesian3DGrid(number_of_cells = [nx, ny, nz], - lower_bound = [xmin, ymin, zmin], - upper_bound = [xmax, ymax, zmax], - lower_boundary_conditions = ['periodic', 'periodic', 'open'], - upper_boundary_conditions = ['periodic', 'periodic', 'open'], - lower_boundary_conditions_particles = ['periodic', 'periodic', 'absorbing'], - upper_boundary_conditions_particles = ['periodic', 'periodic', 'absorbing'], - warpx_max_grid_size=16) - -solver = picmi.ElectromagneticSolver(grid = grid, - cfl = 1., - stencil_order=[em_order,em_order,em_order]) - -electron_beam = picmi.GaussianBunchDistribution(n_physical_particles = total_charge/constants.q_e, - rms_bunch_size = [beam_rms_size, beam_rms_size, beam_rms_size], - velocity_divergence = [electron_beam_divergence, electron_beam_divergence, electron_beam_divergence]) - -proton_beam = 
picmi.GaussianBunchDistribution(n_physical_particles = total_charge/constants.q_e, - rms_bunch_size = [beam_rms_size, beam_rms_size, beam_rms_size]) - -electrons = picmi.Species(particle_type='electron', name='electrons', initial_distribution=electron_beam) -protons = picmi.Species(particle_type='proton', name='protons', initial_distribution=proton_beam) - -field_diag1 = picmi.FieldDiagnostic(name = 'diag1', - grid = grid, - period = 10, - data_list = args.fields_to_plot, - warpx_format = args.diagformat, - write_dir = '.', - warpx_file_prefix = 'Python_gaussian_beam_plt') - -part_diag1 = picmi.ParticleDiagnostic(name = 'diag1', - period = 10, - species = [electrons, protons], - data_list = ['weighting', 'momentum'], - warpx_format = args.diagformat) - -sim = picmi.Simulation(solver = solver, - max_steps = 10, - verbose = 1, - warpx_current_deposition_algo = 'direct', - warpx_use_filter = 0) - -sim.add_species(electrons, layout=picmi.PseudoRandomLayout(n_macroparticles=number_sim_particles)) -sim.add_species(protons, layout=picmi.PseudoRandomLayout(n_macroparticles=number_sim_particles)) +grid = picmi.Cartesian3DGrid( + number_of_cells=[nx, ny, nz], + lower_bound=[xmin, ymin, zmin], + upper_bound=[xmax, ymax, zmax], + lower_boundary_conditions=["periodic", "periodic", "open"], + upper_boundary_conditions=["periodic", "periodic", "open"], + lower_boundary_conditions_particles=["periodic", "periodic", "absorbing"], + upper_boundary_conditions_particles=["periodic", "periodic", "absorbing"], + warpx_max_grid_size=16, +) + +solver = picmi.ElectromagneticSolver( + grid=grid, cfl=1.0, stencil_order=[em_order, em_order, em_order] +) + +electron_beam = picmi.GaussianBunchDistribution( + n_physical_particles=total_charge / constants.q_e, + rms_bunch_size=[beam_rms_size, beam_rms_size, beam_rms_size], + velocity_divergence=[ + electron_beam_divergence, + electron_beam_divergence, + electron_beam_divergence, + ], +) + +proton_beam = picmi.GaussianBunchDistribution( + 
n_physical_particles=total_charge / constants.q_e, + rms_bunch_size=[beam_rms_size, beam_rms_size, beam_rms_size], +) + +electrons = picmi.Species( + particle_type="electron", name="electrons", initial_distribution=electron_beam +) +protons = picmi.Species( + particle_type="proton", name="protons", initial_distribution=proton_beam +) + +field_diag1 = picmi.FieldDiagnostic( + name="diag1", + grid=grid, + period=10, + data_list=args.fields_to_plot, + warpx_format=args.diagformat, + write_dir=".", + warpx_file_prefix="Python_gaussian_beam_plt", +) + +part_diag1 = picmi.ParticleDiagnostic( + name="diag1", + period=10, + species=[electrons, protons], + data_list=["weighting", "momentum"], + warpx_format=args.diagformat, +) + +sim = picmi.Simulation( + solver=solver, + max_steps=10, + verbose=1, + warpx_current_deposition_algo="direct", + warpx_use_filter=0, +) + +sim.add_species( + electrons, layout=picmi.PseudoRandomLayout(n_macroparticles=number_sim_particles) +) +sim.add_species( + protons, layout=picmi.PseudoRandomLayout(n_macroparticles=number_sim_particles) +) sim.add_diagnostic(field_diag1) sim.add_diagnostic(part_diag1) # write_inputs will create an inputs file that can be used to run # with the compiled version. 
-#sim.write_input_file(file_name = 'inputs_from_PICMI') +# sim.write_input_file(file_name = 'inputs_from_PICMI') # Alternatively, sim.step will run WarpX, controlling it from Python sim.step() diff --git a/Examples/Tests/gaussian_beam/analysis_focusing_beam.py b/Examples/Tests/gaussian_beam/analysis_focusing_beam.py index 4a5fa3b927b..c2318d0cb7d 100755 --- a/Examples/Tests/gaussian_beam/analysis_focusing_beam.py +++ b/Examples/Tests/gaussian_beam/analysis_focusing_beam.py @@ -13,56 +13,63 @@ import numpy as np from scipy.constants import c, eV, m_e, micro, nano -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI from openpmd_viewer import OpenPMDTimeSeries -GeV=1e9*eV -energy = 125.*GeV -gamma = energy/(m_e*c**2) -sigmax = 516.0*nano -sigmay = 7.7*nano -sigmaz = 300.*micro +GeV = 1e9 * eV +energy = 125.0 * GeV +gamma = energy / (m_e * c**2) +sigmax = 516.0 * nano +sigmay = 7.7 * nano +sigmaz = 300.0 * micro nz = 256 -Lz = 20*sigmaz -gridz = np.linspace(-0.5*Lz, 0.5*Lz, nz) +Lz = 20 * sigmaz +gridz = np.linspace(-0.5 * Lz, 0.5 * Lz, nz) tol = gridz[1] - gridz[0] -emitx = 50*micro -emity = 20*nano -focal_distance = 4*sigmaz +emitx = 50 * micro +emity = 20 * nano +focal_distance = 4 * sigmaz + def s(z, sigma0, emit): - '''The theoretical size of a focusing beam (in the absence of space charge), - at position z, given its emittance and size at focus.''' - return np.sqrt(sigma0**2 + emit**2 * (z - focal_distance)**2 / sigma0**2) + """The theoretical size of a focusing beam (in the absence of space charge), + at position z, given its emittance and size at focus.""" + return np.sqrt(sigma0**2 + emit**2 * (z - focal_distance) ** 2 / sigma0**2) + filename = sys.argv[1] -ts = OpenPMDTimeSeries('./diags/openpmd/') +ts = OpenPMDTimeSeries("./diags/openpmd/") -x, y, z, w, = ts.get_particle( ['x', 'y', 'z', 'w'], species='beam1', iteration=0, plot=False) +( + x, + y, + z, + w, +) = 
ts.get_particle(["x", "y", "z", "w"], species="beam1", iteration=0, plot=False) -imin = np.argmin(np.sqrt((gridz+0.8*focal_distance)**2)) -imax = np.argmin(np.sqrt((gridz-0.8*focal_distance)**2)) +imin = np.argmin(np.sqrt((gridz + 0.8 * focal_distance) ** 2)) +imax = np.argmin(np.sqrt((gridz - 0.8 * focal_distance) ** 2)) sx, sy = [], [] # Compute the size of the beam in each z slice subgrid = gridz[imin:imax] for d in subgrid: - i = np.sqrt((z - d)**2) < tol - if (np.sum(i)!=0): + i = np.sqrt((z - d) ** 2) < tol + if np.sum(i) != 0: mux = np.average(x[i], weights=w[i]) muy = np.average(y[i], weights=w[i]) - sx.append(np.sqrt(np.average((x[i]-mux)**2, weights=w[i]))) - sy.append(np.sqrt(np.average((y[i]-muy)**2, weights=w[i]))) + sx.append(np.sqrt(np.average((x[i] - mux) ** 2, weights=w[i]))) + sy.append(np.sqrt(np.average((y[i] - muy) ** 2, weights=w[i]))) # Theoretical prediction for the size of the beam in each z slice -sx_theory = s(subgrid, sigmax, emitx/gamma) -sy_theory = s(subgrid, sigmay, emity/gamma) +sx_theory = s(subgrid, sigmax, emitx / gamma) +sy_theory = s(subgrid, sigmay, emity / gamma) -assert(np.allclose(sx, sx_theory, rtol=0.051, atol=0)) -assert(np.allclose(sy, sy_theory, rtol=0.038, atol=0)) +assert np.allclose(sx, sx_theory, rtol=0.051, atol=0) +assert np.allclose(sy, sy_theory, rtol=0.038, atol=0) test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/initial_distribution/analysis_distribution.py b/Examples/Tests/initial_distribution/analysis_distribution.py index 5a3774133db..6d23c5da1e4 100755 --- a/Examples/Tests/initial_distribution/analysis_distribution.py +++ b/Examples/Tests/initial_distribution/analysis_distribution.py @@ -26,18 +26,18 @@ import scipy.special as scs from read_raw_data import read_reduced_diags, read_reduced_diags_histogram -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import 
checksumAPI filename = sys.argv[1] # print tolerance tolerance = 0.02 -print('Tolerance:', tolerance) +print("Tolerance:", tolerance) -#=============================== +# =============================== # gaussian and maxwell-boltzmann -#=============================== +# =============================== # load data bin_value, h1x = read_reduced_diags_histogram("h1x.txt")[2:] @@ -49,31 +49,46 @@ # parameters of theory u_rms = 0.01 -gamma = np.sqrt(1.0+u_rms*u_rms) +gamma = np.sqrt(1.0 + u_rms * u_rms) v_rms = u_rms / gamma * scc.c -n = 1.0e21 -V = 8.0 -db = 0.0016 +n = 1.0e21 +V = 8.0 +db = 0.0016 # compute the analytical solution -f = n*V*scc.c*db*np.exp(-0.5*(bin_value*scc.c/v_rms)**2)/(v_rms*np.sqrt(2.0*scc.pi)) +f = ( + n + * V + * scc.c + * db + * np.exp(-0.5 * (bin_value * scc.c / v_rms) ** 2) + / (v_rms * np.sqrt(2.0 * scc.pi)) +) f_peak = np.amax(f) # compute error # note that parameters are chosen such that gaussian and # maxwell-boltzmann distributions are identical -f1_error = np.sum(np.abs(f-h1x)+np.abs(f-h1y)+np.abs(f-h1z))/bin_value.size / f_peak -f2_error = np.sum(np.abs(f-h2x)+np.abs(f-h2y)+np.abs(f-h2z))/bin_value.size / f_peak - -print('Gaussian distribution difference:', f1_error) -print('Maxwell-Boltzmann distribution difference:', f2_error) - -assert(f1_error < tolerance) -assert(f2_error < tolerance) - -#================ +f1_error = ( + np.sum(np.abs(f - h1x) + np.abs(f - h1y) + np.abs(f - h1z)) + / bin_value.size + / f_peak +) +f2_error = ( + np.sum(np.abs(f - h2x) + np.abs(f - h2y) + np.abs(f - h2z)) + / bin_value.size + / f_peak +) + +print("Gaussian distribution difference:", f1_error) +print("Maxwell-Boltzmann distribution difference:", f2_error) + +assert f1_error < tolerance +assert f2_error < tolerance + +# ================ # maxwell-juttner -#================ +# ================ # load data bin_value, bin_data = read_reduced_diags_histogram("h3.txt")[2:] @@ -81,80 +96,106 @@ # parameters of theory theta = 1.0 -K2 = scs.kn(2,1.0/theta) 
-n = 1.0e21 -V = 8.0 -db = 0.22 +K2 = scs.kn(2, 1.0 / theta) +n = 1.0e21 +V = 8.0 +db = 0.22 # compute the analytical solution -f = n*V*db * bin_value**2 * np.sqrt(1.0-1.0/bin_value**2) / \ - (theta*K2) * np.exp(-bin_value/theta) +f = ( + n + * V + * db + * bin_value**2 + * np.sqrt(1.0 - 1.0 / bin_value**2) + / (theta * K2) + * np.exp(-bin_value / theta) +) f_peak = np.amax(f) # analytical solution for the filtered histogram: we just filter out gamma values < 5.5 -f_filtered = f*(bin_value > 5.5) +f_filtered = f * (bin_value > 5.5) # compute error -f3_error = np.sum( np.abs(f-bin_data) + np.abs(f_filtered-bin_data_filtered) ) \ - / bin_value.size / f_peak +f3_error = ( + np.sum(np.abs(f - bin_data) + np.abs(f_filtered - bin_data_filtered)) + / bin_value.size + / f_peak +) -print('Maxwell-Juttner distribution difference:', f3_error) +print("Maxwell-Juttner distribution difference:", f3_error) -assert(f3_error < tolerance) +assert f3_error < tolerance -#============== +# ============== # gaussian beam -#============== +# ============== # load data bin_value, h4x = read_reduced_diags_histogram("h4x.txt")[2:] h4y = read_reduced_diags_histogram("h4y.txt")[3] h4z = read_reduced_diags_histogram("h4z.txt")[3] _, bmmntr = read_reduced_diags("bmmntr.txt") -charge = bmmntr['charge'][0] +charge = bmmntr["charge"][0] # parameters of theory x_rms = 0.25 -z_cut = 2. +z_cut = 2.0 q_tot = -1.0e-20 -q_e = -1.602176634e-19 -npart = q_tot/q_e -db = bin_value[1]-bin_value[0] +q_e = -1.602176634e-19 +npart = q_tot / q_e +db = bin_value[1] - bin_value[0] # compute the analytical solution -f_xy = npart*db * np.exp(-0.5*(bin_value/x_rms)**2)/(x_rms*np.sqrt(2.0*scc.pi)) * scs.erf(z_cut/np.sqrt(2.)) -f_z = npart*db * np.exp(-0.5*(bin_value/x_rms)**2)/(x_rms*np.sqrt(2.0*scc.pi)) -f_z[ np.absolute(bin_value) > z_cut * x_rms ] = 0. 
+f_xy = ( + npart + * db + * np.exp(-0.5 * (bin_value / x_rms) ** 2) + / (x_rms * np.sqrt(2.0 * scc.pi)) + * scs.erf(z_cut / np.sqrt(2.0)) +) +f_z = ( + npart + * db + * np.exp(-0.5 * (bin_value / x_rms) ** 2) + / (x_rms * np.sqrt(2.0 * scc.pi)) +) +f_z[np.absolute(bin_value) > z_cut * x_rms] = 0.0 f_peak = np.amax(f_z) -q_tot_cut = q_tot * scs.erf(z_cut/np.sqrt(2.)) +q_tot_cut = q_tot * scs.erf(z_cut / np.sqrt(2.0)) # compute error -f4_error = np.sum(np.abs(f_xy-h4x)+np.abs(f_xy-h4y)+np.abs(f_z-h4z))/bin_value.size / f_peak +f4_error = ( + np.sum(np.abs(f_xy - h4x) + np.abs(f_xy - h4y) + np.abs(f_z - h4z)) + / bin_value.size + / f_peak +) charge_error = np.abs((q_tot_cut - charge) / q_tot) do_plot = False if do_plot: import matplotlib.pyplot as plt + plt.figure() plt.subplot(121) - plt.plot(bin_value, f_xy, '+-', label='ref') - plt.plot(bin_value, h4x, '+--', label='sim') + plt.plot(bin_value, f_xy, "+-", label="ref") + plt.plot(bin_value, h4x, "+--", label="sim") plt.legend() plt.subplot(122) - plt.plot(bin_value, f_z, '+-', label='ref') - plt.plot(bin_value, h4z, '+--', label='sim') + plt.plot(bin_value, f_z, "+-", label="ref") + plt.plot(bin_value, h4z, "+--", label="sim") plt.legend() - plt.savefig('toto.pdf', bbox_inches='tight') + plt.savefig("toto.pdf", bbox_inches="tight") -print('Gaussian position distribution difference:', f4_error) -assert(f4_error < tolerance) +print("Gaussian position distribution difference:", f4_error) +assert f4_error < tolerance -print('Relative beam charge difference:', charge_error) -assert(charge_error < tolerance) +print("Relative beam charge difference:", charge_error) +assert charge_error < tolerance -#============================================= +# ============================================= # maxwell-juttner with temperature from parser -#============================================= +# ============================================= # load data bin_value, bin_data_neg = read_reduced_diags_histogram("h5_neg.txt")[2:] @@ 
-164,32 +205,49 @@ # _neg denotes where x<0, _pos where x>0 theta_neg = 1.0 theta_pos = 2.0 -K2_neg = scs.kn(2,1.0/theta_neg) -K2_pos = scs.kn(2,1.0/theta_pos) -n = 1.0e21 -V = 8.0 / 2 # because each of these are for half the domain -db = 0.22 +K2_neg = scs.kn(2, 1.0 / theta_neg) +K2_pos = scs.kn(2, 1.0 / theta_pos) +n = 1.0e21 +V = 8.0 / 2 # because each of these are for half the domain +db = 0.22 # compute the analytical solution for each half of the domain -f_neg = n*V*db * bin_value**2 * np.sqrt(1.0-1.0/bin_value**2) / \ - (theta_neg*K2_neg) * np.exp(-bin_value/theta_neg) +f_neg = ( + n + * V + * db + * bin_value**2 + * np.sqrt(1.0 - 1.0 / bin_value**2) + / (theta_neg * K2_neg) + * np.exp(-bin_value / theta_neg) +) f_neg_peak = np.amax(f_neg) -f_pos = n*V*db * bin_value**2 * np.sqrt(1.0-1.0/bin_value**2) / \ - (theta_pos*K2_pos) * np.exp(-bin_value/theta_pos) +f_pos = ( + n + * V + * db + * bin_value**2 + * np.sqrt(1.0 - 1.0 / bin_value**2) + / (theta_pos * K2_pos) + * np.exp(-bin_value / theta_pos) +) f_pos_peak = np.amax(f_pos) f_peak = max(f_neg_peak, f_pos_peak) # compute error -f5_error = np.sum( np.abs(f_neg-bin_data_neg) + np.abs(f_pos-bin_data_pos) ) \ - / bin_value.size / f_peak +f5_error = ( + np.sum(np.abs(f_neg - bin_data_neg) + np.abs(f_pos - bin_data_pos)) + / bin_value.size + / f_peak +) -print('Maxwell-Juttner parser temperature difference:', f5_error) +print("Maxwell-Juttner parser temperature difference:", f5_error) -assert(f5_error < tolerance) +assert f5_error < tolerance -#============================================== +# ============================================== # maxwell-boltzmann with constant bulk velocity -#============================================== +# ============================================== # load data bin_value_g, bin_data_g = read_reduced_diags_histogram("h6.txt")[2:] @@ -197,14 +255,14 @@ # Expected values for beta and u = beta*gamma beta_const = 0.2 -g_const = 1. / np.sqrt(1. 
- beta_const * beta_const) +g_const = 1.0 / np.sqrt(1.0 - beta_const * beta_const) uy_const = beta_const * g_const g_bin_size = 0.004 -g_bin_min = 1. +g_bin_min = 1.0 uy_bin_size = 0.04 -uy_bin_min = -1. -V = 8.0 # volume in m^3 -n = 1.0e21 # number density in 1/m^3 +uy_bin_min = -1.0 +V = 8.0 # volume in m^3 +n = 1.0e21 # number density in 1/m^3 f_g = np.zeros_like(bin_value_g) i_g = int(np.floor((g_const - g_bin_min) / g_bin_size)) @@ -215,16 +273,19 @@ i_uy = int(np.floor((-uy_const - uy_bin_min) / uy_bin_size)) f_uy[i_uy] = n * V -f6_error = np.sum(np.abs(f_g - bin_data_g) + np.abs(f_uy - bin_data_uy)) \ - / bin_value_g.size / f_peak +f6_error = ( + np.sum(np.abs(f_g - bin_data_g) + np.abs(f_uy - bin_data_uy)) + / bin_value_g.size + / f_peak +) -print('Maxwell-Boltzmann constant velocity difference:', f6_error) +print("Maxwell-Boltzmann constant velocity difference:", f6_error) -assert(f6_error < tolerance) +assert f6_error < tolerance -#============================================ +# ============================================ # maxwell-boltzmann with parser bulk velocity -#============================================ +# ============================================ # load data bin_value_g, bin_data_g = read_reduced_diags_histogram("h7.txt")[2:] @@ -233,14 +294,14 @@ # Expected values for beta and u = beta*gamma beta_const = 0.2 -g_const = 1. / np.sqrt(1. - beta_const * beta_const) +g_const = 1.0 / np.sqrt(1.0 - beta_const * beta_const) uy_const = beta_const * g_const g_bin_size = 0.004 -g_bin_min = 1. +g_bin_min = 1.0 uy_bin_size = 0.04 -uy_bin_min = -1. -V = 8.0 # volume in m^3 -n = 1.0e21 # number density in 1/m^3 +uy_bin_min = -1.0 +V = 8.0 # volume in m^3 +n = 1.0e21 # number density in 1/m^3 f_g = np.zeros_like(bin_value_g) i_g = int(np.floor((g_const - g_bin_min) / g_bin_size)) @@ -249,23 +310,30 @@ f_uy_neg = np.zeros_like(bin_value_uy) i_uy_neg = int(np.floor((uy_const - uy_bin_min) / uy_bin_size)) -f_uy_neg[i_uy_neg] = n * V / 2. 
+f_uy_neg[i_uy_neg] = n * V / 2.0 f_uy_pos = np.zeros_like(bin_value_uy) i_uy_pos = int(np.floor((-uy_const - uy_bin_min) / uy_bin_size)) -f_uy_pos[i_uy_pos] = n * V / 2. +f_uy_pos[i_uy_pos] = n * V / 2.0 -f7_error = np.sum(np.abs(f_g - bin_data_g) + np.abs(f_uy_pos - bin_data_uy_pos) \ - + np.abs(f_uy_neg - bin_data_uy_neg)) / bin_value_g.size / f_peak +f7_error = ( + np.sum( + np.abs(f_g - bin_data_g) + + np.abs(f_uy_pos - bin_data_uy_pos) + + np.abs(f_uy_neg - bin_data_uy_neg) + ) + / bin_value_g.size + / f_peak +) -print('Maxwell-Boltzmann parser velocity difference:', f7_error) +print("Maxwell-Boltzmann parser velocity difference:", f7_error) -assert(f7_error < tolerance) +assert f7_error < tolerance -#============================================ +# ============================================ # Cuboid distribution in momentum space -#============================================ +# ============================================ bin_value_x, h8x = read_reduced_diags_histogram("h8x.txt")[2:] bin_value_y, h8y = read_reduced_diags_histogram("h8y.txt")[2:] @@ -284,6 +352,7 @@ # Distributions along the three momentum axes are independent: # we can test them separately + # This counts the number of bins where we expect the distribution to be nonzero def nonzero_bins(bins, low, high): # Bin with nonzero distribution is defined when b_{i+1} > u_min & b_i < u_max @@ -292,7 +361,8 @@ def nonzero_bins(bins, low, high): db = bins[1] - bins[0] loweredges = bins - 0.5 * db upperedges = bins + 0.5 * db - return ((upperedges > low) & (loweredges < high)) + return (upperedges > low) & (loweredges < high) + # Function that checks the validity of the histogram. # We have to call it for each of the axis @@ -313,7 +383,7 @@ def check_validity_uniform(bins, histogram, u_min, u_max, Ntrials=1000): # u_max = u_min (i.e. 
a delta distribution) if Nbins == 1: # In this case the result should be exact - assert( (histogram[nzbins].item() - 1) < 1e-8 ) + assert (histogram[nzbins].item() - 1) < 1e-8 return @@ -324,7 +394,9 @@ def check_validity_uniform(bins, histogram, u_min, u_max, Ntrials=1000): # Filling a given bin is a binomial process, so we basically test each histogram with the # expected average value to be (x - mu) < 3 sigma - probability = (np.clip(upperedges, u_min, u_max) - np.clip(loweredges, u_min, u_max)) / (u_max - u_min) + probability = ( + np.clip(upperedges, u_min, u_max) - np.clip(loweredges, u_min, u_max) + ) / (u_max - u_min) variance = probability * (1 - probability) nzprob = probability[nzbins] nzhist = histogram[nzbins] @@ -335,6 +407,7 @@ def check_validity_uniform(bins, histogram, u_min, u_max, Ntrials=1000): assert np.all(normalizedvariable < 3 * samplesigma) + # Test the distribution at every time step # (this assumes that no interaction is happening) for timestep in range(len(h8x)): @@ -342,32 +415,41 @@ def check_validity_uniform(bins, histogram, u_min, u_max, Ntrials=1000): check_validity_uniform(bin_value_y, h8y[timestep] / N0, uy_min, uy_max) check_validity_uniform(bin_value_z, h8z[timestep] / N0, uz_min, uz_max) -#================================================= +# ================================================= # Gaussian with parser mean and standard deviation -#================================================= +# ================================================= # load data bin_value_ux, bin_data_ux = read_reduced_diags_histogram("h9x.txt")[2:] bin_value_uy, bin_data_uy = read_reduced_diags_histogram("h9y.txt")[2:] bin_value_uz, bin_data_uz = read_reduced_diags_histogram("h9z.txt")[2:] -def Gaussian(mean, sigma, u): - V = 8.0 # volume in m^3 - n = 1.0e21 # number density in 1/m^3 - return (n*V/(sigma*np.sqrt(2.*np.pi)))*np.exp(-(u - mean)**2/(2.*sigma**2)) - -du = 2./50 -f_ux = Gaussian(0.1 , 0.2 , bin_value_ux)*du -f_uy = Gaussian(0.12, 0.21, 
bin_value_uy)*du -f_uz = Gaussian(0.14, 0.22, bin_value_uz)*du -f9_error = np.sum(np.abs(f_ux - bin_data_ux)/f_ux.max() - +np.abs(f_uy - bin_data_uy)/f_ux.max() - +np.abs(f_uz - bin_data_uz)/f_uz.max()) / bin_value_ux.size - -print('gaussian_parse_momentum_function velocity difference:', f9_error) - -assert(f9_error < tolerance) +def Gaussian(mean, sigma, u): + V = 8.0 # volume in m^3 + n = 1.0e21 # number density in 1/m^3 + return (n * V / (sigma * np.sqrt(2.0 * np.pi))) * np.exp( + -((u - mean) ** 2) / (2.0 * sigma**2) + ) + + +du = 2.0 / 50 +f_ux = Gaussian(0.1, 0.2, bin_value_ux) * du +f_uy = Gaussian(0.12, 0.21, bin_value_uy) * du +f_uz = Gaussian(0.14, 0.22, bin_value_uz) * du + +f9_error = ( + np.sum( + np.abs(f_ux - bin_data_ux) / f_ux.max() + + np.abs(f_uy - bin_data_uy) / f_ux.max() + + np.abs(f_uz - bin_data_uz) / f_uz.max() + ) + / bin_value_ux.size +) + +print("gaussian_parse_momentum_function velocity difference:", f9_error) + +assert f9_error < tolerance test_name = os.path.split(os.getcwd())[1] diff --git a/Examples/Tests/initial_plasma_profile/analysis.py b/Examples/Tests/initial_plasma_profile/analysis.py index b8cfaad1048..f5fc75ee578 100755 --- a/Examples/Tests/initial_plasma_profile/analysis.py +++ b/Examples/Tests/initial_plasma_profile/analysis.py @@ -13,7 +13,7 @@ yt.funcs.mylog.setLevel(50) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # Name of the plotfile diff --git a/Examples/Tests/ion_stopping/analysis_ion_stopping.py b/Examples/Tests/ion_stopping/analysis_ion_stopping.py index d7774c14d6b..f1ad3bc8b2b 100755 --- a/Examples/Tests/ion_stopping/analysis_ion_stopping.py +++ b/Examples/Tests/ion_stopping/analysis_ion_stopping.py @@ -19,7 +19,7 @@ import yt from scipy.constants import e, epsilon_0, k, m_e, m_p -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import 
checksumAPI # Define constants using the WarpX names for the evals below @@ -27,16 +27,17 @@ kb = k # Tolerance on the error in the final energy (in eV) -tolerance = 1.e-7 +tolerance = 1.0e-7 last_filename = sys.argv[1] # Remove trailing '/' from file name, if necessary -last_filename.rstrip('/') +last_filename.rstrip("/") # Find last iteration in file name, such as 'test_name_plt000001' (last_it = '000001') -last_it = re.search('\d+$', last_filename).group() +last_it = re.search("\d+$", last_filename).group() # Find output prefix in file name, such as 'test_name_plt000001' (prefix = 'test_name_plt') -prefix = last_filename[:-len(last_it)] +prefix = last_filename[: -len(last_it)] + def stopping_from_electrons(ne, Te, Zb, ion_mass): """Calculate the coefficient in equation 14.13 from @@ -46,14 +47,23 @@ def stopping_from_electrons(ne, Te, Zb, ion_mass): Zb: ion charge state ion_mass: (kg) """ - vthe = np.sqrt(3.*Te*e/m_e) - wpe = np.sqrt(ne*e**2/(epsilon_0*m_e)) - lambdadb = vthe/wpe - loglambda = np.log((12.*np.pi/Zb)*(ne*lambdadb**3)) + vthe = np.sqrt(3.0 * Te * e / m_e) + wpe = np.sqrt(ne * e**2 / (epsilon_0 * m_e)) + lambdadb = vthe / wpe + loglambda = np.log((12.0 * np.pi / Zb) * (ne * lambdadb**3)) # Goldston's equation 14.13 - dEdt = - np.sqrt(2.)*ne*Zb**2*e**4*np.sqrt(m_e)*loglambda/(6.*np.pi**1.5*epsilon_0**2*ion_mass*(Te*e)**1.5) + dEdt = ( + -np.sqrt(2.0) + * ne + * Zb**2 + * e**4 + * np.sqrt(m_e) + * loglambda + / (6.0 * np.pi**1.5 * epsilon_0**2 * ion_mass * (Te * e) ** 1.5) + ) return dEdt + def stopping_from_ions(dt, ni, Ti, mi, Zi, Zb, ion_mass, ion_energy): """ ni: background ion density @@ -64,69 +74,83 @@ def stopping_from_ions(dt, ni, Ti, mi, Zi, Zb, ion_mass, ion_energy): ion_mass: (kg) ion_energy: (eV) """ - vthi = np.sqrt(3.*Ti*e/mi) - wpi = np.sqrt(ni*e**2/(epsilon_0*mi)) - lambdadb = vthi/wpi - loglambda = np.log((12.*np.pi/Zb)*(ni*lambdadb**3)) - alpha = np.sqrt(2.)*ni*Zi**2*Zb**2*e**4*np.sqrt(ion_mass)*loglambda/(8.*np.pi*epsilon_0**2*mi) 
- f1 = np.clip((ion_energy*e)**(3./2.) - 3./2.*alpha*dt, 0., None) - ion_energy = (f1)**(2./3.)/e + vthi = np.sqrt(3.0 * Ti * e / mi) + wpi = np.sqrt(ni * e**2 / (epsilon_0 * mi)) + lambdadb = vthi / wpi + loglambda = np.log((12.0 * np.pi / Zb) * (ni * lambdadb**3)) + alpha = ( + np.sqrt(2.0) + * ni + * Zi**2 + * Zb**2 + * e**4 + * np.sqrt(ion_mass) + * loglambda + / (8.0 * np.pi * epsilon_0**2 * mi) + ) + f1 = np.clip((ion_energy * e) ** (3.0 / 2.0) - 3.0 / 2.0 * alpha * dt, 0.0, None) + ion_energy = (f1) ** (2.0 / 3.0) / e return ion_energy + # Fetch background parameters and initial particle data ds0 = yt.load(f'{prefix}{len(last_it)*"0"}') ad0 = ds0.all_data() -Zb = 1. # Ion charge state +Zb = 1.0 # Ion charge state -ne = float(ds0.parameters['stopping_on_electrons_constant.background_density']) -Te = eval(ds0.parameters['stopping_on_electrons_constant.background_temperature'])*kb/e +ne = float(ds0.parameters["stopping_on_electrons_constant.background_density"]) +Te = ( + eval(ds0.parameters["stopping_on_electrons_constant.background_temperature"]) + * kb + / e +) ion_mass12 = m_p -mi = eval(ds0.parameters['stopping_on_ions_constant.background_mass']) -Zi = float(ds0.parameters['stopping_on_ions_constant.background_charge_state']) -ni = float(ds0.parameters['stopping_on_ions_constant.background_density']) -Ti = eval(ds0.parameters['stopping_on_ions_constant.background_temperature'])*kb/e -ion_mass34 = 4.*m_p +mi = eval(ds0.parameters["stopping_on_ions_constant.background_mass"]) +Zi = float(ds0.parameters["stopping_on_ions_constant.background_charge_state"]) +ni = float(ds0.parameters["stopping_on_ions_constant.background_density"]) +Ti = eval(ds0.parameters["stopping_on_ions_constant.background_temperature"]) * kb / e +ion_mass34 = 4.0 * m_p # For ions1, the background parameters are constants -vx = ad0[('ions1', 'particle_momentum_x')].to_ndarray()/ion_mass12 -vy = ad0[('ions1', 'particle_momentum_y')].to_ndarray()/ion_mass12 -vz = ad0[('ions1', 
'particle_momentum_z')].to_ndarray()/ion_mass12 -EE1 = 0.5*ion_mass12*(vx**2 + vy**2 + vz**2)/e +vx = ad0[("ions1", "particle_momentum_x")].to_ndarray() / ion_mass12 +vy = ad0[("ions1", "particle_momentum_y")].to_ndarray() / ion_mass12 +vz = ad0[("ions1", "particle_momentum_z")].to_ndarray() / ion_mass12 +EE1 = 0.5 * ion_mass12 * (vx**2 + vy**2 + vz**2) / e # For ions2, the background parameters are parsed -xx = ad0[('ions2', 'particle_position_x')].to_ndarray()/ion_mass12 -yy = ad0[('ions2', 'particle_position_y')].to_ndarray()/ion_mass12 -ne2 = np.where(xx > 0., 1.e20, 1.e21) -Te2 = np.where(yy > 0., 1., 2.) +xx = ad0[("ions2", "particle_position_x")].to_ndarray() / ion_mass12 +yy = ad0[("ions2", "particle_position_y")].to_ndarray() / ion_mass12 +ne2 = np.where(xx > 0.0, 1.0e20, 1.0e21) +Te2 = np.where(yy > 0.0, 1.0, 2.0) -vx = ad0[('ions2', 'particle_momentum_x')].to_ndarray()/ion_mass12 -vy = ad0[('ions2', 'particle_momentum_y')].to_ndarray()/ion_mass12 -vz = ad0[('ions2', 'particle_momentum_z')].to_ndarray()/ion_mass12 -EE2 = 0.5*ion_mass12*(vx**2 + vy**2 + vz**2)/e +vx = ad0[("ions2", "particle_momentum_x")].to_ndarray() / ion_mass12 +vy = ad0[("ions2", "particle_momentum_y")].to_ndarray() / ion_mass12 +vz = ad0[("ions2", "particle_momentum_z")].to_ndarray() / ion_mass12 +EE2 = 0.5 * ion_mass12 * (vx**2 + vy**2 + vz**2) / e # For ions3, the background parameters are constants -vx = ad0[('ions3', 'particle_momentum_x')].to_ndarray()/ion_mass34 -vy = ad0[('ions3', 'particle_momentum_y')].to_ndarray()/ion_mass34 -vz = ad0[('ions3', 'particle_momentum_z')].to_ndarray()/ion_mass34 -EE3 = 0.5*ion_mass34*(vx**2 + vy**2 + vz**2)/e +vx = ad0[("ions3", "particle_momentum_x")].to_ndarray() / ion_mass34 +vy = ad0[("ions3", "particle_momentum_y")].to_ndarray() / ion_mass34 +vz = ad0[("ions3", "particle_momentum_z")].to_ndarray() / ion_mass34 +EE3 = 0.5 * ion_mass34 * (vx**2 + vy**2 + vz**2) / e # For ions4, the background parameters are parsed -xx = ad0[('ions4', 
'particle_position_x')].to_ndarray()/ion_mass34 -yy = ad0[('ions4', 'particle_position_y')].to_ndarray()/ion_mass34 -ni4 = np.where(xx > 0., 1.e20, 1.e21) -Ti4 = np.where(yy > 0., 0.05, 0.10) +xx = ad0[("ions4", "particle_position_x")].to_ndarray() / ion_mass34 +yy = ad0[("ions4", "particle_position_y")].to_ndarray() / ion_mass34 +ni4 = np.where(xx > 0.0, 1.0e20, 1.0e21) +Ti4 = np.where(yy > 0.0, 0.05, 0.10) -vx = ad0[('ions4', 'particle_momentum_x')].to_ndarray()/ion_mass34 -vy = ad0[('ions4', 'particle_momentum_y')].to_ndarray()/ion_mass34 -vz = ad0[('ions4', 'particle_momentum_z')].to_ndarray()/ion_mass34 -EE4 = 0.5*ion_mass34*(vx**2 + vy**2 + vz**2)/e +vx = ad0[("ions4", "particle_momentum_x")].to_ndarray() / ion_mass34 +vy = ad0[("ions4", "particle_momentum_y")].to_ndarray() / ion_mass34 +vz = ad0[("ions4", "particle_momentum_z")].to_ndarray() / ion_mass34 +EE4 = 0.5 * ion_mass34 * (vx**2 + vy**2 + vz**2) / e ds = yt.load(last_filename) ad = ds.all_data() -dt = ds.current_time.to_value()/int(last_it) +dt = ds.current_time.to_value() / int(last_it) # Step through the same number of time steps a_EE1 = EE1 @@ -135,42 +159,42 @@ def stopping_from_ions(dt, ni, Ti, mi, Zi, Zb, ion_mass, ion_energy): a_EE4 = EE4 for it in range(int(last_it)): dEdt1 = stopping_from_electrons(ne, Te, Zb, ion_mass12) - a_EE1 *= np.exp(dEdt1*dt) + a_EE1 *= np.exp(dEdt1 * dt) dEdt2 = stopping_from_electrons(ne2, Te2, Zb, ion_mass12) - a_EE2 *= np.exp(dEdt2*dt) + a_EE2 *= np.exp(dEdt2 * dt) a_EE3 = stopping_from_ions(dt, ni, Ti, mi, Zi, Zb, ion_mass34, a_EE3) a_EE4 = stopping_from_ions(dt, ni4, Ti4, mi, Zi, Zb, ion_mass34, a_EE4) # Fetch the final particle data -vx = ad[('ions1', 'particle_momentum_x')].to_ndarray()/ion_mass12 -vy = ad[('ions1', 'particle_momentum_y')].to_ndarray()/ion_mass12 -vz = ad[('ions1', 'particle_momentum_z')].to_ndarray()/ion_mass12 -EE1 = 0.5*ion_mass12*(vx**2 + vy**2 + vz**2)/e - -vx = ad[('ions2', 'particle_momentum_x')].to_ndarray()/ion_mass12 -vy = 
ad[('ions2', 'particle_momentum_y')].to_ndarray()/ion_mass12 -vz = ad[('ions2', 'particle_momentum_z')].to_ndarray()/ion_mass12 -EE2 = 0.5*ion_mass12*(vx**2 + vy**2 + vz**2)/e - -vx = ad[('ions3', 'particle_momentum_x')].to_ndarray()/ion_mass34 -vy = ad[('ions3', 'particle_momentum_y')].to_ndarray()/ion_mass34 -vz = ad[('ions3', 'particle_momentum_z')].to_ndarray()/ion_mass34 -EE3 = 0.5*ion_mass34*(vx**2 + vy**2 + vz**2)/e - -vx = ad[('ions4', 'particle_momentum_x')].to_ndarray()/ion_mass34 -vy = ad[('ions4', 'particle_momentum_y')].to_ndarray()/ion_mass34 -vz = ad[('ions4', 'particle_momentum_z')].to_ndarray()/ion_mass34 -EE4 = 0.5*ion_mass34*(vx**2 + vy**2 + vz**2)/e +vx = ad[("ions1", "particle_momentum_x")].to_ndarray() / ion_mass12 +vy = ad[("ions1", "particle_momentum_y")].to_ndarray() / ion_mass12 +vz = ad[("ions1", "particle_momentum_z")].to_ndarray() / ion_mass12 +EE1 = 0.5 * ion_mass12 * (vx**2 + vy**2 + vz**2) / e + +vx = ad[("ions2", "particle_momentum_x")].to_ndarray() / ion_mass12 +vy = ad[("ions2", "particle_momentum_y")].to_ndarray() / ion_mass12 +vz = ad[("ions2", "particle_momentum_z")].to_ndarray() / ion_mass12 +EE2 = 0.5 * ion_mass12 * (vx**2 + vy**2 + vz**2) / e + +vx = ad[("ions3", "particle_momentum_x")].to_ndarray() / ion_mass34 +vy = ad[("ions3", "particle_momentum_y")].to_ndarray() / ion_mass34 +vz = ad[("ions3", "particle_momentum_z")].to_ndarray() / ion_mass34 +EE3 = 0.5 * ion_mass34 * (vx**2 + vy**2 + vz**2) / e + +vx = ad[("ions4", "particle_momentum_x")].to_ndarray() / ion_mass34 +vy = ad[("ions4", "particle_momentum_y")].to_ndarray() / ion_mass34 +vz = ad[("ions4", "particle_momentum_z")].to_ndarray() / ion_mass34 +EE4 = 0.5 * ion_mass34 * (vx**2 + vy**2 + vz**2) / e error1 = np.abs(EE1 - a_EE1) error2 = np.abs(EE2 - a_EE2) error3 = np.abs(EE3 - a_EE3) error4 = np.abs(EE4 - a_EE4) -print('stopping on electrons error with constant = ', error1) -print('stopping on electrons error with parsed = ', error2) -print('stopping on ions error 
with constant = ', error3) -print('stopping on ions error with parsed = ', error4) -print('tolerance = ', tolerance) +print("stopping on electrons error with constant = ", error1) +print("stopping on electrons error with parsed = ", error2) +print("stopping on ions error with constant = ", error3) +print("stopping on ions error with parsed = ", error4) +print("tolerance = ", tolerance) assert np.all(error1 < tolerance) assert np.all(error2 < tolerance) diff --git a/Examples/Tests/ionization/PICMI_inputs_2d.py b/Examples/Tests/ionization/PICMI_inputs_2d.py index 802bf5435ac..00db8c83ad1 100644 --- a/Examples/Tests/ionization/PICMI_inputs_2d.py +++ b/Examples/Tests/ionization/PICMI_inputs_2d.py @@ -14,9 +14,9 @@ # Physical domain xmin = -5e-06 -xmax = 5e-06 -zmin = 0e-06 -zmax = 20e-06 +xmax = 5e-06 +zmin = 0e-06 +zmax = 20e-06 # Domain decomposition max_grid_size = 64 @@ -24,13 +24,14 @@ # Create grid grid = picmi.Cartesian2DGrid( - number_of_cells = [nx, nz], - lower_bound = [xmin, zmin], - upper_bound = [xmax, zmax], - lower_boundary_conditions = ['periodic', 'open'], - upper_boundary_conditions = ['periodic', 'open'], - warpx_max_grid_size = max_grid_size, - warpx_blocking_factor = blocking_factor) + number_of_cells=[nx, nz], + lower_bound=[xmin, zmin], + upper_bound=[xmax, zmax], + lower_boundary_conditions=["periodic", "open"], + upper_boundary_conditions=["periodic", "open"], + warpx_max_grid_size=max_grid_size, + warpx_blocking_factor=blocking_factor, +) # Particles: electrons and ions ions_density = 1 @@ -41,95 +42,95 @@ ions_ymax = None ions_zmax = 15e-06 uniform_distribution = picmi.UniformDistribution( - density = ions_density, - lower_bound = [ions_xmin, ions_ymin, ions_zmin], - upper_bound = [ions_xmax, ions_ymax, ions_zmax], - fill_in = True) + density=ions_density, + lower_bound=[ions_xmin, ions_ymin, ions_zmin], + upper_bound=[ions_xmax, ions_ymax, ions_zmax], + fill_in=True, +) electrons = picmi.Species( - particle_type = 'electron', - name = 
'electrons', - warpx_add_real_attributes = {'orig_z': 'z'}) + particle_type="electron", + name="electrons", + warpx_add_real_attributes={"orig_z": "z"}, +) ions = picmi.Species( - particle_type = 'N', - name = 'ions', - charge_state = 2, - initial_distribution = uniform_distribution, - warpx_add_real_attributes = {'orig_z': 'z'}) + particle_type="N", + name="ions", + charge_state=2, + initial_distribution=uniform_distribution, + warpx_add_real_attributes={"orig_z": "z"}, +) # Field ionization nitrogen_ionization = picmi.FieldIonization( - model = "ADK", # Ammosov-Delone-Krainov model - ionized_species = ions, - product_species = electrons) + model="ADK", # Ammosov-Delone-Krainov model + ionized_species=ions, + product_species=electrons, +) # Laser position_z = 3e-06 -profile_t_peak = 60.e-15 +profile_t_peak = 60.0e-15 laser = picmi.GaussianLaser( - wavelength = 0.8e-06, - waist = 1e10, - duration = 26.685e-15, - focal_position = [0, 0, position_z], - centroid_position = [0, 0, position_z - c*profile_t_peak], - propagation_direction = [0, 0, 1], - polarization_direction = [1, 0, 0], - a0 = 1.8, - fill_in = False) + wavelength=0.8e-06, + waist=1e10, + duration=26.685e-15, + focal_position=[0, 0, position_z], + centroid_position=[0, 0, position_z - c * profile_t_peak], + propagation_direction=[0, 0, 1], + polarization_direction=[1, 0, 0], + a0=1.8, + fill_in=False, +) laser_antenna = picmi.LaserAntenna( - position = [0., 0., position_z], - normal_vector = [0, 0, 1]) + position=[0.0, 0.0, position_z], normal_vector=[0, 0, 1] +) # Electromagnetic solver -solver = picmi.ElectromagneticSolver( - grid = grid, - method = 'CKC', - cfl = 0.999) +solver = picmi.ElectromagneticSolver(grid=grid, method="CKC", cfl=0.999) # Diagnostics particle_diag = picmi.ParticleDiagnostic( - name = 'diag1', - period = 10000, - species = [electrons, ions], - data_list = ['ux', 'uy', 'uz', 'x', 'z', 'weighting', 'orig_z'], - write_dir = '.', - warpx_file_prefix = 'Python_ionization_plt') + 
name="diag1", + period=10000, + species=[electrons, ions], + data_list=["ux", "uy", "uz", "x", "z", "weighting", "orig_z"], + write_dir=".", + warpx_file_prefix="Python_ionization_plt", +) field_diag = picmi.FieldDiagnostic( - name = 'diag1', - grid = grid, - period = 10000, - data_list = ['Bx', 'By', 'Bz', 'Ex', 'Ey', 'Ez', 'Jx', 'Jy', 'Jz'], - write_dir = '.', - warpx_file_prefix = 'Python_ionization_plt') + name="diag1", + grid=grid, + period=10000, + data_list=["Bx", "By", "Bz", "Ex", "Ey", "Ez", "Jx", "Jy", "Jz"], + write_dir=".", + warpx_file_prefix="Python_ionization_plt", +) # Set up simulation sim = picmi.Simulation( - solver = solver, - max_steps = max_steps, - particle_shape = 'linear', - warpx_use_filter = 0) + solver=solver, max_steps=max_steps, particle_shape="linear", warpx_use_filter=0 +) # Add electrons and ions sim.add_species( - electrons, - layout = picmi.GriddedLayout(grid = grid, n_macroparticle_per_cell = [0, 0, 0])) + electrons, layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=[0, 0, 0]) +) sim.add_species( - ions, - layout = picmi.GriddedLayout(grid = grid, n_macroparticle_per_cell = [2, 1, 1])) + ions, layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=[2, 1, 1]) +) # Add field ionization sim.add_interaction(nitrogen_ionization) # Add laser -sim.add_laser( - laser, - injection_method = laser_antenna) +sim.add_laser(laser, injection_method=laser_antenna) # Add diagnostics sim.add_diagnostic(particle_diag) sim.add_diagnostic(field_diag) # Write input file that can be used to run with the compiled version -sim.write_input_file(file_name = 'inputs_2d_picmi') +sim.write_input_file(file_name="inputs_2d_picmi") # Initialize inputs and WarpX instance sim.initialize_inputs() diff --git a/Examples/Tests/ionization/analysis_ionization.py b/Examples/Tests/ionization/analysis_ionization.py index 90657915b50..62d3f839941 100755 --- a/Examples/Tests/ionization/analysis_ionization.py +++ 
b/Examples/Tests/ionization/analysis_ionization.py @@ -25,17 +25,17 @@ import yt yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # Open plotfile specified in command line, and get ion's ionization level. filename = sys.argv[1] -ds = yt.load( filename ) +ds = yt.load(filename) ad = ds.all_data() -ilev = ad['ions', 'particle_ionizationLevel'].v +ilev = ad["ions", "particle_ionizationLevel"].v # Fraction of Nitrogen ions that are N5+. -N5_fraction = ilev[ilev == 5].size/float(ilev.size) +N5_fraction = ilev[ilev == 5].size / float(ilev.size) print("Number of ions: " + str(ilev.size)) print("Number of N5+ : " + str(ilev[ilev == 5].size)) @@ -44,63 +44,68 @@ do_plot = False if do_plot: import matplotlib.pyplot as plt - all_data_level_0 = ds.covering_grid(level=0,left_edge=ds.domain_left_edge, - dims=ds.domain_dimensions) - F = all_data_level_0['boxlib', 'Ex'].v.squeeze() - extent = [ ds.domain_left_edge[1], ds.domain_right_edge[1], - ds.domain_left_edge[0], ds.domain_right_edge[0] ] + + all_data_level_0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions + ) + F = all_data_level_0["boxlib", "Ex"].v.squeeze() + extent = [ + ds.domain_left_edge[1], + ds.domain_right_edge[1], + ds.domain_left_edge[0], + ds.domain_right_edge[0], + ] ad = ds.all_data() # Plot ions with ionization levels - species = 'ions' - xi = ad[species, 'particle_position_x'].v - zi = ad[species, 'particle_position_y'].v - ii = ad[species, 'particle_ionizationLevel'].v - plt.figure(figsize=(10,10)) + species = "ions" + xi = ad[species, "particle_position_x"].v + zi = ad[species, "particle_position_y"].v + ii = ad[species, "particle_ionizationLevel"].v + plt.figure(figsize=(10, 10)) plt.subplot(211) - plt.imshow(np.abs(F), extent=extent, aspect='auto', - cmap='magma', origin='default') + plt.imshow(np.abs(F), extent=extent, aspect="auto", 
cmap="magma", origin="default") plt.colorbar() - for lev in range(int(np.max(ii)+1)): - select = (ii == lev) - plt.scatter(zi[select],xi[select],s=.2, - label='ionization level: ' + str(lev)) + for lev in range(int(np.max(ii) + 1)): + select = ii == lev + plt.scatter( + zi[select], xi[select], s=0.2, label="ionization level: " + str(lev) + ) plt.legend() plt.title("abs(Ex) (V/m) and ions") plt.xlabel("z (m)") plt.ylabel("x (m)") plt.subplot(212) - plt.imshow(np.abs(F), extent=extent, aspect='auto', - cmap='magma', origin='default') + plt.imshow(np.abs(F), extent=extent, aspect="auto", cmap="magma", origin="default") plt.colorbar() # Plot electrons - species = 'electrons' + species = "electrons" if species in [x[0] for x in ds.field_list]: - xe = ad[species, 'particle_position_x'].v - ze = ad[species, 'particle_position_y'].v - plt.scatter(ze,xe,s=.1,c='r',label='electrons') + xe = ad[species, "particle_position_x"].v + ze = ad[species, "particle_position_y"].v + plt.scatter(ze, xe, s=0.1, c="r", label="electrons") plt.title("abs(Ex) (V/m) and electrons") plt.xlabel("z (m)") plt.ylabel("x (m)") - plt.savefig("image_ionization.pdf", bbox_inches='tight') + plt.savefig("image_ionization.pdf", bbox_inches="tight") -error_rel = abs(N5_fraction-0.32) / 0.32 +error_rel = abs(N5_fraction - 0.32) / 0.32 tolerance_rel = 0.07 print("error_rel : " + str(error_rel)) print("tolerance_rel: " + str(tolerance_rel)) -assert( error_rel < tolerance_rel ) +assert error_rel < tolerance_rel # Check that the user runtime component (if it exists) worked as expected try: - orig_z = ad['electrons', 'particle_orig_z'].v + orig_z = ad["electrons", "particle_orig_z"].v print(f"orig_z: min = {np.min(orig_z)}, max = {np.max(orig_z)}") - assert np.all( (orig_z > 0.0) & (orig_z < 1.5e-5) ) - print('particle_orig_z has reasonable values') + assert np.all((orig_z > 0.0) & (orig_z < 1.5e-5)) + print("particle_orig_z has reasonable values") except yt.utilities.exceptions.YTFieldNotFound: - pass # The 
backtransformed diagnostic version of the test does not have orig_z + pass # The backtransformed diagnostic version of the test does not have orig_z test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/langmuir/PICMI_inputs_2d.py b/Examples/Tests/langmuir/PICMI_inputs_2d.py index 4b9c3ac300f..11020ac34fb 100755 --- a/Examples/Tests/langmuir/PICMI_inputs_2d.py +++ b/Examples/Tests/langmuir/PICMI_inputs_2d.py @@ -11,9 +11,9 @@ # physics parameters ########################## -plasma_density = 1.e25 -plasma_xmin = 0. -plasma_x_velocity = 0.1*constants.c +plasma_density = 1.0e25 +plasma_xmin = 0.0 +plasma_x_velocity = 0.1 * constants.c ########################## # numerics parameters @@ -27,65 +27,81 @@ nx = 64 ny = 64 -xmin = -20.e-6 -ymin = -20.e-6 -xmax = +20.e-6 -ymax = +20.e-6 +xmin = -20.0e-6 +ymin = -20.0e-6 +xmax = +20.0e-6 +ymax = +20.0e-6 -number_per_cell_each_dim = [2,2] +number_per_cell_each_dim = [2, 2] ########################## # physics components ########################## -uniform_plasma = picmi.UniformDistribution(density = 1.e25, - upper_bound = [0., None, None], - directed_velocity = [0.1*constants.c, 0., 0.]) +uniform_plasma = picmi.UniformDistribution( + density=1.0e25, + upper_bound=[0.0, None, None], + directed_velocity=[0.1 * constants.c, 0.0, 0.0], +) -electrons = picmi.Species(particle_type='electron', name='electrons', initial_distribution=uniform_plasma) +electrons = picmi.Species( + particle_type="electron", name="electrons", initial_distribution=uniform_plasma +) ########################## # numerics components ########################## -grid = picmi.Cartesian2DGrid(number_of_cells = [nx, ny], - lower_bound = [xmin, ymin], - upper_bound = [xmax, ymax], - lower_boundary_conditions = ['periodic', 'periodic'], - upper_boundary_conditions = ['periodic', 'periodic'], - moving_window_velocity = [0., 0., 0.], - warpx_max_grid_size = 32) +grid = picmi.Cartesian2DGrid( + 
number_of_cells=[nx, ny], + lower_bound=[xmin, ymin], + upper_bound=[xmax, ymax], + lower_boundary_conditions=["periodic", "periodic"], + upper_boundary_conditions=["periodic", "periodic"], + moving_window_velocity=[0.0, 0.0, 0.0], + warpx_max_grid_size=32, +) -solver = picmi.ElectromagneticSolver(grid=grid, cfl=1.) +solver = picmi.ElectromagneticSolver(grid=grid, cfl=1.0) ########################## # diagnostics ########################## -field_diag1 = picmi.FieldDiagnostic(name = 'diag1', - grid = grid, - period = diagnostic_intervals, - data_list = ['Ex', 'Jx'], - write_dir = '.', - warpx_file_prefix = 'Python_Langmuir_2d_plt') - -part_diag1 = picmi.ParticleDiagnostic(name = 'diag1', - period = diagnostic_intervals, - species = [electrons], - data_list = ['weighting', 'ux']) +field_diag1 = picmi.FieldDiagnostic( + name="diag1", + grid=grid, + period=diagnostic_intervals, + data_list=["Ex", "Jx"], + write_dir=".", + warpx_file_prefix="Python_Langmuir_2d_plt", +) + +part_diag1 = picmi.ParticleDiagnostic( + name="diag1", + period=diagnostic_intervals, + species=[electrons], + data_list=["weighting", "ux"], +) ########################## # simulation setup ########################## -sim = picmi.Simulation(solver = solver, - max_steps = max_steps, - verbose = 1, - warpx_current_deposition_algo = 'direct', - warpx_use_filter = 0) - -sim.add_species(electrons, - layout = picmi.GriddedLayout(n_macroparticle_per_cell=number_per_cell_each_dim, grid=grid)) +sim = picmi.Simulation( + solver=solver, + max_steps=max_steps, + verbose=1, + warpx_current_deposition_algo="direct", + warpx_use_filter=0, +) + +sim.add_species( + electrons, + layout=picmi.GriddedLayout( + n_macroparticle_per_cell=number_per_cell_each_dim, grid=grid + ), +) sim.add_diagnostic(field_diag1) sim.add_diagnostic(part_diag1) @@ -96,7 +112,7 @@ # write_inputs will create an inputs file that can be used to run # with the compiled version. 
-sim.write_input_file(file_name = 'inputs2d_from_PICMI') +sim.write_input_file(file_name="inputs2d_from_PICMI") # Alternatively, sim.step will run WarpX, controlling it from Python sim.step() diff --git a/Examples/Tests/langmuir/PICMI_inputs_3d.py b/Examples/Tests/langmuir/PICMI_inputs_3d.py index 180180f5f45..e5cef203b7e 100755 --- a/Examples/Tests/langmuir/PICMI_inputs_3d.py +++ b/Examples/Tests/langmuir/PICMI_inputs_3d.py @@ -10,9 +10,9 @@ # physics parameters ########################## -plasma_density = 1.e25 -plasma_xmin = 0. -plasma_x_velocity = 0.1*constants.c +plasma_density = 1.0e25 +plasma_xmin = 0.0 +plasma_x_velocity = 0.1 * constants.c ########################## # numerics parameters @@ -27,66 +27,82 @@ ny = 64 nz = 64 -xmin = -20.e-6 -ymin = -20.e-6 -zmin = -20.e-6 -xmax = +20.e-6 -ymax = +20.e-6 -zmax = +20.e-6 +xmin = -20.0e-6 +ymin = -20.0e-6 +zmin = -20.0e-6 +xmax = +20.0e-6 +ymax = +20.0e-6 +zmax = +20.0e-6 -number_per_cell_each_dim = [2,2,2] +number_per_cell_each_dim = [2, 2, 2] ########################## # physics components ########################## -uniform_plasma = picmi.UniformDistribution(density = 1.e25, - upper_bound = [0., None, None], - directed_velocity = [0.1*constants.c, 0., 0.]) +uniform_plasma = picmi.UniformDistribution( + density=1.0e25, + upper_bound=[0.0, None, None], + directed_velocity=[0.1 * constants.c, 0.0, 0.0], +) -electrons = picmi.Species(particle_type='electron', name='electrons', initial_distribution=uniform_plasma) +electrons = picmi.Species( + particle_type="electron", name="electrons", initial_distribution=uniform_plasma +) ########################## # numerics components ########################## -grid = picmi.Cartesian3DGrid(number_of_cells = [nx, ny, nz], - lower_bound = [xmin, ymin, zmin], - upper_bound = [xmax, ymax, zmax], - lower_boundary_conditions = ['periodic', 'periodic', 'periodic'], - upper_boundary_conditions = ['periodic', 'periodic', 'periodic'], - moving_window_velocity = [0., 0., 0.], - 
warpx_max_grid_size = 32) +grid = picmi.Cartesian3DGrid( + number_of_cells=[nx, ny, nz], + lower_bound=[xmin, ymin, zmin], + upper_bound=[xmax, ymax, zmax], + lower_boundary_conditions=["periodic", "periodic", "periodic"], + upper_boundary_conditions=["periodic", "periodic", "periodic"], + moving_window_velocity=[0.0, 0.0, 0.0], + warpx_max_grid_size=32, +) -solver = picmi.ElectromagneticSolver(grid=grid, cfl=1.) +solver = picmi.ElectromagneticSolver(grid=grid, cfl=1.0) ########################## # diagnostics ########################## -field_diag1 = picmi.FieldDiagnostic(name = 'diag1', - grid = grid, - period = diagnostic_interval, - data_list = ['Ex', 'Jx'], - write_dir = '.', - warpx_file_prefix = 'Python_Langmuir_plt') - -part_diag1 = picmi.ParticleDiagnostic(name = 'diag1', - period = diagnostic_interval, - species = [electrons], - data_list = ['weighting', 'ux']) +field_diag1 = picmi.FieldDiagnostic( + name="diag1", + grid=grid, + period=diagnostic_interval, + data_list=["Ex", "Jx"], + write_dir=".", + warpx_file_prefix="Python_Langmuir_plt", +) + +part_diag1 = picmi.ParticleDiagnostic( + name="diag1", + period=diagnostic_interval, + species=[electrons], + data_list=["weighting", "ux"], +) ########################## # simulation setup ########################## -sim = picmi.Simulation(solver = solver, - max_steps = max_steps, - verbose = 1, - warpx_current_deposition_algo = 'direct') +sim = picmi.Simulation( + solver=solver, + max_steps=max_steps, + verbose=1, + warpx_current_deposition_algo="direct", +) -sim.add_species(electrons, - layout = picmi.GriddedLayout(n_macroparticle_per_cell=number_per_cell_each_dim, grid=grid)) +sim.add_species( + electrons, + layout=picmi.GriddedLayout( + n_macroparticle_per_cell=number_per_cell_each_dim, grid=grid + ), +) sim.add_diagnostic(field_diag1) sim.add_diagnostic(part_diag1) @@ -97,7 +113,7 @@ # write_inputs will create an inputs file that can be used to run # with the compiled version. 
-#sim.write_input_file(file_name = 'inputs_from_PICMI') +# sim.write_input_file(file_name = 'inputs_from_PICMI') # Alternatively, sim.step will run WarpX, controlling it from Python sim.step() diff --git a/Examples/Tests/langmuir/PICMI_inputs_rz.py b/Examples/Tests/langmuir/PICMI_inputs_rz.py index 8da03b00469..e1becedd62d 100755 --- a/Examples/Tests/langmuir/PICMI_inputs_rz.py +++ b/Examples/Tests/langmuir/PICMI_inputs_rz.py @@ -6,7 +6,7 @@ import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import numpy as np @@ -18,16 +18,16 @@ # physics parameters ########################## -density = 2.e24 -epsilon0 = 0.001*constants.c -epsilon1 = 0.001*constants.c -epsilon2 = 0.001*constants.c -w0 = 5.e-6 +density = 2.0e24 +epsilon0 = 0.001 * constants.c +epsilon1 = 0.001 * constants.c +epsilon2 = 0.001 * constants.c +w0 = 5.0e-6 n_osc_z = 3 # Plasma frequency -wp = np.sqrt((density*constants.q_e**2)/(constants.m_e*constants.ep0)) -kp = wp/constants.c +wp = np.sqrt((density * constants.q_e**2) / (constants.m_e * constants.ep0)) +kp = wp / constants.c ########################## # numerics parameters @@ -36,13 +36,13 @@ nr = 64 nz = 200 -rmin = 0.e0 -zmin = 0.e0 -rmax = +20.e-6 -zmax = +40.e-6 +rmin = 0.0e0 +zmin = 0.0e0 +rmax = +20.0e-6 +zmax = +40.0e-6 # Wave vector of the wave -k0 = 2.*np.pi*n_osc_z/(zmax - zmin) +k0 = 2.0 * np.pi * n_osc_z / (zmax - zmin) diagnostic_intervals = 40 @@ -50,83 +50,106 @@ # physics components ########################## -uniform_plasma = picmi.UniformDistribution(density = density, - upper_bound = [+18e-6, +18e-6, None], - directed_velocity = [0., 0., 0.]) +uniform_plasma = picmi.UniformDistribution( + density=density, + upper_bound=[+18e-6, +18e-6, None], + directed_velocity=[0.0, 0.0, 0.0], +) -momentum_expressions = ["""+ epsilon0/kp*2*x/w0**2*exp(-(x**2+y**2)/w0**2)*sin(k0*z) +momentum_expressions = [ + """+ epsilon0/kp*2*x/w0**2*exp(-(x**2+y**2)/w0**2)*sin(k0*z) - 
epsilon1/kp*2/w0*exp(-(x**2+y**2)/w0**2)*sin(k0*z) + epsilon1/kp*4*x**2/w0**3*exp(-(x**2+y**2)/w0**2)*sin(k0*z) - epsilon2/kp*8*x/w0**2*exp(-(x**2+y**2)/w0**2)*sin(k0*z) + epsilon2/kp*8*x*(x**2-y**2)/w0**4*exp(-(x**2+y**2)/w0**2)*sin(k0*z)""", - """+ epsilon0/kp*2*y/w0**2*exp(-(x**2+y**2)/w0**2)*sin(k0*z) + """+ epsilon0/kp*2*y/w0**2*exp(-(x**2+y**2)/w0**2)*sin(k0*z) + epsilon1/kp*4*x*y/w0**3*exp(-(x**2+y**2)/w0**2)*sin(k0*z) + epsilon2/kp*8*y/w0**2*exp(-(x**2+y**2)/w0**2)*sin(k0*z) + epsilon2/kp*8*y*(x**2-y**2)/w0**4*exp(-(x**2+y**2)/w0**2)*sin(k0*z)""", - """- epsilon0/kp*k0*exp(-(x**2+y**2)/w0**2)*cos(k0*z) + """- epsilon0/kp*k0*exp(-(x**2+y**2)/w0**2)*cos(k0*z) - epsilon1/kp*k0*2*x/w0*exp(-(x**2+y**2)/w0**2)*cos(k0*z) - - epsilon2/kp*k0*4*(x**2-y**2)/w0**2*exp(-(x**2+y**2)/w0**2)*cos(k0*z)"""] - -analytic_plasma = picmi.AnalyticDistribution(density_expression = density, - upper_bound = [+18e-6, +18e-6, None], - epsilon0 = epsilon0, - epsilon1 = epsilon1, - epsilon2 = epsilon2, - kp = kp, - k0 = k0, - w0 = w0, - momentum_expressions = momentum_expressions) - -electrons = picmi.Species(particle_type='electron', name='electrons', initial_distribution=analytic_plasma) -protons = picmi.Species(particle_type='proton', name='protons', initial_distribution=uniform_plasma) + - epsilon2/kp*k0*4*(x**2-y**2)/w0**2*exp(-(x**2+y**2)/w0**2)*cos(k0*z)""", +] + +analytic_plasma = picmi.AnalyticDistribution( + density_expression=density, + upper_bound=[+18e-6, +18e-6, None], + epsilon0=epsilon0, + epsilon1=epsilon1, + epsilon2=epsilon2, + kp=kp, + k0=k0, + w0=w0, + momentum_expressions=momentum_expressions, +) + +electrons = picmi.Species( + particle_type="electron", name="electrons", initial_distribution=analytic_plasma +) +protons = picmi.Species( + particle_type="proton", name="protons", initial_distribution=uniform_plasma +) ########################## # numerics components ########################## -grid = picmi.CylindricalGrid(number_of_cells = [nr, nz], - 
n_azimuthal_modes = 3, - lower_bound = [rmin, zmin], - upper_bound = [rmax, zmax], - lower_boundary_conditions = ['none', 'periodic'], - upper_boundary_conditions = ['none', 'periodic'], - lower_boundary_conditions_particles = ['none', 'periodic'], - upper_boundary_conditions_particles = ['absorbing', 'periodic'], - moving_window_velocity = [0.,0.], - warpx_max_grid_size=64) - -solver = picmi.ElectromagneticSolver(grid=grid, cfl=1.) +grid = picmi.CylindricalGrid( + number_of_cells=[nr, nz], + n_azimuthal_modes=3, + lower_bound=[rmin, zmin], + upper_bound=[rmax, zmax], + lower_boundary_conditions=["none", "periodic"], + upper_boundary_conditions=["none", "periodic"], + lower_boundary_conditions_particles=["none", "periodic"], + upper_boundary_conditions_particles=["absorbing", "periodic"], + moving_window_velocity=[0.0, 0.0], + warpx_max_grid_size=64, +) + +solver = picmi.ElectromagneticSolver(grid=grid, cfl=1.0) ########################## # diagnostics ########################## -field_diag1 = picmi.FieldDiagnostic(name = 'diag1', - grid = grid, - period = diagnostic_intervals, - data_list = ['Er', 'Ez', 'Bt', 'Jr', 'Jz', 'part_per_cell'], - write_dir = '.', - warpx_file_prefix = 'Python_Langmuir_rz_multimode_plt') - -part_diag1 = picmi.ParticleDiagnostic(name = 'diag1', - period = diagnostic_intervals, - species = [electrons], - data_list = ['weighting', 'momentum']) +field_diag1 = picmi.FieldDiagnostic( + name="diag1", + grid=grid, + period=diagnostic_intervals, + data_list=["Er", "Ez", "Bt", "Jr", "Jz", "part_per_cell"], + write_dir=".", + warpx_file_prefix="Python_Langmuir_rz_multimode_plt", +) + +part_diag1 = picmi.ParticleDiagnostic( + name="diag1", + period=diagnostic_intervals, + species=[electrons], + data_list=["weighting", "momentum"], +) ########################## # simulation setup ########################## -sim = picmi.Simulation(solver = solver, - max_steps = 40, - verbose = 1, - warpx_current_deposition_algo = 'esirkepov', - 
warpx_field_gathering_algo = 'energy-conserving', - warpx_particle_pusher_algo = 'boris', - warpx_use_filter = 0) - -sim.add_species(electrons, layout=picmi.GriddedLayout(n_macroparticle_per_cell=[2,16,2], grid=grid)) -sim.add_species(protons, layout=picmi.GriddedLayout(n_macroparticle_per_cell=[2,16,2], grid=grid)) +sim = picmi.Simulation( + solver=solver, + max_steps=40, + verbose=1, + warpx_current_deposition_algo="esirkepov", + warpx_field_gathering_algo="energy-conserving", + warpx_particle_pusher_algo="boris", + warpx_use_filter=0, +) + +sim.add_species( + electrons, + layout=picmi.GriddedLayout(n_macroparticle_per_cell=[2, 16, 2], grid=grid), +) +sim.add_species( + protons, layout=picmi.GriddedLayout(n_macroparticle_per_cell=[2, 16, 2], grid=grid) +) sim.add_diagnostic(field_diag1) sim.add_diagnostic(part_diag1) @@ -137,7 +160,7 @@ # write_inputs will create an inputs file that can be used to run # with the compiled version. -#sim.write_input_file(file_name='inputsrz_from_PICMI') +# sim.write_input_file(file_name='inputsrz_from_PICMI') # Alternatively, sim.step will run WarpX, controlling it from Python sim.step() @@ -145,37 +168,105 @@ # Below is WarpX specific code to check the results. 
-def calcEr( z, r, k0, w0, wp, t, epsilons) : + +def calcEr(z, r, k0, w0, wp, t, epsilons): """ Return the radial electric field as an array of the same length as z and r, in the half-plane theta=0 """ Er_array = ( - epsilons[0] * constants.m_e*constants.c/constants.q_e * 2*r/w0**2 * - np.exp( -r**2/w0**2 ) * np.sin( k0*z ) * np.sin( wp*t ) - - epsilons[1] * constants.m_e*constants.c/constants.q_e * 2/w0 * - np.exp( -r**2/w0**2 ) * np.sin( k0*z ) * np.sin( wp*t ) - + epsilons[1] * constants.m_e*constants.c/constants.q_e * 4*r**2/w0**3 * - np.exp( -r**2/w0**2 ) * np.sin( k0*z ) * np.sin( wp*t ) - - epsilons[2] * constants.m_e*constants.c/constants.q_e * 8*r/w0**2 * - np.exp( -r**2/w0**2 ) * np.sin( k0*z ) * np.sin( wp*t ) - + epsilons[2] * constants.m_e*constants.c/constants.q_e * 8*r**3/w0**4 * - np.exp( -r**2/w0**2 ) * np.sin( k0*z ) * np.sin( wp*t )) - return( Er_array ) - -def calcEz( z, r, k0, w0, wp, t, epsilons) : + epsilons[0] + * constants.m_e + * constants.c + / constants.q_e + * 2 + * r + / w0**2 + * np.exp(-(r**2) / w0**2) + * np.sin(k0 * z) + * np.sin(wp * t) + - epsilons[1] + * constants.m_e + * constants.c + / constants.q_e + * 2 + / w0 + * np.exp(-(r**2) / w0**2) + * np.sin(k0 * z) + * np.sin(wp * t) + + epsilons[1] + * constants.m_e + * constants.c + / constants.q_e + * 4 + * r**2 + / w0**3 + * np.exp(-(r**2) / w0**2) + * np.sin(k0 * z) + * np.sin(wp * t) + - epsilons[2] + * constants.m_e + * constants.c + / constants.q_e + * 8 + * r + / w0**2 + * np.exp(-(r**2) / w0**2) + * np.sin(k0 * z) + * np.sin(wp * t) + + epsilons[2] + * constants.m_e + * constants.c + / constants.q_e + * 8 + * r**3 + / w0**4 + * np.exp(-(r**2) / w0**2) + * np.sin(k0 * z) + * np.sin(wp * t) + ) + return Er_array + + +def calcEz(z, r, k0, w0, wp, t, epsilons): """ Return the longitudinal electric field as an array of the same length as z and r, in the half-plane theta=0 """ Ez_array = ( - - epsilons[0] * constants.m_e*constants.c/constants.q_e * k0 * - np.exp( -r**2/w0**2 ) * 
np.cos( k0*z ) * np.sin( wp*t ) - - epsilons[1] * constants.m_e*constants.c/constants.q_e * k0 * 2*r/w0 * - np.exp( -r**2/w0**2 ) * np.cos( k0*z ) * np.sin( wp*t ) - - epsilons[2] * constants.m_e*constants.c/constants.q_e * k0 * 4*r**2/w0**2 * - np.exp( -r**2/w0**2 ) * np.cos( k0*z ) * np.sin( wp*t )) - return( Ez_array ) + -epsilons[0] + * constants.m_e + * constants.c + / constants.q_e + * k0 + * np.exp(-(r**2) / w0**2) + * np.cos(k0 * z) + * np.sin(wp * t) + - epsilons[1] + * constants.m_e + * constants.c + / constants.q_e + * k0 + * 2 + * r + / w0 + * np.exp(-(r**2) / w0**2) + * np.cos(k0 * z) + * np.sin(wp * t) + - epsilons[2] + * constants.m_e + * constants.c + / constants.q_e + * k0 + * 4 + * r**2 + / w0**2 + * np.exp(-(r**2) / w0**2) + * np.cos(k0 * z) + * np.sin(wp * t) + ) + return Ez_array + # Current time of the simulation t0 = sim.extension.warpx.gett_new(0) @@ -187,52 +278,52 @@ def calcEz( z, r, k0, w0, wp, t, epsilons) : Ex_sim_modes = Ex_sim_wrap[...] Ez_sim_modes = Ez_sim_wrap[...] 
-rr_Er = Ex_sim_wrap.mesh('r') -zz_Er = Ex_sim_wrap.mesh('z') -rr_Ez = Ez_sim_wrap.mesh('r') -zz_Ez = Ez_sim_wrap.mesh('z') +rr_Er = Ex_sim_wrap.mesh("r") +zz_Er = Ex_sim_wrap.mesh("z") +rr_Ez = Ez_sim_wrap.mesh("r") +zz_Ez = Ez_sim_wrap.mesh("z") -rr_Er = rr_Er[:,np.newaxis]*np.ones(zz_Er.shape[0])[np.newaxis,:] -zz_Er = zz_Er[np.newaxis,:]*np.ones(rr_Er.shape[0])[:,np.newaxis] -rr_Ez = rr_Ez[:,np.newaxis]*np.ones(zz_Ez.shape[0])[np.newaxis,:] -zz_Ez = zz_Ez[np.newaxis,:]*np.ones(rr_Ez.shape[0])[:,np.newaxis] +rr_Er = rr_Er[:, np.newaxis] * np.ones(zz_Er.shape[0])[np.newaxis, :] +zz_Er = zz_Er[np.newaxis, :] * np.ones(rr_Er.shape[0])[:, np.newaxis] +rr_Ez = rr_Ez[:, np.newaxis] * np.ones(zz_Ez.shape[0])[np.newaxis, :] +zz_Ez = zz_Ez[np.newaxis, :] * np.ones(rr_Ez.shape[0])[:, np.newaxis] # Sum the real components to get the field along x-axis (theta = 0) -Er_sim = Ex_sim_modes[:,:,0] + np.sum(Ex_sim_modes[:,:,1::2], axis=2) -Ez_sim = Ez_sim_modes[:,:,0] + np.sum(Ez_sim_modes[:,:,1::2], axis=2) +Er_sim = Ex_sim_modes[:, :, 0] + np.sum(Ex_sim_modes[:, :, 1::2], axis=2) +Ez_sim = Ez_sim_modes[:, :, 0] + np.sum(Ez_sim_modes[:, :, 1::2], axis=2) # The analytical solutions Er_th = calcEr(zz_Er, rr_Er, k0, w0, wp, t0, [epsilon0, epsilon1, epsilon2]) Ez_th = calcEz(zz_Ez, rr_Ez, k0, w0, wp, t0, [epsilon0, epsilon1, epsilon2]) -max_error_Er = abs(Er_sim - Er_th).max()/abs(Er_th).max() -max_error_Ez = abs(Ez_sim - Ez_th).max()/abs(Ez_th).max() -print("Max error Er %e"%max_error_Er) -print("Max error Ez %e"%max_error_Ez) +max_error_Er = abs(Er_sim - Er_th).max() / abs(Er_th).max() +max_error_Ez = abs(Ez_sim - Ez_th).max() / abs(Ez_th).max() +print("Max error Er %e" % max_error_Er) +print("Max error Ez %e" % max_error_Ez) # Plot the last field from the loop (Er at iteration 40) fig, ax = plt.subplots(3) -im = ax[0].imshow( Er_sim, aspect='auto', origin='lower' ) -fig.colorbar(im, ax=ax[0], orientation='vertical') -ax[0].set_title('Er, last iteration (simulation)') 
-ax[1].imshow( Er_th, aspect='auto', origin='lower' ) -fig.colorbar(im, ax=ax[1], orientation='vertical') -ax[1].set_title('Er, last iteration (theory)') -im = ax[2].imshow( (Er_sim - Er_th)/abs(Er_th).max(), aspect='auto', origin='lower' ) -fig.colorbar(im, ax=ax[2], orientation='vertical') -ax[2].set_title('Er, last iteration (difference)') -plt.savefig('langmuir_multi_rz_multimode_analysis_Er.png') +im = ax[0].imshow(Er_sim, aspect="auto", origin="lower") +fig.colorbar(im, ax=ax[0], orientation="vertical") +ax[0].set_title("Er, last iteration (simulation)") +ax[1].imshow(Er_th, aspect="auto", origin="lower") +fig.colorbar(im, ax=ax[1], orientation="vertical") +ax[1].set_title("Er, last iteration (theory)") +im = ax[2].imshow((Er_sim - Er_th) / abs(Er_th).max(), aspect="auto", origin="lower") +fig.colorbar(im, ax=ax[2], orientation="vertical") +ax[2].set_title("Er, last iteration (difference)") +plt.savefig("langmuir_multi_rz_multimode_analysis_Er.png") fig, ax = plt.subplots(3) -im = ax[0].imshow( Ez_sim, aspect='auto', origin='lower' ) -fig.colorbar(im, ax=ax[0], orientation='vertical') -ax[0].set_title('Ez, last iteration (simulation)') -ax[1].imshow( Ez_th, aspect='auto', origin='lower' ) -fig.colorbar(im, ax=ax[1], orientation='vertical') -ax[1].set_title('Ez, last iteration (theory)') -im = ax[2].imshow( (Ez_sim - Ez_th)/abs(Ez_th).max(), aspect='auto', origin='lower' ) -fig.colorbar(im, ax=ax[2], orientation='vertical') -ax[2].set_title('Ez, last iteration (difference)') -plt.savefig('langmuir_multi_rz_multimode_analysis_Ez.png') +im = ax[0].imshow(Ez_sim, aspect="auto", origin="lower") +fig.colorbar(im, ax=ax[0], orientation="vertical") +ax[0].set_title("Ez, last iteration (simulation)") +ax[1].imshow(Ez_th, aspect="auto", origin="lower") +fig.colorbar(im, ax=ax[1], orientation="vertical") +ax[1].set_title("Ez, last iteration (theory)") +im = ax[2].imshow((Ez_sim - Ez_th) / abs(Ez_th).max(), aspect="auto", origin="lower") +fig.colorbar(im, ax=ax[2], 
orientation="vertical") +ax[2].set_title("Ez, last iteration (difference)") +plt.savefig("langmuir_multi_rz_multimode_analysis_Ez.png") assert max(max_error_Er, max_error_Ez) < 0.02 diff --git a/Examples/Tests/langmuir/analysis_1d.py b/Examples/Tests/langmuir/analysis_1d.py index 7b05fac6643..3ba21751671 100755 --- a/Examples/Tests/langmuir/analysis_1d.py +++ b/Examples/Tests/langmuir/analysis_1d.py @@ -17,7 +17,7 @@ import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import yt @@ -26,94 +26,102 @@ import numpy as np from scipy.constants import c, e, epsilon_0, m_e -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file fn = sys.argv[1] # Parse test name and check if current correction (psatd.current_correction=1) is applied -current_correction = True if re.search( 'current_correction', fn ) else False +current_correction = True if re.search("current_correction", fn) else False # Parse test name and check if Vay current deposition (algo.current_deposition=vay) is used -vay_deposition = True if re.search( 'Vay_deposition', fn ) else False +vay_deposition = True if re.search("Vay_deposition", fn) else False # Parameters (these parameters must match the parameters in `inputs.multi.rt`) epsilon = 0.01 -n = 4.e24 +n = 4.0e24 n_osc_z = 2 -zmin = -20e-6; zmax = 20.e-6; Nz = 128 +zmin = -20e-6 +zmax = 20.0e-6 +Nz = 128 # Wave vector of the wave -kz = 2.*np.pi*n_osc_z/(zmax-zmin) +kz = 2.0 * np.pi * n_osc_z / (zmax - zmin) # Plasma frequency -wp = np.sqrt((n*e**2)/(m_e*epsilon_0)) +wp = np.sqrt((n * e**2) / (m_e * epsilon_0)) -k = {'Ez':kz} -cos = {'Ez':(1,1,0)} +k = {"Ez": kz} +cos = {"Ez": (1, 1, 0)} -def get_contribution( is_cos, k ): - du = (zmax-zmin)/Nz - u = zmin + du*( 0.5 + np.arange(Nz) ) + +def get_contribution(is_cos, k): + du = (zmax - zmin) / Nz + u = zmin + du * (0.5 + np.arange(Nz)) if 
is_cos == 1: - return( np.cos(k*u) ) + return np.cos(k * u) else: - return( np.sin(k*u) ) + return np.sin(k * u) + -def get_theoretical_field( field, t ): - amplitude = epsilon * (m_e*c**2*k[field])/e * np.sin(wp*t) +def get_theoretical_field(field, t): + amplitude = epsilon * (m_e * c**2 * k[field]) / e * np.sin(wp * t) cos_flag = cos[field] - z_contribution = get_contribution( cos_flag[2], kz ) + z_contribution = get_contribution(cos_flag[2], kz) E = amplitude * z_contribution - return( E ) + return E + # Read the file ds = yt.load(fn) t0 = ds.current_time.to_value() -data = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, - dims=ds.domain_dimensions) +data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) # Check the validity of the fields error_rel = 0 -for field in ['Ez']: - E_sim = data[('mesh',field)].to_ndarray()[:,0,0] +for field in ["Ez"]: + E_sim = data[("mesh", field)].to_ndarray()[:, 0, 0] E_th = get_theoretical_field(field, t0) - max_error = abs(E_sim-E_th).max()/abs(E_th).max() - print('%s: Max error: %.2e' %(field,max_error)) - error_rel = max( error_rel, max_error ) + max_error = abs(E_sim - E_th).max() / abs(E_th).max() + print("%s: Max error: %.2e" % (field, max_error)) + error_rel = max(error_rel, max_error) # Plot the last field from the loop (Ez at iteration 80) -plt.subplot2grid( (1,2), (0,0) ) -plt.plot( E_sim ) -#plt.colorbar() -plt.title('Ez, last iteration\n(simulation)') -plt.subplot2grid( (1,2), (0,1) ) -plt.plot( E_th ) -#plt.colorbar() -plt.title('Ez, last iteration\n(theory)') +plt.subplot2grid((1, 2), (0, 0)) +plt.plot(E_sim) +# plt.colorbar() +plt.title("Ez, last iteration\n(simulation)") +plt.subplot2grid((1, 2), (0, 1)) +plt.plot(E_th) +# plt.colorbar() +plt.title("Ez, last iteration\n(theory)") plt.tight_layout() -plt.savefig('langmuir_multi_1d_analysis.png') +plt.savefig("langmuir_multi_1d_analysis.png") tolerance_rel = 0.05 print("error_rel : " + str(error_rel)) 
print("tolerance_rel: " + str(tolerance_rel)) -assert( error_rel < tolerance_rel ) +assert error_rel < tolerance_rel # Check relative L-infinity spatial norm of rho/epsilon_0 - div(E) when # current correction (psatd.do_current_correction=1) is applied or when # Vay current deposition (algo.current_deposition=vay) is used if current_correction or vay_deposition: - rho = data[('boxlib','rho')].to_ndarray() - divE = data[('boxlib','divE')].to_ndarray() - error_rel = np.amax( np.abs( divE - rho/epsilon_0 ) ) / np.amax( np.abs( rho/epsilon_0 ) ) - tolerance = 1.e-9 + rho = data[("boxlib", "rho")].to_ndarray() + divE = data[("boxlib", "divE")].to_ndarray() + error_rel = np.amax(np.abs(divE - rho / epsilon_0)) / np.amax( + np.abs(rho / epsilon_0) + ) + tolerance = 1.0e-9 print("Check charge conservation:") print("error_rel = {}".format(error_rel)) print("tolerance = {}".format(tolerance)) - assert( error_rel < tolerance ) + assert error_rel < tolerance test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/langmuir/analysis_2d.py b/Examples/Tests/langmuir/analysis_2d.py index 94f97ca6de8..8914b8b426c 100755 --- a/Examples/Tests/langmuir/analysis_2d.py +++ b/Examples/Tests/langmuir/analysis_2d.py @@ -26,99 +26,113 @@ import numpy as np from scipy.constants import c, e, epsilon_0, m_e -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file fn = sys.argv[1] # Parse test name and check if current correction (psatd.current_correction=1) is applied -current_correction = True if re.search( 'current_correction', fn ) else False +current_correction = True if re.search("current_correction", fn) else False # Parse test name and check if Vay current deposition (algo.current_deposition=vay) is used -vay_deposition = True if re.search( 'Vay_deposition', fn ) else False +vay_deposition = True if 
re.search("Vay_deposition", fn) else False # Parse test name and check if particle_shape = 4 is used -particle_shape_4 = True if re.search('particle_shape_4', fn) else False +particle_shape_4 = True if re.search("particle_shape_4", fn) else False # Parameters (these parameters must match the parameters in `inputs.multi.rt`) epsilon = 0.01 -n = 4.e24 +n = 4.0e24 n_osc_x = 2 n_osc_z = 2 -xmin = -20e-6; xmax = 20.e-6; Nx = 128 -zmin = -20e-6; zmax = 20.e-6; Nz = 128 +xmin = -20e-6 +xmax = 20.0e-6 +Nx = 128 +zmin = -20e-6 +zmax = 20.0e-6 +Nz = 128 # Wave vector of the wave -kx = 2.*np.pi*n_osc_x/(xmax-xmin) -kz = 2.*np.pi*n_osc_z/(zmax-zmin) +kx = 2.0 * np.pi * n_osc_x / (xmax - xmin) +kz = 2.0 * np.pi * n_osc_z / (zmax - zmin) # Plasma frequency -wp = np.sqrt((n*e**2)/(m_e*epsilon_0)) +wp = np.sqrt((n * e**2) / (m_e * epsilon_0)) -k = {'Ex':kx, 'Ez':kz} -cos = {'Ex': (0,1,1), 'Ez':(1,1,0)} +k = {"Ex": kx, "Ez": kz} +cos = {"Ex": (0, 1, 1), "Ez": (1, 1, 0)} -def get_contribution( is_cos, k ): - du = (xmax-xmin)/Nx - u = xmin + du*( 0.5 + np.arange(Nx) ) + +def get_contribution(is_cos, k): + du = (xmax - xmin) / Nx + u = xmin + du * (0.5 + np.arange(Nx)) if is_cos == 1: - return( np.cos(k*u) ) + return np.cos(k * u) else: - return( np.sin(k*u) ) + return np.sin(k * u) + -def get_theoretical_field( field, t ): - amplitude = epsilon * (m_e*c**2*k[field])/e * np.sin(wp*t) +def get_theoretical_field(field, t): + amplitude = epsilon * (m_e * c**2 * k[field]) / e * np.sin(wp * t) cos_flag = cos[field] - x_contribution = get_contribution( cos_flag[0], kx ) - z_contribution = get_contribution( cos_flag[2], kz ) + x_contribution = get_contribution(cos_flag[0], kx) + z_contribution = get_contribution(cos_flag[2], kz) + + E = amplitude * x_contribution[:, np.newaxis] * z_contribution[np.newaxis, :] - E = amplitude * x_contribution[:, np.newaxis ] \ - * z_contribution[np.newaxis, :] + return E - return( E ) # Read the file ds = yt.load(fn) t0 = ds.current_time.to_value() -data = 
ds.covering_grid(level = 0, left_edge = ds.domain_left_edge, dims = ds.domain_dimensions) -edge = np.array([(ds.domain_left_edge[1]).item(), (ds.domain_right_edge[1]).item(), \ - (ds.domain_left_edge[0]).item(), (ds.domain_right_edge[0]).item()]) +data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +edge = np.array( + [ + (ds.domain_left_edge[1]).item(), + (ds.domain_right_edge[1]).item(), + (ds.domain_left_edge[0]).item(), + (ds.domain_right_edge[0]).item(), + ] +) # Check the validity of the fields error_rel = 0 -for field in ['Ex', 'Ez']: - E_sim = data[('mesh',field)].to_ndarray()[:,:,0] +for field in ["Ex", "Ez"]: + E_sim = data[("mesh", field)].to_ndarray()[:, :, 0] E_th = get_theoretical_field(field, t0) - max_error = abs(E_sim-E_th).max()/abs(E_th).max() - print('%s: Max error: %.2e' %(field,max_error)) - error_rel = max( error_rel, max_error ) + max_error = abs(E_sim - E_th).max() / abs(E_th).max() + print("%s: Max error: %.2e" % (field, max_error)) + error_rel = max(error_rel, max_error) # Plot the last field from the loop (Ez at iteration 40) -fig, (ax1, ax2) = plt.subplots(1, 2, dpi = 100) +fig, (ax1, ax2) = plt.subplots(1, 2, dpi=100) # First plot vmin = E_sim.min() vmax = E_sim.max() -cax1 = make_axes_locatable(ax1).append_axes('right', size = '5%', pad = '5%') -im1 = ax1.imshow(E_sim, origin = 'lower', extent = edge, vmin = vmin, vmax = vmax) -cb1 = fig.colorbar(im1, cax = cax1) -ax1.set_xlabel(r'$z$') -ax1.set_ylabel(r'$x$') -ax1.set_title(r'$E_z$ (sim)') +cax1 = make_axes_locatable(ax1).append_axes("right", size="5%", pad="5%") +im1 = ax1.imshow(E_sim, origin="lower", extent=edge, vmin=vmin, vmax=vmax) +cb1 = fig.colorbar(im1, cax=cax1) +ax1.set_xlabel(r"$z$") +ax1.set_ylabel(r"$x$") +ax1.set_title(r"$E_z$ (sim)") # Second plot vmin = E_th.min() vmax = E_th.max() -cax2 = make_axes_locatable(ax2).append_axes('right', size = '5%', pad = '5%') -im2 = ax2.imshow(E_th, origin = 'lower', extent = edge, vmin = 
vmin, vmax = vmax) -cb2 = fig.colorbar(im2, cax = cax2) -ax2.set_xlabel(r'$z$') -ax2.set_ylabel(r'$x$') -ax2.set_title(r'$E_z$ (theory)') +cax2 = make_axes_locatable(ax2).append_axes("right", size="5%", pad="5%") +im2 = ax2.imshow(E_th, origin="lower", extent=edge, vmin=vmin, vmax=vmax) +cb2 = fig.colorbar(im2, cax=cax2) +ax2.set_xlabel(r"$z$") +ax2.set_ylabel(r"$x$") +ax2.set_title(r"$E_z$ (theory)") # Save figure fig.tight_layout() -fig.savefig('Langmuir_multi_2d_analysis.png', dpi = 200) +fig.savefig("Langmuir_multi_2d_analysis.png", dpi=200) if particle_shape_4: -# lower fidelity, due to smoothing + # lower fidelity, due to smoothing tolerance_rel = 0.07 else: tolerance_rel = 0.05 @@ -126,7 +140,7 @@ def get_theoretical_field( field, t ): print("error_rel : " + str(error_rel)) print("tolerance_rel: " + str(tolerance_rel)) -assert( error_rel < tolerance_rel ) +assert error_rel < tolerance_rel # Check relative L-infinity spatial norm of rho/epsilon_0 - div(E) # with current correction (and periodic single box option) or with Vay current deposition @@ -135,13 +149,15 @@ def get_theoretical_field( field, t ): elif vay_deposition: tolerance = 1e-3 if current_correction or vay_deposition: - rho = data[('boxlib','rho')].to_ndarray() - divE = data[('boxlib','divE')].to_ndarray() - error_rel = np.amax( np.abs( divE - rho/epsilon_0 ) ) / np.amax( np.abs( rho/epsilon_0 ) ) + rho = data[("boxlib", "rho")].to_ndarray() + divE = data[("boxlib", "divE")].to_ndarray() + error_rel = np.amax(np.abs(divE - rho / epsilon_0)) / np.amax( + np.abs(rho / epsilon_0) + ) print("Check charge conservation:") print("error_rel = {}".format(error_rel)) print("tolerance = {}".format(tolerance)) - assert( error_rel < tolerance ) + assert error_rel < tolerance test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/langmuir/analysis_3d.py b/Examples/Tests/langmuir/analysis_3d.py index 68334f506ff..6fd58e62de4 100755 --- 
a/Examples/Tests/langmuir/analysis_3d.py +++ b/Examples/Tests/langmuir/analysis_3d.py @@ -26,128 +26,139 @@ import numpy as np from scipy.constants import c, e, epsilon_0, m_e -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file fn = sys.argv[1] # Parse test name and check if current correction (psatd.current_correction=1) is applied -current_correction = True if re.search( 'current_correction', fn ) else False +current_correction = True if re.search("current_correction", fn) else False # Parse test name and check if Vay current deposition (algo.current_deposition=vay) is used -vay_deposition = True if re.search( 'Vay_deposition', fn ) else False +vay_deposition = True if re.search("Vay_deposition", fn) else False # Parse test name and check if div(E)/div(B) cleaning (warpx.do_div_cleaning=1) is used -div_cleaning = True if re.search('div_cleaning', fn) else False +div_cleaning = True if re.search("div_cleaning", fn) else False # Parameters (these parameters must match the parameters in `inputs.multi.rt`) epsilon = 0.01 -n = 4.e24 +n = 4.0e24 n_osc_x = 2 n_osc_y = 2 n_osc_z = 2 -lo = [-20.e-6, -20.e-6, -20.e-6] -hi = [ 20.e-6, 20.e-6, 20.e-6] +lo = [-20.0e-6, -20.0e-6, -20.0e-6] +hi = [20.0e-6, 20.0e-6, 20.0e-6] Ncell = [64, 64, 64] # Wave vector of the wave -kx = 2.*np.pi*n_osc_x/(hi[0]-lo[0]) -ky = 2.*np.pi*n_osc_y/(hi[1]-lo[1]) -kz = 2.*np.pi*n_osc_z/(hi[2]-lo[2]) +kx = 2.0 * np.pi * n_osc_x / (hi[0] - lo[0]) +ky = 2.0 * np.pi * n_osc_y / (hi[1] - lo[1]) +kz = 2.0 * np.pi * n_osc_z / (hi[2] - lo[2]) # Plasma frequency -wp = np.sqrt((n*e**2)/(m_e*epsilon_0)) +wp = np.sqrt((n * e**2) / (m_e * epsilon_0)) -k = {'Ex':kx, 'Ey':ky, 'Ez':kz} -cos = {'Ex': (0,1,1), 'Ey':(1,0,1), 'Ez':(1,1,0)} +k = {"Ex": kx, "Ey": ky, "Ez": kz} +cos = {"Ex": (0, 1, 1), "Ey": (1, 0, 1), "Ez": (1, 1, 0)} -def get_contribution( is_cos, k, idim ): - du = 
(hi[idim]-lo[idim])/Ncell[idim] - u = lo[idim] + du*( 0.5 + np.arange(Ncell[idim]) ) + +def get_contribution(is_cos, k, idim): + du = (hi[idim] - lo[idim]) / Ncell[idim] + u = lo[idim] + du * (0.5 + np.arange(Ncell[idim])) if is_cos[idim] == 1: - return( np.cos(k*u) ) + return np.cos(k * u) else: - return( np.sin(k*u) ) + return np.sin(k * u) + -def get_theoretical_field( field, t ): - amplitude = epsilon * (m_e*c**2*k[field])/e * np.sin(wp*t) +def get_theoretical_field(field, t): + amplitude = epsilon * (m_e * c**2 * k[field]) / e * np.sin(wp * t) cos_flag = cos[field] - x_contribution = get_contribution( cos_flag, kx, 0 ) - y_contribution = get_contribution( cos_flag, ky, 1 ) - z_contribution = get_contribution( cos_flag, kz, 2 ) + x_contribution = get_contribution(cos_flag, kx, 0) + y_contribution = get_contribution(cos_flag, ky, 1) + z_contribution = get_contribution(cos_flag, kz, 2) + + E = ( + amplitude + * x_contribution[:, np.newaxis, np.newaxis] + * y_contribution[np.newaxis, :, np.newaxis] + * z_contribution[np.newaxis, np.newaxis, :] + ) - E = amplitude * x_contribution[:, np.newaxis, np.newaxis] \ - * y_contribution[np.newaxis, :, np.newaxis] \ - * z_contribution[np.newaxis, np.newaxis, :] + return E - return( E ) # Read the file ds = yt.load(fn) # Check that the particle selective output worked: -species = 'electrons' -print('ds.field_list', ds.field_list) -for field in ['particle_weight', - 'particle_momentum_x']: - print('assert that this is in ds.field_list', (species, field)) +species = "electrons" +print("ds.field_list", ds.field_list) +for field in ["particle_weight", "particle_momentum_x"]: + print("assert that this is in ds.field_list", (species, field)) assert (species, field) in ds.field_list -for field in ['particle_momentum_y', - 'particle_momentum_z']: - print('assert that this is NOT in ds.field_list', (species, field)) +for field in ["particle_momentum_y", "particle_momentum_z"]: + print("assert that this is NOT in ds.field_list", 
(species, field)) assert (species, field) not in ds.field_list -species = 'positrons' -for field in ['particle_momentum_x', - 'particle_momentum_y']: - print('assert that this is NOT in ds.field_list', (species, field)) +species = "positrons" +for field in ["particle_momentum_x", "particle_momentum_y"]: + print("assert that this is NOT in ds.field_list", (species, field)) assert (species, field) not in ds.field_list t0 = ds.current_time.to_value() -data = ds.covering_grid(level = 0, left_edge = ds.domain_left_edge, dims = ds.domain_dimensions) -edge = np.array([(ds.domain_left_edge[2]).item(), (ds.domain_right_edge[2]).item(), \ - (ds.domain_left_edge[0]).item(), (ds.domain_right_edge[0]).item()]) +data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +edge = np.array( + [ + (ds.domain_left_edge[2]).item(), + (ds.domain_right_edge[2]).item(), + (ds.domain_left_edge[0]).item(), + (ds.domain_right_edge[0]).item(), + ] +) # Check the validity of the fields error_rel = 0 -for field in ['Ex', 'Ey', 'Ez']: - E_sim = data[('mesh',field)].to_ndarray() +for field in ["Ex", "Ey", "Ez"]: + E_sim = data[("mesh", field)].to_ndarray() E_th = get_theoretical_field(field, t0) - max_error = abs(E_sim-E_th).max()/abs(E_th).max() - print('%s: Max error: %.2e' %(field,max_error)) - error_rel = max( error_rel, max_error ) + max_error = abs(E_sim - E_th).max() / abs(E_th).max() + print("%s: Max error: %.2e" % (field, max_error)) + error_rel = max(error_rel, max_error) # Plot the last field from the loop (Ez at iteration 40) -fig, (ax1, ax2) = plt.subplots(1, 2, dpi = 100) +fig, (ax1, ax2) = plt.subplots(1, 2, dpi=100) # First plot (slice at y=0) -E_plot = E_sim[:,Ncell[1]//2+1,:] +E_plot = E_sim[:, Ncell[1] // 2 + 1, :] vmin = E_plot.min() vmax = E_plot.max() -cax1 = make_axes_locatable(ax1).append_axes('right', size = '5%', pad = '5%') -im1 = ax1.imshow(E_plot, origin = 'lower', extent = edge, vmin = vmin, vmax = vmax) -cb1 = fig.colorbar(im1, 
cax = cax1) -ax1.set_xlabel(r'$z$') -ax1.set_ylabel(r'$x$') -ax1.set_title(r'$E_z$ (sim)') +cax1 = make_axes_locatable(ax1).append_axes("right", size="5%", pad="5%") +im1 = ax1.imshow(E_plot, origin="lower", extent=edge, vmin=vmin, vmax=vmax) +cb1 = fig.colorbar(im1, cax=cax1) +ax1.set_xlabel(r"$z$") +ax1.set_ylabel(r"$x$") +ax1.set_title(r"$E_z$ (sim)") # Second plot (slice at y=0) -E_plot = E_th[:,Ncell[1]//2+1,:] +E_plot = E_th[:, Ncell[1] // 2 + 1, :] vmin = E_plot.min() vmax = E_plot.max() -cax2 = make_axes_locatable(ax2).append_axes('right', size = '5%', pad = '5%') -im2 = ax2.imshow(E_plot, origin = 'lower', extent = edge, vmin = vmin, vmax = vmax) -cb2 = fig.colorbar(im2, cax = cax2) -ax2.set_xlabel(r'$z$') -ax2.set_ylabel(r'$x$') -ax2.set_title(r'$E_z$ (theory)') +cax2 = make_axes_locatable(ax2).append_axes("right", size="5%", pad="5%") +im2 = ax2.imshow(E_plot, origin="lower", extent=edge, vmin=vmin, vmax=vmax) +cb2 = fig.colorbar(im2, cax=cax2) +ax2.set_xlabel(r"$z$") +ax2.set_ylabel(r"$x$") +ax2.set_title(r"$E_z$ (theory)") # Save figure fig.tight_layout() -fig.savefig('Langmuir_multi_analysis.png', dpi = 200) +fig.savefig("Langmuir_multi_analysis.png", dpi=200) tolerance_rel = 5e-2 print("error_rel : " + str(error_rel)) print("tolerance_rel: " + str(tolerance_rel)) -assert( error_rel < tolerance_rel ) +assert error_rel < tolerance_rel # Check relative L-infinity spatial norm of rho/epsilon_0 - div(E) # with current correction (and periodic single box option) or with Vay current deposition @@ -156,43 +167,51 @@ def get_theoretical_field( field, t ): elif vay_deposition: tolerance = 1e-3 if current_correction or vay_deposition: - rho = data[('boxlib','rho')].to_ndarray() - divE = data[('boxlib','divE')].to_ndarray() - error_rel = np.amax( np.abs( divE - rho/epsilon_0 ) ) / np.amax( np.abs( rho/epsilon_0 ) ) + rho = data[("boxlib", "rho")].to_ndarray() + divE = data[("boxlib", "divE")].to_ndarray() + error_rel = np.amax(np.abs(divE - rho / epsilon_0)) / 
np.amax( + np.abs(rho / epsilon_0) + ) print("Check charge conservation:") print("error_rel = {}".format(error_rel)) print("tolerance = {}".format(tolerance)) - assert( error_rel < tolerance ) + assert error_rel < tolerance if div_cleaning: - ds_old = yt.load('Langmuir_multi_psatd_div_cleaning_plt000038') - ds_mid = yt.load('Langmuir_multi_psatd_div_cleaning_plt000039') - ds_new = yt.load(fn) # this is the last plotfile - - ad_old = ds_old.covering_grid(level = 0, left_edge = ds_old.domain_left_edge, dims = ds_old.domain_dimensions) - ad_mid = ds_mid.covering_grid(level = 0, left_edge = ds_mid.domain_left_edge, dims = ds_mid.domain_dimensions) - ad_new = ds_new.covering_grid(level = 0, left_edge = ds_new.domain_left_edge, dims = ds_new.domain_dimensions) - - rho = ad_mid['rho'].v.squeeze() - divE = ad_mid['divE'].v.squeeze() - F_old = ad_old['F'].v.squeeze() - F_new = ad_new['F'].v.squeeze() + ds_old = yt.load("Langmuir_multi_psatd_div_cleaning_plt000038") + ds_mid = yt.load("Langmuir_multi_psatd_div_cleaning_plt000039") + ds_new = yt.load(fn) # this is the last plotfile + + ad_old = ds_old.covering_grid( + level=0, left_edge=ds_old.domain_left_edge, dims=ds_old.domain_dimensions + ) + ad_mid = ds_mid.covering_grid( + level=0, left_edge=ds_mid.domain_left_edge, dims=ds_mid.domain_dimensions + ) + ad_new = ds_new.covering_grid( + level=0, left_edge=ds_new.domain_left_edge, dims=ds_new.domain_dimensions + ) + + rho = ad_mid["rho"].v.squeeze() + divE = ad_mid["divE"].v.squeeze() + F_old = ad_old["F"].v.squeeze() + F_new = ad_new["F"].v.squeeze() # Check max norm of error on dF/dt = div(E) - rho/epsilon_0 # (the time interval between the old and new data is 2*dt) dt = 1.203645751e-15 x = F_new - F_old - y = (divE - rho/epsilon_0) * 2 * dt + y = (divE - rho / epsilon_0) * 2 * dt error_rel = np.amax(np.abs(x - y)) / np.amax(np.abs(y)) tolerance = 1e-2 print("Check div(E) cleaning:") print("error_rel = {}".format(error_rel)) print("tolerance = {}".format(tolerance)) - 
assert(error_rel < tolerance) + assert error_rel < tolerance test_name = os.path.split(os.getcwd())[1] -if re.search( 'single_precision', fn ): - checksumAPI.evaluate_checksum(test_name, fn, rtol=1.e-3) +if re.search("single_precision", fn): + checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3) else: checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/langmuir/analysis_rz.py b/Examples/Tests/langmuir/analysis_rz.py index e5ae2194123..792394ea573 100755 --- a/Examples/Tests/langmuir/analysis_rz.py +++ b/Examples/Tests/langmuir/analysis_rz.py @@ -19,7 +19,7 @@ import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import yt @@ -29,7 +29,7 @@ import post_processing_utils from scipy.constants import c, e, epsilon_0, m_e -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file @@ -38,80 +38,104 @@ test_name = os.path.split(os.getcwd())[1] # Parse test name and check if current correction (psatd.current_correction) is applied -current_correction = True if re.search('current_correction', fn) else False +current_correction = True if re.search("current_correction", fn) else False # Parameters (these parameters must match the parameters in `inputs.multi.rz.rt`) epsilon = 0.01 -n = 2.e24 -w0 = 5.e-6 +n = 2.0e24 +w0 = 5.0e-6 n_osc_z = 2 -rmin = 0e-6; rmax = 20.e-6; Nr = 64 -zmin = -20e-6; zmax = 20.e-6; Nz = 128 +rmin = 0e-6 +rmax = 20.0e-6 +Nr = 64 +zmin = -20e-6 +zmax = 20.0e-6 +Nz = 128 # Wave vector of the wave -k0 = 2.*np.pi*n_osc_z/(zmax-zmin) +k0 = 2.0 * np.pi * n_osc_z / (zmax - zmin) # Plasma frequency -wp = np.sqrt((n*e**2)/(m_e*epsilon_0)) -kp = wp/c +wp = np.sqrt((n * e**2) / (m_e * epsilon_0)) +kp = wp / c -def Er( z, r, epsilon, k0, w0, wp, t) : + +def Er(z, r, epsilon, k0, w0, wp, t): """ Return the radial electric field as an array of the same length as z and r, in the 
half-plane theta=0 """ - Er_array = \ - epsilon * m_e*c**2/e * 2*r/w0**2 * \ - np.exp( -r**2/w0**2 ) * np.sin( k0*z ) * np.sin( wp*t ) - return( Er_array ) - -def Ez( z, r, epsilon, k0, w0, wp, t) : + Er_array = ( + epsilon + * m_e + * c**2 + / e + * 2 + * r + / w0**2 + * np.exp(-(r**2) / w0**2) + * np.sin(k0 * z) + * np.sin(wp * t) + ) + return Er_array + + +def Ez(z, r, epsilon, k0, w0, wp, t): """ Return the longitudinal electric field as an array of the same length as z and r, in the half-plane theta=0 """ - Ez_array = \ - - epsilon * m_e*c**2/e * k0 * \ - np.exp( -r**2/w0**2 ) * np.cos( k0*z ) * np.sin( wp*t ) - return( Ez_array ) + Ez_array = ( + -epsilon + * m_e + * c**2 + / e + * k0 + * np.exp(-(r**2) / w0**2) + * np.cos(k0 * z) + * np.sin(wp * t) + ) + return Ez_array + # Read the file ds = yt.load(fn) t0 = ds.current_time.to_value() -data = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, - dims=ds.domain_dimensions) +data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) # Get cell centered coordinates -dr = (rmax - rmin)/Nr -dz = (zmax - zmin)/Nz -coords = np.indices([Nr, Nz],'d') -rr = rmin + (coords[0] + 0.5)*dr -zz = zmin + (coords[1] + 0.5)*dz +dr = (rmax - rmin) / Nr +dz = (zmax - zmin) / Nz +coords = np.indices([Nr, Nz], "d") +rr = rmin + (coords[0] + 0.5) * dr +zz = zmin + (coords[1] + 0.5) * dz # Check the validity of the fields overall_max_error = 0 -Er_sim = data[('boxlib','Er')].to_ndarray()[:,:,0] +Er_sim = data[("boxlib", "Er")].to_ndarray()[:, :, 0] Er_th = Er(zz, rr, epsilon, k0, w0, wp, t0) -max_error = abs(Er_sim-Er_th).max()/abs(Er_th).max() -print('Er: Max error: %.2e' %(max_error)) -overall_max_error = max( overall_max_error, max_error ) +max_error = abs(Er_sim - Er_th).max() / abs(Er_th).max() +print("Er: Max error: %.2e" % (max_error)) +overall_max_error = max(overall_max_error, max_error) -Ez_sim = data[('boxlib','Ez')].to_ndarray()[:,:,0] +Ez_sim = data[("boxlib", 
"Ez")].to_ndarray()[:, :, 0] Ez_th = Ez(zz, rr, epsilon, k0, w0, wp, t0) -max_error = abs(Ez_sim-Ez_th).max()/abs(Ez_th).max() -print('Ez: Max error: %.2e' %(max_error)) -overall_max_error = max( overall_max_error, max_error ) +max_error = abs(Ez_sim - Ez_th).max() / abs(Ez_th).max() +print("Ez: Max error: %.2e" % (max_error)) +overall_max_error = max(overall_max_error, max_error) # Plot the last field from the loop (Ez at iteration 40) -plt.subplot2grid( (1,2), (0,0) ) -plt.imshow( Ez_sim ) +plt.subplot2grid((1, 2), (0, 0)) +plt.imshow(Ez_sim) plt.colorbar() -plt.title('Ez, last iteration\n(simulation)') -plt.subplot2grid( (1,2), (0,1) ) -plt.imshow( Ez_th ) +plt.title("Ez, last iteration\n(simulation)") +plt.subplot2grid((1, 2), (0, 1)) +plt.imshow(Ez_th) plt.colorbar() -plt.title('Ez, last iteration\n(theory)') +plt.title("Ez, last iteration\n(theory)") plt.tight_layout() -plt.savefig(test_name+'_analysis.png') +plt.savefig(test_name + "_analysis.png") error_rel = overall_max_error @@ -120,18 +144,18 @@ def Ez( z, r, epsilon, k0, w0, wp, t) : print("error_rel : " + str(error_rel)) print("tolerance_rel: " + str(tolerance_rel)) -assert( error_rel < tolerance_rel ) +assert error_rel < tolerance_rel # Check charge conservation (relative L-infinity norm of error) with current correction if current_correction: - divE = data[('boxlib','divE')].to_ndarray() - rho = data[('boxlib','rho')].to_ndarray() / epsilon_0 + divE = data[("boxlib", "divE")].to_ndarray() + rho = data[("boxlib", "rho")].to_ndarray() / epsilon_0 error_rel = np.amax(np.abs(divE - rho)) / max(np.amax(divE), np.amax(rho)) - tolerance = 1.e-9 + tolerance = 1.0e-9 print("Check charge conservation:") print("error_rel = {}".format(error_rel)) print("tolerance = {}".format(tolerance)) - assert( error_rel < tolerance ) + assert error_rel < tolerance ## In the final past of the test, we verify that the diagnostic particle filter function works as @@ -142,17 +166,20 @@ def Ez( z, r, epsilon, k0, w0, wp, t) : 
parser_filter_fn = "diags/diag_parser_filter000080" parser_filter_expression = "(py-pz < 0) * (r<10e-6) * (z > 0)" -post_processing_utils.check_particle_filter(fn, parser_filter_fn, parser_filter_expression, - dim, species_name) +post_processing_utils.check_particle_filter( + fn, parser_filter_fn, parser_filter_expression, dim, species_name +) uniform_filter_fn = "diags/diag_uniform_filter000080" uniform_filter_expression = "ids%3 == 0" -post_processing_utils.check_particle_filter(fn, uniform_filter_fn, uniform_filter_expression, - dim, species_name) +post_processing_utils.check_particle_filter( + fn, uniform_filter_fn, uniform_filter_expression, dim, species_name +) random_filter_fn = "diags/diag_random_filter000080" random_fraction = 0.66 -post_processing_utils.check_random_filter(fn, random_filter_fn, random_fraction, - dim, species_name) +post_processing_utils.check_random_filter( + fn, random_filter_fn, random_fraction, dim, species_name +) checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/langmuir_fluids/analysis_1d.py b/Examples/Tests/langmuir_fluids/analysis_1d.py index 2d1a8f69d1d..fa4566b6173 100755 --- a/Examples/Tests/langmuir_fluids/analysis_1d.py +++ b/Examples/Tests/langmuir_fluids/analysis_1d.py @@ -16,7 +16,7 @@ import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import yt @@ -25,7 +25,7 @@ import numpy as np from scipy.constants import c, e, epsilon_0, m_e -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file @@ -33,105 +33,120 @@ # Parameters (these parameters must match the parameters in `inputs.multi.rt`) epsilon = 0.01 -n = 4.e24 +n = 4.0e24 n_osc_z = 2 -zmin = -20e-6; zmax = 20.e-6; Nz = 128 +zmin = -20e-6 +zmax = 20.0e-6 +Nz = 128 # Wave vector of the wave -kz = 2.*np.pi*n_osc_z/(zmax-zmin) +kz = 2.0 * np.pi * n_osc_z / (zmax - zmin) # Plasma 
frequency -wp = np.sqrt((n*e**2)/(m_e*epsilon_0)) +wp = np.sqrt((n * e**2) / (m_e * epsilon_0)) -k = {'Ez':kz,'Jz':kz} -cos = {'Ez':(1,1,0), 'Jz':(1,1,0)} -cos_rho = {'rho': (1,1,1)} +k = {"Ez": kz, "Jz": kz} +cos = {"Ez": (1, 1, 0), "Jz": (1, 1, 0)} +cos_rho = {"rho": (1, 1, 1)} -def get_contribution( is_cos, k ): - du = (zmax-zmin)/Nz - u = zmin + du*( 0.5 + np.arange(Nz) ) + +def get_contribution(is_cos, k): + du = (zmax - zmin) / Nz + u = zmin + du * (0.5 + np.arange(Nz)) if is_cos == 1: - return( np.cos(k*u) ) + return np.cos(k * u) else: - return( np.sin(k*u) ) + return np.sin(k * u) + -def get_theoretical_field( field, t ): - amplitude = epsilon * (m_e*c**2*k[field])/e * np.sin(wp*t) +def get_theoretical_field(field, t): + amplitude = epsilon * (m_e * c**2 * k[field]) / e * np.sin(wp * t) cos_flag = cos[field] - z_contribution = get_contribution( cos_flag[2], kz ) + z_contribution = get_contribution(cos_flag[2], kz) E = amplitude * z_contribution - return( E ) + return E + -def get_theoretical_J_field( field, t ): +def get_theoretical_J_field(field, t): # wpdt/2 accounts for the Yee halfstep offset of the current - dt = t / 80 # SPECIFIC to config parameters! - amplitude = - epsilon_0 * wp * epsilon * (m_e*c**2*k[field])/e * np.cos(wp*t-wp*dt/2) + dt = t / 80 # SPECIFIC to config parameters! 
+ amplitude = ( + -epsilon_0 + * wp + * epsilon + * (m_e * c**2 * k[field]) + / e + * np.cos(wp * t - wp * dt / 2) + ) cos_flag = cos[field] - z_contribution = get_contribution( cos_flag[2], kz ) + z_contribution = get_contribution(cos_flag[2], kz) - J = amplitude * z_contribution + J = amplitude * z_contribution - return( J ) + return J -def get_theoretical_rho_field( field, t ): - amplitude = epsilon_0 * epsilon * (m_e*c**2*(kz*kz))/e * np.sin(wp*t) + +def get_theoretical_rho_field(field, t): + amplitude = epsilon_0 * epsilon * (m_e * c**2 * (kz * kz)) / e * np.sin(wp * t) cos_flag = cos_rho[field] - z_contribution = get_contribution( cos_flag[2], kz) + z_contribution = get_contribution(cos_flag[2], kz) rho = amplitude * z_contribution - return( rho ) + return rho + # Read the file ds = yt.load(fn) t0 = ds.current_time.to_value() -data = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, - dims=ds.domain_dimensions) +data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) # Check the validity of the fields error_rel = 0 -for field in ['Ez']: - E_sim = data[('mesh',field)].to_ndarray()[:,0,0] +for field in ["Ez"]: + E_sim = data[("mesh", field)].to_ndarray()[:, 0, 0] E_th = get_theoretical_field(field, t0) - max_error = abs(E_sim-E_th).max()/abs(E_th).max() - print('%s: Max error: %.2e' %(field,max_error)) - error_rel = max( error_rel, max_error ) + max_error = abs(E_sim - E_th).max() / abs(E_th).max() + print("%s: Max error: %.2e" % (field, max_error)) + error_rel = max(error_rel, max_error) # Check the validity of the currents -for field in ['Jz']: - J_sim = data[('mesh',field)].to_ndarray()[:,0,0] +for field in ["Jz"]: + J_sim = data[("mesh", field)].to_ndarray()[:, 0, 0] J_th = get_theoretical_J_field(field, t0) - max_error = abs(J_sim-J_th).max()/abs(J_th).max() - print('%s: Max error: %.2e' %(field,max_error)) - error_rel = max( error_rel, max_error ) + max_error = abs(J_sim - J_th).max() / abs(J_th).max() + 
print("%s: Max error: %.2e" % (field, max_error)) + error_rel = max(error_rel, max_error) # Check the validity of the charge -for field in ['rho']: - rho_sim = data[('boxlib',field)].to_ndarray()[:,0,0] +for field in ["rho"]: + rho_sim = data[("boxlib", field)].to_ndarray()[:, 0, 0] rho_th = get_theoretical_rho_field(field, t0) - max_error = abs(rho_sim-rho_th).max()/abs(rho_th).max() - print('%s: Max error: %.2e' %(field,max_error)) - error_rel = max( error_rel, max_error ) + max_error = abs(rho_sim - rho_th).max() / abs(rho_th).max() + print("%s: Max error: %.2e" % (field, max_error)) + error_rel = max(error_rel, max_error) # Plot the last field from the loop (Ez at iteration 80) -plt.subplot2grid( (1,2), (0,0) ) -plt.plot( E_sim ) -#plt.colorbar() -plt.title('Ez, last iteration\n(simulation)') -plt.subplot2grid( (1,2), (0,1) ) -plt.plot( E_th ) -#plt.colorbar() -plt.title('Ez, last iteration\n(theory)') +plt.subplot2grid((1, 2), (0, 0)) +plt.plot(E_sim) +# plt.colorbar() +plt.title("Ez, last iteration\n(simulation)") +plt.subplot2grid((1, 2), (0, 1)) +plt.plot(E_th) +# plt.colorbar() +plt.title("Ez, last iteration\n(theory)") plt.tight_layout() -plt.savefig('langmuir_fluid_multi_1d_analysis.png') +plt.savefig("langmuir_fluid_multi_1d_analysis.png") tolerance_rel = 0.05 print("error_rel : " + str(error_rel)) print("tolerance_rel: " + str(tolerance_rel)) -assert( error_rel < tolerance_rel ) +assert error_rel < tolerance_rel test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/langmuir_fluids/analysis_2d.py b/Examples/Tests/langmuir_fluids/analysis_2d.py index f7244f87137..d7ecca986e4 100755 --- a/Examples/Tests/langmuir_fluids/analysis_2d.py +++ b/Examples/Tests/langmuir_fluids/analysis_2d.py @@ -25,7 +25,7 @@ import numpy as np from scipy.constants import c, e, epsilon_0, m_e -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") 
import checksumAPI # this will be the name of the plot file @@ -33,127 +33,150 @@ # Parameters (these parameters must match the parameters in `inputs.multi.rt`) epsilon = 0.01 -n = 4.e24 +n = 4.0e24 n_osc_x = 2 n_osc_z = 2 -xmin = -20e-6; xmax = 20.e-6; Nx = 128 -zmin = -20e-6; zmax = 20.e-6; Nz = 128 +xmin = -20e-6 +xmax = 20.0e-6 +Nx = 128 +zmin = -20e-6 +zmax = 20.0e-6 +Nz = 128 # Wave vector of the wave -kx = 2.*np.pi*n_osc_x/(xmax-xmin) -kz = 2.*np.pi*n_osc_z/(zmax-zmin) +kx = 2.0 * np.pi * n_osc_x / (xmax - xmin) +kz = 2.0 * np.pi * n_osc_z / (zmax - zmin) # Plasma frequency -wp = np.sqrt((n*e**2)/(m_e*epsilon_0)) +wp = np.sqrt((n * e**2) / (m_e * epsilon_0)) -k = {'Ex':kx, 'Ez':kz, 'Jx':kx, 'Jz':kz} -cos = {'Ex': (0,1,1), 'Ez':(1,1,0),'Jx': (0,1,1), 'Jz':(1,1,0)} -cos_rho = {'rho': (1,1,1)} +k = {"Ex": kx, "Ez": kz, "Jx": kx, "Jz": kz} +cos = {"Ex": (0, 1, 1), "Ez": (1, 1, 0), "Jx": (0, 1, 1), "Jz": (1, 1, 0)} +cos_rho = {"rho": (1, 1, 1)} -def get_contribution( is_cos, k ): - du = (xmax-xmin)/Nx - u = xmin + du*( 0.5 + np.arange(Nx) ) + +def get_contribution(is_cos, k): + du = (xmax - xmin) / Nx + u = xmin + du * (0.5 + np.arange(Nx)) if is_cos == 1: - return( np.cos(k*u) ) + return np.cos(k * u) else: - return( np.sin(k*u) ) + return np.sin(k * u) + -def get_theoretical_field( field, t ): - amplitude = epsilon * (m_e*c**2*k[field])/e * np.sin(wp*t) +def get_theoretical_field(field, t): + amplitude = epsilon * (m_e * c**2 * k[field]) / e * np.sin(wp * t) cos_flag = cos[field] - x_contribution = get_contribution( cos_flag[0], kx ) - z_contribution = get_contribution( cos_flag[2], kz ) + x_contribution = get_contribution(cos_flag[0], kx) + z_contribution = get_contribution(cos_flag[2], kz) + + E = amplitude * x_contribution[:, np.newaxis] * z_contribution[np.newaxis, :] - E = amplitude * x_contribution[:, np.newaxis ] \ - * z_contribution[np.newaxis, :] + return E - return( E ) -def get_theoretical_J_field( field, t ): +def get_theoretical_J_field(field, t): 
# wpdt/2 accounts for the Yee halfstep offset of the current - dt = t / 40 # SPECIFIC to config parameters! - amplitude = - epsilon_0 * wp * epsilon * (m_e*c**2*k[field])/e * np.cos(wp*t-wp*dt/2) + dt = t / 40 # SPECIFIC to config parameters! + amplitude = ( + -epsilon_0 + * wp + * epsilon + * (m_e * c**2 * k[field]) + / e + * np.cos(wp * t - wp * dt / 2) + ) cos_flag = cos[field] - x_contribution = get_contribution( cos_flag[0], kx ) - z_contribution = get_contribution( cos_flag[2], kz ) + x_contribution = get_contribution(cos_flag[0], kx) + z_contribution = get_contribution(cos_flag[2], kz) - J = amplitude * x_contribution[:, np.newaxis] \ - * z_contribution[np.newaxis, :] + J = amplitude * x_contribution[:, np.newaxis] * z_contribution[np.newaxis, :] - return( J ) + return J -def get_theoretical_rho_field( field, t ): - amplitude = epsilon_0 * epsilon * (m_e*c**2*(kx*kx+kz*kz))/e * np.sin(wp*t) + +def get_theoretical_rho_field(field, t): + amplitude = ( + epsilon_0 * epsilon * (m_e * c**2 * (kx * kx + kz * kz)) / e * np.sin(wp * t) + ) cos_flag = cos_rho[field] - x_contribution = get_contribution( cos_flag[0], kx ) - z_contribution = get_contribution( cos_flag[2], kz ) + x_contribution = get_contribution(cos_flag[0], kx) + z_contribution = get_contribution(cos_flag[2], kz) + + rho = amplitude * x_contribution[:, np.newaxis] * z_contribution[np.newaxis, :] - rho = amplitude * x_contribution[:, np.newaxis] \ - * z_contribution[ np.newaxis, :] + return rho - return( rho ) # Read the file ds = yt.load(fn) t0 = ds.current_time.to_value() -data = ds.covering_grid(level = 0, left_edge = ds.domain_left_edge, dims = ds.domain_dimensions) -edge = np.array([(ds.domain_left_edge[1]).item(), (ds.domain_right_edge[1]).item(), \ - (ds.domain_left_edge[0]).item(), (ds.domain_right_edge[0]).item()]) +data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +edge = np.array( + [ + (ds.domain_left_edge[1]).item(), + 
(ds.domain_right_edge[1]).item(), + (ds.domain_left_edge[0]).item(), + (ds.domain_right_edge[0]).item(), + ] +) # Check the validity of the fields error_rel = 0 -for field in ['Ex', 'Ez']: - E_sim = data[('mesh',field)].to_ndarray()[:,:,0] +for field in ["Ex", "Ez"]: + E_sim = data[("mesh", field)].to_ndarray()[:, :, 0] E_th = get_theoretical_field(field, t0) - max_error = abs(E_sim-E_th).max()/abs(E_th).max() - print('%s: Max error: %.2e' %(field,max_error)) - error_rel = max( error_rel, max_error ) + max_error = abs(E_sim - E_th).max() / abs(E_th).max() + print("%s: Max error: %.2e" % (field, max_error)) + error_rel = max(error_rel, max_error) # Check the validity of the currents -for field in ['Jx', 'Jz']: - J_sim = data[('mesh',field)].to_ndarray()[:,:,0] +for field in ["Jx", "Jz"]: + J_sim = data[("mesh", field)].to_ndarray()[:, :, 0] J_th = get_theoretical_J_field(field, t0) - max_error = abs(J_sim-J_th).max()/abs(J_th).max() - print('%s: Max error: %.2e' %(field,max_error)) - error_rel = max( error_rel, max_error ) + max_error = abs(J_sim - J_th).max() / abs(J_th).max() + print("%s: Max error: %.2e" % (field, max_error)) + error_rel = max(error_rel, max_error) # Check the validity of the charge -for field in ['rho']: - rho_sim = data[('boxlib',field)].to_ndarray()[:,:,0] +for field in ["rho"]: + rho_sim = data[("boxlib", field)].to_ndarray()[:, :, 0] rho_th = get_theoretical_rho_field(field, t0) - max_error = abs(rho_sim-rho_th).max()/abs(rho_th).max() - print('%s: Max error: %.2e' %(field,max_error)) - error_rel = max( error_rel, max_error ) + max_error = abs(rho_sim - rho_th).max() / abs(rho_th).max() + print("%s: Max error: %.2e" % (field, max_error)) + error_rel = max(error_rel, max_error) # Plot the last field from the loop (Ez at iteration 40) -fig, (ax1, ax2) = plt.subplots(1, 2, dpi = 100) +fig, (ax1, ax2) = plt.subplots(1, 2, dpi=100) # First plot vmin = E_sim.min() vmax = E_sim.max() -cax1 = make_axes_locatable(ax1).append_axes('right', size = 
'5%', pad = '5%') -im1 = ax1.imshow(E_sim, origin = 'lower', extent = edge, vmin = vmin, vmax = vmax) -cb1 = fig.colorbar(im1, cax = cax1) -ax1.set_xlabel(r'$z$') -ax1.set_ylabel(r'$x$') -ax1.set_title(r'$E_z$ (sim)') +cax1 = make_axes_locatable(ax1).append_axes("right", size="5%", pad="5%") +im1 = ax1.imshow(E_sim, origin="lower", extent=edge, vmin=vmin, vmax=vmax) +cb1 = fig.colorbar(im1, cax=cax1) +ax1.set_xlabel(r"$z$") +ax1.set_ylabel(r"$x$") +ax1.set_title(r"$E_z$ (sim)") # Second plot vmin = E_th.min() vmax = E_th.max() -cax2 = make_axes_locatable(ax2).append_axes('right', size = '5%', pad = '5%') -im2 = ax2.imshow(E_th, origin = 'lower', extent = edge, vmin = vmin, vmax = vmax) -cb2 = fig.colorbar(im2, cax = cax2) -ax2.set_xlabel(r'$z$') -ax2.set_ylabel(r'$x$') -ax2.set_title(r'$E_z$ (theory)') +cax2 = make_axes_locatable(ax2).append_axes("right", size="5%", pad="5%") +im2 = ax2.imshow(E_th, origin="lower", extent=edge, vmin=vmin, vmax=vmax) +cb2 = fig.colorbar(im2, cax=cax2) +ax2.set_xlabel(r"$z$") +ax2.set_ylabel(r"$x$") +ax2.set_title(r"$E_z$ (theory)") # Save figure fig.tight_layout() -fig.savefig('Langmuir_fluid_multi_2d_analysis.png', dpi = 200) +fig.savefig("Langmuir_fluid_multi_2d_analysis.png", dpi=200) tolerance_rel = 0.05 print("error_rel : " + str(error_rel)) print("tolerance_rel: " + str(tolerance_rel)) -assert( error_rel < tolerance_rel ) +assert error_rel < tolerance_rel test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/langmuir_fluids/analysis_3d.py b/Examples/Tests/langmuir_fluids/analysis_3d.py index 686907f103a..321b528b6cb 100755 --- a/Examples/Tests/langmuir_fluids/analysis_3d.py +++ b/Examples/Tests/langmuir_fluids/analysis_3d.py @@ -26,7 +26,7 @@ import numpy as np from scipy.constants import c, e, epsilon_0, m_e -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be 
the name of the plot file @@ -34,146 +34,188 @@ # Parameters (these parameters must match the parameters in `inputs.multi.rt`) epsilon = 0.01 -n = 4.e24 +n = 4.0e24 n_osc_x = 2 n_osc_y = 2 n_osc_z = 2 -lo = [-20.e-6, -20.e-6, -20.e-6] -hi = [ 20.e-6, 20.e-6, 20.e-6] +lo = [-20.0e-6, -20.0e-6, -20.0e-6] +hi = [20.0e-6, 20.0e-6, 20.0e-6] Ncell = [64, 64, 64] # Wave vector of the wave -kx = 2.*np.pi*n_osc_x/(hi[0]-lo[0]) -ky = 2.*np.pi*n_osc_y/(hi[1]-lo[1]) -kz = 2.*np.pi*n_osc_z/(hi[2]-lo[2]) +kx = 2.0 * np.pi * n_osc_x / (hi[0] - lo[0]) +ky = 2.0 * np.pi * n_osc_y / (hi[1] - lo[1]) +kz = 2.0 * np.pi * n_osc_z / (hi[2] - lo[2]) # Plasma frequency -wp = np.sqrt((n*e**2)/(m_e*epsilon_0)) - -k = {'Ex':kx, 'Ey':ky, 'Ez':kz, 'Jx':kx, 'Jy':ky, 'Jz':kz} -cos = {'Ex': (0,1,1), 'Ey':(1,0,1), 'Ez':(1,1,0),'Jx': (0,1,1), 'Jy':(1,0,1), 'Jz':(1,1,0)} -cos_rho = {'rho': (1,1,1)} - -def get_contribution( is_cos, k, idim ): - du = (hi[idim]-lo[idim])/Ncell[idim] - u = lo[idim] + du*( 0.5 + np.arange(Ncell[idim]) ) +wp = np.sqrt((n * e**2) / (m_e * epsilon_0)) + +k = {"Ex": kx, "Ey": ky, "Ez": kz, "Jx": kx, "Jy": ky, "Jz": kz} +cos = { + "Ex": (0, 1, 1), + "Ey": (1, 0, 1), + "Ez": (1, 1, 0), + "Jx": (0, 1, 1), + "Jy": (1, 0, 1), + "Jz": (1, 1, 0), +} +cos_rho = {"rho": (1, 1, 1)} + + +def get_contribution(is_cos, k, idim): + du = (hi[idim] - lo[idim]) / Ncell[idim] + u = lo[idim] + du * (0.5 + np.arange(Ncell[idim])) if is_cos[idim] == 1: - return( np.cos(k*u) ) + return np.cos(k * u) else: - return( np.sin(k*u) ) + return np.sin(k * u) -def get_theoretical_field( field, t ): - amplitude = epsilon * (m_e*c**2*k[field])/e * np.sin(wp*t) + +def get_theoretical_field(field, t): + amplitude = epsilon * (m_e * c**2 * k[field]) / e * np.sin(wp * t) cos_flag = cos[field] - x_contribution = get_contribution( cos_flag, kx, 0 ) - y_contribution = get_contribution( cos_flag, ky, 1 ) - z_contribution = get_contribution( cos_flag, kz, 2 ) + x_contribution = get_contribution(cos_flag, kx, 0) + 
y_contribution = get_contribution(cos_flag, ky, 1) + z_contribution = get_contribution(cos_flag, kz, 2) + + E = ( + amplitude + * x_contribution[:, np.newaxis, np.newaxis] + * y_contribution[np.newaxis, :, np.newaxis] + * z_contribution[np.newaxis, np.newaxis, :] + ) - E = amplitude * x_contribution[:, np.newaxis, np.newaxis] \ - * y_contribution[np.newaxis, :, np.newaxis] \ - * z_contribution[np.newaxis, np.newaxis, :] + return E - return( E ) -def get_theoretical_J_field( field, t ): +def get_theoretical_J_field(field, t): # wpdt/2 accounts for the Yee halfstep offset of the current - dt = t / 40 # SPECIFIC to config parameters! - amplitude = - epsilon_0 * wp * epsilon * (m_e*c**2*k[field])/e * np.cos(wp*t-wp*dt/2) + dt = t / 40 # SPECIFIC to config parameters! + amplitude = ( + -epsilon_0 + * wp + * epsilon + * (m_e * c**2 * k[field]) + / e + * np.cos(wp * t - wp * dt / 2) + ) cos_flag = cos[field] - x_contribution = get_contribution( cos_flag, kx, 0 ) - y_contribution = get_contribution( cos_flag, ky, 1 ) - z_contribution = get_contribution( cos_flag, kz, 2 ) - - J = amplitude * x_contribution[:, np.newaxis, np.newaxis] \ - * y_contribution[np.newaxis, :, np.newaxis] \ - * z_contribution[np.newaxis, np.newaxis, :] - - return( J ) - -def get_theoretical_rho_field( field, t ): - amplitude = epsilon_0 * epsilon * (m_e*c**2*(kx*kx+ky*ky+kz*kz))/e * np.sin(wp*t) + x_contribution = get_contribution(cos_flag, kx, 0) + y_contribution = get_contribution(cos_flag, ky, 1) + z_contribution = get_contribution(cos_flag, kz, 2) + + J = ( + amplitude + * x_contribution[:, np.newaxis, np.newaxis] + * y_contribution[np.newaxis, :, np.newaxis] + * z_contribution[np.newaxis, np.newaxis, :] + ) + + return J + + +def get_theoretical_rho_field(field, t): + amplitude = ( + epsilon_0 + * epsilon + * (m_e * c**2 * (kx * kx + ky * ky + kz * kz)) + / e + * np.sin(wp * t) + ) cos_flag = cos_rho[field] - x_contribution = get_contribution( cos_flag, kx, 0 ) - y_contribution = 
get_contribution( cos_flag, ky, 1 ) - z_contribution = get_contribution( cos_flag, kz, 2 ) + x_contribution = get_contribution(cos_flag, kx, 0) + y_contribution = get_contribution(cos_flag, ky, 1) + z_contribution = get_contribution(cos_flag, kz, 2) + + rho = ( + amplitude + * x_contribution[:, np.newaxis, np.newaxis] + * y_contribution[np.newaxis, :, np.newaxis] + * z_contribution[np.newaxis, np.newaxis, :] + ) - rho = amplitude * x_contribution[:, np.newaxis, np.newaxis] \ - * y_contribution[np.newaxis, :, np.newaxis] \ - * z_contribution[np.newaxis, np.newaxis, :] + return rho - return( rho ) # Read the file ds = yt.load(fn) t0 = ds.current_time.to_value() -data = ds.covering_grid(level = 0, left_edge = ds.domain_left_edge, dims = ds.domain_dimensions) -edge = np.array([(ds.domain_left_edge[2]).item(), (ds.domain_right_edge[2]).item(), \ - (ds.domain_left_edge[0]).item(), (ds.domain_right_edge[0]).item()]) +data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +edge = np.array( + [ + (ds.domain_left_edge[2]).item(), + (ds.domain_right_edge[2]).item(), + (ds.domain_left_edge[0]).item(), + (ds.domain_right_edge[0]).item(), + ] +) # Check the validity of the fields error_rel = 0 -for field in ['Ex', 'Ey', 'Ez']: - E_sim = data[('mesh',field)].to_ndarray() +for field in ["Ex", "Ey", "Ez"]: + E_sim = data[("mesh", field)].to_ndarray() E_th = get_theoretical_field(field, t0) - max_error = abs(E_sim-E_th).max()/abs(E_th).max() - print('%s: Max error: %.2e' %(field,max_error)) - error_rel = max( error_rel, max_error ) + max_error = abs(E_sim - E_th).max() / abs(E_th).max() + print("%s: Max error: %.2e" % (field, max_error)) + error_rel = max(error_rel, max_error) # Check the validity of the currents -for field in ['Jx', 'Jy', 'Jz']: - J_sim = data[('mesh',field)].to_ndarray() +for field in ["Jx", "Jy", "Jz"]: + J_sim = data[("mesh", field)].to_ndarray() J_th = get_theoretical_J_field(field, t0) - max_error = 
abs(J_sim-J_th).max()/abs(J_th).max() - print('%s: Max error: %.2e' %(field,max_error)) - error_rel = max( error_rel, max_error ) + max_error = abs(J_sim - J_th).max() / abs(J_th).max() + print("%s: Max error: %.2e" % (field, max_error)) + error_rel = max(error_rel, max_error) # Check the validity of the charge -for field in ['rho']: - rho_sim = data[('boxlib',field)].to_ndarray() +for field in ["rho"]: + rho_sim = data[("boxlib", field)].to_ndarray() rho_th = get_theoretical_rho_field(field, t0) - max_error = abs(rho_sim-rho_th).max()/abs(rho_th).max() - print('%s: Max error: %.2e' %(field,max_error)) - error_rel = max( error_rel, max_error ) + max_error = abs(rho_sim - rho_th).max() / abs(rho_th).max() + print("%s: Max error: %.2e" % (field, max_error)) + error_rel = max(error_rel, max_error) # Plot the last field from the loop (Ez at iteration 40) -fig, (ax1, ax2) = plt.subplots(1, 2, dpi = 100) +fig, (ax1, ax2) = plt.subplots(1, 2, dpi=100) # First plot (slice at y=0) -E_plot = E_sim[:,Ncell[1]//2+1,:] +E_plot = E_sim[:, Ncell[1] // 2 + 1, :] vmin = E_plot.min() vmax = E_plot.max() -cax1 = make_axes_locatable(ax1).append_axes('right', size = '5%', pad = '5%') -im1 = ax1.imshow(E_plot, origin = 'lower', extent = edge, vmin = vmin, vmax = vmax) -cb1 = fig.colorbar(im1, cax = cax1) -ax1.set_xlabel(r'$z$') -ax1.set_ylabel(r'$x$') -ax1.set_title(r'$E_z$ (sim)') +cax1 = make_axes_locatable(ax1).append_axes("right", size="5%", pad="5%") +im1 = ax1.imshow(E_plot, origin="lower", extent=edge, vmin=vmin, vmax=vmax) +cb1 = fig.colorbar(im1, cax=cax1) +ax1.set_xlabel(r"$z$") +ax1.set_ylabel(r"$x$") +ax1.set_title(r"$E_z$ (sim)") # Second plot (slice at y=0) -E_plot = E_th[:,Ncell[1]//2+1,:] +E_plot = E_th[:, Ncell[1] // 2 + 1, :] vmin = E_plot.min() vmax = E_plot.max() -cax2 = make_axes_locatable(ax2).append_axes('right', size = '5%', pad = '5%') -im2 = ax2.imshow(E_plot, origin = 'lower', extent = edge, vmin = vmin, vmax = vmax) -cb2 = fig.colorbar(im2, cax = cax2) 
-ax2.set_xlabel(r'$z$') -ax2.set_ylabel(r'$x$') -ax2.set_title(r'$E_z$ (theory)') +cax2 = make_axes_locatable(ax2).append_axes("right", size="5%", pad="5%") +im2 = ax2.imshow(E_plot, origin="lower", extent=edge, vmin=vmin, vmax=vmax) +cb2 = fig.colorbar(im2, cax=cax2) +ax2.set_xlabel(r"$z$") +ax2.set_ylabel(r"$x$") +ax2.set_title(r"$E_z$ (theory)") # Save figure fig.tight_layout() -fig.savefig('Langmuir_fluid_multi_analysis.png', dpi = 200) +fig.savefig("Langmuir_fluid_multi_analysis.png", dpi=200) tolerance_rel = 5e-2 print("error_rel : " + str(error_rel)) print("tolerance_rel: " + str(tolerance_rel)) -assert( error_rel < tolerance_rel ) +assert error_rel < tolerance_rel test_name = os.path.split(os.getcwd())[1] -if re.search( 'single_precision', fn ): - checksumAPI.evaluate_checksum(test_name, fn, rtol=1.e-3) +if re.search("single_precision", fn): + checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3) else: checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/langmuir_fluids/analysis_rz.py b/Examples/Tests/langmuir_fluids/analysis_rz.py index 108d054e75a..f629ddc6626 100755 --- a/Examples/Tests/langmuir_fluids/analysis_rz.py +++ b/Examples/Tests/langmuir_fluids/analysis_rz.py @@ -19,7 +19,7 @@ import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import yt @@ -28,7 +28,7 @@ import numpy as np from scipy.constants import c, e, epsilon_0, m_e -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file @@ -37,130 +37,185 @@ test_name = os.path.split(os.getcwd())[1] # Parse test name and check if current correction (psatd.current_correction) is applied -current_correction = True if re.search('current_correction', fn) else False +current_correction = True if re.search("current_correction", fn) else False # Parameters (these parameters must match the parameters in 
`inputs.multi.rz.rt`) epsilon = 0.01 -n = 2.e24 -w0 = 5.e-6 +n = 2.0e24 +w0 = 5.0e-6 n_osc_z = 2 -rmin = 0e-6; rmax = 20.e-6; Nr = 64 -zmin = -20e-6; zmax = 20.e-6; Nz = 128 +rmin = 0e-6 +rmax = 20.0e-6 +Nr = 64 +zmin = -20e-6 +zmax = 20.0e-6 +Nz = 128 # Wave vector of the wave -k0 = 2.*np.pi*n_osc_z/(zmax-zmin) +k0 = 2.0 * np.pi * n_osc_z / (zmax - zmin) # Plasma frequency -wp = np.sqrt((n*e**2)/(m_e*epsilon_0)) -kp = wp/c +wp = np.sqrt((n * e**2) / (m_e * epsilon_0)) +kp = wp / c -def Er( z, r, epsilon, k0, w0, wp, t) : + +def Er(z, r, epsilon, k0, w0, wp, t): """ Return the radial electric field as an array of the same length as z and r, in the half-plane theta=0 """ - Er_array = \ - epsilon * m_e*c**2/e * 2*r/w0**2 * \ - np.exp( -r**2/w0**2 ) * np.sin( k0*z ) * np.sin( wp*t ) - return( Er_array ) - -def Ez( z, r, epsilon, k0, w0, wp, t) : + Er_array = ( + epsilon + * m_e + * c**2 + / e + * 2 + * r + / w0**2 + * np.exp(-(r**2) / w0**2) + * np.sin(k0 * z) + * np.sin(wp * t) + ) + return Er_array + + +def Ez(z, r, epsilon, k0, w0, wp, t): """ Return the longitudinal electric field as an array of the same length as z and r, in the half-plane theta=0 """ - Ez_array = \ - - epsilon * m_e*c**2/e * k0 * \ - np.exp( -r**2/w0**2 ) * np.cos( k0*z ) * np.sin( wp*t ) - return( Ez_array ) - -def Jr( z, r, epsilon, k0, w0, wp, t) : + Ez_array = ( + -epsilon + * m_e + * c**2 + / e + * k0 + * np.exp(-(r**2) / w0**2) + * np.cos(k0 * z) + * np.sin(wp * t) + ) + return Ez_array + + +def Jr(z, r, epsilon, k0, w0, wp, t): """ Return the radial current density as an array of the same length as z and r, in the half-plane theta=0 """ - dt = t / 80 # SPECIFIC to config parameters! - Jr_array = \ - - epsilon_0 * epsilon * m_e*c**2/e * 2*r/w0**2 * \ - np.exp( -r**2/w0**2 ) * np.sin( k0*z ) * np.cos( wp*t -wp*dt/2) * wp #phase_error = wp*dt/2 - return( Jr_array ) - -def Jz( z, r, epsilon, k0, w0, wp, t) : + dt = t / 80 # SPECIFIC to config parameters! 
+ Jr_array = ( + -epsilon_0 + * epsilon + * m_e + * c**2 + / e + * 2 + * r + / w0**2 + * np.exp(-(r**2) / w0**2) + * np.sin(k0 * z) + * np.cos(wp * t - wp * dt / 2) + * wp + ) # phase_error = wp*dt/2 + return Jr_array + + +def Jz(z, r, epsilon, k0, w0, wp, t): """ Return the longitudinal current density as an array of the same length as z and r, in the half-plane theta=0 """ - dt = t / 80 # SPECIFIC to config parameters! - Jz_array = \ - epsilon_0 * epsilon * m_e*c**2/e * k0 * \ - np.exp( -r**2/w0**2 ) * np.cos( k0*z ) * np.cos( wp*t -wp*dt/2) * wp #phase_error = wp*dt/2 - return( Jz_array ) - -def rho( z, r, epsilon, k0, w0, wp, t) : + dt = t / 80 # SPECIFIC to config parameters! + Jz_array = ( + epsilon_0 + * epsilon + * m_e + * c**2 + / e + * k0 + * np.exp(-(r**2) / w0**2) + * np.cos(k0 * z) + * np.cos(wp * t - wp * dt / 2) + * wp + ) # phase_error = wp*dt/2 + return Jz_array + + +def rho(z, r, epsilon, k0, w0, wp, t): """ Return the charge density as an array of the same length as z and r, in the half-plane theta=0 """ - rho_array = \ - epsilon_0 * epsilon * m_e*c**2/e * np.sin( wp*t ) * np.sin( k0*z ) * np.exp( -r**2/w0**2 ) * \ - ((4.0/(w0**2))*(1 - (r**2)/(w0**2)) + k0**2) - return( rho_array ) + rho_array = ( + epsilon_0 + * epsilon + * m_e + * c**2 + / e + * np.sin(wp * t) + * np.sin(k0 * z) + * np.exp(-(r**2) / w0**2) + * ((4.0 / (w0**2)) * (1 - (r**2) / (w0**2)) + k0**2) + ) + return rho_array + # Read the file ds = yt.load(fn) t0 = ds.current_time.to_value() -data = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, - dims=ds.domain_dimensions) +data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) # Get cell centered coordinates -dr = (rmax - rmin)/Nr -dz = (zmax - zmin)/Nz -coords = np.indices([Nr, Nz],'d') -rr = rmin + (coords[0] + 0.5)*dr -zz = zmin + (coords[1] + 0.5)*dz +dr = (rmax - rmin) / Nr +dz = (zmax - zmin) / Nz +coords = np.indices([Nr, Nz], "d") +rr = rmin + (coords[0] + 0.5) * dr +zz = zmin 
+ (coords[1] + 0.5) * dz # Check the validity of the fields overall_max_error = 0 -Er_sim = data[('boxlib','Er')].to_ndarray()[:,:,0] +Er_sim = data[("boxlib", "Er")].to_ndarray()[:, :, 0] Er_th = Er(zz, rr, epsilon, k0, w0, wp, t0) -max_error = abs(Er_sim-Er_th).max()/abs(Er_th).max() -print('Er: Max error: %.2e' %(max_error)) -overall_max_error = max( overall_max_error, max_error ) +max_error = abs(Er_sim - Er_th).max() / abs(Er_th).max() +print("Er: Max error: %.2e" % (max_error)) +overall_max_error = max(overall_max_error, max_error) -Ez_sim = data[('boxlib','Ez')].to_ndarray()[:,:,0] +Ez_sim = data[("boxlib", "Ez")].to_ndarray()[:, :, 0] Ez_th = Ez(zz, rr, epsilon, k0, w0, wp, t0) -max_error = abs(Ez_sim-Ez_th).max()/abs(Ez_th).max() -print('Ez: Max error: %.2e' %(max_error)) -overall_max_error = max( overall_max_error, max_error ) +max_error = abs(Ez_sim - Ez_th).max() / abs(Ez_th).max() +print("Ez: Max error: %.2e" % (max_error)) +overall_max_error = max(overall_max_error, max_error) -Jr_sim = data[('boxlib','jr')].to_ndarray()[:,:,0] +Jr_sim = data[("boxlib", "jr")].to_ndarray()[:, :, 0] Jr_th = Jr(zz, rr, epsilon, k0, w0, wp, t0) -max_error = abs(Jr_sim-Jr_th).max()/abs(Jr_th).max() -print('Jr: Max error: %.2e' %(max_error)) -overall_max_error = max( overall_max_error, max_error ) +max_error = abs(Jr_sim - Jr_th).max() / abs(Jr_th).max() +print("Jr: Max error: %.2e" % (max_error)) +overall_max_error = max(overall_max_error, max_error) -Jz_sim = data[('boxlib','jz')].to_ndarray()[:,:,0] +Jz_sim = data[("boxlib", "jz")].to_ndarray()[:, :, 0] Jz_th = Jz(zz, rr, epsilon, k0, w0, wp, t0) -max_error = abs(Jz_sim-Jz_th).max()/abs(Jz_th).max() -print('Jz: Max error: %.2e' %(max_error)) -overall_max_error = max( overall_max_error, max_error ) +max_error = abs(Jz_sim - Jz_th).max() / abs(Jz_th).max() +print("Jz: Max error: %.2e" % (max_error)) +overall_max_error = max(overall_max_error, max_error) -rho_sim = data[('boxlib','rho')].to_ndarray()[:,:,0] +rho_sim = 
data[("boxlib", "rho")].to_ndarray()[:, :, 0] rho_th = rho(zz, rr, epsilon, k0, w0, wp, t0) -max_error = abs(rho_sim-rho_th).max()/abs(rho_th).max() -print('rho: Max error: %.2e' %(max_error)) -overall_max_error = max( overall_max_error, max_error ) +max_error = abs(rho_sim - rho_th).max() / abs(rho_th).max() +print("rho: Max error: %.2e" % (max_error)) +overall_max_error = max(overall_max_error, max_error) # Plot the last field from the loop (Ez at iteration 40) -plt.subplot2grid( (1,2), (0,0) ) -plt.imshow( Ez_sim ) +plt.subplot2grid((1, 2), (0, 0)) +plt.imshow(Ez_sim) plt.colorbar() -plt.title('Ez, last iteration\n(simulation)') -plt.subplot2grid( (1,2), (0,1) ) -plt.imshow( Ez_th ) +plt.title("Ez, last iteration\n(simulation)") +plt.subplot2grid((1, 2), (0, 1)) +plt.imshow(Ez_th) plt.colorbar() -plt.title('Ez, last iteration\n(theory)') +plt.title("Ez, last iteration\n(theory)") plt.tight_layout() -plt.savefig(test_name+'_analysis.png') +plt.savefig(test_name + "_analysis.png") error_rel = overall_max_error @@ -169,6 +224,6 @@ def rho( z, r, epsilon, k0, w0, wp, t) : print("error_rel : " + str(error_rel)) print("tolerance_rel: " + str(tolerance_rel)) -assert( error_rel < tolerance_rel ) +assert error_rel < tolerance_rel checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/laser_injection/analysis_1d.py b/Examples/Tests/laser_injection/analysis_1d.py index 5aca707ff3a..9215125427d 100755 --- a/Examples/Tests/laser_injection/analysis_1d.py +++ b/Examples/Tests/laser_injection/analysis_1d.py @@ -18,12 +18,12 @@ import matplotlib import yt -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import numpy as np from scipy.signal import hilbert -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # Maximum acceptable error for this test @@ -33,102 +33,114 @@ small_num = 1.0e-8 # Physical parameters -um = 1.e-6 -fs = 1.e-15 +um = 
1.0e-6 +fs = 1.0e-15 c = 299792458 # Parameters of the gaussian beam -wavelength = 1.*um -w0 = 5.*um -tt = 10.*fs -t_c = 24.*fs +wavelength = 1.0 * um +w0 = 5.0 * um +tt = 10.0 * fs +t_c = 24.0 * fs E_max = 4e12 # laser direction -dir_vector = np.array([0,0,1.0]) +dir_vector = np.array([0, 0, 1.0]) dir_vector /= np.linalg.norm(dir_vector) # polarization vector -pol_vector = np.array([1.0,1.0,0.0]) +pol_vector = np.array([1.0, 1.0, 0.0]) pol_vector /= np.linalg.norm(pol_vector) + # Calculates the envelope of a Gaussian beam -def gauss_env(T,Z): - '''Function to compute the theory for the envelope - ''' - inv_tau2 = 1./tt/tt - exp_arg = - inv_tau2 / c/c * (Z-T*c)*(Z-T*c) +def gauss_env(T, Z): + """Function to compute the theory for the envelope""" + inv_tau2 = 1.0 / tt / tt + exp_arg = -inv_tau2 / c / c * (Z - T * c) * (Z - T * c) return E_max * np.real(np.exp(exp_arg)) + # Checks envelope and central frequency for a given laser component -def check_component(data, component, t_env_theory, coeff,Z,dz): +def check_component(data, component, t_env_theory, coeff, Z, dz): print("*** Checking " + component + " ***") - field = data['boxlib', component].v.squeeze() + field = data["boxlib", component].v.squeeze() env = abs(hilbert(field)) - env_theory = t_env_theory*np.abs(coeff) + env_theory = t_env_theory * np.abs(coeff) # Plot results - fig = plt.figure(figsize=(12,6)) + fig = plt.figure(figsize=(12, 6)) ax1 = fig.add_subplot(221) - ax1.set_title('PIC field') - ax1.plot(Z,field) + ax1.set_title("PIC field") + ax1.plot(Z, field) ax2 = fig.add_subplot(222) - ax2.set_title('PIC envelope') - ax2.plot(Z,env) + ax2.set_title("PIC envelope") + ax2.plot(Z, env) ax3 = fig.add_subplot(223) - ax3.set_title('Theory envelope') - ax3.plot(Z,env_theory, label="theory") - ax3.plot(Z,env, label="simulation") + ax3.set_title("Theory envelope") + ax3.plot(Z, env_theory, label="theory") + ax3.plot(Z, env, label="simulation") ax3.legend(loc="upper right") ax4 = fig.add_subplot(224) - 
ax4.set_title('Difference') - ax4.plot(Z,env-env_theory) + ax4.set_title("Difference") + ax4.plot(Z, env - env_theory) plt.tight_layout() - plt.savefig("plt_" + component + ".png", bbox_inches='tight') + plt.savefig("plt_" + component + ".png", bbox_inches="tight") - if(np.abs(coeff) < small_num): + if np.abs(coeff) < small_num: is_field_zero = np.sum(np.abs(env)) < small_num - if is_field_zero : + if is_field_zero: print("[OK] Field component expected to be 0 is ~ 0") - else : + else: print("[FAIL] Field component expected to be 0 is NOT ~ 0") - assert(is_field_zero) + assert is_field_zero print("******\n") return fft_field = np.fft.fft(field) - freq_cols = np.fft.fftfreq(fft_field.shape[0],dz/c) + freq_cols = np.fft.fftfreq(fft_field.shape[0], dz / c) pos_max = np.unravel_index(np.abs(fft_field).argmax(), fft_field.shape) freq = np.abs(freq_cols[pos_max[0]]) - exp_freq = c/wavelength + exp_freq = c / wavelength - relative_error_freq = np.abs(freq-exp_freq)/exp_freq + relative_error_freq = np.abs(freq - exp_freq) / exp_freq is_freq_ok = relative_error_freq < relative_error_threshold - if is_freq_ok : - print("[OK] Relative error frequency: {:6.3f} %".format(relative_error_freq*100)) - else : - print("[FAIL] Relative error frequency: {:6.3f} %".format(relative_error_freq*100)) - assert(is_freq_ok) + if is_freq_ok: + print( + "[OK] Relative error frequency: {:6.3f} %".format(relative_error_freq * 100) + ) + else: + print( + "[FAIL] Relative error frequency: {:6.3f} %".format( + relative_error_freq * 100 + ) + ) + assert is_freq_ok print("******\n") - relative_error_env = np.sum(np.abs(env-env_theory)) / np.sum(np.abs(env_theory)) + relative_error_env = np.sum(np.abs(env - env_theory)) / np.sum(np.abs(env_theory)) is_env_ok = relative_error_env < relative_error_threshold - if is_env_ok : - print("[OK] Relative error envelope: {:6.3f} %".format(relative_error_env*100)) - else : - print("[FAIL] Relative error envelope: {:6.3f} %".format(relative_error_env*100)) - 
assert(is_env_ok) + if is_env_ok: + print( + "[OK] Relative error envelope: {:6.3f} %".format(relative_error_env * 100) + ) + else: + print( + "[FAIL] Relative error envelope: {:6.3f} %".format(relative_error_env * 100) + ) + assert is_env_ok + def check_laser(filename): ds = yt.load(filename) @@ -136,20 +148,26 @@ def check_laser(filename): # yt 4.0+ has rounding issues with our domain data: # RuntimeError: yt attempted to read outside the boundaries # of a non-periodic domain along dimension 0. - if 'force_periodicity' in dir(ds): ds.force_periodicity() + if "force_periodicity" in dir(ds): + ds.force_periodicity() z = np.linspace( - ds.domain_left_edge[0].v, - ds.domain_right_edge[0].v, - ds.domain_dimensions[0]) + ds.domain_left_edge[0].v, ds.domain_right_edge[0].v, ds.domain_dimensions[0] + ) - dz = (ds.domain_right_edge[0].v-ds.domain_left_edge[0].v)/(ds.domain_dimensions[0]-1) + dz = (ds.domain_right_edge[0].v - ds.domain_left_edge[0].v) / ( + ds.domain_dimensions[0] - 1 + ) # Compute the theory for envelope - env_theory = gauss_env(+t_c-ds.current_time.to_value(),z)+gauss_env(-t_c+ds.current_time.to_value(),z) + env_theory = gauss_env(+t_c - ds.current_time.to_value(), z) + gauss_env( + -t_c + ds.current_time.to_value(), z + ) # Read laser field in PIC simulation, and compute envelope - all_data_level_0 = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) + all_data_level_0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions + ) b_vector = np.cross(dir_vector, pol_vector) @@ -160,12 +178,14 @@ def check_laser(filename): pol_vector[2], b_vector[0], b_vector[1], - b_vector[2]] + b_vector[2], + ] - field_facts = [1, 1, 1, 1/c, 1/c, 1/c] + field_facts = [1, 1, 1, 1 / c, 1 / c, 1 / c] for comp, coeff, field_fact in zip(components, coeffs, field_facts): - check_component(all_data_level_0, comp, field_fact*env_theory, coeff, z, dz) + check_component(all_data_level_0, comp, field_fact * env_theory, 
coeff, z, dz) + def main(): filename_end = sys.argv[1] @@ -175,5 +195,6 @@ def main(): test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename_end) + if __name__ == "__main__": main() diff --git a/Examples/Tests/laser_injection/analysis_2d.py b/Examples/Tests/laser_injection/analysis_2d.py index 4424fe134bc..c6548e8be1d 100755 --- a/Examples/Tests/laser_injection/analysis_2d.py +++ b/Examples/Tests/laser_injection/analysis_2d.py @@ -22,14 +22,14 @@ import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import numpy as np import yt from mpl_toolkits.axes_grid1 import make_axes_locatable from scipy.signal import hilbert -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # Maximum acceptable error for this test @@ -39,147 +39,166 @@ small_num = 1.0e-8 # Physical parameters -um = 1.e-6 -fs = 1.e-15 +um = 1.0e-6 +fs = 1.0e-15 c = 299792458 # Parameters of the gaussian beam -wavelength = 1.*um -w0 = 5.*um -tt = 10.*fs -x_c = 10.*um -t_c = 24.*fs +wavelength = 1.0 * um +w0 = 5.0 * um +tt = 10.0 * fs +x_c = 10.0 * um +t_c = 24.0 * fs # foc_dist = 13.109*um (not actually used) E_max = 4e12 # laser direction -dir_vector = np.array([2.,0,1.0]) +dir_vector = np.array([2.0, 0, 1.0]) dir_vector /= np.linalg.norm(dir_vector) -rot_angle = np.arctan(dir_vector[2]/dir_vector[0]) +rot_angle = np.arctan(dir_vector[2] / dir_vector[0]) # polarization vector -pol_vector = np.array([1.0,1.0,-2.0]) +pol_vector = np.array([1.0, 1.0, -2.0]) pol_vector /= np.linalg.norm(pol_vector) + # Calculates the envelope of a Gaussian beam -def gauss_env(T,XX,ZZ): - '''Function to compute the theory for the envelope - ''' +def gauss_env(T, XX, ZZ): + """Function to compute the theory for the envelope""" - Z = np.cos(rot_angle)*(XX-x_c) + np.sin(rot_angle)*ZZ - X = -np.sin(rot_angle)*(XX-x_c) + np.cos(rot_angle)*ZZ + Z = np.cos(rot_angle) 
* (XX - x_c) + np.sin(rot_angle) * ZZ + X = -np.sin(rot_angle) * (XX - x_c) + np.cos(rot_angle) * ZZ - inv_tau2 = 1./tt/tt - inv_w_2 = 1.0/(w0*w0) - exp_arg = - (X*X)*inv_w_2 - inv_tau2 / c/c * (Z-T*c)*(Z-T*c) + inv_tau2 = 1.0 / tt / tt + inv_w_2 = 1.0 / (w0 * w0) + exp_arg = -(X * X) * inv_w_2 - inv_tau2 / c / c * (Z - T * c) * (Z - T * c) return E_max * np.real(np.exp(exp_arg)) + # Checks envelope and central frequency for a given laser component -def check_component(data, component, t_env_theory, coeff, X,Z,dx,dz): +def check_component(data, component, t_env_theory, coeff, X, Z, dx, dz): print("*** Checking " + component + " ***") - field = data['boxlib', component].v.squeeze() + field = data["boxlib", component].v.squeeze() env = abs(hilbert(field)) - env_theory = t_env_theory*np.abs(coeff) + env_theory = t_env_theory * np.abs(coeff) # Plot results - fig = plt.figure(figsize=(12,6)) - - ax1 = fig.add_subplot(221, aspect='equal') - ax1.set_title('PIC field') - p1 = ax1.pcolormesh(X,Z,field) - cax1 = make_axes_locatable(ax1).append_axes('right', size='5%', pad=0.05) - fig.colorbar(p1, cax=cax1, orientation='vertical') - - ax2 = fig.add_subplot(222, aspect='equal') - ax2.set_title('PIC envelope') - p2 = ax2.pcolormesh(X,Z,env) - cax2 = make_axes_locatable(ax2).append_axes('right', size='5%', pad=0.05) - fig.colorbar(p2, cax=cax2, orientation='vertical') - - ax3 = fig.add_subplot(223, aspect='equal') - ax3.set_title('Theory envelope') - p3 = ax3.pcolormesh(X,Z,env_theory) - cax3 = make_axes_locatable(ax3).append_axes('right', size='5%', pad=0.05) - fig.colorbar(p3, cax=cax3, orientation='vertical') - - ax4 = fig.add_subplot(224, aspect='equal') - ax4.set_title('Difference') - p4 = ax4.pcolormesh(X,Z,env-env_theory) - cax4 = make_axes_locatable(ax4).append_axes('right', size='5%', pad=0.05) - fig.colorbar(p4, cax=cax4, orientation='vertical') + fig = plt.figure(figsize=(12, 6)) + + ax1 = fig.add_subplot(221, aspect="equal") + ax1.set_title("PIC field") + p1 = 
ax1.pcolormesh(X, Z, field) + cax1 = make_axes_locatable(ax1).append_axes("right", size="5%", pad=0.05) + fig.colorbar(p1, cax=cax1, orientation="vertical") + + ax2 = fig.add_subplot(222, aspect="equal") + ax2.set_title("PIC envelope") + p2 = ax2.pcolormesh(X, Z, env) + cax2 = make_axes_locatable(ax2).append_axes("right", size="5%", pad=0.05) + fig.colorbar(p2, cax=cax2, orientation="vertical") + + ax3 = fig.add_subplot(223, aspect="equal") + ax3.set_title("Theory envelope") + p3 = ax3.pcolormesh(X, Z, env_theory) + cax3 = make_axes_locatable(ax3).append_axes("right", size="5%", pad=0.05) + fig.colorbar(p3, cax=cax3, orientation="vertical") + + ax4 = fig.add_subplot(224, aspect="equal") + ax4.set_title("Difference") + p4 = ax4.pcolormesh(X, Z, env - env_theory) + cax4 = make_axes_locatable(ax4).append_axes("right", size="5%", pad=0.05) + fig.colorbar(p4, cax=cax4, orientation="vertical") plt.tight_layout() - plt.savefig("plt_" + component + ".png", bbox_inches='tight') + plt.savefig("plt_" + component + ".png", bbox_inches="tight") - if(np.abs(coeff) < small_num): + if np.abs(coeff) < small_num: is_field_zero = np.sum(np.abs(env)) < small_num - if is_field_zero : + if is_field_zero: print("[OK] Field component expected to be 0 is ~ 0") - else : + else: print("[FAIL] Field component expected to be 0 is NOT ~ 0") - assert(is_field_zero) + assert is_field_zero print("******\n") return - relative_error_env = np.sum(np.abs(env-env_theory)) / np.sum(np.abs(env_theory)) + relative_error_env = np.sum(np.abs(env - env_theory)) / np.sum(np.abs(env_theory)) is_env_ok = relative_error_env < relative_error_threshold - if is_env_ok : - print("[OK] Relative error envelope: {:6.3f} %".format(relative_error_env*100)) - else : - print("[FAIL] Relative error envelope: {:6.3f} %".format(relative_error_env*100)) - assert(is_env_ok) + if is_env_ok: + print( + "[OK] Relative error envelope: {:6.3f} %".format(relative_error_env * 100) + ) + else: + print( + "[FAIL] Relative error 
envelope: {:6.3f} %".format(relative_error_env * 100) + ) + assert is_env_ok fft_field = np.fft.fft2(field) - freq_rows = np.fft.fftfreq(fft_field.shape[0],dx/c) - freq_cols = np.fft.fftfreq(fft_field.shape[1],dz/c) + freq_rows = np.fft.fftfreq(fft_field.shape[0], dx / c) + freq_cols = np.fft.fftfreq(fft_field.shape[1], dz / c) pos_max = np.unravel_index(np.abs(fft_field).argmax(), fft_field.shape) - freq = np.sqrt((freq_rows[pos_max[0]])**2 + (freq_cols[pos_max[1]]**2)) - exp_freq = c/wavelength + freq = np.sqrt((freq_rows[pos_max[0]]) ** 2 + (freq_cols[pos_max[1]] ** 2)) + exp_freq = c / wavelength - relative_error_freq = np.abs(freq-exp_freq)/exp_freq + relative_error_freq = np.abs(freq - exp_freq) / exp_freq is_freq_ok = relative_error_freq < relative_error_threshold - if is_freq_ok : - print("[OK] Relative error frequency: {:6.3f} %".format(relative_error_freq*100)) - else : - print("[FAIL] Relative error frequency: {:6.3f} %".format(relative_error_freq*100)) - assert(is_freq_ok) + if is_freq_ok: + print( + "[OK] Relative error frequency: {:6.3f} %".format(relative_error_freq * 100) + ) + else: + print( + "[FAIL] Relative error frequency: {:6.3f} %".format( + relative_error_freq * 100 + ) + ) + assert is_freq_ok print("******\n") + def check_laser(filename): ds = yt.load(filename) # yt 4.0+ has rounding issues with our domain data: # RuntimeError: yt attempted to read outside the boundaries # of a non-periodic domain along dimension 0. 
- if 'force_periodicity' in dir(ds): ds.force_periodicity() + if "force_periodicity" in dir(ds): + ds.force_periodicity() x = np.linspace( - ds.domain_left_edge[0].v, - ds.domain_right_edge[0].v, - ds.domain_dimensions[0]) + ds.domain_left_edge[0].v, ds.domain_right_edge[0].v, ds.domain_dimensions[0] + ) - dx = (ds.domain_right_edge[0].v-ds.domain_left_edge[0].v)/(ds.domain_dimensions[0]-1) + dx = (ds.domain_right_edge[0].v - ds.domain_left_edge[0].v) / ( + ds.domain_dimensions[0] - 1 + ) z = np.linspace( - ds.domain_left_edge[1].v, - ds.domain_right_edge[1].v, - ds.domain_dimensions[1]) + ds.domain_left_edge[1].v, ds.domain_right_edge[1].v, ds.domain_dimensions[1] + ) - dz = (ds.domain_right_edge[1].v-ds.domain_left_edge[1].v)/(ds.domain_dimensions[1]-1) + dz = (ds.domain_right_edge[1].v - ds.domain_left_edge[1].v) / ( + ds.domain_dimensions[1] - 1 + ) - X, Z = np.meshgrid(x, z, indexing='ij') + X, Z = np.meshgrid(x, z, indexing="ij") # Compute the theory for envelope - env_theory = gauss_env(+t_c-ds.current_time.to_value(),X,Z)+gauss_env(-t_c+ds.current_time.to_value(),X,Z) + env_theory = gauss_env(+t_c - ds.current_time.to_value(), X, Z) + gauss_env( + -t_c + ds.current_time.to_value(), X, Z + ) # Read laser field in PIC simulation, and compute envelope - all_data_level_0 = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) + all_data_level_0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions + ) b_vector = np.cross(dir_vector, pol_vector) @@ -190,12 +209,16 @@ def check_laser(filename): pol_vector[2], b_vector[0], b_vector[1], - b_vector[2]] + b_vector[2], + ] - field_facts = [1, 1, 1, 1/c, 1/c, 1/c] + field_facts = [1, 1, 1, 1 / c, 1 / c, 1 / c] for comp, coeff, field_fact in zip(components, coeffs, field_facts): - check_component(all_data_level_0, comp, field_fact*env_theory, coeff, X, Z, dx, dz) + check_component( + all_data_level_0, comp, field_fact * env_theory, coeff, X, Z, dx, dz + ) + 
def main(): filename_end = sys.argv[1] @@ -205,5 +228,6 @@ def main(): test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename_end) + if __name__ == "__main__": main() diff --git a/Examples/Tests/laser_injection/analysis_laser.py b/Examples/Tests/laser_injection/analysis_laser.py index 8dde8d6e96d..bf2a03e342c 100755 --- a/Examples/Tests/laser_injection/analysis_laser.py +++ b/Examples/Tests/laser_injection/analysis_laser.py @@ -13,11 +13,11 @@ import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import numpy as np -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file @@ -25,7 +25,7 @@ # you can save an image to be displayed on the website t = np.arange(0.0, 2.0, 0.01) -s = 1 + np.sin(2*np.pi*t) +s = 1 + np.sin(2 * np.pi * t) plt.plot(t, s) plt.savefig("laser_analysis.png") diff --git a/Examples/Tests/laser_injection_from_file/analysis_1d.py b/Examples/Tests/laser_injection_from_file/analysis_1d.py index a595a912858..e9bab5e8783 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_1d.py +++ b/Examples/Tests/laser_injection_from_file/analysis_1d.py @@ -22,101 +22,108 @@ import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import numpy as np +import yt from scipy.constants import c, epsilon_0 from scipy.signal import hilbert -import yt ; yt.funcs.mylog.setLevel(50) +yt.funcs.mylog.setLevel(50) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI -#Maximum acceptable error for this test +# Maximum acceptable error for this test relative_error_threshold = 0.065 -#Physical parameters -um = 1.e-6 -fs = 1.e-15 -c = 299792458 +# Physical parameters +um = 1.0e-6 +fs = 1.0e-15 -#Parameters of the gaussian beam -wavelength = 1.*um 
-w0 = 12.*um -tt = 10.*fs -t_c = 20.*fs +# Parameters of the gaussian beam +wavelength = 1.0 * um +w0 = 12.0 * um +tt = 10.0 * fs +t_c = 20.0 * fs laser_energy = 1.0 -E_max = np.sqrt( 2*(2/np.pi)**(3/2)*laser_energy / (epsilon_0*w0**2*c*tt) ) +E_max = np.sqrt( + 2 * (2 / np.pi) ** (3 / 2) * laser_energy / (epsilon_0 * w0**2 * c * tt) +) + # Function for the envelope -def gauss_env(T,Z): +def gauss_env(T, Z): # Function to compute the theory for the envelope - inv_tau2 = 1./tt/tt - exp_arg = - inv_tau2 / c/c * (Z-T*c)*(Z-T*c) + inv_tau2 = 1.0 / tt / tt + exp_arg = -inv_tau2 / c / c * (Z - T * c) * (Z - T * c) return E_max * np.real(np.exp(exp_arg)) + def do_analysis(fname, compname, steps): ds = yt.load(fname) - dt = ds.current_time.to_value()/steps + dt = ds.current_time.to_value() / steps z = np.linspace( - ds.domain_left_edge[0].v, - ds.domain_right_edge[0].v, - ds.domain_dimensions[0]) + ds.domain_left_edge[0].v, ds.domain_right_edge[0].v, ds.domain_dimensions[0] + ) # Compute the theory for envelope - env_theory = gauss_env(+t_c-ds.current_time.to_value(), z)+gauss_env(-t_c+ds.current_time.to_value(),z) + env_theory = gauss_env(+t_c - ds.current_time.to_value(), z) + gauss_env( + -t_c + ds.current_time.to_value(), z + ) # Read laser field in PIC simulation, and compute envelope - all_data_level_0 = ds.covering_grid(level=0,left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) - F_laser = all_data_level_0['boxlib', 'Ey'].v.squeeze() + all_data_level_0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions + ) + F_laser = all_data_level_0["boxlib", "Ey"].v.squeeze() env = abs(hilbert(F_laser)) # Plot results - plt.figure(figsize=(8,8)) + plt.figure(figsize=(8, 8)) plt.subplot(221) - plt.title('PIC field') + plt.title("PIC field") plt.plot(z, F_laser) plt.subplot(222) - plt.title('PIC envelope') + plt.title("PIC envelope") plt.plot(z, env) plt.subplot(223) - plt.title('Theory envelope') - plt.plot(z,env_theory) + 
plt.title("Theory envelope") + plt.plot(z, env_theory) plt.subplot(224) - plt.title('Difference') - plt.plot(z,env-env_theory) + plt.title("Difference") + plt.plot(z, env - env_theory) plt.tight_layout() - plt.savefig(compname, bbox_inches='tight') + plt.savefig(compname, bbox_inches="tight") - relative_error_env = np.sum(np.abs(env-env_theory)) / np.sum(np.abs(env)) + relative_error_env = np.sum(np.abs(env - env_theory)) / np.sum(np.abs(env)) print("Relative error envelope: ", relative_error_env) - assert(relative_error_env < relative_error_threshold) + assert relative_error_env < relative_error_threshold fft_F_laser = np.fft.fftn(F_laser) - freq_z = np.fft.fftfreq(F_laser.shape[0],dt) + freq_z = np.fft.fftfreq(F_laser.shape[0], dt) pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape) freq = np.abs(freq_z[pos_max[0]]) - exp_freq = c/wavelength - relative_error_freq = np.abs(freq-exp_freq)/exp_freq + exp_freq = c / wavelength + relative_error_freq = np.abs(freq - exp_freq) / exp_freq print("Relative error frequency: ", relative_error_freq) - assert(relative_error_freq < relative_error_threshold) - + assert relative_error_freq < relative_error_threshold def launch_analysis(executable): - os.system("./" + executable + " inputs.1d_test diag1.file_prefix=diags/plotfiles/plt") + os.system( + "./" + executable + " inputs.1d_test diag1.file_prefix=diags/plotfiles/plt" + ) do_analysis("diags/plotfiles/plt000251/", "comp_unf.pdf", 251) -def main() : - +def main(): from lasy.laser import Laser from lasy.profiles import GaussianProfile @@ -131,16 +138,17 @@ def main() : laser.normalize(laser_energy, kind="energy") laser.write_to_file("gaussianlaser3d") executables = glob.glob("*.ex") - if len(executables) == 1 : + if len(executables) == 1: launch_analysis(executables[0]) - else : - assert(False) + else: + assert False # Do the checksum test filename_end = "diags/plotfiles/plt000251/" test_name = os.path.split(os.getcwd())[1] 
checksumAPI.evaluate_checksum(test_name, filename_end) - print('Passed') + print("Passed") + if __name__ == "__main__": main() diff --git a/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py b/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py index 1fcc46d54d3..279b29f14ce 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py +++ b/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py @@ -22,102 +22,113 @@ import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import numpy as np +import yt from scipy.constants import c, epsilon_0 from scipy.signal import hilbert -import yt ; yt.funcs.mylog.setLevel(50) +yt.funcs.mylog.setLevel(50) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI -#Maximum acceptable error for this test +# Maximum acceptable error for this test relative_error_threshold = 0.065 -#Physical parameters -um = 1.e-6 -fs = 1.e-15 -c = 299792458 +# Physical parameters +um = 1.0e-6 +fs = 1.0e-15 -#Parameters of the gaussian beam -wavelength = 1.*um -w0 = 12.*um -tt = 10.*fs -t_c = 20.*fs +# Parameters of the gaussian beam +wavelength = 1.0 * um +w0 = 12.0 * um +tt = 10.0 * fs +t_c = 20.0 * fs laser_energy = 1.0 -E_max = np.sqrt( 2*(2/np.pi)**(3/2)*laser_energy / (epsilon_0*w0**2*c*tt) ) +E_max = np.sqrt( + 2 * (2 / np.pi) ** (3 / 2) * laser_energy / (epsilon_0 * w0**2 * c * tt) +) + # Function for the envelope -def gauss_env(T,Z): +def gauss_env(T, Z): # Function to compute the theory for the envelope - inv_tau2 = 1./tt/tt - exp_arg = - inv_tau2 / c/c * (Z-T*c)*(Z-T*c) + inv_tau2 = 1.0 / tt / tt + exp_arg = -inv_tau2 / c / c * (Z - T * c) * (Z - T * c) return E_max * np.real(np.exp(exp_arg)) + def do_analysis(fname, compname): ds = yt.load(fname) - dz = (ds.domain_right_edge[0].v-ds.domain_left_edge[0].v)/ds.domain_dimensions[0] - dt = dz/c + dz = (ds.domain_right_edge[0].v 
- ds.domain_left_edge[0].v) / ds.domain_dimensions[ + 0 + ] + dt = dz / c z = np.linspace( - ds.domain_left_edge[0].v, - ds.domain_right_edge[0].v, - ds.domain_dimensions[0]) + ds.domain_left_edge[0].v, ds.domain_right_edge[0].v, ds.domain_dimensions[0] + ) # Compute the theory for envelope - env_theory = gauss_env(+t_c-ds.current_time.to_value(), z)+gauss_env(-t_c+ds.current_time.to_value(),z) + env_theory = gauss_env(+t_c - ds.current_time.to_value(), z) + gauss_env( + -t_c + ds.current_time.to_value(), z + ) # Read laser field in PIC simulation, and compute envelope - all_data_level_0 = ds.covering_grid(level=0,left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) - F_laser = all_data_level_0['boxlib', 'Ey'].v.squeeze() + all_data_level_0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions + ) + F_laser = all_data_level_0["boxlib", "Ey"].v.squeeze() env = abs(hilbert(F_laser)) # Plot results - plt.figure(figsize=(8,8)) + plt.figure(figsize=(8, 8)) plt.subplot(221) - plt.title('PIC field') + plt.title("PIC field") plt.plot(z, F_laser) plt.subplot(222) - plt.title('PIC envelope') + plt.title("PIC envelope") plt.plot(z, env) plt.subplot(223) - plt.title('Theory envelope') - plt.plot(z,env_theory) + plt.title("Theory envelope") + plt.plot(z, env_theory) plt.subplot(224) - plt.title('Difference') - plt.plot(z,env-env_theory) + plt.title("Difference") + plt.plot(z, env - env_theory) plt.tight_layout() - plt.savefig(compname, bbox_inches='tight') + plt.savefig(compname, bbox_inches="tight") - relative_error_env = np.sum(np.abs(env-env_theory)) / np.sum(np.abs(env)) + relative_error_env = np.sum(np.abs(env - env_theory)) / np.sum(np.abs(env)) print("Relative error envelope: ", relative_error_env) - assert(relative_error_env < relative_error_threshold) + assert relative_error_env < relative_error_threshold fft_F_laser = np.fft.fftn(F_laser) - freq_z = np.fft.fftfreq(F_laser.shape[0],dt) + freq_z = np.fft.fftfreq(F_laser.shape[0], 
dt) pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape) freq = np.abs(freq_z[pos_max[0]]) - exp_freq = c/wavelength - relative_error_freq = np.abs(freq-exp_freq)/exp_freq + exp_freq = c / wavelength + relative_error_freq = np.abs(freq - exp_freq) / exp_freq print("Relative error frequency: ", relative_error_freq) - assert(relative_error_freq < relative_error_threshold) - + assert relative_error_freq < relative_error_threshold def launch_analysis(executable): - os.system("./" + executable + " inputs.1d_boost_test diag1.file_prefix=diags/plotfiles/plt") + os.system( + "./" + + executable + + " inputs.1d_boost_test diag1.file_prefix=diags/plotfiles/plt" + ) do_analysis("diags/plotfiles/plt000001/", "comp_unf.pdf") -def main() : - +def main(): from lasy.laser import Laser from lasy.profiles import GaussianProfile @@ -132,16 +143,17 @@ def main() : laser.normalize(laser_energy, kind="energy") laser.write_to_file("gaussianlaser3d") executables = glob.glob("*.ex") - if len(executables) == 1 : + if len(executables) == 1: launch_analysis(executables[0]) - else : - assert(False) + else: + assert False # Do the checksum test filename_end = "diags/plotfiles/plt000001/" test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename_end) - print('Passed') + print("Passed") + if __name__ == "__main__": main() diff --git a/Examples/Tests/laser_injection_from_file/analysis_2d.py b/Examples/Tests/laser_injection_from_file/analysis_2d.py index 23b7e12dbcd..18c178cea15 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_2d.py +++ b/Examples/Tests/laser_injection_from_file/analysis_2d.py @@ -22,119 +22,134 @@ import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import numpy as np +import yt from scipy.constants import c, epsilon_0 from scipy.signal import hilbert -import yt ; yt.funcs.mylog.setLevel(50) +yt.funcs.mylog.setLevel(50) -sys.path.insert(1, 
'../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI -#Maximum acceptable error for this test +# Maximum acceptable error for this test relative_error_threshold = 0.065 -#Physical parameters -um = 1.e-6 -fs = 1.e-15 -c = 299792458 +# Physical parameters +um = 1.0e-6 +fs = 1.0e-15 -#Parameters of the gaussian beam -wavelength = 1.*um -w0 = 12.*um -tt = 10.*fs -t_c = 20.*fs +# Parameters of the gaussian beam +wavelength = 1.0 * um +w0 = 12.0 * um +tt = 10.0 * fs +t_c = 20.0 * fs laser_energy = 1.0 -E_max = np.sqrt( 2*(2/np.pi)**(3/2)*laser_energy / (epsilon_0*w0**2*c*tt) ) +E_max = np.sqrt( + 2 * (2 / np.pi) ** (3 / 2) * laser_energy / (epsilon_0 * w0**2 * c * tt) +) + # Function for the envelope def gauss_env(T, X, Y, Z): # Function to compute the theory for the envelope - inv_tau2 = 1./tt/tt - inv_w_2 = 1.0/(w0*w0) - exp_arg = - (X*X)*inv_w_2 - (Y*Y)*inv_w_2- inv_tau2 / c/c * (Z-T*c)*(Z-T*c) + inv_tau2 = 1.0 / tt / tt + inv_w_2 = 1.0 / (w0 * w0) + exp_arg = ( + -(X * X) * inv_w_2 + - (Y * Y) * inv_w_2 + - inv_tau2 / c / c * (Z - T * c) * (Z - T * c) + ) return E_max * np.real(np.exp(exp_arg)) + def do_analysis(fname, compname, steps): ds = yt.load(fname) - dt = ds.current_time.to_value()/steps + dt = ds.current_time.to_value() / steps # Define 3D meshes x = np.linspace( - ds.domain_left_edge[0], - ds.domain_right_edge[0], - ds.domain_dimensions[0]).v + ds.domain_left_edge[0], ds.domain_right_edge[0], ds.domain_dimensions[0] + ).v y = np.linspace( - ds.domain_left_edge[1], - ds.domain_right_edge[1], - ds.domain_dimensions[1]).v + ds.domain_left_edge[1], ds.domain_right_edge[1], ds.domain_dimensions[1] + ).v z = np.linspace( - ds.domain_left_edge[ds.dimensionality-1], - ds.domain_right_edge[ds.dimensionality-1], - ds.domain_dimensions[ds.dimensionality-1]).v - X, Y, Z = np.meshgrid(x, y, z, sparse=False, indexing='ij') + ds.domain_left_edge[ds.dimensionality - 1], + 
ds.domain_right_edge[ds.dimensionality - 1], + ds.domain_dimensions[ds.dimensionality - 1], + ).v + X, Y, Z = np.meshgrid(x, y, z, sparse=False, indexing="ij") # Compute the theory for envelope - env_theory = gauss_env(+t_c-ds.current_time.to_value(), X,Y,Z)+gauss_env(-t_c+ds.current_time.to_value(), X,Y,Z) + env_theory = gauss_env(+t_c - ds.current_time.to_value(), X, Y, Z) + gauss_env( + -t_c + ds.current_time.to_value(), X, Y, Z + ) # Read laser field in PIC simulation, and compute envelope - all_data_level_0 = ds.covering_grid(level=0,left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) - F_laser = all_data_level_0['boxlib', 'Ey'].v.squeeze() + all_data_level_0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions + ) + F_laser = all_data_level_0["boxlib", "Ey"].v.squeeze() env = abs(hilbert(F_laser)) - extent = [ds.domain_left_edge[ds.dimensionality-1], ds.domain_right_edge[ds.dimensionality-1], - ds.domain_left_edge[0], ds.domain_right_edge[0] ] - env_theory_slice= env_theory[:,env_theory.shape[1]//2, :] + extent = [ + ds.domain_left_edge[ds.dimensionality - 1], + ds.domain_right_edge[ds.dimensionality - 1], + ds.domain_left_edge[0], + ds.domain_right_edge[0], + ] + env_theory_slice = env_theory[:, env_theory.shape[1] // 2, :] # Plot results - plt.figure(figsize=(8,6)) + plt.figure(figsize=(8, 6)) plt.subplot(221) - plt.title('PIC field') + plt.title("PIC field") plt.imshow(F_laser, extent=extent) plt.colorbar() plt.subplot(222) - plt.title('PIC envelope') + plt.title("PIC envelope") plt.imshow(env, extent=extent) plt.colorbar() plt.subplot(223) - plt.title('Theory envelope') + plt.title("Theory envelope") plt.imshow(env_theory_slice, extent=extent) plt.colorbar() plt.subplot(224) - plt.title('Difference') - plt.imshow(env-env_theory_slice, extent=extent) + plt.title("Difference") + plt.imshow(env - env_theory_slice, extent=extent) plt.colorbar() plt.tight_layout() - plt.savefig(compname, bbox_inches='tight') + 
plt.savefig(compname, bbox_inches="tight") - relative_error_env = np.sum(np.abs(env-env_theory_slice)) / np.sum(np.abs(env)) + relative_error_env = np.sum(np.abs(env - env_theory_slice)) / np.sum(np.abs(env)) print("Relative error envelope: ", relative_error_env) - assert(relative_error_env < relative_error_threshold) + assert relative_error_env < relative_error_threshold fft_F_laser = np.fft.fftn(F_laser) - freq_x = np.fft.fftfreq(F_laser.shape[0],dt) - freq_z = np.fft.fftfreq(F_laser.shape[1],dt) + freq_x = np.fft.fftfreq(F_laser.shape[0], dt) + freq_z = np.fft.fftfreq(F_laser.shape[1], dt) pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape) - freq = np.sqrt((freq_x[pos_max[0]])**2 + (freq_z[pos_max[1]])**2) - exp_freq = c/wavelength - relative_error_freq = np.abs(freq-exp_freq)/exp_freq + freq = np.sqrt((freq_x[pos_max[0]]) ** 2 + (freq_z[pos_max[1]]) ** 2) + exp_freq = c / wavelength + relative_error_freq = np.abs(freq - exp_freq) / exp_freq print("Relative error frequency: ", relative_error_freq) - assert(relative_error_freq < relative_error_threshold) - + assert relative_error_freq < relative_error_threshold def launch_analysis(executable): - os.system("./" + executable + " inputs.2d_test diag1.file_prefix=diags/plotfiles/plt") + os.system( + "./" + executable + " inputs.2d_test diag1.file_prefix=diags/plotfiles/plt" + ) do_analysis("diags/plotfiles/plt000251/", "comp_unf.pdf", 251) -def main() : - +def main(): from lasy.laser import Laser from lasy.profiles import GaussianProfile @@ -149,16 +164,17 @@ def main() : laser.normalize(laser_energy, kind="energy") laser.write_to_file("gaussianlaser3d") executables = glob.glob("*.ex") - if len(executables) == 1 : + if len(executables) == 1: launch_analysis(executables[0]) - else : - assert(False) + else: + assert False # Do the checksum test filename_end = "diags/plotfiles/plt000251/" test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename_end) - 
print('Passed') + print("Passed") + if __name__ == "__main__": main() diff --git a/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py b/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py index c5bdd84d023..44030261732 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py +++ b/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py @@ -22,194 +22,211 @@ import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import numpy as np +import yt from scipy.signal import hilbert -import yt ; yt.funcs.mylog.setLevel(50) +yt.funcs.mylog.setLevel(50) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI -#Maximum acceptable error for this test +# Maximum acceptable error for this test relative_error_threshold = 0.065 -#Physical parameters -um = 1.e-6 -fs = 1.e-15 +# Physical parameters +um = 1.0e-6 +fs = 1.0e-15 c = 299792458 -#Parameters of the gaussian beam -wavelength = 1.*um -w0 = 6.*um -tt = 10.*fs -x_c = 0.*um -t_c = 20.*fs -foc_dist = 10*um +# Parameters of the gaussian beam +wavelength = 1.0 * um +w0 = 6.0 * um +tt = 10.0 * fs +x_c = 0.0 * um +t_c = 20.0 * fs +foc_dist = 10 * um E_max = 1e12 -rot_angle = -np.pi/4.0 +rot_angle = -np.pi / 4.0 -#Parameters of the tx grid -x_l = -12.0*um -x_r = 12.0*um +# Parameters of the tx grid +x_l = -12.0 * um +x_r = 12.0 * um x_points = 480 -t_l = 0.0*fs -t_r = 40.0*fs +t_l = 0.0 * fs +t_r = 40.0 * fs t_points = 400 tcoords = np.linspace(t_l, t_r, t_points) xcoords = np.linspace(x_l, x_r, x_points) -def gauss(T,X,Y,opt): + +def gauss(T, X, Y, opt): """Compute the electric field for a Gaussian laser pulse. - This is used to write the binary input file. + This is used to write the binary input file. 
""" - k0 = 2.0*np.pi/wavelength - inv_tau2 = 1./tt/tt - osc_phase = k0*c*(T-t_c) + k0 = 2.0 * np.pi / wavelength + inv_tau2 = 1.0 / tt / tt + osc_phase = k0 * c * (T - t_c) - diff_factor = 1.0 + 1.0j* foc_dist * 2/(k0*w0*w0) - inv_w_2 = 1.0/(w0*w0*diff_factor) + diff_factor = 1.0 + 1.0j * foc_dist * 2 / (k0 * w0 * w0) + inv_w_2 = 1.0 / (w0 * w0 * diff_factor) pre_fact = np.exp(1.0j * osc_phase) - if opt == '3d': - pre_fact = pre_fact/diff_factor + if opt == "3d": + pre_fact = pre_fact / diff_factor else: - pre_fact = pre_fact/np.sqrt(diff_factor) + pre_fact = pre_fact / np.sqrt(diff_factor) - exp_arg = - (X*X + Y*Y)*inv_w_2 - inv_tau2 * (T-t_c)*(T-t_c) + exp_arg = -(X * X + Y * Y) * inv_w_2 - inv_tau2 * (T - t_c) * (T - t_c) return np.real(pre_fact * np.exp(exp_arg)) + # Function for the envelope -def gauss_env(T,XX,ZZ): - '''Function to compute the theory for the envelope - ''' +def gauss_env(T, XX, ZZ): + """Function to compute the theory for the envelope""" - X = np.cos(rot_angle)*XX + np.sin(rot_angle)*ZZ - Z = -np.sin(rot_angle)*XX + np.cos(rot_angle)*ZZ + X = np.cos(rot_angle) * XX + np.sin(rot_angle) * ZZ + Z = -np.sin(rot_angle) * XX + np.cos(rot_angle) * ZZ - inv_tau2 = 1./tt/tt - inv_w_2 = 1.0/(w0*w0) - exp_arg = - (X*X)*inv_w_2 - inv_tau2 / c/c * (Z-T*c)*(Z-T*c) + inv_tau2 = 1.0 / tt / tt + inv_w_2 = 1.0 / (w0 * w0) + exp_arg = -(X * X) * inv_w_2 - inv_tau2 / c / c * (Z - T * c) * (Z - T * c) return E_max * np.real(np.exp(exp_arg)) + def write_file(fname, x, y, t, E): - """ For a given filename fname, space coordinates x and y, time coordinate t + """For a given filename fname, space coordinates x and y, time coordinate t and field E, write a WarpX-compatible input binary file containing the profile of the laser pulse. 
This function should be used in the case of a uniform spatio-temporal mesh """ - with open(fname, 'wb') as file: + with open(fname, "wb") as file: flag_unif = 1 - file.write(flag_unif.to_bytes(1, byteorder='little')) - file.write((len(t)).to_bytes(4, byteorder='little', signed=False)) - file.write((len(x)).to_bytes(4, byteorder='little', signed=False)) - file.write((len(y)).to_bytes(4, byteorder='little', signed=False)) + file.write(flag_unif.to_bytes(1, byteorder="little")) + file.write((len(t)).to_bytes(4, byteorder="little", signed=False)) + file.write((len(x)).to_bytes(4, byteorder="little", signed=False)) + file.write((len(y)).to_bytes(4, byteorder="little", signed=False)) file.write(t[0].tobytes()) file.write(t[-1].tobytes()) file.write(x[0].tobytes()) file.write(x[-1].tobytes()) - if len(y) == 1 : + if len(y) == 1: file.write(y[0].tobytes()) - else : + else: file.write(y[0].tobytes()) file.write(y[-1].tobytes()) file.write(E.tobytes()) + def create_gaussian_2d(): - T, X, Y = np.meshgrid(tcoords, xcoords, np.array([0.0]), indexing='ij') - E_t = gauss(T,X,Y,'2d') + T, X, Y = np.meshgrid(tcoords, xcoords, np.array([0.0]), indexing="ij") + E_t = gauss(T, X, Y, "2d") write_file("gauss_2d", xcoords, np.array([0.0]), tcoords, E_t) + def do_analysis(fname, compname, steps): ds = yt.load(fname) - dt = ds.current_time.to_value()/steps + dt = ds.current_time.to_value() / steps # Define 2D meshes x = np.linspace( - ds.domain_left_edge[0], - ds.domain_right_edge[0], - ds.domain_dimensions[0]).v + ds.domain_left_edge[0], ds.domain_right_edge[0], ds.domain_dimensions[0] + ).v z = np.linspace( - ds.domain_left_edge[ds.dimensionality-1], - ds.domain_right_edge[ds.dimensionality-1], - ds.domain_dimensions[ds.dimensionality-1]).v - X, Z = np.meshgrid(x, z, sparse=False, indexing='ij') + ds.domain_left_edge[ds.dimensionality - 1], + ds.domain_right_edge[ds.dimensionality - 1], + ds.domain_dimensions[ds.dimensionality - 1], + ).v + X, Z = np.meshgrid(x, z, sparse=False, 
indexing="ij") # Compute the theory for envelope - env_theory = gauss_env(+t_c-ds.current_time.to_value(), X,Z)+gauss_env(-t_c+ds.current_time.to_value(), X,Z) + env_theory = gauss_env(+t_c - ds.current_time.to_value(), X, Z) + gauss_env( + -t_c + ds.current_time.to_value(), X, Z + ) # Read laser field in PIC simulation, and compute envelope - all_data_level_0 = ds.covering_grid(level=0,left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) - F_laser = all_data_level_0['boxlib', 'Ey'].v.squeeze() + all_data_level_0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions + ) + F_laser = all_data_level_0["boxlib", "Ey"].v.squeeze() env = abs(hilbert(F_laser)) - extent = [ds.domain_left_edge[ds.dimensionality-1], ds.domain_right_edge[ds.dimensionality-1], - ds.domain_left_edge[0], ds.domain_right_edge[0] ] + extent = [ + ds.domain_left_edge[ds.dimensionality - 1], + ds.domain_right_edge[ds.dimensionality - 1], + ds.domain_left_edge[0], + ds.domain_right_edge[0], + ] # Plot results - plt.figure(figsize=(8,6)) + plt.figure(figsize=(8, 6)) plt.subplot(221) - plt.title('PIC field') + plt.title("PIC field") plt.imshow(F_laser, extent=extent) plt.colorbar() plt.subplot(222) - plt.title('PIC envelope') + plt.title("PIC envelope") plt.imshow(env, extent=extent) plt.colorbar() plt.subplot(223) - plt.title('Theory envelope') + plt.title("Theory envelope") plt.imshow(env_theory, extent=extent) plt.colorbar() plt.subplot(224) - plt.title('Difference') - plt.imshow(env-env_theory, extent=extent) + plt.title("Difference") + plt.imshow(env - env_theory, extent=extent) plt.colorbar() plt.tight_layout() - plt.savefig(compname, bbox_inches='tight') + plt.savefig(compname, bbox_inches="tight") - relative_error_env = np.sum(np.abs(env-env_theory)) / np.sum(np.abs(env)) + relative_error_env = np.sum(np.abs(env - env_theory)) / np.sum(np.abs(env)) print("Relative error envelope: ", relative_error_env) - assert(relative_error_env < 
relative_error_threshold) + assert relative_error_env < relative_error_threshold fft_F_laser = np.fft.fft2(F_laser) - freq_rows = np.fft.fftfreq(F_laser.shape[0],dt) - freq_cols = np.fft.fftfreq(F_laser.shape[1],dt) + freq_rows = np.fft.fftfreq(F_laser.shape[0], dt) + freq_cols = np.fft.fftfreq(F_laser.shape[1], dt) pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape) - freq = np.sqrt((freq_rows[pos_max[0]])**2 + (freq_cols[pos_max[1]]**2)) - exp_freq = c/wavelength + freq = np.sqrt((freq_rows[pos_max[0]]) ** 2 + (freq_cols[pos_max[1]] ** 2)) + exp_freq = c / wavelength - relative_error_freq = np.abs(freq-exp_freq)/exp_freq + relative_error_freq = np.abs(freq - exp_freq) / exp_freq print("Relative error frequency: ", relative_error_freq) - assert(relative_error_freq < relative_error_threshold) - + assert relative_error_freq < relative_error_threshold def launch_analysis(executable): create_gaussian_2d() - os.system("./" + executable + " inputs.2d_test_binary diag1.file_prefix=diags/plotfiles/plt") + os.system( + "./" + + executable + + " inputs.2d_test_binary diag1.file_prefix=diags/plotfiles/plt" + ) do_analysis("diags/plotfiles/plt000250/", "comp_unf.pdf", 250) -def main() : +def main(): executables = glob.glob("*.ex") - if len(executables) == 1 : + if len(executables) == 1: launch_analysis(executables[0]) - else : - assert(False) + else: + assert False # Do the checksum test filename_end = "diags/plotfiles/plt000250/" test_name = "LaserInjectionFromBINARYFile" checksumAPI.evaluate_checksum(test_name, filename_end) - print('Passed') + print("Passed") + if __name__ == "__main__": main() diff --git a/Examples/Tests/laser_injection_from_file/analysis_3d.py b/Examples/Tests/laser_injection_from_file/analysis_3d.py index a66f761b9b1..59fe2c6ce8a 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_3d.py +++ b/Examples/Tests/laser_injection_from_file/analysis_3d.py @@ -22,123 +22,142 @@ import matplotlib -matplotlib.use('Agg') 
+matplotlib.use("Agg") import matplotlib.pyplot as plt import numpy as np +import yt from scipy.constants import c, epsilon_0 from scipy.signal import hilbert -import yt ; yt.funcs.mylog.setLevel(50) +yt.funcs.mylog.setLevel(50) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI -#Maximum acceptable error for this test +# Maximum acceptable error for this test relative_error_threshold = 0.065 -#Physical parameters -um = 1.e-6 -fs = 1.e-15 -c = 299792458 +# Physical parameters +um = 1.0e-6 +fs = 1.0e-15 -#Parameters of the gaussian beam -wavelength = 1.*um -w0 = 12.*um -tt = 10.*fs -t_c = 20.*fs +# Parameters of the gaussian beam +wavelength = 1.0 * um +w0 = 12.0 * um +tt = 10.0 * fs +t_c = 20.0 * fs laser_energy = 1.0 -E_max = np.sqrt( 2*(2/np.pi)**(3/2)*laser_energy / (epsilon_0*w0**2*c*tt) ) +E_max = np.sqrt( + 2 * (2 / np.pi) ** (3 / 2) * laser_energy / (epsilon_0 * w0**2 * c * tt) +) + # Function for the envelope def gauss_env(T, X, Y, Z): # Function to compute the theory for the envelope - inv_tau2 = 1./tt/tt - inv_w_2 = 1.0/(w0*w0) - exp_arg = - (X*X)*inv_w_2 - (Y*Y)*inv_w_2- inv_tau2 / c/c * (Z-T*c)*(Z-T*c) + inv_tau2 = 1.0 / tt / tt + inv_w_2 = 1.0 / (w0 * w0) + exp_arg = ( + -(X * X) * inv_w_2 + - (Y * Y) * inv_w_2 + - inv_tau2 / c / c * (Z - T * c) * (Z - T * c) + ) return E_max * np.real(np.exp(exp_arg)) + def do_analysis(fname, compname, steps): ds = yt.load(fname) - dt = ds.current_time.to_value()/steps + dt = ds.current_time.to_value() / steps # Define 3D meshes x = np.linspace( - ds.domain_left_edge[0], - ds.domain_right_edge[0], - ds.domain_dimensions[0]).v + ds.domain_left_edge[0], ds.domain_right_edge[0], ds.domain_dimensions[0] + ).v y = np.linspace( - ds.domain_left_edge[1], - ds.domain_right_edge[1], - ds.domain_dimensions[1]).v + ds.domain_left_edge[1], ds.domain_right_edge[1], ds.domain_dimensions[1] + ).v z = np.linspace( - 
ds.domain_left_edge[ds.dimensionality-1], - ds.domain_right_edge[ds.dimensionality-1], - ds.domain_dimensions[ds.dimensionality-1]).v - X, Y, Z = np.meshgrid(x, y, z, sparse=False, indexing='ij') + ds.domain_left_edge[ds.dimensionality - 1], + ds.domain_right_edge[ds.dimensionality - 1], + ds.domain_dimensions[ds.dimensionality - 1], + ).v + X, Y, Z = np.meshgrid(x, y, z, sparse=False, indexing="ij") # Compute the theory for envelope - env_theory = gauss_env(+t_c-ds.current_time.to_value(), X,Y,Z)+gauss_env(-t_c+ds.current_time.to_value(), X,Y,Z) + env_theory = gauss_env(+t_c - ds.current_time.to_value(), X, Y, Z) + gauss_env( + -t_c + ds.current_time.to_value(), X, Y, Z + ) # Read laser field in PIC simulation, and compute envelope - all_data_level_0 = ds.covering_grid(level=0,left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) - F_laser = all_data_level_0['boxlib', 'Ey'].v.squeeze() + all_data_level_0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions + ) + F_laser = all_data_level_0["boxlib", "Ey"].v.squeeze() env = abs(hilbert(F_laser)) - extent = [ds.domain_left_edge[ds.dimensionality-1], ds.domain_right_edge[ds.dimensionality-1], - ds.domain_left_edge[0], ds.domain_right_edge[0] ] + extent = [ + ds.domain_left_edge[ds.dimensionality - 1], + ds.domain_right_edge[ds.dimensionality - 1], + ds.domain_left_edge[0], + ds.domain_right_edge[0], + ] - F_slice= F_laser[:,F_laser.shape[1]//2, :] - env_slice= env[:,env.shape[1]//2, :] - env_theory_slice= env_theory[:,env_theory.shape[1]//2, :] + F_slice = F_laser[:, F_laser.shape[1] // 2, :] + env_slice = env[:, env.shape[1] // 2, :] + env_theory_slice = env_theory[:, env_theory.shape[1] // 2, :] # Plot results - plt.figure(figsize=(8,6)) + plt.figure(figsize=(8, 6)) plt.subplot(221) - plt.title('PIC field') + plt.title("PIC field") plt.imshow(F_slice, extent=extent) plt.colorbar() plt.subplot(222) - plt.title('PIC envelope') + plt.title("PIC envelope") plt.imshow(env_slice, 
extent=extent) plt.colorbar() plt.subplot(223) - plt.title('Theory envelope') + plt.title("Theory envelope") plt.imshow(env_theory_slice, extent=extent) plt.colorbar() plt.subplot(224) - plt.title('Difference') - plt.imshow(env_slice-env_theory_slice, extent=extent) + plt.title("Difference") + plt.imshow(env_slice - env_theory_slice, extent=extent) plt.colorbar() plt.tight_layout() - plt.savefig(compname, bbox_inches='tight') + plt.savefig(compname, bbox_inches="tight") - relative_error_env = np.sum(np.abs(env-env_theory)) / np.sum(np.abs(env)) + relative_error_env = np.sum(np.abs(env - env_theory)) / np.sum(np.abs(env)) print("Relative error envelope: ", relative_error_env) - assert(relative_error_env < relative_error_threshold) + assert relative_error_env < relative_error_threshold fft_F_laser = np.fft.fftn(F_laser) - freq_x = np.fft.fftfreq(F_laser.shape[0],dt) - freq_y = np.fft.fftfreq(F_laser.shape[1],dt) - freq_z = np.fft.fftfreq(F_laser.shape[2],dt) + freq_x = np.fft.fftfreq(F_laser.shape[0], dt) + freq_y = np.fft.fftfreq(F_laser.shape[1], dt) + freq_z = np.fft.fftfreq(F_laser.shape[2], dt) pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape) - freq = np.sqrt((freq_x[pos_max[0]])**2 + (freq_y[pos_max[1]]**2) + (freq_z[pos_max[2]])**2) - exp_freq = c/wavelength - relative_error_freq = np.abs(freq-exp_freq)/exp_freq + freq = np.sqrt( + (freq_x[pos_max[0]]) ** 2 + + (freq_y[pos_max[1]] ** 2) + + (freq_z[pos_max[2]]) ** 2 + ) + exp_freq = c / wavelength + relative_error_freq = np.abs(freq - exp_freq) / exp_freq print("Relative error frequency: ", relative_error_freq) - assert(relative_error_freq < relative_error_threshold) - + assert relative_error_freq < relative_error_threshold def launch_analysis(executable): - os.system("./" + executable + " inputs.3d_test diag1.file_prefix=diags/plotfiles/plt") + os.system( + "./" + executable + " inputs.3d_test diag1.file_prefix=diags/plotfiles/plt" + ) do_analysis("diags/plotfiles/plt000251/", 
"comp_unf.pdf", 251) -def main() : - +def main(): from lasy.laser import Laser from lasy.profiles import GaussianProfile @@ -153,16 +172,17 @@ def main() : laser.normalize(laser_energy, kind="energy") laser.write_to_file("gaussianlaser3d") executables = glob.glob("*.ex") - if len(executables) == 1 : + if len(executables) == 1: launch_analysis(executables[0]) - else : - assert(False) + else: + assert False # Do the checksum test filename_end = "diags/plotfiles/plt000251/" test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename_end) - print('Passed') + print("Passed") + if __name__ == "__main__": main() diff --git a/Examples/Tests/laser_injection_from_file/analysis_RZ.py b/Examples/Tests/laser_injection_from_file/analysis_RZ.py index 3667a1d9419..5ebba5b86e2 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_RZ.py +++ b/Examples/Tests/laser_injection_from_file/analysis_RZ.py @@ -22,120 +22,135 @@ import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import numpy as np +import yt from scipy.constants import c, epsilon_0 from scipy.signal import hilbert -import yt ; yt.funcs.mylog.setLevel(50) +yt.funcs.mylog.setLevel(50) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI -#Maximum acceptable error for this test +# Maximum acceptable error for this test relative_error_threshold = 0.065 -#Physical parameters -um = 1.e-6 -fs = 1.e-15 -c = 299792458 +# Physical parameters +um = 1.0e-6 +fs = 1.0e-15 -#Parameters of the gaussian beam -wavelength = 1.*um -w0 = 12.*um -tt = 10.*fs -t_c = 20.*fs +# Parameters of the gaussian beam +wavelength = 1.0 * um +w0 = 12.0 * um +tt = 10.0 * fs +t_c = 20.0 * fs laser_energy = 1.0 -E_max = np.sqrt( 2*(2/np.pi)**(3/2)*laser_energy / (epsilon_0*w0**2*c*tt) ) +E_max = np.sqrt( + 2 * (2 / np.pi) ** (3 / 2) * laser_energy / (epsilon_0 * w0**2 * c * tt) +) + # 
Function for the envelope def gauss_env(T, X, Y, Z): # Function to compute the theory for the envelope - inv_tau2 = 1./tt/tt - inv_w_2 = 1.0/(w0*w0) - exp_arg = - (X*X)*inv_w_2 - (Y*Y)*inv_w_2- inv_tau2 / c/c * (Z-T*c)*(Z-T*c) + inv_tau2 = 1.0 / tt / tt + inv_w_2 = 1.0 / (w0 * w0) + exp_arg = ( + -(X * X) * inv_w_2 + - (Y * Y) * inv_w_2 + - inv_tau2 / c / c * (Z - T * c) * (Z - T * c) + ) return E_max * np.real(np.exp(exp_arg)) + def do_analysis(fname, compname, steps): ds = yt.load(fname) - dt = ds.current_time.to_value()/steps + dt = ds.current_time.to_value() / steps # Define 3D meshes x = np.linspace( - ds.domain_left_edge[0], - ds.domain_right_edge[0], - ds.domain_dimensions[0]).v + ds.domain_left_edge[0], ds.domain_right_edge[0], ds.domain_dimensions[0] + ).v y = np.linspace( - ds.domain_left_edge[1], - ds.domain_right_edge[1], - ds.domain_dimensions[1]).v + ds.domain_left_edge[1], ds.domain_right_edge[1], ds.domain_dimensions[1] + ).v z = np.linspace( - ds.domain_left_edge[ds.dimensionality-1], - ds.domain_right_edge[ds.dimensionality-1], - ds.domain_dimensions[ds.dimensionality-1]).v - X, Y, Z = np.meshgrid(x, y, z, sparse=False, indexing='ij') + ds.domain_left_edge[ds.dimensionality - 1], + ds.domain_right_edge[ds.dimensionality - 1], + ds.domain_dimensions[ds.dimensionality - 1], + ).v + X, Y, Z = np.meshgrid(x, y, z, sparse=False, indexing="ij") # Compute the theory for envelope - env_theory = gauss_env(+t_c-ds.current_time.to_value(), X,Y,Z)+gauss_env(-t_c+ds.current_time.to_value(), X,Y,Z) + env_theory = gauss_env(+t_c - ds.current_time.to_value(), X, Y, Z) + gauss_env( + -t_c + ds.current_time.to_value(), X, Y, Z + ) # Read laser field in PIC simulation, and compute envelope - all_data_level_0 = ds.covering_grid(level=0,left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) - F_laser = all_data_level_0['boxlib', 'Et'].v.squeeze() + all_data_level_0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions + ) + F_laser 
= all_data_level_0["boxlib", "Et"].v.squeeze() env = abs(hilbert(F_laser)) - extent = [ds.domain_left_edge[ds.dimensionality-1], ds.domain_right_edge[ds.dimensionality-1], - ds.domain_left_edge[0], ds.domain_right_edge[0] ] + extent = [ + ds.domain_left_edge[ds.dimensionality - 1], + ds.domain_right_edge[ds.dimensionality - 1], + ds.domain_left_edge[0], + ds.domain_right_edge[0], + ] - env_theory_slice= env_theory[:,env_theory.shape[1]//2, :] + env_theory_slice = env_theory[:, env_theory.shape[1] // 2, :] # Plot results - plt.figure(figsize=(8,6)) + plt.figure(figsize=(8, 6)) plt.subplot(221) - plt.title('PIC field') + plt.title("PIC field") plt.imshow(F_laser, extent=extent) plt.colorbar() plt.subplot(222) - plt.title('PIC envelope') + plt.title("PIC envelope") plt.imshow(env, extent=extent) plt.colorbar() plt.subplot(223) - plt.title('Theory envelope') + plt.title("Theory envelope") plt.imshow(env_theory_slice, extent=extent) plt.colorbar() plt.subplot(224) - plt.title('Difference') - plt.imshow(env-env_theory_slice, extent=extent) + plt.title("Difference") + plt.imshow(env - env_theory_slice, extent=extent) plt.colorbar() plt.tight_layout() - plt.savefig(compname, bbox_inches='tight') + plt.savefig(compname, bbox_inches="tight") - relative_error_env = np.sum(np.abs(env-env_theory_slice)) / np.sum(np.abs(env)) + relative_error_env = np.sum(np.abs(env - env_theory_slice)) / np.sum(np.abs(env)) print("Relative error envelope: ", relative_error_env) - assert(relative_error_env < relative_error_threshold) + assert relative_error_env < relative_error_threshold fft_F_laser = np.fft.fftn(F_laser) - freq_x = np.fft.fftfreq(F_laser.shape[0],dt) - freq_z = np.fft.fftfreq(F_laser.shape[1],dt) + freq_x = np.fft.fftfreq(F_laser.shape[0], dt) + freq_z = np.fft.fftfreq(F_laser.shape[1], dt) pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape) - freq = np.sqrt((freq_x[pos_max[0]])**2 + (freq_z[pos_max[1]])**2) - exp_freq = c/wavelength - 
relative_error_freq = np.abs(freq-exp_freq)/exp_freq + freq = np.sqrt((freq_x[pos_max[0]]) ** 2 + (freq_z[pos_max[1]]) ** 2) + exp_freq = c / wavelength + relative_error_freq = np.abs(freq - exp_freq) / exp_freq print("Relative error frequency: ", relative_error_freq) - assert(relative_error_freq < relative_error_threshold) - + assert relative_error_freq < relative_error_threshold def launch_analysis(executable): - os.system("./" + executable + " inputs.RZ_test diag1.file_prefix=diags/plotfiles/plt") + os.system( + "./" + executable + " inputs.RZ_test diag1.file_prefix=diags/plotfiles/plt" + ) do_analysis("diags/plotfiles/plt000252/", "comp_unf.pdf", 252) -def main() : - +def main(): from lasy.laser import Laser from lasy.profiles import GaussianProfile @@ -150,16 +165,17 @@ def main() : laser.normalize(laser_energy, kind="energy") laser.write_to_file("gaussianlaser3d") executables = glob.glob("*.ex") - if len(executables) == 1 : + if len(executables) == 1: launch_analysis(executables[0]) - else : - assert(False) + else: + assert False # Do the checksum test filename_end = "diags/plotfiles/plt000252/" test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename_end) - print('Passed') + print("Passed") + if __name__ == "__main__": main() diff --git a/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py b/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py index 87d0c6265db..8bc0daea481 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py +++ b/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py @@ -22,131 +22,144 @@ import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import numpy as np +import yt from scipy.constants import c, epsilon_0 from scipy.signal import hilbert from scipy.special import genlaguerre -import yt ; yt.funcs.mylog.setLevel(50) +yt.funcs.mylog.setLevel(50) -sys.path.insert(1, 
'../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI -#Maximum acceptable error for this test +# Maximum acceptable error for this test relative_error_threshold = 0.065 -#Physical parameters -um = 1.e-6 -fs = 1.e-15 -c = 299792458 +# Physical parameters +um = 1.0e-6 +fs = 1.0e-15 -#Parameters of the Laguerre Gaussian beam -wavelength = 1.*um -w0 = 12.*um -tt = 10.*fs -t_c = 20.*fs +# Parameters of the Laguerre Gaussian beam +wavelength = 1.0 * um +w0 = 12.0 * um +tt = 10.0 * fs +t_c = 20.0 * fs laser_energy = 1.0 -E_max = np.sqrt( 2*(2/np.pi)**(3/2)*laser_energy / (epsilon_0*w0**2*c*tt) ) +E_max = np.sqrt( + 2 * (2 / np.pi) ** (3 / 2) * laser_energy / (epsilon_0 * w0**2 * c * tt) +) + # Function for the envelope def laguerre_env(T, X, Y, Z, p, m): - if m>0: - complex_position= X -1j * Y + if m > 0: + complex_position = X - 1j * Y else: - complex_position= X +1j * Y - inv_w0_2 = 1.0/(w0**2) - inv_tau2 = 1.0/(tt**2) + complex_position = X + 1j * Y + inv_w0_2 = 1.0 / (w0**2) + inv_tau2 = 1.0 / (tt**2) radius = abs(complex_position) - scaled_rad_squared = (radius**2)*inv_w0_2 + scaled_rad_squared = (radius**2) * inv_w0_2 envelope = ( - ( np.sqrt(2) * complex_position / w0 )** m - * genlaguerre(p, m)(2 * scaled_rad_squared) - * np.exp(-scaled_rad_squared) - * np.exp(-( inv_tau2 / (c**2) ) * (Z-T*c)**2) - ) + (np.sqrt(2) * complex_position / w0) ** m + * genlaguerre(p, m)(2 * scaled_rad_squared) + * np.exp(-scaled_rad_squared) + * np.exp(-(inv_tau2 / (c**2)) * (Z - T * c) ** 2) + ) return E_max * np.real(envelope) + def do_analysis(fname, compname, steps): ds = yt.load(fname) - dt = ds.current_time.to_value()/steps + dt = ds.current_time.to_value() / steps # Define 3D meshes x = np.linspace( - ds.domain_left_edge[0], - ds.domain_right_edge[0], - ds.domain_dimensions[0]).v + ds.domain_left_edge[0], ds.domain_right_edge[0], ds.domain_dimensions[0] + ).v y = np.linspace( - ds.domain_left_edge[1], - 
ds.domain_right_edge[1], - ds.domain_dimensions[1]).v + ds.domain_left_edge[1], ds.domain_right_edge[1], ds.domain_dimensions[1] + ).v z = np.linspace( - ds.domain_left_edge[ds.dimensionality-1], - ds.domain_right_edge[ds.dimensionality-1], - ds.domain_dimensions[ds.dimensionality-1]).v - X, Y, Z = np.meshgrid(x, y, z, sparse=False, indexing='ij') + ds.domain_left_edge[ds.dimensionality - 1], + ds.domain_right_edge[ds.dimensionality - 1], + ds.domain_dimensions[ds.dimensionality - 1], + ).v + X, Y, Z = np.meshgrid(x, y, z, sparse=False, indexing="ij") # Compute the theory for envelope - env_theory = laguerre_env(+t_c-ds.current_time.to_value(), X,Y,Z,p=0,m=1)+laguerre_env(-t_c+ds.current_time.to_value(), X,Y,Z,p=0,m=1) + env_theory = laguerre_env( + +t_c - ds.current_time.to_value(), X, Y, Z, p=0, m=1 + ) + laguerre_env(-t_c + ds.current_time.to_value(), X, Y, Z, p=0, m=1) # Read laser field in PIC simulation, and compute envelope - all_data_level_0 = ds.covering_grid(level=0,left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) - F_laser = all_data_level_0['boxlib', 'Et'].v.squeeze() + all_data_level_0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions + ) + F_laser = all_data_level_0["boxlib", "Et"].v.squeeze() env = abs(hilbert(F_laser)) - extent = [ds.domain_left_edge[ds.dimensionality-1], ds.domain_right_edge[ds.dimensionality-1], - ds.domain_left_edge[0], ds.domain_right_edge[0] ] + extent = [ + ds.domain_left_edge[ds.dimensionality - 1], + ds.domain_right_edge[ds.dimensionality - 1], + ds.domain_left_edge[0], + ds.domain_right_edge[0], + ] - env_theory_slice= env_theory[:,env_theory.shape[1]//2, :] + env_theory_slice = env_theory[:, env_theory.shape[1] // 2, :] # Plot results - plt.figure(figsize=(8,6)) + plt.figure(figsize=(8, 6)) plt.subplot(221) - plt.title('PIC field') + plt.title("PIC field") plt.imshow(F_laser, extent=extent) plt.colorbar() plt.subplot(222) - plt.title('PIC envelope') + plt.title("PIC envelope") 
plt.imshow(env, extent=extent) plt.colorbar() plt.subplot(223) - plt.title('Theory envelope') + plt.title("Theory envelope") plt.imshow(env_theory_slice, extent=extent) plt.colorbar() plt.subplot(224) - plt.title('Difference') - plt.imshow(env-env_theory_slice, extent=extent) + plt.title("Difference") + plt.imshow(env - env_theory_slice, extent=extent) plt.colorbar() plt.tight_layout() - plt.savefig(compname, bbox_inches='tight') + plt.savefig(compname, bbox_inches="tight") - relative_error_env = np.sum(np.abs(env-env_theory_slice)) / np.sum(np.abs(env)) + relative_error_env = np.sum(np.abs(env - env_theory_slice)) / np.sum(np.abs(env)) print("Relative error envelope: ", relative_error_env) - assert(relative_error_env < relative_error_threshold) + assert relative_error_env < relative_error_threshold fft_F_laser = np.fft.fftn(F_laser) - freq_x = np.fft.fftfreq(F_laser.shape[0],dt) - freq_z = np.fft.fftfreq(F_laser.shape[1],dt) + freq_x = np.fft.fftfreq(F_laser.shape[0], dt) + freq_z = np.fft.fftfreq(F_laser.shape[1], dt) pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape) - freq = np.sqrt((freq_x[pos_max[0]])**2 + (freq_z[pos_max[1]])**2) - exp_freq = c/wavelength - relative_error_freq = np.abs(freq-exp_freq)/exp_freq + freq = np.sqrt((freq_x[pos_max[0]]) ** 2 + (freq_z[pos_max[1]]) ** 2) + exp_freq = c / wavelength + relative_error_freq = np.abs(freq - exp_freq) / exp_freq print("Relative error frequency: ", relative_error_freq) - assert(relative_error_freq < relative_error_threshold) - + assert relative_error_freq < relative_error_threshold def launch_analysis(executable): - os.system("./" + executable + " inputs.from_RZ_file_test diag1.file_prefix=diags/plotfiles/plt") + os.system( + "./" + + executable + + " inputs.from_RZ_file_test diag1.file_prefix=diags/plotfiles/plt" + ) do_analysis("diags/plotfiles/plt000612/", "comp_unf.pdf", 612) -def main() : - +def main(): from lasy.laser import Laser from lasy.profiles import 
CombinedLongitudinalTransverseProfile from lasy.profiles.longitudinal import GaussianLongitudinalProfile @@ -155,28 +168,31 @@ def main() : # Create a Laguerre Gaussian laser in RZ geometry using lasy pol = (1, 0) profile = CombinedLongitudinalTransverseProfile( - wavelength,pol,laser_energy, - GaussianLongitudinalProfile(wavelength, tt, t_peak=0), - LaguerreGaussianTransverseProfile(w0, p=0, m=1), + wavelength, + pol, + laser_energy, + GaussianLongitudinalProfile(wavelength, tt, t_peak=0), + LaguerreGaussianTransverseProfile(w0, p=0, m=1), ) dim = "rt" lo = (0e-6, -20e-15) hi = (+25e-6, +20e-15) - npoints = (100,100) + npoints = (100, 100) laser = Laser(dim, lo, hi, npoints, profile, n_azimuthal_modes=2) laser.normalize(laser_energy, kind="energy") laser.write_to_file("laguerrelaserRZ") executables = glob.glob("*.ex") - if len(executables) == 1 : + if len(executables) == 1: launch_analysis(executables[0]) - else : - assert(False) + else: + assert False # Do the checksum test filename_end = "diags/plotfiles/plt000612/" test_name = "LaserInjectionFromRZLASYFile" checksumAPI.evaluate_checksum(test_name, filename_end) - print('Passed') + print("Passed") + if __name__ == "__main__": main() diff --git a/Examples/Tests/magnetostatic_eb/PICMI_inputs_3d.py b/Examples/Tests/magnetostatic_eb/PICMI_inputs_3d.py index 8f205724563..6a3fe9e2988 100755 --- a/Examples/Tests/magnetostatic_eb/PICMI_inputs_3d.py +++ b/Examples/Tests/magnetostatic_eb/PICMI_inputs_3d.py @@ -22,7 +22,7 @@ import matplotlib -matplotlib.use('agg') +matplotlib.use("agg") import matplotlib.pyplot as plt import numpy as np @@ -57,7 +57,7 @@ xmax = 0.25 ymin = -0.25 ymax = 0.25 -zmin = 0. 
+zmin = 0.0 zmax = 1.0 ########################## @@ -65,20 +65,24 @@ ########################## grid = picmi.Cartesian3DGrid( - number_of_cells = [nx, ny, nz], - lower_bound = [xmin, ymin, zmin], - upper_bound = [xmax, ymax, zmax], - lower_boundary_conditions = ['neumann', 'neumann', 'dirichlet'], - upper_boundary_conditions = ['neumann', 'neumann', 'neumann'], - lower_boundary_conditions_particles = ['absorbing', 'absorbing', 'absorbing'], - upper_boundary_conditions_particles = ['absorbing', 'absorbing', 'absorbing'], - warpx_potential_lo_z = V_domain_boundary, + number_of_cells=[nx, ny, nz], + lower_bound=[xmin, ymin, zmin], + upper_bound=[xmax, ymax, zmax], + lower_boundary_conditions=["neumann", "neumann", "dirichlet"], + upper_boundary_conditions=["neumann", "neumann", "neumann"], + lower_boundary_conditions_particles=["absorbing", "absorbing", "absorbing"], + upper_boundary_conditions_particles=["absorbing", "absorbing", "absorbing"], + warpx_potential_lo_z=V_domain_boundary, warpx_blocking_factor=8, - warpx_max_grid_size = 32 + warpx_max_grid_size=32, ) solver = picmi.ElectrostaticSolver( - grid=grid, method='Multigrid', required_precision=1e-7,warpx_magnetostatic=True,warpx_self_fields_verbosity=3 + grid=grid, + method="Multigrid", + required_precision=1e-7, + warpx_magnetostatic=True, + warpx_self_fields_verbosity=3, ) r_pipe = 0.2 @@ -86,46 +90,63 @@ embedded_boundary = picmi.EmbeddedBoundary( implicit_function="(x**2+y**2-radius**2)", potential=V_embedded_boundary, - radius = r_pipe + radius=r_pipe, ) # Beam Current Density current = 1000 # A -beam_r = 0.1 # m +beam_r = 0.1 # m -J = current/(np.pi * beam_r**2) -beam_gamma = 10. -vz = con.c*np.sqrt(1. 
- 1./beam_gamma**2) -n0 = J/(con.e*vz) +J = current / (np.pi * beam_r**2) +beam_gamma = 10.0 +vz = con.c * np.sqrt(1.0 - 1.0 / beam_gamma**2) +n0 = J / (con.e * vz) beam_dist = picmi.AnalyticDistribution( - density_expression='((x**2+y**2)= beam_r and r < r_pipe: - er = -current / (2.*np.pi*r*con.epsilon_0*vz) + er = -current / (2.0 * np.pi * r * con.epsilon_0 * vz) else: er = np.zeros_like(r) return er + # compare to region from 0.5*zmax to 0.9*zmax -z_idx = ((z_vec >= 0.5*zmax) & (z_vec < 0.9*zmax)) +z_idx = (z_vec >= 0.5 * zmax) & (z_vec < 0.9 * zmax) Ex_dat = Ex[...] Ey_dat = Ey[...] -Ex_mean = Ex_dat[:,:,z_idx].mean(axis=2).T -Ey_mean = Ey_dat[:,:,z_idx].mean(axis=2).T +Ex_mean = Ex_dat[:, :, z_idx].mean(axis=2).T +Ey_mean = Ey_dat[:, :, z_idx].mean(axis=2).T Ex_nodal = Ex_mean Ey_nodal = Ey_mean -XM, YM = np.meshgrid(x_vec, y_vec, indexing='xy') +XM, YM = np.meshgrid(x_vec, y_vec, indexing="xy") RM = np.sqrt(XM**2 + YM**2) -THM = np.arctan2(YM,XM) +THM = np.arctan2(YM, XM) Er_mean = np.cos(THM) * Ex_nodal + np.sin(THM) * Ey_nodal r_vec = np.sqrt(x_vec**2 + y_vec**2) -r_idx = (RM < 0.95*r_pipe) +r_idx = RM < 0.95 * r_pipe r_sub = RM[r_idx] plt.figure(1) plt.plot(r_vec, Er_an(r_vec)) -plt.plot(RM.flatten(), Er_mean.flatten(), '.') -plt.legend(['Analytical', 'Electrostatic']) +plt.plot(RM.flatten(), Er_mean.flatten(), ".") +plt.legend(["Analytical", "Electrostatic"]) -er_err = np.abs(Er_mean[r_idx] - Er_an(r_sub)).max()/np.abs(Er_an(r_sub)).max() +er_err = np.abs(Er_mean[r_idx] - Er_an(r_sub)).max() / np.abs(Er_an(r_sub)).max() -plt.ylabel('$E_r$ (V/m)') -plt.xlabel('r (m)') -plt.title("Max % Error: {} %".format(er_err*100.)) +plt.ylabel("$E_r$ (V/m)") +plt.xlabel("r (m)") +plt.title("Max % Error: {} %".format(er_err * 100.0)) plt.tight_layout() -plt.savefig('er_3d.png') +plt.savefig("er_3d.png") -assert (er_err < 0.05), "Er Max Error increased above 5%" +assert er_err < 0.05, "Er Max Error increased above 5%" ######################## # Check B field @@ -221,66 
+244,68 @@ def Er_an(r): Bx = fields.BxWrapper() By = fields.ByWrapper() -x_vec = Bx.mesh('x') -y_vec = Bx.mesh('y') -z_vec = Bx.mesh('z') +x_vec = Bx.mesh("x") +y_vec = Bx.mesh("y") +z_vec = Bx.mesh("z") dx = x_vec[1] - x_vec[0] dy = y_vec[1] - y_vec[0] -x_vec = x_vec + dx/2. -y_vec = y_vec + dy/2. +x_vec = x_vec + dx / 2.0 +y_vec = y_vec + dy / 2.0 + @np.vectorize def Bt_an(r): if r < beam_r: - bt = -current * r * con.mu_0 / (2.*np.pi*beam_r**2) + bt = -current * r * con.mu_0 / (2.0 * np.pi * beam_r**2) elif r >= beam_r and r < r_pipe: - bt = -current * con.mu_0 / (2.*np.pi*r) + bt = -current * con.mu_0 / (2.0 * np.pi * r) else: bt = np.zeros_like(r) return bt + # compare to region from 0.25*zmax to 0.75*zmax -z_idx = ((z_vec >= 0.25*zmax) & (z_vec < 0.75*zmax)) +z_idx = (z_vec >= 0.25 * zmax) & (z_vec < 0.75 * zmax) z_sub = z_vec[z_idx] Bx_dat = Bx[...] By_dat = By[...] -Bx_mean = Bx_dat[:,:,z_idx].mean(axis=2).T -By_mean = By_dat[:,:,z_idx].mean(axis=2).T +Bx_mean = Bx_dat[:, :, z_idx].mean(axis=2).T +By_mean = By_dat[:, :, z_idx].mean(axis=2).T # Interpolate B mesh to nodal points excluding last mesh point -Bx_nodal = (Bx_mean[:-1,1:] + Bx_mean[:-1,:-1])/2. -By_nodal = (By_mean[:-1,:-1] + By_mean[1:,:-1])/2. 
+Bx_nodal = (Bx_mean[:-1, 1:] + Bx_mean[:-1, :-1]) / 2.0 +By_nodal = (By_mean[:-1, :-1] + By_mean[1:, :-1]) / 2.0 x_vec = x_vec[:-1] y_vec = y_vec[:-1] -XM, YM = np.meshgrid(x_vec, y_vec, indexing='xy') +XM, YM = np.meshgrid(x_vec, y_vec, indexing="xy") RM = np.sqrt(XM**2 + YM**2) -THM = np.arctan2(YM,XM) +THM = np.arctan2(YM, XM) -Bt_mean = - np.sin(THM) * Bx_nodal + np.cos(THM) * By_nodal +Bt_mean = -np.sin(THM) * Bx_nodal + np.cos(THM) * By_nodal r_vec = np.sqrt(x_vec**2 + y_vec**2) -r_idx = (RM < 0.95*r_pipe) +r_idx = RM < 0.95 * r_pipe r_sub = RM[r_idx] plt.figure(2) plt.plot(r_vec, Bt_an(r_vec)) -plt.plot(RM[r_idx].flatten(), Bt_mean[r_idx].flatten(), '.') -plt.legend(['Analytical', 'Magnetostatic']) +plt.plot(RM[r_idx].flatten(), Bt_mean[r_idx].flatten(), ".") +plt.legend(["Analytical", "Magnetostatic"]) -bt_err = np.abs(Bt_mean[r_idx] - Bt_an(r_sub)).max()/np.abs(Bt_an(r_sub)).max() +bt_err = np.abs(Bt_mean[r_idx] - Bt_an(r_sub)).max() / np.abs(Bt_an(r_sub)).max() -plt.ylabel('$B_{\Theta}$ (T)') -plt.xlabel('r (m)') -plt.title("Max % Error: {} %".format(bt_err*100.)) +plt.ylabel("$B_{\Theta}$ (T)") +plt.xlabel("r (m)") +plt.title("Max % Error: {} %".format(bt_err * 100.0)) plt.tight_layout() -plt.savefig('bt_3d.png') +plt.savefig("bt_3d.png") -assert (bt_err < 0.05), "Bt Max Error increased above 5%" +assert bt_err < 0.05, "Bt Max Error increased above 5%" diff --git a/Examples/Tests/magnetostatic_eb/PICMI_inputs_rz.py b/Examples/Tests/magnetostatic_eb/PICMI_inputs_rz.py index e46f561f538..0ccf4460dfe 100755 --- a/Examples/Tests/magnetostatic_eb/PICMI_inputs_rz.py +++ b/Examples/Tests/magnetostatic_eb/PICMI_inputs_rz.py @@ -22,7 +22,7 @@ import matplotlib -matplotlib.use('agg') +matplotlib.use("agg") import matplotlib.pyplot as plt import numpy as np @@ -51,10 +51,10 @@ nr = 128 nz = 128 -rmin = 0. +rmin = 0.0 rmax = 0.25 -zmin = 0. 
+zmin = 0.0 zmax = 1 r_pipe = 0.2 @@ -64,65 +64,74 @@ ########################## grid = picmi.CylindricalGrid( - number_of_cells = [nr, nz], - lower_bound = [rmin, zmin], - upper_bound = [rmax, zmax], - lower_boundary_conditions = ['none', 'dirichlet'], - upper_boundary_conditions = ['neumann', 'neumann'], - lower_boundary_conditions_particles = ['none', 'absorbing'], - upper_boundary_conditions_particles = ['absorbing', 'absorbing'], - warpx_potential_lo_z = V_domain_boundary, + number_of_cells=[nr, nz], + lower_bound=[rmin, zmin], + upper_bound=[rmax, zmax], + lower_boundary_conditions=["none", "dirichlet"], + upper_boundary_conditions=["neumann", "neumann"], + lower_boundary_conditions_particles=["none", "absorbing"], + upper_boundary_conditions_particles=["absorbing", "absorbing"], + warpx_potential_lo_z=V_domain_boundary, warpx_blocking_factor=8, - warpx_max_grid_size = 32 + warpx_max_grid_size=32, ) solver = picmi.ElectrostaticSolver( - grid=grid, method='Multigrid', required_precision=1e-7,warpx_magnetostatic=True,warpx_self_fields_verbosity=3 + grid=grid, + method="Multigrid", + required_precision=1e-7, + warpx_magnetostatic=True, + warpx_self_fields_verbosity=3, ) embedded_boundary = picmi.EmbeddedBoundary( implicit_function="(x**2+y**2-radius**2)", potential=V_embedded_boundary, - radius = r_pipe + radius=r_pipe, ) # Beam Current Density current = 1000 # A -beam_r = 0.1 # m +beam_r = 0.1 # m -J = current/(np.pi * beam_r**2) -beam_gamma = 10. -vz = con.c*np.sqrt(1. 
- 1./beam_gamma**2) -n0 = J/(con.e*vz) +J = current / (np.pi * beam_r**2) +beam_gamma = 10.0 +vz = con.c * np.sqrt(1.0 - 1.0 / beam_gamma**2) +n0 = J / (con.e * vz) beam_dist = picmi.AnalyticDistribution( - density_expression='((x**2+y**2)= beam_r and r < r_pipe: - er = -current / (2.*np.pi*r*con.epsilon_0*vz) + er = -current / (2.0 * np.pi * r * con.epsilon_0 * vz) else: er = np.zeros_like(r) return er + # compare to region from 0.5*zmax to 0.9*zmax -z_idx = ((z_vec >= 0.5*zmax) & (z_vec < 0.9*zmax)) +z_idx = (z_vec >= 0.5 * zmax) & (z_vec < 0.9 * zmax) Er_dat = Er[...] -r_idx = (r_vec < 0.95*r_pipe) +r_idx = r_vec < 0.95 * r_pipe r_sub = r_vec[r_idx] # Average Er along z_sub -Er_mean = Er_dat[:,z_idx].mean(axis=1) +Er_mean = Er_dat[:, z_idx].mean(axis=1) plt.figure(1) plt.plot(r_vec, Er_an(r_vec)) -plt.plot(r_vec, Er_mean,'--') -plt.legend(['Analytical', 'Electrostatic']) +plt.plot(r_vec, Er_mean, "--") +plt.legend(["Analytical", "Electrostatic"]) -er_err = np.abs(Er_mean[r_idx] - Er_an(r_sub)).max()/np.abs(Er_an(r_sub)).max() +er_err = np.abs(Er_mean[r_idx] - Er_an(r_sub)).max() / np.abs(Er_an(r_sub)).max() -plt.ylabel('$E_r$ (V/m)') -plt.xlabel('r (m)') -plt.title("Max % Error: {} %".format(er_err*100.)) +plt.ylabel("$E_r$ (V/m)") +plt.xlabel("r (m)") +plt.title("Max % Error: {} %".format(er_err * 100.0)) plt.tight_layout() -plt.savefig('er_rz.png') +plt.savefig("er_rz.png") -assert (er_err < 0.02), "Er Error increased above 2%" +assert er_err < 0.02, "Er Error increased above 2%" ######################## # Check B field @@ -202,45 +213,47 @@ def Er_an(r): Bth = fields.ByWrapper() -r_vec = Bth.mesh('r') -z_vec = Bth.mesh('z') +r_vec = Bth.mesh("r") +z_vec = Bth.mesh("z") + +dr = r_vec[1] - r_vec[0] +r_vec = r_vec + dr / 2.0 -dr = r_vec[1]-r_vec[0] -r_vec = r_vec + dr/2. 
@np.vectorize def Bth_an(r): if r < beam_r: - bt = -current * r * con.mu_0 / (2.*np.pi*beam_r**2) + bt = -current * r * con.mu_0 / (2.0 * np.pi * beam_r**2) elif r >= beam_r and r < r_pipe: - bt = -current * con.mu_0 / (2.*np.pi*r) + bt = -current * con.mu_0 / (2.0 * np.pi * r) else: bt = np.zeros_like(r) return bt + # compare to region from 0.25*zmax to 0.75*zmax -z_idx = ((z_vec >= 0.25*zmax) & (z_vec < 0.75*zmax)) +z_idx = (z_vec >= 0.25 * zmax) & (z_vec < 0.75 * zmax) z_sub = z_vec[z_idx] Bth_dat = Bth[...] -r_idx = (r_vec < 0.95*r_pipe) +r_idx = r_vec < 0.95 * r_pipe r_sub = r_vec[r_idx] # Average Bth along z_idx -Bth_mean = Bth_dat[:,z_idx].mean(axis=1) +Bth_mean = Bth_dat[:, z_idx].mean(axis=1) plt.figure(2) plt.plot(r_vec, Bth_an(r_vec)) -plt.plot(r_vec, Bth_mean,'--') -plt.legend(['Analytical', 'Magnetostatic']) +plt.plot(r_vec, Bth_mean, "--") +plt.legend(["Analytical", "Magnetostatic"]) -bth_err = np.abs(Bth_mean[r_idx] - Bth_an(r_sub)).max()/np.abs(Bth_an(r_sub)).max() +bth_err = np.abs(Bth_mean[r_idx] - Bth_an(r_sub)).max() / np.abs(Bth_an(r_sub)).max() -plt.ylabel('$B_{\Theta}$ (T)') -plt.xlabel('r (m)') -plt.title("Max % Error: {} %".format(bth_err*100.)) +plt.ylabel("$B_{\Theta}$ (T)") +plt.xlabel("r (m)") +plt.title("Max % Error: {} %".format(bth_err * 100.0)) plt.tight_layout() -plt.savefig('bth_rz.png') +plt.savefig("bth_rz.png") -assert (bth_err < 0.02), "Bth Error increased above 2%" +assert bth_err < 0.02, "Bth Error increased above 2%" diff --git a/Examples/Tests/magnetostatic_eb/analysis_rz.py b/Examples/Tests/magnetostatic_eb/analysis_rz.py index 00c270e597c..05aa4a3fe47 100755 --- a/Examples/Tests/magnetostatic_eb/analysis_rz.py +++ b/Examples/Tests/magnetostatic_eb/analysis_rz.py @@ -4,7 +4,7 @@ import re import sys -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file @@ -14,7 +14,7 @@ test_name = 
os.path.split(os.getcwd())[1] # Run checksum regression test -if re.search( 'single_precision', fn ): - checksumAPI.evaluate_checksum(test_name, fn, rtol=2.e-6, do_particles=False) +if re.search("single_precision", fn): + checksumAPI.evaluate_checksum(test_name, fn, rtol=2.0e-6, do_particles=False) else: checksumAPI.evaluate_checksum(test_name, fn, do_particles=False) diff --git a/Examples/Tests/maxwell_hybrid_qed/analysis_Maxwell_QED_Hybrid.py b/Examples/Tests/maxwell_hybrid_qed/analysis_Maxwell_QED_Hybrid.py index e74e9c524b9..88142362372 100755 --- a/Examples/Tests/maxwell_hybrid_qed/analysis_Maxwell_QED_Hybrid.py +++ b/Examples/Tests/maxwell_hybrid_qed/analysis_Maxwell_QED_Hybrid.py @@ -10,8 +10,9 @@ import numpy as np import scipy.constants as scc +import yt -import yt ; yt.funcs.mylog.setLevel(0) +yt.funcs.mylog.setLevel(0) # Static electric field and quantum parameters, from the input file. Es = 1.0e5 @@ -19,28 +20,36 @@ # Load dataset and get laser field dsQED = yt.load(sys.argv[1]) -QED_all_data_level_0 = dsQED.covering_grid(level=0,left_edge=(dsQED.domain_left_edge), - dims=dsQED.domain_dimensions) -EyQED_2d = QED_all_data_level_0['boxlib', 'Ey'].v.squeeze() +QED_all_data_level_0 = dsQED.covering_grid( + level=0, left_edge=(dsQED.domain_left_edge), dims=dsQED.domain_dimensions +) +EyQED_2d = QED_all_data_level_0["boxlib", "Ey"].v.squeeze() # Extract 1D lineout of the laser field -EyQED = EyQED_2d[EyQED_2d.shape[0]//2,:] +EyQED = EyQED_2d[EyQED_2d.shape[0] // 2, :] # Longitudinal resolution -dz = dsQED.domain_width[1].v/dsQED.domain_dimensions[1] +dz = dsQED.domain_width[1].v / dsQED.domain_dimensions[1] # Initial position of the laser pulse max (from input file) -z_start = 0. 
+z_start = 0.0 # Final position of the laser pulse max (from plotfile) z_end = dsQED.domain_left_edge[1].v + np.argmax(EyQED) * dz # Compute phase velocity and compare with theory -phase_velocity_pic = (z_end-z_start)/dsQED.current_time.v -phase_velocity_theory = scc.c/np.sqrt((1.+12.*xi*Es**2/scc.epsilon_0)/(1.+4.*xi*Es**2/scc.epsilon_0)) -error_percent = 100.*np.abs(phase_velocity_pic-phase_velocity_theory)/phase_velocity_theory +phase_velocity_pic = (z_end - z_start) / dsQED.current_time.v +phase_velocity_theory = scc.c / np.sqrt( + (1.0 + 12.0 * xi * Es**2 / scc.epsilon_0) / (1.0 + 4.0 * xi * Es**2 / scc.epsilon_0) +) +error_percent = ( + 100.0 * np.abs(phase_velocity_pic - phase_velocity_theory) / phase_velocity_theory +) # Print and assert correctness -print('Simulation velocity: ' + str(phase_velocity_pic)) -print('Theory velocity : ' + str(phase_velocity_theory)) -print('error (%) : ' + str(error_percent) ) -print('Theoretical difference between with/without QED (%): ' + str(100*np.abs(phase_velocity_theory-scc.c)/scc.c)) -assert( error_percent < 1.25 ) +print("Simulation velocity: " + str(phase_velocity_pic)) +print("Theory velocity : " + str(phase_velocity_theory)) +print("error (%) : " + str(error_percent)) +print( + "Theoretical difference between with/without QED (%): " + + str(100 * np.abs(phase_velocity_theory - scc.c) / scc.c) +) +assert error_percent < 1.25 diff --git a/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py b/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py index 8f3d8d5acd1..7bfa47f3164 100755 --- a/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py +++ b/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py @@ -18,35 +18,37 @@ yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI fn = sys.argv[1] -use_MR = re.search( 'nci_correctorMR', fn ) is not None +use_MR = re.search("nci_correctorMR", fn) is not None if 
use_MR: - energy_corrector_off = 5.e32 - energy_threshold = 1.e28 + energy_corrector_off = 5.0e32 + energy_threshold = 1.0e28 else: energy_corrector_off = 1.5e26 - energy_threshold = 1.e24 + energy_threshold = 1.0e24 # Check EB energy after 1000 timesteps filename = sys.argv[1] -ds = yt.load( filename ) -ad0 = ds.covering_grid(level=0,left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) -ex = ad0['boxlib', 'Ex'].v -ez = ad0['boxlib', 'Ez'].v -by = ad0['boxlib', 'By'].v -energy = np.sum(ex**2 + ez**2 + scc.c**2*by**2) - -print("use_MR: %s" %use_MR) -print("energy if corrector off (from benchmark): %s" %energy_corrector_off) -print("energy threshold (from benchmark): %s" %energy_threshold) -print("energy from this run: %s" %energy) - -assert( energy < energy_threshold ) +ds = yt.load(filename) +ad0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +ex = ad0["boxlib", "Ex"].v +ez = ad0["boxlib", "Ez"].v +by = ad0["boxlib", "By"].v +energy = np.sum(ex**2 + ez**2 + scc.c**2 * by**2) + +print("use_MR: %s" % use_MR) +print("energy if corrector off (from benchmark): %s" % energy_corrector_off) +print("energy threshold (from benchmark): %s" % energy_threshold) +print("energy from this run: %s" % energy) + +assert energy < energy_threshold test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/nci_psatd_stability/analysis_galilean.py b/Examples/Tests/nci_psatd_stability/analysis_galilean.py index 666d240da8f..40c74ecc5bf 100755 --- a/Examples/Tests/nci_psatd_stability/analysis_galilean.py +++ b/Examples/Tests/nci_psatd_stability/analysis_galilean.py @@ -12,15 +12,17 @@ In both cases, the reference energy corresponds to unstable results due to NCI (suppressed by the Galilean PSATD method, without or with averaging, respectively). 
""" + import os import re import sys import numpy as np import scipy.constants as scc +import yt -import yt ; yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +yt.funcs.mylog.setLevel(0) +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI filename = sys.argv[1] @@ -29,18 +31,19 @@ current_correction = False time_averaging = False periodic_single_box = False -warpx_used_inputs = open('./warpx_used_inputs', 'r').read() -if re.search('geometry.dims\s*=\s*2', warpx_used_inputs): - dims = '2D' -elif re.search('geometry.dims\s*=\s*RZ', warpx_used_inputs): - dims = 'RZ' -elif re.search('geometry.dims\s*=\s*3', warpx_used_inputs): - dims = '3D' -if re.search('psatd.current_correction\s*=\s*1', warpx_used_inputs): +with open("./warpx_used_inputs", "r") as f: + warpx_used_inputs = f.read() +if re.search("geometry.dims\s*=\s*2", warpx_used_inputs): + dims = "2D" +elif re.search("geometry.dims\s*=\s*RZ", warpx_used_inputs): + dims = "RZ" +elif re.search("geometry.dims\s*=\s*3", warpx_used_inputs): + dims = "3D" +if re.search("psatd.current_correction\s*=\s*1", warpx_used_inputs): current_correction = True -if re.search('psatd.do_time_averaging\s*=\s*1', warpx_used_inputs): +if re.search("psatd.do_time_averaging\s*=\s*1", warpx_used_inputs): time_averaging = True -if re.search('psatd.periodic_single_box_fft\s*=\s*1', warpx_used_inputs): +if re.search("psatd.periodic_single_box_fft\s*=\s*1", warpx_used_inputs): periodic_single_box = True ds = yt.load(filename) @@ -48,21 +51,24 @@ # yt 4.0+ has rounding issues with our domain data: # RuntimeError: yt attempted to read outside the boundaries # of a non-periodic domain along dimension 0. 
-if 'force_periodicity' in dir(ds): ds.force_periodicity() +if "force_periodicity" in dir(ds): + ds.force_periodicity() -all_data = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) -if dims == 'RZ': - Ex = all_data['boxlib', 'Er'].squeeze().v - Ey = all_data['boxlib', 'Et'].squeeze().v +all_data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +if dims == "RZ": + Ex = all_data["boxlib", "Er"].squeeze().v + Ey = all_data["boxlib", "Et"].squeeze().v else: - Ex = all_data['boxlib', 'Ex'].squeeze().v - Ey = all_data['boxlib', 'Ey'].squeeze().v -Ez = all_data['boxlib', 'Ez'].squeeze().v + Ex = all_data["boxlib", "Ex"].squeeze().v + Ey = all_data["boxlib", "Ey"].squeeze().v +Ez = all_data["boxlib", "Ez"].squeeze().v # Set reference energy values, and tolerances for numerical stability and charge conservation tol_energy = 1e-8 tol_charge = 1e-9 -if dims == '2D': +if dims == "2D": if not current_correction: energy_ref = 35657.41657683263 if current_correction and periodic_single_box: @@ -74,7 +80,7 @@ if time_averaging: energy_ref = 26208.04843478073 tol_energy = 1e-6 -elif dims == 'RZ': +elif dims == "RZ": if not current_correction: energy_ref = 191002.6526271543 if current_correction and periodic_single_box: @@ -82,7 +88,7 @@ if current_correction and not periodic_single_box: energy_ref = 511671.4108624746 tol_charge = 3e-4 -elif dims == '3D': +elif dims == "3D": if not current_correction: energy_ref = 661285.098907683 if current_correction and periodic_single_box: @@ -95,22 +101,22 @@ tol_energy = 1e-4 # Check numerical stability by comparing electric field energy to reference energy -energy = np.sum(scc.epsilon_0/2*(Ex**2+Ey**2+Ez**2)) +energy = np.sum(scc.epsilon_0 / 2 * (Ex**2 + Ey**2 + Ez**2)) err_energy = energy / energy_ref -print('\nCheck numerical stability:') -print(f'err_energy = {err_energy}') -print(f'tol_energy = {tol_energy}') -assert(err_energy < tol_energy) +print("\nCheck 
numerical stability:") +print(f"err_energy = {err_energy}") +print(f"tol_energy = {tol_energy}") +assert err_energy < tol_energy # Check charge conservation (relative L-infinity norm of error) with current correction if current_correction: - divE = all_data['boxlib', 'divE'].squeeze().v - rho = all_data['boxlib', 'rho' ].squeeze().v / scc.epsilon_0 + divE = all_data["boxlib", "divE"].squeeze().v + rho = all_data["boxlib", "rho"].squeeze().v / scc.epsilon_0 err_charge = np.amax(np.abs(divE - rho)) / max(np.amax(divE), np.amax(rho)) - print('\nCheck charge conservation:') - print(f'err_charge = {err_charge}') - print(f'tol_charge = {tol_charge}') - assert(err_charge < tol_charge) + print("\nCheck charge conservation:") + print(f"err_charge = {err_charge}") + print(f"tol_charge = {tol_charge}") + assert err_charge < tol_charge test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename, rtol=1.e-8) +checksumAPI.evaluate_checksum(test_name, filename, rtol=1.0e-8) diff --git a/Examples/Tests/nci_psatd_stability/analysis_multiJ.py b/Examples/Tests/nci_psatd_stability/analysis_multiJ.py index 1c68b114c1a..2a438d5d22e 100755 --- a/Examples/Tests/nci_psatd_stability/analysis_multiJ.py +++ b/Examples/Tests/nci_psatd_stability/analysis_multiJ.py @@ -9,14 +9,16 @@ energy corresponds to unstable results due to NCI (suppressed by the use of both J and rho constant in time, and with divergence cleaning). """ + import os import sys import numpy as np import scipy.constants as scc +import yt -import yt ; yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +yt.funcs.mylog.setLevel(0) +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI filename = sys.argv[1] @@ -26,24 +28,27 @@ # yt 4.0+ has rounding issues with our domain data: # RuntimeError: yt attempted to read outside the boundaries # of a non-periodic domain along dimension 0. 
-if 'force_periodicity' in dir(ds): ds.force_periodicity() +if "force_periodicity" in dir(ds): + ds.force_periodicity() -all_data = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) -Ex = all_data['boxlib', 'Ex'].squeeze().v -Ey = all_data['boxlib', 'Ey'].squeeze().v -Ez = all_data['boxlib', 'Ez'].squeeze().v +all_data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +Ex = all_data["boxlib", "Ex"].squeeze().v +Ey = all_data["boxlib", "Ey"].squeeze().v +Ez = all_data["boxlib", "Ez"].squeeze().v # Set reference energy values, and tolerances for numerical stability and charge conservation tol_energy = 1e-8 energy_ref = 66e6 # Check numerical stability by comparing electric field energy to reference energy -energy = np.sum(scc.epsilon_0/2*(Ex**2+Ey**2+Ez**2)) +energy = np.sum(scc.epsilon_0 / 2 * (Ex**2 + Ey**2 + Ez**2)) err_energy = energy / energy_ref -print('\nCheck numerical stability:') -print(f'err_energy = {err_energy}') -print(f'tol_energy = {tol_energy}') -assert(err_energy < tol_energy) +print("\nCheck numerical stability:") +print(f"err_energy = {err_energy}") +print(f"tol_energy = {tol_energy}") +assert err_energy < tol_energy test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/nodal_electrostatic/analysis_3d.py b/Examples/Tests/nodal_electrostatic/analysis_3d.py index 79a2bfdae20..c8725ce5d95 100755 --- a/Examples/Tests/nodal_electrostatic/analysis_3d.py +++ b/Examples/Tests/nodal_electrostatic/analysis_3d.py @@ -5,21 +5,21 @@ import numpy as np -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file fn = sys.argv[1] # check that the maximum chi value is small -fname = 'diags/reducedfiles/ParticleExtrema_beam_p.txt' -chi_max = np.loadtxt(fname)[:,19] +fname = 
"diags/reducedfiles/ParticleExtrema_beam_p.txt" +chi_max = np.loadtxt(fname)[:, 19] assert np.all(chi_max < 2e-8) # check that no photons have been produced -fname = 'diags/reducedfiles/ParticleNumber.txt' -pho_num = np.loadtxt(fname)[:,7] -assert(pho_num.all()==0.) +fname = "diags/reducedfiles/ParticleNumber.txt" +pho_num = np.loadtxt(fname)[:, 7] +assert pho_num.all() == 0.0 # Checksum regression analysis test_name = os.path.split(os.getcwd())[1] diff --git a/Examples/Tests/nuclear_fusion/analysis_deuterium_deuterium_3d_intraspecies.py b/Examples/Tests/nuclear_fusion/analysis_deuterium_deuterium_3d_intraspecies.py index 858df0b26a6..22de371090c 100755 --- a/Examples/Tests/nuclear_fusion/analysis_deuterium_deuterium_3d_intraspecies.py +++ b/Examples/Tests/nuclear_fusion/analysis_deuterium_deuterium_3d_intraspecies.py @@ -28,28 +28,28 @@ import numpy as np -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # Name of the plotfile fn = sys.argv[1] # Load data from reduced diagnostics (physical time and neutron weights) -time = np.loadtxt('./reduced_diags/particle_number.txt', usecols=1) -neutron = np.loadtxt('./reduced_diags/particle_number.txt', usecols=9) +time = np.loadtxt("./reduced_diags/particle_number.txt", usecols=1) +neutron = np.loadtxt("./reduced_diags/particle_number.txt", usecols=9) # Compute reactivity in units of cm^3/s as in equation (61) of [1] -dY_dt = np.abs(neutron[-1]-neutron[0])/(time[-1]-time[0]) -delta_ij = 1 # reactants of the same species -nD = 1e26 # density in 1/m^3 -V = (2e-3)**3 # simulation volume in m^3 -sigma = dY_dt*(1+delta_ij)/(nD**2)/V*(1e2)**3 +dY_dt = np.abs(neutron[-1] - neutron[0]) / (time[-1] - time[0]) +delta_ij = 1 # reactants of the same species +nD = 1e26 # density in 1/m^3 +V = (2e-3) ** 3 # simulation volume in m^3 +sigma = dY_dt * (1 + delta_ij) / (nD**2) / V * (1e2) ** 3 sigma_th = 2.603e-18 -error = 
np.abs(sigma-sigma_th)/sigma_th +error = np.abs(sigma - sigma_th) / sigma_th tolerance = 2e-2 -print('error = ', error) -print('tolerance = ', tolerance) +print("error = ", error) +print("tolerance = ", tolerance) assert error < tolerance # Compare checksums with benchmark diff --git a/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py b/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py index ea0f323f722..b4f77bb9caa 100755 --- a/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py +++ b/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py @@ -10,7 +10,7 @@ import yt -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI import numpy as np import scipy.constants as scc @@ -59,25 +59,27 @@ ## Please be aware that the relative tolerances are often set empirically in this analysis script, ## so it would not be surprising that some tolerances need to be increased in the future. -default_tol = 1.e-12 # Default relative tolerance +default_tol = 1.0e-12 # Default relative tolerance ## Some physical parameters -keV_to_Joule = scc.e*1e3 -MeV_to_Joule = scc.e*1e6 -barn_to_square_meter = 1.e-28 -m_p = 1.00782503223*scc.m_u # Proton mass -m_b = 11.00930536*scc.m_u # Boron 11 mass -m_reduced = m_p*m_b/(m_p+m_b) -m_a = 4.00260325413*scc.m_u # Alpha (He4) mass -m_be = (8.0053095729+0.00325283863)*scc.m_u # Be8* mass (3.03 MeV ex. state) -Z_boron = 5. -Z_proton = 1. 
-E_Gamow = (Z_boron*Z_proton*np.pi*scc.fine_structure)**2*2.*m_reduced*scc.c**2 -E_Gamow_MeV = E_Gamow/MeV_to_Joule -E_Gamow_keV = E_Gamow/keV_to_Joule -E_fusion = 5.55610759*MeV_to_Joule # Energy released during p + B -> alpha + Be* -E_decay = 3.12600414*MeV_to_Joule # Energy released during Be* -> 2*alpha -E_fusion_total = E_fusion + E_decay # Energy released during p + B -> 3*alpha +keV_to_Joule = scc.e * 1e3 +MeV_to_Joule = scc.e * 1e6 +barn_to_square_meter = 1.0e-28 +m_p = 1.00782503223 * scc.m_u # Proton mass +m_b = 11.00930536 * scc.m_u # Boron 11 mass +m_reduced = m_p * m_b / (m_p + m_b) +m_a = 4.00260325413 * scc.m_u # Alpha (He4) mass +m_be = (8.0053095729 + 0.00325283863) * scc.m_u # Be8* mass (3.03 MeV ex. state) +Z_boron = 5.0 +Z_proton = 1.0 +E_Gamow = ( + (Z_boron * Z_proton * np.pi * scc.fine_structure) ** 2 * 2.0 * m_reduced * scc.c**2 +) +E_Gamow_MeV = E_Gamow / MeV_to_Joule +E_Gamow_keV = E_Gamow / keV_to_Joule +E_fusion = 5.55610759 * MeV_to_Joule # Energy released during p + B -> alpha + Be* +E_decay = 3.12600414 * MeV_to_Joule # Energy released during Be* -> 2*alpha +E_fusion_total = E_fusion + E_decay # Energy released during p + B -> 3*alpha ## Checks whether this is the 2D or the 3D test is_2D = "2D" in sys.argv[1] @@ -89,44 +91,48 @@ else: size_y = 8 size_z = 16 -dV_total = size_x*size_y*size_z # Total simulation volume +dV_total = size_x * size_y * size_z # Total simulation volume # Volume of a slice corresponding to a single cell in the z direction. 
In tests 1 and 2, all the # particles of a given species in the same slice have the exact same momentum -dV_slice = size_x*size_y +dV_slice = size_x * size_y if is_2D: - dt = 1./(scc.c*np.sqrt(2.)) + dt = 1.0 / (scc.c * np.sqrt(2.0)) yt_z_string = "particle_position_y" - nppcell_1 = 10000*8 - nppcell_2 = 900*8 + nppcell_1 = 10000 * 8 + nppcell_2 = 900 * 8 else: - dt = 1./(scc.c*np.sqrt(3.)) + dt = 1.0 / (scc.c * np.sqrt(3.0)) yt_z_string = "particle_position_z" nppcell_1 = 10000 nppcell_2 = 900 # In test 1 and 2, the energy in cells number i (in z direction) is typically Energy_step * i**2 -Energy_step = 22.*keV_to_Joule +Energy_step = 22.0 * keV_to_Joule -def is_close(val1, val2, rtol=default_tol, atol=0.): + +def is_close(val1, val2, rtol=default_tol, atol=0.0): ## Wrapper around numpy.isclose, used to override the default tolerances. return np.isclose(val1, val2, rtol=rtol, atol=atol) + def add_existing_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix): - data_dict[prefix+"_px_"+suffix] = yt_ad[species_name, "particle_momentum_x"].v - data_dict[prefix+"_py_"+suffix] = yt_ad[species_name, "particle_momentum_y"].v - data_dict[prefix+"_pz_"+suffix] = yt_ad[species_name, "particle_momentum_z"].v - data_dict[prefix+"_w_"+suffix] = yt_ad[species_name, "particle_weight"].v - data_dict[prefix+"_id_"+suffix] = yt_ad[species_name, "particle_id"].v - data_dict[prefix+"_cpu_"+suffix] = yt_ad[species_name, "particle_cpu"].v - data_dict[prefix+"_z_"+suffix] = yt_ad[species_name, yt_z_string].v + data_dict[prefix + "_px_" + suffix] = yt_ad[species_name, "particle_momentum_x"].v + data_dict[prefix + "_py_" + suffix] = yt_ad[species_name, "particle_momentum_y"].v + data_dict[prefix + "_pz_" + suffix] = yt_ad[species_name, "particle_momentum_z"].v + data_dict[prefix + "_w_" + suffix] = yt_ad[species_name, "particle_weight"].v + data_dict[prefix + "_id_" + suffix] = yt_ad[species_name, "particle_id"].v + data_dict[prefix + "_cpu_" + suffix] = yt_ad[species_name, 
"particle_cpu"].v + data_dict[prefix + "_z_" + suffix] = yt_ad[species_name, yt_z_string].v + def add_empty_species_to_dict(data_dict, species_name, prefix, suffix): - data_dict[prefix+"_px_"+suffix] = np.empty(0) - data_dict[prefix+"_py_"+suffix] = np.empty(0) - data_dict[prefix+"_pz_"+suffix] = np.empty(0) - data_dict[prefix+"_w_"+suffix] = np.empty(0) - data_dict[prefix+"_id_"+suffix] = np.empty(0) - data_dict[prefix+"_cpu_"+suffix] = np.empty(0) - data_dict[prefix+"_z_"+suffix] = np.empty(0) + data_dict[prefix + "_px_" + suffix] = np.empty(0) + data_dict[prefix + "_py_" + suffix] = np.empty(0) + data_dict[prefix + "_pz_" + suffix] = np.empty(0) + data_dict[prefix + "_w_" + suffix] = np.empty(0) + data_dict[prefix + "_id_" + suffix] = np.empty(0) + data_dict[prefix + "_cpu_" + suffix] = np.empty(0) + data_dict[prefix + "_z_" + suffix] = np.empty(0) + def add_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix): try: @@ -138,65 +144,79 @@ def add_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix): ## entirely fuses into alphas. add_empty_species_to_dict(data_dict, species_name, prefix, suffix) + def check_particle_number_conservation(data): total_w_proton_start = np.sum(data["proton_w_start"]) - total_w_proton_end = np.sum(data["proton_w_end"]) - total_w_boron_start = np.sum(data["boron_w_start"]) - total_w_boron_end = np.sum(data["boron_w_end"]) + total_w_proton_end = np.sum(data["proton_w_end"]) + total_w_boron_start = np.sum(data["boron_w_start"]) + total_w_boron_end = np.sum(data["boron_w_end"]) consumed_proton = total_w_proton_start - total_w_proton_end - consumed_boron = total_w_boron_start - total_w_boron_end - created_alpha = np.sum(data["alpha_w_end"]) - assert(consumed_proton >= 0.) - assert(consumed_boron >= 0.) - assert(created_alpha >= 0.) 
+ consumed_boron = total_w_boron_start - total_w_boron_end + created_alpha = np.sum(data["alpha_w_end"]) + assert consumed_proton >= 0.0 + assert consumed_boron >= 0.0 + assert created_alpha >= 0.0 ## Check that number of consumed proton and consumed boron are equal assert_scale = max(total_w_proton_start, total_w_boron_start) - assert(is_close(consumed_proton, consumed_boron, rtol = 0., atol = default_tol*assert_scale)) + assert is_close( + consumed_proton, consumed_boron, rtol=0.0, atol=default_tol * assert_scale + ) ## Check that number of consumed particles corresponds to number of produced alpha ## Factor 3 is here because each nuclear fusion reaction produces 3 alphas - assert(is_close(total_w_proton_start, total_w_proton_end + created_alpha/3.)) - assert(is_close(total_w_boron_start, total_w_boron_end + created_alpha/3.)) + assert is_close(total_w_proton_start, total_w_proton_end + created_alpha / 3.0) + assert is_close(total_w_boron_start, total_w_boron_end + created_alpha / 3.0) + def compute_energy_array(data, species_name, suffix, m): ## Relativistic computation of kinetic energy for a given species - psq_array = data[species_name+'_px_'+suffix]**2 + data[species_name+'_py_'+suffix]**2 + \ - data[species_name+'_pz_'+suffix]**2 - rest_energy = m*scc.c**2 - return np.sqrt(psq_array*scc.c**2 + rest_energy**2) - rest_energy + psq_array = ( + data[species_name + "_px_" + suffix] ** 2 + + data[species_name + "_py_" + suffix] ** 2 + + data[species_name + "_pz_" + suffix] ** 2 + ) + rest_energy = m * scc.c**2 + return np.sqrt(psq_array * scc.c**2 + rest_energy**2) - rest_energy + def check_energy_conservation(data): proton_energy_start = compute_energy_array(data, "proton", "start", m_p) - proton_energy_end = compute_energy_array(data, "proton", "end", m_p) - boron_energy_start = compute_energy_array(data, "boron", "start", m_b) - boron_energy_end = compute_energy_array(data, "boron", "end", m_b) - alpha_energy_end = compute_energy_array(data, "alpha", "end", 
m_a) - total_energy_start = np.sum(proton_energy_start*data["proton_w_start"]) + \ - np.sum(boron_energy_start*data["boron_w_start"]) - total_energy_end = np.sum(proton_energy_end*data["proton_w_end"]) + \ - np.sum(boron_energy_end*data["boron_w_end"]) + \ - np.sum(alpha_energy_end*data["alpha_w_end"]) + proton_energy_end = compute_energy_array(data, "proton", "end", m_p) + boron_energy_start = compute_energy_array(data, "boron", "start", m_b) + boron_energy_end = compute_energy_array(data, "boron", "end", m_b) + alpha_energy_end = compute_energy_array(data, "alpha", "end", m_a) + total_energy_start = np.sum(proton_energy_start * data["proton_w_start"]) + np.sum( + boron_energy_start * data["boron_w_start"] + ) + total_energy_end = ( + np.sum(proton_energy_end * data["proton_w_end"]) + + np.sum(boron_energy_end * data["boron_w_end"]) + + np.sum(alpha_energy_end * data["alpha_w_end"]) + ) ## Factor 3 is here because each nuclear fusion reaction produces 3 alphas - n_fusion_reaction = np.sum(data["alpha_w_end"])/3. 
- assert(is_close(total_energy_end, - total_energy_start + n_fusion_reaction*E_fusion_total, - rtol = 1.e-8)) + n_fusion_reaction = np.sum(data["alpha_w_end"]) / 3.0 + assert is_close( + total_energy_end, + total_energy_start + n_fusion_reaction * E_fusion_total, + rtol=1.0e-8, + ) + def check_momentum_conservation(data): - proton_total_px_start = np.sum(data["proton_px_start"]*data["proton_w_start"]) - proton_total_py_start = np.sum(data["proton_py_start"]*data["proton_w_start"]) - proton_total_pz_start = np.sum(data["proton_pz_start"]*data["proton_w_start"]) - proton_total_px_end = np.sum(data["proton_px_end"]*data["proton_w_end"]) - proton_total_py_end = np.sum(data["proton_py_end"]*data["proton_w_end"]) - proton_total_pz_end = np.sum(data["proton_pz_end"]*data["proton_w_end"]) - boron_total_px_start = np.sum(data["boron_px_start"]*data["boron_w_start"]) - boron_total_py_start = np.sum(data["boron_py_start"]*data["boron_w_start"]) - boron_total_pz_start = np.sum(data["boron_pz_start"]*data["boron_w_start"]) - boron_total_px_end = np.sum(data["boron_px_end"]*data["boron_w_end"]) - boron_total_py_end = np.sum(data["boron_py_end"]*data["boron_w_end"]) - boron_total_pz_end = np.sum(data["boron_pz_end"]*data["boron_w_end"]) - alpha_total_px_end = np.sum(data["alpha_px_end"]*data["alpha_w_end"]) - alpha_total_py_end = np.sum(data["alpha_py_end"]*data["alpha_w_end"]) - alpha_total_pz_end = np.sum(data["alpha_pz_end"]*data["alpha_w_end"]) + proton_total_px_start = np.sum(data["proton_px_start"] * data["proton_w_start"]) + proton_total_py_start = np.sum(data["proton_py_start"] * data["proton_w_start"]) + proton_total_pz_start = np.sum(data["proton_pz_start"] * data["proton_w_start"]) + proton_total_px_end = np.sum(data["proton_px_end"] * data["proton_w_end"]) + proton_total_py_end = np.sum(data["proton_py_end"] * data["proton_w_end"]) + proton_total_pz_end = np.sum(data["proton_pz_end"] * data["proton_w_end"]) + boron_total_px_start = np.sum(data["boron_px_start"] * 
data["boron_w_start"]) + boron_total_py_start = np.sum(data["boron_py_start"] * data["boron_w_start"]) + boron_total_pz_start = np.sum(data["boron_pz_start"] * data["boron_w_start"]) + boron_total_px_end = np.sum(data["boron_px_end"] * data["boron_w_end"]) + boron_total_py_end = np.sum(data["boron_py_end"] * data["boron_w_end"]) + boron_total_pz_end = np.sum(data["boron_pz_end"] * data["boron_w_end"]) + alpha_total_px_end = np.sum(data["alpha_px_end"] * data["alpha_w_end"]) + alpha_total_py_end = np.sum(data["alpha_py_end"] * data["alpha_w_end"]) + alpha_total_pz_end = np.sum(data["alpha_pz_end"] * data["alpha_w_end"]) total_px_start = proton_total_px_start + boron_total_px_start total_py_start = proton_total_py_start + boron_total_py_start total_pz_start = proton_total_pz_start + boron_total_pz_start @@ -204,42 +224,45 @@ def check_momentum_conservation(data): total_py_end = proton_total_py_end + boron_total_py_end + alpha_total_py_end total_pz_end = proton_total_pz_end + boron_total_pz_end + alpha_total_pz_end ## Absolute tolerance is needed because sometimes the initial momentum is exactly 0 - assert(is_close(total_px_start, total_px_end, atol=1.e-15)) - assert(is_close(total_py_start, total_py_end, atol=1.e-15)) - assert(is_close(total_pz_start, total_pz_end, atol=1.e-15)) + assert is_close(total_px_start, total_px_end, atol=1.0e-15) + assert is_close(total_py_start, total_py_end, atol=1.0e-15) + assert is_close(total_pz_start, total_pz_end, atol=1.0e-15) + def check_id(data): ## Check that all created particles have unique id + cpu identifier (two particles with ## different cpu can have the same id) - complex_id = data["alpha_id_end"] + 1j*data["alpha_cpu_end"] - assert(complex_id.shape == np.unique(complex_id).shape) + complex_id = data["alpha_id_end"] + 1j * data["alpha_cpu_end"] + assert complex_id.shape == np.unique(complex_id).shape + def basic_product_particles_check(data): ## For each nuclear fusion reaction in the code, we create 6 alpha 
macroparticles. So the ## total number of alpha macroparticles must be a multiple of 6. num_alpha = data["alpha_w_end"].shape[0] - assert(num_alpha%6 == 0) + assert num_alpha % 6 == 0 ## The weight of the 6 macroparticles coming from a single fusion event should be the same. ## We verify this here. - assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][1::6])) - assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][2::6])) - assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][3::6])) - assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][4::6])) - assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][5::6])) + assert np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][1::6]) + assert np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][2::6]) + assert np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][3::6]) + assert np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][4::6]) + assert np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][5::6]) ## When we create 6 macroparticles, the first has the exact same momentum as the second, the ## third has the same as the fourth and the fifth has the same as the sixth. 
We verify this ## here - assert(np.array_equal(data["alpha_px_end"][::6], data["alpha_px_end"][1::6])) - assert(np.array_equal(data["alpha_py_end"][::6], data["alpha_py_end"][1::6])) - assert(np.array_equal(data["alpha_pz_end"][::6], data["alpha_pz_end"][1::6])) - assert(np.array_equal(data["alpha_px_end"][2::6], data["alpha_px_end"][3::6])) - assert(np.array_equal(data["alpha_py_end"][2::6], data["alpha_py_end"][3::6])) - assert(np.array_equal(data["alpha_pz_end"][2::6], data["alpha_pz_end"][3::6])) - assert(np.array_equal(data["alpha_px_end"][4::6], data["alpha_px_end"][5::6])) - assert(np.array_equal(data["alpha_py_end"][4::6], data["alpha_py_end"][5::6])) - assert(np.array_equal(data["alpha_pz_end"][4::6], data["alpha_pz_end"][5::6])) + assert np.array_equal(data["alpha_px_end"][::6], data["alpha_px_end"][1::6]) + assert np.array_equal(data["alpha_py_end"][::6], data["alpha_py_end"][1::6]) + assert np.array_equal(data["alpha_pz_end"][::6], data["alpha_pz_end"][1::6]) + assert np.array_equal(data["alpha_px_end"][2::6], data["alpha_px_end"][3::6]) + assert np.array_equal(data["alpha_py_end"][2::6], data["alpha_py_end"][3::6]) + assert np.array_equal(data["alpha_pz_end"][2::6], data["alpha_pz_end"][3::6]) + assert np.array_equal(data["alpha_px_end"][4::6], data["alpha_px_end"][5::6]) + assert np.array_equal(data["alpha_py_end"][4::6], data["alpha_py_end"][5::6]) + assert np.array_equal(data["alpha_pz_end"][4::6], data["alpha_pz_end"][5::6]) + def generic_check(data): check_particle_number_conservation(data) @@ -248,39 +271,43 @@ def generic_check(data): check_id(data) basic_product_particles_check(data) + def check_isotropy(data, relative_tolerance): ## Checks that the alpha particles are emitted isotropically - average_px_sq = np.average(data["alpha_px_end"]*data["alpha_px_end"]) - average_py_sq = np.average(data["alpha_py_end"]*data["alpha_py_end"]) - average_pz_sq = np.average(data["alpha_pz_end"]*data["alpha_pz_end"]) - assert(is_close(average_px_sq, 
average_py_sq, rtol = relative_tolerance)) - assert(is_close(average_px_sq, average_pz_sq, rtol = relative_tolerance)) + average_px_sq = np.average(data["alpha_px_end"] * data["alpha_px_end"]) + average_py_sq = np.average(data["alpha_py_end"] * data["alpha_py_end"]) + average_pz_sq = np.average(data["alpha_pz_end"] * data["alpha_pz_end"]) + assert is_close(average_px_sq, average_py_sq, rtol=relative_tolerance) + assert is_close(average_px_sq, average_pz_sq, rtol=relative_tolerance) + def astrophysical_factor_lowE(E): ## E is in keV ## Returns astrophysical factor in MeV b using the low energy fit in the range E < 400 keV ## described in equation (3) of A. Tentori and F. Belloni, Nuclear Fusion, 63, 086001 (2023) - C0 = 197. + C0 = 197.0 C1 = 0.269 C2 = 2.54e-4 AL = 1.82e4 - EL = 148. + EL = 148.0 dEL = 2.35 - return C0 + C1*E + C2*E**2 + AL/((E-EL)**2 + dEL**2) + return C0 + C1 * E + C2 * E**2 + AL / ((E - EL) ** 2 + dEL**2) + def astrophysical_factor_midE(E): ## E is in keV ## Returns astrophysical factor in MeV b using the mid energy fit in the range ## 400 keV < E < 668 keV described in equation (4) of A. Tentori and F. Belloni, ## Nuclear Fusion, 63, 086001 (2023) - D0 = 346. - D1 = 150. + D0 = 346.0 + D1 = 150.0 D2 = -59.9 D5 = -0.460 - E_400 = 400. - E_100 = 100. - E_norm = (E - E_400)/E_100 - return D0 + D1*E_norm + D2*E_norm**2 + D5*E_norm**5 + E_400 = 400.0 + E_100 = 100.0 + E_norm = (E - E_400) / E_100 + return D0 + D1 * E_norm + D2 * E_norm**2 + D5 * E_norm**5 + def astrophysical_factor_highE(E): ## E is in keV @@ -292,27 +319,36 @@ def astrophysical_factor_highE(E): A2 = 1.36e6 A3 = 3.71e6 E0 = 640.9 - E1 = 1211. - E2 = 2340. - E3 = 3294. + E1 = 1211.0 + E2 = 2340.0 + E3 = 3294.0 dE0 = 85.5 - dE1 = 414. - dE2 = 221. - dE3 = 351. 
+ dE1 = 414.0 + dE2 = 221.0 + dE3 = 351.0 B = 0.381 - return A0/((E-E0)**2 + dE0**2) + A1/((E-E1)**2 + dE1**2) + \ - A2/((E-E2)**2 + dE2**2) + A3/((E-E3)**2 + dE3**2) + B + return ( + A0 / ((E - E0) ** 2 + dE0**2) + + A1 / ((E - E1) ** 2 + dE1**2) + + A2 / ((E - E2) ** 2 + dE2**2) + + A3 / ((E - E3) ** 2 + dE3**2) + + B + ) + def astrophysical_factor(E): ## E is in keV ## Returns astrophysical factor in MeV b using the fits described in A. Tentori ## and F. Belloni, Nuclear Fusion, 63, 086001 (2023) conditions = [E <= 400, E <= 668, E > 668] - choices = [astrophysical_factor_lowE(E), - astrophysical_factor_midE(E), - astrophysical_factor_highE(E)] + choices = [ + astrophysical_factor_lowE(E), + astrophysical_factor_midE(E), + astrophysical_factor_highE(E), + ] return np.select(conditions, choices) + def pb_cross_section_buck_fit(E): ## E is in MeV ## Returns cross section in b using a power law fit of the data presented in Buck et al., @@ -321,52 +357,75 @@ def pb_cross_section_buck_fit(E): ## Cross section at E = E_start_fit = 3.5 MeV cross_section_start_fit = 0.01277998 slope_fit = -2.661840717596765 - return cross_section_start_fit*(E/E_start_fit)**slope_fit + return cross_section_start_fit * (E / E_start_fit) ** slope_fit + def pb_cross_section(E): ## E is in keV ## Returns cross section in b using the fits described in A. Tentori and F. Belloni, ## Nucl. 
Fusion, 63, 086001 (2023) for E < 9.76 MeV otherwise returns a power law fit ## of the data in Buck et al., Nuclear Physics A, 398(2), 189-202 (1983) - E_MeV = E/1.e3 + E_MeV = E / 1.0e3 conditions = [E <= 9760, E > 9760] - choices = [astrophysical_factor(E)/E_MeV * np.exp(-np.sqrt(E_Gamow_MeV / E_MeV)), - pb_cross_section_buck_fit(E_MeV)] + choices = [ + astrophysical_factor(E) / E_MeV * np.exp(-np.sqrt(E_Gamow_MeV / E_MeV)), + pb_cross_section_buck_fit(E_MeV), + ] return np.select(conditions, choices) + def E_com_to_p_sq_com(m1, m2, E): ## E is the total (kinetic+mass) energy of a two particle (with mass m1 and m2) system in ## its center of mass frame, in J. ## Returns the square norm of the momentum of each particle in that frame. - E_ratio = E/((m1+m2)*scc.c**2) - return m1*m2*scc.c**2 * (E_ratio**2 - 1) + (m1-m2)**2*scc.c**2/4 * (E_ratio - 1./E_ratio)**2 + E_ratio = E / ((m1 + m2) * scc.c**2) + return ( + m1 * m2 * scc.c**2 * (E_ratio**2 - 1) + + (m1 - m2) ** 2 * scc.c**2 / 4 * (E_ratio - 1.0 / E_ratio) ** 2 + ) + def compute_relative_v_com(E): ## E is the kinetic energy of proton+boron in the center of mass frame, in keV ## Returns the relative velocity between proton and boron in this frame, in m/s - E_J = E*keV_to_Joule + (m_p + m_b)*scc.c**2 + E_J = E * keV_to_Joule + (m_p + m_b) * scc.c**2 p_sq = E_com_to_p_sq_com(m_p, m_b, E_J) p = np.sqrt(p_sq) - gamma_p = np.sqrt(1. + p_sq / (m_p*scc.c)**2) - gamma_b = np.sqrt(1. + p_sq / (m_b*scc.c)**2) - v_p = p/(gamma_p*m_p) - v_b = p/(gamma_b*m_b) - return v_p+v_b + gamma_p = np.sqrt(1.0 + p_sq / (m_p * scc.c) ** 2) + gamma_b = np.sqrt(1.0 + p_sq / (m_b * scc.c) ** 2) + v_p = p / (gamma_p * m_p) + v_b = p / (gamma_b * m_b) + return v_p + v_b + def expected_alpha_weight_com(E_com, proton_density, boron_density, dV, dt): ## Computes expected number of produced alpha particles as a function of energy E_com in the ## center of mass frame. E_com is in keV. 
- assert(np.all(E_com>=0)) + assert np.all(E_com >= 0) ## Case E_com == 0 is handled manually to avoid division by zero conditions = [E_com == 0, E_com > 0] ## Necessary to avoid division by 0 warning when pb_cross_section is evaluated - E_com_never_zero = np.clip(E_com, 1.e-15, None) - choices = [0., pb_cross_section(E_com_never_zero)*compute_relative_v_com(E_com_never_zero)] + E_com_never_zero = np.clip(E_com, 1.0e-15, None) + choices = [ + 0.0, + pb_cross_section(E_com_never_zero) * compute_relative_v_com(E_com_never_zero), + ] sigma_times_vrel = np.select(conditions, choices) ## Factor 3 is here because each fusion reaction produces 3 alphas - return 3.*proton_density*boron_density*sigma_times_vrel*barn_to_square_meter*dV*dt - -def check_macroparticle_number(data, fusion_probability_target_value, num_pair_per_cell): + return ( + 3.0 + * proton_density + * boron_density + * sigma_times_vrel + * barn_to_square_meter + * dV + * dt + ) + + +def check_macroparticle_number( + data, fusion_probability_target_value, num_pair_per_cell +): ## Checks that the number of macroparticles is as expected for the first and second tests ## The first slice 0 < z < 1 does not contribute to alpha creation @@ -374,65 +433,84 @@ def check_macroparticle_number(data, fusion_probability_target_value, num_pair_p ## In these tests, the fusion_multiplier is so high that the fusion probability per pair is ## equal to the parameter fusion_probability_target_value fusion_probability_per_pair = fusion_probability_target_value - expected_fusion_number = numcells*num_pair_per_cell*fusion_probability_per_pair + expected_fusion_number = numcells * num_pair_per_cell * fusion_probability_per_pair ## Each fusion event produces 6 alpha macroparticles - expected_macroparticle_number = 6.*expected_fusion_number - std_macroparticle_number = 6.*np.sqrt(expected_fusion_number) + expected_macroparticle_number = 6.0 * expected_fusion_number + std_macroparticle_number = 6.0 * np.sqrt(expected_fusion_number) 
actual_macroparticle_number = data["alpha_w_end"].shape[0] # 5 sigma test that has an intrinsic probability to fail of 1 over ~2 millions - assert(is_close(actual_macroparticle_number, expected_macroparticle_number, rtol = 0., - atol = 5.*std_macroparticle_number)) + assert is_close( + actual_macroparticle_number, + expected_macroparticle_number, + rtol=0.0, + atol=5.0 * std_macroparticle_number, + ) ## used in subsequent function return expected_fusion_number + def p_sq_boron_frame_to_E_COM_frame(p_proton_sq): # Takes the proton square norm of the momentum in the boron rest frame and returns the total # kinetic energy in the center of mass frame. Everything is in SI units. # Total (kinetic + mass) energy in lab frame - E_lab = np.sqrt(p_proton_sq*scc.c**2 + (m_p*scc.c**2)**2) + m_b*scc.c**2 + E_lab = np.sqrt(p_proton_sq * scc.c**2 + (m_p * scc.c**2) ** 2) + m_b * scc.c**2 # Use invariant E**2 - p**2c**2 of 4-momentum norm to compute energy in center of mass frame - E_com = np.sqrt(E_lab**2 - p_proton_sq*scc.c**2) + E_com = np.sqrt(E_lab**2 - p_proton_sq * scc.c**2) # Corresponding kinetic energy - E_com_kin = E_com - (m_b+m_p)*scc.c**2 - return E_com_kin*(p_proton_sq>0.) + E_com_kin = E_com - (m_b + m_p) * scc.c**2 + return E_com_kin * (p_proton_sq > 0.0) + def p_sq_to_kinetic_energy(p_sq, m): ## Returns the kinetic energy of a particle as a function of its squared momentum. ## Everything is in SI units. 
- return np.sqrt(p_sq*scc.c**2 + (m*scc.c**2)**2) - (m*scc.c**2) + return np.sqrt(p_sq * scc.c**2 + (m * scc.c**2) ** 2) - (m * scc.c**2) + def compute_E_com1(data): ## Computes kinetic energy (in Joule) in the center of frame for the first test ## Square norm of the momentum of proton/boron as a function of cell number in z direction - p_sq = 2.*m_reduced*(Energy_step*np.arange(size_z)**2) + p_sq = 2.0 * m_reduced * (Energy_step * np.arange(size_z) ** 2) return p_sq_to_kinetic_energy(p_sq, m_b) + p_sq_to_kinetic_energy(p_sq, m_p) + def compute_E_com2(data): ## Computes kinetic energy (in Joule) in the center of frame for the second test ## Square norm of the momentum of the proton as a function of cell number in z direction - p_proton_sq = 2.*m_p*(Energy_step*np.arange(size_z)**2) + p_proton_sq = 2.0 * m_p * (Energy_step * np.arange(size_z) ** 2) return p_sq_boron_frame_to_E_COM_frame(p_proton_sq) -def check_alpha_yield(data, expected_fusion_number, E_com, proton_density, boron_density): + +def check_alpha_yield( + data, expected_fusion_number, E_com, proton_density, boron_density +): ## Checks that the fusion yield is as expected for the first and second tests. ## Proton and boron densities are in m^-3. 
- alpha_weight_theory = expected_alpha_weight_com(E_com/keV_to_Joule, proton_density, - boron_density, dV_slice, dt) - alpha_weight_simulation = np.histogram(data["alpha_z_end"], bins=size_z, range=(0, size_z), - weights = data["alpha_w_end"])[0] + alpha_weight_theory = expected_alpha_weight_com( + E_com / keV_to_Joule, proton_density, boron_density, dV_slice, dt + ) + alpha_weight_simulation = np.histogram( + data["alpha_z_end"], bins=size_z, range=(0, size_z), weights=data["alpha_w_end"] + )[0] ## -1 is here because the first slice 0 < z < 1 does not contribute to alpha creation - expected_fusion_number_per_slice = expected_fusion_number/(size_z-1) - relative_std_alpha_weight = 1./np.sqrt(expected_fusion_number_per_slice) + expected_fusion_number_per_slice = expected_fusion_number / (size_z - 1) + relative_std_alpha_weight = 1.0 / np.sqrt(expected_fusion_number_per_slice) # 5 sigma test that has an intrinsic probability to fail of 1 over ~2 millions - assert(np.all(is_close(alpha_weight_theory, alpha_weight_simulation, - rtol = 5.*relative_std_alpha_weight))) + assert np.all( + is_close( + alpha_weight_theory, + alpha_weight_simulation, + rtol=5.0 * relative_std_alpha_weight, + ) + ) + def check_initial_energy1(data, E_com): ## In WarpX, the initial momentum of the alphas is computed assuming that the fusion process @@ -461,7 +539,7 @@ def check_initial_energy1(data, E_com): E_kinetic_com_before = E_com[slice_number] ## Total (kinetic + mass) energy in the lab frame after ## proton + boron 11 -> alpha + beryllium 8 - E_total_com_after = E_kinetic_com_before + E_fusion + (m_a + m_be)*scc.c**2 + E_total_com_after = E_kinetic_com_before + E_fusion + (m_a + m_be) * scc.c**2 ## Corresponding momentum norm squared of alpha1/beryllium p_sq_after = E_com_to_p_sq_com(m_a, m_be, E_total_com_after) ## Corresponding kinetic energy for alpha1 @@ -475,16 +553,21 @@ def check_initial_energy1(data, E_com): ## corresponds to an alpha emitted exactly in the (opposite) direction of 
the beryllium ## in the center of mass frame. This calculation involves solving a polynomial equation of ## order 2 in p_alpha23. - max_p_alpha23 = 0.5*(np.sqrt(p_sq_after) + \ - np.sqrt(4*m_a*energy_alpha2_plus_3_theory - p_sq_after)) - min_p_alpha23 = 0.5*(np.sqrt(p_sq_after) - \ - np.sqrt(4*m_a*energy_alpha2_plus_3_theory - p_sq_after)) - max_energy_alpha23 = max_p_alpha23**2/(2.*m_a) - min_energy_alpha23 = min_p_alpha23**2/(2.*m_a) + max_p_alpha23 = 0.5 * ( + np.sqrt(p_sq_after) + + np.sqrt(4 * m_a * energy_alpha2_plus_3_theory - p_sq_after) + ) + min_p_alpha23 = 0.5 * ( + np.sqrt(p_sq_after) + - np.sqrt(4 * m_a * energy_alpha2_plus_3_theory - p_sq_after) + ) + max_energy_alpha23 = max_p_alpha23**2 / (2.0 * m_a) + min_energy_alpha23 = min_p_alpha23**2 / (2.0 * m_a) ## Get the energy of all alphas in the slice - energy_alpha_slice = energy_alpha_simulation[(z_alpha >= slice_number)* \ - (z_alpha < (slice_number + 1))] + energy_alpha_slice = energy_alpha_simulation[ + (z_alpha >= slice_number) * (z_alpha < (slice_number + 1)) + ] ## Energy of alphas1 (here, first macroparticle of each fusion event) in the slice energy_alpha1_simulation = energy_alpha_slice[::6] ## Energy of alphas2 (here, third macroparticle of each fusion event) in the slice @@ -492,14 +575,25 @@ def check_initial_energy1(data, E_com): ## Energy of alphas3 (here, fifth macroparticle of each fusion event) in the slice energy_alpha3_simulation = energy_alpha_slice[4::6] - assert(np.all(is_close(energy_alpha1_simulation, energy_alpha1_theory, rtol=5.e-8))) + assert np.all( + is_close(energy_alpha1_simulation, energy_alpha1_theory, rtol=5.0e-8) + ) ## Check that the max / min value are comparable to the analytical value ## The minimum value is checked to be within 20 keV of the analytical value ## The maximum value is checked to be within 1% of the analytical value - assert(is_close(np.amax(energy_alpha2_simulation), max_energy_alpha23, rtol=1.e-2 )) - 
assert(is_close(np.amin(energy_alpha2_simulation), min_energy_alpha23, atol=3.218e-15 )) - assert(is_close(np.amax(energy_alpha3_simulation), max_energy_alpha23, rtol=1.e-2 )) - assert(is_close(np.amin(energy_alpha3_simulation), min_energy_alpha23, atol=3.218e-15 )) + assert is_close( + np.amax(energy_alpha2_simulation), max_energy_alpha23, rtol=1.0e-2 + ) + assert is_close( + np.amin(energy_alpha2_simulation), min_energy_alpha23, atol=3.218e-15 + ) + assert is_close( + np.amax(energy_alpha3_simulation), max_energy_alpha23, rtol=1.0e-2 + ) + assert is_close( + np.amin(energy_alpha3_simulation), min_energy_alpha23, atol=3.218e-15 + ) + def check_initial_energy2(data): ## In WarpX, the initial momentum of the alphas is computed assuming that the fusion process @@ -526,9 +620,9 @@ def check_initial_energy2(data): for slice_number in range(1, size_z): ## For simplicity, all the calculations in this function are done nonrelativistically ## Proton kinetic energy in the lab frame before fusion - E_proton_nonrelativistic = Energy_step*slice_number**2 + E_proton_nonrelativistic = Energy_step * slice_number**2 ## Corresponding square norm of proton momentum - p_proton_sq = 2.*m_p*E_proton_nonrelativistic + p_proton_sq = 2.0 * m_p * E_proton_nonrelativistic ## Kinetic energy in the lab frame after ## proton + boron 11 -> alpha + beryllium 8 E_after_fusion = E_proton_nonrelativistic + E_fusion @@ -537,21 +631,29 @@ def check_initial_energy2(data): ## calculation is done by noting that the maximum (minimum) energy corresponds to an alpha ## emitted exactly in the (opposite) direction of the proton in the lab frame. This ## calculation involves solving a polynomial equation of order 2 in p_alpha1. - max_p_alpha1 = (m_a/m_be*np.sqrt(p_proton_sq) + \ - np.sqrt(-m_a/m_be*p_proton_sq + 2.*E_after_fusion*m_a*(m_a/m_be + 1.))) / \ - (m_a/m_be + 1.) 
- min_p_alpha1 = (m_a/m_be*np.sqrt(p_proton_sq) - \ - np.sqrt(-m_a/m_be*p_proton_sq + 2.*E_after_fusion*m_a*(m_a/m_be + 1.))) / \ - (m_a/m_be + 1.) - max_energy_alpha1 = max_p_alpha1**2/(2*m_a) - min_energy_alpha1 = min_p_alpha1**2/(2*m_a) + max_p_alpha1 = ( + m_a / m_be * np.sqrt(p_proton_sq) + + np.sqrt( + -m_a / m_be * p_proton_sq + + 2.0 * E_after_fusion * m_a * (m_a / m_be + 1.0) + ) + ) / (m_a / m_be + 1.0) + min_p_alpha1 = ( + m_a / m_be * np.sqrt(p_proton_sq) + - np.sqrt( + -m_a / m_be * p_proton_sq + + 2.0 * E_after_fusion * m_a * (m_a / m_be + 1.0) + ) + ) / (m_a / m_be + 1.0) + max_energy_alpha1 = max_p_alpha1**2 / (2 * m_a) + min_energy_alpha1 = min_p_alpha1**2 / (2 * m_a) ## Corresponding max/min kinetic energy of Beryllium in the lab frame max_E_beryllium = E_after_fusion - min_energy_alpha1 min_E_beryllium = E_after_fusion - max_energy_alpha1 ## Corresponding max/min momentum square of Beryllium in the lab frame - max_p_sq_beryllium = 2.*m_be*max_E_beryllium - min_p_sq_beryllium = 2.*m_be*min_E_beryllium + max_p_sq_beryllium = 2.0 * m_be * max_E_beryllium + min_p_sq_beryllium = 2.0 * m_be * min_E_beryllium ## Corresponding max/min kinetic energy in the lab frame for alpha2 + alpha3 after ## Beryllium decay max_energy_alpha2_plus_3 = max_E_beryllium + E_decay @@ -562,16 +664,21 @@ def check_initial_energy2(data): ## to an alpha emitted exactly in the (opposite) direction of a beryllium with energy ## max_E_beryllium (min_E_beryllium). This calculation involves solving a polynomial ## equation of order 2 in p_alpha23. 
- max_p_alpha23 = 0.5*(np.sqrt(max_p_sq_beryllium) + \ - np.sqrt(4*m_a*max_energy_alpha2_plus_3 - max_p_sq_beryllium)) - min_p_alpha23 = 0.5*(np.sqrt(min_p_sq_beryllium) - \ - np.sqrt(4*m_a*min_energy_alpha2_plus_3 - min_p_sq_beryllium)) - max_energy_alpha23 = max_p_alpha23**2/(2*m_a) - min_energy_alpha23 = min_p_alpha23**2/(2*m_a) + max_p_alpha23 = 0.5 * ( + np.sqrt(max_p_sq_beryllium) + + np.sqrt(4 * m_a * max_energy_alpha2_plus_3 - max_p_sq_beryllium) + ) + min_p_alpha23 = 0.5 * ( + np.sqrt(min_p_sq_beryllium) + - np.sqrt(4 * m_a * min_energy_alpha2_plus_3 - min_p_sq_beryllium) + ) + max_energy_alpha23 = max_p_alpha23**2 / (2 * m_a) + min_energy_alpha23 = min_p_alpha23**2 / (2 * m_a) ## Get the energy of all alphas in the slice - energy_alpha_slice = energy_alpha_simulation[(z_alpha >= slice_number)* \ - (z_alpha < (slice_number + 1))] + energy_alpha_slice = energy_alpha_simulation[ + (z_alpha >= slice_number) * (z_alpha < (slice_number + 1)) + ] ## Energy of alphas1 (here, first macroparticle of each fusion event) in the slice energy_alpha1_simulation = energy_alpha_slice[::6] ## Energy of alphas2 (here, third macroparticle of each fusion event) in the slice @@ -579,51 +686,73 @@ def check_initial_energy2(data): ## Energy of alphas3 (here, fifth macroparticle of each fusion event) in the slice energy_alpha3_simulation = energy_alpha_slice[4::6] - assert(is_close(np.amax(energy_alpha1_simulation), max_energy_alpha1, rtol=1.e-2)) - assert(is_close(np.amin(energy_alpha1_simulation), min_energy_alpha1, rtol=1.e-2)) + assert is_close( + np.amax(energy_alpha1_simulation), max_energy_alpha1, rtol=1.0e-2 + ) + assert is_close( + np.amin(energy_alpha1_simulation), min_energy_alpha1, rtol=1.0e-2 + ) ## Check that the max / min value are comparable to the analytical value ## The minimum value is checked to be within 200 keV of the analytical value ## The maximum value is checked to be within 5% of the analytical value ## Tolerance is quite high because we don't have a lot 
of alphas to produce good ## statistics and an event like alpha1 emitted exactly in direction of proton & alpha2 ## emitted exactly in direction opposite to Beryllium is somewhat rare. - assert(is_close(np.amax(energy_alpha2_simulation), max_energy_alpha23, rtol=5e-2 )) - assert(is_close(np.amin(energy_alpha2_simulation), min_energy_alpha23, atol=3.218e-14 )) - assert(is_close(np.amax(energy_alpha3_simulation), max_energy_alpha23, rtol=5e-2 )) - assert(is_close(np.amin(energy_alpha3_simulation), min_energy_alpha23, atol=3.218e-14 )) + assert is_close( + np.amax(energy_alpha2_simulation), max_energy_alpha23, rtol=5e-2 + ) + assert is_close( + np.amin(energy_alpha2_simulation), min_energy_alpha23, atol=3.218e-14 + ) + assert is_close( + np.amax(energy_alpha3_simulation), max_energy_alpha23, rtol=5e-2 + ) + assert is_close( + np.amin(energy_alpha3_simulation), min_energy_alpha23, atol=3.218e-14 + ) + def check_xy_isotropy(data): ## Checks that the alpha particles are emitted isotropically in x and y - average_px_sq = np.average(data["alpha_px_end"]*data["alpha_px_end"]) - average_py_sq = np.average(data["alpha_py_end"]*data["alpha_py_end"]) - average_pz_sq = np.average(data["alpha_pz_end"]*data["alpha_pz_end"]) - assert(is_close(average_px_sq, average_py_sq, rtol = 5.e-2)) - assert(average_pz_sq > average_px_sq) - assert(average_pz_sq > average_py_sq) + average_px_sq = np.average(data["alpha_px_end"] * data["alpha_px_end"]) + average_py_sq = np.average(data["alpha_py_end"] * data["alpha_py_end"]) + average_pz_sq = np.average(data["alpha_pz_end"] * data["alpha_pz_end"]) + assert is_close(average_px_sq, average_py_sq, rtol=5.0e-2) + assert average_pz_sq > average_px_sq + assert average_pz_sq > average_py_sq + def sigmav_thermal_fit_lowE_nonresonant(T): ## Temperature T is in keV ## Returns the nonresonant average of cross section multiplied by relative velocity in m^3/s, ## in the range T <= 70 keV, as described by equations 10-14 of A. Tentori and F. 
Belloni, ## Nuclear Fusion, 63, 086001 (2023). - E0 = (E_Gamow_keV/4.)**(1./3.) * T**(2./3.) - DE0 = 4.*np.sqrt(T*E0/3.) - C0 = 197.*1.e3 - C1 = 0.269*1.e3 - C2 = 2.54e-4*1.e3 - tau = 3.*E0/T - Seff = C0*(1.+5./(12.*tau)) + C1*(E0+35./36.*T) + C2*(E0**2 + 89./36.*E0*T) + E0 = (E_Gamow_keV / 4.0) ** (1.0 / 3.0) * T ** (2.0 / 3.0) + DE0 = 4.0 * np.sqrt(T * E0 / 3.0) + C0 = 197.0 * 1.0e3 + C1 = 0.269 * 1.0e3 + C2 = 2.54e-4 * 1.0e3 + tau = 3.0 * E0 / T + Seff = ( + C0 * (1.0 + 5.0 / (12.0 * tau)) + + C1 * (E0 + 35.0 / 36.0 * T) + + C2 * (E0**2 + 89.0 / 36.0 * E0 * T) + ) ## nonresonant sigma times vrel, in barn meter per second - sigmav_nr_bmps = np.sqrt(2*T*keV_to_Joule/m_reduced) * DE0*Seff/T**2 * np.exp(-tau) + sigmav_nr_bmps = ( + np.sqrt(2 * T * keV_to_Joule / m_reduced) * DE0 * Seff / T**2 * np.exp(-tau) + ) ## Return result in cubic meter per second - return sigmav_nr_bmps*barn_to_square_meter + return sigmav_nr_bmps * barn_to_square_meter + def sigmav_thermal_fit_lowE_resonant(T): ## Temperature T is in keV ## Returns the resonant average of cross section multiplied by relative velocity in m^3/s, ## in the range T <= 70 keV, as described by equation 15 of A. Tentori and F. Belloni, ## Nuclear Fusion, 63, 086001 (2023). - return 5.41e-21 * np.exp(-148./T) / T**(3./2.) + return 5.41e-21 * np.exp(-148.0 / T) / T ** (3.0 / 2.0) + def sigmav_thermal_fit_lowE(T): ## Temperature T is in keV @@ -632,90 +761,105 @@ def sigmav_thermal_fit_lowE(T): ## The fits are valid for T <= 70 keV. return sigmav_thermal_fit_lowE_nonresonant(T) + sigmav_thermal_fit_lowE_resonant(T) + def expected_alpha_thermal(T, proton_density, boron_density, dV, dt): ## Computes the expected number of produced alpha particles when the protons and borons follow ## a Maxwellian distribution with a temperature T, in keV. This uses the thermal fits described ## in A. Tentori and F. Belloni, Nuclear Fusion, 63, 086001 (2023). ## The fit used here is only valid in the range T <= 70 keV. 
- assert((T >=0) and (T<=70)) + assert (T >= 0) and (T <= 70) sigma_times_vrel = sigmav_thermal_fit_lowE(T) ## Factor 3 is here because each fusion event produces 3 alphas. - return 3.*proton_density*boron_density*sigma_times_vrel*dV*dt + return 3.0 * proton_density * boron_density * sigma_times_vrel * dV * dt + def check_thermal_alpha_yield(data): ## Checks that the number of alpha particles in test3 is as expected - Temperature = 44. # keV - proton_density = 1.e28 # m^-3 - boron_density = 5.e28 # m^-3 + Temperature = 44.0 # keV + proton_density = 1.0e28 # m^-3 + boron_density = 5.0e28 # m^-3 - alpha_weight_theory = expected_alpha_thermal(Temperature, proton_density, boron_density, - dV_total, dt) + alpha_weight_theory = expected_alpha_thermal( + Temperature, proton_density, boron_density, dV_total, dt + ) alpha_weight_simulation = np.sum(data["alpha_w_end"]) - assert(is_close(alpha_weight_theory, alpha_weight_simulation, rtol = 2.e-1)) + assert is_close(alpha_weight_theory, alpha_weight_simulation, rtol=2.0e-1) + def boron_remains(data): ## Checks whether there remains boron macroparticles at the end of the test n_boron_left = data["boron_w_end"].shape[0] - return (n_boron_left > 0) + return n_boron_left > 0 + def specific_check1(data): - check_isotropy(data, relative_tolerance = 3.e-2) - expected_fusion_number = check_macroparticle_number(data, - fusion_probability_target_value = 0.002, - num_pair_per_cell = nppcell_1) + check_isotropy(data, relative_tolerance=3.0e-2) + expected_fusion_number = check_macroparticle_number( + data, fusion_probability_target_value=0.002, num_pair_per_cell=nppcell_1 + ) E_com = compute_E_com1(data) - check_alpha_yield(data, expected_fusion_number, E_com, proton_density = 1., - boron_density = 1.) 
+ check_alpha_yield( + data, expected_fusion_number, E_com, proton_density=1.0, boron_density=1.0 + ) check_initial_energy1(data, E_com) + def specific_check2(data): check_xy_isotropy(data) ## Only 900 particles pairs per cell here because we ignore the 10% of protons that are at rest - expected_fusion_number = check_macroparticle_number(data, - fusion_probability_target_value = 0.02, - num_pair_per_cell = nppcell_2) + expected_fusion_number = check_macroparticle_number( + data, fusion_probability_target_value=0.02, num_pair_per_cell=nppcell_2 + ) E_com = compute_E_com2(data) - check_alpha_yield(data, expected_fusion_number, E_com, proton_density = 1.e20, - boron_density = 1.e26) + check_alpha_yield( + data, expected_fusion_number, E_com, proton_density=1.0e20, boron_density=1.0e26 + ) check_initial_energy2(data) + def specific_check3(data): - check_isotropy(data, relative_tolerance = 1.e-1) + check_isotropy(data, relative_tolerance=1.0e-1) check_thermal_alpha_yield(data) + def specific_check4(data): ## In test 4, the boron initial density is so small that all borons should have fused within a ## timestep dt. We thus assert that no boron remains at the end of the simulation. - assert(not boron_remains(data)) + assert not boron_remains(data) + def specific_check5(data): ## Test 5 is similar to test 4, expect that the parameter fusion_probability_threshold is ## increased to the point that we should severely underestimate the fusion yield. Consequently, ## there should still be borons at the end of the test, which we verify here. 
- assert(boron_remains(data)) + assert boron_remains(data) + def check_charge_conservation(rho_start, rho_end): - assert(np.all(is_close(rho_start, rho_end, rtol=2.e-11))) + assert np.all(is_close(rho_start, rho_end, rtol=2.0e-11)) + def main(): filename_end = sys.argv[1] - filename_start = filename_end[:-4] + '0000' + filename_start = filename_end[:-4] + "0000" ds_end = yt.load(filename_end) ds_start = yt.load(filename_start) ad_end = ds_end.all_data() ad_start = ds_start.all_data() - field_data_end = ds_end.covering_grid(level=0, left_edge=ds_end.domain_left_edge, - dims=ds_end.domain_dimensions) - field_data_start = ds_start.covering_grid(level=0, left_edge=ds_start.domain_left_edge, - dims=ds_start.domain_dimensions) + field_data_end = ds_end.covering_grid( + level=0, left_edge=ds_end.domain_left_edge, dims=ds_end.domain_dimensions + ) + field_data_start = ds_start.covering_grid( + level=0, left_edge=ds_start.domain_left_edge, dims=ds_start.domain_dimensions + ) ntests = 5 - for i in range(1, ntests+1): - proton_species = "proton"+str(i) - boron_species = "boron"+str(i) - alpha_species = "alpha"+str(i) + for i in range(1, ntests + 1): + proton_species = "proton" + str(i) + boron_species = "boron" + str(i) + alpha_species = "alpha" + str(i) data = {} add_species_to_dict(ad_start, data, proton_species, "proton", "start") add_species_to_dict(ad_start, data, boron_species, "boron", "start") @@ -727,7 +871,7 @@ def main(): generic_check(data) # Checks that are specific to test number i - eval("specific_check"+str(i)+"(data)") + eval("specific_check" + str(i) + "(data)") rho_start = field_data_start["rho"].to_ndarray() rho_end = field_data_end["rho"].to_ndarray() @@ -736,5 +880,6 @@ def main(): test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename_end) + if __name__ == "__main__": main() diff --git a/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py b/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py index 
d0190e6a330..be1fbb0702a 100755 --- a/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py +++ b/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py @@ -11,7 +11,7 @@ import yt -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI import numpy as np import scipy.constants as scc @@ -46,46 +46,49 @@ ## Please be aware that the relative tolerances are often set empirically in this analysis script, ## so it would not be surprising that some tolerances need to be increased in the future. -default_tol = 1.e-12 # Default relative tolerance +default_tol = 1.0e-12 # Default relative tolerance ## Some physical parameters -keV_to_Joule = scc.e*1e3 -MeV_to_Joule = scc.e*1e6 -barn_to_square_meter = 1.e-28 +keV_to_Joule = scc.e * 1e3 +MeV_to_Joule = scc.e * 1e6 +barn_to_square_meter = 1.0e-28 ## Checks whether this is the 2D or the 3D test -warpx_used_inputs = open('./warpx_used_inputs', 'r').read() -if re.search('geometry.dims = RZ', warpx_used_inputs): +with open("./warpx_used_inputs", "r") as f: + warpx_used_inputs = f.read() +if re.search("geometry.dims = RZ", warpx_used_inputs): is_RZ = True else: is_RZ = False ## Check which kind of test we are doing: D+T or D+D # Define reactants and products -if re.search('tritium', warpx_used_inputs): +if re.search("tritium", warpx_used_inputs): # If tritium appears in the file, than this is the D+T test - reaction_type = 'DT' - reactant_species = ['deuterium', 'tritium'] - product_species = ['helium4', 'neutron'] + reaction_type = "DT" + reactant_species = ["deuterium", "tritium"] + product_species = ["helium4", "neutron"] ntests = 2 - E_fusion = 17.5893*MeV_to_Joule # Energy released during the fusion reaction + E_fusion = 17.5893 * MeV_to_Joule # Energy released during the fusion reaction else: # else, this is the D+D test - reaction_type = 'DD' - reactant_species = ['deuterium', 'hydrogen2'] - product_species = ['helium3', 
'neutron'] + reaction_type = "DD" + reactant_species = ["deuterium", "hydrogen2"] + product_species = ["helium3", "neutron"] ntests = 1 - E_fusion = 3.268911e6*MeV_to_Joule + E_fusion = 3.268911e6 * MeV_to_Joule mass = { - 'deuterium': 2.01410177812*scc.m_u, - 'hydrogen2': 2.01410177812*scc.m_u, - 'tritium': 3.0160492779*scc.m_u, - 'helium3': 3.016029*scc.m_u, - 'helium4': 4.00260325413*scc.m_u, - 'neutron': 1.0013784193052508*scc.m_p + "deuterium": 2.01410177812 * scc.m_u, + "hydrogen2": 2.01410177812 * scc.m_u, + "tritium": 3.0160492779 * scc.m_u, + "helium3": 3.016029 * scc.m_u, + "helium4": 4.00260325413 * scc.m_u, + "neutron": 1.0013784193052508 * scc.m_p, } -m_reduced = np.product([mass[s] for s in reactant_species])/np.sum([mass[s] for s in reactant_species]) +m_reduced = np.product([mass[s] for s in reactant_species]) / np.sum( + [mass[s] for s in reactant_species] +) ## Some numerical parameters for this test size_x = 8 @@ -94,10 +97,10 @@ if is_RZ: dV_slice = np.pi * size_x**2 yt_z_string = "particle_position_y" - nppcell_1 = 10000*8 - nppcell_2 = 900*8 + nppcell_1 = 10000 * 8 + nppcell_2 = 900 * 8 else: - dV_slice = size_x*size_y + dV_slice = size_x * size_y yt_z_string = "particle_position_z" nppcell_1 = 10000 nppcell_2 = 900 @@ -105,29 +108,33 @@ # particles of a given species in the same slice have the exact same momentum # In test 1 and 2, the energy in cells number i (in z direction) is typically Energy_step * i**2 -Energy_step = 22.*keV_to_Joule +Energy_step = 22.0 * keV_to_Joule -def is_close(val1, val2, rtol=default_tol, atol=0.): + +def is_close(val1, val2, rtol=default_tol, atol=0.0): ## Wrapper around numpy.isclose, used to override the default tolerances. 
return np.isclose(val1, val2, rtol=rtol, atol=atol) + def add_existing_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix): - data_dict[prefix+"_px_"+suffix] = yt_ad[species_name, "particle_momentum_x"].v - data_dict[prefix+"_py_"+suffix] = yt_ad[species_name, "particle_momentum_y"].v - data_dict[prefix+"_pz_"+suffix] = yt_ad[species_name, "particle_momentum_z"].v - data_dict[prefix+"_w_"+suffix] = yt_ad[species_name, "particle_weight"].v - data_dict[prefix+"_id_"+suffix] = yt_ad[species_name, "particle_id"].v - data_dict[prefix+"_cpu_"+suffix] = yt_ad[species_name, "particle_cpu"].v - data_dict[prefix+"_z_"+suffix] = yt_ad[species_name, yt_z_string].v + data_dict[prefix + "_px_" + suffix] = yt_ad[species_name, "particle_momentum_x"].v + data_dict[prefix + "_py_" + suffix] = yt_ad[species_name, "particle_momentum_y"].v + data_dict[prefix + "_pz_" + suffix] = yt_ad[species_name, "particle_momentum_z"].v + data_dict[prefix + "_w_" + suffix] = yt_ad[species_name, "particle_weight"].v + data_dict[prefix + "_id_" + suffix] = yt_ad[species_name, "particle_id"].v + data_dict[prefix + "_cpu_" + suffix] = yt_ad[species_name, "particle_cpu"].v + data_dict[prefix + "_z_" + suffix] = yt_ad[species_name, yt_z_string].v + def add_empty_species_to_dict(data_dict, species_name, prefix, suffix): - data_dict[prefix+"_px_"+suffix] = np.empty(0) - data_dict[prefix+"_py_"+suffix] = np.empty(0) - data_dict[prefix+"_pz_"+suffix] = np.empty(0) - data_dict[prefix+"_w_"+suffix] = np.empty(0) - data_dict[prefix+"_id_"+suffix] = np.empty(0) - data_dict[prefix+"_cpu_"+suffix] = np.empty(0) - data_dict[prefix+"_z_"+suffix] = np.empty(0) + data_dict[prefix + "_px_" + suffix] = np.empty(0) + data_dict[prefix + "_py_" + suffix] = np.empty(0) + data_dict[prefix + "_pz_" + suffix] = np.empty(0) + data_dict[prefix + "_w_" + suffix] = np.empty(0) + data_dict[prefix + "_id_" + suffix] = np.empty(0) + data_dict[prefix + "_cpu_" + suffix] = np.empty(0) + data_dict[prefix + "_z_" + suffix] = 
np.empty(0) + def add_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix): try: @@ -138,47 +145,67 @@ def add_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix): ## dictionnary. add_empty_species_to_dict(data_dict, species_name, prefix, suffix) + def check_particle_number_conservation(data): # Check consumption of reactants total_w_reactant1_start = np.sum(data[reactant_species[0] + "_w_start"]) - total_w_reactant1_end = np.sum(data[reactant_species[0] + "_w_end"]) + total_w_reactant1_end = np.sum(data[reactant_species[0] + "_w_end"]) total_w_reactant2_start = np.sum(data[reactant_species[1] + "_w_start"]) - total_w_reactant2_end = np.sum(data[reactant_species[1] + "_w_end"]) + total_w_reactant2_end = np.sum(data[reactant_species[1] + "_w_end"]) consumed_reactant1 = total_w_reactant1_start - total_w_reactant1_end consumed_reactant2 = total_w_reactant2_start - total_w_reactant2_end - assert(consumed_reactant1 >= 0.) - assert(consumed_reactant2 >= 0.) + assert consumed_reactant1 >= 0.0 + assert consumed_reactant2 >= 0.0 ## Check that number of consumed reactants are equal assert_scale = max(total_w_reactant1_start, total_w_reactant2_start) - assert(is_close(consumed_reactant1, consumed_reactant2, rtol = 0., atol = default_tol*assert_scale)) + assert is_close( + consumed_reactant1, + consumed_reactant2, + rtol=0.0, + atol=default_tol * assert_scale, + ) # That the number of products corresponds consumed particles for species_name in product_species: created_product = np.sum(data[species_name + "_w_end"]) - assert(created_product >= 0.) 
- assert(is_close(total_w_reactant1_start, total_w_reactant1_end + created_product)) - assert(is_close(total_w_reactant2_start, total_w_reactant2_end + created_product)) + assert created_product >= 0.0 + assert is_close( + total_w_reactant1_start, total_w_reactant1_end + created_product + ) + assert is_close( + total_w_reactant2_start, total_w_reactant2_end + created_product + ) + def compute_energy_array(data, species_name, suffix, m): ## Relativistic computation of kinetic energy for a given species - psq_array = data[species_name+'_px_'+suffix]**2 + data[species_name+'_py_'+suffix]**2 + \ - data[species_name+'_pz_'+suffix]**2 - rest_energy = m*scc.c**2 - return np.sqrt(psq_array*scc.c**2 + rest_energy**2) - rest_energy + psq_array = ( + data[species_name + "_px_" + suffix] ** 2 + + data[species_name + "_py_" + suffix] ** 2 + + data[species_name + "_pz_" + suffix] ** 2 + ) + rest_energy = m * scc.c**2 + return np.sqrt(psq_array * scc.c**2 + rest_energy**2) - rest_energy + def check_energy_conservation(data): total_energy_start = 0 for species_name in reactant_species: - total_energy_start += np.sum( data[species_name + "_w_start"] * \ - compute_energy_array(data, species_name, "start", mass[species_name]) ) + total_energy_start += np.sum( + data[species_name + "_w_start"] + * compute_energy_array(data, species_name, "start", mass[species_name]) + ) total_energy_end = 0 for species_name in product_species + reactant_species: - total_energy_end += np.sum( data[species_name + "_w_end"] * \ - compute_energy_array(data, species_name, "end", mass[species_name]) ) + total_energy_end += np.sum( + data[species_name + "_w_end"] + * compute_energy_array(data, species_name, "end", mass[species_name]) + ) n_fusion_reaction = np.sum(data[product_species[0] + "_w_end"]) - assert(is_close(total_energy_end, - total_energy_start + n_fusion_reaction*E_fusion, - rtol = 1.e-8)) + assert is_close( + total_energy_end, total_energy_start + n_fusion_reaction * E_fusion, rtol=1.0e-8 + ) + 
def check_momentum_conservation(data): total_px_start = 0 @@ -186,33 +213,43 @@ def check_momentum_conservation(data): total_pz_start = 0 for species_name in reactant_species: total_px_start += np.sum( - data[species_name+'_px_start'] * data[species_name+'_w_start']) + data[species_name + "_px_start"] * data[species_name + "_w_start"] + ) total_py_start += np.sum( - data[species_name+'_py_start'] * data[species_name+'_w_start']) + data[species_name + "_py_start"] * data[species_name + "_w_start"] + ) total_pz_start += np.sum( - data[species_name+'_pz_start'] * data[species_name+'_w_start']) + data[species_name + "_pz_start"] * data[species_name + "_w_start"] + ) total_px_end = 0 total_py_end = 0 total_pz_end = 0 for species_name in reactant_species + product_species: total_px_end += np.sum( - data[species_name+'_px_end'] * data[species_name+'_w_end']) + data[species_name + "_px_end"] * data[species_name + "_w_end"] + ) total_py_end += np.sum( - data[species_name+'_py_end'] * data[species_name+'_w_end']) + data[species_name + "_py_end"] * data[species_name + "_w_end"] + ) total_pz_end += np.sum( - data[species_name+'_pz_end'] * data[species_name+'_w_end']) + data[species_name + "_pz_end"] * data[species_name + "_w_end"] + ) ## Absolute tolerance is needed because sometimes the initial momentum is exactly 0 - assert(is_close(total_px_start, total_px_end, atol=1.e-15)) - assert(is_close(total_py_start, total_py_end, atol=1.e-15)) - assert(is_close(total_pz_start, total_pz_end, atol=1.e-15)) + assert is_close(total_px_start, total_px_end, atol=1.0e-15) + assert is_close(total_py_start, total_py_end, atol=1.0e-15) + assert is_close(total_pz_start, total_pz_end, atol=1.0e-15) + def check_id(data): ## Check that all created particles have unique id + cpu identifier (two particles with ## different cpu can have the same id) for species_name in product_species: - complex_id = data[species_name + "_id_end"] + 1j*data[species_name + "_cpu_end"] - assert(complex_id.shape == 
np.unique(complex_id).shape) + complex_id = ( + data[species_name + "_id_end"] + 1j * data[species_name + "_cpu_end"] + ) + assert complex_id.shape == np.unique(complex_id).shape + def generic_check(data): check_particle_number_conservation(data) @@ -220,31 +257,46 @@ def generic_check(data): check_momentum_conservation(data) check_id(data) + def check_isotropy(data, relative_tolerance): ## Checks that the product particles are emitted isotropically for species_name in product_species: - average_px_sq = np.average(data[species_name+"_px_end"]*data[species_name+"_px_end"]) - average_py_sq = np.average(data[species_name+"_py_end"]*data[species_name+"_py_end"]) - average_pz_sq = np.average(data[species_name+"_pz_end"]*data[species_name+"_pz_end"]) - assert(is_close(average_px_sq, average_py_sq, rtol = relative_tolerance)) - assert(is_close(average_px_sq, average_pz_sq, rtol = relative_tolerance)) + average_px_sq = np.average( + data[species_name + "_px_end"] * data[species_name + "_px_end"] + ) + average_py_sq = np.average( + data[species_name + "_py_end"] * data[species_name + "_py_end"] + ) + average_pz_sq = np.average( + data[species_name + "_pz_end"] * data[species_name + "_pz_end"] + ) + assert is_close(average_px_sq, average_py_sq, rtol=relative_tolerance) + assert is_close(average_px_sq, average_pz_sq, rtol=relative_tolerance) + def check_xy_isotropy(data): ## Checks that the product particles are emitted isotropically in x and y for species_name in product_species: - average_px_sq = np.average(data[species_name+"_px_end"]*data[species_name+"_px_end"]) - average_py_sq = np.average(data[species_name+"_py_end"]*data[species_name+"_py_end"]) - average_pz_sq = np.average(data[species_name+"_pz_end"]*data[species_name+"_pz_end"]) - assert(is_close(average_px_sq, average_py_sq, rtol = 5.e-2)) - assert(average_pz_sq > average_px_sq) - assert(average_pz_sq > average_py_sq) - -def cross_section( E_keV ): + average_px_sq = np.average( + data[species_name + "_px_end"] * 
data[species_name + "_px_end"] + ) + average_py_sq = np.average( + data[species_name + "_py_end"] * data[species_name + "_py_end"] + ) + average_pz_sq = np.average( + data[species_name + "_pz_end"] * data[species_name + "_pz_end"] + ) + assert is_close(average_px_sq, average_py_sq, rtol=5.0e-2) + assert average_pz_sq > average_px_sq + assert average_pz_sq > average_py_sq + + +def cross_section(E_keV): ## Returns cross section in b, using the analytical fits given ## in H.-S. Bosch and G.M. Hale 1992 Nucl. Fusion 32 611 - joule_to_keV = 1.e-3/scc.e - B_G = scc.pi * scc.alpha * np.sqrt( 2.*m_reduced * scc.c**2 * joule_to_keV ) - if reaction_type == 'DT': + joule_to_keV = 1.0e-3 / scc.e + B_G = scc.pi * scc.alpha * np.sqrt(2.0 * m_reduced * scc.c**2 * joule_to_keV) + if reaction_type == "DT": A1 = 6.927e4 A2 = 7.454e8 A3 = 2.050e6 @@ -254,7 +306,7 @@ def cross_section( E_keV ): B2 = -9.95e-1 B3 = 6.981e-5 B4 = 1.728e-4 - elif reaction_type == 'DD': + elif reaction_type == "DD": A1 = 5.3701e4 A2 = 3.3027e2 A3 = -1.2706e-1 @@ -267,65 +319,93 @@ def cross_section( E_keV ): else: raise RuntimeError(f"Reaction type '{reaction_type}' not implemented.") - astrophysical_factor = (A1 + E_keV*(A2 + E_keV*(A3 + E_keV*(A4 + E_keV*A5)))) / (1 + E_keV*(B1 + E_keV*(B2 + E_keV*(B3 + E_keV*B4)))) - millibarn_to_barn = 1.e-3 - return millibarn_to_barn * astrophysical_factor/E_keV * np.exp(-B_G/np.sqrt(E_keV)) + astrophysical_factor = ( + A1 + E_keV * (A2 + E_keV * (A3 + E_keV * (A4 + E_keV * A5))) + ) / (1 + E_keV * (B1 + E_keV * (B2 + E_keV * (B3 + E_keV * B4)))) + millibarn_to_barn = 1.0e-3 + return ( + millibarn_to_barn * astrophysical_factor / E_keV * np.exp(-B_G / np.sqrt(E_keV)) + ) + def E_com_to_p_sq_com(m1, m2, E): ## E is the total (kinetic+mass) energy of a two particle (with mass m1 and m2) system in ## its center of mass frame, in J. ## Returns the square norm of the momentum of each particle in that frame. 
- E_ratio = E/((m1+m2)*scc.c**2) - return m1*m2*scc.c**2 * (E_ratio**2 - 1) + (m1-m2)**2*scc.c**2/4 * (E_ratio - 1./E_ratio)**2 + E_ratio = E / ((m1 + m2) * scc.c**2) + return ( + m1 * m2 * scc.c**2 * (E_ratio**2 - 1) + + (m1 - m2) ** 2 * scc.c**2 / 4 * (E_ratio - 1.0 / E_ratio) ** 2 + ) + def compute_relative_v_com(E): ## E is the kinetic energy of reactants in the center of mass frame, in keV ## Returns the relative velocity between reactants in this frame, in m/s m0 = mass[reactant_species[0]] m1 = mass[reactant_species[1]] - E_J = E*keV_to_Joule + (m0 + m1)*scc.c**2 + E_J = E * keV_to_Joule + (m0 + m1) * scc.c**2 p_sq = E_com_to_p_sq_com(m0, m1, E_J) p = np.sqrt(p_sq) - gamma0 = np.sqrt(1. + p_sq / (m0*scc.c)**2) - gamma1 = np.sqrt(1. + p_sq / (m1*scc.c)**2) - v0 = p/(gamma0*m0) - v1 = p/(gamma1*m1) - return v0+v1 + gamma0 = np.sqrt(1.0 + p_sq / (m0 * scc.c) ** 2) + gamma1 = np.sqrt(1.0 + p_sq / (m1 * scc.c) ** 2) + v0 = p / (gamma0 * m0) + v1 = p / (gamma1 * m1) + return v0 + v1 + def expected_weight_com(E_com, reactant0_density, reactant1_density, dV, dt): ## Computes expected number of product particles as a function of energy E_com in the ## center of mass frame. E_com is in keV. 
- assert(np.all(E_com>=0)) + assert np.all(E_com >= 0) ## Case E_com == 0 is handled manually to avoid division by zero conditions = [E_com == 0, E_com > 0] ## Necessary to avoid division by 0 warning when pb_cross_section is evaluated - E_com_never_zero = np.clip(E_com, 1.e-15, None) - choices = [0., cross_section(E_com_never_zero)*compute_relative_v_com(E_com_never_zero)] + E_com_never_zero = np.clip(E_com, 1.0e-15, None) + choices = [ + 0.0, + cross_section(E_com_never_zero) * compute_relative_v_com(E_com_never_zero), + ] sigma_times_vrel = np.select(conditions, choices) - return reactant0_density*reactant1_density*sigma_times_vrel*barn_to_square_meter*dV*dt - -def check_macroparticle_number(data, fusion_probability_target_value, num_pair_per_cell): + return ( + reactant0_density + * reactant1_density + * sigma_times_vrel + * barn_to_square_meter + * dV + * dt + ) + + +def check_macroparticle_number( + data, fusion_probability_target_value, num_pair_per_cell +): ## Checks that the number of macroparticles is as expected for the first and second tests ## The first slice 0 < z < 1 does not contribute to alpha creation if is_RZ: - numcells = size_x*(size_z-1) + numcells = size_x * (size_z - 1) else: - numcells = size_x*size_y*(size_z-1) + numcells = size_x * size_y * (size_z - 1) ## In these tests, the fusion_multiplier is so high that the fusion probability per pair is ## equal to the parameter fusion_probability_target_value fusion_probability_per_pair = fusion_probability_target_value - expected_fusion_number = numcells*num_pair_per_cell*fusion_probability_per_pair - expected_macroparticle_number = 2*expected_fusion_number - std_macroparticle_number = 2*np.sqrt(expected_fusion_number) + expected_fusion_number = numcells * num_pair_per_cell * fusion_probability_per_pair + expected_macroparticle_number = 2 * expected_fusion_number + std_macroparticle_number = 2 * np.sqrt(expected_fusion_number) actual_macroparticle_number = data[product_species[0] + 
"_w_end"].shape[0] # 5 sigma test that has an intrinsic probability to fail of 1 over ~2 millions - assert(is_close(actual_macroparticle_number, expected_macroparticle_number, rtol = 0., - atol = 5.*std_macroparticle_number)) + assert is_close( + actual_macroparticle_number, + expected_macroparticle_number, + rtol=0.0, + atol=5.0 * std_macroparticle_number, + ) ## used in subsequent function return expected_fusion_number + def p_sq_reactant1_frame_to_E_COM_frame(p_reactant0_sq): # Takes the reactant0 square norm of the momentum in the reactant1 rest frame and returns the total # kinetic energy in the center of mass frame. Everything is in SI units. @@ -333,101 +413,143 @@ def p_sq_reactant1_frame_to_E_COM_frame(p_reactant0_sq): m1 = mass[reactant_species[1]] # Total (kinetic + mass) energy in lab frame - E_lab = np.sqrt(p_reactant0_sq*scc.c**2 + (m0*scc.c**2)**2) + m1*scc.c**2 + E_lab = np.sqrt(p_reactant0_sq * scc.c**2 + (m0 * scc.c**2) ** 2) + m1 * scc.c**2 # Use invariant E**2 - p**2c**2 of 4-momentum norm to compute energy in center of mass frame - E_com = np.sqrt(E_lab**2 - p_reactant0_sq*scc.c**2) + E_com = np.sqrt(E_lab**2 - p_reactant0_sq * scc.c**2) # Corresponding kinetic energy - E_com_kin = E_com - (m1+m0)*scc.c**2 - return E_com_kin*(p_reactant0_sq>0.) + E_com_kin = E_com - (m1 + m0) * scc.c**2 + return E_com_kin * (p_reactant0_sq > 0.0) + def p_sq_to_kinetic_energy(p_sq, m): ## Returns the kinetic energy of a particle as a function of its squared momentum. ## Everything is in SI units. 
- return np.sqrt(p_sq*scc.c**2 + (m*scc.c**2)**2) - (m*scc.c**2) + return np.sqrt(p_sq * scc.c**2 + (m * scc.c**2) ** 2) - (m * scc.c**2) + def compute_E_com1(data): ## Computes kinetic energy (in Joule) in the center of frame for the first test ## Square norm of the momentum of reactant as a function of cell number in z direction - p_sq = 2.*m_reduced*(Energy_step*np.arange(size_z)**2) + p_sq = 2.0 * m_reduced * (Energy_step * np.arange(size_z) ** 2) Ekin = 0 for species_name in reactant_species: - Ekin += p_sq_to_kinetic_energy( p_sq, mass[species_name] ) + Ekin += p_sq_to_kinetic_energy(p_sq, mass[species_name]) return Ekin + def compute_E_com2(data): ## Computes kinetic energy (in Joule) in the center of frame for the second test ## Square norm of the momentum of reactant0 as a function of cell number in z direction - p_reactant0_sq = 2.*mass[reactant_species[0]]*(Energy_step*np.arange(size_z)**2) + p_reactant0_sq = ( + 2.0 * mass[reactant_species[0]] * (Energy_step * np.arange(size_z) ** 2) + ) return p_sq_reactant1_frame_to_E_COM_frame(p_reactant0_sq) -def check_fusion_yield(data, expected_fusion_number, E_com, reactant0_density, reactant1_density, dt): + +def check_fusion_yield( + data, expected_fusion_number, E_com, reactant0_density, reactant1_density, dt +): ## Checks that the fusion yield is as expected for the first and second tests. 
- product_weight_theory = expected_weight_com(E_com/keV_to_Joule, - reactant0_density, reactant1_density, dV_slice, dt) + product_weight_theory = expected_weight_com( + E_com / keV_to_Joule, reactant0_density, reactant1_density, dV_slice, dt + ) for species_name in product_species: - product_weight_simulation = np.histogram(data[species_name+"_z_end"], - bins=size_z, range=(0, size_z), weights = data[species_name+"_w_end"])[0] + product_weight_simulation = np.histogram( + data[species_name + "_z_end"], + bins=size_z, + range=(0, size_z), + weights=data[species_name + "_w_end"], + )[0] ## -1 is here because the first slice 0 < z < 1 does not contribute to fusion - expected_fusion_number_per_slice = expected_fusion_number/(size_z-1) - relative_std_weight = 1./np.sqrt(expected_fusion_number_per_slice) + expected_fusion_number_per_slice = expected_fusion_number / (size_z - 1) + relative_std_weight = 1.0 / np.sqrt(expected_fusion_number_per_slice) # 5 sigma test that has an intrinsic probability to fail of 1 over ~2 millions - assert(np.all(is_close(product_weight_theory, product_weight_simulation, - rtol = 5.*relative_std_weight))) + assert np.all( + is_close( + product_weight_theory, + product_weight_simulation, + rtol=5.0 * relative_std_weight, + ) + ) + def specific_check1(data, dt): if not is_RZ: - check_isotropy(data, relative_tolerance = 3.e-2) - expected_fusion_number = check_macroparticle_number(data, - fusion_probability_target_value = 0.002, - num_pair_per_cell = nppcell_1) + check_isotropy(data, relative_tolerance=3.0e-2) + expected_fusion_number = check_macroparticle_number( + data, fusion_probability_target_value=0.002, num_pair_per_cell=nppcell_1 + ) E_com = compute_E_com1(data) - check_fusion_yield(data, expected_fusion_number, E_com, reactant0_density = 1., - reactant1_density = 1., dt=dt) + check_fusion_yield( + data, + expected_fusion_number, + E_com, + reactant0_density=1.0, + reactant1_density=1.0, + dt=dt, + ) + def specific_check2(data, dt): 
check_xy_isotropy(data) ## Only 900 particles pairs per cell here because we ignore the 10% of reactants that are at rest - expected_fusion_number = check_macroparticle_number(data, - fusion_probability_target_value = 0.02, - num_pair_per_cell = nppcell_2) + expected_fusion_number = check_macroparticle_number( + data, fusion_probability_target_value=0.02, num_pair_per_cell=nppcell_2 + ) E_com = compute_E_com2(data) - check_fusion_yield(data, expected_fusion_number, E_com, reactant0_density = 1.e20, - reactant1_density = 1.e26, dt=dt) + check_fusion_yield( + data, + expected_fusion_number, + E_com, + reactant0_density=1.0e20, + reactant1_density=1.0e26, + dt=dt, + ) + def check_charge_conservation(rho_start, rho_end): - assert(np.all(is_close(rho_start, rho_end, rtol=2.e-11))) + assert np.all(is_close(rho_start, rho_end, rtol=2.0e-11)) + def main(): filename_end = sys.argv[1] - filename_start = filename_end[:-4] + '0000' + filename_start = filename_end[:-4] + "0000" ds_end = yt.load(filename_end) ds_start = yt.load(filename_start) ad_end = ds_end.all_data() ad_start = ds_start.all_data() dt = float(ds_end.current_time - ds_start.current_time) # noqa - field_data_end = ds_end.covering_grid(level=0, left_edge=ds_end.domain_left_edge, - dims=ds_end.domain_dimensions) - field_data_start = ds_start.covering_grid(level=0, left_edge=ds_start.domain_left_edge, - dims=ds_start.domain_dimensions) - - for i in range(1, ntests+1): + field_data_end = ds_end.covering_grid( + level=0, left_edge=ds_end.domain_left_edge, dims=ds_end.domain_dimensions + ) + field_data_start = ds_start.covering_grid( + level=0, left_edge=ds_start.domain_left_edge, dims=ds_start.domain_dimensions + ) + + for i in range(1, ntests + 1): data = {} for species_name in reactant_species: - add_species_to_dict(ad_start, data, species_name+'_'+str(i), species_name, "start") - add_species_to_dict(ad_end, data, species_name+'_'+str(i), species_name, "end") + add_species_to_dict( + ad_start, data, species_name + 
"_" + str(i), species_name, "start" + ) + add_species_to_dict( + ad_end, data, species_name + "_" + str(i), species_name, "end" + ) for species_name in product_species: - add_species_to_dict(ad_end, data, species_name+'_'+str(i), species_name, "end") + add_species_to_dict( + ad_end, data, species_name + "_" + str(i), species_name, "end" + ) # General checks that are performed for all tests generic_check(data) # Checks that are specific to test number i - eval("specific_check"+str(i)+"(data, dt)") + eval("specific_check" + str(i) + "(data, dt)") rho_start = field_data_start["rho"].to_ndarray() rho_end = field_data_end["rho"].to_ndarray() @@ -436,5 +558,6 @@ def main(): test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename_end) + if __name__ == "__main__": main() diff --git a/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs.py b/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs.py index a14787fc8e5..11394029062 100644 --- a/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs.py +++ b/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs.py @@ -21,39 +21,37 @@ comm = mpi.COMM_WORLD -simulation = picmi.Simulation( - warpx_serialize_initial_conditions=True, - verbose=0 -) +simulation = picmi.Simulation(warpx_serialize_initial_conditions=True, verbose=0) class EMModes(object): - '''The following runs a simulation of an uniform plasma at a set + """The following runs a simulation of an uniform plasma at a set temperature (Te = Ti) with an external magnetic field applied in either the z-direction (parallel to domain) or x-direction (perpendicular to domain). The analysis script (in this same directory) analyzes the output field data for EM modes. This input is based on the EM modes tests as described by Munoz et al. (2018) and tests done by Scott Nicks at TAE Technologies. 
- ''' + """ + # Applied field parameters - B0 = 0.25 # Initial magnetic field strength (T) - beta = [0.01, 0.1] # Plasma beta, used to calculate temperature + B0 = 0.25 # Initial magnetic field strength (T) + beta = [0.01, 0.1] # Plasma beta, used to calculate temperature # Plasma species parameters - m_ion = [100.0, 400.0] # Ion mass (electron masses) - vA_over_c = [1e-4, 1e-3] # ratio of Alfven speed and the speed of light + m_ion = [100.0, 400.0] # Ion mass (electron masses) + vA_over_c = [1e-4, 1e-3] # ratio of Alfven speed and the speed of light # Spatial domain - Nz = [1024, 1920] # number of cells in z direction - Nx = 8 # number of cells in x (and y) direction for >1 dimensions + Nz = [1024, 1920] # number of cells in z direction + Nx = 8 # number of cells in x (and y) direction for >1 dimensions # Temporal domain (if not run as a CI test) - LT = 300.0 # Simulation temporal length (ion cyclotron periods) + LT = 300.0 # Simulation temporal length (ion cyclotron periods) # Numerical parameters - NPPC = [1024, 256, 64] # Seed number of particles per cell - DZ = 1.0 / 10.0 # Cell size (ion skin depths) - DT = [5e-3, 4e-3] # Time step (ion cyclotron periods) + NPPC = [1024, 256, 64] # Seed number of particles per cell + DZ = 1.0 / 10.0 # Cell size (ion skin depths) + DT = [5e-3, 4e-3] # Time step (ion cyclotron periods) # Plasma resistivity - used to dampen the mode excitation eta = [[1e-7, 1e-7], [1e-7, 1e-5], [1e-7, 1e-4]] @@ -68,7 +66,7 @@ def __init__(self, test, dim, B_dir, verbose): self.verbose = verbose or self.test # sanity check - assert (dim > 0 and dim < 4), f"{dim}-dimensions not a valid input" + assert dim > 0 and dim < 4, f"{dim}-dimensions not a valid input" # get simulation parameters from the defaults given the direction of # the initial B-field and the dimensionality @@ -89,11 +87,11 @@ def __init__(self, test, dim, B_dir, verbose): # if this is a test case run for only a small number of steps self.total_steps = 250 # output diagnostics 20 
times per cyclotron period - self.diag_steps = int(1.0/20 / self.DT) + self.diag_steps = int(1.0 / 20 / self.DT) # dump all the current attributes to a dill pickle file if comm.rank == 0: - with open('sim_parameters.dpkl', 'wb') as f: + with open("sim_parameters.dpkl", "wb") as f: dill.dump(self, f) # print out plasma parameters @@ -125,12 +123,12 @@ def __init__(self, test, dim, B_dir, verbose): def get_simulation_parameters(self): """Pick appropriate parameters from the defaults given the direction of the B-field and the simulation dimensionality.""" - if self.B_dir == 'z': + if self.B_dir == "z": idx = 0 self.Bx = 0.0 self.By = 0.0 self.Bz = self.B0 - elif self.B_dir == 'y': + elif self.B_dir == "y": idx = 1 self.Bx = 0.0 self.By = self.B0 @@ -147,8 +145,8 @@ def get_simulation_parameters(self): self.Nz = self.Nz[idx] self.DT = self.DT[idx] - self.NPPC = self.NPPC[self.dim-1] - self.eta = self.eta[self.dim-1][idx] + self.NPPC = self.NPPC[self.dim - 1] + self.eta = self.eta[self.dim - 1][idx] def get_plasma_quantities(self): """Calculate various plasma parameters based on the simulation input.""" @@ -161,14 +159,12 @@ def get_plasma_quantities(self): # Alfven speed (m/s): vA = B / sqrt(mu0 * n * (M + m)) = c * omega_ci / w_pi self.vA = self.vA_over_c * constants.c - self.n_plasma = ( - (self.B0 / self.vA)**2 / (constants.mu0 * (self.M + constants.m_e)) + self.n_plasma = (self.B0 / self.vA) ** 2 / ( + constants.mu0 * (self.M + constants.m_e) ) # Ion plasma frequency (Hz) - self.w_pi = np.sqrt( - constants.q_e**2 * self.n_plasma / (self.M * constants.ep0) - ) + self.w_pi = np.sqrt(constants.q_e**2 * self.n_plasma / (self.M * constants.ep0)) # Skin depth (m) self.l_i = constants.c / self.w_pi @@ -177,7 +173,7 @@ def get_plasma_quantities(self): self.v_ti = np.sqrt(self.beta / 2.0) * self.vA # Temperature (eV) from thermal speed: v_ti = sqrt(kT / M) - self.T_plasma = self.v_ti**2 * self.M / constants.q_e # eV + self.T_plasma = self.v_ti**2 * self.M / constants.q_e # 
eV # Larmor radius (m) self.rho_i = self.v_ti / self.w_ci @@ -197,16 +193,16 @@ def setup_run(self): grid_object = picmi.Cartesian3DGrid self.grid = grid_object( - number_of_cells=[self.Nx, self.Nx, self.Nz][-self.dim:], + number_of_cells=[self.Nx, self.Nx, self.Nz][-self.dim :], warpx_max_grid_size=self.Nz, - lower_bound=[-self.Lx/2.0, -self.Lx/2.0, 0][-self.dim:], - upper_bound=[self.Lx/2.0, self.Lx/2.0, self.Lz][-self.dim:], - lower_boundary_conditions=['periodic']*self.dim, - upper_boundary_conditions=['periodic']*self.dim + lower_bound=[-self.Lx / 2.0, -self.Lx / 2.0, 0][-self.dim :], + upper_bound=[self.Lx / 2.0, self.Lx / 2.0, self.Lz][-self.dim :], + lower_boundary_conditions=["periodic"] * self.dim, + upper_boundary_conditions=["periodic"] * self.dim, ) simulation.time_step_size = self.dt simulation.max_steps = self.total_steps - simulation.current_deposition_algo = 'direct' + simulation.current_deposition_algo = "direct" simulation.particle_shape = 1 simulation.verbose = self.verbose @@ -216,15 +212,15 @@ def setup_run(self): self.solver = picmi.HybridPICSolver( grid=self.grid, - Te=self.T_plasma, n0=self.n_plasma, plasma_resistivity=self.eta, - substeps=self.substeps + Te=self.T_plasma, + n0=self.n_plasma, + plasma_resistivity=self.eta, + substeps=self.substeps, ) simulation.solver = self.solver B_ext = picmi.AnalyticInitialField( - Bx_expression=self.Bx, - By_expression=self.By, - Bz_expression=self.Bz + Bx_expression=self.Bx, By_expression=self.By, Bz_expression=self.Bz ) simulation.add_applied_field(B_ext) @@ -233,60 +229,62 @@ def setup_run(self): ####################################################################### self.ions = picmi.Species( - name='ions', charge='q_e', mass=self.M, + name="ions", + charge="q_e", + mass=self.M, initial_distribution=picmi.UniformDistribution( density=self.n_plasma, - rms_velocity=[self.v_ti]*3, - ) + rms_velocity=[self.v_ti] * 3, + ), ) simulation.add_species( self.ions, layout=picmi.PseudoRandomLayout( 
grid=self.grid, n_macroparticles_per_cell=self.NPPC - ) + ), ) ####################################################################### # Add diagnostics # ####################################################################### - if self.B_dir == 'z': - self.output_file_name = 'par_field_data.txt' + if self.B_dir == "z": + self.output_file_name = "par_field_data.txt" else: - self.output_file_name = 'perp_field_data.txt' + self.output_file_name = "perp_field_data.txt" if self.test: particle_diag = picmi.ParticleDiagnostic( - name='field_diag', + name="field_diag", period=self.total_steps, - write_dir='.', - warpx_file_prefix='Python_ohms_law_solver_EM_modes_1d_plt', + write_dir=".", + warpx_file_prefix="Python_ohms_law_solver_EM_modes_1d_plt", # warpx_format = 'openpmd', # warpx_openpmd_backend = 'h5' ) simulation.add_diagnostic(particle_diag) field_diag = picmi.FieldDiagnostic( - name='field_diag', + name="field_diag", grid=self.grid, period=self.total_steps, - data_list=['B', 'E', 'J_displacement'], - write_dir='.', - warpx_file_prefix='Python_ohms_law_solver_EM_modes_1d_plt', + data_list=["B", "E", "J_displacement"], + write_dir=".", + warpx_file_prefix="Python_ohms_law_solver_EM_modes_1d_plt", # warpx_format = 'openpmd', # warpx_openpmd_backend = 'h5' ) simulation.add_diagnostic(field_diag) - if self.B_dir == 'z' or self.dim == 1: + if self.B_dir == "z" or self.dim == 1: line_diag = picmi.ReducedDiagnostic( - diag_type='FieldProbe', - probe_geometry='Line', + diag_type="FieldProbe", + probe_geometry="Line", z_probe=0, z1_probe=self.Lz, resolution=self.Nz - 1, name=self.output_file_name[:-4], period=self.diag_steps, - path='diags/' + path="diags/", ) simulation.add_diagnostic(line_diag) else: @@ -297,10 +295,10 @@ def setup_run(self): except OSError: # diags directory already exists pass - with open(f"diags/{self.output_file_name}", 'w') as f: + with open(f"diags/{self.output_file_name}", "w") as f: f.write( - "[0]step() [1]time(s) [2]z_coord(m) " - 
"[3]Ez_lev0-(V/m) [4]Bx_lev0-(T) [5]By_lev0-(T)\n" + "[0]step() [1]time(s) [2]z_coord(m) " + "[3]Ez_lev0-(V/m) [4]Bx_lev0-(T) [5]By_lev0-(T)\n" ) ####################################################################### @@ -339,7 +337,7 @@ def _record_average_fields(self): Bx = np.mean(Bx_warpx[:-1], axis=(0, 1)) By = np.mean(By_warpx[:-1], axis=(0, 1)) - with open(f"diags/{self.output_file_name}", 'a') as f: + with open(f"diags/{self.output_file_name}", "a") as f: for ii in range(self.Nz): f.write( f"{step:05d} {t:.10e} {z_vals[ii]:.10e} {Ez[ii]:+.10e} " @@ -353,22 +351,29 @@ def _record_average_fields(self): parser = argparse.ArgumentParser() parser.add_argument( - '-t', '--test', help='toggle whether this script is run as a short CI test', - action='store_true', + "-t", + "--test", + help="toggle whether this script is run as a short CI test", + action="store_true", ) parser.add_argument( - '-d', '--dim', help='Simulation dimension', required=False, type=int, - default=1 + "-d", "--dim", help="Simulation dimension", required=False, type=int, default=1 ) parser.add_argument( - '--bdir', help='Direction of the B-field', required=False, - choices=['x', 'y', 'z'], default='z' + "--bdir", + help="Direction of the B-field", + required=False, + choices=["x", "y", "z"], + default="z", ) parser.add_argument( - '-v', '--verbose', help='Verbose output', action='store_true', + "-v", + "--verbose", + help="Verbose output", + action="store_true", ) args, left = parser.parse_known_args() -sys.argv = sys.argv[:1]+left +sys.argv = sys.argv[:1] + left run = EMModes(test=args.test, dim=args.dim, B_dir=args.bdir, verbose=args.verbose) simulation.step() diff --git a/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs_rz.py b/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs_rz.py index 9d5cc8fe977..ace91bad4d5 100644 --- a/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs_rz.py +++ b/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs_rz.py @@ -23,32 +23,33 @@ class 
CylindricalNormalModes(object): - '''The following runs a simulation of an uniform plasma at a set ion + """The following runs a simulation of an uniform plasma at a set ion temperature (and Te = 0) with an external magnetic field applied in the z-direction (parallel to domain). The analysis script (in this same directory) analyzes the output field data for EM modes. - ''' + """ + # Applied field parameters - B0 = 0.5 # Initial magnetic field strength (T) - beta = 0.01 # Plasma beta, used to calculate temperature + B0 = 0.5 # Initial magnetic field strength (T) + beta = 0.01 # Plasma beta, used to calculate temperature # Plasma species parameters - m_ion = 400.0 # Ion mass (electron masses) - vA_over_c = 5e-3 # ratio of Alfven speed and the speed of light + m_ion = 400.0 # Ion mass (electron masses) + vA_over_c = 5e-3 # ratio of Alfven speed and the speed of light # Spatial domain - Nz = 512 # number of cells in z direction - Nr = 128 # number of cells in r direction + Nz = 512 # number of cells in z direction + Nr = 128 # number of cells in r direction # Temporal domain (if not run as a CI test) - LT = 800.0 # Simulation temporal length (ion cyclotron periods) + LT = 800.0 # Simulation temporal length (ion cyclotron periods) # Numerical parameters - NPPC = 8000 # Seed number of particles per cell - DZ = 0.4 # Cell size (ion skin depths) - DR = 0.4 # Cell size (ion skin depths) - DT = 0.02 # Time step (ion cyclotron periods) + NPPC = 8000 # Seed number of particles per cell + DZ = 0.4 # Cell size (ion skin depths) + DR = 0.4 # Cell size (ion skin depths) + DT = 0.02 # Time step (ion cyclotron periods) # Plasma resistivity - used to dampen the mode excitation eta = 5e-4 @@ -82,7 +83,7 @@ def __init__(self, test, verbose): # dump all the current attributes to a dill pickle file if comm.rank == 0: - with open('sim_parameters.dpkl', 'wb') as f: + with open("sim_parameters.dpkl", "wb") as f: dill.dump(self, f) # print out plasma parameters @@ -106,7 +107,7 @@ def 
__init__(self, test, verbose): f"\tdt = {self.dt:.1e} s\n" f"\tdiag steps = {self.diag_steps:d}\n" f"\ttotal steps = {self.total_steps:d}\n", - flush=True + flush=True, ) self.setup_run() @@ -121,14 +122,12 @@ def get_plasma_quantities(self): # Alfven speed (m/s): vA = B / sqrt(mu0 * n * (M + m)) = c * omega_ci / w_pi self.vA = self.vA_over_c * constants.c - self.n_plasma = ( - (self.B0 / self.vA)**2 / (constants.mu0 * (self.M + constants.m_e)) + self.n_plasma = (self.B0 / self.vA) ** 2 / ( + constants.mu0 * (self.M + constants.m_e) ) # Ion plasma frequency (Hz) - self.w_pi = np.sqrt( - constants.q_e**2 * self.n_plasma / (self.M * constants.ep0) - ) + self.w_pi = np.sqrt(constants.q_e**2 * self.n_plasma / (self.M * constants.ep0)) # Skin depth (m) self.l_i = constants.c / self.w_pi @@ -137,7 +136,7 @@ def get_plasma_quantities(self): self.v_ti = np.sqrt(self.beta / 2.0) * self.vA # Temperature (eV) from thermal speed: v_ti = sqrt(kT / M) - self.T_plasma = self.v_ti**2 * self.M / constants.q_e # eV + self.T_plasma = self.v_ti**2 * self.M / constants.q_e # eV # Larmor radius (m) self.rho_i = self.v_ti / self.w_ci @@ -152,16 +151,16 @@ def setup_run(self): self.grid = picmi.CylindricalGrid( number_of_cells=[self.Nr, self.Nz], warpx_max_grid_size=self.Nz, - lower_bound=[0, -self.Lz/2.0], - upper_bound=[self.Lr, self.Lz/2.0], - lower_boundary_conditions = ['none', 'periodic'], - upper_boundary_conditions = ['dirichlet', 'periodic'], - lower_boundary_conditions_particles = ['none', 'periodic'], - upper_boundary_conditions_particles = ['reflecting', 'periodic'] + lower_bound=[0, -self.Lz / 2.0], + upper_bound=[self.Lr, self.Lz / 2.0], + lower_boundary_conditions=["none", "periodic"], + upper_boundary_conditions=["dirichlet", "periodic"], + lower_boundary_conditions_particles=["none", "periodic"], + upper_boundary_conditions_particles=["reflecting", "periodic"], ) simulation.time_step_size = self.dt simulation.max_steps = self.total_steps - 
simulation.current_deposition_algo = 'direct' + simulation.current_deposition_algo = "direct" simulation.particle_shape = 1 simulation.verbose = self.verbose @@ -171,15 +170,15 @@ def setup_run(self): self.solver = picmi.HybridPICSolver( grid=self.grid, - Te=0.0, n0=self.n_plasma, plasma_resistivity=self.eta, + Te=0.0, + n0=self.n_plasma, + plasma_resistivity=self.eta, substeps=self.substeps, - n_floor=self.n_plasma*0.05 + n_floor=self.n_plasma * 0.05, ) simulation.solver = self.solver - B_ext = picmi.AnalyticInitialField( - Bz_expression=self.B0 - ) + B_ext = picmi.AnalyticInitialField(Bz_expression=self.B0) simulation.add_applied_field(B_ext) ####################################################################### @@ -187,17 +186,19 @@ def setup_run(self): ####################################################################### self.ions = picmi.Species( - name='ions', charge='q_e', mass=self.M, + name="ions", + charge="q_e", + mass=self.M, initial_distribution=picmi.UniformDistribution( density=self.n_plasma, - rms_velocity=[self.v_ti]*3, - ) + rms_velocity=[self.v_ti] * 3, + ), ) simulation.add_species( self.ions, layout=picmi.PseudoRandomLayout( grid=self.grid, n_macroparticles_per_cell=self.NPPC - ) + ), ) ####################################################################### @@ -205,26 +206,26 @@ def setup_run(self): ####################################################################### field_diag = picmi.FieldDiagnostic( - name='field_diag', + name="field_diag", grid=self.grid, period=self.diag_steps, - data_list=['B', 'E'], - write_dir='diags', - warpx_file_prefix='field_diags', - warpx_format='openpmd', - warpx_openpmd_backend='h5', + data_list=["B", "E"], + write_dir="diags", + warpx_file_prefix="field_diags", + warpx_format="openpmd", + warpx_openpmd_backend="h5", ) simulation.add_diagnostic(field_diag) # add particle diagnostic for checksum if self.test: part_diag = picmi.ParticleDiagnostic( - name='diag1', + name="diag1", period=self.total_steps, 
species=[self.ions], - data_list=['ux', 'uy', 'uz', 'weighting'], - write_dir='.', - warpx_file_prefix='Python_ohms_law_solver_EM_modes_rz_plt' + data_list=["ux", "uy", "uz", "weighting"], + write_dir=".", + warpx_file_prefix="Python_ohms_law_solver_EM_modes_rz_plt", ) simulation.add_diagnostic(part_diag) @@ -235,14 +236,19 @@ def setup_run(self): parser = argparse.ArgumentParser() parser.add_argument( - '-t', '--test', help='toggle whether this script is run as a short CI test', - action='store_true', + "-t", + "--test", + help="toggle whether this script is run as a short CI test", + action="store_true", ) parser.add_argument( - '-v', '--verbose', help='Verbose output', action='store_true', + "-v", + "--verbose", + help="Verbose output", + action="store_true", ) args, left = parser.parse_known_args() -sys.argv = sys.argv[:1]+left +sys.argv = sys.argv[:1] + left run = CylindricalNormalModes(test=args.test, verbose=args.verbose) simulation.step() diff --git a/Examples/Tests/ohm_solver_EM_modes/analysis.py b/Examples/Tests/ohm_solver_EM_modes/analysis.py index f2f71cd53ed..36869623ac4 100755 --- a/Examples/Tests/ohm_solver_EM_modes/analysis.py +++ b/Examples/Tests/ohm_solver_EM_modes/analysis.py @@ -11,24 +11,24 @@ constants = picmi.constants -matplotlib.rcParams.update({'font.size': 20}) +matplotlib.rcParams.update({"font.size": 20}) # load simulation parameters -with open('sim_parameters.dpkl', 'rb') as f: +with open("sim_parameters.dpkl", "rb") as f: sim = dill.load(f) -if sim.B_dir == 'z': - field_idx_dict = {'z': 4, 'Ez': 7, 'Bx': 8, 'By': 9} +if sim.B_dir == "z": + field_idx_dict = {"z": 4, "Ez": 7, "Bx": 8, "By": 9} data = np.loadtxt("diags/par_field_data.txt", skiprows=1) else: if sim.dim == 1: - field_idx_dict = {'z': 4, 'Ez': 7, 'Bx': 8, 'By': 9} + field_idx_dict = {"z": 4, "Ez": 7, "Bx": 8, "By": 9} else: - field_idx_dict = {'z': 2, 'Ez': 3, 'Bx': 4, 'By': 5} + field_idx_dict = {"z": 2, "Ez": 3, "Bx": 4, "By": 5} data = 
np.loadtxt("diags/perp_field_data.txt", skiprows=1) # step, t, z, Ez, Bx, By = raw_data.T -step = data[:,0] +step = data[:, 0] num_steps = len(np.unique(step)) @@ -36,53 +36,56 @@ resolution = len(np.where(step == 0)[0]) - 1 # reshape to separate spatial and time coordinates -sim_data = data.reshape((num_steps, resolution+1, data.shape[1])) +sim_data = data.reshape((num_steps, resolution + 1, data.shape[1])) -z_grid = sim_data[1, :, field_idx_dict['z']] +z_grid = sim_data[1, :, field_idx_dict["z"]] idx = np.argsort(z_grid)[1:] dz = np.mean(np.diff(z_grid[idx])) -dt = np.mean(np.diff(sim_data[:,0,1])) +dt = np.mean(np.diff(sim_data[:, 0, 1])) data = np.zeros((num_steps, resolution, 3)) for i in range(num_steps): - data[i,:,0] = sim_data[i,idx,field_idx_dict['Bx']] - data[i,:,1] = sim_data[i,idx,field_idx_dict['By']] - data[i,:,2] = sim_data[i,idx,field_idx_dict['Ez']] + data[i, :, 0] = sim_data[i, idx, field_idx_dict["Bx"]] + data[i, :, 1] = sim_data[i, idx, field_idx_dict["By"]] + data[i, :, 2] = sim_data[i, idx, field_idx_dict["Ez"]] print(f"Data file contains {num_steps} time snapshots.") print(f"Spatial resolution is {resolution}") + def get_analytic_R_mode(w): return w / np.sqrt(1.0 + abs(w)) + def get_analytic_L_mode(w): return w / np.sqrt(1.0 - abs(w)) -if sim.B_dir == 'z': + +if sim.B_dir == "z": global_norm = ( - 1.0 / (2.0*constants.mu0) - / ((3.0/2)*sim.n_plasma*sim.T_plasma*constants.q_e) + 1.0 + / (2.0 * constants.mu0) + / ((3.0 / 2) * sim.n_plasma * sim.T_plasma * constants.q_e) ) else: global_norm = ( - constants.ep0 / 2.0 - / ((3.0/2)*sim.n_plasma*sim.T_plasma*constants.q_e) + constants.ep0 / 2.0 / ((3.0 / 2) * sim.n_plasma * sim.T_plasma * constants.q_e) ) -if sim.B_dir == 'z': +if sim.B_dir == "z": Bl = (data[:, :, 0] + 1.0j * data[:, :, 1]) / np.sqrt(2.0) field_kw = np.fft.fftshift(np.fft.fft2(Bl)) else: field_kw = np.fft.fftshift(np.fft.fft2(data[:, :, 2])) w_norm = sim.w_ci -if sim.B_dir == 'z': +if sim.B_dir == "z": k_norm = 1.0 / sim.l_i else: 
k_norm = 1.0 / sim.rho_i -k = 2*np.pi * np.fft.fftshift(np.fft.fftfreq(resolution, dz)) / k_norm -w = 2*np.pi * np.fft.fftshift(np.fft.fftfreq(num_steps, dt)) / w_norm +k = 2 * np.pi * np.fft.fftshift(np.fft.fftfreq(resolution, dz)) / k_norm +w = 2 * np.pi * np.fft.fftshift(np.fft.fftfreq(num_steps, dt)) / w_norm w = -np.flipud(w) # aspect = (xmax-xmin)/(ymax-ymin) / aspect_true @@ -90,7 +93,7 @@ def get_analytic_L_mode(w): fig, ax1 = plt.subplots(1, 1, figsize=(10, 7.25)) -if sim.B_dir == 'z' and sim.dim == 1: +if sim.B_dir == "z" and sim.dim == 1: vmin = -3 vmax = 3.5 else: @@ -98,78 +101,249 @@ def get_analytic_L_mode(w): vmax = None im = ax1.imshow( - np.log10(np.abs(field_kw**2) * global_norm), extent=extent, - aspect="equal", cmap='inferno', vmin=vmin, vmax=vmax + np.log10(np.abs(field_kw**2) * global_norm), + extent=extent, + aspect="equal", + cmap="inferno", + vmin=vmin, + vmax=vmax, ) # Colorbars fig.subplots_adjust(right=0.5) cbar_ax = fig.add_axes([0.525, 0.15, 0.03, 0.7]) -fig.colorbar(im, cax=cbar_ax, orientation='vertical') +fig.colorbar(im, cax=cbar_ax, orientation="vertical") -#cbar_lab = r'$\log_{10}(\frac{|B_{R/L}|^2}{2\mu_0}\frac{2}{3n_0k_BT_e})$' -if sim.B_dir == 'z': - cbar_lab = r'$\log_{10}(\beta_{R/L})$' +# cbar_lab = r'$\log_{10}(\frac{|B_{R/L}|^2}{2\mu_0}\frac{2}{3n_0k_BT_e})$' +if sim.B_dir == "z": + cbar_lab = r"$\log_{10}(\beta_{R/L})$" else: - cbar_lab = r'$\log_{10}(\varepsilon_0|E_z|^2/(3n_0k_BT_e))$' + cbar_lab = r"$\log_{10}(\varepsilon_0|E_z|^2/(3n_0k_BT_e))$" cbar_ax.set_ylabel(cbar_lab, rotation=270, labelpad=30) -if sim.B_dir == 'z': +if sim.B_dir == "z": # plot the L mode - ax1.plot(get_analytic_L_mode(w), np.abs(w), c='limegreen', ls='--', lw=1.25, - label='L mode:\n'+r'$(kl_i)^2=\frac{(\omega/\Omega_i)^2}{1-\omega/\Omega_i}$') + ax1.plot( + get_analytic_L_mode(w), + np.abs(w), + c="limegreen", + ls="--", + lw=1.25, + label="L mode:\n" + r"$(kl_i)^2=\frac{(\omega/\Omega_i)^2}{1-\omega/\Omega_i}$", + ) # plot the R mode - 
ax1.plot(get_analytic_R_mode(w), -np.abs(w), c='limegreen', ls='-.', lw=1.25, - label='R mode:\n'+r'$(kl_i)^2=\frac{(\omega/\Omega_i)^2}{1+\omega/\Omega_i}$') + ax1.plot( + get_analytic_R_mode(w), + -np.abs(w), + c="limegreen", + ls="-.", + lw=1.25, + label="R mode:\n" + r"$(kl_i)^2=\frac{(\omega/\Omega_i)^2}{1+\omega/\Omega_i}$", + ) - ax1.plot(k,1.0+3.0*sim.v_ti/w_norm*k*k_norm, c='limegreen', ls=':', lw=1.25, label = r'$\omega = \Omega_i + 3v_{th,i} k$') - ax1.plot(k,1.0-3.0*sim.v_ti/w_norm*k*k_norm, c='limegreen', ls=':', lw=1.25) + ax1.plot( + k, + 1.0 + 3.0 * sim.v_ti / w_norm * k * k_norm, + c="limegreen", + ls=":", + lw=1.25, + label=r"$\omega = \Omega_i + 3v_{th,i} k$", + ) + ax1.plot( + k, 1.0 - 3.0 * sim.v_ti / w_norm * k * k_norm, c="limegreen", ls=":", lw=1.25 + ) else: # digitized values from Munoz et al. (2018) - x = [0.006781609195402272, 0.1321379310344828, 0.2671034482758621, 0.3743678160919539, 0.49689655172413794, 0.6143908045977011, 0.766022988505747, 0.885448275862069, 1.0321149425287355, 1.193862068965517, 1.4417701149425288, 1.7736781609195402] - y = [-0.033194664836814436, 0.5306857657503109, 1.100227301968521, 1.5713856842646996, 2.135780760818287, 2.675601492473303, 3.3477291246729854, 3.8469357121413563, 4.4317021915340735, 5.1079898786293265, 6.10275764463696, 7.310074194793499] - ax1.plot(x, y, c='limegreen', ls='-.', lw=1.5, label="X mode") - - x = [3.9732873563218387, 3.6515862068965514, 3.306275862068966, 2.895655172413793, 2.4318850574712645, 2.0747586206896553, 1.8520229885057473, 1.6589195402298849, 1.4594942528735633, 1.2911724137931033, 1.1551264367816092, 1.0335402298850576, 0.8961149425287356, 0.7419770114942528, 0.6141379310344828, 0.4913103448275862] - y = [1.1145945018655916, 1.1193978642192393, 1.1391259596002916, 1.162971222713042, 1.1986533430544237, 1.230389844319595, 1.2649997855641806, 1.3265857528841618, 1.3706737573444268, 1.4368486511986962, 1.4933310460179268, 1.5485268259210019, 1.6386327572157655, 
1.7062658146416778, 1.7828194021529358, 1.8533687867221342] - ax1.plot(x, y, c='limegreen', ls=':', lw=2, label="Bernstein modes") - - x = [3.9669885057471266, 3.6533333333333333, 3.3213563218390805, 2.9646896551724136, 2.6106436781609195, 2.2797011494252875, 1.910919540229885, 1.6811724137931034, 1.4499540229885057, 1.2577011494252872, 1.081057471264368, 0.8791494252873564, 0.7153103448275862] - y = [2.2274306300124374, 2.2428271218424327, 2.272505039241755, 2.3084873697302397, 2.3586224642964364, 2.402667581592829, 2.513873997512545, 2.5859673199811297, 2.6586610627439207, 2.7352146502551786, 2.8161427284813656, 2.887850066475104, 2.9455761890466183] - ax1.plot(x, y, c='limegreen', ls=':', lw=2) - - x = [3.9764137931034487, 3.702022988505747, 3.459793103448276, 3.166712643678161, 2.8715862068965516, 2.5285057471264367, 2.2068505747126435, 1.9037011494252871, 1.6009885057471265, 1.3447816091954023, 1.1538850574712645, 0.9490114942528736] - y = [3.3231976669382854, 3.34875841660591, 3.378865205643951, 3.424454260839731, 3.474160483767209, 3.522194107303684, 3.6205343740618434, 3.7040356821203417, 3.785435519149119, 3.868851052879873, 3.9169704507440923, 3.952481022429987] - ax1.plot(x, y, c='limegreen', ls=':', lw=2) - - x = [3.953609195402299, 3.7670114942528734, 3.5917471264367817, 3.39735632183908, 3.1724137931034484, 2.9408045977011494, 2.685977011494253, 2.4593563218390804, 2.2203218390804595, 2.0158850574712646, 1.834183908045977, 1.6522758620689655, 1.4937471264367814, 1.3427586206896551, 1.2075402298850575] - y = [4.427971008277223, 4.458335120298495, 4.481579963117039, 4.495861388686366, 4.544581206844791, 4.587425483552773, 4.638160998413175, 4.698631899472488, 4.757987734271133, 4.813955483123902, 4.862332203971352, 4.892481880173264, 4.9247759145687695, 4.947934983059571, 4.953124329888064] - ax1.plot(x, y, c='limegreen', ls=':', lw=2) + x = [ + 0.006781609195402272, + 0.1321379310344828, + 0.2671034482758621, + 0.3743678160919539, + 
0.49689655172413794, + 0.6143908045977011, + 0.766022988505747, + 0.885448275862069, + 1.0321149425287355, + 1.193862068965517, + 1.4417701149425288, + 1.7736781609195402, + ] + y = [ + -0.033194664836814436, + 0.5306857657503109, + 1.100227301968521, + 1.5713856842646996, + 2.135780760818287, + 2.675601492473303, + 3.3477291246729854, + 3.8469357121413563, + 4.4317021915340735, + 5.1079898786293265, + 6.10275764463696, + 7.310074194793499, + ] + ax1.plot(x, y, c="limegreen", ls="-.", lw=1.5, label="X mode") + + x = [ + 3.9732873563218387, + 3.6515862068965514, + 3.306275862068966, + 2.895655172413793, + 2.4318850574712645, + 2.0747586206896553, + 1.8520229885057473, + 1.6589195402298849, + 1.4594942528735633, + 1.2911724137931033, + 1.1551264367816092, + 1.0335402298850576, + 0.8961149425287356, + 0.7419770114942528, + 0.6141379310344828, + 0.4913103448275862, + ] + y = [ + 1.1145945018655916, + 1.1193978642192393, + 1.1391259596002916, + 1.162971222713042, + 1.1986533430544237, + 1.230389844319595, + 1.2649997855641806, + 1.3265857528841618, + 1.3706737573444268, + 1.4368486511986962, + 1.4933310460179268, + 1.5485268259210019, + 1.6386327572157655, + 1.7062658146416778, + 1.7828194021529358, + 1.8533687867221342, + ] + ax1.plot(x, y, c="limegreen", ls=":", lw=2, label="Bernstein modes") + + x = [ + 3.9669885057471266, + 3.6533333333333333, + 3.3213563218390805, + 2.9646896551724136, + 2.6106436781609195, + 2.2797011494252875, + 1.910919540229885, + 1.6811724137931034, + 1.4499540229885057, + 1.2577011494252872, + 1.081057471264368, + 0.8791494252873564, + 0.7153103448275862, + ] + y = [ + 2.2274306300124374, + 2.2428271218424327, + 2.272505039241755, + 2.3084873697302397, + 2.3586224642964364, + 2.402667581592829, + 2.513873997512545, + 2.5859673199811297, + 2.6586610627439207, + 2.7352146502551786, + 2.8161427284813656, + 2.887850066475104, + 2.9455761890466183, + ] + ax1.plot(x, y, c="limegreen", ls=":", lw=2) + + x = [ + 3.9764137931034487, + 
3.702022988505747, + 3.459793103448276, + 3.166712643678161, + 2.8715862068965516, + 2.5285057471264367, + 2.2068505747126435, + 1.9037011494252871, + 1.6009885057471265, + 1.3447816091954023, + 1.1538850574712645, + 0.9490114942528736, + ] + y = [ + 3.3231976669382854, + 3.34875841660591, + 3.378865205643951, + 3.424454260839731, + 3.474160483767209, + 3.522194107303684, + 3.6205343740618434, + 3.7040356821203417, + 3.785435519149119, + 3.868851052879873, + 3.9169704507440923, + 3.952481022429987, + ] + ax1.plot(x, y, c="limegreen", ls=":", lw=2) + + x = [ + 3.953609195402299, + 3.7670114942528734, + 3.5917471264367817, + 3.39735632183908, + 3.1724137931034484, + 2.9408045977011494, + 2.685977011494253, + 2.4593563218390804, + 2.2203218390804595, + 2.0158850574712646, + 1.834183908045977, + 1.6522758620689655, + 1.4937471264367814, + 1.3427586206896551, + 1.2075402298850575, + ] + y = [ + 4.427971008277223, + 4.458335120298495, + 4.481579963117039, + 4.495861388686366, + 4.544581206844791, + 4.587425483552773, + 4.638160998413175, + 4.698631899472488, + 4.757987734271133, + 4.813955483123902, + 4.862332203971352, + 4.892481880173264, + 4.9247759145687695, + 4.947934983059571, + 4.953124329888064, + ] + ax1.plot(x, y, c="limegreen", ls=":", lw=2) # ax1.legend(loc='upper left') fig.legend(loc=7, fontsize=18) -if sim.B_dir == 'z': - ax1.set_xlabel(r'$k l_i$') - ax1.set_title('$B_{R/L} = B_x \pm iB_y$') +if sim.B_dir == "z": + ax1.set_xlabel(r"$k l_i$") + ax1.set_title("$B_{R/L} = B_x \pm iB_y$") fig.suptitle("Parallel EM modes") ax1.set_xlim(-3, 3) ax1.set_ylim(-6, 3) - dir_str = 'par' + dir_str = "par" else: - ax1.set_xlabel(r'$k \rho_i$') - ax1.set_title('$E_z(k, \omega)$') + ax1.set_xlabel(r"$k \rho_i$") + ax1.set_title("$E_z(k, \omega)$") fig.suptitle(f"Perpendicular EM modes (ion Bernstein) - {sim.dim}D") ax1.set_xlim(-3, 3) ax1.set_ylim(0, 8) - dir_str = 'perp' + dir_str = "perp" -ax1.set_ylabel(r'$\omega / \Omega_i$') +ax1.set_ylabel(r"$\omega / \Omega_i$") 
plt.savefig( f"spectrum_{dir_str}_{sim.dim}d_{sim.substeps}_substeps_{sim.eta}_eta.png", - bbox_inches='tight' + bbox_inches="tight", ) if not sim.test: plt.show() @@ -177,7 +351,8 @@ def get_analytic_L_mode(w): if sim.test: import os import sys - sys.path.insert(1, '../../../../warpx/Regression/Checksum/') + + sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file diff --git a/Examples/Tests/ohm_solver_EM_modes/analysis_rz.py b/Examples/Tests/ohm_solver_EM_modes/analysis_rz.py index f96dd590eee..4d5bc2aa016 100755 --- a/Examples/Tests/ohm_solver_EM_modes/analysis_rz.py +++ b/Examples/Tests/ohm_solver_EM_modes/analysis_rz.py @@ -16,41 +16,43 @@ constants = picmi.constants # load simulation parameters -with open('sim_parameters.dpkl', 'rb') as f: +with open("sim_parameters.dpkl", "rb") as f: sim = dill.load(f) diag_dir = "diags/field_diags" ts = OpenPMDTimeSeries(diag_dir, check_all_files=True) + def transform_spatially(data_for_transform): # interpolate from regular r-grid to special r-grid interp = RegularGridInterpolator( - (info.z, info.r), data_for_transform, - method='linear' + (info.z, info.r), data_for_transform, method="linear" ) data_interp = interp((zg, rg)) # Applying manual hankel in r # Fmz = np.sum(proj*data_for_transform, axis=(2,3)) - Fmz = np.einsum('ijkl,kl->ij', proj, data_interp) + Fmz = np.einsum("ijkl,kl->ij", proj, data_interp) # Standard fourier in z Fmn = fft.fftshift(fft.fft(Fmz, axis=1), axes=1) return Fmn + def process(it): print(f"Processing iteration {it}", flush=True) - field, info = ts.get_field('E', 'y', iteration=it) + field, info = ts.get_field("E", "y", iteration=it) F_k = transform_spatially(field) return F_k + # grab the first iteration to get the grids -Bz, info = ts.get_field('B', 'z', iteration=0) +Bz, info = ts.get_field("B", "z", iteration=0) nr = len(info.r) nz = len(info.z) -nkr = 12 # number of radial modes to solve for +nkr = 12 # number of radial modes 
to solve for r_max = np.max(info.r) @@ -64,16 +66,19 @@ def process(it): r_modes = np.arange(nkr) A = ( - 4.0 * np.pi * r_max**2 / j_1M**2 - * j1(np.outer(jn_zeros(1, max(r_modes)+1)[r_modes], jn_zeros(1, nr)) / j_1M) - / jn(2 ,jn_zeros(1, nr))**2 + 4.0 + * np.pi + * r_max**2 + / j_1M**2 + * j1(np.outer(jn_zeros(1, max(r_modes) + 1)[r_modes], jn_zeros(1, nr)) / j_1M) + / jn(2, jn_zeros(1, nr)) ** 2 ) # No transformation for z B = np.identity(nz) # combine projection arrays -proj = np.einsum('ab,cd->acbd', A, B) +proj = np.einsum("ab,cd->acbd", A, B) results = np.zeros((len(ts.t), nkr, nz), dtype=complex) for ii, it in enumerate(ts.iterations): @@ -83,15 +88,12 @@ def process(it): F_kw = fft.fftshift(fft.fft(results, axis=0), axes=0) dz = info.z[1] - info.z[0] -kz = 2*np.pi*fft.fftshift(fft.fftfreq(F_kw[0].shape[1], dz)) +kz = 2 * np.pi * fft.fftshift(fft.fftfreq(F_kw[0].shape[1], dz)) dt = ts.iterations[1] - ts.iterations[0] -omega = 2*np.pi*fft.fftshift(fft.fftfreq(F_kw.shape[0], sim.dt*dt)) +omega = 2 * np.pi * fft.fftshift(fft.fftfreq(F_kw.shape[0], sim.dt * dt)) # Save data for future plotting purposes -np.savez( - "diags/spectrograms.npz", - F_kw=F_kw, dz=dz, kz=kz, dt=dt, omega=omega -) +np.savez("diags/spectrograms.npz", F_kw=F_kw, dz=dz, kz=kz, dt=dt, omega=omega) # plot the resulting dispersions k = np.linspace(0, 250, 500) @@ -108,59 +110,83 @@ def process(it): ax.set_title(f"m = {m}", fontsize=11) m -= 1 pm1 = ax.pcolormesh( - kz*sim.l_i, omega/sim.w_ci, - abs(F_kw[:, m, :])/np.max(abs(F_kw[:, m, :])), + kz * sim.l_i, + omega / sim.w_ci, + abs(F_kw[:, m, :]) / np.max(abs(F_kw[:, m, :])), norm=colors.LogNorm(vmin=vmin[ii], vmax=vmax), - cmap='inferno' + cmap="inferno", ) cb = fig.colorbar(pm1, ax=ax) - cb.set_label(r'Normalized $E_\theta(k_z, m, \omega)$') + cb.set_label(r"Normalized $E_\theta(k_z, m, \omega)$") # Get dispersion relation - see for example # T. Stix, Waves in Plasmas (American Inst. 
of Physics, 1992), Chap 6, Sec 2 - nu_m = jn_zeros(1, m+1)[-1] / sim.Lr + nu_m = jn_zeros(1, m + 1)[-1] / sim.Lr R2 = 0.5 * (nu_m**2 * (1.0 + kappa**2) + k**2 * (kappa**2 + 2.0)) P4 = k**2 * (nu_m**2 + k**2) omega_fast = sim.vA * np.sqrt(R2 + np.sqrt(R2**2 - P4)) omega_slow = sim.vA * np.sqrt(R2 - np.sqrt(R2**2 - P4)) # Upper right corner - ax.plot(k*sim.l_i, omega_fast/sim.w_ci, 'w--', label = "$\omega_{fast}$") - ax.plot(k*sim.l_i, omega_slow/sim.w_ci, color='white', linestyle='--', label = "$\omega_{slow}$") + ax.plot(k * sim.l_i, omega_fast / sim.w_ci, "w--", label="$\omega_{fast}$") + ax.plot( + k * sim.l_i, + omega_slow / sim.w_ci, + color="white", + linestyle="--", + label="$\omega_{slow}$", + ) # Thermal resonance - thermal_res = sim.w_ci + 3*sim.v_ti*k - ax.plot(k*sim.l_i, thermal_res/sim.w_ci, color='magenta', linestyle='--', label = "$\omega = \Omega_i + 3v_{th,i}k$") - ax.plot(-k*sim.l_i, thermal_res/sim.w_ci, color='magenta', linestyle='--', label = "") - thermal_res = sim.w_ci - 3*sim.v_ti*k - ax.plot(k*sim.l_i, thermal_res/sim.w_ci, color='magenta', linestyle='--', label = "$\omega = \Omega_i + 3v_{th,i}k$") - ax.plot(-k*sim.l_i, thermal_res/sim.w_ci, color='magenta', linestyle='--', label = "") + thermal_res = sim.w_ci + 3 * sim.v_ti * k + ax.plot( + k * sim.l_i, + thermal_res / sim.w_ci, + color="magenta", + linestyle="--", + label="$\omega = \Omega_i + 3v_{th,i}k$", + ) + ax.plot( + -k * sim.l_i, thermal_res / sim.w_ci, color="magenta", linestyle="--", label="" + ) + thermal_res = sim.w_ci - 3 * sim.v_ti * k + ax.plot( + k * sim.l_i, + thermal_res / sim.w_ci, + color="magenta", + linestyle="--", + label="$\omega = \Omega_i + 3v_{th,i}k$", + ) + ax.plot( + -k * sim.l_i, thermal_res / sim.w_ci, color="magenta", linestyle="--", label="" + ) for ax in axes.flatten(): ax.set_xlim(-1.75, 1.75) ax.set_ylim(0, 1.6) -axes[0, 0].set_ylabel('$\omega/\Omega_{ci}$') -axes[1, 0].set_ylabel('$\omega/\Omega_{ci}$') -axes[1, 0].set_xlabel('$k_zl_i$') -axes[1, 
1].set_xlabel('$k_zl_i$') +axes[0, 0].set_ylabel("$\omega/\Omega_{ci}$") +axes[1, 0].set_ylabel("$\omega/\Omega_{ci}$") +axes[1, 0].set_xlabel("$k_zl_i$") +axes[1, 1].set_xlabel("$k_zl_i$") -plt.savefig('normal_modes_disp.png', dpi=600) +plt.savefig("normal_modes_disp.png", dpi=600) if not sim.test: plt.show() else: plt.close() # check if power spectrum sampling match earlier results - amps = np.abs(F_kw[2, 1, len(kz)//2-2:len(kz)//2+2]) + amps = np.abs(F_kw[2, 1, len(kz) // 2 - 2 : len(kz) // 2 + 2]) print("Amplitude sample: ", amps) assert np.allclose( - amps, np.array([ 61.02377286, 19.80026021, 100.47687017, 10.83331295]) + amps, np.array([61.02377286, 19.80026021, 100.47687017, 10.83331295]) ) if sim.test: import os import sys - sys.path.insert(1, '../../../../warpx/Regression/Checksum/') + + sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file diff --git a/Examples/Tests/ohm_solver_ion_Landau_damping/PICMI_inputs.py b/Examples/Tests/ohm_solver_ion_Landau_damping/PICMI_inputs.py index d4430502f42..4f7c26bb403 100644 --- a/Examples/Tests/ohm_solver_ion_Landau_damping/PICMI_inputs.py +++ b/Examples/Tests/ohm_solver_ion_Landau_damping/PICMI_inputs.py @@ -20,35 +20,33 @@ comm = mpi.COMM_WORLD -simulation = picmi.Simulation( - warpx_serialize_initial_conditions=True, - verbose=0 -) +simulation = picmi.Simulation(warpx_serialize_initial_conditions=True, verbose=0) class IonLandauDamping(object): - '''This input is based on the ion Landau damping test as described by + """This input is based on the ion Landau damping test as described by Munoz et al. (2018). 
- ''' + """ + # Applied field parameters - B0 = 0.1 # Initial magnetic field strength (T) - beta = 2.0 # Plasma beta, used to calculate temperature + B0 = 0.1 # Initial magnetic field strength (T) + beta = 2.0 # Plasma beta, used to calculate temperature # Plasma species parameters - m_ion = 100.0 # Ion mass (electron masses) - vA_over_c = 1e-3 # ratio of Alfven speed and the speed of light + m_ion = 100.0 # Ion mass (electron masses) + vA_over_c = 1e-3 # ratio of Alfven speed and the speed of light # Spatial domain - Nz = 256 # number of cells in z direction - Nx = 4 # number of cells in x (and y) direction for >1 dimensions + Nz = 256 # number of cells in z direction + Nx = 4 # number of cells in x (and y) direction for >1 dimensions # Temporal domain (if not run as a CI test) - LT = 40.0 # Simulation temporal length (ion cyclotron periods) + LT = 40.0 # Simulation temporal length (ion cyclotron periods) # Numerical parameters - NPPC = [8192, 4096, 1024] # Seed number of particles per cell - DZ = 1.0 / 6.0 # Cell size (ion skin depths) - DT = 1e-3 # Time step (ion cyclotron periods) + NPPC = [8192, 4096, 1024] # Seed number of particles per cell + DZ = 1.0 / 6.0 # Cell size (ion skin depths) + DT = 1e-3 # Time step (ion cyclotron periods) # density perturbation strength epsilon = 0.03 @@ -58,7 +56,6 @@ class IonLandauDamping(object): # Number of substeps used to update B substeps = 10 - def __init__(self, test, dim, m, T_ratio, verbose): """Get input parameters for the specific case desired.""" self.test = test @@ -68,7 +65,7 @@ def __init__(self, test, dim, m, T_ratio, verbose): self.verbose = verbose or self.test # sanity check - assert (dim > 0 and dim < 4), f"{dim}-dimensions not a valid input" + assert dim > 0 and dim < 4, f"{dim}-dimensions not a valid input" # calculate various plasma parameters based on the simulation input self.get_plasma_quantities() @@ -77,7 +74,7 @@ def __init__(self, test, dim, m, T_ratio, verbose): self.Lz = self.Nz * self.dz 
self.Lx = self.Nx * self.dz - diag_period = 1 / 16.0 # Output interval (ion cyclotron periods) + diag_period = 1 / 16.0 # Output interval (ion cyclotron periods) self.diag_steps = int(diag_period / self.DT) self.total_steps = int(np.ceil(self.LT / self.DT)) @@ -85,11 +82,11 @@ def __init__(self, test, dim, m, T_ratio, verbose): if self.test: self.total_steps = 100 - self.dt = self.DT / self.w_ci # self.DT * self.t_ci + self.dt = self.DT / self.w_ci # self.DT * self.t_ci # dump all the current attributes to a dill pickle file if comm.rank == 0: - with open('sim_parameters.dpkl', 'wb') as f: + with open("sim_parameters.dpkl", "wb") as f: dill.dump(self, f) # print out plasma parameters @@ -129,14 +126,12 @@ def get_plasma_quantities(self): # Alfven speed (m/s): vA = B / sqrt(mu0 * n * (M + m)) = c * omega_ci / w_pi self.vA = self.vA_over_c * constants.c - self.n_plasma = ( - (self.B0 / self.vA)**2 / (constants.mu0 * (self.M + constants.m_e)) + self.n_plasma = (self.B0 / self.vA) ** 2 / ( + constants.mu0 * (self.M + constants.m_e) ) # Ion plasma frequency (Hz) - self.w_pi = np.sqrt( - constants.q_e**2 * self.n_plasma / (self.M * constants.ep0) - ) + self.w_pi = np.sqrt(constants.q_e**2 * self.n_plasma / (self.M * constants.ep0)) # Skin depth (m) self.l_i = constants.c / self.w_pi @@ -145,7 +140,7 @@ def get_plasma_quantities(self): self.v_ti = np.sqrt(self.beta / 2.0) * self.vA # Temperature (eV) from thermal speed: v_ti = sqrt(kT / M) - self.T_plasma = self.v_ti**2 * self.M / constants.q_e # eV + self.T_plasma = self.v_ti**2 * self.M / constants.q_e # eV # Larmor radius (m) self.rho_i = self.v_ti / self.w_ci @@ -165,17 +160,17 @@ def setup_run(self): grid_object = picmi.Cartesian3DGrid self.grid = grid_object( - number_of_cells=[self.Nx, self.Nx, self.Nz][-self.dim:], + number_of_cells=[self.Nx, self.Nx, self.Nz][-self.dim :], warpx_max_grid_size=self.Nz, - lower_bound=[-self.Lx/2.0, -self.Lx/2.0, 0][-self.dim:], - upper_bound=[self.Lx/2.0, self.Lx/2.0, 
self.Lz][-self.dim:], - lower_boundary_conditions=['periodic']*self.dim, - upper_boundary_conditions=['periodic']*self.dim, - warpx_blocking_factor=4 + lower_bound=[-self.Lx / 2.0, -self.Lx / 2.0, 0][-self.dim :], + upper_bound=[self.Lx / 2.0, self.Lx / 2.0, self.Lz][-self.dim :], + lower_boundary_conditions=["periodic"] * self.dim, + upper_boundary_conditions=["periodic"] * self.dim, + warpx_blocking_factor=4, ) simulation.time_step_size = self.dt simulation.max_steps = self.total_steps - simulation.current_deposition_algo = 'direct' + simulation.current_deposition_algo = "direct" simulation.particle_shape = 1 simulation.verbose = self.verbose @@ -184,10 +179,12 @@ def setup_run(self): ####################################################################### self.solver = picmi.HybridPICSolver( - grid=self.grid, gamma=1.0, - Te=self.T_plasma/self.T_ratio, + grid=self.grid, + gamma=1.0, + Te=self.T_plasma / self.T_ratio, n0=self.n_plasma, - plasma_resistivity=self.eta, substeps=self.substeps + plasma_resistivity=self.eta, + substeps=self.substeps, ) simulation.solver = self.solver @@ -195,19 +192,21 @@ def setup_run(self): # Particle types setup # ####################################################################### - k_m = 2.0*np.pi*self.m / self.Lz + k_m = 2.0 * np.pi * self.m / self.Lz self.ions = picmi.Species( - name='ions', charge='q_e', mass=self.M, + name="ions", + charge="q_e", + mass=self.M, initial_distribution=picmi.AnalyticDistribution( density_expression=f"{self.n_plasma}*(1+{self.epsilon}*cos({k_m}*z))", - rms_velocity=[self.v_ti]*3 - ) + rms_velocity=[self.v_ti] * 3, + ), ) simulation.add_species( self.ions, layout=picmi.PseudoRandomLayout( - grid=self.grid, n_macroparticles_per_cell=self.NPPC[self.dim-1] - ) + grid=self.grid, n_macroparticles_per_cell=self.NPPC[self.dim - 1] + ), ) ####################################################################### @@ -218,25 +217,25 @@ def setup_run(self): if self.test: particle_diag = 
picmi.ParticleDiagnostic( - name='diag1', + name="diag1", period=100, - write_dir='.', + write_dir=".", species=[self.ions], - data_list = ['ux', 'uy', 'uz', 'x', 'z', 'weighting'], - warpx_file_prefix=f'Python_ohms_law_solver_landau_damping_{self.dim}d_plt', + data_list=["ux", "uy", "uz", "x", "z", "weighting"], + warpx_file_prefix=f"Python_ohms_law_solver_landau_damping_{self.dim}d_plt", ) simulation.add_diagnostic(particle_diag) field_diag = picmi.FieldDiagnostic( - name='diag1', + name="diag1", grid=self.grid, period=100, - write_dir='.', - data_list = ['Bx', 'By', 'Bz', 'Ex', 'Ey', 'Ez', 'Jx', 'Jy', 'Jz'], - warpx_file_prefix=f'Python_ohms_law_solver_landau_damping_{self.dim}d_plt', + write_dir=".", + data_list=["Bx", "By", "Bz", "Ex", "Ey", "Ez", "Jx", "Jy", "Jz"], + warpx_file_prefix=f"Python_ohms_law_solver_landau_damping_{self.dim}d_plt", ) simulation.add_diagnostic(field_diag) - self.output_file_name = 'field_data.txt' + self.output_file_name = "field_data.txt" # install a custom "reduced diagnostic" to save the average field callbacks.installafterEsolve(self._record_average_fields) try: @@ -244,7 +243,7 @@ def setup_run(self): except OSError: # diags directory already exists pass - with open(f"diags/{self.output_file_name}", 'w') as f: + with open(f"diags/{self.output_file_name}", "w") as f: f.write("[0]step() [1]time(s) [2]z_coord(m) [3]Ez_lev0-(V/m)\n") self.prev_time = time.time() @@ -259,9 +258,7 @@ def setup_run(self): simulation.initialize_warpx() # get ion particle container wrapper - self.ion_part_container = particle_containers.ParticleContainerWrapper( - 'ions' - ) + self.ion_part_container = particle_containers.ParticleContainerWrapper("ions") def text_diag(self): """Diagnostic function to print out timing data and particle numbers.""" @@ -275,12 +272,12 @@ def text_diag(self): step_rate = steps / wall_time status_dict = { - 'step': step, - 'nplive ions': self.ion_part_container.nps, - 'wall_time': wall_time, - 'step_rate': step_rate, + 
"step": step, + "nplive ions": self.ion_part_container.nps, + "wall_time": wall_time, + "step_rate": step_rate, "diag_steps": self.diag_steps, - 'iproc': None + "iproc": None, } diag_string = ( @@ -321,11 +318,9 @@ def _record_average_fields(self): else: Ez = np.mean(Ez_warpx, axis=(0, 1)) - with open(f"diags/{self.output_file_name}", 'a') as f: + with open(f"diags/{self.output_file_name}", "a") as f: for ii in range(self.Nz): - f.write( - f"{step:05d} {t:.10e} {z_vals[ii]:.10e} {Ez[ii]:+.10e}\n" - ) + f.write(f"{step:05d} {t:.10e} {z_vals[ii]:.10e} {Ez[ii]:+.10e}\n") ########################## @@ -334,29 +329,38 @@ def _record_average_fields(self): parser = argparse.ArgumentParser() parser.add_argument( - '-t', '--test', help='toggle whether this script is run as a short CI test', - action='store_true', + "-t", + "--test", + help="toggle whether this script is run as a short CI test", + action="store_true", ) parser.add_argument( - '-d', '--dim', help='Simulation dimension', required=False, type=int, - default=1 + "-d", "--dim", help="Simulation dimension", required=False, type=int, default=1 ) parser.add_argument( - '-m', help='Mode number to excite', required=False, type=int, - default=4 + "-m", help="Mode number to excite", required=False, type=int, default=4 ) parser.add_argument( - '--temp_ratio', help='Ratio of ion to electron temperature', required=False, - type=float, default=1.0/3 + "--temp_ratio", + help="Ratio of ion to electron temperature", + required=False, + type=float, + default=1.0 / 3, ) parser.add_argument( - '-v', '--verbose', help='Verbose output', action='store_true', + "-v", + "--verbose", + help="Verbose output", + action="store_true", ) args, left = parser.parse_known_args() -sys.argv = sys.argv[:1]+left +sys.argv = sys.argv[:1] + left run = IonLandauDamping( - test=args.test, dim=args.dim, m=args.m, T_ratio=args.temp_ratio, - verbose=args.verbose + test=args.test, + dim=args.dim, + m=args.m, + T_ratio=args.temp_ratio, + 
verbose=args.verbose, ) simulation.step() diff --git a/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py b/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py index 68e231c6c55..700ad68fe87 100755 --- a/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py +++ b/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py @@ -11,39 +11,56 @@ constants = picmi.constants -matplotlib.rcParams.update({'font.size': 20}) +matplotlib.rcParams.update({"font.size": 20}) # load simulation parameters -with open('sim_parameters.dpkl', 'rb') as f: +with open("sim_parameters.dpkl", "rb") as f: sim = dill.load(f) # theoretical damping rates were taken from Fig. 14b of Munoz et al. -theoretical_damping_rate = np.array([ - [0.09456706, 0.05113443], [0.09864177, 0.05847507], - [0.10339559, 0.0659153 ], [0.10747029, 0.07359366], - [0.11290323, 0.08256106], [0.11833616, 0.09262114], - [0.12580645, 0.10541121], [0.13327674, 0.11825558], - [0.14006791, 0.13203098], [0.14889643, 0.14600538], - [0.15772496, 0.16379615], [0.16791171, 0.18026693], - [0.17606112, 0.19650209], [0.18828523, 0.21522808], - [0.19983022, 0.23349062], [0.21273345, 0.25209216], - [0.22835314, 0.27877403], [0.24465195, 0.30098317], - [0.25959253, 0.32186286], [0.27657046, 0.34254601], - [0.29626486, 0.36983567], [0.3139219 , 0.38984826], - [0.33157895, 0.40897973], [0.35195246, 0.43526107], - [0.37368421, 0.45662113], [0.39745331, 0.47902942], - [0.44974533, 0.52973074], [0.50747029, 0.57743925], - [0.57334465, 0.63246726], [0.64193548, 0.67634255] -]) +theoretical_damping_rate = np.array( + [ + [0.09456706, 0.05113443], + [0.09864177, 0.05847507], + [0.10339559, 0.0659153], + [0.10747029, 0.07359366], + [0.11290323, 0.08256106], + [0.11833616, 0.09262114], + [0.12580645, 0.10541121], + [0.13327674, 0.11825558], + [0.14006791, 0.13203098], + [0.14889643, 0.14600538], + [0.15772496, 0.16379615], + [0.16791171, 0.18026693], + [0.17606112, 0.19650209], + [0.18828523, 0.21522808], + [0.19983022, 0.23349062], 
+ [0.21273345, 0.25209216], + [0.22835314, 0.27877403], + [0.24465195, 0.30098317], + [0.25959253, 0.32186286], + [0.27657046, 0.34254601], + [0.29626486, 0.36983567], + [0.3139219, 0.38984826], + [0.33157895, 0.40897973], + [0.35195246, 0.43526107], + [0.37368421, 0.45662113], + [0.39745331, 0.47902942], + [0.44974533, 0.52973074], + [0.50747029, 0.57743925], + [0.57334465, 0.63246726], + [0.64193548, 0.67634255], + ] +) expected_gamma = np.interp( sim.T_ratio, theoretical_damping_rate[:, 0], theoretical_damping_rate[:, 1] ) data = np.loadtxt("diags/field_data.txt", skiprows=1) -field_idx_dict = {'z': 2, 'Ez': 3} +field_idx_dict = {"z": 2, "Ez": 3} -step = data[:,0] +step = data[:, 0] num_steps = len(np.unique(step)) @@ -51,16 +68,16 @@ resolution = len(np.where(step == 0)[0]) - 1 # reshape to separate spatial and time coordinates -sim_data = data.reshape((num_steps, resolution+1, data.shape[1])) +sim_data = data.reshape((num_steps, resolution + 1, data.shape[1])) -z_grid = sim_data[1, :, field_idx_dict['z']] +z_grid = sim_data[1, :, field_idx_dict["z"]] idx = np.argsort(z_grid)[1:] dz = np.mean(np.diff(z_grid[idx])) -dt = np.mean(np.diff(sim_data[:,0,1])) +dt = np.mean(np.diff(sim_data[:, 0, 1])) data = np.zeros((num_steps, resolution)) for i in range(num_steps): - data[i,:] = sim_data[i,idx,field_idx_dict['Ez']] + data[i, :] = sim_data[i, idx, field_idx_dict["Ez"]] print(f"Data file contains {num_steps} time snapshots.") print(f"Spatial resolution is {resolution}") @@ -72,23 +89,23 @@ # Plot the 4th Fourier mode fig, ax1 = plt.subplots(1, 1, figsize=(10, 5)) -t_points = np.arange(num_steps)*dt*t_norm +t_points = np.arange(num_steps) * dt * t_norm ax1.plot( - t_points, np.abs(field_kt[:, sim.m] / field_kt[0, sim.m]), 'r', - label=f'$T_i/T_e$ = {sim.T_ratio:.2f}' + t_points, + np.abs(field_kt[:, sim.m] / field_kt[0, sim.m]), + "r", + label=f"$T_i/T_e$ = {sim.T_ratio:.2f}", ) # Plot a line showing the expected damping rate t_points = t_points[np.where(t_points < 
8)] -ax1.plot( - t_points, np.exp(-t_points*expected_gamma), 'k--', lw=2 -) +ax1.plot(t_points, np.exp(-t_points * expected_gamma), "k--", lw=2) ax1.grid() ax1.legend() -ax1.set_yscale('log') -ax1.set_ylabel('$|E_z|/E_0$') -ax1.set_xlabel('t $(k_mv_{th,i})$') +ax1.set_yscale("log") +ax1.set_ylabel("$|E_z|/E_0$") +ax1.set_xlabel("t $(k_mv_{th,i})$") ax1.set_xlim(0, 18) ax1.set_title(f"Ion Landau damping - {sim.dim}d") @@ -98,7 +115,8 @@ if sim.test: import os import sys - sys.path.insert(1, '../../../../warpx/Regression/Checksum/') + + sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file diff --git a/Examples/Tests/ohm_solver_ion_beam_instability/PICMI_inputs.py b/Examples/Tests/ohm_solver_ion_beam_instability/PICMI_inputs.py index 4f8b5edcc3e..2558d70b4b8 100644 --- a/Examples/Tests/ohm_solver_ion_beam_instability/PICMI_inputs.py +++ b/Examples/Tests/ohm_solver_ion_beam_instability/PICMI_inputs.py @@ -21,35 +21,33 @@ comm = mpi.COMM_WORLD -simulation = picmi.Simulation( - warpx_serialize_initial_conditions=True, - verbose=0 -) +simulation = picmi.Simulation(warpx_serialize_initial_conditions=True, verbose=0) class HybridPICBeamInstability(object): - '''This input is based on the ion beam R instability test as described by + """This input is based on the ion beam R instability test as described by Munoz et al. (2018). 
- ''' + """ + # Applied field parameters - B0 = 0.25 # Initial magnetic field strength (T) - beta = 1.0 # Plasma beta, used to calculate temperature + B0 = 0.25 # Initial magnetic field strength (T) + beta = 1.0 # Plasma beta, used to calculate temperature # Plasma species parameters - m_ion = 100.0 # Ion mass (electron masses) - vA_over_c = 1e-4 # ratio of Alfven speed and the speed of light + m_ion = 100.0 # Ion mass (electron masses) + vA_over_c = 1e-4 # ratio of Alfven speed and the speed of light # Spatial domain - Nz = 1024 # number of cells in z direction - Nx = 8 # number of cells in x (and y) direction for >1 dimensions + Nz = 1024 # number of cells in z direction + Nx = 8 # number of cells in x (and y) direction for >1 dimensions # Temporal domain (if not run as a CI test) - LT = 120.0 # Simulation temporal length (ion cyclotron periods) + LT = 120.0 # Simulation temporal length (ion cyclotron periods) # Numerical parameters - NPPC = [1024, 256, 64] # Seed number of particles per cell - DZ = 1.0 / 4.0 # Cell size (ion skin depths) - DT = 0.01 # Time step (ion cyclotron periods) + NPPC = [1024, 256, 64] # Seed number of particles per cell + DZ = 1.0 / 4.0 # Cell size (ion skin depths) + DT = 0.01 # Time step (ion cyclotron periods) # Plasma resistivity - used to dampen the mode excitation eta = 1e-7 @@ -58,7 +56,7 @@ class HybridPICBeamInstability(object): # Beam parameters n_beam = [0.02, 0.1] - U_bc = 10.0 # relative drifts between beam and core in Alfven speeds + U_bc = 10.0 # relative drifts between beam and core in Alfven speeds def __init__(self, test, dim, resonant, verbose): """Get input parameters for the specific case desired.""" @@ -68,7 +66,7 @@ def __init__(self, test, dim, resonant, verbose): self.verbose = verbose or self.test # sanity check - assert (dim > 0 and dim < 4), f"{dim}-dimensions not a valid input" + assert dim > 0 and dim < 4, f"{dim}-dimensions not a valid input" # calculate various plasma parameters based on the simulation 
input self.get_plasma_quantities() @@ -92,7 +90,7 @@ def __init__(self, test, dim, resonant, verbose): self.volume = self.Lz self.N_cells = self.Nz - diag_period = 1 / 4.0 # Output interval (ion cyclotron periods) + diag_period = 1 / 4.0 # Output interval (ion cyclotron periods) self.diag_steps = int(diag_period / self.DT) # if this is a test case run for only 25 cyclotron periods @@ -105,7 +103,7 @@ def __init__(self, test, dim, resonant, verbose): # dump all the current attributes to a dill pickle file if comm.rank == 0: - with open('sim_parameters.dpkl', 'wb') as f: + with open("sim_parameters.dpkl", "wb") as f: dill.dump(self, f) # print out plasma parameters @@ -145,14 +143,12 @@ def get_plasma_quantities(self): # Alfven speed (m/s): vA = B / sqrt(mu0 * n * (M + m)) = c * omega_ci / w_pi self.vA = self.vA_over_c * constants.c - self.n_plasma = ( - (self.B0 / self.vA)**2 / (constants.mu0 * (self.M + constants.m_e)) + self.n_plasma = (self.B0 / self.vA) ** 2 / ( + constants.mu0 * (self.M + constants.m_e) ) # Ion plasma frequency (Hz) - self.w_pi = np.sqrt( - constants.q_e**2 * self.n_plasma / (self.M * constants.ep0) - ) + self.w_pi = np.sqrt(constants.q_e**2 * self.n_plasma / (self.M * constants.ep0)) # Skin depth (m) self.l_i = constants.c / self.w_pi @@ -161,7 +157,7 @@ def get_plasma_quantities(self): self.v_ti = np.sqrt(self.beta / 2.0) * self.vA # Temperature (eV) from thermal speed: v_ti = sqrt(kT / M) - self.T_plasma = self.v_ti**2 * self.M / constants.q_e # eV + self.T_plasma = self.v_ti**2 * self.M / constants.q_e # eV # Larmor radius (m) self.rho_i = self.v_ti / self.w_ci @@ -181,16 +177,16 @@ def setup_run(self): grid_object = picmi.Cartesian3DGrid self.grid = grid_object( - number_of_cells=[self.Nx, self.Nx, self.Nz][-self.dim:], + number_of_cells=[self.Nx, self.Nx, self.Nz][-self.dim :], warpx_max_grid_size=self.Nz, - lower_bound=[-self.Lx/2.0, -self.Lx/2.0, 0][-self.dim:], - upper_bound=[self.Lx/2.0, self.Lx/2.0, self.Lz][-self.dim:], - 
lower_boundary_conditions=['periodic']*self.dim, - upper_boundary_conditions=['periodic']*self.dim + lower_bound=[-self.Lx / 2.0, -self.Lx / 2.0, 0][-self.dim :], + upper_bound=[self.Lx / 2.0, self.Lx / 2.0, self.Lz][-self.dim :], + lower_boundary_conditions=["periodic"] * self.dim, + upper_boundary_conditions=["periodic"] * self.dim, ) simulation.time_step_size = self.dt simulation.max_steps = self.total_steps - simulation.current_deposition_algo = 'direct' + simulation.current_deposition_algo = "direct" simulation.particle_shape = 1 simulation.verbose = self.verbose @@ -199,17 +195,17 @@ def setup_run(self): ####################################################################### self.solver = picmi.HybridPICSolver( - grid=self.grid, gamma=1.0, - Te=self.T_plasma/10.0, - n0=self.n_plasma+self.n_beam, - plasma_resistivity=self.eta, substeps=self.substeps + grid=self.grid, + gamma=1.0, + Te=self.T_plasma / 10.0, + n0=self.n_plasma + self.n_beam, + plasma_resistivity=self.eta, + substeps=self.substeps, ) simulation.solver = self.solver B_ext = picmi.AnalyticInitialField( - Bx_expression=0.0, - By_expression=0.0, - Bz_expression=self.B0 + Bx_expression=0.0, By_expression=0.0, Bz_expression=self.B0 ) simulation.add_applied_field(B_ext) @@ -218,33 +214,36 @@ def setup_run(self): ####################################################################### self.ions = picmi.Species( - name='ions', charge='q_e', mass=self.M, + name="ions", + charge="q_e", + mass=self.M, initial_distribution=picmi.UniformDistribution( density=self.n_plasma, - rms_velocity=[self.v_ti]*3, - directed_velocity=[0, 0, self.u_c] - ) + rms_velocity=[self.v_ti] * 3, + directed_velocity=[0, 0, self.u_c], + ), ) simulation.add_species( self.ions, layout=picmi.PseudoRandomLayout( - grid=self.grid, n_macroparticles_per_cell=self.NPPC[self.dim-1] - ) + grid=self.grid, n_macroparticles_per_cell=self.NPPC[self.dim - 1] + ), ) self.beam_ions = picmi.Species( - name='beam_ions', charge='q_e', mass=self.M, + 
name="beam_ions", + charge="q_e", + mass=self.M, initial_distribution=picmi.UniformDistribution( density=self.n_beam, - rms_velocity=[self.v_ti]*3, - directed_velocity=[0, 0, self.u_beam] - ) + rms_velocity=[self.v_ti] * 3, + directed_velocity=[0, 0, self.u_beam], + ), ) simulation.add_species( self.beam_ions, layout=picmi.PseudoRandomLayout( - grid=self.grid, - n_macroparticles_per_cell=self.NPPC[self.dim-1]/2 - ) + grid=self.grid, n_macroparticles_per_cell=self.NPPC[self.dim - 1] / 2 + ), ) ####################################################################### @@ -256,48 +255,48 @@ def setup_run(self): if self.test: part_diag = picmi.ParticleDiagnostic( - name='diag1', + name="diag1", period=1250, species=[self.ions, self.beam_ions], - data_list = ['ux', 'uy', 'uz', 'z', 'weighting'], - write_dir='.', - warpx_file_prefix='Python_ohms_law_solver_ion_beam_1d_plt', + data_list=["ux", "uy", "uz", "z", "weighting"], + write_dir=".", + warpx_file_prefix="Python_ohms_law_solver_ion_beam_1d_plt", ) simulation.add_diagnostic(part_diag) field_diag = picmi.FieldDiagnostic( - name='diag1', + name="diag1", grid=self.grid, period=1250, - data_list = ['Bx', 'By', 'Bz', 'Ex', 'Ey', 'Ez', 'Jx', 'Jy', 'Jz'], - write_dir='.', - warpx_file_prefix='Python_ohms_law_solver_ion_beam_1d_plt', + data_list=["Bx", "By", "Bz", "Ex", "Ey", "Ez", "Jx", "Jy", "Jz"], + write_dir=".", + warpx_file_prefix="Python_ohms_law_solver_ion_beam_1d_plt", ) simulation.add_diagnostic(field_diag) # output the full particle data at t*w_ci = 40 step = int(40.0 / self.DT) parts_diag = picmi.ParticleDiagnostic( - name='parts_diag', + name="parts_diag", period=f"{step}:{step}", species=[self.ions, self.beam_ions], - write_dir='diags', - warpx_file_prefix='Python_hybrid_PIC_plt', - warpx_format = 'openpmd', - warpx_openpmd_backend = 'h5' + write_dir="diags", + warpx_file_prefix="Python_hybrid_PIC_plt", + warpx_format="openpmd", + warpx_openpmd_backend="h5", ) simulation.add_diagnostic(parts_diag) - 
self.output_file_name = 'field_data.txt' + self.output_file_name = "field_data.txt" if self.dim == 1: line_diag = picmi.ReducedDiagnostic( - diag_type='FieldProbe', - probe_geometry='Line', + diag_type="FieldProbe", + probe_geometry="Line", z_probe=0, z1_probe=self.Lz, resolution=self.Nz - 1, name=self.output_file_name[:-4], period=self.diag_steps, - path='diags/' + path="diags/", ) simulation.add_diagnostic(line_diag) else: @@ -308,10 +307,9 @@ def setup_run(self): except OSError: # diags directory already exists pass - with open(f"diags/{self.output_file_name}", 'w') as f: + with open(f"diags/{self.output_file_name}", "w") as f: f.write("[0]step() [1]time(s) [2]z_coord(m) [3]By_lev0-(T)\n") - ####################################################################### # Initialize simulation # ####################################################################### @@ -335,7 +333,7 @@ def _create_data_arrays(self): if libwarpx.amr.ParallelDescriptor.MyProc() == 0: # allocate arrays for storing energy values - self.energy_vals = np.zeros((self.total_steps//self.diag_steps, 4)) + self.energy_vals = np.zeros((self.total_steps // self.diag_steps, 4)) def text_diag(self): """Diagnostic function to print out timing data and particle numbers.""" @@ -352,13 +350,13 @@ def text_diag(self): step_rate = steps / wall_time status_dict = { - 'step': step, - 'nplive beam ions': self.ion_container_wrapper.nps, - 'nplive ions': self.beam_ion_container_wrapper.nps, - 'wall_time': wall_time, - 'step_rate': step_rate, + "step": step, + "nplive beam ions": self.ion_container_wrapper.nps, + "nplive ions": self.beam_ion_container_wrapper.nps, + "wall_time": wall_time, + "step_rate": step_rate, "diag_steps": self.diag_steps, - 'iproc': None + "iproc": None, } diag_string = ( @@ -401,7 +399,7 @@ def energy_diagnostic(self): self.energy_vals[idx, 3] = Eb_perp if step == self.total_steps: - np.save('diags/energies.npy', run.energy_vals) + np.save("diags/energies.npy", run.energy_vals) def 
_get_kinetic_energy(self, container_wrapper): """Utility function to retrieve the total kinetic energy in the @@ -445,11 +443,9 @@ def _record_average_fields(self): else: By = np.mean(By_warpx[:-1], axis=(0, 1)) - with open(f"diags/{self.output_file_name}", 'a') as f: + with open(f"diags/{self.output_file_name}", "a") as f: for ii in range(self.Nz): - f.write( - f"{step:05d} {t:.10e} {z_vals[ii]:.10e} {By[ii]:+.10e}\n" - ) + f.write(f"{step:05d} {t:.10e} {z_vals[ii]:.10e} {By[ii]:+.10e}\n") ########################## @@ -458,22 +454,29 @@ def _record_average_fields(self): parser = argparse.ArgumentParser() parser.add_argument( - '-t', '--test', help='toggle whether this script is run as a short CI test', - action='store_true', + "-t", + "--test", + help="toggle whether this script is run as a short CI test", + action="store_true", ) parser.add_argument( - '-d', '--dim', help='Simulation dimension', required=False, type=int, - default=1 + "-d", "--dim", help="Simulation dimension", required=False, type=int, default=1 ) parser.add_argument( - '-r', '--resonant', help='Run the resonant case', required=False, - action='store_true', + "-r", + "--resonant", + help="Run the resonant case", + required=False, + action="store_true", ) parser.add_argument( - '-v', '--verbose', help='Verbose output', action='store_true', + "-v", + "--verbose", + help="Verbose output", + action="store_true", ) args, left = parser.parse_known_args() -sys.argv = sys.argv[:1]+left +sys.argv = sys.argv[:1] + left run = HybridPICBeamInstability( test=args.test, dim=args.dim, resonant=args.resonant, verbose=args.verbose diff --git a/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py b/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py index 7fd6746eafe..5bd9db3d91d 100755 --- a/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py +++ b/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py @@ -12,24 +12,24 @@ constants = picmi.constants 
-matplotlib.rcParams.update({'font.size': 20}) +matplotlib.rcParams.update({"font.size": 20}) # load simulation parameters -with open('sim_parameters.dpkl', 'rb') as f: +with open("sim_parameters.dpkl", "rb") as f: sim = dill.load(f) if sim.resonant: - resonant_str = 'resonant' + resonant_str = "resonant" else: - resonant_str = 'non resonant' + resonant_str = "non resonant" data = np.loadtxt("diags/field_data.txt", skiprows=1) if sim.dim == 1: - field_idx_dict = {'z': 4, 'By': 8} + field_idx_dict = {"z": 4, "By": 8} else: - field_idx_dict = {'z': 2, 'By': 3} + field_idx_dict = {"z": 2, "By": 3} -step = data[:,0] +step = data[:, 0] num_steps = len(np.unique(step)) @@ -37,16 +37,16 @@ resolution = len(np.where(step == 0)[0]) - 1 # reshape to separate spatial and time coordinates -sim_data = data.reshape((num_steps, resolution+1, data.shape[1])) +sim_data = data.reshape((num_steps, resolution + 1, data.shape[1])) -z_grid = sim_data[1, :, field_idx_dict['z']] +z_grid = sim_data[1, :, field_idx_dict["z"]] idx = np.argsort(z_grid)[1:] dz = np.mean(np.diff(z_grid[idx])) -dt = np.mean(np.diff(sim_data[:,0,1])) +dt = np.mean(np.diff(sim_data[:, 0, 1])) data = np.zeros((num_steps, resolution)) for i in range(num_steps): - data[i,:] = sim_data[i,idx,field_idx_dict['By']] + data[i, :] = sim_data[i, idx, field_idx_dict["By"]] print(f"Data file contains {num_steps} time snapshots.") print(f"Spatial resolution is {resolution}") @@ -54,36 +54,48 @@ # Create the stack time plot fig, ax1 = plt.subplots(1, 1, figsize=(10, 5)) -max_val = np.max(np.abs(data[:,:]/sim.B0)) +max_val = np.max(np.abs(data[:, :] / sim.B0)) -extent = [0, sim.Lz/sim.l_i, 0, num_steps*dt*sim.w_ci] # num_steps*dt/sim.t_ci] +extent = [0, sim.Lz / sim.l_i, 0, num_steps * dt * sim.w_ci] # num_steps*dt/sim.t_ci] im = ax1.imshow( - data[:,:]/sim.B0, extent=extent, origin='lower', - cmap='seismic', vmin=-max_val, vmax=max_val, aspect="equal", + data[:, :] / sim.B0, + extent=extent, + origin="lower", + cmap="seismic", 
+ vmin=-max_val, + vmax=max_val, + aspect="equal", ) # Colorbar fig.subplots_adjust(right=0.825) cbar_ax = fig.add_axes([0.85, 0.2, 0.03, 0.6]) -fig.colorbar(im, cax=cbar_ax, orientation='vertical', label='$B_y/B_0$') +fig.colorbar(im, cax=cbar_ax, orientation="vertical", label="$B_y/B_0$") ax1.set_xlabel("$x/l_i$") ax1.set_ylabel("$t \Omega_i$ (rad)") ax1.set_title(f"Ion beam R instability - {resonant_str} case") -plt.savefig(f"diags/ion_beam_R_instability_{resonant_str}_eta_{sim.eta}_substeps_{sim.substeps}.png") +plt.savefig( + f"diags/ion_beam_R_instability_{resonant_str}_eta_{sim.eta}_substeps_{sim.substeps}.png" +) plt.close() if sim.resonant: - # Plot the 4th, 5th and 6th Fourier modes field_kt = np.fft.fft(data[:, :], axis=1) - k = 2*np.pi * np.fft.fftfreq(resolution, dz) * sim.l_i + k = 2 * np.pi * np.fft.fftfreq(resolution, dz) * sim.l_i - t_grid = np.arange(num_steps)*dt*sim.w_ci - plt.plot(t_grid, np.abs(field_kt[:, 4] / sim.B0), 'r', label=f'm = 4, $kl_i={k[4]:.2f}$') - plt.plot(t_grid, np.abs(field_kt[:, 5] / sim.B0), 'b', label=f'm = 5, $kl_i={k[5]:.2f}$') - plt.plot(t_grid, np.abs(field_kt[:, 6] / sim.B0), 'k', label=f'm = 6, $kl_i={k[6]:.2f}$') + t_grid = np.arange(num_steps) * dt * sim.w_ci + plt.plot( + t_grid, np.abs(field_kt[:, 4] / sim.B0), "r", label=f"m = 4, $kl_i={k[4]:.2f}$" + ) + plt.plot( + t_grid, np.abs(field_kt[:, 5] / sim.B0), "b", label=f"m = 5, $kl_i={k[5]:.2f}$" + ) + plt.plot( + t_grid, np.abs(field_kt[:, 6] / sim.B0), "k", label=f"m = 6, $kl_i={k[6]:.2f}$" + ) # The theoretical growth rates for the 4th, 5th and 6th Fourier modes of # the By-field was obtained from Fig. 12a of Munoz et al. 
@@ -97,94 +109,117 @@ idx = np.where((t_grid > 10) & (t_grid < 40)) t_points = t_grid[idx] - A4 = np.exp(np.mean(np.log(np.abs(field_kt[idx, 4] / sim.B0)) - t_points*gamma4)) - plt.plot(t_points, A4*np.exp(t_points*gamma4), 'r--', lw=3) - A5 = np.exp(np.mean(np.log(np.abs(field_kt[idx, 5] / sim.B0)) - t_points*gamma5)) - plt.plot(t_points, A5*np.exp(t_points*gamma5), 'b--', lw=3) - A6 = np.exp(np.mean(np.log(np.abs(field_kt[idx, 6] / sim.B0)) - t_points*gamma6)) - plt.plot(t_points, A6*np.exp(t_points*gamma6), 'k--', lw=3) + A4 = np.exp(np.mean(np.log(np.abs(field_kt[idx, 4] / sim.B0)) - t_points * gamma4)) + plt.plot(t_points, A4 * np.exp(t_points * gamma4), "r--", lw=3) + A5 = np.exp(np.mean(np.log(np.abs(field_kt[idx, 5] / sim.B0)) - t_points * gamma5)) + plt.plot(t_points, A5 * np.exp(t_points * gamma5), "b--", lw=3) + A6 = np.exp(np.mean(np.log(np.abs(field_kt[idx, 6] / sim.B0)) - t_points * gamma6)) + plt.plot(t_points, A6 * np.exp(t_points * gamma6), "k--", lw=3) plt.grid() plt.legend() - plt.yscale('log') - plt.ylabel('$|B_y/B_0|$') - plt.xlabel('$t\Omega_i$ (rad)') + plt.yscale("log") + plt.ylabel("$|B_y/B_0|$") + plt.xlabel("$t\Omega_i$ (rad)") plt.tight_layout() - plt.savefig(f"diags/ion_beam_R_instability_{resonant_str}_eta_{sim.eta}_substeps_{sim.substeps}_low_modes.png") + plt.savefig( + f"diags/ion_beam_R_instability_{resonant_str}_eta_{sim.eta}_substeps_{sim.substeps}_low_modes.png" + ) plt.close() # check if the growth rate matches expectation - m4_rms_error = np.sqrt(np.mean( - (np.abs(field_kt[idx, 4] / sim.B0) - A4*np.exp(t_points*gamma4))**2 - )) - m5_rms_error = np.sqrt(np.mean( - (np.abs(field_kt[idx, 5] / sim.B0) - A5*np.exp(t_points*gamma5))**2 - )) - m6_rms_error = np.sqrt(np.mean( - (np.abs(field_kt[idx, 6] / sim.B0) - A6*np.exp(t_points*gamma6))**2 - )) + m4_rms_error = np.sqrt( + np.mean( + (np.abs(field_kt[idx, 4] / sim.B0) - A4 * np.exp(t_points * gamma4)) ** 2 + ) + ) + m5_rms_error = np.sqrt( + np.mean( + (np.abs(field_kt[idx, 5] / 
sim.B0) - A5 * np.exp(t_points * gamma5)) ** 2 + ) + ) + m6_rms_error = np.sqrt( + np.mean( + (np.abs(field_kt[idx, 6] / sim.B0) - A6 * np.exp(t_points * gamma6)) ** 2 + ) + ) print("Growth rate RMS errors:") print(f" m = 4: {m4_rms_error:.3e}") print(f" m = 5: {m5_rms_error:.3e}") print(f" m = 6: {m6_rms_error:.3e}") if not sim.test: - with h5py.File('diags/Python_hybrid_PIC_plt/openpmd_004000.h5', 'r') as data: + with h5py.File("diags/Python_hybrid_PIC_plt/openpmd_004000.h5", "r") as data: + timestep = str(np.squeeze([key for key in data["data"].keys()])) - timestep = str(np.squeeze([key for key in data['data'].keys()])) - - z = np.array(data['data'][timestep]['particles']['ions']['position']['z']) - vy = np.array(data['data'][timestep]['particles']['ions']['momentum']['y']) - w = np.array(data['data'][timestep]['particles']['ions']['weighting']) + z = np.array(data["data"][timestep]["particles"]["ions"]["position"]["z"]) + vy = np.array(data["data"][timestep]["particles"]["ions"]["momentum"]["y"]) + w = np.array(data["data"][timestep]["particles"]["ions"]["weighting"]) fig, ax1 = plt.subplots(1, 1, figsize=(10, 5)) im = ax1.hist2d( - z/sim.l_i, vy/sim.M/sim.vA, weights=w, density=True, - range=[[0, 250], [-10, 10]], bins=250, cmin=1e-5 + z / sim.l_i, + vy / sim.M / sim.vA, + weights=w, + density=True, + range=[[0, 250], [-10, 10]], + bins=250, + cmin=1e-5, ) # Colorbar fig.subplots_adjust(bottom=0.15, right=0.815) cbar_ax = fig.add_axes([0.83, 0.2, 0.03, 0.6]) - fig.colorbar(im[3], cax=cbar_ax, orientation='vertical', format='%.0e', label='$f(z, v_y)$') + fig.colorbar( + im[3], cax=cbar_ax, orientation="vertical", format="%.0e", label="$f(z, v_y)$" + ) ax1.set_xlabel("$x/l_i$") ax1.set_ylabel("$v_{y}/v_A$") ax1.set_title(f"Ion beam R instability - {resonant_str} case") - plt.savefig(f"diags/ion_beam_R_instability_{resonant_str}_eta_{sim.eta}_substeps_{sim.substeps}_core_phase_space.png") + plt.savefig( + 
f"diags/ion_beam_R_instability_{resonant_str}_eta_{sim.eta}_substeps_{sim.substeps}_core_phase_space.png" + ) plt.close() - with h5py.File('diags/Python_hybrid_PIC_plt/openpmd_004000.h5', 'r') as data: - - timestep = str(np.squeeze([key for key in data['data'].keys()])) + with h5py.File("diags/Python_hybrid_PIC_plt/openpmd_004000.h5", "r") as data: + timestep = str(np.squeeze([key for key in data["data"].keys()])) - z = np.array(data['data'][timestep]['particles']['beam_ions']['position']['z']) - vy = np.array(data['data'][timestep]['particles']['beam_ions']['momentum']['y']) - w = np.array(data['data'][timestep]['particles']['beam_ions']['weighting']) + z = np.array(data["data"][timestep]["particles"]["beam_ions"]["position"]["z"]) + vy = np.array(data["data"][timestep]["particles"]["beam_ions"]["momentum"]["y"]) + w = np.array(data["data"][timestep]["particles"]["beam_ions"]["weighting"]) fig, ax1 = plt.subplots(1, 1, figsize=(10, 5)) im = ax1.hist2d( - z/sim.l_i, vy/sim.M/sim.vA, weights=w, density=True, - range=[[0, 250], [-10, 10]], bins=250, cmin=1e-5 + z / sim.l_i, + vy / sim.M / sim.vA, + weights=w, + density=True, + range=[[0, 250], [-10, 10]], + bins=250, + cmin=1e-5, ) # Colorbar fig.subplots_adjust(bottom=0.15, right=0.815) cbar_ax = fig.add_axes([0.83, 0.2, 0.03, 0.6]) - fig.colorbar(im[3], cax=cbar_ax, orientation='vertical', format='%.0e', label='$f(z, v_y)$') + fig.colorbar( + im[3], cax=cbar_ax, orientation="vertical", format="%.0e", label="$f(z, v_y)$" + ) ax1.set_xlabel("$x/l_i$") ax1.set_ylabel("$v_{y}/v_A$") ax1.set_title(f"Ion beam R instability - {resonant_str} case") - plt.savefig(f"diags/ion_beam_R_instability_{resonant_str}_eta_{sim.eta}_substeps_{sim.substeps}_beam_phase_space.png") + plt.savefig( + f"diags/ion_beam_R_instability_{resonant_str}_eta_{sim.eta}_substeps_{sim.substeps}_beam_phase_space.png" + ) plt.show() if sim.test: - # physics based check - these error tolerances are not set from theory # but from the errors that were 
present when the test was created. If these # assert's fail, the full benchmark should be rerun (same as the test but @@ -199,7 +234,8 @@ # checksum check import os import sys - sys.path.insert(1, '../../../../warpx/Regression/Checksum/') + + sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file diff --git a/Examples/Tests/ohm_solver_magnetic_reconnection/PICMI_inputs.py b/Examples/Tests/ohm_solver_magnetic_reconnection/PICMI_inputs.py index 556eb252856..b776c48f5ab 100644 --- a/Examples/Tests/ohm_solver_magnetic_reconnection/PICMI_inputs.py +++ b/Examples/Tests/ohm_solver_magnetic_reconnection/PICMI_inputs.py @@ -22,32 +22,28 @@ comm = mpi.COMM_WORLD -simulation = picmi.Simulation( - warpx_serialize_initial_conditions=True, - verbose=0 -) +simulation = picmi.Simulation(warpx_serialize_initial_conditions=True, verbose=0) class ForceFreeSheetReconnection(object): - # B0 is chosen with all other quantities scaled by it - B0 = 0.1 # Initial magnetic field strength (T) + B0 = 0.1 # Initial magnetic field strength (T) # Physical parameters - m_ion = 400.0 # Ion mass (electron masses) + m_ion = 400.0 # Ion mass (electron masses) beta_e = 0.1 - Bg = 0.3 # times B0 - guiding field - dB = 0.01 # times B0 - initial perturbation to seed reconnection + Bg = 0.3 # times B0 - guiding field + dB = 0.01 # times B0 - initial perturbation to seed reconnection - T_ratio = 5.0 # T_i / T_e + T_ratio = 5.0 # T_i / T_e # Domain parameters - LX = 40 # ion skin depths - LZ = 20 # ion skin depths + LX = 40 # ion skin depths + LZ = 20 # ion skin depths - LT = 50 # ion cyclotron periods - DT = 1e-3 # ion cyclotron periods + LT = 50 # ion cyclotron periods + DT = 1e-3 # ion cyclotron periods # Resolution parameters NX = 512 @@ -62,7 +58,6 @@ class ForceFreeSheetReconnection(object): substeps = 20 def __init__(self, test, verbose): - self.test = test self.verbose = verbose or self.test @@ -93,8 +88,7 @@ def __init__(self, test, 
verbose): f"*sin({np.pi/self.Lz}*z)" ) self.By = ( - f"sqrt({self.Bg**2 + self.B0**2}-" - f"({self.B0}*tanh(z*{1.0/self.l_i}))**2)" + f"sqrt({self.Bg**2 + self.B0**2}-" f"({self.B0}*tanh(z*{1.0/self.l_i}))**2)" ) self.Bz = f"{self.dB}*sin({2.0*np.pi/self.Lx}*x)*cos({np.pi/self.Lz}*z)" @@ -102,7 +96,7 @@ def __init__(self, test, verbose): # dump all the current attributes to a dill pickle file if comm.rank == 0: - with open('sim_parameters.dpkl', 'wb') as f: + with open("sim_parameters.dpkl", "wb") as f: dill.dump(self, f) # print out plasma parameters @@ -146,14 +140,10 @@ def get_plasma_quantities(self): self.w_pe = 2.0 * self.w_ce # calculate plasma density based on electron plasma frequency - self.n_plasma = ( - self.w_pe**2 * constants.m_e * constants.ep0 / constants.q_e**2 - ) + self.n_plasma = self.w_pe**2 * constants.m_e * constants.ep0 / constants.q_e**2 # Ion plasma frequency (Hz) - self.w_pi = np.sqrt( - constants.q_e**2 * self.n_plasma / (self.M * constants.ep0) - ) + self.w_pi = np.sqrt(constants.q_e**2 * self.n_plasma / (self.M * constants.ep0)) # Ion skin depth (m) self.l_i = constants.c / self.w_pi @@ -165,7 +155,9 @@ def get_plasma_quantities(self): # calculate Te based on beta self.Te = ( - self.beta_e * self.B0**2 / (2.0 * constants.mu0 * self.n_plasma) + self.beta_e + * self.B0**2 + / (2.0 * constants.mu0 * self.n_plasma) / constants.q_e ) self.Ti = self.Te * self.T_ratio @@ -190,17 +182,17 @@ def setup_run(self): # Create grid self.grid = picmi.Cartesian2DGrid( number_of_cells=[self.NX, self.NZ], - lower_bound=[-self.Lx/2.0, -self.Lz/2.0], - upper_bound=[self.Lx/2.0, self.Lz/2.0], - lower_boundary_conditions=['periodic', 'dirichlet'], - upper_boundary_conditions=['periodic', 'dirichlet'], - lower_boundary_conditions_particles=['periodic', 'reflecting'], - upper_boundary_conditions_particles=['periodic', 'reflecting'], - warpx_max_grid_size=self.NZ + lower_bound=[-self.Lx / 2.0, -self.Lz / 2.0], + upper_bound=[self.Lx / 2.0, self.Lz / 2.0], + 
lower_boundary_conditions=["periodic", "dirichlet"], + upper_boundary_conditions=["periodic", "dirichlet"], + lower_boundary_conditions_particles=["periodic", "reflecting"], + upper_boundary_conditions_particles=["periodic", "reflecting"], + warpx_max_grid_size=self.NZ, ) simulation.time_step_size = self.dt simulation.max_steps = self.total_steps - simulation.current_deposition_algo = 'direct' + simulation.current_deposition_algo = "direct" simulation.particle_shape = 1 simulation.use_filter = False simulation.verbose = self.verbose @@ -210,17 +202,18 @@ def setup_run(self): ####################################################################### self.solver = picmi.HybridPICSolver( - grid=self.grid, gamma=1.0, - Te=self.Te, n0=self.n_plasma, n_floor=0.1*self.n_plasma, - plasma_resistivity=self.eta*self.eta0, - substeps=self.substeps + grid=self.grid, + gamma=1.0, + Te=self.Te, + n0=self.n_plasma, + n_floor=0.1 * self.n_plasma, + plasma_resistivity=self.eta * self.eta0, + substeps=self.substeps, ) simulation.solver = self.solver B_ext = picmi.AnalyticInitialField( - Bx_expression=self.Bx, - By_expression=self.By, - Bz_expression=self.Bz + Bx_expression=self.Bx, By_expression=self.By, Bz_expression=self.Bz ) simulation.add_applied_field(B_ext) @@ -229,18 +222,19 @@ def setup_run(self): ####################################################################### self.ions = picmi.Species( - name='ions', charge='q_e', mass=self.M, + name="ions", + charge="q_e", + mass=self.M, initial_distribution=picmi.UniformDistribution( density=self.n_plasma, - rms_velocity=[self.vi_th]*3, - ) + rms_velocity=[self.vi_th] * 3, + ), ) simulation.add_species( self.ions, layout=picmi.PseudoRandomLayout( - grid=self.grid, - n_macroparticles_per_cell=self.NPPC - ) + grid=self.grid, n_macroparticles_per_cell=self.NPPC + ), ) ####################################################################### @@ -251,42 +245,44 @@ def setup_run(self): if self.test: particle_diag = picmi.ParticleDiagnostic( - 
name='diag1', + name="diag1", period=self.total_steps, - write_dir='.', + write_dir=".", species=[self.ions], - data_list=['ux', 'uy', 'uz', 'x', 'z', 'weighting'], - warpx_file_prefix='Python_ohms_law_solver_magnetic_reconnection_2d_plt', + data_list=["ux", "uy", "uz", "x", "z", "weighting"], + warpx_file_prefix="Python_ohms_law_solver_magnetic_reconnection_2d_plt", # warpx_format='openpmd', # warpx_openpmd_backend='h5', ) simulation.add_diagnostic(particle_diag) field_diag = picmi.FieldDiagnostic( - name='diag1', + name="diag1", grid=self.grid, period=self.total_steps, - data_list=['Bx', 'By', 'Bz', 'Ex', 'Ey', 'Ez'], - write_dir='.', - warpx_file_prefix='Python_ohms_law_solver_magnetic_reconnection_2d_plt', + data_list=["Bx", "By", "Bz", "Ex", "Ey", "Ez"], + write_dir=".", + warpx_file_prefix="Python_ohms_law_solver_magnetic_reconnection_2d_plt", # warpx_format='openpmd', # warpx_openpmd_backend='h5', ) simulation.add_diagnostic(field_diag) - # reduced diagnostics for reconnection rate calculation # create a 2 l_i box around the X-point on which to measure # magnetic flux changes plane = picmi.ReducedDiagnostic( diag_type="FieldProbe", - name='plane', + name="plane", period=self.diag_steps, - path='diags/', - extension='dat', - probe_geometry='Plane', + path="diags/", + extension="dat", + probe_geometry="Plane", resolution=60, - x_probe=0.0, z_probe=0.0, detector_radius=self.l_i, - target_up_x=0, target_up_z=1.0 + x_probe=0.0, + z_probe=0.0, + detector_radius=self.l_i, + target_up_x=0, + target_up_z=1.0, ) simulation.add_diagnostic(plane) @@ -304,13 +300,12 @@ def setup_run(self): simulation.initialize_warpx() def check_fields(self): - step = simulation.extension.warpx.getistep(lev=0) - 1 - if not (step == 1 or step%self.diag_steps == 0): + if not (step == 1 or step % self.diag_steps == 0): return - rho = fields.RhoFPWrapper(include_ghosts=False)[:,:] + rho = fields.RhoFPWrapper(include_ghosts=False)[:, :] Jiy = fields.JyFPWrapper(include_ghosts=False)[...] 
/ self.J0 Jy = fields.JyFPAmpereWrapper(include_ghosts=False)[...] / self.J0 Bx = fields.BxFPWrapper(include_ghosts=False)[...] / self.B0 @@ -321,23 +316,29 @@ def check_fields(self): return # save the fields to file - with open(f"diags/fields/fields_{step:06d}.npz", 'wb') as f: + with open(f"diags/fields/fields_{step:06d}.npz", "wb") as f: np.savez(f, rho=rho, Jiy=Jiy, Jy=Jy, Bx=Bx, By=By, Bz=Bz) + ########################## # parse input parameters ########################## parser = argparse.ArgumentParser() parser.add_argument( - '-t', '--test', help='toggle whether this script is run as a short CI test', - action='store_true', + "-t", + "--test", + help="toggle whether this script is run as a short CI test", + action="store_true", ) parser.add_argument( - '-v', '--verbose', help='Verbose output', action='store_true', + "-v", + "--verbose", + help="Verbose output", + action="store_true", ) args, left = parser.parse_known_args() -sys.argv = sys.argv[:1]+left +sys.argv = sys.argv[:1] + left run = ForceFreeSheetReconnection(test=args.test, verbose=args.verbose) simulation.step() diff --git a/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py b/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py index 23fc3ae2809..93d574e5294 100755 --- a/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py +++ b/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py @@ -9,10 +9,10 @@ import numpy as np from matplotlib import colors -plt.rcParams.update({'font.size': 20}) +plt.rcParams.update({"font.size": 20}) # load simulation parameters -with open('sim_parameters.dpkl', 'rb') as f: +with open("sim_parameters.dpkl", "rb") as f: sim = dill.load(f) x_idx = 2 @@ -20,11 +20,11 @@ Ey_idx = 6 Bx_idx = 8 -plane_data = np.loadtxt('diags/plane.dat', skiprows=1) +plane_data = np.loadtxt("diags/plane.dat", skiprows=1) -steps = np.unique(plane_data[:,0]) +steps = np.unique(plane_data[:, 0]) num_steps = len(steps) -num_cells = plane_data.shape[0]//num_steps 
+num_cells = plane_data.shape[0] // num_steps plane_data = plane_data.reshape((num_steps, num_cells, plane_data.shape[1])) @@ -33,13 +33,13 @@ plt.plot( times / sim.t_ci, - np.mean(plane_data[:,:,Ey_idx], axis=1) / (sim.vA * sim.B0), - 'o-' + np.mean(plane_data[:, :, Ey_idx], axis=1) / (sim.vA * sim.B0), + "o-", ) plt.grid() -plt.xlabel(r'$t/\tau_{c,i}$') -plt.ylabel('$/v_AB_0$') +plt.xlabel(r"$t/\tau_{c,i}$") +plt.ylabel("$/v_AB_0$") plt.title("Reconnection rate") plt.tight_layout() plt.savefig("diags/reconnection_rate.png") @@ -52,10 +52,10 @@ fig, axes = plt.subplots(3, 1, sharex=True, figsize=(7, 9)) for ax in axes.flatten(): - ax.set_aspect('equal') - ax.set_ylabel('$z/l_i$') + ax.set_aspect("equal") + ax.set_ylabel("$z/l_i$") - axes[2].set_xlabel('$x/l_i$') + axes[2].set_xlabel("$x/l_i$") datafiles = sorted(glob.glob("diags/fields/*.npz")) num_steps = len(datafiles) @@ -63,47 +63,52 @@ data0 = np.load(datafiles[0]) sX = axes[0].imshow( - data0['Jy'].T, origin='lower', - norm=colors.TwoSlopeNorm(vmin=-0.6, vcenter=0., vmax=1.6), - extent=[0, sim.LX, -sim.LZ/2, sim.LZ/2], - cmap=plt.cm.RdYlBu_r + data0["Jy"].T, + origin="lower", + norm=colors.TwoSlopeNorm(vmin=-0.6, vcenter=0.0, vmax=1.6), + extent=[0, sim.LX, -sim.LZ / 2, sim.LZ / 2], + cmap=plt.cm.RdYlBu_r, ) # axes[0].set_ylim(-5, 5) - cb = plt.colorbar(sX, ax=axes[0], label='$J_y/J_0$') - cb.ax.set_yscale('linear') + cb = plt.colorbar(sX, ax=axes[0], label="$J_y/J_0$") + cb.ax.set_yscale("linear") cb.ax.set_yticks([-0.5, 0.0, 0.75, 1.5]) sY = axes[1].imshow( - data0['By'].T, origin='lower', extent=[0, sim.LX, -sim.LZ/2, sim.LZ/2], - cmap=plt.cm.plasma + data0["By"].T, + origin="lower", + extent=[0, sim.LX, -sim.LZ / 2, sim.LZ / 2], + cmap=plt.cm.plasma, ) # axes[1].set_ylim(-5, 5) - cb = plt.colorbar(sY, ax=axes[1], label='$B_y/B_0$') - cb.ax.set_yscale('linear') + cb = plt.colorbar(sY, ax=axes[1], label="$B_y/B_0$") + cb.ax.set_yscale("linear") sZ = axes[2].imshow( - data0['Bz'].T, origin='lower', 
extent=[0, sim.LX, -sim.LZ/2, sim.LZ/2], + data0["Bz"].T, + origin="lower", + extent=[0, sim.LX, -sim.LZ / 2, sim.LZ / 2], # norm=colors.TwoSlopeNorm(vmin=-0.02, vcenter=0., vmax=0.02), - cmap=plt.cm.RdBu + cmap=plt.cm.RdBu, ) - cb = plt.colorbar(sZ, ax=axes[2], label='$B_z/B_0$') - cb.ax.set_yscale('linear') + cb = plt.colorbar(sZ, ax=axes[2], label="$B_z/B_0$") + cb.ax.set_yscale("linear") # plot field lines - x_grid = np.linspace(0, sim.LX, data0['Bx'][:-1].shape[0]) - z_grid = np.linspace(-sim.LZ/2.0, sim.LZ/2.0, data0['Bx'].shape[1]) + x_grid = np.linspace(0, sim.LX, data0["Bx"][:-1].shape[0]) + z_grid = np.linspace(-sim.LZ / 2.0, sim.LZ / 2.0, data0["Bx"].shape[1]) n_lines = 10 start_x = np.zeros(n_lines) - start_x[:n_lines//2] = sim.LX - start_z = np.linspace(-sim.LZ/2.0*0.9, sim.LZ/2.0*0.9, n_lines) + start_x[: n_lines // 2] = sim.LX + start_z = np.linspace(-sim.LZ / 2.0 * 0.9, sim.LZ / 2.0 * 0.9, n_lines) step_size = 1.0 / 100.0 def get_field_lines(Bx, Bz): field_line_coords = [] Bx_interp = interpolate.interp2d(x_grid, z_grid, Bx[:-1].T) - Bz_interp = interpolate.interp2d(x_grid, z_grid, Bz[:,:-1].T) + Bz_interp = interpolate.interp2d(x_grid, z_grid, Bz[:, :-1].T) for kk, z in enumerate(start_z): path_x = [start_x[kk]] @@ -111,7 +116,7 @@ def get_field_lines(Bx, Bz): ii = 0 while ii < 10000: - ii+=1 + ii += 1 Bx = Bx_interp(path_x[-1], path_z[-1])[0] Bz = Bz_interp(path_x[-1], path_z[-1])[0] @@ -128,7 +133,12 @@ def get_field_lines(Bx, Bz): x_new = path_x[-1] + dx z_new = path_z[-1] + dz - if np.isnan(x_new) or x_new <= 0 or x_new > sim.LX or abs(z_new) > sim.LZ/2: + if ( + np.isnan(x_new) + or x_new <= 0 + or x_new > sim.LX + or abs(z_new) > sim.LZ / 2 + ): break path_x.append(x_new) @@ -138,44 +148,48 @@ def get_field_lines(Bx, Bz): return field_line_coords field_lines = [] - for path in get_field_lines(data0['Bx'], data0['Bz']): + for path in get_field_lines(data0["Bx"], data0["Bz"]): path_x = path[0] path_z = path[1] - l, = axes[2].plot(path_x, 
path_z, '--', color='k') + (ln,) = axes[2].plot(path_x, path_z, "--", color="k") # draws arrows on the field lines # if path_x[10] > path_x[0]: axes[2].arrow( - path_x[50], path_z[50], - path_x[250]-path_x[50], path_z[250]-path_z[50], - shape='full', length_includes_head=True, lw=0, head_width=1.0, - color='g' + path_x[50], + path_z[50], + path_x[250] - path_x[50], + path_z[250] - path_z[50], + shape="full", + length_includes_head=True, + lw=0, + head_width=1.0, + color="g", ) - field_lines.append(l) + field_lines.append(ln) def animate(i): data = np.load(datafiles[i]) - sX.set_array(data['Jy'].T) - sY.set_array(data['By'].T) - sZ.set_array(data['Bz'].T) - sZ.set_clim(-np.max(abs(data['Bz'])), np.max(abs(data['Bz']))) + sX.set_array(data["Jy"].T) + sY.set_array(data["By"].T) + sZ.set_array(data["Bz"].T) + sZ.set_clim(-np.max(abs(data["Bz"])), np.max(abs(data["Bz"]))) - for ii, path in enumerate(get_field_lines(data['Bx'], data['Bz'])): + for ii, path in enumerate(get_field_lines(data["Bx"], data["Bz"])): path_x = path[0] path_z = path[1] field_lines[ii].set_data(path_x, path_z) - anim = FuncAnimation( - fig, animate, frames=num_steps-1, repeat=True - ) + anim = FuncAnimation(fig, animate, frames=num_steps - 1, repeat=True) writervideo = FFMpegWriter(fps=14) - anim.save('diags/mag_reconnection.mp4', writer=writervideo) + anim.save("diags/mag_reconnection.mp4", writer=writervideo) if sim.test: import os import sys - sys.path.insert(1, '../../../../warpx/Regression/Checksum/') + + sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file diff --git a/Examples/Tests/openbc_poisson_solver/analysis.py b/Examples/Tests/openbc_poisson_solver/analysis.py index dc7b5dca8bd..8d5be875c7a 100755 --- a/Examples/Tests/openbc_poisson_solver/analysis.py +++ b/Examples/Tests/openbc_poisson_solver/analysis.py @@ -8,7 +8,7 @@ from scipy.constants import epsilon_0, pi from scipy.special import erf -sys.path.insert(1, 
'../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI sigmaz = 300e-6 @@ -16,46 +16,52 @@ sigmay = 7.7e-9 Q = -3.2e-9 + def w(z): - return np.exp(-z**2) * ( 1 + erf(1.j*z) ) + return np.exp(-(z**2)) * (1 + erf(1.0j * z)) + def evaluate_E(x, y, z): - ''' Basseti-Erskine formula https://cds.cern.ch/record/122227/files/198005132.pdf ''' - den = np.sqrt(2*(sigmax**2-sigmay**2)) - arg1 = (x+1j*y)/den + """Basseti-Erskine formula https://cds.cern.ch/record/122227/files/198005132.pdf""" + den = np.sqrt(2 * (sigmax**2 - sigmay**2)) + arg1 = (x + 1j * y) / den term1 = w(arg1) - arg2 = (x*sigmay/sigmax + 1j*y*sigmax/sigmay)/den - term2 = -np.exp(-x**2/(2*sigmax**2)-y**2/(2*sigmay**2))*w(arg2) - factor = Q/(2.*np.sqrt(2.)*pi*epsilon_0*sigmaz*den)*np.exp(-z**2/(2*sigmaz**2)) - E_complex = factor*(term1 + term2) + arg2 = (x * sigmay / sigmax + 1j * y * sigmax / sigmay) / den + term2 = -np.exp(-(x**2) / (2 * sigmax**2) - y**2 / (2 * sigmay**2)) * w(arg2) + factor = ( + Q + / (2.0 * np.sqrt(2.0) * pi * epsilon_0 * sigmaz * den) + * np.exp(-(z**2) / (2 * sigmaz**2)) + ) + E_complex = factor * (term1 + term2) return E_complex.imag, E_complex.real fn = sys.argv[1] -path=os.path.join('diags', 'diag2') +path = os.path.join("diags", "diag2") ts = OpenPMDTimeSeries(path) -Ex, info = ts.get_field(field='E', coord='x', iteration=0, plot=False) -Ey, info = ts.get_field(field='E', coord='y', iteration=0, plot=False) +Ex, info = ts.get_field(field="E", coord="x", iteration=0, plot=False) +Ey, info = ts.get_field(field="E", coord="y", iteration=0, plot=False) grid_x = info.x[1:-1] grid_y = info.y[1:-1] grid_z = info.z[1:-1] -hnx = int(0.5*len(grid_x)) -hny = int(0.5*len(grid_y)) +hnx = int(0.5 * len(grid_x)) +hny = int(0.5 * len(grid_y)) # Compare theoretical and WarpX Ex, Ey fields for every z -for k, z in enumerate(grid_z,start=1): - Ex_warpx = Ex[k,hny,1:-1] - Ey_warpx = Ey[k,1:-1,hnx] +for k, z in enumerate(grid_z, 
start=1): + Ex_warpx = Ex[k, hny, 1:-1] + Ey_warpx = Ey[k, 1:-1, hnx] - Ex_theory = evaluate_E(grid_x, 0., z)[0] - Ey_theory = evaluate_E(0., grid_y, z)[1] + Ex_theory = evaluate_E(grid_x, 0.0, z)[0] + Ey_theory = evaluate_E(0.0, grid_y, z)[1] - assert(np.allclose(Ex_warpx, Ex_theory, rtol=0.032, atol=0)) - assert(np.allclose(Ey_warpx, Ey_theory, rtol=0.029, atol=0)) + assert np.allclose(Ex_warpx, Ex_theory, rtol=0.032, atol=0) + assert np.allclose(Ey_warpx, Ey_theory, rtol=0.029, atol=0) # Get name of the test diff --git a/Examples/Tests/openpmd_rz/analysis_openpmd_rz.py b/Examples/Tests/openpmd_rz/analysis_openpmd_rz.py index 247c4ac61a0..13dcd0016a9 100755 --- a/Examples/Tests/openpmd_rz/analysis_openpmd_rz.py +++ b/Examples/Tests/openpmd_rz/analysis_openpmd_rz.py @@ -5,31 +5,44 @@ series = io.Series("LaserAccelerationRZ_opmd_plt/openpmd_%T.h5", io.Access.read_only) -assert len(series.iterations) == 3, 'improper number of iterations stored' +assert len(series.iterations) == 3, "improper number of iterations stored" ii = series.iterations[20] -assert len(ii.meshes) == 8, 'improper number of meshes' +assert len(ii.meshes) == 8, "improper number of meshes" # select j_t -jt = ii.meshes['j']['t'] +jt = ii.meshes["j"]["t"] # this is in C (Python) order; r is the fastest varying index (Nm, Nz, Nr) = jt.shape -assert Nm == 3, 'Wrong number of angular modes stored or possible incorrect ordering when flushed' -assert Nr == 64, 'Wrong number of radial points stored or possible incorrect ordering when flushed' -assert Nz == 512, 'Wrong number of z points stored or possible incorrect ordering when flushed' - -assert ii.meshes['part_per_grid'][io.Mesh_Record_Component.SCALAR].shape == [512,64], 'problem with part_per_grid' -assert ii.meshes['rho_electrons'][io.Mesh_Record_Component.SCALAR].shape == [3, 512, 64], 'problem with rho_electrons' +assert ( + Nm == 3 +), "Wrong number of angular modes stored or possible incorrect ordering when flushed" +assert ( + Nr == 64 +), 
"Wrong number of radial points stored or possible incorrect ordering when flushed" +assert ( + Nz == 512 +), "Wrong number of z points stored or possible incorrect ordering when flushed" + +assert ii.meshes["part_per_grid"][io.Mesh_Record_Component.SCALAR].shape == [ + 512, + 64, +], "problem with part_per_grid" +assert ii.meshes["rho_electrons"][io.Mesh_Record_Component.SCALAR].shape == [ + 3, + 512, + 64, +], "problem with rho_electrons" ### test that openpmd+RZ ### 1. creates rho per species correctly ### 2. orders these appropriately -rhoe_mesh = ii.meshes['rho_electrons'] -rhob_mesh = ii.meshes['rho_beam'] +rhoe_mesh = ii.meshes["rho_electrons"] +rhob_mesh = ii.meshes["rho_beam"] dz, dr = rhoe_mesh.grid_spacing zmin, rmin = rhoe_mesh.grid_global_offset @@ -38,10 +51,12 @@ series.flush() nm, nz, nr = rhoe.shape zlist = zmin + dz * np.arange(nz) -rhoe0 = rhoe[0] # 0 mode -rhob0 = rhob[0] # 0 mode +rhoe0 = rhoe[0] # 0 mode +rhob0 = rhob[0] # 0 mode -electron_meanz = np.sum(np.dot(zlist, rhoe0))/ np.sum(rhoe0) -beam_meanz = np.sum(np.dot(zlist, rhob0))/ np.sum(rhob0) +electron_meanz = np.sum(np.dot(zlist, rhoe0)) / np.sum(rhoe0) +beam_meanz = np.sum(np.dot(zlist, rhob0)) / np.sum(rhob0) -assert ((electron_meanz > 0) and (beam_meanz < 0)), 'problem with openPMD+RZ. Maybe openPMD+RZ mixed up the order of rho_ diagnostics?' +assert ( + (electron_meanz > 0) and (beam_meanz < 0) +), "problem with openPMD+RZ. Maybe openPMD+RZ mixed up the order of rho_ diagnostics?" 
diff --git a/Examples/Tests/particle_boundary_interaction/PICMI_inputs_rz.py b/Examples/Tests/particle_boundary_interaction/PICMI_inputs_rz.py index df4a4579e2f..7d33de6b5dd 100644 --- a/Examples/Tests/particle_boundary_interaction/PICMI_inputs_rz.py +++ b/Examples/Tests/particle_boundary_interaction/PICMI_inputs_rz.py @@ -21,7 +21,7 @@ # --- grid nr = 64 -nz= 64 +nz = 64 rmin = 0.0 rmax = 2 @@ -33,36 +33,39 @@ ########################## grid = picmi.CylindricalGrid( - number_of_cells = [nr, nz], - n_azimuthal_modes = 1, - lower_bound = [rmin, zmin], - upper_bound = [rmax, zmax], - lower_boundary_conditions = ['none', 'dirichlet'], - upper_boundary_conditions = ['dirichlet', 'dirichlet'], - lower_boundary_conditions_particles = ['none', 'reflecting'], - upper_boundary_conditions_particles = ['absorbing', 'reflecting'] + number_of_cells=[nr, nz], + n_azimuthal_modes=1, + lower_bound=[rmin, zmin], + upper_bound=[rmax, zmax], + lower_boundary_conditions=["none", "dirichlet"], + upper_boundary_conditions=["dirichlet", "dirichlet"], + lower_boundary_conditions_particles=["none", "reflecting"], + upper_boundary_conditions_particles=["absorbing", "reflecting"], ) solver = picmi.ElectrostaticSolver( - grid=grid, method='Multigrid', - warpx_absolute_tolerance=1e-7 + grid=grid, method="Multigrid", warpx_absolute_tolerance=1e-7 ) embedded_boundary = picmi.EmbeddedBoundary( - implicit_function="-(x**2+y**2+z**2-radius**2)", - radius = 0.2 + implicit_function="-(x**2+y**2+z**2-radius**2)", radius=0.2 ) ########################## # physics components ########################## -#one particle -e_dist=picmi.ParticleListDistribution(x=0.0, y=0.0, z=-0.25, ux=0.5e10, uy=0.0, uz=1.0e10, weight=1) +# one particle +e_dist = picmi.ParticleListDistribution( + x=0.0, y=0.0, z=-0.25, ux=0.5e10, uy=0.0, uz=1.0e10, weight=1 +) electrons = picmi.Species( - particle_type='electron', name='electrons', initial_distribution=e_dist, warpx_save_particles_at_eb=1 + particle_type="electron", + 
name="electrons", + initial_distribution=e_dist, + warpx_save_particles_at_eb=1, ) ########################## @@ -70,21 +73,22 @@ ########################## field_diag = picmi.FieldDiagnostic( - name = 'diag1', - grid = grid, - period = diagnostic_interval, - data_list = ['Er', 'Ez', 'phi', 'rho','rho_electrons'], - warpx_format = 'openpmd', - write_dir = '.', - warpx_file_prefix = 'particle_boundary_interaction_plt' + name="diag1", + grid=grid, + period=diagnostic_interval, + data_list=["Er", "Ez", "phi", "rho", "rho_electrons"], + warpx_format="openpmd", + write_dir=".", + warpx_file_prefix="particle_boundary_interaction_plt", ) -part_diag = picmi.ParticleDiagnostic(name = 'diag1', - period = diagnostic_interval, - species = [electrons], - warpx_format = 'openpmd', - write_dir = '.', - warpx_file_prefix = 'particle_boundary_interaction_plt' +part_diag = picmi.ParticleDiagnostic( + name="diag1", + period=diagnostic_interval, + species=[electrons], + warpx_format="openpmd", + write_dir=".", + warpx_file_prefix="particle_boundary_interaction_plt", ) ########################## @@ -93,17 +97,15 @@ sim = picmi.Simulation( solver=solver, - time_step_size = dt, - max_steps = max_steps, + time_step_size=dt, + max_steps=max_steps, warpx_embedded_boundary=embedded_boundary, warpx_amrex_the_arena_is_managed=1, ) sim.add_species( electrons, - layout = picmi.GriddedLayout( - n_macroparticle_per_cell=[10, 1, 1], grid=grid - ) + layout=picmi.GriddedLayout(n_macroparticle_per_cell=[10, 1, 1], grid=grid), ) sim.add_diagnostic(part_diag) sim.add_diagnostic(field_diag) @@ -115,55 +117,67 @@ # python particle data access ########################## -def concat( list_of_arrays ): + +def concat(list_of_arrays): if len(list_of_arrays) == 0: # Return a 1d array of size 0 return np.empty(0) else: - return np.concatenate( list_of_arrays ) + return np.concatenate(list_of_arrays) def mirror_reflection(): - buffer = particle_containers.ParticleBoundaryBufferWrapper() #boundary buffer - - #STEP 
1: extract the different parameters of the boundary buffer (normal, time, position) - lev = 0 # level 0 (no mesh refinement here) - delta_t = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'deltaTimeScraped', lev)) - r = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'x', lev)) - theta = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'theta', lev)) - z = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'z', lev)) - x= r*np.cos(theta) #from RZ coordinates to 3D coordinates - y= r*np.sin(theta) - ux = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'ux', lev)) - uy = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'uy', lev)) - uz = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'uz', lev)) - w = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'w', lev)) - nx = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'nx', lev)) - ny = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'ny', lev)) - nz = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'nz', lev)) - - #STEP 2: use these parameters to inject particle from the same position in the plasma - elect_pc = particle_containers.ParticleContainerWrapper('electrons') #general particle container + buffer = particle_containers.ParticleBoundaryBufferWrapper() # boundary buffer + + # STEP 1: extract the different parameters of the boundary buffer (normal, time, position) + lev = 0 # level 0 (no mesh refinement here) + delta_t = concat( + buffer.get_particle_boundary_buffer("electrons", "eb", "deltaTimeScraped", lev) + ) + r = concat(buffer.get_particle_boundary_buffer("electrons", "eb", "x", lev)) + theta = concat(buffer.get_particle_boundary_buffer("electrons", "eb", "theta", lev)) + z = concat(buffer.get_particle_boundary_buffer("electrons", "eb", "z", lev)) + x = r * np.cos(theta) # from RZ coordinates to 3D coordinates + y = r * np.sin(theta) + ux = 
concat(buffer.get_particle_boundary_buffer("electrons", "eb", "ux", lev)) + uy = concat(buffer.get_particle_boundary_buffer("electrons", "eb", "uy", lev)) + uz = concat(buffer.get_particle_boundary_buffer("electrons", "eb", "uz", lev)) + w = concat(buffer.get_particle_boundary_buffer("electrons", "eb", "w", lev)) + nx = concat(buffer.get_particle_boundary_buffer("electrons", "eb", "nx", lev)) + ny = concat(buffer.get_particle_boundary_buffer("electrons", "eb", "ny", lev)) + nz = concat(buffer.get_particle_boundary_buffer("electrons", "eb", "nz", lev)) + + # STEP 2: use these parameters to inject particle from the same position in the plasma + elect_pc = particle_containers.ParticleContainerWrapper( + "electrons" + ) # general particle container ####this part is specific to the case of simple reflection. - un=ux*nx+uy*ny+uz*nz - ux_reflect=-2*un*nx+ux #for a "mirror reflection" u(sym)=-2(u.n)n+u - uy_reflect=-2*un*ny+uy - uz_reflect=-2*un*nz+uz + un = ux * nx + uy * ny + uz * nz + ux_reflect = -2 * un * nx + ux # for a "mirror reflection" u(sym)=-2(u.n)n+u + uy_reflect = -2 * un * ny + uy + uz_reflect = -2 * un * nz + uz elect_pc.add_particles( - x=x + (dt-delta_t)*ux_reflect, y=y + (dt-delta_t)*uy_reflect, z=z + (dt-delta_t)*uz_reflect, - ux=ux_reflect, uy=uy_reflect, uz=uz_reflect, - w=w - ) #adds the particle in the general particle container at the next step - #### Can be modified depending on the model of interaction. + x=x + (dt - delta_t) * ux_reflect, + y=y + (dt - delta_t) * uy_reflect, + z=z + (dt - delta_t) * uz_reflect, + ux=ux_reflect, + uy=uy_reflect, + uz=uz_reflect, + w=w, + ) # adds the particle in the general particle container at the next step + #### Can be modified depending on the model of interaction. 
+ + buffer.clear_buffer() # reinitialise the boundary buffer - buffer.clear_buffer() #reinitialise the boundary buffer -callbacks.installafterstep(mirror_reflection) #mirror_reflection is called at the next step - # using the new particle container modified at the last step +callbacks.installafterstep( + mirror_reflection +) # mirror_reflection is called at the next step +# using the new particle container modified at the last step ########################## # simulation run ########################## -sim.step(max_steps) #the whole process is done "max_steps" times +sim.step(max_steps) # the whole process is done "max_steps" times diff --git a/Examples/Tests/particle_boundary_interaction/analysis.py b/Examples/Tests/particle_boundary_interaction/analysis.py index ff83cc1fed7..9d8baf774b7 100755 --- a/Examples/Tests/particle_boundary_interaction/analysis.py +++ b/Examples/Tests/particle_boundary_interaction/analysis.py @@ -7,6 +7,7 @@ (0.5e10,0,1.0e10) with a time step of 1e-11. An input file PICMI_inputs_rz.py is used. 
""" + import os import sys @@ -15,37 +16,39 @@ from openpmd_viewer import OpenPMDTimeSeries yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # Open plotfile specified in command line filename = sys.argv[1] test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename, output_format='openpmd') +checksumAPI.evaluate_checksum(test_name, filename, output_format="openpmd") -ts = OpenPMDTimeSeries('./particle_boundary_interaction_plt') +ts = OpenPMDTimeSeries("./particle_boundary_interaction_plt") -it=ts.iterations -x,y,z=ts.get_particle(['x','y','z'], species='electrons', iteration=it[-1]) +it = ts.iterations +x, y, z = ts.get_particle(["x", "y", "z"], species="electrons", iteration=it[-1]) # Analytical results calculated -x_analytic=0.03532 -y_analytic=0.00000 -z_analytic=-0.20531 +x_analytic = 0.03532 +y_analytic = 0.00000 +z_analytic = -0.20531 -print('NUMERICAL coordinates of the point of contact:') -print('x=%5.5f, y=%5.5f, z=%5.5f' % (x[0], y[0], z[0])) -print('\n') -print('ANALYTICAL coordinates of the point of contact:') -print('x=%5.5f, y=%5.5f, z=%5.5f' % (x_analytic, y_analytic, z_analytic)) +print("NUMERICAL coordinates of the point of contact:") +print("x=%5.5f, y=%5.5f, z=%5.5f" % (x[0], y[0], z[0])) +print("\n") +print("ANALYTICAL coordinates of the point of contact:") +print("x=%5.5f, y=%5.5f, z=%5.5f" % (x_analytic, y_analytic, z_analytic)) -tolerance=1e-5 +tolerance = 1e-5 -diff_x=np.abs((x[0]-x_analytic)/x_analytic) -diff_z=np.abs((z[0]-z_analytic)/z_analytic) +diff_x = np.abs((x[0] - x_analytic) / x_analytic) +diff_z = np.abs((z[0] - z_analytic) / z_analytic) -print('\n') -print("percentage error for x = %5.4f %%" %(diff_x *100)) -print("percentage error for z = %5.4f %%" %(diff_z *100)) +print("\n") +print("percentage error for x = %5.4f %%" % (diff_x * 100)) +print("percentage error for z = %5.4f 
%%" % (diff_z * 100)) -assert (diff_x < tolerance) and (y[0] < 1e-8) and (diff_z < tolerance), 'Test particle_boundary_interaction did not pass' +assert ( + (diff_x < tolerance) and (y[0] < 1e-8) and (diff_z < tolerance) +), "Test particle_boundary_interaction did not pass" diff --git a/Examples/Tests/particle_boundary_process/PICMI_inputs_reflection.py b/Examples/Tests/particle_boundary_process/PICMI_inputs_reflection.py index bb1ebd73082..0803bc05d59 100755 --- a/Examples/Tests/particle_boundary_process/PICMI_inputs_reflection.py +++ b/Examples/Tests/particle_boundary_process/PICMI_inputs_reflection.py @@ -32,42 +32,45 @@ ########################## grid = picmi.Cartesian2DGrid( - number_of_cells = [nx, nz], - lower_bound = [xmin, zmin], - upper_bound = [xmax, zmax], - lower_boundary_conditions = ['dirichlet', 'dirichlet'], - upper_boundary_conditions = ['dirichlet', 'dirichlet'], - lower_boundary_conditions_particles = ['open', 'absorbing'], - upper_boundary_conditions_particles = ['open', 'absorbing'], - warpx_max_grid_size = 32 + number_of_cells=[nx, nz], + lower_bound=[xmin, zmin], + upper_bound=[xmax, zmax], + lower_boundary_conditions=["dirichlet", "dirichlet"], + upper_boundary_conditions=["dirichlet", "dirichlet"], + lower_boundary_conditions_particles=["open", "absorbing"], + upper_boundary_conditions_particles=["open", "absorbing"], + warpx_max_grid_size=32, ) solver = picmi.ElectrostaticSolver( - grid=grid, method='Multigrid', required_precision=1e-6, - warpx_self_fields_verbosity=0 + grid=grid, + method="Multigrid", + required_precision=1e-6, + warpx_self_fields_verbosity=0, ) -#embedded_boundary = picmi.EmbeddedBoundary( +# embedded_boundary = picmi.EmbeddedBoundary( # implicit_function="-max(max(x-12.5e-6,-12.5e-6-x),max(z+6.15e-5,-8.65e-5-z))" -#) +# ) ########################## # physics components ########################## uniform_plasma_elec = picmi.UniformDistribution( - density = 1e15, # number of electrons per m^3 - lower_bound = [-1e-5, 
-1e-5, -125e-6], - upper_bound = [1e-5, 1e-5, -120e-6], - directed_velocity = [0., 0., 5e6] # uth the std of the (unitless) momentum + density=1e15, # number of electrons per m^3 + lower_bound=[-1e-5, -1e-5, -125e-6], + upper_bound=[1e-5, 1e-5, -120e-6], + directed_velocity=[0.0, 0.0, 5e6], # uth the std of the (unitless) momentum ) electrons = picmi.Species( - particle_type='electron', name='electrons', + particle_type="electron", + name="electrons", initial_distribution=uniform_plasma_elec, warpx_save_particles_at_zhi=1, warpx_save_particles_at_zlo=1, - warpx_reflection_model_zhi="0.5" + warpx_reflection_model_zhi="0.5", ) ########################## @@ -75,18 +78,18 @@ ########################## particle_diag = picmi.ParticleDiagnostic( - name = 'diag1', - period = 10, - write_dir = '.', - warpx_file_prefix = 'Python_particle_reflection_plt' + name="diag1", + period=10, + write_dir=".", + warpx_file_prefix="Python_particle_reflection_plt", ) field_diag = picmi.FieldDiagnostic( grid=grid, - name = 'diag1', - data_list=['E'], - period = 10, - write_dir = '.', - warpx_file_prefix = 'Python_particle_reflection_plt' + name="diag1", + data_list=["E"], + period=10, + write_dir=".", + warpx_file_prefix="Python_particle_reflection_plt", ) ########################## @@ -94,18 +97,15 @@ ########################## sim = picmi.Simulation( - solver = solver, - time_step_size = dt, - max_steps = max_steps, + solver=solver, + time_step_size=dt, + max_steps=max_steps, # warpx_embedded_boundary=embedded_boundary, - verbose = 1 + verbose=1, ) sim.add_species( - electrons, - layout = picmi.GriddedLayout( - n_macroparticle_per_cell=[5, 2], grid=grid - ) + electrons, layout=picmi.GriddedLayout(n_macroparticle_per_cell=[5, 2], grid=grid) ) sim.add_diagnostic(particle_diag) sim.add_diagnostic(field_diag) @@ -123,20 +123,24 @@ buffer = particle_containers.ParticleBoundaryBufferWrapper() -n = buffer.get_particle_boundary_buffer_size("electrons", 'z_hi') +n = 
buffer.get_particle_boundary_buffer_size("electrons", "z_hi") print("Number of electrons in upper buffer:", n) assert n == 63 -n = buffer.get_particle_boundary_buffer_size("electrons", 'z_lo') +n = buffer.get_particle_boundary_buffer_size("electrons", "z_lo") print("Number of electrons in lower buffer:", n) assert n == 67 -scraped_steps = buffer.get_particle_boundary_buffer("electrons", 'z_hi', 'stepScraped', 0) +scraped_steps = buffer.get_particle_boundary_buffer( + "electrons", "z_hi", "stepScraped", 0 +) for arr in scraped_steps: # print(arr) assert all(arr == 4) -scraped_steps = buffer.get_particle_boundary_buffer("electrons", 'z_lo', 'stepScraped', 0) +scraped_steps = buffer.get_particle_boundary_buffer( + "electrons", "z_lo", "stepScraped", 0 +) for arr in scraped_steps: # print(arr) assert all(arr == 8) diff --git a/Examples/Tests/particle_boundary_process/analysis_absorption.py b/Examples/Tests/particle_boundary_process/analysis_absorption.py index 9029cc60214..47ef02937a7 100755 --- a/Examples/Tests/particle_boundary_process/analysis_absorption.py +++ b/Examples/Tests/particle_boundary_process/analysis_absorption.py @@ -10,10 +10,10 @@ # all particles are still there ds40 = yt.load("particle_absorption_plt000040") -np40 = ds40.index.particle_headers['electrons'].num_particles -assert(np40 == 612) +np40 = ds40.index.particle_headers["electrons"].num_particles +assert np40 == 612 # all particles have been removed ds60 = yt.load("particle_absorption_plt000060") -np60 = ds60.index.particle_headers['electrons'].num_particles -assert(np60 == 0) +np60 = ds60.index.particle_headers["electrons"].num_particles +assert np60 == 0 diff --git a/Examples/Tests/particle_boundary_process/analysis_reflection.py b/Examples/Tests/particle_boundary_process/analysis_reflection.py index a6402976549..1187a58e75d 100755 --- a/Examples/Tests/particle_boundary_process/analysis_reflection.py +++ b/Examples/Tests/particle_boundary_process/analysis_reflection.py @@ -9,7 +9,7 @@ import 
yt -plotfile = 'Python_particle_reflection_plt000010' -ds = yt.load( plotfile ) # noqa +plotfile = "Python_particle_reflection_plt000010" +ds = yt.load(plotfile) # noqa assert True diff --git a/Examples/Tests/particle_boundary_scrape/PICMI_inputs_scrape.py b/Examples/Tests/particle_boundary_scrape/PICMI_inputs_scrape.py index e5a9a58f597..02c22a4723d 100755 --- a/Examples/Tests/particle_boundary_scrape/PICMI_inputs_scrape.py +++ b/Examples/Tests/particle_boundary_scrape/PICMI_inputs_scrape.py @@ -34,16 +34,22 @@ ########################## uniform_plasma_elec = picmi.UniformDistribution( - density = 1e23, # number of electrons per m^3 - lower_bound = [-1e-5, -1e-5, -149e-6], - upper_bound = [1e-5, 1e-5, -129e-6], - directed_velocity = [0., 0., 2000.*picmi.constants.c] # uth the std of the (unitless) momentum + density=1e23, # number of electrons per m^3 + lower_bound=[-1e-5, -1e-5, -149e-6], + upper_bound=[1e-5, 1e-5, -129e-6], + directed_velocity=[ + 0.0, + 0.0, + 2000.0 * picmi.constants.c, + ], # uth the std of the (unitless) momentum ) electrons = picmi.Species( - particle_type='electron', name='electrons', + particle_type="electron", + name="electrons", initial_distribution=uniform_plasma_elec, - warpx_save_particles_at_xhi=1, warpx_save_particles_at_eb=1 + warpx_save_particles_at_xhi=1, + warpx_save_particles_at_eb=1, ) ########################## @@ -51,19 +57,17 @@ ########################## grid = picmi.Cartesian3DGrid( - number_of_cells = [nx, ny, nz], - lower_bound = [xmin, ymin, zmin], - upper_bound = [xmax, ymax, zmax], - lower_boundary_conditions=['none', 'none', 'none'], - upper_boundary_conditions=['none', 'none', 'none'], - lower_boundary_conditions_particles=['open', 'open', 'open'], - upper_boundary_conditions_particles=['open', 'open', 'open'], - warpx_max_grid_size = 32 + number_of_cells=[nx, ny, nz], + lower_bound=[xmin, ymin, zmin], + upper_bound=[xmax, ymax, zmax], + lower_boundary_conditions=["none", "none", "none"], + 
upper_boundary_conditions=["none", "none", "none"], + lower_boundary_conditions_particles=["open", "open", "open"], + upper_boundary_conditions_particles=["open", "open", "open"], + warpx_max_grid_size=32, ) -solver = picmi.ElectromagneticSolver( - grid=grid, cfl=cfl -) +solver = picmi.ElectromagneticSolver(grid=grid, cfl=cfl) embedded_boundary = picmi.EmbeddedBoundary( implicit_function="-max(max(max(x-12.5e-6,-12.5e-6-x),max(y-12.5e-6,-12.5e-6-y)),max(z-(-6.15e-5),-8.65e-5-z))" @@ -74,18 +78,18 @@ ########################## particle_diag = picmi.ParticleDiagnostic( - name = 'diag1', - period = diagnostic_intervals, - write_dir = '.', - warpx_file_prefix = 'Python_particle_scrape_plt' + name="diag1", + period=diagnostic_intervals, + write_dir=".", + warpx_file_prefix="Python_particle_scrape_plt", ) field_diag = picmi.FieldDiagnostic( - name = 'diag1', - grid = grid, - period = diagnostic_intervals, - data_list = ['Ex', 'Ey', 'Ez', 'Bx', 'By', 'Bz'], - write_dir = '.', - warpx_file_prefix = 'Python_particle_scrape_plt' + name="diag1", + grid=grid, + period=diagnostic_intervals, + data_list=["Ex", "Ey", "Ez", "Bx", "By", "Bz"], + write_dir=".", + warpx_file_prefix="Python_particle_scrape_plt", ) ########################## @@ -93,19 +97,16 @@ ########################## sim = picmi.Simulation( - solver = solver, - max_steps = max_steps, + solver=solver, + max_steps=max_steps, warpx_embedded_boundary=embedded_boundary, verbose=True, warpx_load_balance_intervals=40, - warpx_load_balance_efficiency_ratio_threshold=0.9 + warpx_load_balance_efficiency_ratio_threshold=0.9, ) sim.add_species( - electrons, - layout = picmi.GriddedLayout( - n_macroparticle_per_cell=[1, 1, 1], grid=grid - ) + electrons, layout=picmi.GriddedLayout(n_macroparticle_per_cell=[1, 1, 1], grid=grid) ) sim.add_diagnostic(particle_diag) @@ -129,22 +130,24 @@ particle_buffer = particle_containers.ParticleBoundaryBufferWrapper() -n = particle_buffer.get_particle_boundary_buffer_size("electrons", 'eb') +n 
= particle_buffer.get_particle_boundary_buffer_size("electrons", "eb") print(f"Number of electrons in buffer (proc #{my_id}): {n}") assert n == 612 -scraped_steps = particle_buffer.get_particle_boundary_buffer("electrons", 'eb', 'stepScraped', 0) +scraped_steps = particle_buffer.get_particle_boundary_buffer( + "electrons", "eb", "stepScraped", 0 +) for arr in scraped_steps: assert all(np.array(arr, copy=False) > 40) -weights = particle_buffer.get_particle_boundary_buffer("electrons", 'eb', 'w', 0) +weights = particle_buffer.get_particle_boundary_buffer("electrons", "eb", "w", 0) n = sum(len(arr) for arr in weights) print(f"Number of electrons in this proc's buffer (proc #{my_id}): {n}") -n_sum = mpi.COMM_WORLD.allreduce(n, op=mpi.SUM) +n_sum = mpi.COMM_WORLD.allreduce(n, op=mpi.SUM) assert n_sum == 612 particle_buffer.clear_buffer() # confirm that the buffer was cleared -n = particle_buffer.get_particle_boundary_buffer_size("electrons", 'eb') +n = particle_buffer.get_particle_boundary_buffer_size("electrons", "eb") print(f"Number of electrons in buffer (proc #{my_id}): {n}") assert n == 0 diff --git a/Examples/Tests/particle_boundary_scrape/analysis_scrape.py b/Examples/Tests/particle_boundary_scrape/analysis_scrape.py index bf1de62bf0f..cb737ebd5d6 100755 --- a/Examples/Tests/particle_boundary_scrape/analysis_scrape.py +++ b/Examples/Tests/particle_boundary_scrape/analysis_scrape.py @@ -16,8 +16,8 @@ else: filename = "Python_particle_scrape_plt000040" ds40 = yt.load(filename) -np40 = ds40.index.particle_headers['electrons'].num_particles -assert(np40 == 612) +np40 = ds40.index.particle_headers["electrons"].num_particles +assert np40 == 612 # all particles have been removed if Path("particle_scrape_plt000060").is_dir(): @@ -25,5 +25,5 @@ else: filename = "Python_particle_scrape_plt000060" ds60 = yt.load(filename) -np60 = ds60.index.particle_headers['electrons'].num_particles -assert(np60 == 0) +np60 = ds60.index.particle_headers["electrons"].num_particles +assert 
np60 == 0 diff --git a/Examples/Tests/particle_data_python/PICMI_inputs_2d.py b/Examples/Tests/particle_data_python/PICMI_inputs_2d.py index 572871b8ed5..4ef9e9b40ed 100755 --- a/Examples/Tests/particle_data_python/PICMI_inputs_2d.py +++ b/Examples/Tests/particle_data_python/PICMI_inputs_2d.py @@ -9,8 +9,10 @@ # Create the parser and add the argument parser = argparse.ArgumentParser() parser.add_argument( - '-u', '--unique', action='store_true', - help="Whether injected particles should be treated as unique" + "-u", + "--unique", + action="store_true", + help="Whether injected particles should be treated as unique", ) # Parse the input @@ -42,65 +44,57 @@ ########################## grid = picmi.Cartesian2DGrid( - number_of_cells = [nx, ny], - lower_bound = [xmin, ymin], - upper_bound = [xmax, ymax], - lower_boundary_conditions = ['dirichlet', 'periodic'], - upper_boundary_conditions = ['dirichlet', 'periodic'], - lower_boundary_conditions_particles = ['absorbing', 'periodic'], - upper_boundary_conditions_particles = ['absorbing', 'periodic'], - moving_window_velocity = None, - warpx_max_grid_size = 32 + number_of_cells=[nx, ny], + lower_bound=[xmin, ymin], + upper_bound=[xmax, ymax], + lower_boundary_conditions=["dirichlet", "periodic"], + upper_boundary_conditions=["dirichlet", "periodic"], + lower_boundary_conditions_particles=["absorbing", "periodic"], + upper_boundary_conditions_particles=["absorbing", "periodic"], + moving_window_velocity=None, + warpx_max_grid_size=32, ) solver = picmi.ElectrostaticSolver( - grid=grid, method='Multigrid', required_precision=1e-6, - warpx_self_fields_verbosity=0 + grid=grid, + method="Multigrid", + required_precision=1e-6, + warpx_self_fields_verbosity=0, ) ########################## # physics components ########################## -electrons = picmi.Species( - particle_type='electron', name='electrons' -) +electrons = picmi.Species(particle_type="electron", name="electrons") ########################## # diagnostics 
########################## particle_diag = picmi.ParticleDiagnostic( - name = 'diag1', - period = 10, - write_dir = '.', - warpx_file_prefix = f"Python_particle_attr_access_{'unique_' if args.unique else ''}plt" + name="diag1", + period=10, + write_dir=".", + warpx_file_prefix=f"Python_particle_attr_access_{'unique_' if args.unique else ''}plt", ) field_diag = picmi.FieldDiagnostic( - name = 'diag1', - grid = grid, - period = 10, - data_list = ['phi'], - write_dir = '.', - warpx_file_prefix = f"Python_particle_attr_access_{'unique_' if args.unique else ''}plt" + name="diag1", + grid=grid, + period=10, + data_list=["phi"], + write_dir=".", + warpx_file_prefix=f"Python_particle_attr_access_{'unique_' if args.unique else ''}plt", ) ########################## # simulation setup ########################## -sim = picmi.Simulation( - solver = solver, - time_step_size = dt, - max_steps = max_steps, - verbose = 1 -) +sim = picmi.Simulation(solver=solver, time_step_size=dt, max_steps=max_steps, verbose=1) sim.add_species( - electrons, - layout = picmi.GriddedLayout( - n_macroparticle_per_cell=[0, 0], grid=grid - ) + electrons, layout=picmi.GriddedLayout(n_macroparticle_per_cell=[0, 0], grid=grid) ) sim.add_diagnostic(particle_diag) sim.add_diagnostic(field_diag) @@ -116,13 +110,13 @@ # below will be reproducible from run to run np.random.seed(30025025) -elec_wrapper = particle_containers.ParticleContainerWrapper('electrons') -elec_wrapper.add_real_comp('newPid') +elec_wrapper = particle_containers.ParticleContainerWrapper("electrons") +elec_wrapper.add_real_comp("newPid") my_id = libwarpx.amr.ParallelDescriptor.MyProc() -def add_particles(): +def add_particles(): nps = 10 * (my_id + 1) x = np.linspace(0.005, 0.025, nps) y = np.zeros(nps) @@ -134,11 +128,18 @@ def add_particles(): newPid = 5.0 elec_wrapper.add_particles( - x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, - w=w, newPid=newPid, - unique_particles=args.unique + x=x, + y=y, + z=z, + ux=ux, + uy=uy, + uz=uz, + w=w, + 
newPid=newPid, + unique_particles=args.unique, ) + callbacks.installbeforestep(add_particles) ########################## @@ -152,11 +153,11 @@ def add_particles(): # are properly set ########################## -assert (elec_wrapper.nps == 270 / (2 - args.unique)) -assert (elec_wrapper.particle_container.get_comp_index('w') == 2) -assert (elec_wrapper.particle_container.get_comp_index('newPid') == 6) +assert elec_wrapper.nps == 270 / (2 - args.unique) +assert elec_wrapper.particle_container.get_comp_index("w") == 2 +assert elec_wrapper.particle_container.get_comp_index("newPid") == 6 -new_pid_vals = elec_wrapper.get_particle_real_arrays('newPid', 0) +new_pid_vals = elec_wrapper.get_particle_real_arrays("newPid", 0) for vals in new_pid_vals: assert np.allclose(vals, 5) diff --git a/Examples/Tests/particle_data_python/PICMI_inputs_prev_pos_2d.py b/Examples/Tests/particle_data_python/PICMI_inputs_prev_pos_2d.py index 5de9879f0f8..97a4619e314 100755 --- a/Examples/Tests/particle_data_python/PICMI_inputs_prev_pos_2d.py +++ b/Examples/Tests/particle_data_python/PICMI_inputs_prev_pos_2d.py @@ -34,20 +34,22 @@ ########################## grid = picmi.Cartesian2DGrid( - number_of_cells = [nx, nz], - lower_bound = [xmin, zmin], - upper_bound = [xmax, zmax], - lower_boundary_conditions = ['dirichlet', 'periodic'], - upper_boundary_conditions = ['dirichlet', 'periodic'], - lower_boundary_conditions_particles = ['absorbing', 'periodic'], - upper_boundary_conditions_particles = ['absorbing', 'periodic'], - moving_window_velocity = None, - warpx_max_grid_size = 32 + number_of_cells=[nx, nz], + lower_bound=[xmin, zmin], + upper_bound=[xmax, zmax], + lower_boundary_conditions=["dirichlet", "periodic"], + upper_boundary_conditions=["dirichlet", "periodic"], + lower_boundary_conditions_particles=["absorbing", "periodic"], + upper_boundary_conditions_particles=["absorbing", "periodic"], + moving_window_velocity=None, + warpx_max_grid_size=32, ) solver = picmi.ElectrostaticSolver( - 
grid=grid, method='Multigrid', required_precision=1e-6, - warpx_self_fields_verbosity=0 + grid=grid, + method="Multigrid", + required_precision=1e-6, + warpx_self_fields_verbosity=0, ) ########################## @@ -55,16 +57,17 @@ ########################## uniform_plasma_elec = picmi.UniformDistribution( - density = 1e15, - upper_bound = [None] * 3, - rms_velocity = [np.sqrt(constants.kb * 1e3 / constants.m_e)] * 3, - directed_velocity = [0.] * 3 + density=1e15, + upper_bound=[None] * 3, + rms_velocity=[np.sqrt(constants.kb * 1e3 / constants.m_e)] * 3, + directed_velocity=[0.0] * 3, ) electrons = picmi.Species( - particle_type='electron', name='electrons', + particle_type="electron", + name="electrons", initial_distribution=uniform_plasma_elec, - warpx_save_previous_position=True + warpx_save_previous_position=True, ) ########################## @@ -72,36 +75,28 @@ ########################## part_diag = picmi.ParticleDiagnostic( - name = 'diag1', - period = 10, + name="diag1", + period=10, species=[electrons], - write_dir = '.', - warpx_file_prefix = 'Python_prev_positions_plt' + write_dir=".", + warpx_file_prefix="Python_prev_positions_plt", ) field_diag = picmi.FieldDiagnostic( - name = 'diag1', - data_list=['Bx', 'By', 'Bz', 'Ex', 'Ey', 'Ez', 'Jx', 'Jy', 'Jz'], - period = 10, + name="diag1", + data_list=["Bx", "By", "Bz", "Ex", "Ey", "Ez", "Jx", "Jy", "Jz"], + period=10, grid=grid, - write_dir = '.', - warpx_file_prefix = 'Python_prev_positions_plt' + write_dir=".", + warpx_file_prefix="Python_prev_positions_plt", ) ########################## # simulation setup ########################## -sim = picmi.Simulation( - solver = solver, - time_step_size = dt, - max_steps = max_steps, - verbose = 1 -) +sim = picmi.Simulation(solver=solver, time_step_size=dt, max_steps=max_steps, verbose=1) sim.add_species( - electrons, - layout = picmi.GriddedLayout( - n_macroparticle_per_cell=[1, 1], grid=grid - ) + electrons, layout=picmi.GriddedLayout(n_macroparticle_per_cell=[1, 
1], grid=grid) ) sim.add_diagnostic(part_diag) sim.add_diagnostic(field_diag) @@ -116,16 +111,16 @@ # exist ########################## -elec_wrapper = particle_containers.ParticleContainerWrapper('electrons') +elec_wrapper = particle_containers.ParticleContainerWrapper("electrons") elec_count = elec_wrapper.nps # check that the runtime attributes have the right indices -assert (elec_wrapper.particle_container.get_comp_index('prev_x') == 6) -assert (elec_wrapper.particle_container.get_comp_index('prev_z') == 7) +assert elec_wrapper.particle_container.get_comp_index("prev_x") == 6 +assert elec_wrapper.particle_container.get_comp_index("prev_z") == 7 # sanity check that the prev_z values are reasonable and # that the correct number of values are returned -prev_z_vals = elec_wrapper.get_particle_real_arrays('prev_z', 0) +prev_z_vals = elec_wrapper.get_particle_real_arrays("prev_z", 0) running_count = 0 for z_vals in prev_z_vals: diff --git a/Examples/Tests/particle_fields_diags/analysis_particle_diags.py b/Examples/Tests/particle_fields_diags/analysis_particle_diags.py index 3a77cbfc571..78ed9137e79 100755 --- a/Examples/Tests/particle_fields_diags/analysis_particle_diags.py +++ b/Examples/Tests/particle_fields_diags/analysis_particle_diags.py @@ -13,4 +13,4 @@ import analysis_particle_diags_impl as an -an.do_analysis(single_precision = False) +an.do_analysis(single_precision=False) diff --git a/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py b/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py index 5e54fc42d87..a7c84b05459 100755 --- a/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py +++ b/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py @@ -19,23 +19,25 @@ import yt from scipy.constants import c, e, m_e, m_p -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI -def do_analysis(single_precision = 
False): +def do_analysis(single_precision=False): fn = sys.argv[1] ds = yt.load(fn) ad = ds.all_data() - ad0 = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) + ad0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions + ) - opmd = io.Series('diags/openpmd/openpmd_%T.h5', io.Access.read_only) + opmd = io.Series("diags/openpmd/openpmd_%T.h5", io.Access.read_only) opmd_i = opmd.iterations[200] - #-------------------------------------------------------------------------------------------------- + # -------------------------------------------------------------------------------------------------- # Part 1: get results from plotfiles (label '_yt') - #-------------------------------------------------------------------------------------------------- + # -------------------------------------------------------------------------------------------------- # Quantities computed from plotfiles values_yt = dict() @@ -44,11 +46,11 @@ def do_analysis(single_precision = False): dx = domain_size / ds.domain_dimensions # Electrons - x = ad['electrons', 'particle_position_x'].to_ndarray() - y = ad['electrons', 'particle_position_y'].to_ndarray() - z = ad['electrons', 'particle_position_z'].to_ndarray() - uz = ad['electrons', 'particle_momentum_z'].to_ndarray() / m_e / c - w = ad['electrons', 'particle_weight'].to_ndarray() + x = ad["electrons", "particle_position_x"].to_ndarray() + y = ad["electrons", "particle_position_y"].to_ndarray() + z = ad["electrons", "particle_position_z"].to_ndarray() + uz = ad["electrons", "particle_momentum_z"].to_ndarray() / m_e / c + w = ad["electrons", "particle_weight"].to_ndarray() filt = uz < 0 x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int) @@ -63,27 +65,27 @@ def do_analysis(single_precision = False): wavg_filt = np.zeros(ds.domain_dimensions) for i_p in range(len(x)): - zavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * w[i_p] - 
uzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p] - zuzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p] - wavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p] - uzavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p] - wavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p] * filt[i_p] + zavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * w[i_p] + uzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += uz[i_p] * w[i_p] + zuzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p] + wavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p] + uzavg_filt[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p] + wavg_filt[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p] * filt[i_p] wavg_adj = np.where(wavg == 0, 1, wavg) wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt) - values_yt['electrons: zavg'] = zavg / wavg_adj - values_yt['electrons: uzavg'] = uzavg / wavg_adj - values_yt['electrons: zuzavg'] = zuzavg / wavg_adj - values_yt['electrons: uzavg_filt'] = uzavg_filt / wavg_filt_adj - values_yt['electrons: jz'] = e*uzavg + values_yt["electrons: zavg"] = zavg / wavg_adj + values_yt["electrons: uzavg"] = uzavg / wavg_adj + values_yt["electrons: zuzavg"] = zuzavg / wavg_adj + values_yt["electrons: uzavg_filt"] = uzavg_filt / wavg_filt_adj + values_yt["electrons: jz"] = e * uzavg # protons - x = ad['protons', 'particle_position_x'].to_ndarray() - y = ad['protons', 'particle_position_y'].to_ndarray() - z = ad['protons', 'particle_position_z'].to_ndarray() - uz = ad['protons', 'particle_momentum_z'].to_ndarray() / m_p / c - w = ad['protons', 'particle_weight'].to_ndarray() + x = ad["protons", "particle_position_x"].to_ndarray() + y = ad["protons", "particle_position_y"].to_ndarray() + z = ad["protons", "particle_position_z"].to_ndarray() + uz = ad["protons", "particle_momentum_z"].to_ndarray() / m_p / c + w = ad["protons", "particle_weight"].to_ndarray() filt = uz < 0 x_ind = ((x - ds.domain_left_edge[0].value) / 
dx[0]).astype(int) @@ -98,27 +100,27 @@ def do_analysis(single_precision = False): wavg_filt = np.zeros(ds.domain_dimensions) for i_p in range(len(x)): - zavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * w[i_p] - uzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p] - zuzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p] - wavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p] - uzavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p] - wavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p] * filt[i_p] + zavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * w[i_p] + uzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += uz[i_p] * w[i_p] + zuzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p] + wavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p] + uzavg_filt[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p] + wavg_filt[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p] * filt[i_p] wavg_adj = np.where(wavg == 0, 1, wavg) wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt) - values_yt['protons: zavg'] = zavg / wavg_adj - values_yt['protons: uzavg'] = uzavg / wavg_adj - values_yt['protons: zuzavg'] = zuzavg / wavg_adj - values_yt['protons: uzavg_filt'] = uzavg_filt / wavg_filt_adj - values_yt['protons: jz'] = e*uzavg + values_yt["protons: zavg"] = zavg / wavg_adj + values_yt["protons: uzavg"] = uzavg / wavg_adj + values_yt["protons: zuzavg"] = zuzavg / wavg_adj + values_yt["protons: uzavg_filt"] = uzavg_filt / wavg_filt_adj + values_yt["protons: jz"] = e * uzavg # Photons (momentum in units of m_e c) - x = ad['photons', 'particle_position_x'].to_ndarray() - y = ad['photons', 'particle_position_y'].to_ndarray() - z = ad['photons', 'particle_position_z'].to_ndarray() - uz = ad['photons', 'particle_momentum_z'].to_ndarray() / m_e / c - w = ad['photons', 'particle_weight'].to_ndarray() + x = ad["photons", "particle_position_x"].to_ndarray() + y = ad["photons", "particle_position_y"].to_ndarray() + z = ad["photons", 
"particle_position_z"].to_ndarray() + uz = ad["photons", "particle_momentum_z"].to_ndarray() / m_e / c + w = ad["photons", "particle_weight"].to_ndarray() filt = uz < 0 x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int) @@ -133,72 +135,95 @@ def do_analysis(single_precision = False): wavg_filt = np.zeros(ds.domain_dimensions) for i_p in range(len(x)): - zavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * w[i_p] - uzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p] - zuzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p] - wavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p] - uzavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p] - wavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p] * filt[i_p] + zavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * w[i_p] + uzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += uz[i_p] * w[i_p] + zuzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p] + wavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p] + uzavg_filt[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p] + wavg_filt[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p] * filt[i_p] wavg_adj = np.where(wavg == 0, 1, wavg) wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt) - values_yt['photons: zavg'] = zavg / wavg_adj - values_yt['photons: uzavg'] = uzavg / wavg_adj - values_yt['photons: zuzavg'] = zuzavg / wavg_adj - values_yt['photons: uzavg_filt'] = uzavg_filt / wavg_filt_adj - values_yt['photons: jz'] = e*uzavg - + values_yt["photons: zavg"] = zavg / wavg_adj + values_yt["photons: uzavg"] = uzavg / wavg_adj + values_yt["photons: zuzavg"] = zuzavg / wavg_adj + values_yt["photons: uzavg_filt"] = uzavg_filt / wavg_filt_adj + values_yt["photons: jz"] = e * uzavg values_rd = dict() # Load reduced particle diagnostic data from plotfiles - values_rd['electrons: zavg'] = ad0[('boxlib','z_electrons')] - values_rd['protons: zavg'] = ad0[('boxlib','z_protons')] - values_rd['photons: zavg'] = 
ad0[('boxlib','z_photons')] + values_rd["electrons: zavg"] = ad0[("boxlib", "z_electrons")] + values_rd["protons: zavg"] = ad0[("boxlib", "z_protons")] + values_rd["photons: zavg"] = ad0[("boxlib", "z_photons")] - values_rd['electrons: uzavg'] = ad0[('boxlib','uz_electrons')] - values_rd['protons: uzavg'] = ad0[('boxlib','uz_protons')] - values_rd['photons: uzavg'] = ad0[('boxlib','uz_photons')] + values_rd["electrons: uzavg"] = ad0[("boxlib", "uz_electrons")] + values_rd["protons: uzavg"] = ad0[("boxlib", "uz_protons")] + values_rd["photons: uzavg"] = ad0[("boxlib", "uz_photons")] - values_rd['electrons: zuzavg'] = ad0[('boxlib','zuz_electrons')] - values_rd['protons: zuzavg'] = ad0[('boxlib','zuz_protons')] - values_rd['photons: zuzavg'] = ad0[('boxlib','zuz_photons')] + values_rd["electrons: zuzavg"] = ad0[("boxlib", "zuz_electrons")] + values_rd["protons: zuzavg"] = ad0[("boxlib", "zuz_protons")] + values_rd["photons: zuzavg"] = ad0[("boxlib", "zuz_photons")] - values_rd['electrons: uzavg_filt'] = ad0[('boxlib','uz_filt_electrons')] - values_rd['protons: uzavg_filt'] = ad0[('boxlib','uz_filt_protons')] - values_rd['photons: uzavg_filt'] = ad0[('boxlib','uz_filt_photons')] + values_rd["electrons: uzavg_filt"] = ad0[("boxlib", "uz_filt_electrons")] + values_rd["protons: uzavg_filt"] = ad0[("boxlib", "uz_filt_protons")] + values_rd["photons: uzavg_filt"] = ad0[("boxlib", "uz_filt_photons")] - values_rd['electrons: jz'] = ad0[('boxlib','jz_electrons')] - values_rd['protons: jz'] = ad0[('boxlib','jz_protons')] - values_rd['photons: jz'] = ad0[('boxlib','jz_photons')] + values_rd["electrons: jz"] = ad0[("boxlib", "jz_electrons")] + values_rd["protons: jz"] = ad0[("boxlib", "jz_protons")] + values_rd["photons: jz"] = ad0[("boxlib", "jz_photons")] values_opmd = dict() # Load reduced particle diagnostic data from OPMD output - values_opmd['electrons: zavg'] = opmd_i.meshes['z_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk() - values_opmd['protons: zavg'] = 
opmd_i.meshes['z_protons'][io.Mesh_Record_Component.SCALAR].load_chunk() - values_opmd['photons: zavg'] = opmd_i.meshes['z_photons'][io.Mesh_Record_Component.SCALAR].load_chunk() - - values_opmd['electrons: uzavg'] = opmd_i.meshes['uz_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk() - values_opmd['protons: uzavg'] = opmd_i.meshes['uz_protons'][io.Mesh_Record_Component.SCALAR].load_chunk() - values_opmd['photons: uzavg'] = opmd_i.meshes['uz_photons'][io.Mesh_Record_Component.SCALAR].load_chunk() - - values_opmd['electrons: zuzavg'] = opmd_i.meshes['zuz_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk() - values_opmd['protons: zuzavg'] = opmd_i.meshes['zuz_protons'][io.Mesh_Record_Component.SCALAR].load_chunk() - values_opmd['photons: zuzavg'] = opmd_i.meshes['zuz_photons'][io.Mesh_Record_Component.SCALAR].load_chunk() - - values_opmd['electrons: uzavg_filt'] = opmd_i.meshes['uz_filt_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk() - values_opmd['protons: uzavg_filt'] = opmd_i.meshes['uz_filt_protons'][io.Mesh_Record_Component.SCALAR].load_chunk() - values_opmd['photons: uzavg_filt'] = opmd_i.meshes['uz_filt_photons'][io.Mesh_Record_Component.SCALAR].load_chunk() - - values_opmd['electrons: jz'] = opmd_i.meshes['j_electrons']['z'].load_chunk() - values_opmd['protons: jz'] = opmd_i.meshes['j_protons']['z'].load_chunk() - values_opmd['photons: jz'] = opmd_i.meshes['j_photons']['z'].load_chunk() + values_opmd["electrons: zavg"] = opmd_i.meshes["z_electrons"][ + io.Mesh_Record_Component.SCALAR + ].load_chunk() + values_opmd["protons: zavg"] = opmd_i.meshes["z_protons"][ + io.Mesh_Record_Component.SCALAR + ].load_chunk() + values_opmd["photons: zavg"] = opmd_i.meshes["z_photons"][ + io.Mesh_Record_Component.SCALAR + ].load_chunk() + + values_opmd["electrons: uzavg"] = opmd_i.meshes["uz_electrons"][ + io.Mesh_Record_Component.SCALAR + ].load_chunk() + values_opmd["protons: uzavg"] = opmd_i.meshes["uz_protons"][ + io.Mesh_Record_Component.SCALAR + 
].load_chunk() + values_opmd["photons: uzavg"] = opmd_i.meshes["uz_photons"][ + io.Mesh_Record_Component.SCALAR + ].load_chunk() + + values_opmd["electrons: zuzavg"] = opmd_i.meshes["zuz_electrons"][ + io.Mesh_Record_Component.SCALAR + ].load_chunk() + values_opmd["protons: zuzavg"] = opmd_i.meshes["zuz_protons"][ + io.Mesh_Record_Component.SCALAR + ].load_chunk() + values_opmd["photons: zuzavg"] = opmd_i.meshes["zuz_photons"][ + io.Mesh_Record_Component.SCALAR + ].load_chunk() + + values_opmd["electrons: uzavg_filt"] = opmd_i.meshes["uz_filt_electrons"][ + io.Mesh_Record_Component.SCALAR + ].load_chunk() + values_opmd["protons: uzavg_filt"] = opmd_i.meshes["uz_filt_protons"][ + io.Mesh_Record_Component.SCALAR + ].load_chunk() + values_opmd["photons: uzavg_filt"] = opmd_i.meshes["uz_filt_photons"][ + io.Mesh_Record_Component.SCALAR + ].load_chunk() + + values_opmd["electrons: jz"] = opmd_i.meshes["j_electrons"]["z"].load_chunk() + values_opmd["protons: jz"] = opmd_i.meshes["j_protons"]["z"].load_chunk() + values_opmd["photons: jz"] = opmd_i.meshes["j_photons"]["z"].load_chunk() opmd.flush() del opmd - #-------------------------------------------------------------------------------------------------- + # -------------------------------------------------------------------------------------------------- # Part 3: compare values from plotfiles and diagnostics and print output - #-------------------------------------------------------------------------------------------------- + # -------------------------------------------------------------------------------------------------- error_plt = dict() error_opmd = dict() @@ -208,16 +233,20 @@ def do_analysis(single_precision = False): for k in values_yt.keys(): # check that the zeros line up, since we'll be ignoring them in the error calculation - assert(np.all((values_yt[k] == 0) == (values_rd[k] == 0))) - error_plt[k] = np.max(abs(values_yt[k] - values_rd[k])[values_yt[k] != 0] / abs(values_yt[k])[values_yt[k] != 0]) - 
print(k, 'relative error plotfile = ', error_plt[k]) - assert(error_plt[k] < tolerance) - assert(np.all((values_yt[k] == 0) == (values_opmd[k].T == 0))) - error_opmd[k] = np.max(abs(values_yt[k] - values_opmd[k].T)[values_yt[k] != 0] / abs(values_yt[k])[values_yt[k] != 0]) - assert(error_opmd[k] < tolerance) - print(k, 'relative error openPMD = ', error_opmd[k]) - - + assert np.all((values_yt[k] == 0) == (values_rd[k] == 0)) + error_plt[k] = np.max( + abs(values_yt[k] - values_rd[k])[values_yt[k] != 0] + / abs(values_yt[k])[values_yt[k] != 0] + ) + print(k, "relative error plotfile = ", error_plt[k]) + assert error_plt[k] < tolerance + assert np.all((values_yt[k] == 0) == (values_opmd[k].T == 0)) + error_opmd[k] = np.max( + abs(values_yt[k] - values_opmd[k].T)[values_yt[k] != 0] + / abs(values_yt[k])[values_yt[k] != 0] + ) + assert error_opmd[k] < tolerance + print(k, "relative error openPMD = ", error_opmd[k]) test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, fn, rtol=check_tolerance) diff --git a/Examples/Tests/particle_fields_diags/analysis_particle_diags_single.py b/Examples/Tests/particle_fields_diags/analysis_particle_diags_single.py index 56d98831e66..7efbe4e39d4 100755 --- a/Examples/Tests/particle_fields_diags/analysis_particle_diags_single.py +++ b/Examples/Tests/particle_fields_diags/analysis_particle_diags_single.py @@ -13,4 +13,4 @@ import analysis_particle_diags_impl as an -an.do_analysis(single_precision = True) +an.do_analysis(single_precision=True) diff --git a/Examples/Tests/particle_pusher/analysis_pusher.py b/Examples/Tests/particle_pusher/analysis_pusher.py index 0d9e3a743f0..acef0e819d3 100755 --- a/Examples/Tests/particle_pusher/analysis_pusher.py +++ b/Examples/Tests/particle_pusher/analysis_pusher.py @@ -27,19 +27,19 @@ import yt -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI tolerance = 0.001 filename = 
sys.argv[1] -ds = yt.load( filename ) +ds = yt.load(filename) ad = ds.all_data() -x = ad['particle_position_x'].to_ndarray() +x = ad["particle_position_x"].to_ndarray() -print('error = ', abs(x)) -print('tolerance = ', tolerance) -assert(abs(x) < tolerance) +print("error = ", abs(x)) +print("tolerance = ", tolerance) +assert abs(x) < tolerance test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/particle_thermal_boundary/analysis_2d.py b/Examples/Tests/particle_thermal_boundary/analysis_2d.py index db14479af2c..49f33b5b805 100755 --- a/Examples/Tests/particle_thermal_boundary/analysis_2d.py +++ b/Examples/Tests/particle_thermal_boundary/analysis_2d.py @@ -19,19 +19,19 @@ import numpy as np -sys.path.insert(1,'../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI -FE_rdiag = './diags/reducedfiles/EF.txt' -init_Fenergy = np.loadtxt(FE_rdiag)[1,2] -final_Fenergy = np.loadtxt(FE_rdiag)[-1,2] -assert(final_Fenergy/init_Fenergy < 40) -assert(final_Fenergy < 5.e-5) +FE_rdiag = "./diags/reducedfiles/EF.txt" +init_Fenergy = np.loadtxt(FE_rdiag)[1, 2] +final_Fenergy = np.loadtxt(FE_rdiag)[-1, 2] +assert final_Fenergy / init_Fenergy < 40 +assert final_Fenergy < 5.0e-5 -PE_rdiag = './diags/reducedfiles/EN.txt' -init_Penergy = np.loadtxt(PE_rdiag)[0,2] -final_Penergy = np.loadtxt(PE_rdiag)[-1,2] -assert( abs(final_Penergy - init_Penergy)/init_Penergy < 0.02) +PE_rdiag = "./diags/reducedfiles/EN.txt" +init_Penergy = np.loadtxt(PE_rdiag)[0, 2] +final_Penergy = np.loadtxt(PE_rdiag)[-1, 2] +assert abs(final_Penergy - init_Penergy) / init_Penergy < 0.02 filename = sys.argv[1] test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/particles_in_pml/analysis_particles_in_pml.py b/Examples/Tests/particles_in_pml/analysis_particles_in_pml.py index 1d1a8959edd..df106976e78 100755 --- 
a/Examples/Tests/particles_in_pml/analysis_particles_in_pml.py +++ b/Examples/Tests/particles_in_pml/analysis_particles_in_pml.py @@ -17,21 +17,22 @@ PML, this test fails, since the particles leave a spurious charge, with associated fields, behind them. """ + import os import sys import yt yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # Open plotfile specified in command line filename = sys.argv[1] -ds = yt.load( filename ) +ds = yt.load(filename) # When extracting the fields, choose the right dimensions -dimensions = [ n_pts for n_pts in ds.domain_dimensions ] +dimensions = [n_pts for n_pts in ds.domain_dimensions] if ds.max_level == 1: dimensions[0] *= 2 dimensions[1] *= 2 @@ -39,12 +40,14 @@ dimensions[2] *= 2 # Check that the field is low enough -ad0 = ds.covering_grid(level=ds.max_level, left_edge=ds.domain_left_edge, dims=dimensions) -Ex_array = ad0[('mesh','Ex')].to_ndarray() -Ey_array = ad0[('mesh','Ey')].to_ndarray() -Ez_array = ad0[('mesh','Ez')].to_ndarray() +ad0 = ds.covering_grid( + level=ds.max_level, left_edge=ds.domain_left_edge, dims=dimensions +) +Ex_array = ad0[("mesh", "Ex")].to_ndarray() +Ey_array = ad0[("mesh", "Ey")].to_ndarray() +Ez_array = ad0[("mesh", "Ez")].to_ndarray() max_Efield = max(Ex_array.max(), Ey_array.max(), Ez_array.max()) -print( "max_Efield = %s" %max_Efield ) +print("max_Efield = %s" % max_Efield) # The field associated with the particle does not have # the same amplitude in 2d and 3d diff --git a/Examples/Tests/pass_mpi_communicator/PICMI_inputs_2d.py b/Examples/Tests/pass_mpi_communicator/PICMI_inputs_2d.py index 66f259da2ef..55ebf64d8e6 100755 --- a/Examples/Tests/pass_mpi_communicator/PICMI_inputs_2d.py +++ b/Examples/Tests/pass_mpi_communicator/PICMI_inputs_2d.py @@ -29,9 +29,9 @@ ########################## # different communicators will be passed different plasma density -plasma_density = [1.e18, 
1.e19] -plasma_xmin = 0. -plasma_x_velocity = 0.1*constants.c +plasma_density = [1.0e18, 1.0e19] +plasma_xmin = 0.0 +plasma_x_velocity = 0.1 * constants.c ########################## # numerics parameters @@ -45,64 +45,80 @@ nx = 64 ny = 64 -xmin = -20.e-6 -ymin = -20.e-6 -xmax = +20.e-6 -ymax = +20.e-6 +xmin = -20.0e-6 +ymin = -20.0e-6 +xmax = +20.0e-6 +ymax = +20.0e-6 -number_per_cell_each_dim = [2,2] +number_per_cell_each_dim = [2, 2] ########################## # physics components ########################## -uniform_plasma = picmi.UniformDistribution(density = plasma_density[color], - upper_bound = [0., None, None], - directed_velocity = [0.1*constants.c, 0., 0.]) +uniform_plasma = picmi.UniformDistribution( + density=plasma_density[color], + upper_bound=[0.0, None, None], + directed_velocity=[0.1 * constants.c, 0.0, 0.0], +) -electrons = picmi.Species(particle_type='electron', name='electrons', initial_distribution=uniform_plasma) +electrons = picmi.Species( + particle_type="electron", name="electrons", initial_distribution=uniform_plasma +) ########################## # numerics components ########################## -grid = picmi.Cartesian2DGrid(number_of_cells = [nx, ny], - lower_bound = [xmin, ymin], - upper_bound = [xmax, ymax], - lower_boundary_conditions = ['periodic', 'periodic'], - upper_boundary_conditions = ['periodic', 'periodic'], - moving_window_velocity = [0., 0., 0.], - warpx_max_grid_size = 32) +grid = picmi.Cartesian2DGrid( + number_of_cells=[nx, ny], + lower_bound=[xmin, ymin], + upper_bound=[xmax, ymax], + lower_boundary_conditions=["periodic", "periodic"], + upper_boundary_conditions=["periodic", "periodic"], + moving_window_velocity=[0.0, 0.0, 0.0], + warpx_max_grid_size=32, +) -solver = picmi.ElectromagneticSolver(grid=grid, cfl=1.) 
+solver = picmi.ElectromagneticSolver(grid=grid, cfl=1.0) ########################## # diagnostics ########################## -field_diag = picmi.FieldDiagnostic(name = f'diag{color + 1}', - grid = grid, - period = diagnostic_intervals, - data_list = ['Ex', 'Jx'], - write_dir = '.', - warpx_file_prefix = f'Python_pass_mpi_comm_plt{color + 1}_') +field_diag = picmi.FieldDiagnostic( + name=f"diag{color + 1}", + grid=grid, + period=diagnostic_intervals, + data_list=["Ex", "Jx"], + write_dir=".", + warpx_file_prefix=f"Python_pass_mpi_comm_plt{color + 1}_", +) -part_diag = picmi.ParticleDiagnostic(name = f'diag{color + 1}', - period = diagnostic_intervals, - species = [electrons], - data_list = ['weighting', 'ux']) +part_diag = picmi.ParticleDiagnostic( + name=f"diag{color + 1}", + period=diagnostic_intervals, + species=[electrons], + data_list=["weighting", "ux"], +) ########################## # simulation setup ########################## -sim = picmi.Simulation(solver = solver, - max_steps = max_steps, - verbose = 1, - warpx_current_deposition_algo = 'direct') +sim = picmi.Simulation( + solver=solver, + max_steps=max_steps, + verbose=1, + warpx_current_deposition_algo="direct", +) -sim.add_species(electrons, - layout = picmi.GriddedLayout(n_macroparticle_per_cell=number_per_cell_each_dim, grid=grid)) +sim.add_species( + electrons, + layout=picmi.GriddedLayout( + n_macroparticle_per_cell=number_per_cell_each_dim, grid=grid + ), +) sim.add_diagnostic(field_diag) sim.add_diagnostic(part_diag) @@ -114,7 +130,7 @@ # TODO: Enable in pyAMReX, then enable lines in PICMI_inputs_2d.py again # https://github.com/AMReX-Codes/pyamrex/issues/163 -#sim.step(max_steps, mpi_comm=new_comm) +# sim.step(max_steps, mpi_comm=new_comm) ########################## # test @@ -128,19 +144,19 @@ # TODO: Enable in pyAMReX, then enable lines in PICMI_inputs_2d.py again # https://github.com/AMReX-Codes/pyamrex/issues/163 -#comm_world_size = comm_world.size -#new_comm_size = new_comm.size +# 
comm_world_size = comm_world.size +# new_comm_size = new_comm.size -#if color == 0: +# if color == 0: # # verify that communicator contains correct number of procs (1) # assert sim.extension.getNProcs() == comm_world_size - 1 # assert sim.extension.getNProcs() == new_comm_size -#else: +# else: # # verify that amrex initialized with 1 fewer proc than comm world # assert sim.extension.getNProcs() == comm_world_size - 1 # assert sim.extension.getNProcs() == new_comm_size - # verify that amrex proc ranks are offset by -1 from - # world comm proc ranks +# verify that amrex proc ranks are offset by -1 from +# world comm proc ranks # assert libwarpx.amr.ParallelDescriptor.MyProc() == rank - 1 diff --git a/Examples/Tests/pass_mpi_communicator/analysis.py b/Examples/Tests/pass_mpi_communicator/analysis.py index 9a622943127..af55346e1f5 100755 --- a/Examples/Tests/pass_mpi_communicator/analysis.py +++ b/Examples/Tests/pass_mpi_communicator/analysis.py @@ -7,13 +7,13 @@ import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import yt yt.funcs.mylog.setLevel(50) import numpy as np -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksum # this will be the name of the first plot file @@ -25,56 +25,56 @@ test_name2 = fn2[:-10] -checksum1 = checksum.Checksum(test_name1, fn1, do_fields=True, - do_particles=True) +checksum1 = checksum.Checksum(test_name1, fn1, do_fields=True, do_particles=True) -checksum2 = checksum.Checksum(test_name2, fn2, do_fields=True, - do_particles=True) +checksum2 = checksum.Checksum(test_name2, fn2, do_fields=True, do_particles=True) -rtol=1.e-9 -atol=1.e-40 +rtol = 1.0e-9 +atol = 1.0e-40 # Evaluate checksums against each other, adapted from # Checksum.evaluate() method # Dictionaries have same outer keys (levels, species)? 
-if (checksum1.data.keys() != checksum2.data.keys()): - print("ERROR: plotfile 1 and plotfile 2 checksums " - "have different outer keys:") +if checksum1.data.keys() != checksum2.data.keys(): + print("ERROR: plotfile 1 and plotfile 2 checksums " "have different outer keys:") print("Plot1: %s" % checksum1.data.keys()) print("Plot2: %s" % checksum2.data.keys()) sys.exit(1) # Dictionaries have same inner keys (field and particle quantities)? for key1 in checksum1.data.keys(): - if (checksum1.data[key1].keys() != checksum2.data[key1].keys()): - print("ERROR: plotfile 1 and plotfile 2 checksums have " - "different inner keys:") + if checksum1.data[key1].keys() != checksum2.data[key1].keys(): + print( + "ERROR: plotfile 1 and plotfile 2 checksums have " "different inner keys:" + ) print("Common outer keys: %s" % checksum2.data.keys()) - print("Plotfile 1 inner keys in %s: %s" - % (key1, checksum1.data[key1].keys())) - print("Plotfile 2 inner keys in %s: %s" - % (key1, checksum2.data[key1].keys())) + print("Plotfile 1 inner keys in %s: %s" % (key1, checksum1.data[key1].keys())) + print("Plotfile 2 inner keys in %s: %s" % (key1, checksum2.data[key1].keys())) sys.exit(1) # Dictionaries have same values? 
checksums_same = False for key1 in checksum1.data.keys(): for key2 in checksum1.data[key1].keys(): - passed = np.isclose(checksum2.data[key1][key2], - checksum1.data[key1][key2], - rtol=rtol, atol=atol) + passed = np.isclose( + checksum2.data[key1][key2], checksum1.data[key1][key2], rtol=rtol, atol=atol + ) # skip over these, since they will be the same if communicators # have same number of procs if key2 in ["particle_cpu", "particle_id", "particle_position_y"]: continue if passed: - print("ERROR: plotfile 1 and plotfile 2 checksums have " - "same values for key [%s,%s]" % (key1, key2)) - print("Plotfile 1: [%s,%s] %.15e" - % (key1, key2, checksum1.data[key1][key2])) - print("Plotfile 2: [%s,%s] %.15e" - % (key1, key2, checksum2.data[key1][key2])) + print( + "ERROR: plotfile 1 and plotfile 2 checksums have " + "same values for key [%s,%s]" % (key1, key2) + ) + print( + "Plotfile 1: [%s,%s] %.15e" % (key1, key2, checksum1.data[key1][key2]) + ) + print( + "Plotfile 2: [%s,%s] %.15e" % (key1, key2, checksum2.data[key1][key2]) + ) checksums_same = True if checksums_same: sys.exit(1) diff --git a/Examples/Tests/pec/analysis_pec.py b/Examples/Tests/pec/analysis_pec.py index 841cc06ae22..12907bb7846 100755 --- a/Examples/Tests/pec/analysis_pec.py +++ b/Examples/Tests/pec/analysis_pec.py @@ -17,7 +17,7 @@ import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import yt @@ -25,50 +25,61 @@ import numpy as np -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file fn = sys.argv[1] # Parameters (these parameters must match the parameters in `inputs.multi.rt`) -Ey_in = 1.e5 -E_th = 2.0*Ey_in +Ey_in = 1.0e5 +E_th = 2.0 * Ey_in # Read the file ds = yt.load(fn) t0 = ds.current_time.to_value() -data = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, - dims=ds.domain_dimensions) -Ey_array = 
data[('mesh','Ey')].to_ndarray() +data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +Ey_array = data[("mesh", "Ey")].to_ndarray() max_Ey_sim = Ey_array.max() min_Ey_sim = Ey_array.min() -max_Ey_error = abs(max_Ey_sim-E_th)/abs(E_th) -min_Ey_error = abs(min_Ey_sim - (-E_th))/abs(E_th) -print('Ey_max is %s: Max Eth is %s : Max error for Ey_max: %.2e' %(max_Ey_sim,E_th,max_Ey_error)) -print('Ey_min is %s: Min Eth is %s : Max error for Ey_min: %.2e' %(min_Ey_sim,(-E_th), min_Ey_error)) -error_rel = 0. -max_Ey_error_rel = max( error_rel, max_Ey_error ) -min_Ey_error_rel = max( error_rel, min_Ey_error ) +max_Ey_error = abs(max_Ey_sim - E_th) / abs(E_th) +min_Ey_error = abs(min_Ey_sim - (-E_th)) / abs(E_th) +print( + "Ey_max is %s: Max Eth is %s : Max error for Ey_max: %.2e" + % (max_Ey_sim, E_th, max_Ey_error) +) +print( + "Ey_min is %s: Min Eth is %s : Max error for Ey_min: %.2e" + % (min_Ey_sim, (-E_th), min_Ey_error) +) +error_rel = 0.0 +max_Ey_error_rel = max(error_rel, max_Ey_error) +min_Ey_error_rel = max(error_rel, min_Ey_error) # Plot the last field from the loop (Ez at iteration 40) -field = 'Ey' +field = "Ey" xCell = ds.domain_dimensions[0] yCell = ds.domain_dimensions[1] zCell = ds.domain_dimensions[2] -z_array = data['z'].to_ndarray() - -plt.figure(figsize=(8,4)) -plt.plot(z_array[int(xCell/2),int(yCell/2),:]-z_array[int(xCell/2),int(yCell/2),int(zCell/2)],Ey_array[int(xCell/2),int(yCell/2),:]) +z_array = data["z"].to_ndarray() + +plt.figure(figsize=(8, 4)) +plt.plot( + z_array[int(xCell / 2), int(yCell / 2), :] + - z_array[int(xCell / 2), int(yCell / 2), int(zCell / 2)], + Ey_array[int(xCell / 2), int(yCell / 2), :], +) plt.ylim(-1.2e-3, 1.2e-3) plt.ylim(-2.2e5, 2.2e5) plt.xlim(-4.0e-6, 4.000001e-6) -plt.xticks(np.arange(-4.e-6,4.000001e-6, step=1e-6)) -plt.xlabel('z (m)') -plt.ylabel(field +' (V/m)') +plt.xticks(np.arange(-4.0e-6, 4.000001e-6, step=1e-6)) +plt.xlabel("z (m)") +plt.ylabel(field + " (V/m)") 
plt.tight_layout() -plt.savefig('Ey_pec_analysis.png') +plt.savefig("Ey_pec_analysis.png") tolerance_rel = 0.01 @@ -77,12 +88,12 @@ print("max_Ey_error_rel : " + str(max_Ey_error_rel)) print("tolerance_rel: " + str(tolerance_rel)) -assert( max_Ey_error_rel < tolerance_rel ) -assert( min_Ey_error_rel < tolerance_rel ) +assert max_Ey_error_rel < tolerance_rel +assert min_Ey_error_rel < tolerance_rel test_name = os.path.split(os.getcwd())[1] -if re.search( 'single_precision', fn ): - checksumAPI.evaluate_checksum(test_name, fn, rtol=1.e-3) +if re.search("single_precision", fn): + checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3) else: checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/pec/analysis_pec_mr.py b/Examples/Tests/pec/analysis_pec_mr.py index e8aab4dcd6f..8361246b8dd 100755 --- a/Examples/Tests/pec/analysis_pec_mr.py +++ b/Examples/Tests/pec/analysis_pec_mr.py @@ -17,7 +17,7 @@ import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import yt @@ -25,51 +25,62 @@ import numpy as np -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file fn = sys.argv[1] # Parameters (these parameters must match the parameters in `inputs.multi.rt`) -Ey_in = 1.e5 -E_th = 2.0*Ey_in +Ey_in = 1.0e5 +E_th = 2.0 * Ey_in # Read the file ds = yt.load(fn) t0 = ds.current_time.to_value() -data = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, - dims=ds.domain_dimensions) -Ey_array = data[('mesh','Ey')].to_ndarray() +data = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +Ey_array = data[("mesh", "Ey")].to_ndarray() max_Ey_sim = Ey_array.max() min_Ey_sim = Ey_array.min() -max_Ey_error = abs(max_Ey_sim-E_th)/abs(E_th) -min_Ey_error = abs(min_Ey_sim - (-E_th))/abs(E_th) -print('Ey_max is %s: Max Eth is %s : Max error for Ey_max: %.2e' 
%(max_Ey_sim,E_th,max_Ey_error)) -print('Ey_min is %s: Min Eth is %s : Max error for Ey_min: %.2e' %(min_Ey_sim,(-E_th), min_Ey_error)) -error_rel = 0. -max_Ey_error_rel = max( error_rel, max_Ey_error ) -min_Ey_error_rel = max( error_rel, min_Ey_error ) +max_Ey_error = abs(max_Ey_sim - E_th) / abs(E_th) +min_Ey_error = abs(min_Ey_sim - (-E_th)) / abs(E_th) +print( + "Ey_max is %s: Max Eth is %s : Max error for Ey_max: %.2e" + % (max_Ey_sim, E_th, max_Ey_error) +) +print( + "Ey_min is %s: Min Eth is %s : Max error for Ey_min: %.2e" + % (min_Ey_sim, (-E_th), min_Ey_error) +) +error_rel = 0.0 +max_Ey_error_rel = max(error_rel, max_Ey_error) +min_Ey_error_rel = max(error_rel, min_Ey_error) # Plot the last field from the loop (Ez at iteration 40) # Plot the last field from the loop (Ez at iteration 40) -field = 'Ey' +field = "Ey" xCell = ds.domain_dimensions[0] yCell = ds.domain_dimensions[1] zCell = ds.domain_dimensions[2] -z_array = data['z'].to_ndarray() - -plt.figure(figsize=(8,4)) -plt.plot(z_array[int(xCell/2),int(yCell/2),:]-z_array[int(xCell/2),int(yCell/2),int(zCell/2)],Ey_array[int(xCell/2),int(yCell/2),:]) +z_array = data["z"].to_ndarray() + +plt.figure(figsize=(8, 4)) +plt.plot( + z_array[int(xCell / 2), int(yCell / 2), :] + - z_array[int(xCell / 2), int(yCell / 2), int(zCell / 2)], + Ey_array[int(xCell / 2), int(yCell / 2), :], +) plt.ylim(-1.2e-3, 1.2e-3) plt.ylim(-2.2e5, 2.2e5) plt.xlim(-4.0e-6, 4.000001e-6) -plt.xticks(np.arange(-4.e-6,4.000001e-6, step=1e-6)) -plt.xlabel('z (m)') -plt.ylabel(field +' (V/m)') +plt.xticks(np.arange(-4.0e-6, 4.000001e-6, step=1e-6)) +plt.xlabel("z (m)") +plt.ylabel(field + " (V/m)") plt.tight_layout() -plt.savefig('Ey_pec_analysis_mr.png') +plt.savefig("Ey_pec_analysis_mr.png") tolerance_rel = 0.05 @@ -77,12 +88,12 @@ print("max_Ey_error_rel : " + str(max_Ey_error_rel)) print("tolerance_rel: " + str(tolerance_rel)) -assert( max_Ey_error_rel < tolerance_rel ) -assert( min_Ey_error_rel < tolerance_rel ) +assert 
max_Ey_error_rel < tolerance_rel +assert min_Ey_error_rel < tolerance_rel test_name = os.path.split(os.getcwd())[1] -if re.search( 'single_precision', fn ): - checksumAPI.evaluate_checksum(test_name, fn, rtol=1.e-3) +if re.search("single_precision", fn): + checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3) else: checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/photon_pusher/analysis_photon_pusher.py b/Examples/Tests/photon_pusher/analysis_photon_pusher.py index 72074d75ccb..9135ad981ba 100755 --- a/Examples/Tests/photon_pusher/analysis_photon_pusher.py +++ b/Examples/Tests/photon_pusher/analysis_photon_pusher.py @@ -13,28 +13,42 @@ import numpy as np import yt -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI -#This script checks if photons initialized with different momenta and -#different initial directions propagate along straight lines at the speed of -#light. The plotfile to be analyzed is passed as a command line argument. +# This script checks if photons initialized with different momenta and +# different initial directions propagate along straight lines at the speed of +# light. The plotfile to be analyzed is passed as a command line argument. -#If the script is run without a command line argument, it regenerates a new -#inputfile according to the initial conditions listed below. +# If the script is run without a command line argument, it regenerates a new +# inputfile according to the initial conditions listed below. -#Physical constants -c = 299792458. 
+# Physical constants +c = 299792458.0 m_e = 9.1093837015e-31 -#________________________________________ - -#Test cases -spec_names = ["p_xp_1", "p_xn_1", "p_yp_1", "p_yn_1", - "p_zp_1", "p_zn_1","p_dp_1", "p_dn_1", - "p_xp_10", "p_xn_10", "p_yp_10", "p_yn_10", - "p_zp_10", "p_zn_10", "p_dp_10", "p_dn_10"] -#photon momenta are in units of m_e c +# ________________________________________ + +# Test cases +spec_names = [ + "p_xp_1", + "p_xn_1", + "p_yp_1", + "p_yn_1", + "p_zp_1", + "p_zn_1", + "p_dp_1", + "p_dn_1", + "p_xp_10", + "p_xn_10", + "p_yp_10", + "p_yn_10", + "p_zp_10", + "p_zn_10", + "p_dp_10", + "p_dn_10", +] +# photon momenta are in units of m_e c mxp1 = np.array([1, 0.0, 0.0]) mxn1 = np.array([-1, 0.0, 0.0]) myp1 = np.array([0.0, 1, 0.0]) @@ -50,20 +64,39 @@ mzp10 = np.array([0.0, 0.0, 10]) mzn10 = np.array([0.0, 0.0, -10]) mdp10 = np.array([10, 10, 10]) -mdn10 = np.array([-10,-10, -10]) -gamma_beta_list = np.array([mxp1, mxn1, myp1, myn1, mzp1, mzn1, mdp1, mdn1, - mxp10, mxn10, myp10, myn10, mzp10, mzn10, mdp10, mdn10]) -init_pos = np.array([0.0, 0.0, 0.0]) -#________________________________________ - -#Tolerance +mdn10 = np.array([-10, -10, -10]) +gamma_beta_list = np.array( + [ + mxp1, + mxn1, + myp1, + myn1, + mzp1, + mzn1, + mdp1, + mdn1, + mxp10, + mxn10, + myp10, + myn10, + mzp10, + mzn10, + mdp10, + mdn10, + ] +) +init_pos = np.array([0.0, 0.0, 0.0]) +# ________________________________________ + +# Tolerance tol_pos = 1.0e-14 -tol_mom = 0.0 #momentum should be conserved exactly -#________________________________________ +tol_mom = 0.0 # momentum should be conserved exactly +# ________________________________________ -#Input filename +# Input filename inputname = "inputs" -#________________________________________ +# ________________________________________ + # This function reads the WarpX plotfile given as the first command-line # argument, and check if the position of each photon agrees with theory. 
@@ -73,46 +106,60 @@ def check(): sim_time = data_set_end.current_time.to_value() - #expected positions list - ll = sim_time*c - answ_pos = init_pos + \ - ll*gamma_beta_list/np.linalg.norm(gamma_beta_list,axis=1, keepdims=True) - - #expected momenta list - answ_mom = m_e * c *gamma_beta_list #momenta don't change - - #simulation results - all_data = data_set_end.all_data() - res_pos = [np.array([ - all_data[sp, 'particle_position_x'].v[0], - all_data[sp, 'particle_position_y'].v[0], - all_data[sp, 'particle_position_z'].v[0]]) - for sp in spec_names] - res_mom = [np.array([ - all_data[sp, 'particle_momentum_x'].v[0], - all_data[sp, 'particle_momentum_y'].v[0], - all_data[sp, 'particle_momentum_z'].v[0]]) - for sp in spec_names] - - #check discrepancies - disc_pos = [np.linalg.norm(a-b)/np.linalg.norm(b) - for a,b in zip(res_pos, answ_pos)] - disc_mom = [np.linalg.norm(a-b)/np.linalg.norm(b) - for a,b in zip(res_mom, answ_mom)] - - print("max(disc_pos) = %s" %max(disc_pos)) - print("tol_pos = %s" %tol_pos) - print("max(disc_mom) = %s" %max(disc_mom)) - print("tol_mom = %s" %tol_mom) - - assert ((max(disc_pos) <= tol_pos) and (max(disc_mom) <= tol_mom)) + # expected positions list + ll = sim_time * c + answ_pos = init_pos + ll * gamma_beta_list / np.linalg.norm( + gamma_beta_list, axis=1, keepdims=True + ) + + # expected momenta list + answ_mom = m_e * c * gamma_beta_list # momenta don't change + + # simulation results + all_data = data_set_end.all_data() + res_pos = [ + np.array( + [ + all_data[sp, "particle_position_x"].v[0], + all_data[sp, "particle_position_y"].v[0], + all_data[sp, "particle_position_z"].v[0], + ] + ) + for sp in spec_names + ] + res_mom = [ + np.array( + [ + all_data[sp, "particle_momentum_x"].v[0], + all_data[sp, "particle_momentum_y"].v[0], + all_data[sp, "particle_momentum_z"].v[0], + ] + ) + for sp in spec_names + ] + + # check discrepancies + disc_pos = [ + np.linalg.norm(a - b) / np.linalg.norm(b) for a, b in zip(res_pos, answ_pos) + ] + 
disc_mom = [ + np.linalg.norm(a - b) / np.linalg.norm(b) for a, b in zip(res_mom, answ_mom) + ] + + print("max(disc_pos) = %s" % max(disc_pos)) + print("tol_pos = %s" % tol_pos) + print("max(disc_mom) = %s" % max(disc_mom)) + print("tol_mom = %s" % tol_mom) + + assert (max(disc_pos) <= tol_pos) and (max(disc_mom) <= tol_mom) test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) + # This function generates the input file to test the photon pusher. def generate(): - with open(inputname,'w') as f: + with open(inputname, "w") as f: f.write("#Automatically generated inputfile\n") f.write("#Run check.py without arguments to regenerate\n") f.write("#\n\n") @@ -131,8 +178,8 @@ def generate(): f.write("algo.field_gathering = energy-conserving\n") f.write("warpx.cfl = 1.0\n") - f.write("particles.species_names = {}\n".format(' '.join(spec_names))) - f.write("particles.photon_species = {}\n".format(' '.join(spec_names))) + f.write("particles.species_names = {}\n".format(" ".join(spec_names))) + f.write("particles.photon_species = {}\n".format(" ".join(spec_names))) f.write("\namr.plot_int = 50\n\n") @@ -144,21 +191,25 @@ def generate(): data = zip(spec_names, gamma_beta_list) for case in data: name = case[0] - velx, vely ,velz = case[1] + velx, vely, velz = case[1] f.write("{}.species_type = photon\n".format(name)) f.write('{}.injection_style = "SingleParticle"\n'.format(name)) - f.write("{}.single_particle_pos = {} {} {}\n". - format(name, init_pos[0], init_pos[1], init_pos[2])) - f.write("{}.single_particle_u = {} {} {}\n". 
- format(name, velx, vely, velz)) + f.write( + "{}.single_particle_pos = {} {} {}\n".format( + name, init_pos[0], init_pos[1], init_pos[2] + ) + ) + f.write("{}.single_particle_u = {} {} {}\n".format(name, velx, vely, velz)) f.write("{}.single_particle_weight = 1.0\n".format(name)) f.write("\n") + def main(): - if (len(sys.argv) < 2): + if len(sys.argv) < 2: generate() else: check() + if __name__ == "__main__": main() diff --git a/Examples/Tests/plasma_lens/PICMI_inputs_3d.py b/Examples/Tests/plasma_lens/PICMI_inputs_3d.py index 50d222bbf36..32b2ab3abf7 100644 --- a/Examples/Tests/plasma_lens/PICMI_inputs_3d.py +++ b/Examples/Tests/plasma_lens/PICMI_inputs_3d.py @@ -15,80 +15,82 @@ nz = 16 # Physical domain -xmin = -1. -xmax = 1. -ymin = -1. -ymax = 1. -zmin = 0. -zmax = 2. +xmin = -1.0 +xmax = 1.0 +ymin = -1.0 +ymax = 1.0 +zmin = 0.0 +zmax = 2.0 # Create grid -grid = picmi.Cartesian3DGrid(number_of_cells = [nx, ny, nz], - lower_bound = [xmin, ymin, zmin], - upper_bound = [xmax, ymax, zmax], - lower_boundary_conditions = ['dirichlet', 'dirichlet', 'dirichlet'], - upper_boundary_conditions = ['dirichlet', 'dirichlet', 'dirichlet'], - lower_boundary_conditions_particles = ['absorbing', 'absorbing', 'absorbing'], - upper_boundary_conditions_particles = ['absorbing', 'absorbing', 'absorbing']) +grid = picmi.Cartesian3DGrid( + number_of_cells=[nx, ny, nz], + lower_bound=[xmin, ymin, zmin], + upper_bound=[xmax, ymax, zmax], + lower_boundary_conditions=["dirichlet", "dirichlet", "dirichlet"], + upper_boundary_conditions=["dirichlet", "dirichlet", "dirichlet"], + lower_boundary_conditions_particles=["absorbing", "absorbing", "absorbing"], + upper_boundary_conditions_particles=["absorbing", "absorbing", "absorbing"], +) # Particles -vel_z = 0.5*c -offset_x_particle = picmi.ParticleListDistribution(x = [0.05], - y = [0.], - z = [0.05], - ux = [0.], - uy = [0.], - uz = [vel_z], - weight = [1.]) - -offset_y_particle = picmi.ParticleListDistribution(x = [0.], - y = [0.04], - z 
= [0.05], - ux = [0.], - uy = [0.], - uz = [vel_z], - weight = [1.]) - -electrons = picmi.Species(particle_type = 'electron', - name = 'electrons', - initial_distribution = [offset_x_particle, offset_y_particle]) +vel_z = 0.5 * c +offset_x_particle = picmi.ParticleListDistribution( + x=[0.05], y=[0.0], z=[0.05], ux=[0.0], uy=[0.0], uz=[vel_z], weight=[1.0] +) + +offset_y_particle = picmi.ParticleListDistribution( + x=[0.0], y=[0.04], z=[0.05], ux=[0.0], uy=[0.0], uz=[vel_z], weight=[1.0] +) + +electrons = picmi.Species( + particle_type="electron", + name="electrons", + initial_distribution=[offset_x_particle, offset_y_particle], +) # Plasma lenses -plasma_lenses = picmi.PlasmaLens(period = 0.5, - starts = [0.1, 0.11, 0.12, 0.13], - lengths = [0.1, 0.11, 0.12, 0.13], - strengths_E = [600000., 800000., 600000., 200000.], - strengths_B = [0.0, 0.0, 0.0, 0.0]) +plasma_lenses = picmi.PlasmaLens( + period=0.5, + starts=[0.1, 0.11, 0.12, 0.13], + lengths=[0.1, 0.11, 0.12, 0.13], + strengths_E=[600000.0, 800000.0, 600000.0, 200000.0], + strengths_B=[0.0, 0.0, 0.0, 0.0], +) # Electromagnetic solver -solver = picmi.ElectromagneticSolver(grid = grid, - method = 'Yee', - cfl = 0.7) +solver = picmi.ElectromagneticSolver(grid=grid, method="Yee", cfl=0.7) # Diagnostics -part_diag1 = picmi.ParticleDiagnostic(name = 'diag1', - period = max_steps, - species = [electrons], - data_list = ['ux', 'uy', 'uz', 'x', 'y', 'z'], - write_dir = '.', - warpx_file_prefix = 'Python_plasma_lens_plt') - -field_diag1 = picmi.FieldDiagnostic(name = 'diag1', - grid = grid, - period = max_steps, - data_list = ['Bx', 'By', 'Bz', 'Ex', 'Ey', 'Ez', 'Jx', 'Jy', 'Jz'], - write_dir = '.', - warpx_file_prefix = 'Python_plasma_lens_plt') +part_diag1 = picmi.ParticleDiagnostic( + name="diag1", + period=max_steps, + species=[electrons], + data_list=["ux", "uy", "uz", "x", "y", "z"], + write_dir=".", + warpx_file_prefix="Python_plasma_lens_plt", +) + +field_diag1 = picmi.FieldDiagnostic( + name="diag1", + 
grid=grid, + period=max_steps, + data_list=["Bx", "By", "Bz", "Ex", "Ey", "Ez", "Jx", "Jy", "Jz"], + write_dir=".", + warpx_file_prefix="Python_plasma_lens_plt", +) # Set up simulation -sim = picmi.Simulation(solver = solver, - max_steps = max_steps, - verbose = 1, - particle_shape = 'linear', - warpx_serialize_initial_conditions = 1, - warpx_do_dynamic_scheduling = 0) +sim = picmi.Simulation( + solver=solver, + max_steps=max_steps, + verbose=1, + particle_shape="linear", + warpx_serialize_initial_conditions=1, + warpx_do_dynamic_scheduling=0, +) # Add plasma electrons -sim.add_species(electrons, layout = None) +sim.add_species(electrons, layout=None) # Add the plasma lenses sim.add_applied_field(plasma_lenses) @@ -98,7 +100,7 @@ sim.add_diagnostic(field_diag1) # Write input file that can be used to run with the compiled version -#sim.write_input_file(file_name = 'inputs_3d_picmi') +# sim.write_input_file(file_name = 'inputs_3d_picmi') # Advance simulation until last time step sim.step(max_steps) diff --git a/Examples/Tests/plasma_lens/analysis.py b/Examples/Tests/plasma_lens/analysis.py index 212e71087f9..8cbbe86c927 100755 --- a/Examples/Tests/plasma_lens/analysis.py +++ b/Examples/Tests/plasma_lens/analysis.py @@ -23,93 +23,112 @@ from scipy.constants import c, e, m_e yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI filename = sys.argv[1] -ds = yt.load( filename ) +ds = yt.load(filename) ad = ds.all_data() # Get final position of the particles. # There are two particles, one moves in x, the other in y. # The particles may not be in order, so determine which is which # by looking at their max positions in the respective planes. 
-i0 = np.argmax(np.abs(ad['electrons', 'particle_position_x'].v)) -i1 = np.argmax(np.abs(ad['electrons', 'particle_position_y'].v)) +i0 = np.argmax(np.abs(ad["electrons", "particle_position_x"].v)) +i1 = np.argmax(np.abs(ad["electrons", "particle_position_y"].v)) -xx_sim = ad['electrons', 'particle_position_x'].v[i0] -yy_sim = ad['electrons', 'particle_position_y'].v[i1] -zz_sim0 = ad['electrons', 'particle_position_z'].v[i0] -zz_sim1 = ad['electrons', 'particle_position_z'].v[i1] +xx_sim = ad["electrons", "particle_position_x"].v[i0] +yy_sim = ad["electrons", "particle_position_y"].v[i1] +zz_sim0 = ad["electrons", "particle_position_z"].v[i0] +zz_sim1 = ad["electrons", "particle_position_z"].v[i1] -ux_sim = ad['electrons', 'particle_momentum_x'].v[i0]/m_e -uy_sim = ad['electrons', 'particle_momentum_y'].v[i1]/m_e +ux_sim = ad["electrons", "particle_momentum_x"].v[i0] / m_e +uy_sim = ad["electrons", "particle_momentum_y"].v[i1] / m_e -if 'warpx.gamma_boost' in ds.parameters: - gamma_boost = float(ds.parameters.get('warpx.gamma_boost')) - uz_boost = np.sqrt(gamma_boost*gamma_boost - 1.)*c +if "warpx.gamma_boost" in ds.parameters: + gamma_boost = float(ds.parameters.get("warpx.gamma_boost")) + uz_boost = np.sqrt(gamma_boost * gamma_boost - 1.0) * c time = ds.current_time.to_value() - zz_sim0 = gamma_boost*zz_sim0 + uz_boost*time - zz_sim1 = gamma_boost*zz_sim1 + uz_boost*time + zz_sim0 = gamma_boost * zz_sim0 + uz_boost * time + zz_sim1 = gamma_boost * zz_sim1 + uz_boost * time def applylens(x0, vx0, vz0, gamma, lens_length, lens_strength): - kb0 = np.sqrt(e/(m_e*gamma*vz0**2)*lens_strength) - x1 = x0*np.cos(kb0*lens_length) + (vx0/vz0)/kb0*np.sin(kb0*lens_length) - vx1 = vz0*(-kb0*x0*np.sin(kb0*lens_length) + (vx0/vz0)*np.cos(kb0*lens_length)) + kb0 = np.sqrt(e / (m_e * gamma * vz0**2) * lens_strength) + x1 = x0 * np.cos(kb0 * lens_length) + (vx0 / vz0) / kb0 * np.sin(kb0 * lens_length) + vx1 = vz0 * ( + -kb0 * x0 * np.sin(kb0 * lens_length) + (vx0 / vz0) * 
np.cos(kb0 * lens_length) + ) return x1, vx1 + clight = c try: - vel_z = eval(ds.parameters.get('my_constants.vel_z')) + vel_z = eval(ds.parameters.get("my_constants.vel_z")) except TypeError: # vel_z is not saved in my_constants with the PICMI version - vel_z = 0.5*c - -if 'particles.repeated_plasma_lens_period' in ds.parameters: - plasma_lens_period = float(ds.parameters.get('particles.repeated_plasma_lens_period')) - plasma_lens_starts = [float(x) for x in ds.parameters.get('particles.repeated_plasma_lens_starts').split()] - plasma_lens_lengths = [float(x) for x in ds.parameters.get('particles.repeated_plasma_lens_lengths').split()] - plasma_lens_strengths_E = [eval(x) for x in ds.parameters.get('particles.repeated_plasma_lens_strengths_E').split()] - plasma_lens_strengths_B = [eval(x) for x in ds.parameters.get('particles.repeated_plasma_lens_strengths_B').split()] -elif 'lattice.elements' in ds.parameters: - lattice_elements = ds.parameters.get('lattice.elements').split() + vel_z = 0.5 * c + +if "particles.repeated_plasma_lens_period" in ds.parameters: + plasma_lens_period = float( + ds.parameters.get("particles.repeated_plasma_lens_period") + ) + plasma_lens_starts = [ + float(x) + for x in ds.parameters.get("particles.repeated_plasma_lens_starts").split() + ] + plasma_lens_lengths = [ + float(x) + for x in ds.parameters.get("particles.repeated_plasma_lens_lengths").split() + ] + plasma_lens_strengths_E = [ + eval(x) + for x in ds.parameters.get("particles.repeated_plasma_lens_strengths_E").split() + ] + plasma_lens_strengths_B = [ + eval(x) + for x in ds.parameters.get("particles.repeated_plasma_lens_strengths_B").split() + ] +elif "lattice.elements" in ds.parameters: + lattice_elements = ds.parameters.get("lattice.elements").split() plasma_lens_zstarts = [] plasma_lens_lengths = [] plasma_lens_strengths_E = [] - z_location = 0. 
+ z_location = 0.0 for element in lattice_elements: - element_type = ds.parameters.get(f'{element}.type') - length = float(ds.parameters.get(f'{element}.ds')) - if element_type == 'plasmalens': + element_type = ds.parameters.get(f"{element}.type") + length = float(ds.parameters.get(f"{element}.ds")) + if element_type == "plasmalens": plasma_lens_zstarts.append(z_location) plasma_lens_lengths.append(length) - plasma_lens_strengths_E.append(float(ds.parameters.get(f'{element}.dEdx'))) + plasma_lens_strengths_E.append(float(ds.parameters.get(f"{element}.dEdx"))) z_location += length plasma_lens_period = 0.5 - plasma_lens_starts = plasma_lens_zstarts - plasma_lens_period*np.arange(len(plasma_lens_zstarts)) + plasma_lens_starts = plasma_lens_zstarts - plasma_lens_period * np.arange( + len(plasma_lens_zstarts) + ) plasma_lens_strengths_B = np.zeros(len(plasma_lens_zstarts)) try: # The picmi version - x0 = float(ds.parameters.get('electrons.dist0.multiple_particles_pos_x')) - y0 = float(ds.parameters.get('electrons.dist1.multiple_particles_pos_y')) - z0 = float(ds.parameters.get('electrons.dist0.multiple_particles_pos_z')) - ux0 = float(ds.parameters.get('electrons.dist0.multiple_particles_ux'))*c - uy0 = float(ds.parameters.get('electrons.dist1.multiple_particles_uy'))*c - uz0 = eval(ds.parameters.get('electrons.dist0.multiple_particles_uz'))*c + x0 = float(ds.parameters.get("electrons.dist0.multiple_particles_pos_x")) + y0 = float(ds.parameters.get("electrons.dist1.multiple_particles_pos_y")) + z0 = float(ds.parameters.get("electrons.dist0.multiple_particles_pos_z")) + ux0 = float(ds.parameters.get("electrons.dist0.multiple_particles_ux")) * c + uy0 = float(ds.parameters.get("electrons.dist1.multiple_particles_uy")) * c + uz0 = eval(ds.parameters.get("electrons.dist0.multiple_particles_uz")) * c except TypeError: # The inputs version - x0 = float(ds.parameters.get('electrons.multiple_particles_pos_x').split()[0]) - y0 = 
float(ds.parameters.get('electrons.multiple_particles_pos_y').split()[1]) - z0 = float(ds.parameters.get('electrons.multiple_particles_pos_z').split()[0]) - ux0 = float(ds.parameters.get('electrons.multiple_particles_ux').split()[0])*c - uy0 = float(ds.parameters.get('electrons.multiple_particles_uy').split()[1])*c - uz0 = eval(ds.parameters.get('electrons.multiple_particles_uz').split()[0])*c - -tt = 0. + x0 = float(ds.parameters.get("electrons.multiple_particles_pos_x").split()[0]) + y0 = float(ds.parameters.get("electrons.multiple_particles_pos_y").split()[1]) + z0 = float(ds.parameters.get("electrons.multiple_particles_pos_z").split()[0]) + ux0 = float(ds.parameters.get("electrons.multiple_particles_ux").split()[0]) * c + uy0 = float(ds.parameters.get("electrons.multiple_particles_uy").split()[1]) * c + uz0 = eval(ds.parameters.get("electrons.multiple_particles_uz").split()[0]) * c + +tt = 0.0 xx = x0 yy = y0 zz = z0 @@ -117,37 +136,41 @@ def applylens(x0, vx0, vz0, gamma, lens_length, lens_strength): uy = uy0 uz = uz0 -gamma = np.sqrt(uz0**2/c**2 + 1.) 
-vz = uz/gamma +gamma = np.sqrt(uz0**2 / c**2 + 1.0) +vz = uz / gamma for i in range(len(plasma_lens_starts)): - z_lens = i*plasma_lens_period + plasma_lens_starts[i] - vx = ux/gamma - vy = uy/gamma - dt = (z_lens - zz)/vz + z_lens = i * plasma_lens_period + plasma_lens_starts[i] + vx = ux / gamma + vy = uy / gamma + dt = (z_lens - zz) / vz tt = tt + dt - xx = xx + dt*vx - yy = yy + dt*vy - lens_strength = plasma_lens_strengths_E[i] + plasma_lens_strengths_B[i]*vel_z + xx = xx + dt * vx + yy = yy + dt * vy + lens_strength = plasma_lens_strengths_E[i] + plasma_lens_strengths_B[i] * vel_z xx, vx = applylens(xx, vx, vz, gamma, plasma_lens_lengths[i], lens_strength) yy, vy = applylens(yy, vy, vz, gamma, plasma_lens_lengths[i], lens_strength) - dt = plasma_lens_lengths[i]/vz + dt = plasma_lens_lengths[i] / vz tt = tt + dt - ux = gamma*vx - uy = gamma*vy + ux = gamma * vx + uy = gamma * vy zz = z_lens + plasma_lens_lengths[i] -dt0 = (zz_sim0 - zz)/vz -dt1 = (zz_sim1 - zz)/vz -vx = ux/gamma -vy = uy/gamma -xx = xx + dt0*vx -yy = yy + dt1*vy - -print(f'Error in x position is {abs(np.abs((xx - xx_sim)/xx))}, which should be < 0.02') -print(f'Error in y position is {abs(np.abs((yy - yy_sim)/yy))}, which should be < 0.02') -print(f'Error in x velocity is {abs(np.abs((ux - ux_sim)/ux))}, which should be < 0.002') -print(f'Error in y velocity is {abs(np.abs((uy - uy_sim)/uy))}, which should be < 0.002') +dt0 = (zz_sim0 - zz) / vz +dt1 = (zz_sim1 - zz) / vz +vx = ux / gamma +vy = uy / gamma +xx = xx + dt0 * vx +yy = yy + dt1 * vy + +print(f"Error in x position is {abs(np.abs((xx - xx_sim)/xx))}, which should be < 0.02") +print(f"Error in y position is {abs(np.abs((yy - yy_sim)/yy))}, which should be < 0.02") +print( + f"Error in x velocity is {abs(np.abs((ux - ux_sim)/ux))}, which should be < 0.002" +) +print( + f"Error in y velocity is {abs(np.abs((uy - uy_sim)/uy))}, which should be < 0.002" +) if plasma_lens_lengths[0] < 0.01: # The shorter lens requires a larger tolerance 
since @@ -158,10 +181,18 @@ def applylens(x0, vx0, vz0, gamma, lens_length, lens_strength): position_tolerance = 0.02 velocity_tolerance = 0.002 -assert abs(np.abs((xx - xx_sim)/xx)) < position_tolerance, Exception('error in x particle position') -assert abs(np.abs((yy - yy_sim)/yy)) < position_tolerance, Exception('error in y particle position') -assert abs(np.abs((ux - ux_sim)/ux)) < velocity_tolerance, Exception('error in x particle velocity') -assert abs(np.abs((uy - uy_sim)/uy)) < velocity_tolerance, Exception('error in y particle velocity') +assert abs(np.abs((xx - xx_sim) / xx)) < position_tolerance, Exception( + "error in x particle position" +) +assert abs(np.abs((yy - yy_sim) / yy)) < position_tolerance, Exception( + "error in y particle position" +) +assert abs(np.abs((ux - ux_sim) / ux)) < velocity_tolerance, Exception( + "error in x particle velocity" +) +assert abs(np.abs((uy - uy_sim) / uy)) < velocity_tolerance, Exception( + "error in y particle velocity" +) test_name = os.path.split(os.getcwd())[1] # The PICMI and native input versions of `inputs_3d` run the same test, so diff --git a/Examples/Tests/pml/analysis_pml_ckc.py b/Examples/Tests/pml/analysis_pml_ckc.py index c4b9d54647d..4e6bff076c7 100755 --- a/Examples/Tests/pml/analysis_pml_ckc.py +++ b/Examples/Tests/pml/analysis_pml_ckc.py @@ -13,9 +13,10 @@ import numpy as np import scipy.constants as scc +import yt -import yt ; yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +yt.funcs.mylog.setLevel(0) +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI filename = sys.argv[1] @@ -28,31 +29,33 @@ ########################## ### FINAL LASER ENERGY ### ########################## -ds = yt.load( filename ) -all_data_level_0 = ds.covering_grid(level=0,left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) -Bx = all_data_level_0['boxlib', 'Bx'].v.squeeze() -By = all_data_level_0['boxlib', 'By'].v.squeeze() -Bz = 
all_data_level_0['boxlib', 'Bz'].v.squeeze() -Ex = all_data_level_0['boxlib', 'Ex'].v.squeeze() -Ey = all_data_level_0['boxlib', 'Ey'].v.squeeze() -Ez = all_data_level_0['boxlib', 'Ez'].v.squeeze() -energyE = np.sum(scc.epsilon_0/2*(Ex**2+Ey**2+Ez**2)) -energyB = np.sum(1./scc.mu_0/2*(Bx**2+By**2+Bz**2)) +ds = yt.load(filename) +all_data_level_0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +Bx = all_data_level_0["boxlib", "Bx"].v.squeeze() +By = all_data_level_0["boxlib", "By"].v.squeeze() +Bz = all_data_level_0["boxlib", "Bz"].v.squeeze() +Ex = all_data_level_0["boxlib", "Ex"].v.squeeze() +Ey = all_data_level_0["boxlib", "Ey"].v.squeeze() +Ez = all_data_level_0["boxlib", "Ez"].v.squeeze() +energyE = np.sum(scc.epsilon_0 / 2 * (Ex**2 + Ey**2 + Ez**2)) +energyB = np.sum(1.0 / scc.mu_0 / 2 * (Bx**2 + By**2 + Bz**2)) energy_end = energyE + energyB -Reflectivity = energy_end/energy_start +Reflectivity = energy_end / energy_start Reflectivity_theory = 1.8015e-06 -print("Reflectivity: %s" %Reflectivity) -print("Reflectivity_theory: %s" %Reflectivity_theory) +print("Reflectivity: %s" % Reflectivity) +print("Reflectivity_theory: %s" % Reflectivity_theory) -error_rel = abs(Reflectivity-Reflectivity_theory) / Reflectivity_theory -tolerance_rel = 5./100 +error_rel = abs(Reflectivity - Reflectivity_theory) / Reflectivity_theory +tolerance_rel = 5.0 / 100 print("error_rel : " + str(error_rel)) print("tolerance_rel: " + str(tolerance_rel)) -assert( error_rel < tolerance_rel ) +assert error_rel < tolerance_rel test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/pml/analysis_pml_psatd.py b/Examples/Tests/pml/analysis_pml_psatd.py index 50d0b2ac1c1..de2f48810e4 100755 --- a/Examples/Tests/pml/analysis_pml_psatd.py +++ b/Examples/Tests/pml/analysis_pml_psatd.py @@ -13,9 +13,10 @@ import numpy as np import scipy.constants as scc +import yt -import yt ; 
yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +yt.funcs.mylog.setLevel(0) +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI filename = sys.argv[1] @@ -24,21 +25,23 @@ # Initial laser energy (at iteration 50) if galilean: - filename_init = 'pml_x_galilean_plt000050' + filename_init = "pml_x_galilean_plt000050" energy_start = 4.439376199524034e-08 else: - filename_init = 'pml_x_psatd_plt000050' + filename_init = "pml_x_psatd_plt000050" energy_start = 7.282940107273505e-08 # Check consistency of field energy diagnostics with initial energy above ds = yt.load(filename_init) -all_data_level_0 = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) -Bx = all_data_level_0['boxlib', 'Bx'].v.squeeze() -By = all_data_level_0['boxlib', 'By'].v.squeeze() -Bz = all_data_level_0['boxlib', 'Bz'].v.squeeze() -Ex = all_data_level_0['boxlib', 'Ex'].v.squeeze() -Ey = all_data_level_0['boxlib', 'Ey'].v.squeeze() -Ez = all_data_level_0['boxlib', 'Ez'].v.squeeze() +all_data_level_0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +Bx = all_data_level_0["boxlib", "Bx"].v.squeeze() +By = all_data_level_0["boxlib", "By"].v.squeeze() +Bz = all_data_level_0["boxlib", "Bz"].v.squeeze() +Ex = all_data_level_0["boxlib", "Ex"].v.squeeze() +Ey = all_data_level_0["boxlib", "Ey"].v.squeeze() +Ez = all_data_level_0["boxlib", "Ez"].v.squeeze() energyE = np.sum(0.5 * scc.epsilon_0 * (Ex**2 + Ey**2 + Ez**2)) energyB = np.sum(0.5 / scc.mu_0 * (Bx**2 + By**2 + Bz**2)) energy_start_diags = energyE + energyB @@ -47,17 +50,19 @@ print("energy_start expected = " + str(energy_start)) print("energy_start_diags = " + str(energy_start_diags)) print("relative error = " + str(error)) -assert (error < tolerance) +assert error < tolerance # Final laser energy ds = yt.load(filename) -all_data_level_0 = ds.covering_grid(level=0,left_edge=ds.domain_left_edge, 
dims=ds.domain_dimensions) -Bx = all_data_level_0['boxlib', 'Bx'].v.squeeze() -By = all_data_level_0['boxlib', 'By'].v.squeeze() -Bz = all_data_level_0['boxlib', 'Bz'].v.squeeze() -Ex = all_data_level_0['boxlib', 'Ex'].v.squeeze() -Ey = all_data_level_0['boxlib', 'Ey'].v.squeeze() -Ez = all_data_level_0['boxlib', 'Ez'].v.squeeze() +all_data_level_0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +Bx = all_data_level_0["boxlib", "Bx"].v.squeeze() +By = all_data_level_0["boxlib", "By"].v.squeeze() +Bz = all_data_level_0["boxlib", "Bz"].v.squeeze() +Ex = all_data_level_0["boxlib", "Ex"].v.squeeze() +Ey = all_data_level_0["boxlib", "Ey"].v.squeeze() +Ez = all_data_level_0["boxlib", "Ez"].v.squeeze() energyE = np.sum(0.5 * scc.epsilon_0 * (Ex**2 + Ey**2 + Ez**2)) energyB = np.sum(0.5 / scc.mu_0 * (Bx**2 + By**2 + Bz**2)) energy_end = energyE + energyB @@ -68,10 +73,10 @@ print("reflectivity = " + str(reflectivity)) print("reflectivity_max = " + str(reflectivity_max)) -assert(reflectivity < reflectivity_max) +assert reflectivity < reflectivity_max # Check restart data v. original data -sys.path.insert(0, '../../../../warpx/Examples/') +sys.path.insert(0, "../../../../warpx/Examples/") from analysis_default_restart import check_restart if not galilean: diff --git a/Examples/Tests/pml/analysis_pml_psatd_rz.py b/Examples/Tests/pml/analysis_pml_psatd_rz.py index d4f1ff42ae6..2d9d58734a1 100755 --- a/Examples/Tests/pml/analysis_pml_psatd_rz.py +++ b/Examples/Tests/pml/analysis_pml_psatd_rz.py @@ -15,6 +15,7 @@ most of the pulse escapes the radial boundary. If the PML fails, the pulse will remain with in the domain. 
""" + import os import sys @@ -22,33 +23,36 @@ import yt yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # Open plotfile specified in command line filename = sys.argv[1] -ds = yt.load( filename ) +ds = yt.load(filename) # yt 4.0+ has rounding issues with our domain data: # RuntimeError: yt attempted to read outside the boundaries # of a non-periodic domain along dimension 0. -if 'force_periodicity' in dir(ds): ds.force_periodicity() +if "force_periodicity" in dir(ds): + ds.force_periodicity() # Check that the field is low enough -ad0 = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) -Ex_array = ad0['boxlib', 'Er'].to_ndarray() -Ez_array = ad0['boxlib', 'Ez'].to_ndarray() +ad0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +Ex_array = ad0["boxlib", "Er"].to_ndarray() +Ez_array = ad0["boxlib", "Ez"].to_ndarray() max_Ex = np.abs(Ex_array).max() max_Ez = np.abs(Ez_array).max() -print( f'max Ex = {max_Ex}' ) -print( f'max Ez = {max_Ez}' ) +print(f"max Ex = {max_Ex}") +print(f"max Ez = {max_Ez}") max_Efield = max(max_Ex, max_Ez) # This tolerance was obtained empirically. As the simulation progresses, the field energy is leaking # out through PML so that the max field diminishes with time. When the PML is working properly, # the field level falls below 2 at the end of the simulation. -tolerance_abs = 2. 
-print('tolerance_abs: ' + str(tolerance_abs)) +tolerance_abs = 2.0 +print("tolerance_abs: " + str(tolerance_abs)) assert max_Efield < tolerance_abs test_name = os.path.split(os.getcwd())[1] diff --git a/Examples/Tests/pml/analysis_pml_yee.py b/Examples/Tests/pml/analysis_pml_yee.py index f10b281c544..962036bad0e 100755 --- a/Examples/Tests/pml/analysis_pml_yee.py +++ b/Examples/Tests/pml/analysis_pml_yee.py @@ -13,9 +13,10 @@ import numpy as np import scipy.constants as scc +import yt -import yt ; yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +yt.funcs.mylog.setLevel(0) +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI filename = sys.argv[1] @@ -28,34 +29,36 @@ ########################## ### FINAL LASER ENERGY ### ########################## -ds = yt.load( filename ) -all_data_level_0 = ds.covering_grid(level=0,left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) -Bx = all_data_level_0['boxlib', 'Bx'].v.squeeze() -By = all_data_level_0['boxlib', 'By'].v.squeeze() -Bz = all_data_level_0['boxlib', 'Bz'].v.squeeze() -Ex = all_data_level_0['boxlib', 'Ex'].v.squeeze() -Ey = all_data_level_0['boxlib', 'Ey'].v.squeeze() -Ez = all_data_level_0['boxlib', 'Ez'].v.squeeze() -energyE = np.sum(scc.epsilon_0/2*(Ex**2+Ey**2+Ez**2)) -energyB = np.sum(1./scc.mu_0/2*(Bx**2+By**2+Bz**2)) +ds = yt.load(filename) +all_data_level_0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +Bx = all_data_level_0["boxlib", "Bx"].v.squeeze() +By = all_data_level_0["boxlib", "By"].v.squeeze() +Bz = all_data_level_0["boxlib", "Bz"].v.squeeze() +Ex = all_data_level_0["boxlib", "Ex"].v.squeeze() +Ey = all_data_level_0["boxlib", "Ey"].v.squeeze() +Ez = all_data_level_0["boxlib", "Ez"].v.squeeze() +energyE = np.sum(scc.epsilon_0 / 2 * (Ex**2 + Ey**2 + Ez**2)) +energyB = np.sum(1.0 / scc.mu_0 / 2 * (Bx**2 + By**2 + Bz**2)) energy_end = energyE + energyB -Reflectivity = 
energy_end/energy_start +Reflectivity = energy_end / energy_start Reflectivity_theory = 5.683000058954201e-07 -print("Reflectivity: %s" %Reflectivity) -print("Reflectivity_theory: %s" %Reflectivity_theory) +print("Reflectivity: %s" % Reflectivity) +print("Reflectivity_theory: %s" % Reflectivity_theory) -error_rel = abs(Reflectivity-Reflectivity_theory) / Reflectivity_theory -tolerance_rel = 5./100 +error_rel = abs(Reflectivity - Reflectivity_theory) / Reflectivity_theory +tolerance_rel = 5.0 / 100 print("error_rel : " + str(error_rel)) print("tolerance_rel: " + str(tolerance_rel)) -assert( error_rel < tolerance_rel ) +assert error_rel < tolerance_rel # Check restart data v. original data -sys.path.insert(0, '../../../../warpx/Examples/') +sys.path.insert(0, "../../../../warpx/Examples/") from analysis_default_restart import check_restart check_restart(filename) diff --git a/Examples/Tests/point_of_contact_EB/analysis.py b/Examples/Tests/point_of_contact_EB/analysis.py index 042fc811a62..9fb097f99d4 100755 --- a/Examples/Tests/point_of_contact_EB/analysis.py +++ b/Examples/Tests/point_of_contact_EB/analysis.py @@ -7,6 +7,7 @@ The electron is initially at: (-0.25,0,0) and moves with a normalized momentum: (1,0.5,0) An input file PICMI_inputs_3d.py is used. 
""" + import os import sys @@ -15,56 +16,84 @@ from openpmd_viewer import OpenPMDTimeSeries yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # Open plotfile specified in command line filename = sys.argv[1] test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename, output_format='openpmd') +checksumAPI.evaluate_checksum(test_name, filename, output_format="openpmd") -ts_scraping = OpenPMDTimeSeries('./diags/diag2/particles_at_eb/') +ts_scraping = OpenPMDTimeSeries("./diags/diag2/particles_at_eb/") -it=ts_scraping.iterations -step_scraped, delta, x, y, z, nx, ny, nz=ts_scraping.get_particle( ['stepScraped','deltaTimeScraped','x','y','z', 'nx', 'ny', 'nz'], species='electron', iteration=it ) -delta_reduced=delta[0]*1e10 +it = ts_scraping.iterations +step_scraped, delta, x, y, z, nx, ny, nz = ts_scraping.get_particle( + ["stepScraped", "deltaTimeScraped", "x", "y", "z", "nx", "ny", "nz"], + species="electron", + iteration=it, +) +delta_reduced = delta[0] * 1e10 # Analytical results calculated -x_analytic=-0.1983 -y_analytic=0.02584 -z_analytic=0.0000 -nx_analytic=-0.99 -ny_analytic=0.13 -nz_analytic=0.0 +x_analytic = -0.1983 +y_analytic = 0.02584 +z_analytic = 0.0000 +nx_analytic = -0.99 +ny_analytic = 0.13 +nz_analytic = 0.0 -#result obtained by analysis of simulations -step_ref=3 -delta_reduced_ref=0.59 +# result obtained by analysis of simulations +step_ref = 3 +delta_reduced_ref = 0.59 -print('NUMERICAL coordinates of the point of contact:') -print('step_scraped=%d, time_stamp=%5.4f e-10, x=%5.4f, y=%5.4f, z=%5.4f, nx=%5.4f, ny=%5.4f, nz=%5.4f' % (step_scraped[0],delta_reduced,x[0], y[0], z[0], nx[0], ny[0], nz[0])) -print('\n') -print('ANALYTICAL coordinates of the point of contact:') -print('step_scraped=%d, time_stamp=%5.4f e-10, x=%5.4f, y=%5.4f, z=%5.4f, nx=%5.4f, ny=%5.4f, nz=%5.4f' % (step_ref, 
delta_reduced_ref, x_analytic, y_analytic, z_analytic, nx_analytic, ny_analytic, nz_analytic)) +print("NUMERICAL coordinates of the point of contact:") +print( + "step_scraped=%d, time_stamp=%5.4f e-10, x=%5.4f, y=%5.4f, z=%5.4f, nx=%5.4f, ny=%5.4f, nz=%5.4f" + % (step_scraped[0], delta_reduced, x[0], y[0], z[0], nx[0], ny[0], nz[0]) +) +print("\n") +print("ANALYTICAL coordinates of the point of contact:") +print( + "step_scraped=%d, time_stamp=%5.4f e-10, x=%5.4f, y=%5.4f, z=%5.4f, nx=%5.4f, ny=%5.4f, nz=%5.4f" + % ( + step_ref, + delta_reduced_ref, + x_analytic, + y_analytic, + z_analytic, + nx_analytic, + ny_analytic, + nz_analytic, + ) +) -tolerance=0.001 -tolerance_t=0.01 -tolerance_n=0.01 -print("tolerance = "+ str(tolerance *100) + '%') -print("tolerance for the time = "+ str(tolerance_t *100) + '%') -print("tolerance for the normal components = "+ str(tolerance_n *100) + '%') +tolerance = 0.001 +tolerance_t = 0.01 +tolerance_n = 0.01 +print("tolerance = " + str(tolerance * 100) + "%") +print("tolerance for the time = " + str(tolerance_t * 100) + "%") +print("tolerance for the normal components = " + str(tolerance_n * 100) + "%") -diff_step=np.abs((step_scraped[0]-step_ref)/step_ref) -diff_delta=np.abs((delta_reduced-delta_reduced_ref)/delta_reduced_ref) -diff_x=np.abs((x[0]-x_analytic)/x_analytic) -diff_y=np.abs((y[0]-y_analytic)/y_analytic) -diff_nx=np.abs((nx[0]-nx_analytic)/nx_analytic) -diff_ny=np.abs((ny[0]-ny_analytic)/ny_analytic) +diff_step = np.abs((step_scraped[0] - step_ref) / step_ref) +diff_delta = np.abs((delta_reduced - delta_reduced_ref) / delta_reduced_ref) +diff_x = np.abs((x[0] - x_analytic) / x_analytic) +diff_y = np.abs((y[0] - y_analytic) / y_analytic) +diff_nx = np.abs((nx[0] - nx_analytic) / nx_analytic) +diff_ny = np.abs((ny[0] - ny_analytic) / ny_analytic) -print("percentage error for x = %5.4f %%" %(diff_x *100)) -print("percentage error for y = %5.4f %%" %(diff_y *100)) -print("percentage error for nx = %5.2f %%" %(diff_nx *100)) 
-print("percentage error for ny = %5.2f %%" %(diff_ny *100)) -print("nz = %5.2f " %(nz[0])) +print("percentage error for x = %5.4f %%" % (diff_x * 100)) +print("percentage error for y = %5.4f %%" % (diff_y * 100)) +print("percentage error for nx = %5.2f %%" % (diff_nx * 100)) +print("percentage error for ny = %5.2f %%" % (diff_ny * 100)) +print("nz = %5.2f " % (nz[0])) -assert (diff_x < tolerance) and (diff_y < tolerance) and (np.abs(z[0]) < 1e-8) and (diff_step < 1e-8) and (diff_delta < tolerance_t) and (diff_nx < tolerance_n) and (diff_ny < tolerance_n) and (np.abs(nz) < 1e-8) , 'Test point_of_contact did not pass' +assert ( + (diff_x < tolerance) + and (diff_y < tolerance) + and (np.abs(z[0]) < 1e-8) + and (diff_step < 1e-8) + and (diff_delta < tolerance_t) + and (diff_nx < tolerance_n) + and (diff_ny < tolerance_n) + and (np.abs(nz) < 1e-8) +), "Test point_of_contact did not pass" diff --git a/Examples/Tests/python_wrappers/PICMI_inputs_2d.py b/Examples/Tests/python_wrappers/PICMI_inputs_2d.py index db1cc7dcad8..c3aa9eac8b0 100755 --- a/Examples/Tests/python_wrappers/PICMI_inputs_2d.py +++ b/Examples/Tests/python_wrappers/PICMI_inputs_2d.py @@ -14,10 +14,10 @@ nz = 128 # Domain -xmin = 0.e-6 -zmin = 0.e-6 -xmax = 50.e-6 -zmax = 50.e-6 +xmin = 0.0e-6 +zmin = 0.0e-6 +xmax = 50.0e-6 +zmax = 50.0e-6 # Cell size dx = (xmax - xmin) / nx @@ -30,7 +30,7 @@ # PML nxpml = 10 nzpml = 10 -field_boundary = ['open', 'open'] +field_boundary = ["open", "open"] # Spectral order nox = 8 @@ -41,136 +41,254 @@ nzg = 8 # Initialize grid -grid = picmi.Cartesian2DGrid(number_of_cells = [nx,nz], - lower_bound = [xmin,zmin], - upper_bound = [xmax,zmax], - lower_boundary_conditions = field_boundary, - upper_boundary_conditions = field_boundary, - guard_cells = [nxg,nzg], - moving_window_velocity = [0.,0.,0], - warpx_max_grid_size_x = max_grid_size_x, - warpx_max_grid_size_y = max_grid_size_z) +grid = picmi.Cartesian2DGrid( + number_of_cells=[nx, nz], + lower_bound=[xmin, zmin], + 
upper_bound=[xmax, zmax], + lower_boundary_conditions=field_boundary, + upper_boundary_conditions=field_boundary, + guard_cells=[nxg, nzg], + moving_window_velocity=[0.0, 0.0, 0], + warpx_max_grid_size_x=max_grid_size_x, + warpx_max_grid_size_y=max_grid_size_z, +) # Initialize field solver -solver = picmi.ElectromagneticSolver(grid=grid, cfl=0.95, method='PSATD', - stencil_order = [nox,noz], - divE_cleaning = 1, - divB_cleaning = 1, - pml_divE_cleaning = 1, - pml_divB_cleaning = 1, - warpx_psatd_update_with_rho = True) +solver = picmi.ElectromagneticSolver( + grid=grid, + cfl=0.95, + method="PSATD", + stencil_order=[nox, noz], + divE_cleaning=1, + divB_cleaning=1, + pml_divE_cleaning=1, + pml_divB_cleaning=1, + warpx_psatd_update_with_rho=True, +) # Initialize diagnostics diag_field_list = ["E", "B"] -particle_diag = picmi.ParticleDiagnostic(name = 'diag1', - period = 10, - write_dir = '.', - warpx_file_prefix = 'Python_wrappers_plt', - data_list = diag_field_list) -field_diag = picmi.FieldDiagnostic(name = 'diag1', - grid = grid, - period = 10, - write_dir = '.', - warpx_file_prefix = 'Python_wrappers_plt', - data_list = diag_field_list) +particle_diag = picmi.ParticleDiagnostic( + name="diag1", + period=10, + write_dir=".", + warpx_file_prefix="Python_wrappers_plt", + data_list=diag_field_list, +) +field_diag = picmi.FieldDiagnostic( + name="diag1", + grid=grid, + period=10, + write_dir=".", + warpx_file_prefix="Python_wrappers_plt", + data_list=diag_field_list, +) # Initialize simulation -sim = picmi.Simulation(solver = solver, - max_steps = max_steps, - verbose = 1, - particle_shape = 'cubic', - warpx_current_deposition_algo = 'direct', - warpx_particle_pusher_algo = 'boris', - warpx_field_gathering_algo = 'energy-conserving', - warpx_use_filter = 1) +sim = picmi.Simulation( + solver=solver, + max_steps=max_steps, + verbose=1, + particle_shape="cubic", + warpx_current_deposition_algo="direct", + warpx_particle_pusher_algo="boris", + 
warpx_field_gathering_algo="energy-conserving", + warpx_use_filter=1, +) # Add diagnostics to simulation sim.add_diagnostic(particle_diag) sim.add_diagnostic(field_diag) # Write input file to run with compiled version -sim.write_input_file(file_name = 'inputs_2d') +sim.write_input_file(file_name="inputs_2d") # Whether to include guard cells in data returned by Python wrappers include_ghosts = 1 + # Compute min and max of fields data def compute_minmax(data): vmax = np.abs(data).max() vmin = -vmax return vmin, vmax + # Plot fields data either in valid domain or in PML def plot_data(data, pml, title, name): - fig, ax = plt.subplots(nrows = 1, ncols = 1, gridspec_kw = dict(wspace = 0.5), figsize = [6,5]) - cax = make_axes_locatable(ax).append_axes('right', size='5%', pad='5%') - lw = 0.8 - ls = '--' + fig, ax = plt.subplots( + nrows=1, ncols=1, gridspec_kw=dict(wspace=0.5), figsize=[6, 5] + ) + cax = make_axes_locatable(ax).append_axes("right", size="5%", pad="5%") + lw = 0.8 + ls = "--" if pml: # Draw PMLs and ghost regions - ax.axvline(x = 0 , linewidth = lw, linestyle = ls) - ax.axvline(x = 0+nxg , linewidth = lw, linestyle = ls) - ax.axvline(x = -nxpml , linewidth = lw, linestyle = ls) - ax.axvline(x = nx , linewidth = lw, linestyle = ls) - ax.axvline(x = nx-nxg , linewidth = lw, linestyle = ls) - ax.axvline(x = nx+nxpml, linewidth = lw, linestyle = ls) - ax.axhline(y = 0 , linewidth = lw, linestyle = ls) - ax.axhline(y = 0+nzg , linewidth = lw, linestyle = ls) - ax.axhline(y = -nzpml , linewidth = lw, linestyle = ls) - ax.axhline(y = nz , linewidth = lw, linestyle = ls) - ax.axhline(y = nz-nzg , linewidth = lw, linestyle = ls) - ax.axhline(y = nz+nzpml, linewidth = lw, linestyle = ls) + ax.axvline(x=0, linewidth=lw, linestyle=ls) + ax.axvline(x=0 + nxg, linewidth=lw, linestyle=ls) + ax.axvline(x=-nxpml, linewidth=lw, linestyle=ls) + ax.axvline(x=nx, linewidth=lw, linestyle=ls) + ax.axvline(x=nx - nxg, linewidth=lw, linestyle=ls) + ax.axvline(x=nx + nxpml, 
linewidth=lw, linestyle=ls) + ax.axhline(y=0, linewidth=lw, linestyle=ls) + ax.axhline(y=0 + nzg, linewidth=lw, linestyle=ls) + ax.axhline(y=-nzpml, linewidth=lw, linestyle=ls) + ax.axhline(y=nz, linewidth=lw, linestyle=ls) + ax.axhline(y=nz - nzg, linewidth=lw, linestyle=ls) + ax.axhline(y=nz + nzpml, linewidth=lw, linestyle=ls) # Annotations - ax.annotate('PML', xy = (-nxpml//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center') - ax.annotate('PML', xy = (nx+nxpml//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center') - ax.annotate('PML', xy = (nx//2,-nzpml//2), rotation = 'horizontal', ha = 'center', va = 'center') - ax.annotate('PML', xy = (nx//2,nz+nzpml//2), rotation = 'horizontal', ha = 'center', va = 'center') - ax.annotate('PML ghost', xy = (nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center') - ax.annotate('PML ghost', xy = (-nxpml-nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center') - ax.annotate('PML ghost', xy = (nx-nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center') - ax.annotate('PML ghost', xy = (nx+nxpml+nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center') - ax.annotate('PML ghost', xy = (nx//2,nzg//2), rotation = 'horizontal', ha = 'center', va = 'center') - ax.annotate('PML ghost', xy = (nx//2,-nzpml-nzg//2), rotation = 'horizontal', ha = 'center', va = 'center') - ax.annotate('PML ghost', xy = (nx//2,nz-nzg//2), rotation = 'horizontal', ha = 'center', va = 'center') - ax.annotate('PML ghost', xy = (nx//2,nz+nzpml+nzg//2), rotation = 'horizontal', ha = 'center', va = 'center') + ax.annotate( + "PML", + xy=(-nxpml // 2, nz // 2), + rotation="vertical", + ha="center", + va="center", + ) + ax.annotate( + "PML", + xy=(nx + nxpml // 2, nz // 2), + rotation="vertical", + ha="center", + va="center", + ) + ax.annotate( + "PML", + xy=(nx // 2, -nzpml // 2), + rotation="horizontal", + ha="center", + va="center", + ) + ax.annotate( + "PML", + xy=(nx // 2, nz + nzpml // 2), + 
rotation="horizontal", + ha="center", + va="center", + ) + ax.annotate( + "PML ghost", + xy=(nxg // 2, nz // 2), + rotation="vertical", + ha="center", + va="center", + ) + ax.annotate( + "PML ghost", + xy=(-nxpml - nxg // 2, nz // 2), + rotation="vertical", + ha="center", + va="center", + ) + ax.annotate( + "PML ghost", + xy=(nx - nxg // 2, nz // 2), + rotation="vertical", + ha="center", + va="center", + ) + ax.annotate( + "PML ghost", + xy=(nx + nxpml + nxg // 2, nz // 2), + rotation="vertical", + ha="center", + va="center", + ) + ax.annotate( + "PML ghost", + xy=(nx // 2, nzg // 2), + rotation="horizontal", + ha="center", + va="center", + ) + ax.annotate( + "PML ghost", + xy=(nx // 2, -nzpml - nzg // 2), + rotation="horizontal", + ha="center", + va="center", + ) + ax.annotate( + "PML ghost", + xy=(nx // 2, nz - nzg // 2), + rotation="horizontal", + ha="center", + va="center", + ) + ax.annotate( + "PML ghost", + xy=(nx // 2, nz + nzpml + nzg // 2), + rotation="horizontal", + ha="center", + va="center", + ) # Set extent and sliced data - extent = np.array([-nxg-nxpml, nx+nxpml+nxg, -nzg-nzpml, nz+nzpml+nzg]) + extent = np.array( + [-nxg - nxpml, nx + nxpml + nxg, -nzg - nzpml, nz + nzpml + nzg] + ) else: # Draw ghost regions - ax.axvline(x = 0 , linewidth = lw, linestyle = ls) - ax.axvline(x = nx, linewidth = lw, linestyle = ls) - ax.axhline(y = 0 , linewidth = lw, linestyle = ls) - ax.axhline(y = nz, linewidth = lw, linestyle = ls) + ax.axvline(x=0, linewidth=lw, linestyle=ls) + ax.axvline(x=nx, linewidth=lw, linestyle=ls) + ax.axhline(y=0, linewidth=lw, linestyle=ls) + ax.axhline(y=nz, linewidth=lw, linestyle=ls) # Annotations - ax.annotate('ghost', xy = (-nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center') - ax.annotate('ghost', xy = (nx+nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center') - ax.annotate('ghost', xy = (nx//2,-nzg//2), rotation = 'horizontal', ha = 'center', va = 'center') - ax.annotate('ghost', xy = 
(nx//2,nz+nzg//2), rotation = 'horizontal', ha = 'center', va = 'center') + ax.annotate( + "ghost", + xy=(-nxg // 2, nz // 2), + rotation="vertical", + ha="center", + va="center", + ) + ax.annotate( + "ghost", + xy=(nx + nxg // 2, nz // 2), + rotation="vertical", + ha="center", + va="center", + ) + ax.annotate( + "ghost", + xy=(nx // 2, -nzg // 2), + rotation="horizontal", + ha="center", + va="center", + ) + ax.annotate( + "ghost", + xy=(nx // 2, nz + nzg // 2), + rotation="horizontal", + ha="center", + va="center", + ) # Set extent and sliced data - extent = np.array([-nxg, nx+nxg, -nzg, nz+nzg]) - X = data[:,:].transpose() + extent = np.array([-nxg, nx + nxg, -nzg, nz + nzg]) + X = data[:, :].transpose() # Min and max for colorbar vmin, vmax = compute_minmax(X) # Display data as image - im = ax.imshow(X = X, origin = 'lower', extent = extent, vmin = vmin, vmax = vmax, cmap = 'seismic') + im = ax.imshow( + X=X, origin="lower", extent=extent, vmin=vmin, vmax=vmax, cmap="seismic" + ) # Add colorbar to plot - fig.colorbar(im, cax = cax) + fig.colorbar(im, cax=cax) # Set label for x- and y-axis, set title - ax.set_xlabel('x') - ax.set_ylabel('z') + ax.set_xlabel("x") + ax.set_ylabel("z") ax.set_title(title) # Set plot title - suptitle = 'PML in (x,z), 4 grids 64 x 64' + suptitle = "PML in (x,z), 4 grids 64 x 64" plt.suptitle(suptitle) # Save figure - figname = 'figure_' + name + '.png' - fig.savefig(figname, dpi = 100) + figname = "figure_" + name + ".png" + fig.savefig(figname, dpi=100) + # Initialize fields data (unit pulse) and apply smoothing def init_data(data): - impulse_1d = np.array([1./4., 1./2., 1./4.]) + impulse_1d = np.array([1.0 / 4.0, 1.0 / 2.0, 1.0 / 4.0]) impulse = np.outer(impulse_1d, impulse_1d) - data[nx//2-1:nx//2+2,nz//2-1:nz//2+2] = impulse + data[nx // 2 - 1 : nx // 2 + 2, nz // 2 - 1 : nz // 2 + 2] = impulse + # Initialize inputs and WarpX instance sim.initialize_inputs() @@ -179,22 +297,22 @@ def init_data(data): # Get fields data using Python 
wrappers import pywarpx.fields as pwxf -Ex = pwxf.ExFPWrapper(include_ghosts = include_ghosts) -Ey = pwxf.EyFPWrapper(include_ghosts = include_ghosts) -Ez = pwxf.EzFPWrapper(include_ghosts = include_ghosts) -Bx = pwxf.BxFPWrapper(include_ghosts = include_ghosts) -By = pwxf.ByFPWrapper(include_ghosts = include_ghosts) -Bz = pwxf.BzFPWrapper(include_ghosts = include_ghosts) -F = pwxf.FFPWrapper(include_ghosts = include_ghosts) -G = pwxf.GFPWrapper(include_ghosts = include_ghosts) -Expml = pwxf.ExFPPMLWrapper(include_ghosts = include_ghosts) -Eypml = pwxf.EyFPPMLWrapper(include_ghosts = include_ghosts) -Ezpml = pwxf.EzFPPMLWrapper(include_ghosts = include_ghosts) -Bxpml = pwxf.BxFPPMLWrapper(include_ghosts = include_ghosts) -Bypml = pwxf.ByFPPMLWrapper(include_ghosts = include_ghosts) -Bzpml = pwxf.BzFPPMLWrapper(include_ghosts = include_ghosts) -Fpml = pwxf.FFPPMLWrapper(include_ghosts = include_ghosts) -Gpml = pwxf.GFPPMLWrapper(include_ghosts = include_ghosts) +Ex = pwxf.ExFPWrapper(include_ghosts=include_ghosts) +Ey = pwxf.EyFPWrapper(include_ghosts=include_ghosts) +Ez = pwxf.EzFPWrapper(include_ghosts=include_ghosts) +Bx = pwxf.BxFPWrapper(include_ghosts=include_ghosts) +By = pwxf.ByFPWrapper(include_ghosts=include_ghosts) +Bz = pwxf.BzFPWrapper(include_ghosts=include_ghosts) +F = pwxf.FFPWrapper(include_ghosts=include_ghosts) +G = pwxf.GFPWrapper(include_ghosts=include_ghosts) +Expml = pwxf.ExFPPMLWrapper(include_ghosts=include_ghosts) +Eypml = pwxf.EyFPPMLWrapper(include_ghosts=include_ghosts) +Ezpml = pwxf.EzFPPMLWrapper(include_ghosts=include_ghosts) +Bxpml = pwxf.BxFPPMLWrapper(include_ghosts=include_ghosts) +Bypml = pwxf.ByFPPMLWrapper(include_ghosts=include_ghosts) +Bzpml = pwxf.BzFPPMLWrapper(include_ghosts=include_ghosts) +Fpml = pwxf.FFPPMLWrapper(include_ghosts=include_ghosts) +Gpml = pwxf.GFPPMLWrapper(include_ghosts=include_ghosts) # Initialize fields data in valid domain init_data(Ex) @@ -210,92 +328,94 @@ def init_data(data): sim.step(max_steps) # 
Plot E -plot_data(Ex, pml = False, title = 'Ex', name = 'Ex') -plot_data(Ey, pml = False, title = 'Ey', name = 'Ey') -plot_data(Ez, pml = False, title = 'Ez', name = 'Ez') +plot_data(Ex, pml=False, title="Ex", name="Ex") +plot_data(Ey, pml=False, title="Ey", name="Ey") +plot_data(Ez, pml=False, title="Ez", name="Ez") # Plot B -plot_data(Bx, pml = False, title = 'Bx', name = 'Bx') -plot_data(By, pml = False, title = 'By', name = 'By') -plot_data(Bz, pml = False, title = 'Bz', name = 'Bz') +plot_data(Bx, pml=False, title="Bx", name="Bx") +plot_data(By, pml=False, title="By", name="By") +plot_data(Bz, pml=False, title="Bz", name="Bz") # F and G -plot_data(F, pml = False, title = 'F', name = 'F') -plot_data(G, pml = False, title = 'G', name = 'G') +plot_data(F, pml=False, title="F", name="F") +plot_data(G, pml=False, title="G", name="G") # Plot E in PML -plot_data(Expml[:,:,0], pml = True, title = 'Exy in PML', name = 'Exy') -plot_data(Expml[:,:,1], pml = True, title = 'Exz in PML', name = 'Exz') -plot_data(Expml[:,:,2], pml = True, title = 'Exx in PML', name = 'Exx') -plot_data(Eypml[:,:,0], pml = True, title = 'Eyz in PML', name = 'Eyz') -plot_data(Eypml[:,:,1], pml = True, title = 'Eyx in PML', name = 'Eyx') -plot_data(Eypml[:,:,2], pml = True, title = 'Eyy in PML', name = 'Eyy') # zero -plot_data(Ezpml[:,:,0], pml = True, title = 'Ezx in PML', name = 'Ezx') -plot_data(Ezpml[:,:,1], pml = True, title = 'Ezy in PML', name = 'Ezy') # zero -plot_data(Ezpml[:,:,2], pml = True, title = 'Ezz in PML', name = 'Ezz') +plot_data(Expml[:, :, 0], pml=True, title="Exy in PML", name="Exy") +plot_data(Expml[:, :, 1], pml=True, title="Exz in PML", name="Exz") +plot_data(Expml[:, :, 2], pml=True, title="Exx in PML", name="Exx") +plot_data(Eypml[:, :, 0], pml=True, title="Eyz in PML", name="Eyz") +plot_data(Eypml[:, :, 1], pml=True, title="Eyx in PML", name="Eyx") +plot_data(Eypml[:, :, 2], pml=True, title="Eyy in PML", name="Eyy") # zero +plot_data(Ezpml[:, :, 0], pml=True, 
title="Ezx in PML", name="Ezx") +plot_data(Ezpml[:, :, 1], pml=True, title="Ezy in PML", name="Ezy") # zero +plot_data(Ezpml[:, :, 2], pml=True, title="Ezz in PML", name="Ezz") # Plot B in PML -plot_data(Bxpml[:,:,0], pml = True, title = 'Bxy in PML', name = 'Bxy') -plot_data(Bxpml[:,:,1], pml = True, title = 'Bxz in PML', name = 'Bxz') -plot_data(Bxpml[:,:,2], pml = True, title = 'Bxx in PML', name = 'Bxx') -plot_data(Bypml[:,:,0], pml = True, title = 'Byz in PML', name = 'Byz') -plot_data(Bypml[:,:,1], pml = True, title = 'Byx in PML', name = 'Byx') -plot_data(Bypml[:,:,2], pml = True, title = 'Byy in PML', name = 'Byy') # zero -plot_data(Bzpml[:,:,0], pml = True, title = 'Bzx in PML', name = 'Bzx') -plot_data(Bzpml[:,:,1], pml = True, title = 'Bzy in PML', name = 'Bzy') # zero -plot_data(Bzpml[:,:,2], pml = True, title = 'Bzz in PML', name = 'Bzz') +plot_data(Bxpml[:, :, 0], pml=True, title="Bxy in PML", name="Bxy") +plot_data(Bxpml[:, :, 1], pml=True, title="Bxz in PML", name="Bxz") +plot_data(Bxpml[:, :, 2], pml=True, title="Bxx in PML", name="Bxx") +plot_data(Bypml[:, :, 0], pml=True, title="Byz in PML", name="Byz") +plot_data(Bypml[:, :, 1], pml=True, title="Byx in PML", name="Byx") +plot_data(Bypml[:, :, 2], pml=True, title="Byy in PML", name="Byy") # zero +plot_data(Bzpml[:, :, 0], pml=True, title="Bzx in PML", name="Bzx") +plot_data(Bzpml[:, :, 1], pml=True, title="Bzy in PML", name="Bzy") # zero +plot_data(Bzpml[:, :, 2], pml=True, title="Bzz in PML", name="Bzz") # Plot F and G in PML -plot_data(Fpml[:,:,0], pml = True, title = 'Fx in PML', name = 'Fx') -plot_data(Fpml[:,:,1], pml = True, title = 'Fy in PML', name = 'Fy') -plot_data(Fpml[:,:,2], pml = True, title = 'Fz in PML', name = 'Fz') -plot_data(Gpml[:,:,0], pml = True, title = 'Gx in PML', name = 'Gx') -plot_data(Gpml[:,:,1], pml = True, title = 'Gy in PML', name = 'Gy') -plot_data(Gpml[:,:,2], pml = True, title = 'Gz in PML', name = 'Gz') +plot_data(Fpml[:, :, 0], pml=True, title="Fx in PML", 
name="Fx") +plot_data(Fpml[:, :, 1], pml=True, title="Fy in PML", name="Fy") +plot_data(Fpml[:, :, 2], pml=True, title="Fz in PML", name="Fz") +plot_data(Gpml[:, :, 0], pml=True, title="Gx in PML", name="Gx") +plot_data(Gpml[:, :, 1], pml=True, title="Gy in PML", name="Gy") +plot_data(Gpml[:, :, 2], pml=True, title="Gz in PML", name="Gz") + # Check values with benchmarks (precomputed from the same Python arrays) def check_values(benchmark, data, rtol, atol): - passed = np.allclose(benchmark, np.sum(np.abs(data[:,:])), rtol = rtol, atol = atol) - assert(passed) + passed = np.allclose(benchmark, np.sum(np.abs(data[:, :])), rtol=rtol, atol=atol) + assert passed + rtol = 5e-08 atol = 1e-12 # E -check_values(1013263608.6369569, Ex[:,:], rtol, atol) -check_values(717278256.7957529 , Ey[:,:], rtol, atol) -check_values(717866566.5718911 , Ez[:,:], rtol, atol) +check_values(1013263608.6369569, Ex[:, :], rtol, atol) +check_values(717278256.7957529, Ey[:, :], rtol, atol) +check_values(717866566.5718911, Ez[:, :], rtol, atol) # B -check_values(3.0214509313437636, Bx[:,:], rtol, atol) -check_values(3.0242765102729985, By[:,:], rtol, atol) -check_values(3.0214509326970465, Bz[:,:], rtol, atol) +check_values(3.0214509313437636, Bx[:, :], rtol, atol) +check_values(3.0242765102729985, By[:, :], rtol, atol) +check_values(3.0214509326970465, Bz[:, :], rtol, atol) # F and G -check_values(3.0188584528062377, F[:,:], rtol, atol) -check_values(1013672631.8764204, G[:,:], rtol, atol) +check_values(3.0188584528062377, F[:, :], rtol, atol) +check_values(1013672631.8764204, G[:, :], rtol, atol) # E in PML -check_values(364287936.1526477 , Expml[:,:,0], rtol, atol) -check_values(183582352.20753333, Expml[:,:,1], rtol, atol) -check_values(190065766.41491824, Expml[:,:,2], rtol, atol) -check_values(440581907.0828975 , Eypml[:,:,0], rtol, atol) -check_values(178117294.05871135, Eypml[:,:,1], rtol, atol) -check_values(0.0 , Eypml[:,:,2], rtol, atol) -check_values(430277101.26568377, Ezpml[:,:,0], 
rtol, atol) -check_values(0.0 , Ezpml[:,:,1], rtol, atol) -check_values(190919663.2167449 , Ezpml[:,:,2], rtol, atol) +check_values(364287936.1526477, Expml[:, :, 0], rtol, atol) +check_values(183582352.20753333, Expml[:, :, 1], rtol, atol) +check_values(190065766.41491824, Expml[:, :, 2], rtol, atol) +check_values(440581907.0828975, Eypml[:, :, 0], rtol, atol) +check_values(178117294.05871135, Eypml[:, :, 1], rtol, atol) +check_values(0.0, Eypml[:, :, 2], rtol, atol) +check_values(430277101.26568377, Ezpml[:, :, 0], rtol, atol) +check_values(0.0, Ezpml[:, :, 1], rtol, atol) +check_values(190919663.2167449, Ezpml[:, :, 2], rtol, atol) # B in PML -check_values(1.0565189315366146 , Bxpml[:,:,0], rtol, atol) -check_values(0.46181913800643065, Bxpml[:,:,1], rtol, atol) -check_values(0.6849858305343736 , Bxpml[:,:,2], rtol, atol) -check_values(1.7228584190213505 , Bypml[:,:,0], rtol, atol) -check_values(0.47697332248020935, Bypml[:,:,1], rtol, atol) -check_values(0.0 , Bypml[:,:,2], rtol, atol) -check_values(1.518338068658267 , Bzpml[:,:,0], rtol, atol) -check_values(0.0 , Bzpml[:,:,1], rtol, atol) -check_values(0.6849858291863835 , Bzpml[:,:,2], rtol, atol) +check_values(1.0565189315366146, Bxpml[:, :, 0], rtol, atol) +check_values(0.46181913800643065, Bxpml[:, :, 1], rtol, atol) +check_values(0.6849858305343736, Bxpml[:, :, 2], rtol, atol) +check_values(1.7228584190213505, Bypml[:, :, 0], rtol, atol) +check_values(0.47697332248020935, Bypml[:, :, 1], rtol, atol) +check_values(0.0, Bypml[:, :, 2], rtol, atol) +check_values(1.518338068658267, Bzpml[:, :, 0], rtol, atol) +check_values(0.0, Bzpml[:, :, 1], rtol, atol) +check_values(0.6849858291863835, Bzpml[:, :, 2], rtol, atol) # F and G in PML -check_values(1.7808748509425263, Fpml[:,:,0], rtol, atol) -check_values(0.0 , Fpml[:,:,1], rtol, atol) -check_values(0.4307845604625681, Fpml[:,:,2], rtol, atol) -check_values(536552745.42701197, Gpml[:,:,0], rtol, atol) -check_values(0.0 , Gpml[:,:,1], rtol, atol) 
-check_values(196016270.97767758, Gpml[:,:,2], rtol, atol) +check_values(1.7808748509425263, Fpml[:, :, 0], rtol, atol) +check_values(0.0, Fpml[:, :, 1], rtol, atol) +check_values(0.4307845604625681, Fpml[:, :, 2], rtol, atol) +check_values(536552745.42701197, Gpml[:, :, 0], rtol, atol) +check_values(0.0, Gpml[:, :, 1], rtol, atol) +check_values(196016270.97767758, Gpml[:, :, 2], rtol, atol) diff --git a/Examples/Tests/qed/breit_wheeler/analysis_core.py b/Examples/Tests/qed/breit_wheeler/analysis_core.py index 9d961fe5732..6f5441355e8 100755 --- a/Examples/Tests/qed/breit_wheeler/analysis_core.py +++ b/Examples/Tests/qed/breit_wheeler/analysis_core.py @@ -40,223 +40,278 @@ # Tolerances -tol = 1.e-8 -tol_red = 2.e-2 +tol = 1.0e-8 +tol_red = 2.0e-2 # Physical constants (from CODATA 2018, see: https://physics.nist.gov/cuu/Constants/index.html ) -me = 9.1093837015e-31 #electron mass -c = 299792458 #speed of light -hbar = 6.62607015e-34/(2*np.pi) #reduced Plank constant -fine_structure = 7.2973525693e-3 #fine structure constant -qe = 1.602176634e-19#elementary charge -E_s = (me**2 * c**3)/(qe * hbar) #Schwinger E field -B_s = E_s/c #Schwinger B field - -mec = me*c -mec2 = mec*c -#______________ +me = 9.1093837015e-31 # electron mass +c = 299792458 # speed of light +hbar = 6.62607015e-34 / (2 * np.pi) # reduced Plank constant +fine_structure = 7.2973525693e-3 # fine structure constant +qe = 1.602176634e-19 # elementary charge +E_s = (me**2 * c**3) / (qe * hbar) # Schwinger E field +B_s = E_s / c # Schwinger B field + +mec = me * c +mec2 = mec * c +# ______________ # Initial parameters spec_names_phot = ["p1", "p2", "p3", "p4"] spec_names_ele = ["ele1", "ele2", "ele3", "ele4"] spec_names_pos = ["pos1", "pos2", "pos3", "pos4"] initial_momenta = [ - np.array([2000.0,0,0])*mec, - np.array([0.0,5000.0,0.0])*mec, - np.array([0.0,0.0,10000.0])*mec, - np.array([57735.02691896, 57735.02691896, 57735.02691896])*mec + np.array([2000.0, 0, 0]) * mec, + np.array([0.0, 5000.0, 0.0]) * 
mec, + np.array([0.0, 0.0, 10000.0]) * mec, + np.array([57735.02691896, 57735.02691896, 57735.02691896]) * mec, ] initial_particle_number = 1048576 -E_f = np.array([-2433321316961438., 973328526784575., 1459992790176863.]) +E_f = np.array([-2433321316961438.0, 973328526784575.0, 1459992790176863.0]) B_f = np.array([2857142.85714286, 4285714.28571428, 8571428.57142857]) -NNS = [128,128,128,128] #bins for energy distribution comparison. -#______________ +NNS = [128, 128, 128, 128] # bins for energy distribution comparison. +# ______________ -#Returns all the species names and if they are photon species or not + +# Returns all the species names and if they are photon species or not def get_all_species_names_and_types(): names = spec_names_phot + spec_names_ele + spec_names_pos - types = [True]*len(spec_names_phot) + [False]*(len(spec_names_ele)+len(spec_names_pos)) + types = [True] * len(spec_names_phot) + [False] * ( + len(spec_names_ele) + len(spec_names_pos) + ) return names, types + def calc_chi_gamma(p, E, B): pnorm = np.linalg.norm(p) - v = c*(p/pnorm) - EpvvecB = E + np.cross(v,B) - vdotEoverc = np.dot(v,E)/c - ff = np.sqrt(np.dot(EpvvecB,EpvvecB) - np.dot(vdotEoverc,vdotEoverc)) - gamma_phot = pnorm/mec - return gamma_phot*ff/E_s - -#Auxiliary functions + v = c * (p / pnorm) + EpvvecB = E + np.cross(v, B) + vdotEoverc = np.dot(v, E) / c + ff = np.sqrt(np.dot(EpvvecB, EpvvecB) - np.dot(vdotEoverc, vdotEoverc)) + gamma_phot = pnorm / mec + return gamma_phot * ff / E_s + + +# Auxiliary functions @np.vectorize def BW_inner(x): - return integ.quad(lambda s: np.sqrt(s)*spe.kv(1./3., 2./3. 
* s**(3./2.)), x, np.inf)[0] + return integ.quad( + lambda s: np.sqrt(s) * spe.kv(1.0 / 3.0, 2.0 / 3.0 * s ** (3.0 / 2.0)), + x, + np.inf, + )[0] + def BW_X(chi_phot, chi_ele): - div = (chi_ele*(chi_phot-chi_ele)) + div = chi_ele * (chi_phot - chi_ele) div = np.where(np.logical_and(chi_phot > chi_ele, chi_ele != 0), div, 1.0) - res = np.where(np.logical_and(chi_phot > chi_ele, chi_ele != 0), np.power(chi_phot/div, 2./3.), np.inf) + res = np.where( + np.logical_and(chi_phot > chi_ele, chi_ele != 0), + np.power(chi_phot / div, 2.0 / 3.0), + np.inf, + ) return res + def BW_F(chi_phot, chi_ele): X = BW_X(chi_phot, chi_ele) - res = np.where(np.logical_or(chi_phot == chi_ele, chi_ele == 0), 0.0, - BW_inner(X) - (2.0 - chi_phot* X**(3./2.))*spe.kv(2./3., 2./3. * X**(3./2.)) ) + res = np.where( + np.logical_or(chi_phot == chi_ele, chi_ele == 0), + 0.0, + BW_inner(X) + - (2.0 - chi_phot * X ** (3.0 / 2.0)) + * spe.kv(2.0 / 3.0, 2.0 / 3.0 * X ** (3.0 / 2.0)), + ) return res + @np.vectorize def BW_T(chi_phot): - coeff = 1./(np.pi * np.sqrt(3.) 
* (chi_phot**2)) - return coeff*integ.quad(lambda chi_ele: BW_F(chi_phot, chi_ele), 0, chi_phot)[0] + coeff = 1.0 / (np.pi * np.sqrt(3.0) * (chi_phot**2)) + return coeff * integ.quad(lambda chi_ele: BW_F(chi_phot, chi_ele), 0, chi_phot)[0] + def small_diff(vv, val): - if(val != 0.0): - return np.max(np.abs((vv - val)/val)) < tol + if val != 0.0: + return np.max(np.abs((vv - val) / val)) < tol else: return np.max(np.abs(vv)) < tol -#__________________ + + +# __________________ + # Breit-Wheeler total and differential cross sections def BW_dN_dt(chi_phot, gamma_phot): - coeff_BW = fine_structure * me*c**2/hbar - return coeff_BW*BW_T(chi_phot)*(chi_phot/gamma_phot) + coeff_BW = fine_structure * me * c**2 / hbar + return coeff_BW * BW_T(chi_phot) * (chi_phot / gamma_phot) + def BW_d2N_dt_dchi(chi_phot, gamma_phot, chi_ele): - coeff_BW = fine_structure * me*c**2/hbar - return coeff_BW*BW_F(chi_phot, chi_ele)*(gamma_phot/gamma_phot) -#__________________ + coeff_BW = fine_structure * me * c**2 / hbar + return coeff_BW * BW_F(chi_phot, chi_ele) * (gamma_phot / gamma_phot) + + +# __________________ # Individual tests -def check_number_of_pairs(particle_data, phot_name, ele_name, pos_name, chi_phot, gamma_phot, dt, particle_number): + +def check_number_of_pairs( + particle_data, + phot_name, + ele_name, + pos_name, + chi_phot, + gamma_phot, + dt, + particle_number, +): dNBW_dt_theo = BW_dN_dt(chi_phot, gamma_phot) - expected_pairs = (1.-np.exp(-dNBW_dt_theo*dt))*particle_number - expected_pairs_tolerance = 5.0*np.sqrt(expected_pairs) + expected_pairs = (1.0 - np.exp(-dNBW_dt_theo * dt)) * particle_number + expected_pairs_tolerance = 5.0 * np.sqrt(expected_pairs) n_ele = len(particle_data[ele_name]["w"]) n_pos = len(particle_data[pos_name]["w"]) n_phot = len(particle_data[phot_name]["w"]) - n_lost = initial_particle_number-n_phot - assert((n_ele == n_pos) and (n_ele == n_lost)) - assert( np.abs(n_ele-expected_pairs) < expected_pairs_tolerance) + n_lost = 
initial_particle_number - n_phot + assert (n_ele == n_pos) and (n_ele == n_lost) + assert np.abs(n_ele - expected_pairs) < expected_pairs_tolerance print(" [OK] generated pair number is within expectations") return n_lost + def check_weights(phot_data, ele_data, pos_data): - assert(np.all(phot_data["w"] == phot_data["w"][0])) - assert(np.all(ele_data["w"] == phot_data["w"][0])) - assert(np.all(pos_data["w"] == phot_data["w"][0])) + assert np.all(phot_data["w"] == phot_data["w"][0]) + assert np.all(ele_data["w"] == phot_data["w"][0]) + assert np.all(pos_data["w"] == phot_data["w"][0]) print(" [OK] particles weights are the expected ones") + def check_momenta(phot_data, ele_data, pos_data, p0, p_ele, p_pos): - assert(small_diff(phot_data["px"], p0[0])) - assert(small_diff(phot_data["py"], p0[1])) - assert(small_diff(phot_data["pz"], p0[2])) + assert small_diff(phot_data["px"], p0[0]) + assert small_diff(phot_data["py"], p0[1]) + assert small_diff(phot_data["pz"], p0[2]) print(" [OK] residual photons still have initial momentum") - pdir = p0/np.linalg.norm(p0) - assert(small_diff(ele_data["px"]/p_ele, pdir[0])) - assert(small_diff(ele_data["py"]/p_ele, pdir[1])) - assert(small_diff(ele_data["pz"]/p_ele, pdir[2])) - assert(small_diff(pos_data["px"]/p_pos, pdir[0])) - assert(small_diff(pos_data["py"]/p_pos, pdir[1])) - assert(small_diff(pos_data["pz"]/p_pos, pdir[2])) + pdir = p0 / np.linalg.norm(p0) + assert small_diff(ele_data["px"] / p_ele, pdir[0]) + assert small_diff(ele_data["py"] / p_ele, pdir[1]) + assert small_diff(ele_data["pz"] / p_ele, pdir[2]) + assert small_diff(pos_data["px"] / p_pos, pdir[0]) + assert small_diff(pos_data["py"] / p_pos, pdir[1]) + assert small_diff(pos_data["pz"] / p_pos, pdir[2]) print(" [OK] pairs move along the initial photon direction") + def check_energy(energy_phot, energy_ele, energy_pos): # Sorting the arrays is required because electrons and positrons are not # necessarily dumped in the same order. 
s_energy_ele = np.sort(energy_ele) is_energy_pos = np.sort(energy_pos)[::-1] product_energy = s_energy_ele + is_energy_pos - assert(small_diff(product_energy, energy_phot)) + assert small_diff(product_energy, energy_phot) print(" [OK] energy is conserved in each event") + def check_opt_depths(phot_data, ele_data, pos_data): data = (phot_data, ele_data, pos_data) for dd in data: # Remove the negative optical depths that correspond to photons that will decay into pairs # at the beginning of the next timestep loc, scale = st.expon.fit(dd["opt"][dd["opt"] > 0]) - assert( np.abs(loc - 0) < tol_red ) - assert( np.abs(scale - 1) < tol_red ) + assert np.abs(loc - 0) < tol_red + assert np.abs(scale - 1) < tol_red print(" [OK] optical depth distributions are still exponential") -def check_energy_distrib(energy_ele, energy_pos, gamma_phot, - chi_phot, n_lost, NN, idx, do_plot=False): - gamma_min = 1.0001 - gamma_max = gamma_phot-1.0001 - h_gamma_ele, c_gamma = np.histogram(energy_ele/mec2, bins=NN, range=[gamma_min,gamma_max]) - h_gamma_pos, _ = np.histogram(energy_pos/mec2, bins=NN, range=[gamma_min,gamma_max]) - cchi_part_min = chi_phot*(gamma_min - 1)/(gamma_phot - 2) - cchi_part_max = chi_phot*(gamma_max - 1)/(gamma_phot - 2) - - #Rudimentary integration over npoints for each bin - npoints= 20 - aux_chi = np.linspace(cchi_part_min, cchi_part_max, NN*npoints) +def check_energy_distrib( + energy_ele, energy_pos, gamma_phot, chi_phot, n_lost, NN, idx, do_plot=False +): + gamma_min = 1.0001 + gamma_max = gamma_phot - 1.0001 + h_gamma_ele, c_gamma = np.histogram( + energy_ele / mec2, bins=NN, range=[gamma_min, gamma_max] + ) + h_gamma_pos, _ = np.histogram( + energy_pos / mec2, bins=NN, range=[gamma_min, gamma_max] + ) + + cchi_part_min = chi_phot * (gamma_min - 1) / (gamma_phot - 2) + cchi_part_max = chi_phot * (gamma_max - 1) / (gamma_phot - 2) + + # Rudimentary integration over npoints for each bin + npoints = 20 + aux_chi = np.linspace(cchi_part_min, cchi_part_max, NN * 
npoints) distrib = BW_d2N_dt_dchi(chi_phot, gamma_phot, aux_chi) - distrib = np.sum(distrib.reshape(-1, npoints),1) - distrib = n_lost*distrib/np.sum(distrib) + distrib = np.sum(distrib.reshape(-1, npoints), 1) + distrib = n_lost * distrib / np.sum(distrib) - if do_plot : + if do_plot: # Visual comparison of distributions - c_gamma_centered = 0.5*(c_gamma[1:]+c_gamma[:-1]) + c_gamma_centered = 0.5 * (c_gamma[1:] + c_gamma[:-1]) plt.clf() plt.xlabel("γ_particle") plt.ylabel("N") plt.title("χ_photon = {:f}".format(chi_phot)) - plt.plot(c_gamma_centered, distrib,label="theory") - plt.plot(c_gamma_centered, h_gamma_ele,label="BW electrons") - plt.plot(c_gamma_centered, h_gamma_pos,label="BW positrons") + plt.plot(c_gamma_centered, distrib, label="theory") + plt.plot(c_gamma_centered, h_gamma_ele, label="BW electrons") + plt.plot(c_gamma_centered, h_gamma_pos, label="BW positrons") plt.legend() - plt.savefig("case_{:d}".format(idx+1)) + plt.savefig("case_{:d}".format(idx + 1)) - discr_ele = np.abs(h_gamma_ele-distrib) - discr_pos = np.abs(h_gamma_pos-distrib) + discr_ele = np.abs(h_gamma_ele - distrib) + discr_pos = np.abs(h_gamma_pos - distrib) max_discr = 5.0 * np.sqrt(distrib) - assert(np.all(discr_ele < max_discr)) - assert(np.all(discr_pos < max_discr)) + assert np.all(discr_ele < max_discr) + assert np.all(discr_pos < max_discr) print(" [OK] energy distribution is within expectations") -#__________________ -def check(dt, particle_data): +# __________________ + +def check(dt, particle_data): for idx in range(4): phot_name = spec_names_phot[idx] - ele_name = spec_names_ele[idx] - pos_name = spec_names_pos[idx] - p0 = initial_momenta[idx] + ele_name = spec_names_ele[idx] + pos_name = spec_names_pos[idx] + p0 = initial_momenta[idx] - p2_phot = p0[0]**2 + p0[1]**2 + p0[2]**2 + p2_phot = p0[0] ** 2 + p0[1] ** 2 + p0[2] ** 2 p_phot = np.sqrt(p2_phot) - energy_phot = p_phot*c + energy_phot = p_phot * c chi_phot = calc_chi_gamma(p0, E_f, B_f) - gamma_phot = 
np.linalg.norm(p0)/mec + gamma_phot = np.linalg.norm(p0) / mec - print("** Case {:d} **".format(idx+1)) + print("** Case {:d} **".format(idx + 1)) print(" initial momentum: ", p0) print(" quantum parameter: {:f}".format(chi_phot)) print(" normalized photon energy: {:f}".format(gamma_phot)) - print(" timestep: {:f} fs".format(dt*1e15)) + print(" timestep: {:f} fs".format(dt * 1e15)) phot_data = particle_data[phot_name] ele_data = particle_data[ele_name] pos_data = particle_data[pos_name] - p2_ele = ele_data["px"]**2 + ele_data["py"]**2 + ele_data["pz"]**2 + p2_ele = ele_data["px"] ** 2 + ele_data["py"] ** 2 + ele_data["pz"] ** 2 p_ele = np.sqrt(p2_ele) - energy_ele = np.sqrt(1.0 + p2_ele/mec**2 )*mec2 - p2_pos = pos_data["px"]**2 + pos_data["py"]**2 + pos_data["pz"]**2 + energy_ele = np.sqrt(1.0 + p2_ele / mec**2) * mec2 + p2_pos = pos_data["px"] ** 2 + pos_data["py"] ** 2 + pos_data["pz"] ** 2 p_pos = np.sqrt(p2_pos) - energy_pos = np.sqrt(1.0 + p2_pos/mec**2 )*mec2 - - n_lost = check_number_of_pairs(particle_data, - phot_name, ele_name, pos_name, - chi_phot, gamma_phot, dt, - initial_particle_number) + energy_pos = np.sqrt(1.0 + p2_pos / mec**2) * mec2 + + n_lost = check_number_of_pairs( + particle_data, + phot_name, + ele_name, + pos_name, + chi_phot, + gamma_phot, + dt, + initial_particle_number, + ) check_weights(phot_data, ele_data, pos_data) @@ -264,7 +319,9 @@ def check(dt, particle_data): check_energy(energy_phot, energy_ele, energy_pos) - check_energy_distrib(energy_ele, energy_pos, gamma_phot, chi_phot, n_lost, NNS[idx], idx) + check_energy_distrib( + energy_ele, energy_pos, gamma_phot, chi_phot, n_lost, NNS[idx], idx + ) check_opt_depths(phot_data, ele_data, pos_data) diff --git a/Examples/Tests/qed/breit_wheeler/analysis_opmd.py b/Examples/Tests/qed/breit_wheeler/analysis_opmd.py index 2b1fbc7038b..21b1024a665 100755 --- a/Examples/Tests/qed/breit_wheeler/analysis_opmd.py +++ b/Examples/Tests/qed/breit_wheeler/analysis_opmd.py @@ -12,8 +12,8 @@ import 
analysis_core as ac import openpmd_api as io -#sys.path.insert(1, '../../../../warpx/Regression/Checksum/') -#import checksumAPI +# sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +# import checksumAPI # This script is a frontend for the analysis routines @@ -22,17 +22,18 @@ # format and extracts the data needed for # the analysis routines. + def main(): print("Opening openPMD output") prefix = sys.argv[1] - series = io.Series(prefix+"/openpmd_%T.h5", io.Access.read_only) + series = io.Series(prefix + "/openpmd_%T.h5", io.Access.read_only) data_set_end = series.iterations[2] # get simulation time sim_time = data_set_end.time # no particles can be created on the first timestep so we have 2 timesteps in the test case, # with only the second one resulting in particle creation - dt = sim_time/2. + dt = sim_time / 2.0 # get particle data particle_data = {} @@ -46,12 +47,18 @@ def main(): px = data_set_end.particles[spec_name]["momentum"]["x"][:] py = data_set_end.particles[spec_name]["momentum"]["y"][:] pz = data_set_end.particles[spec_name]["momentum"]["z"][:] - w = data_set_end.particles[spec_name]["weighting"][io.Mesh_Record_Component.SCALAR][:] - - if is_photon : - opt = data_set_end.particles[spec_name]["opticalDepthBW"][io.Mesh_Record_Component.SCALAR][:] + w = data_set_end.particles[spec_name]["weighting"][ + io.Mesh_Record_Component.SCALAR + ][:] + + if is_photon: + opt = data_set_end.particles[spec_name]["opticalDepthBW"][ + io.Mesh_Record_Component.SCALAR + ][:] else: - opt = data_set_end.particles[spec_name]["opticalDepthQSR"][io.Mesh_Record_Component.SCALAR][:] + opt = data_set_end.particles[spec_name]["opticalDepthQSR"][ + io.Mesh_Record_Component.SCALAR + ][:] series.flush() @@ -65,8 +72,9 @@ def main(): ac.check(dt, particle_data) - #test_name = os.path.split(os.getcwd())[1] - #checksumAPI.evaluate_checksum(test_name, filename_end) + # test_name = os.path.split(os.getcwd())[1] + # checksumAPI.evaluate_checksum(test_name, filename_end) + if 
__name__ == "__main__": main() diff --git a/Examples/Tests/qed/breit_wheeler/analysis_yt.py b/Examples/Tests/qed/breit_wheeler/analysis_yt.py index e8950419f25..dbba6bfb56a 100755 --- a/Examples/Tests/qed/breit_wheeler/analysis_yt.py +++ b/Examples/Tests/qed/breit_wheeler/analysis_yt.py @@ -12,7 +12,7 @@ import yt -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import analysis_core as ac import checksumAPI @@ -22,6 +22,8 @@ # format and extracts the data needed for # the analysis routines. yt + + def main(): print("Opening yt output") filename_end = sys.argv[1] @@ -31,7 +33,7 @@ def main(): sim_time = data_set_end.current_time.to_value() # no particles can be created on the first timestep so we have 2 timesteps in the test case, # with only the second one resulting in particle creation - dt = sim_time/2. + dt = sim_time / 2.0 # get particle data all_data_end = data_set_end.all_data() @@ -42,13 +44,13 @@ def main(): spec_name = spec_name_type[0] is_photon = spec_name_type[1] data = {} - data["px"] = all_data_end[spec_name,"particle_momentum_x"].v - data["py"] = all_data_end[spec_name,"particle_momentum_y"].v - data["pz"] = all_data_end[spec_name,"particle_momentum_z"].v - data["w"] = all_data_end[spec_name,"particle_weighting"].v + data["px"] = all_data_end[spec_name, "particle_momentum_x"].v + data["py"] = all_data_end[spec_name, "particle_momentum_y"].v + data["pz"] = all_data_end[spec_name, "particle_momentum_z"].v + data["w"] = all_data_end[spec_name, "particle_weighting"].v - if is_photon : - data["opt"] = all_data_end[spec_name, "particle_opticalDepthBW"].v + if is_photon: + data["opt"] = all_data_end[spec_name, "particle_opticalDepthBW"].v else: data["opt"] = all_data_end[spec_name, "particle_opticalDepthQSR"].v @@ -59,5 +61,6 @@ def main(): test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename_end) + if __name__ == "__main__": main() diff --git 
a/Examples/Tests/qed/quantum_synchrotron/analysis.py b/Examples/Tests/qed/quantum_synchrotron/analysis.py index b1986930f36..cf60d2ee647 100755 --- a/Examples/Tests/qed/quantum_synchrotron/analysis.py +++ b/Examples/Tests/qed/quantum_synchrotron/analysis.py @@ -17,7 +17,7 @@ import scipy.stats as st import yt -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI import matplotlib.pyplot as plt @@ -48,206 +48,247 @@ # Tolerances -tol = 1.e-8 -tol_red = 1.e-2 +tol = 1.0e-8 +tol_red = 1.0e-2 # Physical constants (from CODATA 2018, see: https://physics.nist.gov/cuu/Constants/index.html ) -me = 9.1093837015e-31 #electron mass -c = 299792458 #speed of light -hbar = 6.62607015e-34/(2*np.pi) #reduced Plank constant -fine_structure = 7.2973525693e-3 #fine structure constant -qe = 1.602176634e-19#elementary charge -E_s = (me**2 * c**3)/(qe * hbar) #Schwinger E field -B_s = E_s/c #Schwinger B field - -mec = me*c -mec2 = mec*c -#______________ +me = 9.1093837015e-31 # electron mass +c = 299792458 # speed of light +hbar = 6.62607015e-34 / (2 * np.pi) # reduced Plank constant +fine_structure = 7.2973525693e-3 # fine structure constant +qe = 1.602176634e-19 # elementary charge +E_s = (me**2 * c**3) / (qe * hbar) # Schwinger E field +B_s = E_s / c # Schwinger B field + +mec = me * c +mec2 = mec * c +# ______________ # Initial parameters spec_names = ["p1", "p2", "p3", "p4"] spec_names_phot = ["qsp_1", "qsp_2", "qsp_3", "qsp_4"] initial_momenta = [ - np.array([10.0,0,0])*mec, - np.array([0.0,100.0,0.0])*mec, - np.array([0.0,0.0,1000.0])*mec, - np.array([5773.502691896, 5773.502691896, 5773.502691896])*mec + np.array([10.0, 0, 0]) * mec, + np.array([0.0, 100.0, 0.0]) * mec, + np.array([0.0, 0.0, 1000.0]) * mec, + np.array([5773.502691896, 5773.502691896, 5773.502691896]) * mec, ] -csign = [-1,-1,1,1] +csign = [-1, -1, 1, 1] initial_particle_number = 1048576 -E_f = np.array([-2433321316961438., 
973328526784575., 1459992790176863.]) +E_f = np.array([-2433321316961438.0, 973328526784575.0, 1459992790176863.0]) B_f = np.array([2857142.85714286, 4285714.28571428, 8571428.57142857]) -NNS = [64,64,64,64] #bins for energy distribution comparison. -#______________ +NNS = [64, 64, 64, 64] # bins for energy distribution comparison. +# ______________ + def calc_chi_part(p, E, B): - gamma_part = np.sqrt(1.0 + np.dot(p,p)/mec**2) - v = p/(gamma_part*me) - EpvvecB = E + np.cross(v,B) - vdotEoverc = np.dot(v,E)/c - ff = np.sqrt(np.dot(EpvvecB,EpvvecB) - np.dot(vdotEoverc,vdotEoverc)) - return gamma_part*ff/E_s - -#Auxiliary functions + gamma_part = np.sqrt(1.0 + np.dot(p, p) / mec**2) + v = p / (gamma_part * me) + EpvvecB = E + np.cross(v, B) + vdotEoverc = np.dot(v, E) / c + ff = np.sqrt(np.dot(EpvvecB, EpvvecB) - np.dot(vdotEoverc, vdotEoverc)) + return gamma_part * ff / E_s + + +# Auxiliary functions @np.vectorize def IC_inner_alternative(y): def ff(x): - return np.exp(-y * (1 + 4 * x ** 2 / 3) * np.sqrt(1 + x * x / 3)) * (9 + 36 * x ** 2 + 16 * x ** 4) / (3 + 4 * x ** 2) / np.sqrt(1 + x ** 2 / 3) + return ( + np.exp(-y * (1 + 4 * x**2 / 3) * np.sqrt(1 + x * x / 3)) + * (9 + 36 * x**2 + 16 * x**4) + / (3 + 4 * x**2) + / np.sqrt(1 + x**2 / 3) + ) + # This integration may not converge in some cases, in which case a python warning message can # be issued. This is probably not a significant issue for this test case and these warnings can # be ignored. 
- return integ.quad(ff, 0, np.inf)[0]/np.sqrt(3) + return integ.quad(ff, 0, np.inf)[0] / np.sqrt(3) + def IC_Y(chi_ele, xi): - div = (chi_ele*(1-xi)) + div = chi_ele * (1 - xi) div = np.where(np.logical_and(xi < 1, chi_ele != 0), div, 1.0) - res = (2/3)*np.where(np.logical_and(xi < 1, chi_ele != 0), xi/div, np.inf) + res = (2 / 3) * np.where(np.logical_and(xi < 1, chi_ele != 0), xi / div, np.inf) return res + def IC_S(chi_ele, xi): Y = IC_Y(chi_ele, xi) - coeff = np.sqrt(3)/2.0/np.pi + coeff = np.sqrt(3) / 2.0 / np.pi first = IC_inner_alternative(Y) - div = np.where(xi == 1, 1.0, 1.0/(1-xi) ) - res = np.where(np.logical_or(xi == 1, xi == 0), 0.0, - coeff*xi*( first + (xi**2 * spe.kv(2./3.,Y)*div ) ) ) + div = np.where(xi == 1, 1.0, 1.0 / (1 - xi)) + res = np.where( + np.logical_or(xi == 1, xi == 0), + 0.0, + coeff * xi * (first + (xi**2 * spe.kv(2.0 / 3.0, Y) * div)), + ) return res + def IC_SXI(chi_ele, xi): div = np.where(xi != 0, xi, 1.0) - return np.where(xi != 0, IC_S(chi_ele, xi)/div, np.inf) + return np.where(xi != 0, IC_S(chi_ele, xi) / div, np.inf) + @np.vectorize def IC_G(chi_ele): return integ.quad(lambda xi: IC_SXI(chi_ele, xi), 0, 1)[0] + def small_diff(vv, val): - if(val != 0.0): - return np.max(np.abs((vv - val)/val)) < tol + if val != 0.0: + return np.max(np.abs((vv - val) / val)) < tol else: return np.max(np.abs(vv)) < tol + def boris(pp, dt, charge_sign): - econst = 0.5*qe*dt*charge_sign/me - u = pp/(me) - u += econst*E_f - inv_gamma = 1/np.sqrt(1 + np.dot(u,u)/c**2) - t = econst*B_f*inv_gamma - s = 2*t/(1 + np.dot(t,t)) - u_p = u + np.cross(u,t) + econst = 0.5 * qe * dt * charge_sign / me + u = pp / (me) + u += econst * E_f + inv_gamma = 1 / np.sqrt(1 + np.dot(u, u) / c**2) + t = econst * B_f * inv_gamma + s = 2 * t / (1 + np.dot(t, t)) + u_p = u + np.cross(u, t) u += np.cross(u_p, s) - u += econst*E_f - return u *me -#__________________ + u += econst * E_f + return u * me + + +# __________________ + # Quantum Synchrotron total and differential 
cross sections def QS_dN_dt(chi_ele, gamma_ele): - coeff_IC = (2./3.) * fine_structure * me*c**2/hbar - return coeff_IC*IC_G(chi_ele)/gamma_ele + coeff_IC = (2.0 / 3.0) * fine_structure * me * c**2 / hbar + return coeff_IC * IC_G(chi_ele) / gamma_ele + def QS_d2N_dt_dchi(chi, gamma_ele, chi_phot): - coeff_IC = (2./3.) * fine_structure * me*c**2/hbar - return coeff_IC*IC_S(chi, chi_phot/chi)/chi_phot/gamma_ele -#__________________ + coeff_IC = (2.0 / 3.0) * fine_structure * me * c**2 / hbar + return coeff_IC * IC_S(chi, chi_phot / chi) / chi_phot / gamma_ele + + +# __________________ + # Get data for a species def get_spec(ytdata, specname, is_photon): - px = ytdata[specname,"particle_momentum_x"].v - pz = ytdata[specname,"particle_momentum_z"].v - py = ytdata[specname,"particle_momentum_y"].v + px = ytdata[specname, "particle_momentum_x"].v + pz = ytdata[specname, "particle_momentum_z"].v + py = ytdata[specname, "particle_momentum_y"].v - w = ytdata[specname,"particle_weighting"].v + w = ytdata[specname, "particle_weighting"].v - if (is_photon): - opt = ytdata[specname,"particle_opticalDepthBW"].v + if is_photon: + opt = ytdata[specname, "particle_opticalDepthBW"].v else: - opt = ytdata[specname,"particle_opticalDepthQSR"].v + opt = ytdata[specname, "particle_opticalDepthQSR"].v + + return {"px": px, "py": py, "pz": pz, "w": w, "opt": opt} - return {"px" : px, "py" : py, "pz" : pz, "w" : w, "opt" : opt} # Individual tests -def check_number_of_photons(ytdataset, part_name, phot_name, chi_part, gamma_part, dt, particle_number): +def check_number_of_photons( + ytdataset, part_name, phot_name, chi_part, gamma_part, dt, particle_number +): dNQS_dt_theo = QS_dN_dt(chi_part, gamma_part) - expected_photons = (1.-np.exp(-dNQS_dt_theo*dt))*particle_number - expected_photons_tolerance = 5.0*np.sqrt(expected_photons) + expected_photons = (1.0 - np.exp(-dNQS_dt_theo * dt)) * particle_number + expected_photons_tolerance = 5.0 * np.sqrt(expected_photons) n_phot = 
ytdataset.particle_type_counts[phot_name] - assert( np.abs(n_phot-expected_photons) < expected_photons_tolerance) + assert np.abs(n_phot - expected_photons) < expected_photons_tolerance print(" [OK] generated photons number is within expectations") return n_phot + def check_weights(part_data, phot_data): - assert(np.all(part_data["w"] == part_data["w"][0])) - assert(np.all(phot_data["w"] == part_data["w"][0])) + assert np.all(part_data["w"] == part_data["w"][0]) + assert np.all(phot_data["w"] == part_data["w"][0]) print(" [OK] particles weights are the expected ones") + def check_momenta(phot_data, p_phot, p0): - pdir = p0/np.linalg.norm(p0) - assert(small_diff(phot_data["px"]/p_phot, pdir[0])) - assert(small_diff(phot_data["py"]/p_phot, pdir[1])) - assert(small_diff(phot_data["pz"]/p_phot, pdir[2])) + pdir = p0 / np.linalg.norm(p0) + assert small_diff(phot_data["px"] / p_phot, pdir[0]) + assert small_diff(phot_data["py"] / p_phot, pdir[1]) + assert small_diff(phot_data["pz"] / p_phot, pdir[2]) print(" [OK] photons move along the initial particle direction") + def check_opt_depths(part_data, phot_data): data = (part_data, phot_data) for dd in data: # Remove the negative optical depths that will be # reset at the beginning of the next timestep loc, scale = st.expon.fit(dd["opt"][dd["opt"] > 0]) - assert( np.abs(loc - 0) < tol_red ) - assert( np.abs(scale - 1) < tol_red ) + assert np.abs(loc - 0) < tol_red + assert np.abs(scale - 1) < tol_red print(" [OK] optical depth distributions are still exponential") -def check_energy_distrib(gamma_phot, chi_part, - gamma_part, n_phot, NN, idx, do_plot=False): - gamma_phot_min = 1e-12*gamma_part - gamma_phot_max = gamma_part - h_log_gamma_phot, c_gamma_phot = np.histogram(gamma_phot, bins=np.logspace(np.log10(gamma_phot_min),np.log10(gamma_phot_max),NN+1)) - - cchi_phot_min = chi_part*(gamma_phot_min)/(gamma_part-1) - cchi_phot_max = chi_part*(gamma_phot_max)/(gamma_part-1) - #Rudimentary integration over npoints for each bin - 
npoints= 20 - aux_chi = np.logspace(np.log10(cchi_phot_min),np.log10(cchi_phot_max), NN*npoints) - distrib = QS_d2N_dt_dchi(chi_part, gamma_part, aux_chi)*aux_chi - distrib = np.sum(distrib.reshape(-1, npoints),1) - distrib = n_phot*distrib/np.sum(distrib) - - if do_plot : +def check_energy_distrib( + gamma_phot, chi_part, gamma_part, n_phot, NN, idx, do_plot=False +): + gamma_phot_min = 1e-12 * gamma_part + gamma_phot_max = gamma_part + h_log_gamma_phot, c_gamma_phot = np.histogram( + gamma_phot, + bins=np.logspace(np.log10(gamma_phot_min), np.log10(gamma_phot_max), NN + 1), + ) + + cchi_phot_min = chi_part * (gamma_phot_min) / (gamma_part - 1) + cchi_phot_max = chi_part * (gamma_phot_max) / (gamma_part - 1) + + # Rudimentary integration over npoints for each bin + npoints = 20 + aux_chi = np.logspace( + np.log10(cchi_phot_min), np.log10(cchi_phot_max), NN * npoints + ) + distrib = QS_d2N_dt_dchi(chi_part, gamma_part, aux_chi) * aux_chi + distrib = np.sum(distrib.reshape(-1, npoints), 1) + distrib = n_phot * distrib / np.sum(distrib) + + if do_plot: # Visual comparison of distributions - c_gamma_phot = np.exp(0.5*(np.log(c_gamma_phot[1:])+np.log(c_gamma_phot[:-1]))) + c_gamma_phot = np.exp( + 0.5 * (np.log(c_gamma_phot[1:]) + np.log(c_gamma_phot[:-1])) + ) plt.clf() fig, (ax1, ax2) = plt.subplots(1, 2) fig.suptitle("χ_particle = {:f}".format(chi_part)) - ax1.plot(c_gamma_phot, distrib,label="theory") - ax1.loglog(c_gamma_phot, h_log_gamma_phot,label="QSR photons") - ax1.set_xlim(1e-12*(gamma_part-1),gamma_part-1) - ax1.set_ylim(1,1e5) - ax2.plot(c_gamma_phot, distrib,label="theory") - ax2.semilogy(c_gamma_phot, h_log_gamma_phot,label="QSR photons") - ax2.set_ylim(1,1e5) - ax2.set_xlim(1e-12*(gamma_part-1),gamma_part-1) + ax1.plot(c_gamma_phot, distrib, label="theory") + ax1.loglog(c_gamma_phot, h_log_gamma_phot, label="QSR photons") + ax1.set_xlim(1e-12 * (gamma_part - 1), gamma_part - 1) + ax1.set_ylim(1, 1e5) + ax2.plot(c_gamma_phot, distrib, label="theory") + 
ax2.semilogy(c_gamma_phot, h_log_gamma_phot, label="QSR photons") + ax2.set_ylim(1, 1e5) + ax2.set_xlim(1e-12 * (gamma_part - 1), gamma_part - 1) ax1.set_xlabel("γ_photon") ax1.set_xlabel("N") ax2.set_xlabel("γ_photon") ax2.set_xlabel("N") plt.legend() - plt.savefig("case_{:d}".format(idx+1)) + plt.savefig("case_{:d}".format(idx + 1)) - discr = np.abs(h_log_gamma_phot-distrib) + discr = np.abs(h_log_gamma_phot - distrib) - max_discr = np.sqrt(distrib)*5.0 + max_discr = np.sqrt(distrib) * 5.0 # Use a higer tolerance for the last 8 points (this is due to limitations # of the builtin table) max_discr[-8:] *= 2.0 - assert(np.all( discr < max_discr )) + assert np.all(discr < max_discr) print(" [OK] energy distribution is within expectations") -#__________________ + +# __________________ + def check(): filename_end = sys.argv[1] @@ -256,39 +297,46 @@ def check(): sim_time = data_set_end.current_time.to_value() # no particles can be created on the first timestep so we have 2 timesteps in the test case, # with only the second one resulting in particle creation - dt = sim_time/2. 
+ dt = sim_time / 2.0 all_data_end = data_set_end.all_data() for idx in range(4): part_name = spec_names[idx] - phot_name = spec_names_phot[idx] - t_pi = initial_momenta[idx] - pm = boris(t_pi,-dt*0.5,csign[idx]) - p0 = boris(pm,dt*1.0,csign[idx]) - - p2_part = p0[0]**2 + p0[1]**2 + p0[2]**2 - energy_part = np.sqrt(mec2**2 + p2_part*c**2) - gamma_part = energy_part/mec2 + phot_name = spec_names_phot[idx] + t_pi = initial_momenta[idx] + pm = boris(t_pi, -dt * 0.5, csign[idx]) + p0 = boris(pm, dt * 1.0, csign[idx]) + + p2_part = p0[0] ** 2 + p0[1] ** 2 + p0[2] ** 2 + energy_part = np.sqrt(mec2**2 + p2_part * c**2) + gamma_part = energy_part / mec2 chi_part = calc_chi_part(p0, E_f, B_f) - print("** Case {:d} **".format(idx+1)) + print("** Case {:d} **".format(idx + 1)) print(" initial momentum: ", t_pi) print(" quantum parameter: {:f}".format(chi_part)) print(" normalized particle energy: {:f}".format(gamma_part)) - print(" timestep: {:f} fs".format(dt*1e15)) + print(" timestep: {:f} fs".format(dt * 1e15)) part_data_final = get_spec(all_data_end, part_name, is_photon=False) phot_data = get_spec(all_data_end, phot_name, is_photon=True) - p_phot = np.sqrt(phot_data["px"]**2 + phot_data["py"]**2 + phot_data["pz"]**2) - energy_phot = p_phot*c - gamma_phot = energy_phot/mec2 - - n_phot = check_number_of_photons(data_set_end, - part_name, phot_name, - chi_part, gamma_part, dt, - initial_particle_number) + p_phot = np.sqrt( + phot_data["px"] ** 2 + phot_data["py"] ** 2 + phot_data["pz"] ** 2 + ) + energy_phot = p_phot * c + gamma_phot = energy_phot / mec2 + + n_phot = check_number_of_photons( + data_set_end, + part_name, + phot_name, + chi_part, + gamma_part, + dt, + initial_particle_number, + ) check_weights(part_data_final, phot_data) @@ -303,8 +351,10 @@ def check(): test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename_end) + def main(): check() + if __name__ == "__main__": main() diff --git 
a/Examples/Tests/qed/schwinger/analysis_schwinger.py b/Examples/Tests/qed/schwinger/analysis_schwinger.py index c93c920216f..4b320cc267a 100755 --- a/Examples/Tests/qed/schwinger/analysis_schwinger.py +++ b/Examples/Tests/qed/schwinger/analysis_schwinger.py @@ -18,108 +18,145 @@ import numpy as np import yt -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # define some parameters -c = 299792458. +c = 299792458.0 m_e = 9.1093837015e-31 -e =1.602176634e-19 +e = 1.602176634e-19 hbar = 1.054571817e-34 -E_S = m_e**2*c**3/e/hbar # Schwinger field +E_S = m_e**2 * c**3 / e / hbar # Schwinger field -dV = (1.e-6)**3 # total simulation volume +dV = (1.0e-6) ** 3 # total simulation volume dt = 9.827726403e-17 filename = sys.argv[1] -Ex_test = 0. -Ey_test = 0. -Ez_test = 0. -Bx_test = 0. -By_test = 0. -Bz_test = 0. +Ex_test = 0.0 +Ey_test = 0.0 +Ez_test = 0.0 +Bx_test = 0.0 +By_test = 0.0 +Bz_test = 0.0 # Find which test we are doing -test_number = re.search( 'qed_schwinger([1234])', filename ).group(1) -if test_number == '1': +test_number = re.search("qed_schwinger([1234])", filename).group(1) +if test_number == "1": # First Schwinger test with "weak" EM field. No pair should be created. - Ex_test = 1.e16 + Ex_test = 1.0e16 Bx_test = 16792888.570516706 By_test = 5256650.141557486 Bz_test = 18363530.799561853 -elif test_number == '2': +elif test_number == "2": # Second Schwinger test with stronger EM field. Many pairs are created and a Gaussian # distribution is used to get the weights of the particles. This is the most sensitive test # because the relative std is extremely low. - Ex_test = 1.e18 + Ex_test = 1.0e18 Bx_test = 1679288857.0516706 By_test = 525665014.1557486 Bz_test = 1836353079.9561853 - dV = dV/2. 
# Schwinger is only activated in part of the simulation domain -elif test_number == '3': + dV = dV / 2.0 # Schwinger is only activated in part of the simulation domain +elif test_number == "3": # Third Schwinger test with intermediate electric field such that average created pair per cell # is 1. A Poisson distribution is used to obtain the weights of the particles. - Ey_test = 1.090934525450495e+17 -elif test_number == '4': + Ey_test = 1.090934525450495e17 +elif test_number == "4": # Fourth Schwinger test with extremely strong EM field but with E and B perpendicular and nearly # equal so that the pair production rate is fairly low. A Gaussian distribution is used in this # case. - Ez_test = 2.5e+20 - By_test = 833910140000. - dV = dV*(3./4.)**2. # Schwinger is only activated in part of the simulation domain + Ez_test = 2.5e20 + By_test = 833910140000.0 + dV = ( + dV * (3.0 / 4.0) ** 2.0 + ) # Schwinger is only activated in part of the simulation domain else: - assert(False) + assert False -def calculate_rate(Ex,Ey,Ez,Bx,By,Bz): -## Calculate theoretical pair production rate from EM field value - E_squared = Ex**2 + Ey**2 + Ez**2 - H_squared = c**2*(Bx**2 + By**2 + Bz**2) - - F = (E_squared - H_squared)/2. - G = c*(Ex*Bx + Ey*By + Ez*Bz) +def calculate_rate(Ex, Ey, Ez, Bx, By, Bz): + ## Calculate theoretical pair production rate from EM field value - epsilon = np.sqrt(np.sqrt(F**2+G**2)+F)/E_S - eta = np.sqrt(np.sqrt(F**2+G**2)-F)/E_S - - if(epsilon != 0. and eta != 0.): - return e**2*E_S**2/4./np.pi**2/c/hbar**2*epsilon*eta/np.tanh(np.pi*eta/epsilon)*np.exp(-np.pi/epsilon) - elif (epsilon == 0.): - return 0. 
+ E_squared = Ex**2 + Ey**2 + Ez**2 + H_squared = c**2 * (Bx**2 + By**2 + Bz**2) + + F = (E_squared - H_squared) / 2.0 + G = c * (Ex * Bx + Ey * By + Ez * Bz) + + epsilon = np.sqrt(np.sqrt(F**2 + G**2) + F) / E_S + eta = np.sqrt(np.sqrt(F**2 + G**2) - F) / E_S + + if epsilon != 0.0 and eta != 0.0: + return ( + e**2 + * E_S**2 + / 4.0 + / np.pi**2 + / c + / hbar**2 + * epsilon + * eta + / np.tanh(np.pi * eta / epsilon) + * np.exp(-np.pi / epsilon) + ) + elif epsilon == 0.0: + return 0.0 else: - return e**2*E_S**2/4./np.pi**2/c/hbar**2*epsilon**2/np.pi*np.exp(-np.pi/epsilon) - - -def do_analysis(Ex,Ey,Ez,Bx,By,Bz): - + return ( + e**2 + * E_S**2 + / 4.0 + / np.pi**2 + / c + / hbar**2 + * epsilon**2 + / np.pi + * np.exp(-np.pi / epsilon) + ) + + +def do_analysis(Ex, Ey, Ez, Bx, By, Bz): data_set = yt.load(filename) - expected_total_physical_pairs_created = dV*dt*calculate_rate(Ex,Ey,Ez,Bx,By,Bz) + expected_total_physical_pairs_created = ( + dV * dt * calculate_rate(Ex, Ey, Ez, Bx, By, Bz) + ) if expected_total_physical_pairs_created < 0.01: - np_ele = data_set.particle_type_counts["ele_schwinger"] if \ - "ele_schwinger" in data_set.particle_type_counts.keys() else 0 - np_pos = data_set.particle_type_counts["pos_schwinger"] if \ - "pos_schwinger" in data_set.particle_type_counts.keys() else 0 - assert(np_ele == 0 and np_pos == 0) + np_ele = ( + data_set.particle_type_counts["ele_schwinger"] + if "ele_schwinger" in data_set.particle_type_counts.keys() + else 0 + ) + np_pos = ( + data_set.particle_type_counts["pos_schwinger"] + if "pos_schwinger" in data_set.particle_type_counts.keys() + else 0 + ) + assert np_ele == 0 and np_pos == 0 ## Assert whether pairs are created or not. 
else: all_data = data_set.all_data() - ele_data = all_data["ele_schwinger",'particle_weight'] - pos_data = all_data["pos_schwinger",'particle_weight'] + ele_data = all_data["ele_schwinger", "particle_weight"] + pos_data = all_data["pos_schwinger", "particle_weight"] - std_total_physical_pairs_created = np.sqrt(expected_total_physical_pairs_created) + std_total_physical_pairs_created = np.sqrt( + expected_total_physical_pairs_created + ) # Sorting the arrays is required because electrons and positrons are not necessarily # dumped in the same order. - assert(np.array_equal(np.sort(ele_data),np.sort(pos_data))) + assert np.array_equal(np.sort(ele_data), np.sort(pos_data)) # 5 sigma test that has an intrinsic probability to fail of 1 over ~2 millions - error = np.abs(np.sum(ele_data)-expected_total_physical_pairs_created) - print("difference between expected and actual number of pairs created: " + str(error)) - print("tolerance: " + str(5*std_total_physical_pairs_created)) - assert(error<5*std_total_physical_pairs_created) + error = np.abs(np.sum(ele_data) - expected_total_physical_pairs_created) + print( + "difference between expected and actual number of pairs created: " + + str(error) + ) + print("tolerance: " + str(5 * std_total_physical_pairs_created)) + assert error < 5 * std_total_physical_pairs_created + do_analysis(Ex_test, Ey_test, Ez_test, Bx_test, By_test, Bz_test) diff --git a/Examples/Tests/radiation_reaction/test_const_B_analytical/analysis_classicalRR.py b/Examples/Tests/radiation_reaction/test_const_B_analytical/analysis_classicalRR.py index 93e814759c0..e24129d3e38 100755 --- a/Examples/Tests/radiation_reaction/test_const_B_analytical/analysis_classicalRR.py +++ b/Examples/Tests/radiation_reaction/test_const_B_analytical/analysis_classicalRR.py @@ -36,68 +36,70 @@ import numpy as np import yt -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI -#Input filename 
+# Input filename inputname = "inputs" -#________________________________________ +# ________________________________________ -#Physical constants -c = 299792458. +# Physical constants +c = 299792458.0 m_e = 9.1093837015e-31 q_0 = 1.602176634e-19 classical_electron_radius = 2.81794e-15 reference_length = 1.0e-6 very_small_dot_product = 1.0e-4 very_small_weight = 1.0e-8 -#________________________________________ +# ________________________________________ -#Sim box data +# Sim box data sim_size = 0.8e-6 resolution = 64 steps = 64 -init_pos = np.array([0.,0.,0.]) -#________________________________________ - -#Momentum vals -p_aux_0 = np.array([2.,3.,6.]) -p_aux_1 = np.array([1,0,0]) -p_aux_2 = np.array([0,1,0]) -Q, _ = np.linalg.qr(np.column_stack( [p_aux_0, p_aux_1, p_aux_2] )) #Gram-Schmidt -p_0 = -Q[:,0] -p_1 = -Q[:,1] -p_2 = -Q[:,2] - -p_vals = [50,200,1000] -#________________________________________ - -#Field val +init_pos = np.array([0.0, 0.0, 0.0]) +# ________________________________________ + +# Momentum vals +p_aux_0 = np.array([2.0, 3.0, 6.0]) +p_aux_1 = np.array([1, 0, 0]) +p_aux_2 = np.array([0, 1, 0]) +Q, _ = np.linalg.qr(np.column_stack([p_aux_0, p_aux_1, p_aux_2])) # Gram-Schmidt +p_0 = -Q[:, 0] +p_1 = -Q[:, 1] +p_2 = -Q[:, 2] + +p_vals = [50, 200, 1000] +# ________________________________________ + +# Field val B_val_norm = 300 -B_val = B_val_norm*m_e * 2.0 * np.pi * c /q_0/reference_length +B_val = B_val_norm * m_e * 2.0 * np.pi * c / q_0 / reference_length B = p_0 * B_val -#________________________________________ +# ________________________________________ -#Tolerance +# Tolerance tolerance_rel = 0.05 -#________________________________________ +# ________________________________________ -#tau_c -omega_c = q_0*B_val/m_e -t0 = (2./3.)*classical_electron_radius/c -tau_c = 1.0/omega_c/omega_c/t0 -#________________________________________ +# tau_c +omega_c = q_0 * B_val / m_e +t0 = (2.0 / 3.0) * classical_electron_radius / c +tau_c = 1.0 / omega_c / 
omega_c / t0 +# ________________________________________ -#Simulation case struct +# Simulation case struct class sim_case: def __init__(self, _name, _init_mom, _type): self.name = _name self.init_mom = _init_mom self.type = _type -#________________________________________ -#All cases + +# ________________________________________ + +# All cases cases = [ sim_case("ele_para0", p_0 * p_vals[2], "-q_e"), sim_case("ele_perp0", p_1 * p_vals[0], "-q_e"), @@ -105,21 +107,25 @@ def __init__(self, _name, _init_mom, _type): sim_case("ele_perp2", p_1 * p_vals[2], "-q_e"), sim_case("pos_perp2", p_1 * p_vals[2], "q_e"), ] -#________________________________________ +# ________________________________________ + + +# Auxiliary functions +def gamma(p): + return np.sqrt(1.0 + np.dot(p, p)) -#Auxiliary functions -def gamma(p) : - return np.sqrt(1.0 + np.dot(p,p)) def exp_res(cc, time): if np.all(np.linalg.norm(np.cross(cc.init_mom, B)) < very_small_dot_product): return gamma(cc.init_mom) - else : - tt = time/tau_c + else: + tt = time / tau_c g0 = gamma(cc.init_mom) - C = -0.5 * np.log((g0+1)/(g0-1)) - return 1.0/np.tanh(tt - C) -#________________________________________ + C = -0.5 * np.log((g0 + 1) / (g0 - 1)) + return 1.0 / np.tanh(tt - C) + + +# ________________________________________ def check(): @@ -128,34 +134,41 @@ def check(): sim_time = data_set_end.current_time.to_value() - #simulation results - all_data = data_set_end.all_data() + # simulation results + all_data = data_set_end.all_data() spec_names = [cc.name for cc in cases] - #All momenta - res_mom = np.array([np.array([ - all_data[sp, 'particle_momentum_x'].v[0], - all_data[sp, 'particle_momentum_y'].v[0], - all_data[sp, 'particle_momentum_z'].v[0]]) - for sp in spec_names]) + # All momenta + res_mom = np.array( + [ + np.array( + [ + all_data[sp, "particle_momentum_x"].v[0], + all_data[sp, "particle_momentum_y"].v[0], + all_data[sp, "particle_momentum_z"].v[0], + ] + ) + for sp in spec_names + ] + ) for cc in zip(cases, 
res_mom): - end_gamma = gamma(cc[1]/m_e/c) + end_gamma = gamma(cc[1] / m_e / c) exp_gamma = exp_res(cc[0], sim_time) - error_rel = np.abs(end_gamma-exp_gamma)/exp_gamma + error_rel = np.abs(end_gamma - exp_gamma) / exp_gamma print("error_rel : " + str(error_rel)) print("tolerance_rel: " + str(tolerance_rel)) - assert( error_rel < tolerance_rel ) + assert error_rel < tolerance_rel test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) -def generate(): - with open(inputname,'w') as f: +def generate(): + with open(inputname, "w") as f: f.write("#Automatically generated inputfile\n") f.write("#Run check.py without arguments to regenerate\n") f.write("#\n\n") @@ -187,21 +200,31 @@ def generate(): f.write("{}.charge = {}\n".format(cc.name, cc.type)) f.write("{}.mass = m_e\n".format(cc.name)) f.write('{}.injection_style = "SingleParticle"\n'.format(cc.name)) - f.write("{}.single_particle_pos = {} {} {}\n". - format(cc.name, init_pos[0], init_pos[1], init_pos[2])) - f.write("{}.single_particle_u = {} {} {}\n". 
- format(cc.name, cc.init_mom[0], cc.init_mom[1], cc.init_mom[2])) - f.write("{}.single_particle_weight = {}\n".format(cc.name, very_small_weight)) + f.write( + "{}.single_particle_pos = {} {} {}\n".format( + cc.name, init_pos[0], init_pos[1], init_pos[2] + ) + ) + f.write( + "{}.single_particle_u = {} {} {}\n".format( + cc.name, cc.init_mom[0], cc.init_mom[1], cc.init_mom[2] + ) + ) + f.write( + "{}.single_particle_weight = {}\n".format(cc.name, very_small_weight) + ) f.write("{}.do_classical_radiation_reaction = 1\n".format(cc.name)) f.write("\n") f.write("warpx.B_external_particle = {} {} {}\n".format(*B)) + def main(): - if (len(sys.argv) < 2): + if len(sys.argv) < 2: generate() else: check() + if __name__ == "__main__": main() diff --git a/Examples/Tests/reduced_diags/PICMI_inputs_loadbalancecosts.py b/Examples/Tests/reduced_diags/PICMI_inputs_loadbalancecosts.py index 0583a6fe1d0..73050b910d8 100644 --- a/Examples/Tests/reduced_diags/PICMI_inputs_loadbalancecosts.py +++ b/Examples/Tests/reduced_diags/PICMI_inputs_loadbalancecosts.py @@ -11,12 +11,12 @@ nz = 128 # Physical domain -xmin = 0. -xmax = 4. -ymin = 0. -ymax = 4. -zmin = 0. -zmax = 4. 
+xmin = 0.0 +xmax = 4.0 +ymin = 0.0 +ymax = 4.0 +zmin = 0.0 +zmax = 4.0 # Create grid grid = picmi.Cartesian3DGrid( @@ -24,75 +24,69 @@ warpx_max_grid_size=32, lower_bound=[xmin, ymin, zmin], upper_bound=[xmax, ymax, zmax], - lower_boundary_conditions=['periodic', 'periodic', 'periodic'], - upper_boundary_conditions=['periodic', 'periodic', 'periodic'] + lower_boundary_conditions=["periodic", "periodic", "periodic"], + upper_boundary_conditions=["periodic", "periodic", "periodic"], ) # Electromagnetic solver -solver = picmi.ElectromagneticSolver( - grid=grid, - method='Yee', - cfl=0.99999 -) +solver = picmi.ElectromagneticSolver(grid=grid, method="Yee", cfl=0.99999) # Particles electrons = picmi.Species( - particle_type='electron', - name='electrons', + particle_type="electron", + name="electrons", initial_distribution=picmi.UniformDistribution( - density=1e14, - rms_velocity=[0]*3, - upper_bound=[xmax, ymax, 1.0] - ) -) -layout = picmi.GriddedLayout( - n_macroparticle_per_cell=[1, 1, 1], grid=grid + density=1e14, rms_velocity=[0] * 3, upper_bound=[xmax, ymax, 1.0] + ), ) +layout = picmi.GriddedLayout(n_macroparticle_per_cell=[1, 1, 1], grid=grid) # Reduced diagnostic reduced_diags = [] -reduced_diags.append(picmi.ReducedDiagnostic( - diag_type='LoadBalanceCosts', - period=1, - name='LBC' -)) - -reduced_diags.append(picmi.ReducedDiagnostic( - diag_type='FieldReduction', - period=1, - name='FR', - reduction_type='Maximum', - reduced_function = 'Ez' -)) - -reduced_diags.append(picmi.ReducedDiagnostic( - diag_type='ParticleHistogram', - period=1, - name='PH', - species = electrons, - bin_number = 10, - bin_min=0., - bin_max = xmax, - normalization = 'unity_particle_weight', - histogram_function = 'x' -)) +reduced_diags.append( + picmi.ReducedDiagnostic(diag_type="LoadBalanceCosts", period=1, name="LBC") +) + +reduced_diags.append( + picmi.ReducedDiagnostic( + diag_type="FieldReduction", + period=1, + name="FR", + reduction_type="Maximum", + reduced_function="Ez", + ) 
+) + +reduced_diags.append( + picmi.ReducedDiagnostic( + diag_type="ParticleHistogram", + period=1, + name="PH", + species=electrons, + bin_number=10, + bin_min=0.0, + bin_max=xmax, + normalization="unity_particle_weight", + histogram_function="x", + ) +) # Diagnostic particle_diag = picmi.ParticleDiagnostic( - name='diag1', + name="diag1", period=3, species=[electrons], - data_list = ['ux', 'uy', 'uz', 'x', 'y', 'z', 'weighting'], - write_dir='.', - warpx_file_prefix='Python_reduced_diags_loadbalancecosts_timers_plt' + data_list=["ux", "uy", "uz", "x", "y", "z", "weighting"], + write_dir=".", + warpx_file_prefix="Python_reduced_diags_loadbalancecosts_timers_plt", ) field_diag = picmi.FieldDiagnostic( - name='diag1', + name="diag1", grid=grid, period=3, - data_list = ['Bx', 'By', 'Bz', 'Ex', 'Ey', 'Ez', 'Jx', 'Jy', 'Jz'], - write_dir='.', - warpx_file_prefix='Python_reduced_diags_loadbalancecosts_timers_plt' + data_list=["Bx", "By", "Bz", "Ex", "Ey", "Ez", "Jx", "Jy", "Jz"], + write_dir=".", + warpx_file_prefix="Python_reduced_diags_loadbalancecosts_timers_plt", ) # Set up simulation @@ -101,9 +95,9 @@ max_steps=max_steps, verbose=1, particle_shape=1, - warpx_current_deposition_algo='esirkepov', - warpx_field_gathering_algo='energy-conserving', - warpx_load_balance_intervals=2 + warpx_current_deposition_algo="esirkepov", + warpx_field_gathering_algo="energy-conserving", + warpx_load_balance_intervals=2, ) # Add species diff --git a/Examples/Tests/reduced_diags/analysis_reduced_diags.py b/Examples/Tests/reduced_diags/analysis_reduced_diags.py index 1885800376b..2972324e2ac 100755 --- a/Examples/Tests/reduced_diags/analysis_reduced_diags.py +++ b/Examples/Tests/reduced_diags/analysis_reduced_diags.py @@ -13,4 +13,4 @@ import analysis_reduced_diags_impl as an -an.do_analysis(single_precision = False) +an.do_analysis(single_precision=False) diff --git a/Examples/Tests/reduced_diags/analysis_reduced_diags_impl.py 
b/Examples/Tests/reduced_diags/analysis_reduced_diags_impl.py index 21359ed8171..64b726e5954 100755 --- a/Examples/Tests/reduced_diags/analysis_reduced_diags_impl.py +++ b/Examples/Tests/reduced_diags/analysis_reduced_diags_impl.py @@ -16,283 +16,348 @@ import numpy as np import yt -from scipy.constants import c +from scipy.constants import c, m_e, m_p from scipy.constants import epsilon_0 as eps0 -from scipy.constants import m_e, m_p from scipy.constants import mu_0 as mu0 -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # gamma threshold to switch between the relativistic expression of # the kinetic energy and its Taylor expansion. gamma_relativistic_threshold = 1.005 -def do_analysis(single_precision = False): + +def do_analysis(single_precision=False): fn = sys.argv[1] ds = yt.load(fn) ad = ds.all_data() - #-------------------------------------------------------------------------------------------------- + # -------------------------------------------------------------------------------------------------- # Part 1: get results from plotfiles (label '_yt') - #-------------------------------------------------------------------------------------------------- + # -------------------------------------------------------------------------------------------------- # Quantities computed from plotfiles values_yt = dict() # Electrons - px = ad['electrons', 'particle_momentum_x'].to_ndarray() - py = ad['electrons', 'particle_momentum_y'].to_ndarray() - pz = ad['electrons', 'particle_momentum_z'].to_ndarray() - w = ad['electrons', 'particle_weight'].to_ndarray() + px = ad["electrons", "particle_momentum_x"].to_ndarray() + py = ad["electrons", "particle_momentum_y"].to_ndarray() + pz = ad["electrons", "particle_momentum_z"].to_ndarray() + w = ad["electrons", "particle_weight"].to_ndarray() p2 = px**2 + py**2 + pz**2 # Accumulate particle energy, store number of particles and sum of 
weights - e_u2 = p2/(m_e**2 * c**2) + e_u2 = p2 / (m_e**2 * c**2) e_gamma = np.sqrt(1 + e_u2) - e_energy = (m_e * c**2)*np.where(e_gamma > gamma_relativistic_threshold, - e_gamma-1, - (e_u2)/2 - (e_u2**2)/8 + (e_u2**3)/16 - (e_u2**4)*(5/128) + (e_u2**5)*(7/256)) - values_yt['electrons: particle energy'] = np.sum(e_energy * w) - values_yt['electrons: particle momentum in x'] = np.sum(px * w) - values_yt['electrons: particle momentum in y'] = np.sum(py * w) - values_yt['electrons: particle momentum in z'] = np.sum(pz * w) - values_yt['electrons: number of particles'] = w.shape[0] - values_yt['electrons: sum of weights'] = np.sum(w) + e_energy = (m_e * c**2) * np.where( + e_gamma > gamma_relativistic_threshold, + e_gamma - 1, + (e_u2) / 2 + - (e_u2**2) / 8 + + (e_u2**3) / 16 + - (e_u2**4) * (5 / 128) + + (e_u2**5) * (7 / 256), + ) + values_yt["electrons: particle energy"] = np.sum(e_energy * w) + values_yt["electrons: particle momentum in x"] = np.sum(px * w) + values_yt["electrons: particle momentum in y"] = np.sum(py * w) + values_yt["electrons: particle momentum in z"] = np.sum(pz * w) + values_yt["electrons: number of particles"] = w.shape[0] + values_yt["electrons: sum of weights"] = np.sum(w) # Protons - px = ad['protons', 'particle_momentum_x'].to_ndarray() - py = ad['protons', 'particle_momentum_y'].to_ndarray() - pz = ad['protons', 'particle_momentum_z'].to_ndarray() - w = ad['protons', 'particle_weight'].to_ndarray() + px = ad["protons", "particle_momentum_x"].to_ndarray() + py = ad["protons", "particle_momentum_y"].to_ndarray() + pz = ad["protons", "particle_momentum_z"].to_ndarray() + w = ad["protons", "particle_weight"].to_ndarray() p2 = px**2 + py**2 + pz**2 # Accumulate particle energy, store number of particles and sum of weights - p_u2 = p2/(m_p**2 * c**2) + p_u2 = p2 / (m_p**2 * c**2) p_gamma = np.sqrt(1 + p_u2) - p_energy = (m_p * c**2)*np.where(p_gamma > gamma_relativistic_threshold, - p_gamma-1, - (p_u2)/2 - (p_u2**2)/8 + (p_u2**3)/16 - 
(p_u2**4)*(5/128) + (p_u2**5)*(7/256)) - values_yt['protons: particle energy'] = np.sum(p_energy * w) - values_yt['protons: particle momentum in x'] = np.sum(px * w) - values_yt['protons: particle momentum in y'] = np.sum(py * w) - values_yt['protons: particle momentum in z'] = np.sum(pz * w) - values_yt['protons: number of particles'] = w.shape[0] - values_yt['protons: sum of weights'] = np.sum(w) + p_energy = (m_p * c**2) * np.where( + p_gamma > gamma_relativistic_threshold, + p_gamma - 1, + (p_u2) / 2 + - (p_u2**2) / 8 + + (p_u2**3) / 16 + - (p_u2**4) * (5 / 128) + + (p_u2**5) * (7 / 256), + ) + values_yt["protons: particle energy"] = np.sum(p_energy * w) + values_yt["protons: particle momentum in x"] = np.sum(px * w) + values_yt["protons: particle momentum in y"] = np.sum(py * w) + values_yt["protons: particle momentum in z"] = np.sum(pz * w) + values_yt["protons: number of particles"] = w.shape[0] + values_yt["protons: sum of weights"] = np.sum(w) # Photons - px = ad['photons', 'particle_momentum_x'].to_ndarray() - py = ad['photons', 'particle_momentum_y'].to_ndarray() - pz = ad['photons', 'particle_momentum_z'].to_ndarray() - w = ad['photons', 'particle_weight'].to_ndarray() + px = ad["photons", "particle_momentum_x"].to_ndarray() + py = ad["photons", "particle_momentum_y"].to_ndarray() + pz = ad["photons", "particle_momentum_z"].to_ndarray() + w = ad["photons", "particle_weight"].to_ndarray() p2 = px**2 + py**2 + pz**2 # Accumulate particle energy, store number of particles and sum of weights - values_yt['photons: particle energy'] = np.sum(np.sqrt(p2 * c**2) * w) - values_yt['photons: particle momentum in x'] = np.sum(px * w) - values_yt['photons: particle momentum in y'] = np.sum(py * w) - values_yt['photons: particle momentum in z'] = np.sum(pz * w) - values_yt['photons: number of particles'] = w.shape[0] - values_yt['photons: sum of weights'] = np.sum(w) + values_yt["photons: particle energy"] = np.sum(np.sqrt(p2 * c**2) * w) + values_yt["photons: 
particle momentum in x"] = np.sum(px * w) + values_yt["photons: particle momentum in y"] = np.sum(py * w) + values_yt["photons: particle momentum in z"] = np.sum(pz * w) + values_yt["photons: number of particles"] = w.shape[0] + values_yt["photons: sum of weights"] = np.sum(w) # Accumulate total particle diagnostics - values_yt['particle energy'] = values_yt['electrons: particle energy'] \ - + values_yt['protons: particle energy'] \ - + values_yt['photons: particle energy'] - - values_yt['particle momentum in x'] = values_yt['electrons: particle momentum in x'] \ - + values_yt['protons: particle momentum in x'] \ - + values_yt['photons: particle momentum in x'] - - values_yt['particle momentum in y'] = values_yt['electrons: particle momentum in y'] \ - + values_yt['protons: particle momentum in y'] \ - + values_yt['photons: particle momentum in y'] - - values_yt['particle momentum in z'] = values_yt['electrons: particle momentum in z'] \ - + values_yt['protons: particle momentum in z'] \ - + values_yt['photons: particle momentum in z'] - - values_yt['number of particles'] = values_yt['electrons: number of particles'] \ - + values_yt['protons: number of particles'] \ - + values_yt['photons: number of particles'] - - values_yt['sum of weights'] = values_yt['electrons: sum of weights'] \ - + values_yt['protons: sum of weights'] \ - + values_yt['photons: sum of weights'] - - values_yt['mean particle energy'] = values_yt['particle energy'] / values_yt['sum of weights'] - - values_yt['mean particle momentum in x'] = values_yt['particle momentum in x'] / values_yt['sum of weights'] - values_yt['mean particle momentum in y'] = values_yt['particle momentum in y'] / values_yt['sum of weights'] - values_yt['mean particle momentum in z'] = values_yt['particle momentum in z'] / values_yt['sum of weights'] - - values_yt['electrons: mean particle energy'] = values_yt['electrons: particle energy'] \ - / values_yt['electrons: sum of weights'] - - values_yt['electrons: mean particle 
momentum in x'] = values_yt['electrons: particle momentum in x'] \ - / values_yt['electrons: sum of weights'] - values_yt['electrons: mean particle momentum in y'] = values_yt['electrons: particle momentum in y'] \ - / values_yt['electrons: sum of weights'] - values_yt['electrons: mean particle momentum in z'] = values_yt['electrons: particle momentum in z'] \ - / values_yt['electrons: sum of weights'] - - values_yt['protons: mean particle energy'] = values_yt['protons: particle energy'] \ - / values_yt['protons: sum of weights'] - - values_yt['protons: mean particle momentum in x'] = values_yt['protons: particle momentum in x'] \ - / values_yt['protons: sum of weights'] - values_yt['protons: mean particle momentum in y'] = values_yt['protons: particle momentum in y'] \ - / values_yt['protons: sum of weights'] - values_yt['protons: mean particle momentum in z'] = values_yt['protons: particle momentum in z'] \ - / values_yt['protons: sum of weights'] - - values_yt['photons: mean particle energy'] = values_yt['photons: particle energy'] \ - / values_yt['photons: sum of weights'] - - values_yt['photons: mean particle momentum in x'] = values_yt['photons: particle momentum in x'] \ - / values_yt['photons: sum of weights'] - values_yt['photons: mean particle momentum in y'] = values_yt['photons: particle momentum in y'] \ - / values_yt['photons: sum of weights'] - values_yt['photons: mean particle momentum in z'] = values_yt['photons: particle momentum in z'] \ - / values_yt['photons: sum of weights'] + values_yt["particle energy"] = ( + values_yt["electrons: particle energy"] + + values_yt["protons: particle energy"] + + values_yt["photons: particle energy"] + ) + + values_yt["particle momentum in x"] = ( + values_yt["electrons: particle momentum in x"] + + values_yt["protons: particle momentum in x"] + + values_yt["photons: particle momentum in x"] + ) + + values_yt["particle momentum in y"] = ( + values_yt["electrons: particle momentum in y"] + + values_yt["protons: 
particle momentum in y"] + + values_yt["photons: particle momentum in y"] + ) + + values_yt["particle momentum in z"] = ( + values_yt["electrons: particle momentum in z"] + + values_yt["protons: particle momentum in z"] + + values_yt["photons: particle momentum in z"] + ) + + values_yt["number of particles"] = ( + values_yt["electrons: number of particles"] + + values_yt["protons: number of particles"] + + values_yt["photons: number of particles"] + ) + + values_yt["sum of weights"] = ( + values_yt["electrons: sum of weights"] + + values_yt["protons: sum of weights"] + + values_yt["photons: sum of weights"] + ) + + values_yt["mean particle energy"] = ( + values_yt["particle energy"] / values_yt["sum of weights"] + ) + + values_yt["mean particle momentum in x"] = ( + values_yt["particle momentum in x"] / values_yt["sum of weights"] + ) + values_yt["mean particle momentum in y"] = ( + values_yt["particle momentum in y"] / values_yt["sum of weights"] + ) + values_yt["mean particle momentum in z"] = ( + values_yt["particle momentum in z"] / values_yt["sum of weights"] + ) + + values_yt["electrons: mean particle energy"] = ( + values_yt["electrons: particle energy"] / values_yt["electrons: sum of weights"] + ) + + values_yt["electrons: mean particle momentum in x"] = ( + values_yt["electrons: particle momentum in x"] + / values_yt["electrons: sum of weights"] + ) + values_yt["electrons: mean particle momentum in y"] = ( + values_yt["electrons: particle momentum in y"] + / values_yt["electrons: sum of weights"] + ) + values_yt["electrons: mean particle momentum in z"] = ( + values_yt["electrons: particle momentum in z"] + / values_yt["electrons: sum of weights"] + ) + + values_yt["protons: mean particle energy"] = ( + values_yt["protons: particle energy"] / values_yt["protons: sum of weights"] + ) + + values_yt["protons: mean particle momentum in x"] = ( + values_yt["protons: particle momentum in x"] + / values_yt["protons: sum of weights"] + ) + values_yt["protons: mean 
particle momentum in y"] = ( + values_yt["protons: particle momentum in y"] + / values_yt["protons: sum of weights"] + ) + values_yt["protons: mean particle momentum in z"] = ( + values_yt["protons: particle momentum in z"] + / values_yt["protons: sum of weights"] + ) + + values_yt["photons: mean particle energy"] = ( + values_yt["photons: particle energy"] / values_yt["photons: sum of weights"] + ) + + values_yt["photons: mean particle momentum in x"] = ( + values_yt["photons: particle momentum in x"] + / values_yt["photons: sum of weights"] + ) + values_yt["photons: mean particle momentum in y"] = ( + values_yt["photons: particle momentum in y"] + / values_yt["photons: sum of weights"] + ) + values_yt["photons: mean particle momentum in z"] = ( + values_yt["photons: particle momentum in z"] + / values_yt["photons: sum of weights"] + ) # Load 3D data from plotfiles - ad = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) - Ex = ad[('mesh','Ex')].to_ndarray() - Ey = ad[('mesh','Ey')].to_ndarray() - Ez = ad[('mesh','Ez')].to_ndarray() - Bx = ad[('mesh','Bx')].to_ndarray() - By = ad[('mesh','By')].to_ndarray() - Bz = ad[('mesh','Bz')].to_ndarray() - jx = ad[('mesh','jx')].to_ndarray() - jy = ad[('mesh','jy')].to_ndarray() - jz = ad[('mesh','jz')].to_ndarray() - rho = ad[('boxlib','rho')].to_ndarray() - rho_electrons = ad[('boxlib','rho_electrons')].to_ndarray() - rho_protons = ad[('boxlib','rho_protons')].to_ndarray() - x = ad[('boxlib','x')].to_ndarray() - y = ad[('boxlib','y')].to_ndarray() - z = ad[('boxlib','z')].to_ndarray() + ad = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions + ) + Ex = ad[("mesh", "Ex")].to_ndarray() + Ey = ad[("mesh", "Ey")].to_ndarray() + Ez = ad[("mesh", "Ez")].to_ndarray() + Bx = ad[("mesh", "Bx")].to_ndarray() + By = ad[("mesh", "By")].to_ndarray() + Bz = ad[("mesh", "Bz")].to_ndarray() + jx = ad[("mesh", "jx")].to_ndarray() + jy = ad[("mesh", "jy")].to_ndarray() + 
jz = ad[("mesh", "jz")].to_ndarray() + rho = ad[("boxlib", "rho")].to_ndarray() + rho_electrons = ad[("boxlib", "rho_electrons")].to_ndarray() + rho_protons = ad[("boxlib", "rho_protons")].to_ndarray() + x = ad[("boxlib", "x")].to_ndarray() + y = ad[("boxlib", "y")].to_ndarray() + z = ad[("boxlib", "z")].to_ndarray() # Field energy E2 = np.sum(Ex**2) + np.sum(Ey**2) + np.sum(Ez**2) B2 = np.sum(Bx**2) + np.sum(By**2) + np.sum(Bz**2) - N = np.array(ds.domain_width / ds.domain_dimensions) + N = np.array(ds.domain_width / ds.domain_dimensions) dV = N[0] * N[1] * N[2] - values_yt['field energy'] = 0.5 * dV * (E2 * eps0 + B2 / mu0) - values_yt['field momentum in x'] = eps0 * np.sum(Ey * Bz - Ez * By) * dV - values_yt['field momentum in y'] = eps0 * np.sum(Ez * Bx - Ex * Bz) * dV - values_yt['field momentum in z'] = eps0 * np.sum(Ex * By - Ey * Bx) * dV + values_yt["field energy"] = 0.5 * dV * (E2 * eps0 + B2 / mu0) + values_yt["field momentum in x"] = eps0 * np.sum(Ey * Bz - Ez * By) * dV + values_yt["field momentum in y"] = eps0 * np.sum(Ez * Bx - Ex * Bz) * dV + values_yt["field momentum in z"] = eps0 * np.sum(Ex * By - Ey * Bx) * dV # Field energy in quarter of simulation domain - E2 = np.sum((Ex**2 + Ey**2 + Ez**2)*(y > 0)*(z < 0)) - B2 = np.sum((Bx**2 + By**2 + Bz**2)*(y > 0)*(z < 0)) - values_yt['field energy in quarter of simulation domain'] = 0.5 * dV * (E2 * eps0 + B2 / mu0) + E2 = np.sum((Ex**2 + Ey**2 + Ez**2) * (y > 0) * (z < 0)) + B2 = np.sum((Bx**2 + By**2 + Bz**2) * (y > 0) * (z < 0)) + values_yt["field energy in quarter of simulation domain"] = ( + 0.5 * dV * (E2 * eps0 + B2 / mu0) + ) # Max/min values of various grid quantities - values_yt['maximum of |Ex|'] = np.amax(np.abs(Ex)) - values_yt['maximum of |Ey|'] = np.amax(np.abs(Ey)) - values_yt['maximum of |Ez|'] = np.amax(np.abs(Ez)) - values_yt['maximum of |Bx|'] = np.amax(np.abs(Bx)) - values_yt['maximum of |By|'] = np.amax(np.abs(By)) - values_yt['maximum of |Bz|'] = np.amax(np.abs(Bz)) - 
values_yt['maximum of |E|'] = np.amax(np.sqrt(Ex**2 + Ey**2 + Ez**2)) - values_yt['maximum of |B|'] = np.amax(np.sqrt(Bx**2 + By**2 + Bz**2)) - values_yt['maximum of rho'] = np.amax(rho) - values_yt['minimum of rho'] = np.amin(rho) - values_yt['electrons: maximum of |rho|'] = np.amax(np.abs(rho_electrons)) - values_yt['protons: maximum of |rho|'] = np.amax(np.abs(rho_protons)) - values_yt['maximum of |B| from generic field reduction'] = np.amax(np.sqrt(Bx**2 + By**2 + Bz**2)) - values_yt['minimum of x*Ey*Bz'] = np.amin(x*Ey*Bz) - values_yt['maximum of Edotj'] = np.amax(Ex*jx + Ey*jy + Ez*jz) - - #-------------------------------------------------------------------------------------------------- + values_yt["maximum of |Ex|"] = np.amax(np.abs(Ex)) + values_yt["maximum of |Ey|"] = np.amax(np.abs(Ey)) + values_yt["maximum of |Ez|"] = np.amax(np.abs(Ez)) + values_yt["maximum of |Bx|"] = np.amax(np.abs(Bx)) + values_yt["maximum of |By|"] = np.amax(np.abs(By)) + values_yt["maximum of |Bz|"] = np.amax(np.abs(Bz)) + values_yt["maximum of |E|"] = np.amax(np.sqrt(Ex**2 + Ey**2 + Ez**2)) + values_yt["maximum of |B|"] = np.amax(np.sqrt(Bx**2 + By**2 + Bz**2)) + values_yt["maximum of rho"] = np.amax(rho) + values_yt["minimum of rho"] = np.amin(rho) + values_yt["electrons: maximum of |rho|"] = np.amax(np.abs(rho_electrons)) + values_yt["protons: maximum of |rho|"] = np.amax(np.abs(rho_protons)) + values_yt["maximum of |B| from generic field reduction"] = np.amax( + np.sqrt(Bx**2 + By**2 + Bz**2) + ) + values_yt["minimum of x*Ey*Bz"] = np.amin(x * Ey * Bz) + values_yt["maximum of Edotj"] = np.amax(Ex * jx + Ey * jy + Ez * jz) + + # -------------------------------------------------------------------------------------------------- # Part 2: get results from reduced diagnostics (label '_rd') - #-------------------------------------------------------------------------------------------------- + # 
-------------------------------------------------------------------------------------------------- # Quantities computed from reduced diagnostics values_rd = dict() # Load data from output files - EFdata = np.genfromtxt('./diags/reducedfiles/EF.txt') # Field energy - EPdata = np.genfromtxt('./diags/reducedfiles/EP.txt') # Particle energy - PFdata = np.genfromtxt('./diags/reducedfiles/PF.txt') # Field momentum - PPdata = np.genfromtxt('./diags/reducedfiles/PP.txt') # Particle momentum - MFdata = np.genfromtxt('./diags/reducedfiles/MF.txt') # Field maximum - MRdata = np.genfromtxt('./diags/reducedfiles/MR.txt') # Rho maximum - NPdata = np.genfromtxt('./diags/reducedfiles/NP.txt') # Particle number - FR_Maxdata = np.genfromtxt('./diags/reducedfiles/FR_Max.txt') # Field Reduction using maximum - FR_Mindata = np.genfromtxt('./diags/reducedfiles/FR_Min.txt') # Field Reduction using minimum - FR_Integraldata = np.genfromtxt('./diags/reducedfiles/FR_Integral.txt') # Field Reduction using integral - Edotjdata = np.genfromtxt('./diags/reducedfiles/Edotj.txt') # E dot j maximum + EFdata = np.genfromtxt("./diags/reducedfiles/EF.txt") # Field energy + EPdata = np.genfromtxt("./diags/reducedfiles/EP.txt") # Particle energy + PFdata = np.genfromtxt("./diags/reducedfiles/PF.txt") # Field momentum + PPdata = np.genfromtxt("./diags/reducedfiles/PP.txt") # Particle momentum + MFdata = np.genfromtxt("./diags/reducedfiles/MF.txt") # Field maximum + MRdata = np.genfromtxt("./diags/reducedfiles/MR.txt") # Rho maximum + NPdata = np.genfromtxt("./diags/reducedfiles/NP.txt") # Particle number + FR_Maxdata = np.genfromtxt( + "./diags/reducedfiles/FR_Max.txt" + ) # Field Reduction using maximum + FR_Mindata = np.genfromtxt( + "./diags/reducedfiles/FR_Min.txt" + ) # Field Reduction using minimum + FR_Integraldata = np.genfromtxt( + "./diags/reducedfiles/FR_Integral.txt" + ) # Field Reduction using integral + Edotjdata = np.genfromtxt("./diags/reducedfiles/Edotj.txt") # E dot j maximum # First 
index "1" points to the values written at the last time step - values_rd['field energy'] = EFdata[1][2] - values_rd['field energy in quarter of simulation domain'] = FR_Integraldata[1][2] - values_rd['particle energy'] = EPdata[1][2] - values_rd['electrons: particle energy'] = EPdata[1][3] - values_rd['protons: particle energy'] = EPdata[1][4] - values_rd['photons: particle energy'] = EPdata[1][5] - values_rd['mean particle energy'] = EPdata[1][6] - values_rd['electrons: mean particle energy'] = EPdata[1][7] - values_rd['protons: mean particle energy'] = EPdata[1][8] - values_rd['photons: mean particle energy'] = EPdata[1][9] - values_rd['field momentum in x'] = PFdata[1][2] - values_rd['field momentum in y'] = PFdata[1][3] - values_rd['field momentum in z'] = PFdata[1][4] - values_rd['particle momentum in x'] = PPdata[1][2] - values_rd['particle momentum in y'] = PPdata[1][3] - values_rd['particle momentum in z'] = PPdata[1][4] - values_rd['electrons: particle momentum in x'] = PPdata[1][5] - values_rd['electrons: particle momentum in y'] = PPdata[1][6] - values_rd['electrons: particle momentum in z'] = PPdata[1][7] - values_rd['protons: particle momentum in x'] = PPdata[1][8] - values_rd['protons: particle momentum in y'] = PPdata[1][9] - values_rd['protons: particle momentum in z'] = PPdata[1][10] - values_rd['photons: particle momentum in x'] = PPdata[1][11] - values_rd['photons: particle momentum in y'] = PPdata[1][12] - values_rd['photons: particle momentum in z'] = PPdata[1][13] - values_rd['mean particle momentum in x'] = PPdata[1][14] - values_rd['mean particle momentum in y'] = PPdata[1][15] - values_rd['mean particle momentum in z'] = PPdata[1][16] - values_rd['electrons: mean particle momentum in x'] = PPdata[1][17] - values_rd['electrons: mean particle momentum in y'] = PPdata[1][18] - values_rd['electrons: mean particle momentum in z'] = PPdata[1][19] - values_rd['protons: mean particle momentum in x'] = PPdata[1][20] - values_rd['protons: mean 
particle momentum in y'] = PPdata[1][21] - values_rd['protons: mean particle momentum in z'] = PPdata[1][22] - values_rd['photons: mean particle momentum in x'] = PPdata[1][23] - values_rd['photons: mean particle momentum in y'] = PPdata[1][24] - values_rd['photons: mean particle momentum in z'] = PPdata[1][25] - values_rd['maximum of |Ex|'] = MFdata[1][2] - values_rd['maximum of |Ey|'] = MFdata[1][3] - values_rd['maximum of |Ez|'] = MFdata[1][4] - values_rd['maximum of |E|'] = MFdata[1][5] - values_rd['maximum of |Bx|'] = MFdata[1][6] - values_rd['maximum of |By|'] = MFdata[1][7] - values_rd['maximum of |Bz|'] = MFdata[1][8] - values_rd['maximum of |B|'] = MFdata[1][9] - values_rd['maximum of rho'] = MRdata[1][2] - values_rd['minimum of rho'] = MRdata[1][3] - values_rd['electrons: maximum of |rho|'] = MRdata[1][4] - values_rd['protons: maximum of |rho|'] = MRdata[1][5] - values_rd['number of particles'] = NPdata[1][2] - values_rd['electrons: number of particles'] = NPdata[1][3] - values_rd['protons: number of particles'] = NPdata[1][4] - values_rd['photons: number of particles'] = NPdata[1][5] - values_rd['sum of weights'] = NPdata[1][6] - values_rd['electrons: sum of weights'] = NPdata[1][7] - values_rd['protons: sum of weights'] = NPdata[1][8] - values_rd['photons: sum of weights'] = NPdata[1][9] - values_rd['maximum of |B| from generic field reduction'] = FR_Maxdata[1][2] - values_rd['minimum of x*Ey*Bz'] = FR_Mindata[1][2] - values_rd['maximum of Edotj'] = Edotjdata[1][2] - - #-------------------------------------------------------------------------------------------------- + values_rd["field energy"] = EFdata[1][2] + values_rd["field energy in quarter of simulation domain"] = FR_Integraldata[1][2] + values_rd["particle energy"] = EPdata[1][2] + values_rd["electrons: particle energy"] = EPdata[1][3] + values_rd["protons: particle energy"] = EPdata[1][4] + values_rd["photons: particle energy"] = EPdata[1][5] + values_rd["mean particle energy"] = EPdata[1][6] + 
values_rd["electrons: mean particle energy"] = EPdata[1][7] + values_rd["protons: mean particle energy"] = EPdata[1][8] + values_rd["photons: mean particle energy"] = EPdata[1][9] + values_rd["field momentum in x"] = PFdata[1][2] + values_rd["field momentum in y"] = PFdata[1][3] + values_rd["field momentum in z"] = PFdata[1][4] + values_rd["particle momentum in x"] = PPdata[1][2] + values_rd["particle momentum in y"] = PPdata[1][3] + values_rd["particle momentum in z"] = PPdata[1][4] + values_rd["electrons: particle momentum in x"] = PPdata[1][5] + values_rd["electrons: particle momentum in y"] = PPdata[1][6] + values_rd["electrons: particle momentum in z"] = PPdata[1][7] + values_rd["protons: particle momentum in x"] = PPdata[1][8] + values_rd["protons: particle momentum in y"] = PPdata[1][9] + values_rd["protons: particle momentum in z"] = PPdata[1][10] + values_rd["photons: particle momentum in x"] = PPdata[1][11] + values_rd["photons: particle momentum in y"] = PPdata[1][12] + values_rd["photons: particle momentum in z"] = PPdata[1][13] + values_rd["mean particle momentum in x"] = PPdata[1][14] + values_rd["mean particle momentum in y"] = PPdata[1][15] + values_rd["mean particle momentum in z"] = PPdata[1][16] + values_rd["electrons: mean particle momentum in x"] = PPdata[1][17] + values_rd["electrons: mean particle momentum in y"] = PPdata[1][18] + values_rd["electrons: mean particle momentum in z"] = PPdata[1][19] + values_rd["protons: mean particle momentum in x"] = PPdata[1][20] + values_rd["protons: mean particle momentum in y"] = PPdata[1][21] + values_rd["protons: mean particle momentum in z"] = PPdata[1][22] + values_rd["photons: mean particle momentum in x"] = PPdata[1][23] + values_rd["photons: mean particle momentum in y"] = PPdata[1][24] + values_rd["photons: mean particle momentum in z"] = PPdata[1][25] + values_rd["maximum of |Ex|"] = MFdata[1][2] + values_rd["maximum of |Ey|"] = MFdata[1][3] + values_rd["maximum of |Ez|"] = MFdata[1][4] + 
values_rd["maximum of |E|"] = MFdata[1][5] + values_rd["maximum of |Bx|"] = MFdata[1][6] + values_rd["maximum of |By|"] = MFdata[1][7] + values_rd["maximum of |Bz|"] = MFdata[1][8] + values_rd["maximum of |B|"] = MFdata[1][9] + values_rd["maximum of rho"] = MRdata[1][2] + values_rd["minimum of rho"] = MRdata[1][3] + values_rd["electrons: maximum of |rho|"] = MRdata[1][4] + values_rd["protons: maximum of |rho|"] = MRdata[1][5] + values_rd["number of particles"] = NPdata[1][2] + values_rd["electrons: number of particles"] = NPdata[1][3] + values_rd["protons: number of particles"] = NPdata[1][4] + values_rd["photons: number of particles"] = NPdata[1][5] + values_rd["sum of weights"] = NPdata[1][6] + values_rd["electrons: sum of weights"] = NPdata[1][7] + values_rd["protons: sum of weights"] = NPdata[1][8] + values_rd["photons: sum of weights"] = NPdata[1][9] + values_rd["maximum of |B| from generic field reduction"] = FR_Maxdata[1][2] + values_rd["minimum of x*Ey*Bz"] = FR_Mindata[1][2] + values_rd["maximum of Edotj"] = Edotjdata[1][2] + + # -------------------------------------------------------------------------------------------------- # Part 3: compare values from plotfiles and reduced diagnostics and print output - #-------------------------------------------------------------------------------------------------- + # -------------------------------------------------------------------------------------------------- error = dict() tolerance = 5e-3 if single_precision else 1e-12 @@ -303,12 +368,12 @@ def do_analysis(single_precision = False): # while the field energy from the reduced diagnostics is computed from (Yee) staggered data. 
for k in values_yt.keys(): print() - print('values_yt[' + k + '] = ', values_yt[k]) - print('values_rd[' + k + '] = ', values_rd[k]) + print("values_yt[" + k + "] = ", values_yt[k]) + print("values_rd[" + k + "] = ", values_rd[k]) error[k] = abs(values_yt[k] - values_rd[k]) / abs(values_yt[k]) - print('relative error = ', error[k]) - tol = field_energy_tolerance if (k == 'field energy') else tolerance - assert(error[k] < tol) + print("relative error = ", error[k]) + tol = field_energy_tolerance if (k == "field energy") else tolerance + assert error[k] < tol print() test_name = os.path.split(os.getcwd())[1] diff --git a/Examples/Tests/reduced_diags/analysis_reduced_diags_loadbalancecosts.py b/Examples/Tests/reduced_diags/analysis_reduced_diags_loadbalancecosts.py index a706aace1f6..0494b84b0d8 100755 --- a/Examples/Tests/reduced_diags/analysis_reduced_diags_loadbalancecosts.py +++ b/Examples/Tests/reduced_diags/analysis_reduced_diags_loadbalancecosts.py @@ -21,7 +21,7 @@ import numpy as np -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # Command line argument @@ -29,13 +29,15 @@ # Load costs data data = np.genfromtxt("./diags/reducedfiles/LBC.txt") -data = data[:,2:] +data = data[:, 2:] # Compute the number of datafields saved per box n_data_fields = 0 with open("./diags/reducedfiles/LBC.txt") as f: h = f.readlines()[0] - unique_headers=[''.join([l for l in w if not l.isdigit()]) for w in h.split()][2::] + unique_headers = ["".join([ln for ln in w if not ln.isdigit()]) for w in h.split()][ + 2:: + ] n_data_fields = len(set(unique_headers)) f.close() @@ -46,11 +48,12 @@ # ... 
# cost_box_n, proc_box_n, lev_box_n, i_low_box_n, j_low_box_n, k_low_box_n(, gpu_ID_box_n if GPU run), hostname_box_n] + # Function to get efficiency at an iteration i def get_efficiency(i): # First get the unique ranks - costs, ranks = data[i,0::n_data_fields], data[i,1::n_data_fields].astype(int) - rank_to_cost_map = {r:0. for r in set(ranks)} + costs, ranks = data[i, 0::n_data_fields], data[i, 1::n_data_fields].astype(int) + rank_to_cost_map = {r: 0.0 for r in set(ranks)} # Compute efficiency before/after load balance and check it is improved for c, r in zip(costs, ranks): @@ -62,14 +65,15 @@ def get_efficiency(i): return efficiencies.mean() + # The iteration i=2 is load balanced; examine before/after load balance efficiency_before, efficiency_after = get_efficiency(1), get_efficiency(2) -print('load balance efficiency (before load balance): ', efficiency_before) -print('load balance efficiency (after load balance): ', efficiency_after) +print("load balance efficiency (before load balance): ", efficiency_before) +print("load balance efficiency (after load balance): ", efficiency_after) # The load balanced case is expected to be more efficient # than non-load balanced case -assert(efficiency_before < efficiency_after) +assert efficiency_before < efficiency_after -test_name = 'reduced_diags_loadbalancecosts_timers' +test_name = "reduced_diags_loadbalancecosts_timers" checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/reduced_diags/analysis_reduced_diags_single.py b/Examples/Tests/reduced_diags/analysis_reduced_diags_single.py index 0bd83854c4d..d900ec673c1 100755 --- a/Examples/Tests/reduced_diags/analysis_reduced_diags_single.py +++ b/Examples/Tests/reduced_diags/analysis_reduced_diags_single.py @@ -13,4 +13,4 @@ import analysis_reduced_diags_impl as an -an.do_analysis(single_precision = True) +an.do_analysis(single_precision=True) diff --git a/Examples/Tests/relativistic_space_charge_initialization/analysis.py 
b/Examples/Tests/relativistic_space_charge_initialization/analysis.py index 569a7d2678a..4828e3ddce5 100755 --- a/Examples/Tests/relativistic_space_charge_initialization/analysis.py +++ b/Examples/Tests/relativistic_space_charge_initialization/analysis.py @@ -11,82 +11,89 @@ verifying that the space-charge field of a Gaussian beam corresponds to the expected theoretical field. """ + import os import sys import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import numpy as np import scipy.constants as scc import yt yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # Parameters from the Simulation -Qtot = -1.e-20 -r0 = 2.e-6 +Qtot = -1.0e-20 +r0 = 2.0e-6 # Open data file filename = sys.argv[1] -ds = yt.load( filename ) +ds = yt.load(filename) # Extract data -ad0 = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) -Ex_array = ad0[('mesh','Ex')].to_ndarray().squeeze() -By_array = ad0[('mesh','By')].to_ndarray() +ad0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +Ex_array = ad0[("mesh", "Ex")].to_ndarray().squeeze() +By_array = ad0[("mesh", "By")].to_ndarray() # Extract grid coordinates -Nx, Ny, Nz = ds.domain_dimensions +Nx, Ny, Nz = ds.domain_dimensions xmin, ymin, zmin = ds.domain_left_edge.v Lx, Ly, Lz = ds.domain_width.v -x = xmin + Lx/Nx*(0.5+np.arange(Nx)) -y = ymin + Ly/Ny*(0.5+np.arange(Ny)) -z = zmin + Lz/Nz*(0.5+np.arange(Nz)) +x = xmin + Lx / Nx * (0.5 + np.arange(Nx)) +y = ymin + Ly / Ny * (0.5 + np.arange(Ny)) +z = zmin + Lz / Nz * (0.5 + np.arange(Nz)) # Compute theoretical field -x_2d, y_2d, z_2d = np.meshgrid(x, y, z, indexing='ij') +x_2d, y_2d, z_2d = np.meshgrid(x, y, z, indexing="ij") r2 = x_2d**2 + y_2d**2 -factor = Qtot/scc.epsilon_0/(2*np.pi*r2) * (1-np.exp(-r2/(2*r0**2))) -factor_z = 1./(2*np.pi)**.5/r0 * 
np.exp(-z_2d**2/(2*r0**2)) -Ex_th = factor*factor_z*x_2d -Ey_th = factor*factor_z*y_2d +factor = Qtot / scc.epsilon_0 / (2 * np.pi * r2) * (1 - np.exp(-r2 / (2 * r0**2))) +factor_z = 1.0 / (2 * np.pi) ** 0.5 / r0 * np.exp(-(z_2d**2) / (2 * r0**2)) +Ex_th = factor * factor_z * x_2d +Ey_th = factor * factor_z * y_2d + # Plot theory and data def make_2d(arr): if arr.ndim == 3: - return arr[:,Ny//2,:] + return arr[:, Ny // 2, :] else: return arr -plt.figure(figsize=(10,10)) + + +plt.figure(figsize=(10, 10)) plt.subplot(221) -plt.title('Ex: Theory') +plt.title("Ex: Theory") plt.imshow(make_2d(Ex_th)) plt.colorbar() plt.subplot(222) -plt.title('Ex: Simulation') +plt.title("Ex: Simulation") plt.imshow(make_2d(Ex_array)) plt.colorbar() plt.subplot(223) -plt.title('By: Theory') -plt.imshow(make_2d(Ex_th/scc.c)) +plt.title("By: Theory") +plt.imshow(make_2d(Ex_th / scc.c)) plt.colorbar() plt.subplot(224) -plt.title('By: Simulation') +plt.title("By: Simulation") plt.imshow(make_2d(By_array)) plt.colorbar() -plt.savefig('Comparison.png') +plt.savefig("Comparison.png") + # Automatically check the results def check(E, E_th, label): - print( 'Relative error in %s: %.3f'%( - label, abs(E-E_th).max()/E_th.max())) - assert np.allclose( E, E_th, atol=0.175*E_th.max() ) + print("Relative error in %s: %.3f" % (label, abs(E - E_th).max() / E_th.max())) + assert np.allclose(E, E_th, atol=0.175 * E_th.max()) + -check( Ex_array, Ex_th, 'Ex' ) +check(Ex_array, Ex_th, "Ex") test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename, do_particles=False) diff --git a/Examples/Tests/repelling_particles/analysis_repelling.py b/Examples/Tests/repelling_particles/analysis_repelling.py index bda3d74d274..401ba7ba5d0 100755 --- a/Examples/Tests/repelling_particles/analysis_repelling.py +++ b/Examples/Tests/repelling_particles/analysis_repelling.py @@ -22,6 +22,7 @@ d is the distance between them beta is the velocity normalized by the speed of light """ + import glob 
import os import re @@ -36,22 +37,22 @@ # Check plotfile name specified in command line last_filename = sys.argv[1] -filename_radical = re.findall(r'(.*?)\d+/*$', last_filename)[0] +filename_radical = re.findall(r"(.*?)\d+/*$", last_filename)[0] # Loop through files, and extract the position and velocity of both particles x1 = [] x2 = [] beta1 = [] beta2 = [] -for filename in sorted(glob.glob(filename_radical + '*')): +for filename in sorted(glob.glob(filename_radical + "*")): print(filename) ds = yt.load(filename) ad = ds.all_data() - x1.append( float(ad[('electron1','particle_position_x')][0]) ) - x2.append( float(ad[('electron2','particle_position_x')][0]) ) - beta1.append( float(ad[('electron1','particle_momentum_x')][0])/(m_e*c) ) - beta2.append( float(ad[('electron2','particle_momentum_x')][0])/(m_e*c) ) + x1.append(float(ad[("electron1", "particle_position_x")][0])) + x2.append(float(ad[("electron2", "particle_position_x")][0])) + beta1.append(float(ad[("electron1", "particle_momentum_x")][0]) / (m_e * c)) + beta2.append(float(ad[("electron2", "particle_momentum_x")][0]) / (m_e * c)) # Convert to numpy array x1 = np.array(x1) @@ -60,23 +61,23 @@ beta2 = np.array(beta2) # Plot velocities, compare with theory -w = 5.e12 -re = physical_constants['classical electron radius'][0] -beta_th = np.sqrt( beta1[0]**2 - 2*w*re*np.log( (x2[0]-x1[0])/(x2-x1) ) ) -plt.plot( beta1, '+', label='Particle 1' ) -plt.plot( -beta2, 'x', label='Particle 2' ) -plt.plot( beta_th, '*', label='Theory' ) +w = 5.0e12 +re = physical_constants["classical electron radius"][0] +beta_th = np.sqrt(beta1[0] ** 2 - 2 * w * re * np.log((x2[0] - x1[0]) / (x2 - x1))) +plt.plot(beta1, "+", label="Particle 1") +plt.plot(-beta2, "x", label="Particle 2") +plt.plot(beta_th, "*", label="Theory") plt.legend(loc=0) -plt.xlabel('Time (a.u.)') -plt.ylabel('Normalized velocity') -plt.savefig('Comparison.png') +plt.xlabel("Time (a.u.)") +plt.ylabel("Normalized velocity") +plt.savefig("Comparison.png") # Check 
that the results are close to the theory -assert np.allclose( beta1[1:], beta_th[1:], atol=0.01 ) -assert np.allclose( -beta2[1:], beta_th[1:], atol=0.01 ) +assert np.allclose(beta1[1:], beta_th[1:], atol=0.01) +assert np.allclose(-beta2[1:], beta_th[1:], atol=0.01) # Run checksum regression test -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI test_name = os.path.split(os.getcwd())[1] diff --git a/Examples/Tests/resampling/analysis_leveling_thinning.py b/Examples/Tests/resampling/analysis_leveling_thinning.py index 5f3dc8ecdff..f55f3b996c5 100755 --- a/Examples/Tests/resampling/analysis_leveling_thinning.py +++ b/Examples/Tests/resampling/analysis_leveling_thinning.py @@ -16,11 +16,11 @@ import yt from scipy.special import erf -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI fn_final = sys.argv[1] -fn0 = fn_final[:-4] + '0000' +fn0 = fn_final[:-4] + "0000" ds0 = yt.load(fn0) ds = yt.load(fn_final) @@ -28,45 +28,48 @@ ad0 = ds0.all_data() ad = ds.all_data() -numcells = 16*16 -t_r = 1.3 # target ratio -relative_tol = 1.e-13 # tolerance for machine precision errors +numcells = 16 * 16 +t_r = 1.3 # target ratio +relative_tol = 1.0e-13 # tolerance for machine precision errors #### Tests for first species #### # Particles are present in all simulation cells and all have the same weight -ppc = 400. 
-numparts_init = numcells*ppc +ppc = 400.0 +numparts_init = numcells * ppc -w0 = ad0['resampled_part1','particle_weight'].to_ndarray() # weights before resampling -w = ad['resampled_part1','particle_weight'].to_ndarray() # weights after resampling +w0 = ad0["resampled_part1", "particle_weight"].to_ndarray() # weights before resampling +w = ad["resampled_part1", "particle_weight"].to_ndarray() # weights after resampling # Renormalize weights for easier calculations -w0 = w0*ppc -w = w*ppc +w0 = w0 * ppc +w = w * ppc # Check that initial number of particles is indeed as expected -assert(w0.shape[0] == numparts_init) +assert w0.shape[0] == numparts_init # Check that all initial particles have the same weight -assert(np.unique(w0).shape[0] == 1) +assert np.unique(w0).shape[0] == 1 # Check that this weight is 1 (to machine precision) -assert(abs(w0[0] - 1) < relative_tol) +assert abs(w0[0] - 1) < relative_tol # Check that the number of particles after resampling is as expected numparts_final = w.shape[0] -expected_numparts_final = numparts_init/t_r**2 +expected_numparts_final = numparts_init / t_r**2 error = np.abs(numparts_final - expected_numparts_final) -std_numparts_final = np.sqrt(numparts_init/t_r**2*(1.-1./t_r**2)) +std_numparts_final = np.sqrt(numparts_init / t_r**2 * (1.0 - 1.0 / t_r**2)) # 5 sigma test that has an intrinsic probability to fail of 1 over ~2 millions -print("difference between expected and actual final number of particles (1st species): " + str(error)) -print("tolerance: " + str(5*std_numparts_final)) -assert(error<5*std_numparts_final) +print( + "difference between expected and actual final number of particles (1st species): " + + str(error) +) +print("tolerance: " + str(5 * std_numparts_final)) +assert error < 5 * std_numparts_final # Check that the final weight is the same for all particles (to machine precision) and is as # expected final_weight = t_r**2 -assert(np.amax(np.abs(w-final_weight)) < relative_tol*final_weight ) +assert 
np.amax(np.abs(w - final_weight)) < relative_tol * final_weight #### Tests for second species #### @@ -74,68 +77,99 @@ # Using a single cell makes the analysis easier because leveling thinning is done separately in # each cell -ppc = 100000. +ppc = 100000.0 numparts_init = ppc -w0 = ad0['resampled_part2','particle_weight'].to_ndarray() # weights before resampling -w = ad['resampled_part2','particle_weight'].to_ndarray() # weights after resampling +w0 = ad0["resampled_part2", "particle_weight"].to_ndarray() # weights before resampling +w = ad["resampled_part2", "particle_weight"].to_ndarray() # weights after resampling # Renormalize and sort weights for easier calculations -w0 = np.sort(w0)*ppc -w = np.sort(w)*ppc +w0 = np.sort(w0) * ppc +w = np.sort(w) * ppc ## First we verify that the initial distribution is as expected # Check that the mean initial weight is as expected mean_initial_weight = np.average(w0) -expected_mean_initial_weight = 2.*np.sqrt(2.) +expected_mean_initial_weight = 2.0 * np.sqrt(2.0) error = np.abs(mean_initial_weight - expected_mean_initial_weight) -expected_std_initial_weight = 1./np.sqrt(2.) 
-std_mean_initial_weight = expected_std_initial_weight/np.sqrt(numparts_init) +expected_std_initial_weight = 1.0 / np.sqrt(2.0) +std_mean_initial_weight = expected_std_initial_weight / np.sqrt(numparts_init) # 5 sigma test that has an intrinsic probability to fail of 1 over ~2 millions -print("difference between expected and actual mean initial weight (2nd species): " + str(error)) -print("tolerance: " + str(5*std_mean_initial_weight)) -assert(error<5*std_mean_initial_weight) +print( + "difference between expected and actual mean initial weight (2nd species): " + + str(error) +) +print("tolerance: " + str(5 * std_mean_initial_weight)) +assert error < 5 * std_mean_initial_weight # Check that the initial weight variance is as expected variance_initial_weight = np.var(w0) expected_variance_initial_weight = 0.5 error = np.abs(variance_initial_weight - expected_variance_initial_weight) -std_variance_initial_weight = expected_variance_initial_weight*np.sqrt(2./numparts_init) +std_variance_initial_weight = expected_variance_initial_weight * np.sqrt( + 2.0 / numparts_init +) # 5 sigma test that has an intrinsic probability to fail of 1 over ~2 millions -print("difference between expected and actual variance of initial weight (2nd species): " + str(error)) -print("tolerance: " + str(5*std_variance_initial_weight)) +print( + "difference between expected and actual variance of initial weight (2nd species): " + + str(error) +) +print("tolerance: " + str(5 * std_variance_initial_weight)) ## Next we verify that the resampling worked as expected # Check that the level weight value is as expected from the initial distribution level_weight = w[0] -assert(np.abs(level_weight - mean_initial_weight*t_r) < level_weight*relative_tol) +assert np.abs(level_weight - mean_initial_weight * t_r) < level_weight * relative_tol # Check that the number of particles at the level weight is the same as predicted from analytic # calculations -numparts_leveled = np.argmax(w > level_weight) # This 
returns the first index for which +numparts_leveled = np.argmax(w > level_weight) # This returns the first index for which # w > level_weight, which thus corresponds to the number of particles at the level weight -expected_numparts_leveled = numparts_init/(2.*t_r)*(1+erf(expected_mean_initial_weight*(t_r-1.)) \ - -1./(np.sqrt(np.pi)*expected_mean_initial_weight)* \ - np.exp(-(expected_mean_initial_weight*(t_r-1.))**2)) +expected_numparts_leveled = ( + numparts_init + / (2.0 * t_r) + * ( + 1 + + erf(expected_mean_initial_weight * (t_r - 1.0)) + - 1.0 + / (np.sqrt(np.pi) * expected_mean_initial_weight) + * np.exp(-((expected_mean_initial_weight * (t_r - 1.0)) ** 2)) + ) +) error = np.abs(numparts_leveled - expected_numparts_leveled) -std_numparts_leveled = np.sqrt(expected_numparts_leveled - numparts_init/np.sqrt(np.pi)/(t_r* \ - expected_mean_initial_weight)**2*(np.sqrt(np.pi)/4.* \ - (2.*expected_mean_initial_weight**2+1.)*(1.-erf(expected_mean_initial_weight* \ - (t_r-1.)))-0.5*np.exp(-(expected_mean_initial_weight*(t_r-1.))**2* \ - (expected_mean_initial_weight*(t_r+1.))))) +std_numparts_leveled = np.sqrt( + expected_numparts_leveled + - numparts_init + / np.sqrt(np.pi) + / (t_r * expected_mean_initial_weight) ** 2 + * ( + np.sqrt(np.pi) + / 4.0 + * (2.0 * expected_mean_initial_weight**2 + 1.0) + * (1.0 - erf(expected_mean_initial_weight * (t_r - 1.0))) + - 0.5 + * np.exp( + -((expected_mean_initial_weight * (t_r - 1.0)) ** 2) + * (expected_mean_initial_weight * (t_r + 1.0)) + ) + ) +) # 5 sigma test that has an intrinsic probability to fail of 1 over ~2 millions -print("difference between expected and actual number of leveled particles (2nd species): " + str(error)) -print("tolerance: " + str(5*std_numparts_leveled)) +print( + "difference between expected and actual number of leveled particles (2nd species): " + + str(error) +) +print("tolerance: " + str(5 * std_numparts_leveled)) numparts_unaffected = w.shape[0] - numparts_leveled 
numparts_unaffected_anticipated = w0.shape[0] - np.argmax(w0 > level_weight) # Check that number of particles with weight higher than level weight is the same before and after # resampling -assert(numparts_unaffected == numparts_unaffected_anticipated) +assert numparts_unaffected == numparts_unaffected_anticipated # Check that particles with weight higher than level weight are unaffected by resampling. -assert(np.all(w[-numparts_unaffected:] == w0[-numparts_unaffected:])) +assert np.all(w[-numparts_unaffected:] == w0[-numparts_unaffected:]) test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, fn_final) diff --git a/Examples/Tests/restart/PICMI_inputs_id_cpu_read.py b/Examples/Tests/restart/PICMI_inputs_id_cpu_read.py index 88053fd7f5a..8c2be7b8750 100755 --- a/Examples/Tests/restart/PICMI_inputs_id_cpu_read.py +++ b/Examples/Tests/restart/PICMI_inputs_id_cpu_read.py @@ -34,73 +34,65 @@ ########################## grid = picmi.Cartesian2DGrid( - number_of_cells = [nx, ny], - lower_bound = [xmin, ymin], - upper_bound = [xmax, ymax], - lower_boundary_conditions = ['dirichlet', 'periodic'], - upper_boundary_conditions = ['dirichlet', 'periodic'], - lower_boundary_conditions_particles = ['absorbing', 'periodic'], - upper_boundary_conditions_particles = ['absorbing', 'periodic'], - moving_window_velocity = None, - warpx_max_grid_size = 32 + number_of_cells=[nx, ny], + lower_bound=[xmin, ymin], + upper_bound=[xmax, ymax], + lower_boundary_conditions=["dirichlet", "periodic"], + upper_boundary_conditions=["dirichlet", "periodic"], + lower_boundary_conditions_particles=["absorbing", "periodic"], + upper_boundary_conditions_particles=["absorbing", "periodic"], + moving_window_velocity=None, + warpx_max_grid_size=32, ) solver = picmi.ElectrostaticSolver( - grid=grid, method='Multigrid', required_precision=1e-6, - warpx_self_fields_verbosity=0 + grid=grid, + method="Multigrid", + required_precision=1e-6, + warpx_self_fields_verbosity=0, ) 
########################## # physics components ########################## -electrons = picmi.Species( - particle_type='electron', name='electrons' -) +electrons = picmi.Species(particle_type="electron", name="electrons") ########################## # diagnostics ########################## particle_diag = picmi.ParticleDiagnostic( - name = 'diag1', - period = 10, - write_dir = '.', - warpx_file_prefix = 'Python_restart_runtime_components_plt' + name="diag1", + period=10, + write_dir=".", + warpx_file_prefix="Python_restart_runtime_components_plt", ) field_diag = picmi.FieldDiagnostic( - name = 'diag1', - grid = grid, - period = 10, - data_list = ['phi'], - write_dir = '.', - warpx_file_prefix = 'Python_restart_runtime_components_plt' + name="diag1", + grid=grid, + period=10, + data_list=["phi"], + write_dir=".", + warpx_file_prefix="Python_restart_runtime_components_plt", ) checkpoint = picmi.Checkpoint( - name = 'chkpoint', - period = 5, - write_dir = '.', - warpx_file_min_digits = 5, - warpx_file_prefix = 'Python_restart_runtime_components_chk' + name="chkpoint", + period=5, + write_dir=".", + warpx_file_min_digits=5, + warpx_file_prefix="Python_restart_runtime_components_chk", ) ########################## # simulation setup ########################## -sim = picmi.Simulation( - solver = solver, - time_step_size = dt, - max_steps = max_steps, - verbose = 1 -) +sim = picmi.Simulation(solver=solver, time_step_size=dt, max_steps=max_steps, verbose=1) sim.add_species( - electrons, - layout = picmi.GriddedLayout( - n_macroparticle_per_cell=[0, 0], grid=grid - ) + electrons, layout=picmi.GriddedLayout(n_macroparticle_per_cell=[0, 0], grid=grid) ) for arg in sys.argv: @@ -124,11 +116,11 @@ np.random.seed(30025025) # wrap the electrons particle container -electron_wrapper = particle_containers.ParticleContainerWrapper('electrons') -electron_wrapper.add_real_comp('newPid') +electron_wrapper = particle_containers.ParticleContainerWrapper("electrons") 
+electron_wrapper.add_real_comp("newPid") -def add_particles(): +def add_particles(): nps = 10 x = np.linspace(0.005, 0.025, nps) y = np.zeros(nps) @@ -140,10 +132,10 @@ def add_particles(): newPid = 5.0 electron_wrapper.add_particles( - x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, - w=w, newPid=newPid + x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, w=w, newPid=newPid ) + callbacks.installbeforestep(add_particles) ########################## @@ -157,5 +149,5 @@ def add_particles(): # check that the ids and cpus are read properly ############################################### -assert(np.sum(np.concatenate(electron_wrapper.get_particle_id())) == 5050) -assert(np.sum(np.concatenate(electron_wrapper.get_particle_cpu())) == 0) +assert np.sum(np.concatenate(electron_wrapper.get_particle_id())) == 5050 +assert np.sum(np.concatenate(electron_wrapper.get_particle_cpu())) == 0 diff --git a/Examples/Tests/restart/PICMI_inputs_runtime_component_analyze.py b/Examples/Tests/restart/PICMI_inputs_runtime_component_analyze.py index b6e28076cbd..3061a3c1ff6 100755 --- a/Examples/Tests/restart/PICMI_inputs_runtime_component_analyze.py +++ b/Examples/Tests/restart/PICMI_inputs_runtime_component_analyze.py @@ -35,73 +35,65 @@ ########################## grid = picmi.Cartesian2DGrid( - number_of_cells = [nx, ny], - lower_bound = [xmin, ymin], - upper_bound = [xmax, ymax], - lower_boundary_conditions = ['dirichlet', 'periodic'], - upper_boundary_conditions = ['dirichlet', 'periodic'], - lower_boundary_conditions_particles = ['absorbing', 'periodic'], - upper_boundary_conditions_particles = ['absorbing', 'periodic'], - moving_window_velocity = None, - warpx_max_grid_size = 32 + number_of_cells=[nx, ny], + lower_bound=[xmin, ymin], + upper_bound=[xmax, ymax], + lower_boundary_conditions=["dirichlet", "periodic"], + upper_boundary_conditions=["dirichlet", "periodic"], + lower_boundary_conditions_particles=["absorbing", "periodic"], + upper_boundary_conditions_particles=["absorbing", "periodic"], + 
moving_window_velocity=None, + warpx_max_grid_size=32, ) solver = picmi.ElectrostaticSolver( - grid=grid, method='Multigrid', required_precision=1e-6, - warpx_self_fields_verbosity=0 + grid=grid, + method="Multigrid", + required_precision=1e-6, + warpx_self_fields_verbosity=0, ) ########################## # physics components ########################## -electrons = picmi.Species( - particle_type='electron', name='electrons' -) +electrons = picmi.Species(particle_type="electron", name="electrons") ########################## # diagnostics ########################## particle_diag = picmi.ParticleDiagnostic( - name = 'diag1', - period = 10, - write_dir = '.', - warpx_file_prefix = 'Python_restart_runtime_components_plt' + name="diag1", + period=10, + write_dir=".", + warpx_file_prefix="Python_restart_runtime_components_plt", ) field_diag = picmi.FieldDiagnostic( - name = 'diag1', - grid = grid, - period = 10, - data_list = ['phi'], - write_dir = '.', - warpx_file_prefix = 'Python_restart_runtime_components_plt' + name="diag1", + grid=grid, + period=10, + data_list=["phi"], + write_dir=".", + warpx_file_prefix="Python_restart_runtime_components_plt", ) checkpoint = picmi.Checkpoint( - name = 'chkpoint', - period = 5, - write_dir = '.', - warpx_file_min_digits = 5, - warpx_file_prefix = 'Python_restart_runtime_components_chk' + name="chkpoint", + period=5, + write_dir=".", + warpx_file_min_digits=5, + warpx_file_prefix="Python_restart_runtime_components_chk", ) ########################## # simulation setup ########################## -sim = picmi.Simulation( - solver = solver, - time_step_size = dt, - max_steps = max_steps, - verbose = 1 -) +sim = picmi.Simulation(solver=solver, time_step_size=dt, max_steps=max_steps, verbose=1) sim.add_species( - electrons, - layout = picmi.GriddedLayout( - n_macroparticle_per_cell=[0, 0], grid=grid - ) + electrons, layout=picmi.GriddedLayout(n_macroparticle_per_cell=[0, 0], grid=grid) ) for arg in sys.argv: @@ -127,8 +119,8 @@ 
electron_wrapper = particle_containers.ParticleContainerWrapper("electrons") electron_wrapper.add_real_comp("newPid") -def add_particles(): +def add_particles(): nps = 10 x = np.linspace(0.005, 0.025, nps) y = np.zeros(nps) @@ -140,10 +132,10 @@ def add_particles(): newPid = 5.0 electron_wrapper.add_particles( - x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, - w=w, newPid=newPid + x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, w=w, newPid=newPid ) + callbacks.installbeforestep(add_particles) ########################## diff --git a/Examples/Tests/restart/analysis_restart.py b/Examples/Tests/restart/analysis_restart.py index 1a5b1374672..4a4d198f63f 100755 --- a/Examples/Tests/restart/analysis_restart.py +++ b/Examples/Tests/restart/analysis_restart.py @@ -3,13 +3,13 @@ import os import sys -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI filename = sys.argv[1] # Check restart data v. original data -sys.path.insert(0, '../../../../warpx/Examples/') +sys.path.insert(0, "../../../../warpx/Examples/") from analysis_default_restart import check_restart check_restart(filename) diff --git a/Examples/Tests/restart_eb/PICMI_inputs_restart_eb.py b/Examples/Tests/restart_eb/PICMI_inputs_restart_eb.py index 8457d6e051a..0cfd0bcff5f 100755 --- a/Examples/Tests/restart_eb/PICMI_inputs_restart_eb.py +++ b/Examples/Tests/restart_eb/PICMI_inputs_restart_eb.py @@ -34,16 +34,22 @@ ########################## uniform_plasma_elec = picmi.UniformDistribution( - density = 1e23, # number of electrons per m^3 - lower_bound = [-1e-5, -1e-5, -149e-6], - upper_bound = [1e-5, 1e-5, -129e-6], - directed_velocity = [0., 0., 2000.*picmi.constants.c] # uth the std of the (unitless) momentum + density=1e23, # number of electrons per m^3 + lower_bound=[-1e-5, -1e-5, -149e-6], + upper_bound=[1e-5, 1e-5, -129e-6], + directed_velocity=[ + 0.0, + 0.0, + 2000.0 * picmi.constants.c, + ], # uth the std of the (unitless) momentum ) 
electrons = picmi.Species( - particle_type='electron', name='electrons', + particle_type="electron", + name="electrons", initial_distribution=uniform_plasma_elec, - warpx_save_particles_at_xhi=1, warpx_save_particles_at_eb=1 + warpx_save_particles_at_xhi=1, + warpx_save_particles_at_eb=1, ) ########################## @@ -51,19 +57,17 @@ ########################## grid = picmi.Cartesian3DGrid( - number_of_cells = [nx, ny, nz], - lower_bound = [xmin, ymin, zmin], - upper_bound = [xmax, ymax, zmax], - lower_boundary_conditions=['none', 'none', 'none'], - upper_boundary_conditions=['none', 'none', 'none'], - lower_boundary_conditions_particles=['open', 'open', 'open'], - upper_boundary_conditions_particles=['open', 'open', 'open'], - warpx_max_grid_size = 32 + number_of_cells=[nx, ny, nz], + lower_bound=[xmin, ymin, zmin], + upper_bound=[xmax, ymax, zmax], + lower_boundary_conditions=["none", "none", "none"], + upper_boundary_conditions=["none", "none", "none"], + lower_boundary_conditions_particles=["open", "open", "open"], + upper_boundary_conditions_particles=["open", "open", "open"], + warpx_max_grid_size=32, ) -solver = picmi.ElectromagneticSolver( - grid=grid, cfl=cfl -) +solver = picmi.ElectromagneticSolver(grid=grid, cfl=cfl) embedded_boundary = picmi.EmbeddedBoundary( implicit_function="-max(max(max(x-12.5e-6,-12.5e-6-x),max(y-12.5e-6,-12.5e-6-y)),max(z-(-6.15e-5),-8.65e-5-z))" @@ -74,26 +78,26 @@ ########################## particle_diag = picmi.ParticleDiagnostic( - name = 'diag1', - period = diagnostic_intervals, - write_dir = '.', - warpx_file_prefix = 'Python_restart_eb_plt' + name="diag1", + period=diagnostic_intervals, + write_dir=".", + warpx_file_prefix="Python_restart_eb_plt", ) field_diag = picmi.FieldDiagnostic( - name = 'diag1', - grid = grid, - period = diagnostic_intervals, - data_list = ['Ex', 'Ey', 'Ez', 'Bx', 'By', 'Bz'], - write_dir = '.', - warpx_file_prefix = 'Python_restart_eb_plt' + name="diag1", + grid=grid, + 
period=diagnostic_intervals, + data_list=["Ex", "Ey", "Ez", "Bx", "By", "Bz"], + write_dir=".", + warpx_file_prefix="Python_restart_eb_plt", ) checkpoint = picmi.Checkpoint( - name = 'chkpoint', - period = diagnostic_intervals, - write_dir = '.', - warpx_file_min_digits = 5, - warpx_file_prefix = 'Python_restart_eb_chk' + name="chkpoint", + period=diagnostic_intervals, + write_dir=".", + warpx_file_min_digits=5, + warpx_file_prefix="Python_restart_eb_chk", ) ########################## @@ -101,19 +105,16 @@ ########################## sim = picmi.Simulation( - solver = solver, - max_steps = max_steps, + solver=solver, + max_steps=max_steps, warpx_embedded_boundary=embedded_boundary, verbose=True, warpx_load_balance_intervals=40, - warpx_load_balance_efficiency_ratio_threshold=0.9 + warpx_load_balance_efficiency_ratio_threshold=0.9, ) sim.add_species( - electrons, - layout = picmi.GriddedLayout( - n_macroparticle_per_cell=[1, 1, 1], grid=grid - ) + electrons, layout=picmi.GriddedLayout(n_macroparticle_per_cell=[1, 1, 1], grid=grid) ) for arg in sys.argv: diff --git a/Examples/Tests/rigid_injection/analysis_rigid_injection_BoostedFrame.py b/Examples/Tests/rigid_injection/analysis_rigid_injection_BoostedFrame.py index fd51298816b..9b9054a4d42 100755 --- a/Examples/Tests/rigid_injection/analysis_rigid_injection_BoostedFrame.py +++ b/Examples/Tests/rigid_injection/analysis_rigid_injection_BoostedFrame.py @@ -8,7 +8,7 @@ # License: BSD-3-Clause-LBNL -''' +""" Analysis script of a WarpX simulation of rigid injection in a boosted frame. A Gaussian electron beam starts from -5 microns, propagates rigidly up to @@ -18,7 +18,7 @@ The simulation runs in a boosted frame, and the analysis is done in the lab frame, i.e., on the back-transformed diagnostics. 
-''' +""" import os import sys @@ -29,7 +29,7 @@ yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI filename = sys.argv[1] @@ -40,46 +40,46 @@ # Read data from new back-transformed diagnostics (plotfile) ds_plotfile = yt.load(filename) -x_plotfile = ds_plotfile.all_data()['beam', 'particle_position_x'].v -z_plotfile = ds_plotfile.all_data()['beam', 'particle_position_y'].v -ux_plotfile = ds_plotfile.all_data()['beam', 'particle_momentum_x'].v -uy_plotfile = ds_plotfile.all_data()['beam', 'particle_momentum_y'].v -uz_plotfile = ds_plotfile.all_data()['beam', 'particle_momentum_z'].v +x_plotfile = ds_plotfile.all_data()["beam", "particle_position_x"].v +z_plotfile = ds_plotfile.all_data()["beam", "particle_position_y"].v +ux_plotfile = ds_plotfile.all_data()["beam", "particle_momentum_x"].v +uy_plotfile = ds_plotfile.all_data()["beam", "particle_momentum_y"].v +uz_plotfile = ds_plotfile.all_data()["beam", "particle_momentum_z"].v # Read data from new back-transformed diagnostics (openPMD) series = io.Series("./diags/diag2/openpmd_%T.h5", io.Access.read_only) ds_openpmd = series.iterations[1] -x_openpmd = ds_openpmd.particles['beam']['position']['x'][:] -z_openpmd = ds_openpmd.particles['beam']['position']['z'][:] -ux_openpmd = ds_openpmd.particles['beam']['momentum']['x'][:] -uy_openpmd = ds_openpmd.particles['beam']['momentum']['y'][:] -uz_openpmd = ds_openpmd.particles['beam']['momentum']['z'][:] +x_openpmd = ds_openpmd.particles["beam"]["position"]["x"][:] +z_openpmd = ds_openpmd.particles["beam"]["position"]["z"][:] +ux_openpmd = ds_openpmd.particles["beam"]["momentum"]["x"][:] +uy_openpmd = ds_openpmd.particles["beam"]["momentum"]["y"][:] +uz_openpmd = ds_openpmd.particles["beam"]["momentum"]["z"][:] series.flush() # Sort and compare arrays to check consistency between plotfile BTD and openPMD BTD -assert(np.allclose(np.sort(x_plotfile), 
np.sort(x_openpmd), rtol=rtol, atol=atol)) -assert(np.allclose(np.sort(z_plotfile), np.sort(z_openpmd), rtol=rtol, atol=atol)) -assert(np.allclose(np.sort(ux_plotfile), np.sort(ux_openpmd), rtol=rtol, atol=atol)) -assert(np.allclose(np.sort(uy_plotfile), np.sort(uy_openpmd), rtol=rtol, atol=atol)) -assert(np.allclose(np.sort(uz_plotfile), np.sort(uz_openpmd), rtol=rtol, atol=atol)) +assert np.allclose(np.sort(x_plotfile), np.sort(x_openpmd), rtol=rtol, atol=atol) +assert np.allclose(np.sort(z_plotfile), np.sort(z_openpmd), rtol=rtol, atol=atol) +assert np.allclose(np.sort(ux_plotfile), np.sort(ux_openpmd), rtol=rtol, atol=atol) +assert np.allclose(np.sort(uy_plotfile), np.sort(uy_openpmd), rtol=rtol, atol=atol) +assert np.allclose(np.sort(uz_plotfile), np.sort(uz_openpmd), rtol=rtol, atol=atol) # Initial parameters -z0 = 20.e-6 -x0 = 1.e-6 +z0 = 20.0e-6 +x0 = 1.0e-6 theta0 = np.arcsin(0.1) # Theoretical beam width after propagation with rigid injection z = np.mean(z_plotfile) x = np.std(x_plotfile) -print(f'Beam position = {z}') -print(f'Beam width = {x}') +print(f"Beam position = {z}") +print(f"Beam width = {x}") -xth = np.sqrt(x0**2 + (z-z0)**2 * theta0**2) -err = np.abs((x-xth) / xth) +xth = np.sqrt(x0**2 + (z - z0) ** 2 * theta0**2) +err = np.abs((x - xth) / xth) tol = 1e-2 -print(f'error = {err}') -print(f'tolerance = {tol}') -assert(err < tol) +print(f"error = {err}") +print(f"tolerance = {tol}") +assert err < tol test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/rigid_injection/analysis_rigid_injection_LabFrame.py b/Examples/Tests/rigid_injection/analysis_rigid_injection_LabFrame.py index ee88e32252d..94b2a1ac07e 100755 --- a/Examples/Tests/rigid_injection/analysis_rigid_injection_LabFrame.py +++ b/Examples/Tests/rigid_injection/analysis_rigid_injection_LabFrame.py @@ -7,7 +7,7 @@ # License: BSD-3-Clause-LBNL -''' +""" Analysis script of a WarpX simulation of rigid injection. 
A Gaussian electron beam starts from -5 microns, propagates rigidly up to @@ -21,7 +21,7 @@ Additionally, this script tests that runtime attributes are correctly initialized with the gaussian_beam injection style. -''' +""" import os import sys @@ -30,53 +30,55 @@ import yt yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI filename = sys.argv[1] + # WarpX headers include more data when rigid injection is used, # which gives an error with the last yt release. # To avoid this issue, the three last lines of WarpXHeader are removed if # needed. def remove_rigid_lines(plotfile, nlines_if_rigid): - header_name = plotfile + '/WarpXHeader' - f = open(header_name, 'r') + header_name = plotfile + "/WarpXHeader" + f = open(header_name, "r") file_lines = f.readlines() nlines = len(file_lines) f.close() if nlines == nlines_if_rigid: - f = open(header_name, 'w') + f = open(header_name, "w") f.writelines(file_lines[:-3]) f.close() + # Remove rigid injection header lines remove_rigid_lines(filename, 18) # Read beam parameters -ds = yt.load( filename ) +ds = yt.load(filename) ad = ds.all_data() # Beam longitudinal position -z = np.mean(ad['beam', 'particle_position_y'].v) +z = np.mean(ad["beam", "particle_position_y"].v) # Beam width -w = np.std(ad['beam', 'particle_position_x'].v) +w = np.std(ad["beam", "particle_position_x"].v) # initial parameters -z0 = 20.e-6 -z0_no_rigid = -5.e-6 -w0 = 1.e-6 +z0 = 20.0e-6 +z0_no_rigid = -5.0e-6 +w0 = 1.0e-6 theta0 = np.arcsin(0.1) # Theoretical beam width after propagation if rigid OFF # Inform the user if rigid injection simply off (just to be kind) -wth_no_rigid = np.sqrt( w0**2 + (z-z0_no_rigid)**2*theta0**2 ) -error_no_rigid = np.abs((w-wth_no_rigid)/wth_no_rigid) -if ( error_no_rigid < 0.05): +wth_no_rigid = np.sqrt(w0**2 + (z - z0_no_rigid) ** 2 * theta0**2) +error_no_rigid = np.abs((w - wth_no_rigid) / wth_no_rigid) 
+if error_no_rigid < 0.05: print("error no rigid: " + str(error_no_rigid)) print("Looks like the beam defocuses as if rigid injection were OFF") # Theoretical beam width after propagation if rigid ON -wth = np.sqrt( w0**2 + (z-z0)**2*theta0**2 ) -error_rel = np.abs((w-wth)/wth) +wth = np.sqrt(w0**2 + (z - z0) ** 2 * theta0**2) +error_rel = np.abs((w - wth) / wth) tolerance_rel = 0.05 # Print error and assert small error @@ -86,19 +88,19 @@ def remove_rigid_lines(plotfile, nlines_if_rigid): print("error_rel : " + str(error_rel)) print("tolerance_rel: " + str(tolerance_rel)) -assert( error_rel < tolerance_rel ) +assert error_rel < tolerance_rel ### Check that user runtime attributes are correctly initialized -filename_start = filename[:-5] + '00000' -ds_start = yt.load( filename_start ) +filename_start = filename[:-5] + "00000" +ds_start = yt.load(filename_start) ad_start = ds_start.all_data() -x = ad_start['beam', 'particle_position_x'] -z = ad_start['beam', 'particle_position_y'] -orig_z = ad_start['beam', 'particle_orig_z'] -center = ad_start['beam', 'particle_center'] -assert(np.array_equal(z, orig_z)) -assert(np.array_equal(1*(np.abs(x) < 5.e-7), center)) +x = ad_start["beam", "particle_position_x"] +z = ad_start["beam", "particle_position_y"] +orig_z = ad_start["beam", "particle_orig_z"] +center = ad_start["beam", "particle_center"] +assert np.array_equal(z, orig_z) +assert np.array_equal(1 * (np.abs(x) < 5.0e-7), center) test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/scraping/analysis_rz.py b/Examples/Tests/scraping/analysis_rz.py index 11d0194707f..8bf86e320f3 100755 --- a/Examples/Tests/scraping/analysis_rz.py +++ b/Examples/Tests/scraping/analysis_rz.py @@ -27,46 +27,61 @@ import yt from openpmd_viewer import OpenPMDTimeSeries -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI tolerance = 0 fn 
= sys.argv[1] -ds = yt.load( fn ) +ds = yt.load(fn) ad = ds.all_data() -x = ad['electron', 'particle_position_x'].v +x = ad["electron", "particle_position_x"].v -error = len(x)-512 -print('error = ', error) -print('tolerance = ', tolerance) -assert(error==tolerance) +error = len(x) - 512 +print("error = ", error) +print("tolerance = ", tolerance) +assert error == tolerance # Check that all the removed particles are properly recorded # by making sure that, at each iteration, the sum of the number of # remaining particles and scraped particles is equal to the # original number of particles -ts_full = OpenPMDTimeSeries('./diags/diag2/') -ts_scraping = OpenPMDTimeSeries('./diags/diag3/particles_at_eb') +ts_full = OpenPMDTimeSeries("./diags/diag2/") +ts_scraping = OpenPMDTimeSeries("./diags/diag3/particles_at_eb") -def n_remaining_particles( iteration ): - w, = ts_full.get_particle(['w'], iteration=iteration) + +def n_remaining_particles(iteration): + (w,) = ts_full.get_particle(["w"], iteration=iteration) return len(w) -def n_scraped_particles( iteration ): - step_scraped = ts_scraping.get_particle( ['stepScraped'], iteration=ts_scraping.iterations[0] ) + + +def n_scraped_particles(iteration): + step_scraped = ts_scraping.get_particle( + ["stepScraped"], iteration=ts_scraping.iterations[0] + ) return (step_scraped <= iteration).sum() -n_remaining = np.array([ n_remaining_particles(iteration) for iteration in ts_full.iterations ]) -n_scraped = np.array([ n_scraped_particles(iteration) for iteration in ts_full.iterations ]) + + +n_remaining = np.array( + [n_remaining_particles(iteration) for iteration in ts_full.iterations] +) +n_scraped = np.array( + [n_scraped_particles(iteration) for iteration in ts_full.iterations] +) n_total = n_remaining[0] -assert np.all( n_scraped+n_remaining == n_total) +assert np.all(n_scraped + n_remaining == n_total) # Check that the particle IDs match between the initial iteration # (all particles in the simulation domain) and the finall 
iteration (particles are either scraped or still in simulation box) -id_initial, = ts_full.get_particle(['id'], iteration=0) -id_final_scrape, = ts_scraping.get_particle(['id'], iteration=ts_scraping.iterations[0]) -id_final_box, = ts_full.get_particle(['id'], iteration=ts_full.iterations[-1]) -id_final = np.concatenate( (id_final_scrape, id_final_box)) -assert np.all( np.sort(id_initial) == np.sort(id_final) ) # Sort because particles may not be in the same order +(id_initial,) = ts_full.get_particle(["id"], iteration=0) +(id_final_scrape,) = ts_scraping.get_particle( + ["id"], iteration=ts_scraping.iterations[0] +) +(id_final_box,) = ts_full.get_particle(["id"], iteration=ts_full.iterations[-1]) +id_final = np.concatenate((id_final_scrape, id_final_box)) +assert np.all( + np.sort(id_initial) == np.sort(id_final) +) # Sort because particles may not be in the same order # Checksum test test_name = os.path.split(os.getcwd())[1] diff --git a/Examples/Tests/scraping/analysis_rz_filter.py b/Examples/Tests/scraping/analysis_rz_filter.py index a4e0dafddbc..498f25e8422 100755 --- a/Examples/Tests/scraping/analysis_rz_filter.py +++ b/Examples/Tests/scraping/analysis_rz_filter.py @@ -29,35 +29,50 @@ tolerance = 0 fn = sys.argv[1] -ds = yt.load( fn ) +ds = yt.load(fn) ad = ds.all_data() -x = ad['electron', 'particle_position_x'].v +x = ad["electron", "particle_position_x"].v -error = len(x)-512 -print('error = ', error) -print('tolerance = ', tolerance) -assert(error==tolerance) +error = len(x) - 512 +print("error = ", error) +print("tolerance = ", tolerance) +assert error == tolerance # Check that all the removed particles are properly recorded # by making sure that, at each iteration, the sum of the number of # remaining particles and scraped particles is equal to half the # original number of particles # also check that no particles with z <= 0 have been scraped -ts_full = OpenPMDTimeSeries('./diags/diag2/') -ts_scraping = 
OpenPMDTimeSeries('./diags/diag3/particles_at_eb') +ts_full = OpenPMDTimeSeries("./diags/diag2/") +ts_scraping = OpenPMDTimeSeries("./diags/diag3/particles_at_eb") -def n_remaining_particles( iteration ): - w, = ts_full.get_particle(['w'], iteration=iteration) + +def n_remaining_particles(iteration): + (w,) = ts_full.get_particle(["w"], iteration=iteration) return len(w) -def n_scraped_particles( iteration ): - step_scraped = ts_scraping.get_particle( ['stepScraped'], iteration=ts_scraping.iterations[0] ) + + +def n_scraped_particles(iteration): + step_scraped = ts_scraping.get_particle( + ["stepScraped"], iteration=ts_scraping.iterations[0] + ) return (step_scraped <= iteration).sum() -def n_scraped_z_leq_zero( iteration ): - z_pos, = ts_scraping.get_particle( ['z'], iteration=ts_scraping.iterations[0] ) + + +def n_scraped_z_leq_zero(iteration): + (z_pos,) = ts_scraping.get_particle(["z"], iteration=ts_scraping.iterations[0]) return (z_pos <= 0).sum() -n_remaining = np.array([ n_remaining_particles(iteration) for iteration in ts_full.iterations ]) -n_scraped = np.array([ n_scraped_particles(iteration) for iteration in ts_full.iterations ]) -n_z_leq_zero = np.array([ n_scraped_z_leq_zero(iteration) for iteration in ts_full.iterations ]) + + +n_remaining = np.array( + [n_remaining_particles(iteration) for iteration in ts_full.iterations] +) +n_scraped = np.array( + [n_scraped_particles(iteration) for iteration in ts_full.iterations] +) +n_z_leq_zero = np.array( + [n_scraped_z_leq_zero(iteration) for iteration in ts_full.iterations] +) n_total = n_remaining[0] -assert np.all( 2*n_scraped+n_remaining == n_total) -assert np.all( n_z_leq_zero == 0) +assert np.all(2 * n_scraped + n_remaining == n_total) +assert np.all(n_z_leq_zero == 0) diff --git a/Examples/Tests/silver_mueller/analysis_silver_mueller.py b/Examples/Tests/silver_mueller/analysis_silver_mueller.py index bfab40aa991..e1de7199aa0 100755 --- a/Examples/Tests/silver_mueller/analysis_silver_mueller.py +++ 
b/Examples/Tests/silver_mueller/analysis_silver_mueller.py @@ -16,37 +16,40 @@ import sys import numpy as np +import yt -import yt ; yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +yt.funcs.mylog.setLevel(0) +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI filename = sys.argv[1] -ds = yt.load( filename ) -all_data_level_0 = ds.covering_grid(level=0,left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) -warpx_used_inputs = open('./warpx_used_inputs', 'r').read() -geom_RZ = re.search('geometry.dims = RZ', warpx_used_inputs) +ds = yt.load(filename) +all_data_level_0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +warpx_used_inputs = open("./warpx_used_inputs", "r").read() +geom_RZ = re.search("geometry.dims = RZ", warpx_used_inputs) if geom_RZ: - Er = all_data_level_0['boxlib', 'Er'].v.squeeze() - Et = all_data_level_0['boxlib', 'Et'].v.squeeze() - Ez = all_data_level_0['boxlib', 'Ez'].v.squeeze() + Er = all_data_level_0["boxlib", "Er"].v.squeeze() + Et = all_data_level_0["boxlib", "Et"].v.squeeze() + Ez = all_data_level_0["boxlib", "Ez"].v.squeeze() else: - Ex = all_data_level_0['boxlib', 'Ex'].v.squeeze() - Ey = all_data_level_0['boxlib', 'Ey'].v.squeeze() - Ez = all_data_level_0['boxlib', 'Ez'].v.squeeze() + Ex = all_data_level_0["boxlib", "Ex"].v.squeeze() + Ey = all_data_level_0["boxlib", "Ey"].v.squeeze() + Ez = all_data_level_0["boxlib", "Ez"].v.squeeze() # The peak of the initial laser pulse is on the order of 6 V/m # Check that the amplitude after reflection is less than 0.01 V/m max_reflection_amplitude = 0.01 if geom_RZ: - assert np.all( abs(Er) < max_reflection_amplitude ) - assert np.all( abs(Et) < max_reflection_amplitude ) - assert np.all( abs(Ez) < max_reflection_amplitude ) + assert np.all(abs(Er) < max_reflection_amplitude) + assert np.all(abs(Et) < max_reflection_amplitude) + assert np.all(abs(Ez) < max_reflection_amplitude) 
else: - assert np.all( abs(Ex) < max_reflection_amplitude ) - assert np.all( abs(Ey) < max_reflection_amplitude ) - assert np.all( abs(Ez) < max_reflection_amplitude ) + assert np.all(abs(Ex) < max_reflection_amplitude) + assert np.all(abs(Ey) < max_reflection_amplitude) + assert np.all(abs(Ez) < max_reflection_amplitude) test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/single_particle/analysis_bilinear_filter.py b/Examples/Tests/single_particle/analysis_bilinear_filter.py index db7250dc3bb..198d84c6bfd 100755 --- a/Examples/Tests/single_particle/analysis_bilinear_filter.py +++ b/Examples/Tests/single_particle/analysis_bilinear_filter.py @@ -11,10 +11,11 @@ import sys import numpy as np +import yt from scipy import signal -import yt ; yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +yt.funcs.mylog.setLevel(0) +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # Build Jx without filter. This can be obtained by running this test without @@ -25,41 +26,45 @@ # > OMP_NUM_THREADS=2 mpirun -np 2 ~/warpx/Bin/main2d.gnu.TPROF.MPI.OMP.ex \ # inputs warpx.use_filter=1 warpx.filter_npass_each_dir=1 5 # and then print the values in the array F_filtered below. 
-my_F_nofilter = np.zeros([16,16]) -my_F_nofilter[8,8] = -1.601068237523421e-11 -my_F_nofilter[8,7] = -1.601068237523421e-11 +my_F_nofilter = np.zeros([16, 16]) +my_F_nofilter[8, 8] = -1.601068237523421e-11 +my_F_nofilter[8, 7] = -1.601068237523421e-11 # Build 2D filter -filter0 = np.array([.25,.5,.25]) -my_order = [1,5] +filter0 = np.array([0.25, 0.5, 0.25]) +my_order = [1, 5] my_filterx = filter0 my_filtery = filter0 -while my_order[0]>1: - my_filterx = np.convolve(my_filterx,filter0) +while my_order[0] > 1: + my_filterx = np.convolve(my_filterx, filter0) my_order[0] -= 1 -while my_order[1]>1: - my_filtery = np.convolve(my_filtery,filter0) +while my_order[1] > 1: + my_filtery = np.convolve(my_filtery, filter0) my_order[1] -= 1 -my_filter = my_filterx[:,None]*my_filtery +my_filter = my_filterx[:, None] * my_filtery # Apply filter. my_F_filtered is the theoretical value for filtered field -my_F_filtered = signal.convolve2d(my_F_nofilter, my_filter, boundary='symm', mode='same') +my_F_filtered = signal.convolve2d( + my_F_nofilter, my_filter, boundary="symm", mode="same" +) # Get simulation result for F_filtered filename = sys.argv[1] -ds = yt.load( filename ) -sl = yt.SlicePlot(ds, 2, 'jx', aspect=1) -all_data_level_0 = ds.covering_grid(level=0,left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) -F_filtered = all_data_level_0['boxlib', 'jx'].v.squeeze() +ds = yt.load(filename) +sl = yt.SlicePlot(ds, 2, "jx", aspect=1) +all_data_level_0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +F_filtered = all_data_level_0["boxlib", "jx"].v.squeeze() # Compare theory and PIC for filtered value -error_rel = np.sum( np.abs(F_filtered - my_F_filtered) ) / np.sum( np.abs(my_F_filtered) ) -tolerance_rel = 1.e-14 +error_rel = np.sum(np.abs(F_filtered - my_F_filtered)) / np.sum(np.abs(my_F_filtered)) +tolerance_rel = 1.0e-14 print("error_rel : " + str(error_rel)) print("tolerance_rel: " + str(tolerance_rel)) -assert( error_rel < 
tolerance_rel ) +assert error_rel < tolerance_rel test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/space_charge_initialization/analysis.py b/Examples/Tests/space_charge_initialization/analysis.py index 48e19ea0b75..1d5c8b9cb78 100755 --- a/Examples/Tests/space_charge_initialization/analysis.py +++ b/Examples/Tests/space_charge_initialization/analysis.py @@ -11,12 +11,13 @@ verifying that the space-charge field of a Gaussian beam corresponds to the expected theoretical field. """ + import os import sys import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt import numpy as np import scipy.constants as scc @@ -24,23 +25,26 @@ from scipy.special import gammainc yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # Parameters from the Simulation -Qtot = -1.e-20 -r0 = 2.e-6 +Qtot = -1.0e-20 +r0 = 2.0e-6 # Open data file filename = sys.argv[1] -ds = yt.load( filename ) +ds = yt.load(filename) # yt 4.0+ has rounding issues with our domain data: # RuntimeError: yt attempted to read outside the boundaries # of a non-periodic domain along dimension 0. 
-if 'force_periodicity' in dir(ds): ds.force_periodicity() +if "force_periodicity" in dir(ds): + ds.force_periodicity() # Extract data -ad0 = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) +ad0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) Ex_array = ad0[("mesh", "Ex")].to_ndarray().squeeze() if ds.dimensionality == 2: # Rename the z dimension as y, so as to make this script work for 2d and 3d @@ -50,65 +54,75 @@ Ez_array = ad0[("mesh", "Ez")].to_ndarray() # Extract grid coordinates -Nx, Ny, Nz = ds.domain_dimensions +Nx, Ny, Nz = ds.domain_dimensions xmin, ymin, zmin = ds.domain_left_edge.v Lx, Ly, Lz = ds.domain_width.v -x = xmin + Lx/Nx*(0.5+np.arange(Nx)) -y = ymin + Ly/Ny*(0.5+np.arange(Ny)) -z = zmin + Lz/Nz*(0.5+np.arange(Nz)) +x = xmin + Lx / Nx * (0.5 + np.arange(Nx)) +y = ymin + Ly / Ny * (0.5 + np.arange(Ny)) +z = zmin + Lz / Nz * (0.5 + np.arange(Nz)) # Compute theoretical field if ds.dimensionality == 2: - x_2d, y_2d = np.meshgrid(x, y, indexing='ij') + x_2d, y_2d = np.meshgrid(x, y, indexing="ij") r2 = x_2d**2 + y_2d**2 - factor = (Qtot/r0)/(2*np.pi*scc.epsilon_0*r2) * (1-np.exp(-r2/(2*r0**2))) + factor = ( + (Qtot / r0) / (2 * np.pi * scc.epsilon_0 * r2) * (1 - np.exp(-r2 / (2 * r0**2))) + ) Ex_th = x_2d * factor Ey_th = y_2d * factor elif ds.dimensionality == 3: - x_2d, y_2d, z_2d = np.meshgrid(x, y, z, indexing='ij') + x_2d, y_2d, z_2d = np.meshgrid(x, y, z, indexing="ij") r2 = x_2d**2 + y_2d**2 + z_2d**2 - factor = Qtot/(4*np.pi*scc.epsilon_0*r2**1.5) * gammainc(3./2, r2/(2.*r0**2)) - Ex_th = factor*x_2d - Ey_th = factor*y_2d - Ez_th = factor*z_2d + factor = ( + Qtot + / (4 * np.pi * scc.epsilon_0 * r2**1.5) + * gammainc(3.0 / 2, r2 / (2.0 * r0**2)) + ) + Ex_th = factor * x_2d + Ey_th = factor * y_2d + Ez_th = factor * z_2d + # Plot theory and data def make_2d(arr): if arr.ndim == 3: - return arr[:,:,Nz//2] + return arr[:, :, Nz // 2] else: return arr 
-plt.figure(figsize=(10,10)) + + +plt.figure(figsize=(10, 10)) plt.subplot(221) -plt.title('Ex: Theory') +plt.title("Ex: Theory") plt.imshow(make_2d(Ex_th)) plt.colorbar() plt.subplot(222) -plt.title('Ex: Simulation') +plt.title("Ex: Simulation") plt.imshow(make_2d(Ex_array)) plt.colorbar() plt.subplot(223) -plt.title('Ey: Theory') +plt.title("Ey: Theory") plt.imshow(make_2d(Ey_th)) plt.colorbar() plt.subplot(224) -plt.title('Ey: Simulation') +plt.title("Ey: Simulation") plt.imshow(make_2d(Ey_array)) plt.colorbar() -plt.savefig('Comparison.png') +plt.savefig("Comparison.png") + # Automatically check the results def check(E, E_th, label): - print( 'Relative error in %s: %.3f'%( - label, abs(E-E_th).max()/E_th.max())) + print("Relative error in %s: %.3f" % (label, abs(E - E_th).max() / E_th.max())) tolerance_rel = 0.165 print("tolerance_rel: " + str(tolerance_rel)) - assert np.allclose( E, E_th, atol=tolerance_rel*E_th.max() ) + assert np.allclose(E, E_th, atol=tolerance_rel * E_th.max()) + -check( Ex_array, Ex_th, 'Ex' ) -check( Ey_array, Ey_th, 'Ey' ) +check(Ex_array, Ex_th, "Ex") +check(Ey_array, Ey_th, "Ey") if ds.dimensionality == 3: - check( Ez_array, Ez_th, 'Ez' ) + check(Ez_array, Ez_th, "Ez") test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename, do_particles=0) diff --git a/Examples/Tests/vay_deposition/analysis.py b/Examples/Tests/vay_deposition/analysis.py index cfd089f2112..82776c34c42 100755 --- a/Examples/Tests/vay_deposition/analysis.py +++ b/Examples/Tests/vay_deposition/analysis.py @@ -15,7 +15,7 @@ yt.funcs.mylog.setLevel(50) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # Plotfile data set @@ -24,17 +24,16 @@ # Check relative L-infinity spatial norm of rho/epsilon_0 - div(E) data = ds.covering_grid( - level=0, - left_edge=ds.domain_left_edge, - dims=ds.domain_dimensions) -rho = data[('boxlib','rho')].to_ndarray() 
-divE = data[('boxlib','divE')].to_ndarray() -error_rel = np.amax(np.abs(divE-rho/epsilon_0))/np.amax(np.abs(rho/epsilon_0)) + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +rho = data[("boxlib", "rho")].to_ndarray() +divE = data[("boxlib", "divE")].to_ndarray() +error_rel = np.amax(np.abs(divE - rho / epsilon_0)) / np.amax(np.abs(rho / epsilon_0)) tolerance = 1e-3 print("Error on charge conservation:") print("error_rel = {}".format(error_rel)) print("tolerance = {}".format(tolerance)) -assert( error_rel < tolerance ) +assert error_rel < tolerance # Checksum analysis test_name = os.path.split(os.getcwd())[1] diff --git a/Examples/analysis_default_openpmd_regression.py b/Examples/analysis_default_openpmd_regression.py index 3aadc49ac51..03a0f1ede1f 100755 --- a/Examples/analysis_default_openpmd_regression.py +++ b/Examples/analysis_default_openpmd_regression.py @@ -4,7 +4,7 @@ import re import sys -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI # this will be the name of the plot file @@ -14,7 +14,7 @@ test_name = os.path.split(os.getcwd())[1] # Run checksum regression test -if re.search( 'single_precision', fn ): - checksumAPI.evaluate_checksum(test_name, fn, output_format='openpmd', rtol=2.e-6) +if re.search("single_precision", fn): + checksumAPI.evaluate_checksum(test_name, fn, output_format="openpmd", rtol=2.0e-6) else: - checksumAPI.evaluate_checksum(test_name, fn, output_format='openpmd') + checksumAPI.evaluate_checksum(test_name, fn, output_format="openpmd") diff --git a/Examples/analysis_default_regression.py b/Examples/analysis_default_regression.py index 453f650be01..5e1e88ee28b 100755 --- a/Examples/analysis_default_regression.py +++ b/Examples/analysis_default_regression.py @@ -4,7 +4,7 @@ import re import sys -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import 
checksumAPI # this will be the name of the plot file @@ -14,7 +14,7 @@ test_name = os.path.split(os.getcwd())[1] # Run checksum regression test -if re.search( 'single_precision', fn ): - checksumAPI.evaluate_checksum(test_name, fn, rtol=2.e-6) +if re.search("single_precision", fn): + checksumAPI.evaluate_checksum(test_name, fn, rtol=2.0e-6) else: checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/analysis_default_restart.py b/Examples/analysis_default_restart.py index 612851678fd..30491ad59e9 100755 --- a/Examples/analysis_default_restart.py +++ b/Examples/analysis_default_restart.py @@ -4,7 +4,7 @@ import yt -def check_restart(filename, tolerance = 1e-12): +def check_restart(filename, tolerance=1e-12): """ Compare output data generated from initial run with output data generated after restart. @@ -21,33 +21,41 @@ def check_restart(filename, tolerance = 1e-12): # yt 4.0+ has rounding issues with our domain data: # RuntimeError: yt attempted to read outside the boundaries # of a non-periodic domain along dimension 0. - if 'force_periodicity' in dir(ds_restart): ds_restart.force_periodicity() + if "force_periodicity" in dir(ds_restart): + ds_restart.force_periodicity() - ad_restart = ds_restart.covering_grid(level = 0, - left_edge = ds_restart.domain_left_edge, dims = ds_restart.domain_dimensions) + ad_restart = ds_restart.covering_grid( + level=0, + left_edge=ds_restart.domain_left_edge, + dims=ds_restart.domain_dimensions, + ) # Load output data generated from initial run - benchmark = 'orig_' + filename + benchmark = "orig_" + filename ds_benchmark = yt.load(benchmark) # yt 4.0+ has rounding issues with our domain data: # RuntimeError: yt attempted to read outside the boundaries # of a non-periodic domain along dimension 0. 
- if 'force_periodicity' in dir(ds_benchmark): ds_benchmark.force_periodicity() + if "force_periodicity" in dir(ds_benchmark): + ds_benchmark.force_periodicity() - ad_benchmark = ds_benchmark.covering_grid(level = 0, - left_edge = ds_benchmark.domain_left_edge, dims = ds_benchmark.domain_dimensions) + ad_benchmark = ds_benchmark.covering_grid( + level=0, + left_edge=ds_benchmark.domain_left_edge, + dims=ds_benchmark.domain_dimensions, + ) # Loop over all fields (all particle species, all particle attributes, all grid fields) # and compare output data generated from initial run with output data generated after restart - print('\ntolerance = {:g}'.format(tolerance)) + print("\ntolerance = {:g}".format(tolerance)) print() for field in ds_benchmark.field_list: dr = ad_restart[field].squeeze().v db = ad_benchmark[field].squeeze().v error = np.amax(np.abs(dr - db)) - if (np.amax(np.abs(db)) != 0.): + if np.amax(np.abs(db)) != 0.0: error /= np.amax(np.abs(db)) - print('field: {}; error = {:g}'.format(field, error)) - assert(error < tolerance) + print("field: {}; error = {:g}".format(field, error)) + assert error < tolerance print() diff --git a/Python/pywarpx/Algo.py b/Python/pywarpx/Algo.py index f8049207078..2f637627ac5 100644 --- a/Python/pywarpx/Algo.py +++ b/Python/pywarpx/Algo.py @@ -6,4 +6,4 @@ from .Bucket import Bucket -algo = Bucket('algo') +algo = Bucket("algo") diff --git a/Python/pywarpx/Amr.py b/Python/pywarpx/Amr.py index f9164f4d419..618db17d6d1 100644 --- a/Python/pywarpx/Amr.py +++ b/Python/pywarpx/Amr.py @@ -6,4 +6,4 @@ from .Bucket import Bucket -amr = Bucket('amr') +amr = Bucket("amr") diff --git a/Python/pywarpx/Amrex.py b/Python/pywarpx/Amrex.py index 4dab8225a84..34b50637003 100644 --- a/Python/pywarpx/Amrex.py +++ b/Python/pywarpx/Amrex.py @@ -6,4 +6,4 @@ from .Bucket import Bucket -amrex = Bucket('amrex') +amrex = Bucket("amrex") diff --git a/Python/pywarpx/Boundary.py b/Python/pywarpx/Boundary.py index b7ebd17af70..b84dbfe2193 100644 --- 
a/Python/pywarpx/Boundary.py +++ b/Python/pywarpx/Boundary.py @@ -6,4 +6,4 @@ from .Bucket import Bucket -boundary = Bucket('boundary') +boundary = Bucket("boundary") diff --git a/Python/pywarpx/Bucket.py b/Python/pywarpx/Bucket.py index 9dbe4def88e..fa595039726 100644 --- a/Python/pywarpx/Bucket.py +++ b/Python/pywarpx/Bucket.py @@ -13,9 +13,10 @@ class Bucket(object): The purpose of this class is to be a named bucket for holding attributes. This attributes will be concatenated into a string and passed into argv during initialization. """ + def __init__(self, instancename, **defaults): - self._localsetattr('instancename', instancename) - self._localsetattr('argvattrs', {}) + self._localsetattr("instancename", instancename) + self._localsetattr("argvattrs", {}) for name, value in defaults.items(): self.add_new_attr(name, value) @@ -26,7 +27,7 @@ def add_new_attr(self, name, value): """Names starting with "_" are made instance attributes. Otherwise the attribute is added to the args list. """ - if name.startswith('_'): + if name.startswith("_"): self._localsetattr(name, value) else: self.argvattrs[name] = value @@ -36,7 +37,7 @@ def add_new_group_attr(self, group, name, value): group is not an empty string, otherwise as only "name". 
""" if group: - self.argvattrs[f'{group}.{name}'] = value + self.argvattrs[f"{group}.{name}"] = value else: self.argvattrs[name] = value @@ -51,7 +52,9 @@ def __getattr__(self, name): def check_consistency(self, vname, value, errmsg): if vname in self.argvattrs: - assert (self.argvattrs[vname] is None) or (self.argvattrs[vname] == value), Exception(errmsg) + assert (self.argvattrs[vname] is None) or ( + self.argvattrs[vname] == value + ), Exception(errmsg) def attrlist(self): "Concatenate the attributes into a string" @@ -60,7 +63,7 @@ def attrlist(self): if value is None: continue if isinstance(value, str): - if value.find('=') > -1: + if value.find("=") > -1: # --- Expressions with temporary variables need to be inside quotes rhs = f'"{value}"' else: @@ -71,11 +74,11 @@ def attrlist(self): continue # --- For lists, tuples, and arrays make a space delimited string of the values. # --- The lambda is needed in case this is a list of strings. - rhs = ' '.join(map(lambda s : f'{s}', value)) + rhs = " ".join(map(lambda s: f"{s}", value)) elif isinstance(value, bool): rhs = 1 if value else 0 else: rhs = value - attrstring = f'{self.instancename}.{attr} = {rhs}' + attrstring = f"{self.instancename}.{attr} = {rhs}" result += [attrstring] return result diff --git a/Python/pywarpx/Collisions.py b/Python/pywarpx/Collisions.py index 9ad7f9e14a9..5269f530f4c 100644 --- a/Python/pywarpx/Collisions.py +++ b/Python/pywarpx/Collisions.py @@ -6,9 +6,10 @@ from .Bucket import Bucket -collisions = Bucket('collisions') +collisions = Bucket("collisions") collisions_list = [] + def newcollision(name): result = Bucket(name) collisions_list.append(result) diff --git a/Python/pywarpx/Constants.py b/Python/pywarpx/Constants.py index e899fa255ae..bd17c2f2dfc 100644 --- a/Python/pywarpx/Constants.py +++ b/Python/pywarpx/Constants.py @@ -13,18 +13,21 @@ class Constants(Bucket): """ The purpose of this class is to be hold user defined constants """ + def __init__(self): - Bucket.__init__(self, 
'my_constants') + Bucket.__init__(self, "my_constants") def __setattr__(self, name, value): # Make sure that any constants redefined have a consistent value if name in self.argvattrs: - assert self.argvattrs[name] == value, Exception('Inconsistent values given for user defined constants') + assert self.argvattrs[name] == value, Exception( + "Inconsistent values given for user defined constants" + ) Bucket.__setattr__(self, name, value) def add_keywords(self, kwdict): mangle_dict = {} - for k,v in kwdict.items(): + for k, v in kwdict.items(): # WarpX has a single global dictionary of expression variables, my_constants, # so each variable must be unique. # Check if keyword has already been defined. If so and it has a different @@ -33,7 +36,7 @@ def add_keywords(self, kwdict): k_mangled = k while k_mangled in self.argvattrs and self.argvattrs[k_mangled] != v: mangle_number += 1 - k_mangled = f'{k}{mangle_number}' + k_mangled = f"{k}{mangle_number}" if mangle_number > 0: # The mangle_dict contains only mangled names mangle_dict[k] = k_mangled @@ -45,8 +48,8 @@ def mangle_expression(self, expression, mangle_dict): return None # For each key in mangle_dict, modify the expression replacing # the key with its value, the mangled version of key - for k,v in mangle_dict.items(): - expression = re.sub(r'\b%s\b'%k, v, expression) + for k, v in mangle_dict.items(): + expression = re.sub(r"\b%s\b" % k, v, expression) return expression diff --git a/Python/pywarpx/Diagnostics.py b/Python/pywarpx/Diagnostics.py index 19860e9b7ee..731d1d31d01 100644 --- a/Python/pywarpx/Diagnostics.py +++ b/Python/pywarpx/Diagnostics.py @@ -6,22 +6,25 @@ from .Bucket import Bucket -diagnostics = Bucket('diagnostics', _diagnostics_dict={}) -reduced_diagnostics = Bucket('warpx', _diagnostics_dict={}) +diagnostics = Bucket("diagnostics", _diagnostics_dict={}) +reduced_diagnostics = Bucket("warpx", _diagnostics_dict={}) + class Diagnostic(Bucket): """ This is the same as a Bucket, but checks that any 
attributes are always given the same value. """ + def add_new_attr_with_check(self, name, value): - if name.startswith('_'): + if name.startswith("_"): self._localsetattr(name, value) else: if name in self.argvattrs: - assert value == self.argvattrs[name], \ - Exception(f'Diagnostic attributes not consistent for ' - f'"{self.instancename}": ' - f'"{value}" != "{self.argvattrs[name]}"') + assert value == self.argvattrs[name], Exception( + f"Diagnostic attributes not consistent for " + f'"{self.instancename}": ' + f'"{value}" != "{self.argvattrs[name]}"' + ) self.argvattrs[name] = value def __setattr__(self, name, value): @@ -33,5 +36,5 @@ def set_or_replace_attr(self, name, value): (since __setattr__ cannot be used for replacing as it would raise an Exception) """ - assert not name.startswith('_') + assert not name.startswith("_") self.argvattrs[name] = value diff --git a/Python/pywarpx/EB2.py b/Python/pywarpx/EB2.py index 4b74aafb04f..949d362bfcc 100644 --- a/Python/pywarpx/EB2.py +++ b/Python/pywarpx/EB2.py @@ -6,4 +6,4 @@ from .Bucket import Bucket -eb2 = Bucket('eb2') +eb2 = Bucket("eb2") diff --git a/Python/pywarpx/Geometry.py b/Python/pywarpx/Geometry.py index 2eddb9b8f05..a870a2c5a57 100644 --- a/Python/pywarpx/Geometry.py +++ b/Python/pywarpx/Geometry.py @@ -6,4 +6,4 @@ from .Bucket import Bucket -geometry = Bucket('geometry') +geometry = Bucket("geometry") diff --git a/Python/pywarpx/HybridPICModel.py b/Python/pywarpx/HybridPICModel.py index e21bba4a240..7bd8c961950 100644 --- a/Python/pywarpx/HybridPICModel.py +++ b/Python/pywarpx/HybridPICModel.py @@ -8,4 +8,4 @@ from .Bucket import Bucket -hybridpicmodel = Bucket('hybrid_pic_model') +hybridpicmodel = Bucket("hybrid_pic_model") diff --git a/Python/pywarpx/Interpolation.py b/Python/pywarpx/Interpolation.py index d25539de77b..b84def573eb 100644 --- a/Python/pywarpx/Interpolation.py +++ b/Python/pywarpx/Interpolation.py @@ -6,4 +6,4 @@ from .Bucket import Bucket -interpolation = Bucket('interpolation') 
+interpolation = Bucket("interpolation") diff --git a/Python/pywarpx/Lasers.py b/Python/pywarpx/Lasers.py index 60dfaca31e9..3836700215d 100644 --- a/Python/pywarpx/Lasers.py +++ b/Python/pywarpx/Lasers.py @@ -6,9 +6,10 @@ from .Bucket import Bucket -lasers = Bucket('lasers', names=[]) +lasers = Bucket("lasers", names=[]) lasers_list = [] + def newlaser(name): result = Bucket(name) lasers_list.append(result) diff --git a/Python/pywarpx/LoadThirdParty.py b/Python/pywarpx/LoadThirdParty.py index ea62d558eeb..5ec84247604 100644 --- a/Python/pywarpx/LoadThirdParty.py +++ b/Python/pywarpx/LoadThirdParty.py @@ -17,19 +17,23 @@ def load_cupy(): if amr.Config.have_gpu: try: import cupy as cp + xp = cp # Note: found and will use cupy except ImportError: status = "Warning: GPU found but cupy not available! Trying managed memory in numpy..." import numpy as np + xp = np if amr.Config.gpu_backend == "SYCL": status = "Warning: SYCL GPU backend not yet implemented for Python" import numpy as np + xp = np else: import numpy as np + xp = np # Note: found and will use numpy return xp, status diff --git a/Python/pywarpx/PSATD.py b/Python/pywarpx/PSATD.py index 0cd3038336a..a5072a81fc8 100644 --- a/Python/pywarpx/PSATD.py +++ b/Python/pywarpx/PSATD.py @@ -6,4 +6,4 @@ from .Bucket import Bucket -psatd = Bucket('psatd') +psatd = Bucket("psatd") diff --git a/Python/pywarpx/Particles.py b/Python/pywarpx/Particles.py index e05341e8203..9c87bb7083e 100644 --- a/Python/pywarpx/Particles.py +++ b/Python/pywarpx/Particles.py @@ -6,10 +6,11 @@ from .Bucket import Bucket -particles = Bucket('particles', species_names=[], rigid_injected_species=[]) +particles = Bucket("particles", species_names=[], rigid_injected_species=[]) particles_list = [] particle_dict = {} + def newspecies(name): result = Bucket(name) particles_list.append(result) diff --git a/Python/pywarpx/WarpX.py b/Python/pywarpx/WarpX.py index 6752b00f371..b0724d9db46 100644 --- a/Python/pywarpx/WarpX.py +++ b/Python/pywarpx/WarpX.py 
@@ -37,7 +37,7 @@ def create_argv_list(self, **kw): for k, v in kw.items(): if v is not None: - argv.append(f'{k} = {v}') + argv.append(f"{k} = {v}") argv += warpx.attrlist() argv += my_constants.attrlist() @@ -62,7 +62,9 @@ def create_argv_list(self, **kw): particles_list.append(getattr(Particles, pstring)) particles_list_names.append(pstring) else: - raise Exception('Species %s listed in species_names not defined'%pstring) + raise Exception( + "Species %s listed in species_names not defined" % pstring + ) argv += particles.attrlist() for particle in particles_list: @@ -84,7 +86,9 @@ def create_argv_list(self, **kw): for species_diagnostic in diagnostic._species_dict.values(): argv += species_diagnostic.attrlist() - reduced_diagnostics.reduced_diags_names = reduced_diagnostics._diagnostics_dict.keys() + reduced_diagnostics.reduced_diags_names = ( + reduced_diagnostics._diagnostics_dict.keys() + ) argv += reduced_diagnostics.attrlist() for diagnostic in reduced_diagnostics._diagnostics_dict.values(): argv += diagnostic.attrlist() @@ -120,25 +124,25 @@ def getProbLo(self, direction): def getProbHi(self, direction): return libwarpx.libwarpx_so.warpx_getProbHi(direction) - def write_inputs(self, filename='inputs', **kw): + def write_inputs(self, filename="inputs", **kw): argv = self.create_argv_list(**kw) # Sort the argv list to make it more human readable argv.sort() - with open(filename, 'w') as ff: - - prefix_old = '' + with open(filename, "w") as ff: + prefix_old = "" for arg in argv: # This prints the name of the input group (prefix) as a header # before each group to make the input file more human readable - prefix_new = re.split(' |\.', arg)[0] + prefix_new = re.split(" |\.", arg)[0] if prefix_new != prefix_old: - if prefix_old != '': - ff.write('\n') - ff.write(f'# {prefix_new}\n') + if prefix_old != "": + ff.write("\n") + ff.write(f"# {prefix_new}\n") prefix_old = prefix_new - ff.write(f'{arg}\n') + ff.write(f"{arg}\n") + -warpx = WarpX('warpx', _bucket_dict 
= {}) +warpx = WarpX("warpx", _bucket_dict={}) diff --git a/Python/pywarpx/__init__.py b/Python/pywarpx/__init__.py index 7531f764a48..b858222af38 100644 --- a/Python/pywarpx/__init__.py +++ b/Python/pywarpx/__init__.py @@ -44,14 +44,16 @@ # This is a circular import and must happen after the import of libwarpx from . import picmi # noqa # isort:skip + # intentionally query the value - only set once sim dimension is known def __getattr__(name): # https://stackoverflow.com/a/57263518/2719194 - if name == '__version__': + if name == "__version__": return libwarpx.__version__ raise AttributeError(f"module '{__name__}' has no attribute '{name}'") + # TODO -#__doc__ = cxx.__doc__ -#__license__ = cxx.__license__ -#__author__ = cxx.__author__ +# __doc__ = cxx.__doc__ +# __license__ = cxx.__license__ +# __author__ = cxx.__author__ diff --git a/Python/pywarpx/_libwarpx.py b/Python/pywarpx/_libwarpx.py index 6e347ff5fd7..40426104b9f 100755 --- a/Python/pywarpx/_libwarpx.py +++ b/Python/pywarpx/_libwarpx.py @@ -14,11 +14,12 @@ import atexit import os +import sys from .Geometry import geometry -class LibWarpX(): +class LibWarpX: """This class manages the WarpX classes, part of the Python module from the compiled C++ code. It will only load the library when it is referenced, and this can only be done after the geometry is defined so that the version of the library that is needed can be determined. @@ -34,7 +35,7 @@ def __init__(self): self.__version__ = None def __getattr__(self, attribute): - if attribute == 'libwarpx_so': + if attribute == "libwarpx_so": # If the 'libwarpx_so' is referenced, load it. # Once loaded, it gets added to the dictionary so this code won't be called again. self.load_library() @@ -45,21 +46,20 @@ def __getattr__(self, attribute): return self.__getattribute__(attribute) def _get_package_root(self): - ''' + """ Get the path to the installation location (where libwarpx.so would be installed). 
- ''' + """ cur = os.path.abspath(__file__) while True: name = os.path.basename(cur) - if name == 'pywarpx': + if name == "pywarpx": return cur elif not name: - return '' + return "" cur = os.path.dirname(cur) def load_library(self): - - if 'libwarpx_so' in self.__dict__: + if "libwarpx_so" in self.__dict__: raise RuntimeError( "Invalid attempt to load the pybind11 bindings library multiple times. " "Note that multiple AMReX/WarpX geometries cannot be loaded yet into the same Python process. " @@ -72,57 +72,69 @@ def load_library(self): _prob_lo = geometry.prob_lo _dims = geometry.dims except AttributeError: - raise Exception('The shared object could not be loaded. The geometry must be setup before the WarpX pybind11 module can be accessesd. The geometry determines which version of the shared object to load.') + raise Exception( + "The shared object could not be loaded. The geometry must be setup before the WarpX pybind11 module can be accessesd. The geometry determines which version of the shared object to load." + ) - if _dims == 'RZ': - self.geometry_dim = 'rz' - elif (_dims == '1' or _dims == '2' or _dims == '3'): - self.geometry_dim = '%dd'%len(_prob_lo) + if _dims == "RZ": + self.geometry_dim = "rz" + elif _dims == "1" or _dims == "2" or _dims == "3": + self.geometry_dim = "%dd" % len(_prob_lo) else: - raise Exception('Undefined geometry %d'%_dims) + raise Exception("Undefined geometry %d" % _dims) try: if self.geometry_dim == "1d": import amrex.space1d as amr + self.amr = amr from . import warpx_pybind_1d as cxx_1d + self.libwarpx_so = cxx_1d self.dim = 1 elif self.geometry_dim == "2d": import amrex.space2d as amr + self.amr = amr from . import warpx_pybind_2d as cxx_2d + self.libwarpx_so = cxx_2d self.dim = 2 elif self.geometry_dim == "rz": import amrex.space2d as amr + self.amr = amr from . import warpx_pybind_rz as cxx_rz + self.libwarpx_so = cxx_rz self.dim = 2 elif self.geometry_dim == "3d": import amrex.space3d as amr + self.amr = amr from . 
import warpx_pybind_3d as cxx_3d + self.libwarpx_so = cxx_3d self.dim = 3 self.Config = self.libwarpx_so.Config except ImportError: - raise Exception(f"Dimensionality '{self.geometry_dim}' was not compiled in this Python install. Please recompile with -DWarpX_DIMS={_dims}") + raise Exception( + f"Dimensionality '{self.geometry_dim}' was not compiled in this Python install. Please recompile with -DWarpX_DIMS={_dims}" + ) self.__version__ = self.libwarpx_so.__version__ def amrex_init(self, argv, mpi_comm=None): - if mpi_comm is None: # or MPI is None: + if mpi_comm is None: # or MPI is None: self.libwarpx_so.amrex_init(argv) else: - raise Exception('mpi_comm argument not yet supported') + raise Exception("mpi_comm argument not yet supported") def initialize(self, argv=None, mpi_comm=None): - ''' + """ Initialize WarpX and AMReX. Must be called before doing anything else. - ''' + """ if argv is None: argv = sys.argv self.amrex_init(argv, mpi_comm) @@ -131,22 +143,24 @@ def initialize(self, argv=None, mpi_comm=None): self.libwarpx_so.execute_python_callback("afterinit") self.libwarpx_so.execute_python_callback("particleloader") - #self.libwarpx_so.warpx_init() + # self.libwarpx_so.warpx_init() self.initialized = True def finalize(self, finalize_mpi=1): - ''' + """ Call finalize for WarpX and AMReX. Registered to run at program exit. - ''' + """ # TODO: simplify, part of pyAMReX already if self.initialized: del self.warpx # The call to warpx_finalize causes a crash - don't know why - #self.libwarpx_so.warpx_finalize() + # self.libwarpx_so.warpx_finalize() self.libwarpx_so.amrex_finalize() from pywarpx import callbacks + callbacks.clear_all() + libwarpx = LibWarpX() diff --git a/Python/pywarpx/callbacks.py b/Python/pywarpx/callbacks.py index 1f32fee6620..c5fff67047f 100644 --- a/Python/pywarpx/callbacks.py +++ b/Python/pywarpx/callbacks.py @@ -100,19 +100,20 @@ class CallbackFunctions(object): installed a method in one of the call back lists. 
""" - def __init__(self,name=None,lcallonce=0,singlefunconly=False): + def __init__(self, name=None, lcallonce=0, singlefunconly=False): self.funcs = [] - self.time = 0. + self.time = 0.0 self.timers = {} self.name = name self.lcallonce = lcallonce self.singlefunconly = singlefunconly - def __call__(self,*args,**kw): + def __call__(self, *args, **kw): """Call all of the functions in the list""" - tt = self.callfuncsinlist(*args,**kw) + tt = self.callfuncsinlist(*args, **kw) self.time = self.time + tt - if self.lcallonce: self.funcs = [] + if self.lcallonce: + self.funcs = [] def clearlist(self): """Unregister/clear out all registered C callbacks""" @@ -131,7 +132,7 @@ def hasfuncsinstalled(self): """Checks if there are any functions installed""" return len(self.funcs) > 0 - def _getmethodobject(self,func): + def _getmethodobject(self, func): """For call backs that are methods, returns the method's instance""" return func[0] @@ -139,14 +140,15 @@ def callbackfunclist(self): """Generator returning callable functions from the list""" funclistcopy = copy.copy(self.funcs) for f in funclistcopy: - if isinstance(f,list): + if isinstance(f, list): object = self._getmethodobject(f) if object is None: self.funcs.remove(f) continue - result = getattr(object,f[1]) - elif isinstance(f,str): + result = getattr(object, f[1]) + elif isinstance(f, str): import __main__ + if f in __main__.__dict__: result = __main__.__dict__[f] # --- If the function with the name is found, then replace the @@ -159,19 +161,19 @@ def callbackfunclist(self): if not callable(result): print("\n\nWarning: a call back was found that is not callable.") if self.name is not None: - print("For %s"%self.name) + print("For %s" % self.name) print("Only callable objects can be installed.") print("It is possible that the callable's name has been overwritten") print("by something not callable. 
This can happen during restart") print("if a function name had later been used as a variable name.") print(self.name) - if isinstance(f,str): - print("The name of the call back is %s"%f) + if isinstance(f, str): + print(f"The name of the call back is {f}") print("\n\n") continue yield result - def installfuncinlist(self,f): + def installfuncinlist(self, f): """Check if the specified function is installed""" if self.singlefunconly and self.hasfuncsinstalled(): raise RuntimeError( @@ -182,12 +184,12 @@ def installfuncinlist(self,f): # If this is the first function installed, set the callback in the C++ # to call this class instance. libwarpx.libwarpx_so.add_python_callback(self.name, self) - if isinstance(f,types.MethodType): + if isinstance(f, types.MethodType): # --- If the function is a method of a class instance, then save a full # --- reference to that instance and the method name. finstance = f.__self__ fname = f.__name__ - self.funcs.append([finstance,fname]) + self.funcs.append([finstance, fname]) elif callable(f): # --- If a function had already been installed by name, then skip the install. # --- This is problematic, since no warning message is given, but it is unlikely @@ -202,7 +204,7 @@ def installfuncinlist(self,f): else: self.funcs.append(f) - def uninstallfuncinlist(self,f): + def uninstallfuncinlist(self, f): """Uninstall the specified function""" # --- An element by element search is needed # --- f can be a function or method object, or a name (string). 
@@ -212,62 +214,66 @@ def uninstallfuncinlist(self,f): if f == func: self.funcs.remove(f) break - elif isinstance(func,list) and isinstance(f,types.MethodType): + elif isinstance(func, list) and isinstance(f, types.MethodType): object = self._getmethodobject(func) if f.__self__ is object and f.__name__ == func[1]: self.funcs.remove(func) break - elif isinstance(func,str): + elif isinstance(func, str): if f.__name__ == func: self.funcs.remove(func) break - elif isinstance(f,str): - if isinstance(func,str): funcname = func - elif isinstance(func,list): funcname = None - else: funcname = func.__name__ + elif isinstance(f, str): + if isinstance(func, str): + funcname = func + elif isinstance(func, list): + funcname = None + else: + funcname = func.__name__ if f == funcname: self.funcs.remove(func) break # check that a function was removed if len(self.funcs) == len(funclistcopy): - raise Exception(f'Warning: no function, {f}, had been installed') + raise Exception(f"Warning: no function, {f}, had been installed") # if there are no functions left, remove the C callback if not self.hasfuncsinstalled(): self.clearlist() - def isinstalledfuncinlist(self,f): + def isinstalledfuncinlist(self, f): """Checks if the specified function is installed""" # --- An element by element search is needed funclistcopy = copy.copy(self.funcs) for func in funclistcopy: if f == func: return 1 - elif isinstance(func,list) and isinstance(f,types.MethodType): + elif isinstance(func, list) and isinstance(f, types.MethodType): object = self._getmethodobject(func) if f.__self__ is object and f.__name__ == func[1]: return 1 - elif isinstance(func,str): + elif isinstance(func, str): if f.__name__ == func: return 1 return 0 - def callfuncsinlist(self,*args,**kw): + def callfuncsinlist(self, *args, **kw): """Call the functions in the list""" bb = time.time() for f in self.callbackfunclist(): - #barrier() + # barrier() t1 = time.time() - f(*args,**kw) - #barrier() + f(*args, **kw) + # barrier() t2 = 
time.time() # --- For the timers, use the function (or method) name as the key. - self.timers[f.__name__] = self.timers.get(f.__name__,0.) + (t2 - t1) + self.timers[f.__name__] = self.timers.get(f.__name__, 0.0) + (t2 - t1) aa = time.time() return aa - bb -#============================================================================= + +# ============================================================================= callback_instances = { "beforeInitEsolve": {}, @@ -276,7 +282,7 @@ def callfuncsinlist(self,*args,**kw): "beforecollisions": {}, "aftercollisions": {}, "beforeEsolve": {}, - "poissonsolver": {'singlefunconly': True}, # external Poisson solver + "poissonsolver": {"singlefunconly": True}, # external Poisson solver "afterEsolve": {}, "afterBpush": {}, "afterEpush": {}, @@ -291,13 +297,14 @@ def callfuncsinlist(self,*args,**kw): "oncheckpointsignal": {}, "onbreaksignal": {}, "particleinjection": {}, - "appliedfields": {} + "appliedfields": {}, } # --- Now create the actual instances. for key, val in callback_instances.items(): callback_instances[key] = CallbackFunctions(name=key, **val) + def installcallback(name, f): """Installs a function to be called at that specified time. @@ -305,186 +312,263 @@ def installcallback(name, f): """ callback_instances[name].installfuncinlist(f) + def uninstallcallback(name, f): """Uninstalls the function (so it won't be called anymore). 
Removes the function from the list of functions called by this callback.""" callback_instances[name].uninstallfuncinlist(f) + def isinstalled(name, f): """Checks if a function is installed for this callback.""" return callback_instances[name].isinstalledfuncinlist(f) + def clear_all(): for key, val in callback_instances.items(): val.clearlist() -#============================================================================= -def printcallbacktimers(tmin=1.,lminmax=False,ff=None): +# ============================================================================= + + +def printcallbacktimers(tmin=1.0, lminmax=False, ff=None): """Prints timings of installed functions. - tmin=1.: only functions with time greater than tmin will be printed - lminmax=False: If True, prints the min and max times over all processors - ff=None: If given, timings will be written to the file object instead of stdout """ - if ff is None: ff = sys.stdout + if ff is None: + ff = sys.stdout for c in callback_instances.values(): - for fname, time in c.timers.items(): - #vlist = numpy.array(gather(time)) - vlist = numpy.array([time]) - #if me > 0: continue + for fname, this_time in c.timers.items(): + # vlist = numpy.array(gather(this_time)) + vlist = numpy.array([this_time]) + # if me > 0: continue vsum = numpy.sum(vlist) - if vsum <= tmin: continue - vrms = numpy.sqrt(max(0.,numpy.sum(vlist**2)/len(vlist) - (numpy.sum(vlist)/len(vlist))**2)) - npes = 1. 
# Only works for one processor - ff.write('%20s %s %10.4f %10.4f %10.4f'%(c.name,fname,vsum,vsum/npes,vrms)) + if vsum <= tmin: + continue + vrms = numpy.sqrt( + max( + 0.0, + numpy.sum(vlist**2) / len(vlist) + - (numpy.sum(vlist) / len(vlist)) ** 2, + ) + ) + npes = 1.0 # Only works for one processor + ff.write( + "%20s %s %10.4f %10.4f %10.4f" + % (c.name, fname, vsum, vsum / npes, vrms) + ) if lminmax: vmin = numpy.min(vlist) vmax = numpy.max(vlist) - ff.write(' %10.4f %10.4f'%(vmin,vmax)) + ff.write(" %10.4f %10.4f" % (vmin, vmax)) it = libwarpx.libwarpx_so.warpx_getistep(0) if it > 0: - ff.write(' %10.4f'%(vsum/npes/(it))) - ff.write('\n') + ff.write(" %10.4f" % (vsum / npes / (it))) + ff.write("\n") + + +# ============================================================================= -#============================================================================= # ---------------------------------------------------------------------------- def callfrombeforeInitEsolve(f): - installcallback('beforeInitEsolve', f) + installcallback("beforeInitEsolve", f) return f + + def installbeforeInitEsolve(f): - installcallback('beforeInitEsolve', f) + installcallback("beforeInitEsolve", f) + # ---------------------------------------------------------------------------- def callfromafterInitEsolve(f): - installcallback('afterInitEsolve', f) + installcallback("afterInitEsolve", f) return f + + def installafterInitEsolve(f): - installcallback('afterInitEsolve', f) + installcallback("afterInitEsolve", f) + # ---------------------------------------------------------------------------- def callfromafterinit(f): - installcallback('afterinit', f) + installcallback("afterinit", f) return f + + def installafterinit(f): - installcallback('afterinit', f) + installcallback("afterinit", f) + # ---------------------------------------------------------------------------- def callfrombeforecollisions(f): - installcallback('beforecollisions', f) + installcallback("beforecollisions", f) 
return f + + def installbeforecollisions(f): - installcallback('beforecollisions', f) + installcallback("beforecollisions", f) + # ---------------------------------------------------------------------------- def callfromaftercollisions(f): - installcallback('aftercollisions', f) + installcallback("aftercollisions", f) return f + + def installaftercollisions(f): - installcallback('aftercollisions', f) + installcallback("aftercollisions", f) + # ---------------------------------------------------------------------------- def callfrombeforeEsolve(f): - installcallback('beforeEsolve', f) + installcallback("beforeEsolve", f) return f + + def installbeforeEsolve(f): - installcallback('beforeEsolve', f) + installcallback("beforeEsolve", f) + # ---------------------------------------------------------------------------- def callfrompoissonsolver(f): - installcallback('poissonsolver', f) + installcallback("poissonsolver", f) return f + + def installpoissonsolver(f): - installcallback('poissonsolver', f) + installcallback("poissonsolver", f) + # ---------------------------------------------------------------------------- def callfromafterEsolve(f): - installcallback('afterEsolve', f) + installcallback("afterEsolve", f) return f + + def installafterEsolve(f): - installcallback('afterEsolve', f) + installcallback("afterEsolve", f) + # ---------------------------------------------------------------------------- def callfromafterBpush(f): - installcallback('afterBpush', f) + installcallback("afterBpush", f) return f + + def installafterBpush(f): - installcallback('afterBpush', f) + installcallback("afterBpush", f) + # ---------------------------------------------------------------------------- def callfromafterEpush(f): - installcallback('afterEpush', f) + installcallback("afterEpush", f) return f + + def installafterEpush(f): - installcallback('afterEpush', f) + installcallback("afterEpush", f) + # ---------------------------------------------------------------------------- def 
callfrombeforedeposition(f): - installcallback('beforedeposition', f) + installcallback("beforedeposition", f) return f + + def installbeforedeposition(f): - installcallback('beforedeposition', f) + installcallback("beforedeposition", f) + # ---------------------------------------------------------------------------- def callfromafterdeposition(f): - installcallback('afterdeposition', f) + installcallback("afterdeposition", f) return f + + def installafterdeposition(f): - installcallback('afterdeposition', f) + installcallback("afterdeposition", f) + # ---------------------------------------------------------------------------- def callfromparticlescraper(f): - installcallback('particlescraper', f) + installcallback("particlescraper", f) return f + + def installparticlescraper(f): - installcallback('particlescraper', f) + installcallback("particlescraper", f) + # ---------------------------------------------------------------------------- def callfromparticleloader(f): - installcallback('particleloader', f) + installcallback("particleloader", f) return f + + def installparticleloader(f): - installcallback('particleloader', f) + installcallback("particleloader", f) + # ---------------------------------------------------------------------------- def callfrombeforestep(f): - installcallback('beforestep', f) + installcallback("beforestep", f) return f + + def installbeforestep(f): - installcallback('beforestep', f) + installcallback("beforestep", f) + # ---------------------------------------------------------------------------- def callfromafterstep(f): - installcallback('afterstep', f) + installcallback("afterstep", f) return f + + def installafterstep(f): - installcallback('afterstep', f) + installcallback("afterstep", f) + # ---------------------------------------------------------------------------- def callfromafterdiagnostics(f): - installcallback('afterdiagnostics', f) + installcallback("afterdiagnostics", f) return f + + def installafterdiagnostics(f): - 
installcallback('afterdiagnostics', f) + installcallback("afterdiagnostics", f) + # ---------------------------------------------------------------------------- def oncheckpointsignal(f): - installcallback('oncheckpointsignal', f) + installcallback("oncheckpointsignal", f) return f + + def installoncheckpointsignal(f): - installcallback('oncheckpointsignal', f) + installcallback("oncheckpointsignal", f) + # ---------------------------------------------------------------------------- def onbreaksignal(f): - installcallback('onbreaksignal', f) + installcallback("onbreaksignal", f) return f + + def installonbreaksignal(f): - installcallback('onbreaksignal', f) + installcallback("onbreaksignal", f) + # ---------------------------------------------------------------------------- def callfromparticleinjection(f): - installcallback('particleinjection', f) + installcallback("particleinjection", f) return f + + def installparticleinjection(f): - installcallback('particleinjection', f) + installcallback("particleinjection", f) diff --git a/Python/pywarpx/fields.py b/Python/pywarpx/fields.py index 4c2c26dcb38..b765424cf57 100644 --- a/Python/pywarpx/fields.py +++ b/Python/pywarpx/fields.py @@ -41,6 +41,7 @@ JxCPPMLWrapper, JyCPPMLWrapper, JzCPPMLWrapper FCPPMLWrapper, GCPPMLWrapper """ + import numpy as np try: @@ -51,6 +52,7 @@ try: from mpi4py import MPI as mpi + comm_world = mpi.COMM_WORLD npes = comm_world.Get_size() except ImportError: @@ -83,6 +85,7 @@ class _MultiFABWrapper(object): Note that when True, the first n-ghost negative indices will refer to the lower ghost cells. """ + def __init__(self, mf=None, mf_name=None, level=0, include_ghosts=False): self._mf = mf self.mf_name = mf_name @@ -94,7 +97,9 @@ def __init__(self, mf=None, mf_name=None, level=0, include_ghosts=False): # The overlaps list is one along the axes where the grid boundaries overlap the neighboring grid, # which is the case with node centering. 
ix_type = self.mf.box_array().ix_type() - self.overlaps = self._get_indices([int(ix_type.node_centered(i)) for i in range(self.dim)], 0) + self.overlaps = self._get_indices( + [int(ix_type.node_centered(i)) for i in range(self.dim)], 0 + ) def __len__(self): "Returns the number of blocks" @@ -112,17 +117,16 @@ def mf(self): # Always fetch this anew in case the C++ MultiFab is recreated warpx = libwarpx.libwarpx_so.get_instance() # All MultiFab names have the level suffix - return warpx.multifab(f'{self.mf_name}[level={self.level}]') + return warpx.multifab(f"{self.mf_name}[level={self.level}]") @property def shape(self): - """Returns the shape of the global array - """ + """Returns the shape of the global array""" min_box = self.mf.box_array().minimal_box() shape = list(min_box.size - min_box.small_end) if self.include_ghosts: nghosts = self.mf.n_grow_vect - shape = [shape[i] + 2*nghosts[i] for i in range(self.dim)] + shape = [shape[i] + 2 * nghosts[i] for i in range(self.dim)] shape.append(self.mf.nComp) return tuple(shape) @@ -139,16 +143,16 @@ def mesh(self, direction): """ try: - if libwarpx.geometry_dim == '3d': - idir = ['x', 'y', 'z'].index(direction) - elif libwarpx.geometry_dim == '2d': - idir = ['x', 'z'].index(direction) - elif libwarpx.geometry_dim == 'rz': - idir = ['r', 'z'].index(direction) - elif libwarpx.geometry_dim == '1d': - idir = ['z'].index(direction) + if libwarpx.geometry_dim == "3d": + idir = ["x", "y", "z"].index(direction) + elif libwarpx.geometry_dim == "2d": + idir = ["x", "z"].index(direction) + elif libwarpx.geometry_dim == "rz": + idir = ["r", "z"].index(direction) + elif libwarpx.geometry_dim == "1d": + idir = ["z"].index(direction) except ValueError: - raise Exception('Inappropriate direction given') + raise Exception("Inappropriate direction given") min_box = self.mf.box_array().minimal_box() ilo = min_box.small_end[idir] @@ -168,13 +172,13 @@ def mesh(self, direction): ix_type = self.mf.box_array().ix_type() if 
ix_type.node_centered(idir): # node centered - shift = 0. + shift = 0.0 else: # cell centered - shift = 0.5*dd + shift = 0.5 * dd lo = warpx.Geom(self.level).ProbLo(idir) - return lo + np.arange(ilo,ihi+1)*dd + shift + return lo + np.arange(ilo, ihi + 1) * dd + shift def _get_indices(self, index, missing): """Expand the index list to length three. @@ -210,8 +214,7 @@ def _get_min_indices(self): return imin def _get_max_indices(self): - """Returns the maximum indices, expanded to length 3. - """ + """Returns the maximum indices, expanded to length 3.""" min_box = self.mf.box_array().minimal_box() if self.include_ghosts: min_box.grow(self.mf.n_grow_vect) @@ -273,8 +276,12 @@ def _find_start_stop(self, ii, imin, imax, d): ii = self._fix_index(ii, imax, d) iistart = ii iistop = ii + 1 - assert imin <= iistart <= imax, Exception(f'Dimension {d+1} lower index is out of bounds') - assert imin <= iistop <= imax, Exception(f'Dimension {d+1} upper index is out of bounds') + assert imin <= iistart <= imax, Exception( + f"Dimension {d+1} lower index is out of bounds" + ) + assert imin <= iistop <= imax, Exception( + f"Dimension {d+1} upper index is out of bounds" + ) return iistart, iistop def _get_field(self, mfi): @@ -298,7 +305,9 @@ def _get_field(self, mfi): device_arr = device_arr4.to_numpy(copy=False) if not self.include_ghosts: nghosts = self._get_n_ghosts() - device_arr = device_arr[tuple([slice(ng, -ng) for ng in nghosts[:self.dim]])] + device_arr = device_arr[ + tuple([slice(ng, -ng) for ng in nghosts[: self.dim]]) + ] return device_arr def _get_intersect_slice(self, mfi, starts, stops, icstart, icstop): @@ -349,7 +358,6 @@ def _get_intersect_slice(self, mfi, starts, stops, icstart, icstop): i2 = np.minimum(stops, ihi_p1) if np.all(i1 < i2): - block_slices = [] global_slices = [] for i in range(3): @@ -380,19 +388,19 @@ def __getitem__(self, index): # Note that the index can have negative values (which wrap around) and has 1 added to the upper # limit using python 
style slicing if index == Ellipsis: - index = self.dim*[slice(None)] + index = self.dim * [slice(None)] elif isinstance(index, slice): # If only one slice passed in, it was not wrapped in a list index = [index] - if len(index) < self.dim+1: + if len(index) < self.dim + 1: # Add extra dims to index, including for the component. # These are the dims left out and assumed to extend over the full size of the dim index = list(index) - while len(index) < self.dim+1: + while len(index) < self.dim + 1: index.append(slice(None)) - elif len(index) > self.dim+1: - raise Exception('Too many indices given') + elif len(index) > self.dim + 1: + raise Exception("Too many indices given") # Expand the indices to length 3 ii = self._get_indices(index, None) @@ -403,9 +411,9 @@ def __getitem__(self, index): ixmax, iymax, izmax = self._get_max_indices() # Setup the size of the array to be returned - ixstart, ixstop = self._find_start_stop(ii[0], ixmin, ixmax+1, 0) - iystart, iystop = self._find_start_stop(ii[1], iymin, iymax+1, 1) - izstart, izstop = self._find_start_stop(ii[2], izmin, izmax+1, 2) + ixstart, ixstop = self._find_start_stop(ii[0], ixmin, ixmax + 1, 0) + iystart, iystop = self._find_start_stop(ii[1], iymin, iymax + 1, 1) + izstart, izstop = self._find_start_stop(ii[2], izmin, izmax + 1, 2) icstart, icstop = self._find_start_stop(ic, 0, self.mf.n_comp, 3) # Gather the data to be included in a list to be sent to other processes @@ -413,7 +421,9 @@ def __getitem__(self, index): stops = [ixstop, iystop, izstop] datalist = [] for mfi in self.mf: - block_slices, global_slices = self._get_intersect_slice(mfi, starts, stops, icstart, icstop) + block_slices, global_slices = self._get_intersect_slice( + mfi, starts, stops, icstart, icstop + ) if global_slices is not None: # Note that the array will always have 4 dimensions. 
device_arr = self._get_field(mfi) @@ -430,10 +440,12 @@ def __getitem__(self, index): all_datalist = comm_world.allgather(datalist) # Create the array to be returned - result_shape = (max(0, ixstop - ixstart), - max(0, iystop - iystart), - max(0, izstop - izstart), - max(0, icstop - icstart)) + result_shape = ( + max(0, ixstop - ixstart), + max(0, iystop - iystart), + max(0, izstop - izstart), + max(0, icstop - icstart), + ) # Now, copy the data into the result array result_global = None @@ -470,19 +482,19 @@ def __setitem__(self, index, value): # Note that the index can have negative values (which wrap around) and has 1 added to the upper # limit using python style slicing if index == Ellipsis: - index = tuple(self.dim*[slice(None)]) + index = tuple(self.dim * [slice(None)]) elif isinstance(index, slice): # If only one slice passed in, it was not wrapped in a list index = [index] - if len(index) < self.dim+1: + if len(index) < self.dim + 1: # Add extra dims to index, including for the component. # These are the dims left out and assumed to extend over the full size of the dim. 
index = list(index) - while len(index) < self.dim+1: + while len(index) < self.dim + 1: index.append(slice(None)) - elif len(index) > self.dim+1: - raise Exception('Too many indices given') + elif len(index) > self.dim + 1: + raise Exception("Too many indices given") # Expand the indices to length 3 ii = self._get_indices(index, None) @@ -493,9 +505,9 @@ def __setitem__(self, index, value): ixmax, iymax, izmax = self._get_max_indices() # Setup the size of the global array to be set - ixstart, ixstop = self._find_start_stop(ii[0], ixmin, ixmax+1, 0) - iystart, iystop = self._find_start_stop(ii[1], iymin, iymax+1, 1) - izstart, izstop = self._find_start_stop(ii[2], izmin, izmax+1, 2) + ixstart, ixstop = self._find_start_stop(ii[0], ixmin, ixmax + 1, 0) + iystart, iystop = self._find_start_stop(ii[1], iymin, iymax + 1, 1) + izstart, izstop = self._find_start_stop(ii[2], izmin, izmax + 1, 2) icstart, icstop = self._find_start_stop(ic, 0, self.mf.n_comp, 3) if isinstance(value, np.ndarray): @@ -513,7 +525,7 @@ def __setitem__(self, index, value): global_shape[1:1] = [1] if not isinstance(ii[2], slice): global_shape[2:2] = [1] - if not isinstance(ic , slice) or len(global_shape) < 4: + if not isinstance(ic, slice) or len(global_shape) < 4: global_shape[3:3] = [1] value3d.shape = global_shape @@ -526,7 +538,9 @@ def __setitem__(self, index, value): starts = [ixstart, iystart, izstart] stops = [ixstop, iystop, izstop] for mfi in self.mf: - block_slices, global_slices = self._get_intersect_slice(mfi, starts, stops, icstart, icstop) + block_slices, global_slices = self._get_intersect_slice( + mfi, starts, stops, icstart, icstop + ) if global_slices is not None: mf_arr = self._get_field(mfi) if isinstance(value, np.ndarray): @@ -558,205 +572,406 @@ def norm0(self, *args): def ExWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='Efield_aux[x]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="Efield_aux[x]", level=level, 
include_ghosts=include_ghosts + ) + def EyWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='Efield_aux[y]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="Efield_aux[y]", level=level, include_ghosts=include_ghosts + ) + def EzWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='Efield_aux[z]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="Efield_aux[z]", level=level, include_ghosts=include_ghosts + ) + def BxWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='Bfield_aux[x]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="Bfield_aux[x]", level=level, include_ghosts=include_ghosts + ) + def ByWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='Bfield_aux[y]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="Bfield_aux[y]", level=level, include_ghosts=include_ghosts + ) + def BzWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='Bfield_aux[z]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="Bfield_aux[z]", level=level, include_ghosts=include_ghosts + ) + def JxWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='current_fp[x]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="current_fp[x]", level=level, include_ghosts=include_ghosts + ) + def JyWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='current_fp[y]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="current_fp[y]", level=level, include_ghosts=include_ghosts + ) + def JzWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='current_fp[z]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="current_fp[z]", level=level, include_ghosts=include_ghosts + ) + def 
ExFPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='Efield_fp[x]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="Efield_fp[x]", level=level, include_ghosts=include_ghosts + ) + def EyFPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='Efield_fp[y]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="Efield_fp[y]", level=level, include_ghosts=include_ghosts + ) + def EzFPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='Efield_fp[z]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="Efield_fp[z]", level=level, include_ghosts=include_ghosts + ) + def BxFPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='Bfield_fp[x]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="Bfield_fp[x]", level=level, include_ghosts=include_ghosts + ) + def ByFPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='Bfield_fp[y]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="Bfield_fp[y]", level=level, include_ghosts=include_ghosts + ) + def BzFPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='Bfield_fp[z]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="Bfield_fp[z]", level=level, include_ghosts=include_ghosts + ) + def JxFPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='current_fp[x]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="current_fp[x]", level=level, include_ghosts=include_ghosts + ) + def JyFPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='current_fp[y]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="current_fp[y]", level=level, include_ghosts=include_ghosts + ) + def JzFPWrapper(level=0, include_ghosts=False): 
- return _MultiFABWrapper(mf_name='current_fp[z]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="current_fp[z]", level=level, include_ghosts=include_ghosts + ) + def RhoFPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='rho_fp', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="rho_fp", level=level, include_ghosts=include_ghosts + ) + def PhiFPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='phi_fp', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="phi_fp", level=level, include_ghosts=include_ghosts + ) + def FFPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='F_fp', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper(mf_name="F_fp", level=level, include_ghosts=include_ghosts) + def GFPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='G_fp', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper(mf_name="G_fp", level=level, include_ghosts=include_ghosts) + def AxFPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='vector_potential_fp_nodal[x]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="vector_potential_fp_nodal[x]", + level=level, + include_ghosts=include_ghosts, + ) + def AyFPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='vector_potential_fp_nodal[y]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="vector_potential_fp_nodal[y]", + level=level, + include_ghosts=include_ghosts, + ) + def AzFPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='vector_potential_fp_nodal[z]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="vector_potential_fp_nodal[z]", + level=level, + include_ghosts=include_ghosts, + ) + def ExCPWrapper(level=0, include_ghosts=False): - 
return _MultiFABWrapper(mf_name='Efield_cp[x]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="Efield_cp[x]", level=level, include_ghosts=include_ghosts + ) + def EyCPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='Efield_cp[y]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="Efield_cp[y]", level=level, include_ghosts=include_ghosts + ) + def EzCPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='Efield_cp[z]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="Efield_cp[z]", level=level, include_ghosts=include_ghosts + ) + def BxCPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='Bfield_cp[x]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="Bfield_cp[x]", level=level, include_ghosts=include_ghosts + ) + def ByCPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='Bfield_cp[y]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="Bfield_cp[y]", level=level, include_ghosts=include_ghosts + ) + def BzCPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='Bfield_cp[z]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="Bfield_cp[z]", level=level, include_ghosts=include_ghosts + ) + def JxCPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='current_cp[x]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="current_cp[x]", level=level, include_ghosts=include_ghosts + ) + def JyCPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='current_cp[y]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="current_cp[y]", level=level, include_ghosts=include_ghosts + ) + def JzCPWrapper(level=0, include_ghosts=False): - return 
_MultiFABWrapper(mf_name='current_cp[z]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="current_cp[z]", level=level, include_ghosts=include_ghosts + ) + def RhoCPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='rho_cp', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="rho_cp", level=level, include_ghosts=include_ghosts + ) + def FCPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='F_cp', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper(mf_name="F_cp", level=level, include_ghosts=include_ghosts) + def GCPWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='G_cp', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper(mf_name="G_cp", level=level, include_ghosts=include_ghosts) + def EdgeLengthsxWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='m_edge_lengths[x]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="m_edge_lengths[x]", level=level, include_ghosts=include_ghosts + ) + def EdgeLengthsyWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='m_edge_lengths[y]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="m_edge_lengths[y]", level=level, include_ghosts=include_ghosts + ) + def EdgeLengthszWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='m_edge_lengths[z]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="m_edge_lengths[z]", level=level, include_ghosts=include_ghosts + ) + def FaceAreasxWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='m_face_areas[x]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="m_face_areas[x]", level=level, include_ghosts=include_ghosts + ) + def FaceAreasyWrapper(level=0, include_ghosts=False): - return 
_MultiFABWrapper(mf_name='m_face_areas[y]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="m_face_areas[y]", level=level, include_ghosts=include_ghosts + ) + def FaceAreaszWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='m_face_areas[z]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="m_face_areas[z]", level=level, include_ghosts=include_ghosts + ) + def JxFPAmpereWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='current_fp_ampere[x]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="current_fp_ampere[x]", level=level, include_ghosts=include_ghosts + ) + def JyFPAmpereWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='current_fp_ampere[y]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="current_fp_ampere[y]", level=level, include_ghosts=include_ghosts + ) + def JzFPAmpereWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='current_fp_ampere[z]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="current_fp_ampere[z]", level=level, include_ghosts=include_ghosts + ) + def ExFPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_E_fp[x]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_E_fp[x]", level=level, include_ghosts=include_ghosts + ) + def EyFPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_E_fp[y]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_E_fp[y]", level=level, include_ghosts=include_ghosts + ) + def EzFPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_E_fp[z]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_E_fp[z]", level=level, include_ghosts=include_ghosts + ) + def 
BxFPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_B_fp[x]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_B_fp[x]", level=level, include_ghosts=include_ghosts + ) + def ByFPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_B_fp[y]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_B_fp[y]", level=level, include_ghosts=include_ghosts + ) + def BzFPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_B_fp[z]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_B_fp[z]", level=level, include_ghosts=include_ghosts + ) + def JxFPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_j_fp[x]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_j_fp[x]", level=level, include_ghosts=include_ghosts + ) + def JyFPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_j_fp[y]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_j_fp[y]", level=level, include_ghosts=include_ghosts + ) + def JzFPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_j_fp[z]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_j_fp[z]", level=level, include_ghosts=include_ghosts + ) + def FFPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_F_fp', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_F_fp", level=level, include_ghosts=include_ghosts + ) + def GFPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_G_fp', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_G_fp", level=level, include_ghosts=include_ghosts + ) + def ExCPPMLWrapper(level=0, include_ghosts=False): - 
return _MultiFABWrapper(mf_name='pml_E_cp[x]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_E_cp[x]", level=level, include_ghosts=include_ghosts + ) + def EyCPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_E_cp[y]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_E_cp[y]", level=level, include_ghosts=include_ghosts + ) + def EzCPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_E_cp[z]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_E_cp[z]", level=level, include_ghosts=include_ghosts + ) + def BxCPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_B_cp[x]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_B_cp[x]", level=level, include_ghosts=include_ghosts + ) + def ByCPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_B_cp[y]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_B_cp[y]", level=level, include_ghosts=include_ghosts + ) + def BzCPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_B_cp[z]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_B_cp[z]", level=level, include_ghosts=include_ghosts + ) + def JxCPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_j_cp[x]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_j_cp[x]", level=level, include_ghosts=include_ghosts + ) + def JyCPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_j_cp[y]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_j_cp[y]", level=level, include_ghosts=include_ghosts + ) + def JzCPPMLWrapper(level=0, include_ghosts=False): - return 
_MultiFABWrapper(mf_name='pml_j_cp[z]', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_j_cp[z]", level=level, include_ghosts=include_ghosts + ) + def FCPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_F_cp', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_F_cp", level=level, include_ghosts=include_ghosts + ) + def GCPPMLWrapper(level=0, include_ghosts=False): - return _MultiFABWrapper(mf_name='pml_G_cp', level=level, include_ghosts=include_ghosts) + return _MultiFABWrapper( + mf_name="pml_G_cp", level=level, include_ghosts=include_ghosts + ) diff --git a/Python/pywarpx/particle_containers.py b/Python/pywarpx/particle_containers.py index 70471e24d1a..8af012f5e7b 100644 --- a/Python/pywarpx/particle_containers.py +++ b/Python/pywarpx/particle_containers.py @@ -29,10 +29,19 @@ def __init__(self, species_name): mypc = libwarpx.warpx.multi_particle_container() self.particle_container = mypc.get_particle_container_from_name(self.name) - - def add_particles(self, x=None, y=None, z=None, ux=None, uy=None, - uz=None, w=None, unique_particles=True, **kwargs): - ''' + def add_particles( + self, + x=None, + y=None, + z=None, + ux=None, + uy=None, + uz=None, + w=None, + unique_particles=True, + **kwargs, + ): + """ A function for adding particles to the WarpX simulation. Parameters @@ -58,7 +67,7 @@ def add_particles(self, x=None, y=None, z=None, ux=None, uy=None, kwargs : dict Containing an entry for all the extra particle attribute arrays. If an attribute is not given it will be set to 0. 
- ''' + """ # --- Get length of arrays, set to one for scalars lenx = np.size(x) @@ -87,32 +96,48 @@ def add_particles(self, x=None, y=None, z=None, ux=None, uy=None, maxlen = max(maxlen, lenw) # --- Make sure that the lengths of the input parameters are consistent - assert x is None or lenx==maxlen or lenx==1, "Length of x doesn't match len of others" - assert y is None or leny==maxlen or leny==1, "Length of y doesn't match len of others" - assert z is None or lenz==maxlen or lenz==1, "Length of z doesn't match len of others" - assert ux is None or lenux==maxlen or lenux==1, "Length of ux doesn't match len of others" - assert uy is None or lenuy==maxlen or lenuy==1, "Length of uy doesn't match len of others" - assert uz is None or lenuz==maxlen or lenuz==1, "Length of uz doesn't match len of others" - assert w is None or lenw==maxlen or lenw==1, "Length of w doesn't match len of others" + assert ( + x is None or lenx == maxlen or lenx == 1 + ), "Length of x doesn't match len of others" + assert ( + y is None or leny == maxlen or leny == 1 + ), "Length of y doesn't match len of others" + assert ( + z is None or lenz == maxlen or lenz == 1 + ), "Length of z doesn't match len of others" + assert ( + ux is None or lenux == maxlen or lenux == 1 + ), "Length of ux doesn't match len of others" + assert ( + uy is None or lenuy == maxlen or lenuy == 1 + ), "Length of uy doesn't match len of others" + assert ( + uz is None or lenuz == maxlen or lenuz == 1 + ), "Length of uz doesn't match len of others" + assert ( + w is None or lenw == maxlen or lenw == 1 + ), "Length of w doesn't match len of others" for key, val in kwargs.items(): - assert np.size(val)==1 or len(val)==maxlen, f"Length of {key} doesn't match len of others" + assert ( + np.size(val) == 1 or len(val) == maxlen + ), f"Length of {key} doesn't match len of others" # --- Broadcast scalars into appropriate length arrays # --- If the parameter was not supplied, use the default value if lenx == 1: - x = 
np.full(maxlen, (x or 0.)) + x = np.full(maxlen, (x or 0.0)) if leny == 1: - y = np.full(maxlen, (y or 0.)) + y = np.full(maxlen, (y or 0.0)) if lenz == 1: - z = np.full(maxlen, (z or 0.)) + z = np.full(maxlen, (z or 0.0)) if lenux == 1: - ux = np.full(maxlen, (ux or 0.)) + ux = np.full(maxlen, (ux or 0.0)) if lenuy == 1: - uy = np.full(maxlen, (uy or 0.)) + uy = np.full(maxlen, (uy or 0.0)) if lenuz == 1: - uz = np.full(maxlen, (uz or 0.)) + uz = np.full(maxlen, (uz or 0.0)) if lenw == 1: - w = np.full(maxlen, (w or 0.)) + w = np.full(maxlen, (w or 0.0)) for key, val in kwargs.items(): if np.size(val) == 1: kwargs[key] = np.full(maxlen, val) @@ -122,22 +147,22 @@ def add_particles(self, x=None, y=None, z=None, ux=None, uy=None, built_in_attrs = libwarpx.dim # --- The three velocities built_in_attrs += 3 - if libwarpx.geometry_dim == 'rz': + if libwarpx.geometry_dim == "rz": # --- With RZ, there is also theta built_in_attrs += 1 # --- The number of extra attributes (including the weight) nattr = self.particle_container.num_real_comps - built_in_attrs attr = np.zeros((maxlen, nattr)) - attr[:,0] = w + attr[:, 0] = w # --- Note that the velocities are handled separately and not included in attr # --- (even though they are stored as attributes in the C++) for key, vals in kwargs.items(): - attr[:,self.particle_container.get_comp_index(key) - built_in_attrs] = vals + attr[:, self.particle_container.get_comp_index(key) - built_in_attrs] = vals nattr_int = 0 - attr_int = np.empty([0], dtype=np.int32) + attr_int = np.empty([0], dtype=np.int32) # TODO: expose ParticleReal through pyAMReX # and cast arrays to the correct types, before calling add_n_particles @@ -149,13 +174,23 @@ def add_particles(self, x=None, y=None, z=None, ux=None, uy=None, # uz = uz.astype(self._numpy_particlereal_dtype, copy=False) self.particle_container.add_n_particles( - 0, x.size, x, y, z, ux, uy, uz, - nattr, attr, nattr_int, attr_int, unique_particles + 0, + x.size, + x, + y, + z, + ux, + uy, + 
uz, + nattr, + attr, + nattr_int, + attr_int, + unique_particles, ) - def get_particle_count(self, local=False): - ''' + """ Get the number of particles of this species in the simulation. Parameters @@ -170,13 +205,13 @@ def get_particle_count(self, local=False): int An integer count of the number of particles - ''' + """ return self.particle_container.total_number_of_particles(True, local) - nps = property(get_particle_count) + nps = property(get_particle_count) def add_real_comp(self, pid_name, comm=True): - ''' + """ Add a real component to the particle data array. Parameters @@ -187,12 +222,11 @@ def add_real_comp(self, pid_name, comm=True): comm : bool Should the component be communicated - ''' + """ self.particle_container.add_real_comp(pid_name, comm) - def get_particle_real_arrays(self, comp_name, level, copy_to_host=False): - ''' + """ This returns a list of numpy or cupy arrays containing the particle real array data on each tile for this process. @@ -218,7 +252,7 @@ def get_particle_real_arrays(self, comp_name, level, copy_to_host=False): List of arrays The requested particle array data - ''' + """ comp_idx = self.particle_container.get_comp_index(comp_name) data_array = [] @@ -236,9 +270,8 @@ def get_particle_real_arrays(self, comp_name, level, copy_to_host=False): return data_array - def get_particle_int_arrays(self, comp_name, level, copy_to_host=False): - ''' + """ This returns a list of numpy or cupy arrays containing the particle int array data on each tile for this process. 
@@ -264,7 +297,7 @@ def get_particle_int_arrays(self, comp_name, level, copy_to_host=False): List of arrays The requested particle array data - ''' + """ comp_idx = self.particle_container.get_icomp_index(comp_name) data_array = [] @@ -282,9 +315,8 @@ def get_particle_int_arrays(self, comp_name, level, copy_to_host=False): return data_array - def get_particle_idcpu_arrays(self, level, copy_to_host=False): - ''' + """ This returns a list of numpy or cupy arrays containing the particle idcpu data on each tile for this process. @@ -306,7 +338,7 @@ def get_particle_idcpu_arrays(self, level, copy_to_host=False): List of arrays The requested particle array data - ''' + """ data_array = [] for pti in libwarpx.libwarpx_so.WarpXParIter(self.particle_container, level): soa = pti.soa() @@ -322,9 +354,8 @@ def get_particle_idcpu_arrays(self, level, copy_to_host=False): return data_array - def get_particle_idcpu(self, level=0, copy_to_host=False): - ''' + """ Return a list of numpy or cupy arrays containing the particle 'idcpu' numbers on each tile. @@ -343,13 +374,13 @@ def get_particle_idcpu(self, level=0, copy_to_host=False): List of arrays The requested particle idcpu - ''' + """ return self.get_particle_idcpu_arrays(level, copy_to_host=copy_to_host) - idcpu = property(get_particle_idcpu) + idcpu = property(get_particle_idcpu) def get_particle_id(self, level=0, copy_to_host=False): - ''' + """ Return a list of numpy or cupy arrays containing the particle 'id' numbers on each tile. @@ -368,13 +399,12 @@ def get_particle_id(self, level=0, copy_to_host=False): List of arrays The requested particle ids - ''' + """ idcpu = self.get_particle_idcpu(level, copy_to_host) return [libwarpx.amr.unpack_ids(tile) for tile in idcpu] - def get_particle_cpu(self, level=0, copy_to_host=False): - ''' + """ Return a list of numpy or cupy arrays containing the particle 'cpu' numbers on each tile. 
@@ -393,13 +423,12 @@ def get_particle_cpu(self, level=0, copy_to_host=False): List of arrays The requested particle cpus - ''' + """ idcpu = self.get_particle_idcpu(level, copy_to_host) return [libwarpx.amr.unpack_cpus(tile) for tile in idcpu] - def get_particle_x(self, level=0, copy_to_host=False): - ''' + """ Return a list of numpy or cupy arrays containing the particle 'x' positions on each tile. @@ -418,13 +447,13 @@ def get_particle_x(self, level=0, copy_to_host=False): List of arrays The requested particle x position - ''' - return self.get_particle_real_arrays('x', level, copy_to_host=copy_to_host) - xp = property(get_particle_x) + """ + return self.get_particle_real_arrays("x", level, copy_to_host=copy_to_host) + xp = property(get_particle_x) def get_particle_y(self, level=0, copy_to_host=False): - ''' + """ Return a list of numpy or cupy arrays containing the particle 'y' positions on each tile. @@ -443,13 +472,13 @@ def get_particle_y(self, level=0, copy_to_host=False): List of arrays The requested particle y position - ''' - return self.get_particle_real_arrays('y', level, copy_to_host=copy_to_host) - yp = property(get_particle_y) + """ + return self.get_particle_real_arrays("y", level, copy_to_host=copy_to_host) + yp = property(get_particle_y) def get_particle_r(self, level=0, copy_to_host=False): - ''' + """ Return a list of numpy or cupy arrays containing the particle 'r' positions on each tile. 
@@ -468,22 +497,24 @@ def get_particle_r(self, level=0, copy_to_host=False): List of arrays The requested particle r position - ''' + """ xp, cupy_status = load_cupy() - if libwarpx.geometry_dim == 'rz': + if libwarpx.geometry_dim == "rz": return self.get_particle_x(level, copy_to_host) - elif libwarpx.geometry_dim == '3d': + elif libwarpx.geometry_dim == "3d": x = self.get_particle_x(level, copy_to_host) y = self.get_particle_y(level, copy_to_host) return xp.sqrt(x**2 + y**2) - elif libwarpx.geometry_dim == '2d' or libwarpx.geometry_dim == '1d': - raise Exception('get_particle_r: There is no r coordinate with 1D or 2D Cartesian') - rp = property(get_particle_r) + elif libwarpx.geometry_dim == "2d" or libwarpx.geometry_dim == "1d": + raise Exception( + "get_particle_r: There is no r coordinate with 1D or 2D Cartesian" + ) + rp = property(get_particle_r) def get_particle_theta(self, level=0, copy_to_host=False): - ''' + """ Return a list of numpy or cupy arrays containing the particle theta on each tile. 
@@ -502,22 +533,24 @@ def get_particle_theta(self, level=0, copy_to_host=False): List of arrays The requested particle theta position - ''' + """ xp, cupy_status = load_cupy() - if libwarpx.geometry_dim == 'rz': - return self.get_particle_real_arrays('theta', level, copy_to_host) - elif libwarpx.geometry_dim == '3d': + if libwarpx.geometry_dim == "rz": + return self.get_particle_real_arrays("theta", level, copy_to_host) + elif libwarpx.geometry_dim == "3d": x = self.get_particle_x(level, copy_to_host) y = self.get_particle_y(level, copy_to_host) return xp.arctan2(y, x) - elif libwarpx.geometry_dim == '2d' or libwarpx.geometry_dim == '1d': - raise Exception('get_particle_theta: There is no theta coordinate with 1D or 2D Cartesian') - thetap = property(get_particle_theta) + elif libwarpx.geometry_dim == "2d" or libwarpx.geometry_dim == "1d": + raise Exception( + "get_particle_theta: There is no theta coordinate with 1D or 2D Cartesian" + ) + thetap = property(get_particle_theta) def get_particle_z(self, level=0, copy_to_host=False): - ''' + """ Return a list of numpy or cupy arrays containing the particle 'z' positions on each tile. @@ -536,13 +569,13 @@ def get_particle_z(self, level=0, copy_to_host=False): List of arrays The requested particle z position - ''' - return self.get_particle_real_arrays('z', level, copy_to_host=copy_to_host) - zp = property(get_particle_z) + """ + return self.get_particle_real_arrays("z", level, copy_to_host=copy_to_host) + zp = property(get_particle_z) def get_particle_weight(self, level=0, copy_to_host=False): - ''' + """ Return a list of numpy or cupy arrays containing the particle weight on each tile. 
@@ -561,13 +594,13 @@ def get_particle_weight(self, level=0, copy_to_host=False): List of arrays The requested particle weight - ''' - return self.get_particle_real_arrays('w', level, copy_to_host=copy_to_host) - wp = property(get_particle_weight) + """ + return self.get_particle_real_arrays("w", level, copy_to_host=copy_to_host) + wp = property(get_particle_weight) def get_particle_ux(self, level=0, copy_to_host=False): - ''' + """ Return a list of numpy or cupy arrays containing the particle x momentum on each tile. @@ -586,13 +619,13 @@ def get_particle_ux(self, level=0, copy_to_host=False): List of arrays The requested particle x momentum - ''' - return self.get_particle_real_arrays('ux', level, copy_to_host=copy_to_host) - uxp = property(get_particle_ux) + """ + return self.get_particle_real_arrays("ux", level, copy_to_host=copy_to_host) + uxp = property(get_particle_ux) def get_particle_uy(self, level=0, copy_to_host=False): - ''' + """ Return a list of numpy or cupy arrays containing the particle y momentum on each tile. @@ -611,13 +644,13 @@ def get_particle_uy(self, level=0, copy_to_host=False): List of arrays The requested particle y momentum - ''' - return self.get_particle_real_arrays('uy', level, copy_to_host=copy_to_host) - uyp = property(get_particle_uy) + """ + return self.get_particle_real_arrays("uy", level, copy_to_host=copy_to_host) + uyp = property(get_particle_uy) def get_particle_uz(self, level=0, copy_to_host=False): - ''' + """ Return a list of numpy or cupy arrays containing the particle z momentum on each tile. 
@@ -636,14 +669,14 @@ def get_particle_uz(self, level=0, copy_to_host=False): List of arrays The requested particle z momentum - ''' + """ - return self.get_particle_real_arrays('uz', level, copy_to_host=copy_to_host) - uzp = property(get_particle_uz) + return self.get_particle_real_arrays("uz", level, copy_to_host=copy_to_host) + uzp = property(get_particle_uz) def get_species_charge_sum(self, local=False): - ''' + """ Returns the total charge in the simulation due to the given species. Parameters @@ -651,39 +684,38 @@ def get_species_charge_sum(self, local=False): local : bool If True return total charge per processor - ''' + """ return self.particle_container.sum_particle_charge(local) - def getex(self): - raise NotImplementedError('Particle E fields not supported') - ex = property(getex) + raise NotImplementedError("Particle E fields not supported") + ex = property(getex) def getey(self): - raise NotImplementedError('Particle E fields not supported') - ey = property(getey) + raise NotImplementedError("Particle E fields not supported") + ey = property(getey) def getez(self): - raise NotImplementedError('Particle E fields not supported') - ez = property(getez) + raise NotImplementedError("Particle E fields not supported") + ez = property(getez) def getbx(self): - raise NotImplementedError('Particle B fields not supported') - bx = property(getbx) + raise NotImplementedError("Particle B fields not supported") + bx = property(getbx) def getby(self): - raise NotImplementedError('Particle B fields not supported') - by = property(getby) + raise NotImplementedError("Particle B fields not supported") + by = property(getby) def getbz(self): - raise NotImplementedError('Particle B fields not supported') - bz = property(getbz) + raise NotImplementedError("Particle B fields not supported") + bz = property(getbz) def deposit_charge_density(self, level, clear_rho=True, sync_rho=True): """ @@ -701,7 +733,7 @@ def deposit_charge_density(self, level, clear_rho=True, 
sync_rho=True): sync_rho : bool If True, perform MPI exchange and properly set boundary cells for rho_fp. """ - rho_fp = libwarpx.warpx.multifab(f'rho_fp[level={level}]') + rho_fp = libwarpx.warpx.multifab(f"rho_fp[level={level}]") if rho_fp is None: raise RuntimeError("Multifab `rho_fp` is not allocated.") @@ -712,7 +744,7 @@ def deposit_charge_density(self, level, clear_rho=True, sync_rho=True): # deposit the charge density from the desired species self.particle_container.deposit_charge(rho_fp, level) - if libwarpx.geometry_dim == 'rz': + if libwarpx.geometry_dim == "rz": libwarpx.warpx.apply_inverse_volume_scaling_to_charge_density(rho_fp, level) if sync_rho: @@ -728,9 +760,8 @@ class ParticleBoundaryBufferWrapper(object): def __init__(self): self.particle_buffer = libwarpx.warpx.get_particle_boundary_buffer() - def get_particle_boundary_buffer_size(self, species_name, boundary, local=False): - ''' + """ This returns the number of particles that have been scraped so far in the simulation from the specified boundary and of the specified species. @@ -747,15 +778,13 @@ def get_particle_boundary_buffer_size(self, species_name, boundary, local=False) local : bool Whether to only return the number of particles in the current processor's buffer - ''' + """ return self.particle_buffer.get_num_particles_in_container( - species_name, self._get_boundary_number(boundary), - local=local + species_name, self._get_boundary_number(boundary), local=local ) - def get_particle_boundary_buffer(self, species_name, boundary, comp_name, level): - ''' + """ This returns a list of numpy or cupy arrays containing the particle array data for a species that has been scraped by a specific simulation boundary. @@ -783,41 +812,43 @@ def get_particle_boundary_buffer(self, species_name, boundary, comp_name, level) level : int Which AMR level to retrieve scraped particle data from. 
- ''' + """ xp, cupy_status = load_cupy() part_container = self.particle_buffer.get_particle_container( species_name, self._get_boundary_number(boundary) ) data_array = [] - #loop over the real attributes + # loop over the real attributes if comp_name in part_container.real_comp_names: comp_idx = part_container.real_comp_names[comp_name] - for ii, pti in enumerate(libwarpx.libwarpx_so.BoundaryBufferParIter(part_container, level)): + for ii, pti in enumerate( + libwarpx.libwarpx_so.BoundaryBufferParIter(part_container, level) + ): soa = pti.soa() data_array.append(xp.array(soa.get_real_data(comp_idx), copy=False)) - #loop over the integer attributes + # loop over the integer attributes elif comp_name in part_container.int_comp_names: - comp_idx = part_container.int_comp_names[comp_name] - for ii, pti in enumerate(libwarpx.libwarpx_so.BoundaryBufferParIter(part_container, level)): + comp_idx = part_container.int_comp_names[comp_name] + for ii, pti in enumerate( + libwarpx.libwarpx_so.BoundaryBufferParIter(part_container, level) + ): soa = pti.soa() data_array.append(xp.array(soa.get_int_data(comp_idx), copy=False)) else: - raise RuntimeError('Name %s not found' %comp_name) + raise RuntimeError("Name %s not found" % comp_name) return data_array - def clear_buffer(self): - ''' + """ Clear the buffer that holds the particles lost at the boundaries. - ''' + """ self.particle_buffer.clear_particles() - def _get_boundary_number(self, boundary): - ''' + """ Utility function to find the boundary number given a boundary name. @@ -832,34 +863,36 @@ def _get_boundary_number(self, boundary): ------- int Integer index in the boundary scraper buffer for the given boundary. 
- ''' - if libwarpx.geometry_dim == '3d': - dimensions = {'x' : 0, 'y' : 1, 'z' : 2} - elif libwarpx.geometry_dim == '2d' or libwarpx.geometry_dim == 'rz': - dimensions = {'x' : 0, 'z' : 1} - elif libwarpx.geometry_dim == '1d': - dimensions = {'z' : 0} + """ + if libwarpx.geometry_dim == "3d": + dimensions = {"x": 0, "y": 1, "z": 2} + elif libwarpx.geometry_dim == "2d" or libwarpx.geometry_dim == "rz": + dimensions = {"x": 0, "z": 1} + elif libwarpx.geometry_dim == "1d": + dimensions = {"z": 0} else: raise RuntimeError(f"Unknown simulation geometry: {libwarpx.geometry_dim}") - if boundary != 'eb': + if boundary != "eb": boundary_parts = boundary.split("_") dim_num = dimensions[boundary_parts[0]] - if boundary_parts[1] == 'lo': + if boundary_parts[1] == "lo": side = 0 - elif boundary_parts[1] == 'hi': + elif boundary_parts[1] == "hi": side = 1 else: - raise RuntimeError(f'Unknown boundary specified: {boundary}') + raise RuntimeError(f"Unknown boundary specified: {boundary}") boundary_num = 2 * dim_num + side else: - if libwarpx.geometry_dim == '3d': + if libwarpx.geometry_dim == "3d": boundary_num = 6 - elif libwarpx.geometry_dim == '2d' or libwarpx.geometry_dim == 'rz': + elif libwarpx.geometry_dim == "2d" or libwarpx.geometry_dim == "rz": boundary_num = 4 - elif libwarpx.geometry_dim == '1d': + elif libwarpx.geometry_dim == "1d": boundary_num = 2 else: - raise RuntimeError(f"Unknown simulation geometry: {libwarpx.geometry_dim}") + raise RuntimeError( + f"Unknown simulation geometry: {libwarpx.geometry_dim}" + ) return boundary_num diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 4f3993c911c..f7fabfe48d9 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -6,8 +6,8 @@ # # License: BSD-3-Clause-LBNL -"""Classes following the PICMI standard -""" +"""Classes following the PICMI standard""" + import os import re from dataclasses import dataclass @@ -18,20 +18,26 @@ import picmistandard import pywarpx -codename = 'warpx' +codename = 
"warpx" picmistandard.register_codename(codename) # dictionary to map field boundary conditions from picmistandard to WarpX BC_map = { - 'open':'pml', 'dirichlet':'pec', 'periodic':'periodic', 'damped':'damped', - 'absorbing_silver_mueller':'absorbing_silver_mueller', - 'neumann':'neumann', 'none':'none', None:'none' + "open": "pml", + "dirichlet": "pec", + "periodic": "periodic", + "damped": "damped", + "absorbing_silver_mueller": "absorbing_silver_mueller", + "neumann": "neumann", + "none": "none", + None: "none", } + class constants: # --- Put the constants in their own namespace # --- Values from WarpXConst.H - c = 299792458. + c = 299792458.0 ep0 = 8.8541878128e-12 mu0 = 1.25663706212e-06 q_e = 1.602176634e-19 @@ -40,6 +46,7 @@ class constants: hbar = 1.054571817e-34 kb = 1.380649e-23 + picmistandard.register_constants(constants) @@ -180,37 +187,47 @@ class Species(picmistandard.PICMI_Species): Dictionary of extra real particle attributes initialized from an expression that is a function of the variables (x, y, z, ux, uy, uz, t). 
""" - def init(self, kw): - if self.particle_type == 'electron': - if self.charge is None: self.charge = '-q_e' - if self.mass is None: self.mass = 'm_e' - elif self.particle_type == 'positron': - if self.charge is None: self.charge = 'q_e' - if self.mass is None: self.mass = 'm_e' - elif self.particle_type == 'proton': - if self.charge is None: self.charge = 'q_e' - if self.mass is None: self.mass = 'm_p' - elif self.particle_type == 'anti-proton': - if self.charge is None: self.charge = '-q_e' - if self.mass is None: self.mass = 'm_p' + def init(self, kw): + if self.particle_type == "electron": + if self.charge is None: + self.charge = "-q_e" + if self.mass is None: + self.mass = "m_e" + elif self.particle_type == "positron": + if self.charge is None: + self.charge = "q_e" + if self.mass is None: + self.mass = "m_e" + elif self.particle_type == "proton": + if self.charge is None: + self.charge = "q_e" + if self.mass is None: + self.mass = "m_p" + elif self.particle_type == "anti-proton": + if self.charge is None: + self.charge = "-q_e" + if self.mass is None: + self.mass = "m_p" else: if self.charge is None and self.charge_state is not None: - if self.charge_state == +1.: - self.charge = 'q_e' - elif self.charge_state == -1.: - self.charge = '-q_e' + if self.charge_state == +1.0: + self.charge = "q_e" + elif self.charge_state == -1.0: + self.charge = "-q_e" else: - self.charge = self.charge_state*constants.q_e + self.charge = self.charge_state * constants.q_e if self.particle_type is not None: # Match a string of the format '#nXx', with the '#n' optional isotope number. 
- m = re.match(r'(?P#[\d+])*(?P[A-Za-z]+)', self.particle_type) + m = re.match(r"(?P#[\d+])*(?P[A-Za-z]+)", self.particle_type) if m is not None: - element = periodictable.elements.symbol(m['sym']) - if m['iso'] is not None: - element = element[m['iso'][1:]] + element = periodictable.elements.symbol(m["sym"]) + if m["iso"] is not None: + element = element[m["iso"][1:]] if self.charge_state is not None: - assert self.charge_state <= element.number, Exception('%s charge state not valid'%self.particle_type) + assert self.charge_state <= element.number, Exception( + "%s charge state not valid" % self.particle_type + ) try: element = element.ion[self.charge_state] except ValueError: @@ -219,107 +236,139 @@ def init(self, kw): pass self.element = element if self.mass is None: - self.mass = element.mass*periodictable.constants.atomic_mass_constant + self.mass = ( + element.mass * periodictable.constants.atomic_mass_constant + ) else: raise Exception('The species "particle_type" is not known') - self.boost_adjust_transverse_positions = kw.pop('warpx_boost_adjust_transverse_positions', None) + self.boost_adjust_transverse_positions = kw.pop( + "warpx_boost_adjust_transverse_positions", None + ) # For the relativistic electrostatic solver - self.self_fields_required_precision = kw.pop('warpx_self_fields_required_precision', None) - self.self_fields_absolute_tolerance = kw.pop('warpx_self_fields_absolute_tolerance', None) - self.self_fields_max_iters = kw.pop('warpx_self_fields_max_iters', None) - self.self_fields_verbosity = kw.pop('warpx_self_fields_verbosity', None) - self.save_previous_position = kw.pop('warpx_save_previous_position', None) - self.do_not_deposit = kw.pop('warpx_do_not_deposit', None) - self.do_not_push = kw.pop('warpx_do_not_push', None) - self.do_not_gather = kw.pop('warpx_do_not_gather', None) - self.random_theta = kw.pop('warpx_random_theta', None) + self.self_fields_required_precision = kw.pop( + "warpx_self_fields_required_precision", None + ) + 
self.self_fields_absolute_tolerance = kw.pop( + "warpx_self_fields_absolute_tolerance", None + ) + self.self_fields_max_iters = kw.pop("warpx_self_fields_max_iters", None) + self.self_fields_verbosity = kw.pop("warpx_self_fields_verbosity", None) + self.save_previous_position = kw.pop("warpx_save_previous_position", None) + self.do_not_deposit = kw.pop("warpx_do_not_deposit", None) + self.do_not_push = kw.pop("warpx_do_not_push", None) + self.do_not_gather = kw.pop("warpx_do_not_gather", None) + self.random_theta = kw.pop("warpx_random_theta", None) # For particle reflection - self.reflection_model_xlo = kw.pop('warpx_reflection_model_xlo', None) - self.reflection_model_xhi = kw.pop('warpx_reflection_model_xhi', None) - self.reflection_model_ylo = kw.pop('warpx_reflection_model_ylo', None) - self.reflection_model_yhi = kw.pop('warpx_reflection_model_yhi', None) - self.reflection_model_zlo = kw.pop('warpx_reflection_model_zlo', None) - self.reflection_model_zhi = kw.pop('warpx_reflection_model_zhi', None) + self.reflection_model_xlo = kw.pop("warpx_reflection_model_xlo", None) + self.reflection_model_xhi = kw.pop("warpx_reflection_model_xhi", None) + self.reflection_model_ylo = kw.pop("warpx_reflection_model_ylo", None) + self.reflection_model_yhi = kw.pop("warpx_reflection_model_yhi", None) + self.reflection_model_zlo = kw.pop("warpx_reflection_model_zlo", None) + self.reflection_model_zhi = kw.pop("warpx_reflection_model_zhi", None) # self.reflection_model_eb = kw.pop('warpx_reflection_model_eb', None) # For the scraper buffer - self.save_particles_at_xlo = kw.pop('warpx_save_particles_at_xlo', None) - self.save_particles_at_xhi = kw.pop('warpx_save_particles_at_xhi', None) - self.save_particles_at_ylo = kw.pop('warpx_save_particles_at_ylo', None) - self.save_particles_at_yhi = kw.pop('warpx_save_particles_at_yhi', None) - self.save_particles_at_zlo = kw.pop('warpx_save_particles_at_zlo', None) - self.save_particles_at_zhi = kw.pop('warpx_save_particles_at_zhi', 
None) - self.save_particles_at_eb = kw.pop('warpx_save_particles_at_eb', None) + self.save_particles_at_xlo = kw.pop("warpx_save_particles_at_xlo", None) + self.save_particles_at_xhi = kw.pop("warpx_save_particles_at_xhi", None) + self.save_particles_at_ylo = kw.pop("warpx_save_particles_at_ylo", None) + self.save_particles_at_yhi = kw.pop("warpx_save_particles_at_yhi", None) + self.save_particles_at_zlo = kw.pop("warpx_save_particles_at_zlo", None) + self.save_particles_at_zhi = kw.pop("warpx_save_particles_at_zhi", None) + self.save_particles_at_eb = kw.pop("warpx_save_particles_at_eb", None) # Resampling settings - self.do_resampling = kw.pop('warpx_do_resampling', None) - self.resampling_algorithm = kw.pop('warpx_resampling_algorithm', None) - self.resampling_min_ppc = kw.pop('warpx_resampling_min_ppc', None) - self.resampling_trigger_intervals = kw.pop('warpx_resampling_trigger_intervals', None) - self.resampling_triggering_max_avg_ppc = kw.pop('warpx_resampling_trigger_max_avg_ppc', None) - self.resampling_algorithm_target_weight = kw.pop('warpx_resampling_algorithm_target_weight', None) - self.resampling_algorithm_velocity_grid_type = kw.pop('warpx_resampling_algorithm_velocity_grid_type', None) - self.resampling_algorithm_delta_ur = kw.pop('warpx_resampling_algorithm_delta_ur', None) - self.resampling_algorithm_n_theta = kw.pop('warpx_resampling_algorithm_n_theta', None) - self.resampling_algorithm_n_phi = kw.pop('warpx_resampling_algorithm_n_phi', None) - self.resampling_algorithm_delta_u = kw.pop('warpx_resampling_algorithm_delta_u', None) - if self.resampling_algorithm_delta_u is not None and np.size(self.resampling_algorithm_delta_u) == 1: - self.resampling_algorithm_delta_u = [self.resampling_algorithm_delta_u]*3 + self.do_resampling = kw.pop("warpx_do_resampling", None) + self.resampling_algorithm = kw.pop("warpx_resampling_algorithm", None) + self.resampling_min_ppc = kw.pop("warpx_resampling_min_ppc", None) + self.resampling_trigger_intervals = 
kw.pop( + "warpx_resampling_trigger_intervals", None + ) + self.resampling_triggering_max_avg_ppc = kw.pop( + "warpx_resampling_trigger_max_avg_ppc", None + ) + self.resampling_algorithm_target_weight = kw.pop( + "warpx_resampling_algorithm_target_weight", None + ) + self.resampling_algorithm_velocity_grid_type = kw.pop( + "warpx_resampling_algorithm_velocity_grid_type", None + ) + self.resampling_algorithm_delta_ur = kw.pop( + "warpx_resampling_algorithm_delta_ur", None + ) + self.resampling_algorithm_n_theta = kw.pop( + "warpx_resampling_algorithm_n_theta", None + ) + self.resampling_algorithm_n_phi = kw.pop( + "warpx_resampling_algorithm_n_phi", None + ) + self.resampling_algorithm_delta_u = kw.pop( + "warpx_resampling_algorithm_delta_u", None + ) + if ( + self.resampling_algorithm_delta_u is not None + and np.size(self.resampling_algorithm_delta_u) == 1 + ): + self.resampling_algorithm_delta_u = [self.resampling_algorithm_delta_u] * 3 # extra particle attributes - self.extra_int_attributes = kw.pop('warpx_add_int_attributes', None) - self.extra_real_attributes = kw.pop('warpx_add_real_attributes', None) - - def species_initialize_inputs(self, layout, - initialize_self_fields = False, - injection_plane_position = None, - injection_plane_normal_vector = None): + self.extra_int_attributes = kw.pop("warpx_add_int_attributes", None) + self.extra_real_attributes = kw.pop("warpx_add_real_attributes", None) + + def species_initialize_inputs( + self, + layout, + initialize_self_fields=False, + injection_plane_position=None, + injection_plane_normal_vector=None, + ): self.species_number = len(pywarpx.particles.species_names) if self.name is None: - self.name = 'species{}'.format(self.species_number) + self.name = "species{}".format(self.species_number) pywarpx.particles.species_names.append(self.name) if initialize_self_fields is None: initialize_self_fields = False - self.species = pywarpx.Bucket.Bucket(self.name, - mass = self.mass, - charge = self.charge, - 
injection_style = None, - initialize_self_fields = int(initialize_self_fields), - boost_adjust_transverse_positions = self.boost_adjust_transverse_positions, - self_fields_required_precision = self.self_fields_required_precision, - self_fields_absolute_tolerance = self.self_fields_absolute_tolerance, - self_fields_max_iters = self.self_fields_max_iters, - self_fields_verbosity = self.self_fields_verbosity, - save_particles_at_xlo = self.save_particles_at_xlo, - save_particles_at_xhi = self.save_particles_at_xhi, - save_particles_at_ylo = self.save_particles_at_ylo, - save_particles_at_yhi = self.save_particles_at_yhi, - save_particles_at_zlo = self.save_particles_at_zlo, - save_particles_at_zhi = self.save_particles_at_zhi, - save_particles_at_eb = self.save_particles_at_eb, - save_previous_position = self.save_previous_position, - do_not_deposit = self.do_not_deposit, - do_not_push = self.do_not_push, - do_not_gather = self.do_not_gather, - random_theta = self.random_theta, - do_resampling=self.do_resampling, - resampling_algorithm=self.resampling_algorithm, - resampling_min_ppc=self.resampling_min_ppc, - resampling_trigger_intervals=self.resampling_trigger_intervals, - resampling_trigger_max_avg_ppc=self.resampling_triggering_max_avg_ppc, - resampling_algorithm_target_weight=self.resampling_algorithm_target_weight, - resampling_algorithm_velocity_grid_type=self.resampling_algorithm_velocity_grid_type, - resampling_algorithm_delta_ur=self.resampling_algorithm_delta_ur, - resampling_algorithm_n_theta=self.resampling_algorithm_n_theta, - resampling_algorithm_n_phi=self.resampling_algorithm_n_phi, - resampling_algorithm_delta_u=self.resampling_algorithm_delta_u) + self.species = pywarpx.Bucket.Bucket( + self.name, + mass=self.mass, + charge=self.charge, + injection_style=None, + initialize_self_fields=int(initialize_self_fields), + boost_adjust_transverse_positions=self.boost_adjust_transverse_positions, + 
self_fields_required_precision=self.self_fields_required_precision, + self_fields_absolute_tolerance=self.self_fields_absolute_tolerance, + self_fields_max_iters=self.self_fields_max_iters, + self_fields_verbosity=self.self_fields_verbosity, + save_particles_at_xlo=self.save_particles_at_xlo, + save_particles_at_xhi=self.save_particles_at_xhi, + save_particles_at_ylo=self.save_particles_at_ylo, + save_particles_at_yhi=self.save_particles_at_yhi, + save_particles_at_zlo=self.save_particles_at_zlo, + save_particles_at_zhi=self.save_particles_at_zhi, + save_particles_at_eb=self.save_particles_at_eb, + save_previous_position=self.save_previous_position, + do_not_deposit=self.do_not_deposit, + do_not_push=self.do_not_push, + do_not_gather=self.do_not_gather, + random_theta=self.random_theta, + do_resampling=self.do_resampling, + resampling_algorithm=self.resampling_algorithm, + resampling_min_ppc=self.resampling_min_ppc, + resampling_trigger_intervals=self.resampling_trigger_intervals, + resampling_trigger_max_avg_ppc=self.resampling_triggering_max_avg_ppc, + resampling_algorithm_target_weight=self.resampling_algorithm_target_weight, + resampling_algorithm_velocity_grid_type=self.resampling_algorithm_velocity_grid_type, + resampling_algorithm_delta_ur=self.resampling_algorithm_delta_ur, + resampling_algorithm_n_theta=self.resampling_algorithm_n_theta, + resampling_algorithm_n_phi=self.resampling_algorithm_n_phi, + resampling_algorithm_delta_u=self.resampling_algorithm_delta_u, + ) # add reflection models self.species.add_new_attr("reflection_model_xlo(E)", self.reflection_model_xlo) @@ -334,11 +383,15 @@ def species_initialize_inputs(self, layout, if self.extra_int_attributes is not None: self.species.addIntegerAttributes = self.extra_int_attributes.keys() for attr, function in self.extra_int_attributes.items(): - self.species.add_new_attr('attribute.'+attr+'(x,y,z,ux,uy,uz,t)', function) + self.species.add_new_attr( + "attribute." 
+ attr + "(x,y,z,ux,uy,uz,t)", function + ) if self.extra_real_attributes is not None: self.species.addRealAttributes = self.extra_real_attributes.keys() for attr, function in self.extra_real_attributes.items(): - self.species.add_new_attr('attribute.'+attr+'(x,y,z,ux,uy,uz,t)', function) + self.species.add_new_attr( + "attribute." + attr + "(x,y,z,ux,uy,uz,t)", function + ) pywarpx.Particles.particles_list.append(self.species) @@ -346,101 +399,155 @@ def species_initialize_inputs(self, layout, distributions_is_list = np.iterable(self.initial_distribution) layout_is_list = np.iterable(layout) if not distributions_is_list and not layout_is_list: - self.initial_distribution.distribution_initialize_inputs(self.species_number, layout, self.species, - self.density_scale, '') + self.initial_distribution.distribution_initialize_inputs( + self.species_number, layout, self.species, self.density_scale, "" + ) elif distributions_is_list and (layout_is_list or layout is None): - assert layout is None or (len(self.initial_distribution) == len(layout)),\ - Exception('The initial distribution and layout lists must have the same lenth') - source_names = [f'dist{i}' for i in range(len(self.initial_distribution))] + assert layout is None or ( + len(self.initial_distribution) == len(layout) + ), Exception( + "The initial distribution and layout lists must have the same lenth" + ) + source_names = [ + f"dist{i}" for i in range(len(self.initial_distribution)) + ] self.species.injection_sources = source_names for i, dist in enumerate(self.initial_distribution): layout_i = layout[i] if layout is not None else None - dist.distribution_initialize_inputs(self.species_number, layout_i, self.species, - self.density_scale, source_names[i]) + dist.distribution_initialize_inputs( + self.species_number, + layout_i, + self.species, + self.density_scale, + source_names[i], + ) else: - raise Exception('The initial distribution and layout must both be scalars or both be lists') + raise Exception( + 
"The initial distribution and layout must both be scalars or both be lists" + ) if injection_plane_position is not None: if injection_plane_normal_vector is not None: - assert injection_plane_normal_vector[0] == 0. and injection_plane_normal_vector[1] == 0.,\ - Exception('Rigid injection can only be done along z') + assert ( + injection_plane_normal_vector[0] == 0.0 + and injection_plane_normal_vector[1] == 0.0 + ), Exception("Rigid injection can only be done along z") pywarpx.particles.rigid_injected_species.append(self.name) self.species.rigid_advance = 1 self.species.zinject_plane = injection_plane_position picmistandard.PICMI_MultiSpecies.Species_class = Species + + class MultiSpecies(picmistandard.PICMI_MultiSpecies): - def species_initialize_inputs(self, layout, - initialize_self_fields = False, - injection_plane_position = None, - injection_plane_normal_vector = None): + def species_initialize_inputs( + self, + layout, + initialize_self_fields=False, + injection_plane_position=None, + injection_plane_normal_vector=None, + ): for species in self.species_instances_list: - species.species_initialize_inputs(layout, - initialize_self_fields, - injection_plane_position, - injection_plane_normal_vector) + species.species_initialize_inputs( + layout, + initialize_self_fields, + injection_plane_position, + injection_plane_normal_vector, + ) class GaussianBunchDistribution(picmistandard.PICMI_GaussianBunchDistribution): def init(self, kw): - self.do_symmetrize = kw.pop('warpx_do_symmetrize', None) - self.symmetrization_order = kw.pop('warpx_symmetrization_order', None) - - def distribution_initialize_inputs(self, species_number, layout, species, density_scale, source_name): - species.add_new_group_attr(source_name, 'injection_style', "gaussian_beam") - species.add_new_group_attr(source_name, 'x_m', self.centroid_position[0]) - species.add_new_group_attr(source_name, 'y_m', self.centroid_position[1]) - species.add_new_group_attr(source_name, 'z_m', 
self.centroid_position[2]) - species.add_new_group_attr(source_name, 'x_rms', self.rms_bunch_size[0]) - species.add_new_group_attr(source_name, 'y_rms', self.rms_bunch_size[1]) - species.add_new_group_attr(source_name, 'z_rms', self.rms_bunch_size[2]) + self.do_symmetrize = kw.pop("warpx_do_symmetrize", None) + self.symmetrization_order = kw.pop("warpx_symmetrization_order", None) + + def distribution_initialize_inputs( + self, species_number, layout, species, density_scale, source_name + ): + species.add_new_group_attr(source_name, "injection_style", "gaussian_beam") + species.add_new_group_attr(source_name, "x_m", self.centroid_position[0]) + species.add_new_group_attr(source_name, "y_m", self.centroid_position[1]) + species.add_new_group_attr(source_name, "z_m", self.centroid_position[2]) + species.add_new_group_attr(source_name, "x_rms", self.rms_bunch_size[0]) + species.add_new_group_attr(source_name, "y_rms", self.rms_bunch_size[1]) + species.add_new_group_attr(source_name, "z_rms", self.rms_bunch_size[2]) # --- Only PseudoRandomLayout is supported - species.add_new_group_attr(source_name, 'npart', layout.n_macroparticles) + species.add_new_group_attr(source_name, "npart", layout.n_macroparticles) # --- Calculate the total charge. Note that charge might be a string instead of a number. charge = species.charge - if charge == 'q_e' or charge == '+q_e': + if charge == "q_e" or charge == "+q_e": charge = constants.q_e - elif charge == '-q_e': + elif charge == "-q_e": charge = -constants.q_e - species.add_new_group_attr(source_name, 'q_tot', self.n_physical_particles*charge) + species.add_new_group_attr( + source_name, "q_tot", self.n_physical_particles * charge + ) if density_scale is not None: - species.add_new_group_attr(source_name, 'q_tot', density_scale) + species.add_new_group_attr(source_name, "q_tot", density_scale) # --- The PICMI standard doesn't yet have a way of specifying these values. # --- They should default to the size of the domain. 
They are not typically # --- necessary though since any particles outside the domain are rejected. - #species.xmin - #species.xmax - #species.ymin - #species.ymax - #species.zmin - #species.zmax + # species.xmin + # species.xmax + # species.ymin + # species.ymax + # species.zmin + # species.zmax # --- Note that WarpX takes gamma*beta as input - if np.any(np.not_equal(self.velocity_divergence, 0.)): - species.add_new_group_attr(source_name, 'momentum_distribution_type', "radial_expansion") - species.add_new_group_attr(source_name, 'u_over_r', self.velocity_divergence[0]/constants.c) - #species.add_new_group_attr(source_name, 'u_over_y', self.velocity_divergence[1]/constants.c) - #species.add_new_group_attr(source_name, 'u_over_z', self.velocity_divergence[2]/constants.c) - elif np.any(np.not_equal(self.rms_velocity, 0.)): - species.add_new_group_attr(source_name, 'momentum_distribution_type', "gaussian") - species.add_new_group_attr(source_name, 'ux_m', self.centroid_velocity[0]/constants.c) - species.add_new_group_attr(source_name, 'uy_m', self.centroid_velocity[1]/constants.c) - species.add_new_group_attr(source_name, 'uz_m', self.centroid_velocity[2]/constants.c) - species.add_new_group_attr(source_name, 'ux_th', self.rms_velocity[0]/constants.c) - species.add_new_group_attr(source_name, 'uy_th', self.rms_velocity[1]/constants.c) - species.add_new_group_attr(source_name, 'uz_th', self.rms_velocity[2]/constants.c) + if np.any(np.not_equal(self.velocity_divergence, 0.0)): + species.add_new_group_attr( + source_name, "momentum_distribution_type", "radial_expansion" + ) + species.add_new_group_attr( + source_name, "u_over_r", self.velocity_divergence[0] / constants.c + ) + # species.add_new_group_attr(source_name, 'u_over_y', self.velocity_divergence[1]/constants.c) + # species.add_new_group_attr(source_name, 'u_over_z', self.velocity_divergence[2]/constants.c) + elif np.any(np.not_equal(self.rms_velocity, 0.0)): + species.add_new_group_attr( + source_name, 
"momentum_distribution_type", "gaussian" + ) + species.add_new_group_attr( + source_name, "ux_m", self.centroid_velocity[0] / constants.c + ) + species.add_new_group_attr( + source_name, "uy_m", self.centroid_velocity[1] / constants.c + ) + species.add_new_group_attr( + source_name, "uz_m", self.centroid_velocity[2] / constants.c + ) + species.add_new_group_attr( + source_name, "ux_th", self.rms_velocity[0] / constants.c + ) + species.add_new_group_attr( + source_name, "uy_th", self.rms_velocity[1] / constants.c + ) + species.add_new_group_attr( + source_name, "uz_th", self.rms_velocity[2] / constants.c + ) else: - species.add_new_group_attr(source_name, 'momentum_distribution_type', "constant") - species.add_new_group_attr(source_name, 'ux', self.centroid_velocity[0]/constants.c) - species.add_new_group_attr(source_name, 'uy', self.centroid_velocity[1]/constants.c) - species.add_new_group_attr(source_name, 'uz', self.centroid_velocity[2]/constants.c) + species.add_new_group_attr( + source_name, "momentum_distribution_type", "constant" + ) + species.add_new_group_attr( + source_name, "ux", self.centroid_velocity[0] / constants.c + ) + species.add_new_group_attr( + source_name, "uy", self.centroid_velocity[1] / constants.c + ) + species.add_new_group_attr( + source_name, "uz", self.centroid_velocity[2] / constants.c + ) - species.add_new_group_attr(source_name, 'do_symmetrize', self.do_symmetrize) - species.add_new_group_attr(source_name, 'symmetrization_order', self.symmetrization_order) + species.add_new_group_attr(source_name, "do_symmetrize", self.do_symmetrize) + species.add_new_group_attr( + source_name, "symmetrization_order", self.symmetrization_order + ) class DensityDistributionBase(object): @@ -448,7 +555,7 @@ class DensityDistributionBase(object): captures universal initialization logic.""" def set_mangle_dict(self): - if not hasattr(self, 'mangle_dict'): + if not hasattr(self, "mangle_dict"): self.mangle_dict = None if hasattr(self, "user_defined_kw") 
and self.mangle_dict is None: @@ -459,103 +566,181 @@ def set_mangle_dict(self): def set_species_attributes(self, species, layout, source_name): if isinstance(layout, GriddedLayout): # --- Note that the grid attribute of GriddedLayout is ignored - species.add_new_group_attr(source_name, 'injection_style', "nuniformpercell") - species.add_new_group_attr(source_name, 'num_particles_per_cell_each_dim', layout.n_macroparticle_per_cell) + species.add_new_group_attr( + source_name, "injection_style", "nuniformpercell" + ) + species.add_new_group_attr( + source_name, + "num_particles_per_cell_each_dim", + layout.n_macroparticle_per_cell, + ) elif isinstance(layout, PseudoRandomLayout): - assert (layout.n_macroparticles_per_cell is not None), Exception('WarpX only supports n_macroparticles_per_cell for the PseudoRandomLayout with this distribution') - species.add_new_group_attr(source_name, 'injection_style', "nrandompercell") - species.add_new_group_attr(source_name, 'num_particles_per_cell', layout.n_macroparticles_per_cell) + assert layout.n_macroparticles_per_cell is not None, Exception( + "WarpX only supports n_macroparticles_per_cell for the PseudoRandomLayout with this distribution" + ) + species.add_new_group_attr(source_name, "injection_style", "nrandompercell") + species.add_new_group_attr( + source_name, "num_particles_per_cell", layout.n_macroparticles_per_cell + ) else: - raise Exception('WarpX does not support the specified layout for this distribution') + raise Exception( + "WarpX does not support the specified layout for this distribution" + ) - species.add_new_group_attr(source_name, 'xmin', self.lower_bound[0]) - species.add_new_group_attr(source_name, 'xmax', self.upper_bound[0]) - species.add_new_group_attr(source_name, 'ymin', self.lower_bound[1]) - species.add_new_group_attr(source_name, 'ymax', self.upper_bound[1]) - species.add_new_group_attr(source_name, 'zmin', self.lower_bound[2]) - species.add_new_group_attr(source_name, 'zmax', 
self.upper_bound[2]) + species.add_new_group_attr(source_name, "xmin", self.lower_bound[0]) + species.add_new_group_attr(source_name, "xmax", self.upper_bound[0]) + species.add_new_group_attr(source_name, "ymin", self.lower_bound[1]) + species.add_new_group_attr(source_name, "ymax", self.upper_bound[1]) + species.add_new_group_attr(source_name, "zmin", self.lower_bound[2]) + species.add_new_group_attr(source_name, "zmax", self.upper_bound[2]) if self.fill_in: - species.add_new_group_attr(source_name, 'do_continuous_injection', 1) + species.add_new_group_attr(source_name, "do_continuous_injection", 1) # --- Note that WarpX takes gamma*beta as input - if (hasattr(self, "momentum_spread_expressions") - and np.any(np.not_equal(self.momentum_spread_expressions, None)) + if hasattr(self, "momentum_spread_expressions") and np.any( + np.not_equal(self.momentum_spread_expressions, None) ): - species.momentum_distribution_type = 'gaussian_parse_momentum_function' - self.setup_parse_momentum_functions(species, source_name, self.momentum_expressions, '_m', self.directed_velocity) - self.setup_parse_momentum_functions(species, source_name, self.momentum_spread_expressions, '_th', [0.,0.,0.]) - elif (hasattr(self, "momentum_expressions") - and np.any(np.not_equal(self.momentum_expressions, None)) + species.momentum_distribution_type = "gaussian_parse_momentum_function" + self.setup_parse_momentum_functions( + species, + source_name, + self.momentum_expressions, + "_m", + self.directed_velocity, + ) + self.setup_parse_momentum_functions( + species, + source_name, + self.momentum_spread_expressions, + "_th", + [0.0, 0.0, 0.0], + ) + elif hasattr(self, "momentum_expressions") and np.any( + np.not_equal(self.momentum_expressions, None) ): - species.add_new_group_attr(source_name, 'momentum_distribution_type', 'parse_momentum_function') - self.setup_parse_momentum_functions(species, source_name, self.momentum_expressions, '', self.directed_velocity) - elif 
np.any(np.not_equal(self.rms_velocity, 0.)): - species.add_new_group_attr(source_name, 'momentum_distribution_type', "gaussian") - species.add_new_group_attr(source_name, 'ux_m', self.directed_velocity[0]/constants.c) - species.add_new_group_attr(source_name, 'uy_m', self.directed_velocity[1]/constants.c) - species.add_new_group_attr(source_name, 'uz_m', self.directed_velocity[2]/constants.c) - species.add_new_group_attr(source_name, 'ux_th', self.rms_velocity[0]/constants.c) - species.add_new_group_attr(source_name, 'uy_th', self.rms_velocity[1]/constants.c) - species.add_new_group_attr(source_name, 'uz_th', self.rms_velocity[2]/constants.c) + species.add_new_group_attr( + source_name, "momentum_distribution_type", "parse_momentum_function" + ) + self.setup_parse_momentum_functions( + species, + source_name, + self.momentum_expressions, + "", + self.directed_velocity, + ) + elif np.any(np.not_equal(self.rms_velocity, 0.0)): + species.add_new_group_attr( + source_name, "momentum_distribution_type", "gaussian" + ) + species.add_new_group_attr( + source_name, "ux_m", self.directed_velocity[0] / constants.c + ) + species.add_new_group_attr( + source_name, "uy_m", self.directed_velocity[1] / constants.c + ) + species.add_new_group_attr( + source_name, "uz_m", self.directed_velocity[2] / constants.c + ) + species.add_new_group_attr( + source_name, "ux_th", self.rms_velocity[0] / constants.c + ) + species.add_new_group_attr( + source_name, "uy_th", self.rms_velocity[1] / constants.c + ) + species.add_new_group_attr( + source_name, "uz_th", self.rms_velocity[2] / constants.c + ) else: - species.add_new_group_attr(source_name, 'momentum_distribution_type', "constant") - species.add_new_group_attr(source_name, 'ux', self.directed_velocity[0]/constants.c) - species.add_new_group_attr(source_name, 'uy', self.directed_velocity[1]/constants.c) - species.add_new_group_attr(source_name, 'uz', self.directed_velocity[2]/constants.c) - - if hasattr(self, 'density_min'): - 
species.add_new_group_attr(source_name, 'density_min', self.density_min) - if hasattr(self, 'density_max'): - species.add_new_group_attr(source_name, 'density_max', self.density_max) - - def setup_parse_momentum_functions(self, species, source_name, expressions, suffix, defaults): - for sdir, idir in zip(['x', 'y', 'z'], [0, 1, 2]): + species.add_new_group_attr( + source_name, "momentum_distribution_type", "constant" + ) + species.add_new_group_attr( + source_name, "ux", self.directed_velocity[0] / constants.c + ) + species.add_new_group_attr( + source_name, "uy", self.directed_velocity[1] / constants.c + ) + species.add_new_group_attr( + source_name, "uz", self.directed_velocity[2] / constants.c + ) + + if hasattr(self, "density_min"): + species.add_new_group_attr(source_name, "density_min", self.density_min) + if hasattr(self, "density_max"): + species.add_new_group_attr(source_name, "density_max", self.density_max) + + def setup_parse_momentum_functions( + self, species, source_name, expressions, suffix, defaults + ): + for sdir, idir in zip(["x", "y", "z"], [0, 1, 2]): if expressions[idir] is not None: - expression = pywarpx.my_constants.mangle_expression(expressions[idir], self.mangle_dict) + expression = pywarpx.my_constants.mangle_expression( + expressions[idir], self.mangle_dict + ) else: - expression = f'{defaults[idir]}' - species.add_new_group_attr(source_name, f'momentum_function_u{sdir}{suffix}(x,y,z)', f'({expression})/{constants.c}') - + expression = f"{defaults[idir]}" + species.add_new_group_attr( + source_name, + f"momentum_function_u{sdir}{suffix}(x,y,z)", + f"({expression})/{constants.c}", + ) -class UniformFluxDistribution(picmistandard.PICMI_UniformFluxDistribution, DensityDistributionBase): - def distribution_initialize_inputs(self, species_number, layout, species, density_scale, source_name): +class UniformFluxDistribution( + picmistandard.PICMI_UniformFluxDistribution, DensityDistributionBase +): + def distribution_initialize_inputs( + 
self, species_number, layout, species, density_scale, source_name + ): self.fill_in = False self.set_mangle_dict() self.set_species_attributes(species, layout, source_name) - species.add_new_group_attr(source_name, 'flux_profile', "constant") - species.add_new_group_attr(source_name, 'flux', self.flux) + species.add_new_group_attr(source_name, "flux_profile", "constant") + species.add_new_group_attr(source_name, "flux", self.flux) if density_scale is not None: - species.add_new_group_attr(source_name, 'flux', density_scale) - species.add_new_group_attr(source_name, 'flux_normal_axis', self.flux_normal_axis) - species.add_new_group_attr(source_name, 'surface_flux_pos', self.surface_flux_position) - species.add_new_group_attr(source_name, 'flux_direction', self.flux_direction) - species.add_new_group_attr(source_name, 'flux_tmin', self.flux_tmin) - species.add_new_group_attr(source_name, 'flux_tmax', self.flux_tmax) + species.add_new_group_attr(source_name, "flux", density_scale) + species.add_new_group_attr( + source_name, "flux_normal_axis", self.flux_normal_axis + ) + species.add_new_group_attr( + source_name, "surface_flux_pos", self.surface_flux_position + ) + species.add_new_group_attr(source_name, "flux_direction", self.flux_direction) + species.add_new_group_attr(source_name, "flux_tmin", self.flux_tmin) + species.add_new_group_attr(source_name, "flux_tmax", self.flux_tmax) # --- Use specific attributes for flux injection - species.add_new_group_attr(source_name, 'injection_style', "nfluxpercell") - assert (isinstance(layout, PseudoRandomLayout)), Exception('UniformFluxDistribution only supports the PseudoRandomLayout in WarpX') + species.add_new_group_attr(source_name, "injection_style", "nfluxpercell") + assert isinstance(layout, PseudoRandomLayout), Exception( + "UniformFluxDistribution only supports the PseudoRandomLayout in WarpX" + ) if self.gaussian_flux_momentum_distribution: - species.add_new_group_attr(source_name, 'momentum_distribution_type', 
"gaussianflux") - + species.add_new_group_attr( + source_name, "momentum_distribution_type", "gaussianflux" + ) -class UniformDistribution(picmistandard.PICMI_UniformDistribution, DensityDistributionBase): - def distribution_initialize_inputs(self, species_number, layout, species, density_scale, source_name): +class UniformDistribution( + picmistandard.PICMI_UniformDistribution, DensityDistributionBase +): + def distribution_initialize_inputs( + self, species_number, layout, species, density_scale, source_name + ): self.set_mangle_dict() self.set_species_attributes(species, layout, source_name) # --- Only constant density is supported by this class - species.add_new_group_attr(source_name, 'profile', "constant") - species.add_new_group_attr(source_name, 'density', self.density) + species.add_new_group_attr(source_name, "profile", "constant") + species.add_new_group_attr(source_name, "density", self.density) if density_scale is not None: - species.add_new_group_attr(source_name, 'density', density_scale) + species.add_new_group_attr(source_name, "density", density_scale) -class AnalyticDistribution(picmistandard.PICMI_AnalyticDistribution, DensityDistributionBase): +class AnalyticDistribution( + picmistandard.PICMI_AnalyticDistribution, DensityDistributionBase +): """ Parameters ---------- @@ -575,43 +760,68 @@ class AnalyticDistribution(picmistandard.PICMI_AnalyticDistribution, DensityDist For any axis not supplied (set to None), zero will be used. 
""" - def init(self, kw): - self.density_min = kw.pop('warpx_density_min', None) - self.density_max = kw.pop('warpx_density_max', None) - self.momentum_spread_expressions = kw.pop('warpx_momentum_spread_expressions', [None, None, None]) - def distribution_initialize_inputs(self, species_number, layout, species, density_scale, source_name): + def init(self, kw): + self.density_min = kw.pop("warpx_density_min", None) + self.density_max = kw.pop("warpx_density_max", None) + self.momentum_spread_expressions = kw.pop( + "warpx_momentum_spread_expressions", [None, None, None] + ) + def distribution_initialize_inputs( + self, species_number, layout, species, density_scale, source_name + ): self.set_mangle_dict() self.set_species_attributes(species, layout, source_name) - species.add_new_group_attr(source_name, 'profile', "parse_density_function") - expression = pywarpx.my_constants.mangle_expression(self.density_expression, self.mangle_dict) + species.add_new_group_attr(source_name, "profile", "parse_density_function") + expression = pywarpx.my_constants.mangle_expression( + self.density_expression, self.mangle_dict + ) if density_scale is None: - species.add_new_group_attr(source_name, 'density_function(x,y,z)', expression) + species.add_new_group_attr( + source_name, "density_function(x,y,z)", expression + ) else: - species.add_new_group_attr(source_name, 'density_function(x,y,z)', "{}*({})".format(density_scale, expression)) + species.add_new_group_attr( + source_name, + "density_function(x,y,z)", + "{}*({})".format(density_scale, expression), + ) class ParticleListDistribution(picmistandard.PICMI_ParticleListDistribution): def init(self, kw): pass - def distribution_initialize_inputs(self, species_number, layout, species, density_scale, source_name): - - species.add_new_group_attr(source_name, 'injection_style', "multipleparticles") - species.add_new_group_attr(source_name, 'multiple_particles_pos_x', self.x) - species.add_new_group_attr(source_name, 
'multiple_particles_pos_y', self.y) - species.add_new_group_attr(source_name, 'multiple_particles_pos_z', self.z) - species.add_new_group_attr(source_name, 'multiple_particles_ux', np.array(self.ux)/constants.c) - species.add_new_group_attr(source_name, 'multiple_particles_uy', np.array(self.uy)/constants.c) - species.add_new_group_attr(source_name, 'multiple_particles_uz', np.array(self.uz)/constants.c) - species.add_new_group_attr(source_name, 'multiple_particles_weight', self.weight) + def distribution_initialize_inputs( + self, species_number, layout, species, density_scale, source_name + ): + species.add_new_group_attr(source_name, "injection_style", "multipleparticles") + species.add_new_group_attr(source_name, "multiple_particles_pos_x", self.x) + species.add_new_group_attr(source_name, "multiple_particles_pos_y", self.y) + species.add_new_group_attr(source_name, "multiple_particles_pos_z", self.z) + species.add_new_group_attr( + source_name, "multiple_particles_ux", np.array(self.ux) / constants.c + ) + species.add_new_group_attr( + source_name, "multiple_particles_uy", np.array(self.uy) / constants.c + ) + species.add_new_group_attr( + source_name, "multiple_particles_uz", np.array(self.uz) / constants.c + ) + species.add_new_group_attr( + source_name, "multiple_particles_weight", self.weight + ) if density_scale is not None: - species.add_new_group_attr(source_name, 'multiple_particles_weight', self.weight*density_scale) + species.add_new_group_attr( + source_name, "multiple_particles_weight", self.weight * density_scale + ) -class ParticleDistributionPlanarInjector(picmistandard.PICMI_ParticleDistributionPlanarInjector): +class ParticleDistributionPlanarInjector( + picmistandard.PICMI_ParticleDistributionPlanarInjector +): pass @@ -622,11 +832,12 @@ class GriddedLayout(picmistandard.PICMI_GriddedLayout): class PseudoRandomLayout(picmistandard.PICMI_PseudoRandomLayout): def init(self, kw): if self.seed is not None: - print('Warning: WarpX does not support 
specifying the random number seed in PseudoRandomLayout') + print( + "Warning: WarpX does not support specifying the random number seed in PseudoRandomLayout" + ) class BinomialSmoother(picmistandard.PICMI_BinomialSmoother): - def smoother_initialize_inputs(self, solver): pywarpx.warpx.use_filter = 1 pywarpx.warpx.use_filter_compensation = bool(np.all(self.compensation)) @@ -638,7 +849,7 @@ def smoother_initialize_inputs(self, solver): len(self.n_pass) except TypeError: # If not, make it a vector - self.n_pass = solver.grid.number_of_dimensions*[self.n_pass] + self.n_pass = solver.grid.number_of_dimensions * [self.n_pass] pywarpx.warpx.filter_npass_each_dir = self.n_pass @@ -696,34 +907,35 @@ class CylindricalGrid(picmistandard.PICMI_CylindricalGrid): specify the thermal speed for each species in the form {``: u_th}. Note: u_th = sqrt(T*q_e/mass)/clight with T in eV. """ + def init(self, kw): - self.max_grid_size = kw.pop('warpx_max_grid_size', 32) - self.max_grid_size_x = kw.pop('warpx_max_grid_size_x', None) - self.max_grid_size_y = kw.pop('warpx_max_grid_size_y', None) - self.blocking_factor = kw.pop('warpx_blocking_factor', None) - self.blocking_factor_x = kw.pop('warpx_blocking_factor_x', None) - self.blocking_factor_y = kw.pop('warpx_blocking_factor_y', None) - - self.potential_xmin = kw.pop('warpx_potential_lo_r', None) - self.potential_xmax = kw.pop('warpx_potential_hi_r', None) + self.max_grid_size = kw.pop("warpx_max_grid_size", 32) + self.max_grid_size_x = kw.pop("warpx_max_grid_size_x", None) + self.max_grid_size_y = kw.pop("warpx_max_grid_size_y", None) + self.blocking_factor = kw.pop("warpx_blocking_factor", None) + self.blocking_factor_x = kw.pop("warpx_blocking_factor_x", None) + self.blocking_factor_y = kw.pop("warpx_blocking_factor_y", None) + + self.potential_xmin = kw.pop("warpx_potential_lo_r", None) + self.potential_xmax = kw.pop("warpx_potential_hi_r", None) self.potential_ymin = None self.potential_ymax = None - self.potential_zmin = 
kw.pop('warpx_potential_lo_z', None) - self.potential_zmax = kw.pop('warpx_potential_hi_z', None) - self.reflect_all_velocities = kw.pop('warpx_reflect_all_velocities', None) + self.potential_zmin = kw.pop("warpx_potential_lo_z", None) + self.potential_zmax = kw.pop("warpx_potential_hi_z", None) + self.reflect_all_velocities = kw.pop("warpx_reflect_all_velocities", None) - self.start_moving_window_step = kw.pop('warpx_start_moving_window_step', None) - self.end_moving_window_step = kw.pop('warpx_end_moving_window_step', None) + self.start_moving_window_step = kw.pop("warpx_start_moving_window_step", None) + self.end_moving_window_step = kw.pop("warpx_end_moving_window_step", None) # Geometry # Set these as soon as the information is available # (since these are needed to determine which shared object to load) - pywarpx.geometry.dims = 'RZ' + pywarpx.geometry.dims = "RZ" pywarpx.geometry.prob_lo = self.lower_bound # physical domain pywarpx.geometry.prob_hi = self.upper_bound # if a thermal boundary is used for particles, get the thermal speeds - self.thermal_boundary_u_th = kw.pop('warpx_boundary_u_th', None) + self.thermal_boundary_u_th = kw.pop("warpx_boundary_u_th", None) def grid_initialize_inputs(self): pywarpx.amr.n_cell = self.number_of_cells @@ -737,37 +949,56 @@ def grid_initialize_inputs(self): pywarpx.amr.blocking_factor_x = self.blocking_factor_x pywarpx.amr.blocking_factor_y = self.blocking_factor_y - assert self.lower_bound[0] >= 0., Exception('Lower radial boundary must be >= 0.') - assert self.lower_boundary_conditions[0] != 'periodic' and self.upper_boundary_conditions[0] != 'periodic', Exception('Radial boundaries can not be periodic') + assert self.lower_bound[0] >= 0.0, Exception( + "Lower radial boundary must be >= 0." 
+ ) + assert ( + self.lower_boundary_conditions[0] != "periodic" + and self.upper_boundary_conditions[0] != "periodic" + ), Exception("Radial boundaries can not be periodic") pywarpx.warpx.n_rz_azimuthal_modes = self.n_azimuthal_modes # Boundary conditions - pywarpx.boundary.field_lo = [BC_map[bc] for bc in self.lower_boundary_conditions] - pywarpx.boundary.field_hi = [BC_map[bc] for bc in self.upper_boundary_conditions] + pywarpx.boundary.field_lo = [ + BC_map[bc] for bc in self.lower_boundary_conditions + ] + pywarpx.boundary.field_hi = [ + BC_map[bc] for bc in self.upper_boundary_conditions + ] pywarpx.boundary.particle_lo = self.lower_boundary_conditions_particles pywarpx.boundary.particle_hi = self.upper_boundary_conditions_particles pywarpx.boundary.reflect_all_velocities = self.reflect_all_velocities if self.thermal_boundary_u_th is not None: for name, val in self.thermal_boundary_u_th.items(): - pywarpx.boundary.__setattr__(f'{name}.u_th', val) + pywarpx.boundary.__setattr__(f"{name}.u_th", val) - if self.moving_window_velocity is not None and np.any(np.not_equal(self.moving_window_velocity, 0.)): + if self.moving_window_velocity is not None and np.any( + np.not_equal(self.moving_window_velocity, 0.0) + ): pywarpx.warpx.do_moving_window = 1 - if self.moving_window_velocity[0] != 0.: - pywarpx.warpx.moving_window_dir = 'r' - pywarpx.warpx.moving_window_v = self.moving_window_velocity[0]/constants.c # in units of the speed of light - if self.moving_window_velocity[1] != 0.: - pywarpx.warpx.moving_window_dir = 'z' - pywarpx.warpx.moving_window_v = self.moving_window_velocity[1]/constants.c # in units of the speed of light + if self.moving_window_velocity[0] != 0.0: + pywarpx.warpx.moving_window_dir = "r" + pywarpx.warpx.moving_window_v = ( + self.moving_window_velocity[0] / constants.c + ) # in units of the speed of light + if self.moving_window_velocity[1] != 0.0: + pywarpx.warpx.moving_window_dir = "z" + pywarpx.warpx.moving_window_v = ( + 
self.moving_window_velocity[1] / constants.c + ) # in units of the speed of light pywarpx.warpx.start_moving_window_step = self.start_moving_window_step pywarpx.warpx.end_moving_window_step = self.end_moving_window_step if self.refined_regions: - assert len(self.refined_regions) == 1, Exception('WarpX only supports one refined region.') - assert self.refined_regions[0][0] == 1, Exception('The one refined region can only be level 1') + assert len(self.refined_regions) == 1, Exception( + "WarpX only supports one refined region." + ) + assert self.refined_regions[0][0] == 1, Exception( + "The one refined region can only be level 1" + ) pywarpx.amr.max_level = 1 pywarpx.warpx.fine_tag_lo = self.refined_regions[0][1] pywarpx.warpx.fine_tag_hi = self.refined_regions[0][2] @@ -812,31 +1043,32 @@ class Cartesian1DGrid(picmistandard.PICMI_Cartesian1DGrid): specify the thermal speed for each species in the form {``: u_th}. Note: u_th = sqrt(T*q_e/mass)/clight with T in eV. """ + def init(self, kw): - self.max_grid_size = kw.pop('warpx_max_grid_size', 32) - self.max_grid_size_x = kw.pop('warpx_max_grid_size_x', None) - self.blocking_factor = kw.pop('warpx_blocking_factor', None) - self.blocking_factor_x = kw.pop('warpx_blocking_factor_x', None) + self.max_grid_size = kw.pop("warpx_max_grid_size", 32) + self.max_grid_size_x = kw.pop("warpx_max_grid_size_x", None) + self.blocking_factor = kw.pop("warpx_blocking_factor", None) + self.blocking_factor_x = kw.pop("warpx_blocking_factor_x", None) self.potential_xmin = None self.potential_xmax = None self.potential_ymin = None self.potential_ymax = None - self.potential_zmin = kw.pop('warpx_potential_lo_z', None) - self.potential_zmax = kw.pop('warpx_potential_hi_z', None) + self.potential_zmin = kw.pop("warpx_potential_lo_z", None) + self.potential_zmax = kw.pop("warpx_potential_hi_z", None) - self.start_moving_window_step = kw.pop('warpx_start_moving_window_step', None) - self.end_moving_window_step = 
kw.pop('warpx_end_moving_window_step', None) + self.start_moving_window_step = kw.pop("warpx_start_moving_window_step", None) + self.end_moving_window_step = kw.pop("warpx_end_moving_window_step", None) # Geometry # Set these as soon as the information is available # (since these are needed to determine which shared object to load) - pywarpx.geometry.dims = '1' + pywarpx.geometry.dims = "1" pywarpx.geometry.prob_lo = self.lower_bound # physical domain pywarpx.geometry.prob_hi = self.upper_bound # if a thermal boundary is used for particles, get the thermal speeds - self.thermal_boundary_u_th = kw.pop('warpx_boundary_u_th', None) + self.thermal_boundary_u_th = kw.pop("warpx_boundary_u_th", None) def grid_initialize_inputs(self): pywarpx.amr.n_cell = self.number_of_cells @@ -849,27 +1081,39 @@ def grid_initialize_inputs(self): pywarpx.amr.blocking_factor_x = self.blocking_factor_x # Boundary conditions - pywarpx.boundary.field_lo = [BC_map[bc] for bc in self.lower_boundary_conditions] - pywarpx.boundary.field_hi = [BC_map[bc] for bc in self.upper_boundary_conditions] + pywarpx.boundary.field_lo = [ + BC_map[bc] for bc in self.lower_boundary_conditions + ] + pywarpx.boundary.field_hi = [ + BC_map[bc] for bc in self.upper_boundary_conditions + ] pywarpx.boundary.particle_lo = self.lower_boundary_conditions_particles pywarpx.boundary.particle_hi = self.upper_boundary_conditions_particles if self.thermal_boundary_u_th is not None: for name, val in self.thermal_boundary_u_th.items(): - pywarpx.boundary.__setattr__(f'{name}.u_th', val) + pywarpx.boundary.__setattr__(f"{name}.u_th", val) - if self.moving_window_velocity is not None and np.any(np.not_equal(self.moving_window_velocity, 0.)): + if self.moving_window_velocity is not None and np.any( + np.not_equal(self.moving_window_velocity, 0.0) + ): pywarpx.warpx.do_moving_window = 1 - if self.moving_window_velocity[0] != 0.: - pywarpx.warpx.moving_window_dir = 'z' - pywarpx.warpx.moving_window_v = 
self.moving_window_velocity[0]/constants.c # in units of the speed of light + if self.moving_window_velocity[0] != 0.0: + pywarpx.warpx.moving_window_dir = "z" + pywarpx.warpx.moving_window_v = ( + self.moving_window_velocity[0] / constants.c + ) # in units of the speed of light pywarpx.warpx.start_moving_window_step = self.start_moving_window_step pywarpx.warpx.end_moving_window_step = self.end_moving_window_step if self.refined_regions: - assert len(self.refined_regions) == 1, Exception('WarpX only supports one refined region.') - assert self.refined_regions[0][0] == 1, Exception('The one refined region can only be level 1') + assert len(self.refined_regions) == 1, Exception( + "WarpX only supports one refined region." + ) + assert self.refined_regions[0][0] == 1, Exception( + "The one refined region can only be level 1" + ) pywarpx.amr.max_level = 1 pywarpx.warpx.fine_tag_lo = self.refined_regions[0][1] pywarpx.warpx.fine_tag_hi = self.refined_regions[0][2] @@ -877,6 +1121,7 @@ def grid_initialize_inputs(self): else: pywarpx.amr.max_level = 0 + class Cartesian2DGrid(picmistandard.PICMI_Cartesian2DGrid): """ See `Input Parameters `__ for more information. @@ -925,33 +1170,34 @@ class Cartesian2DGrid(picmistandard.PICMI_Cartesian2DGrid): specify the thermal speed for each species in the form {``: u_th}. Note: u_th = sqrt(T*q_e/mass)/clight with T in eV. 
""" + def init(self, kw): - self.max_grid_size = kw.pop('warpx_max_grid_size', 32) - self.max_grid_size_x = kw.pop('warpx_max_grid_size_x', None) - self.max_grid_size_y = kw.pop('warpx_max_grid_size_y', None) - self.blocking_factor = kw.pop('warpx_blocking_factor', None) - self.blocking_factor_x = kw.pop('warpx_blocking_factor_x', None) - self.blocking_factor_y = kw.pop('warpx_blocking_factor_y', None) - - self.potential_xmin = kw.pop('warpx_potential_lo_x', None) - self.potential_xmax = kw.pop('warpx_potential_hi_x', None) + self.max_grid_size = kw.pop("warpx_max_grid_size", 32) + self.max_grid_size_x = kw.pop("warpx_max_grid_size_x", None) + self.max_grid_size_y = kw.pop("warpx_max_grid_size_y", None) + self.blocking_factor = kw.pop("warpx_blocking_factor", None) + self.blocking_factor_x = kw.pop("warpx_blocking_factor_x", None) + self.blocking_factor_y = kw.pop("warpx_blocking_factor_y", None) + + self.potential_xmin = kw.pop("warpx_potential_lo_x", None) + self.potential_xmax = kw.pop("warpx_potential_hi_x", None) self.potential_ymin = None self.potential_ymax = None - self.potential_zmin = kw.pop('warpx_potential_lo_z', None) - self.potential_zmax = kw.pop('warpx_potential_hi_z', None) + self.potential_zmin = kw.pop("warpx_potential_lo_z", None) + self.potential_zmax = kw.pop("warpx_potential_hi_z", None) - self.start_moving_window_step = kw.pop('warpx_start_moving_window_step', None) - self.end_moving_window_step = kw.pop('warpx_end_moving_window_step', None) + self.start_moving_window_step = kw.pop("warpx_start_moving_window_step", None) + self.end_moving_window_step = kw.pop("warpx_end_moving_window_step", None) # Geometry # Set these as soon as the information is available # (since these are needed to determine which shared object to load) - pywarpx.geometry.dims = '2' + pywarpx.geometry.dims = "2" pywarpx.geometry.prob_lo = self.lower_bound # physical domain pywarpx.geometry.prob_hi = self.upper_bound # if a thermal boundary is used for particles, get the 
thermal speeds - self.thermal_boundary_u_th = kw.pop('warpx_boundary_u_th', None) + self.thermal_boundary_u_th = kw.pop("warpx_boundary_u_th", None) def grid_initialize_inputs(self): pywarpx.amr.n_cell = self.number_of_cells @@ -966,30 +1212,44 @@ def grid_initialize_inputs(self): pywarpx.amr.blocking_factor_y = self.blocking_factor_y # Boundary conditions - pywarpx.boundary.field_lo = [BC_map[bc] for bc in self.lower_boundary_conditions] - pywarpx.boundary.field_hi = [BC_map[bc] for bc in self.upper_boundary_conditions] + pywarpx.boundary.field_lo = [ + BC_map[bc] for bc in self.lower_boundary_conditions + ] + pywarpx.boundary.field_hi = [ + BC_map[bc] for bc in self.upper_boundary_conditions + ] pywarpx.boundary.particle_lo = self.lower_boundary_conditions_particles pywarpx.boundary.particle_hi = self.upper_boundary_conditions_particles if self.thermal_boundary_u_th is not None: for name, val in self.thermal_boundary_u_th.items(): - pywarpx.boundary.__setattr__(f'{name}.u_th', val) + pywarpx.boundary.__setattr__(f"{name}.u_th", val) - if self.moving_window_velocity is not None and np.any(np.not_equal(self.moving_window_velocity, 0.)): + if self.moving_window_velocity is not None and np.any( + np.not_equal(self.moving_window_velocity, 0.0) + ): pywarpx.warpx.do_moving_window = 1 - if self.moving_window_velocity[0] != 0.: - pywarpx.warpx.moving_window_dir = 'x' - pywarpx.warpx.moving_window_v = self.moving_window_velocity[0]/constants.c # in units of the speed of light - if self.moving_window_velocity[1] != 0.: - pywarpx.warpx.moving_window_dir = 'z' - pywarpx.warpx.moving_window_v = self.moving_window_velocity[1]/constants.c # in units of the speed of light + if self.moving_window_velocity[0] != 0.0: + pywarpx.warpx.moving_window_dir = "x" + pywarpx.warpx.moving_window_v = ( + self.moving_window_velocity[0] / constants.c + ) # in units of the speed of light + if self.moving_window_velocity[1] != 0.0: + pywarpx.warpx.moving_window_dir = "z" + 
pywarpx.warpx.moving_window_v = ( + self.moving_window_velocity[1] / constants.c + ) # in units of the speed of light pywarpx.warpx.start_moving_window_step = self.start_moving_window_step pywarpx.warpx.end_moving_window_step = self.end_moving_window_step if self.refined_regions: - assert len(self.refined_regions) == 1, Exception('WarpX only supports one refined region.') - assert self.refined_regions[0][0] == 1, Exception('The one refined region can only be level 1') + assert len(self.refined_regions) == 1, Exception( + "WarpX only supports one refined region." + ) + assert self.refined_regions[0][0] == 1, Exception( + "The one refined region can only be level 1" + ) pywarpx.amr.max_level = 1 pywarpx.warpx.fine_tag_lo = self.refined_regions[0][1] pywarpx.warpx.fine_tag_hi = self.refined_regions[0][2] @@ -1058,35 +1318,36 @@ class Cartesian3DGrid(picmistandard.PICMI_Cartesian3DGrid): specify the thermal speed for each species in the form {``: u_th}. Note: u_th = sqrt(T*q_e/mass)/clight with T in eV. 
""" + def init(self, kw): - self.max_grid_size = kw.pop('warpx_max_grid_size', 32) - self.max_grid_size_x = kw.pop('warpx_max_grid_size_x', None) - self.max_grid_size_y = kw.pop('warpx_max_grid_size_y', None) - self.max_grid_size_z = kw.pop('warpx_max_grid_size_z', None) - self.blocking_factor = kw.pop('warpx_blocking_factor', None) - self.blocking_factor_x = kw.pop('warpx_blocking_factor_x', None) - self.blocking_factor_y = kw.pop('warpx_blocking_factor_y', None) - self.blocking_factor_z = kw.pop('warpx_blocking_factor_z', None) - - self.potential_xmin = kw.pop('warpx_potential_lo_x', None) - self.potential_xmax = kw.pop('warpx_potential_hi_x', None) - self.potential_ymin = kw.pop('warpx_potential_lo_y', None) - self.potential_ymax = kw.pop('warpx_potential_hi_y', None) - self.potential_zmin = kw.pop('warpx_potential_lo_z', None) - self.potential_zmax = kw.pop('warpx_potential_hi_z', None) - - self.start_moving_window_step = kw.pop('warpx_start_moving_window_step', None) - self.end_moving_window_step = kw.pop('warpx_end_moving_window_step', None) + self.max_grid_size = kw.pop("warpx_max_grid_size", 32) + self.max_grid_size_x = kw.pop("warpx_max_grid_size_x", None) + self.max_grid_size_y = kw.pop("warpx_max_grid_size_y", None) + self.max_grid_size_z = kw.pop("warpx_max_grid_size_z", None) + self.blocking_factor = kw.pop("warpx_blocking_factor", None) + self.blocking_factor_x = kw.pop("warpx_blocking_factor_x", None) + self.blocking_factor_y = kw.pop("warpx_blocking_factor_y", None) + self.blocking_factor_z = kw.pop("warpx_blocking_factor_z", None) + + self.potential_xmin = kw.pop("warpx_potential_lo_x", None) + self.potential_xmax = kw.pop("warpx_potential_hi_x", None) + self.potential_ymin = kw.pop("warpx_potential_lo_y", None) + self.potential_ymax = kw.pop("warpx_potential_hi_y", None) + self.potential_zmin = kw.pop("warpx_potential_lo_z", None) + self.potential_zmax = kw.pop("warpx_potential_hi_z", None) + + self.start_moving_window_step = 
kw.pop("warpx_start_moving_window_step", None) + self.end_moving_window_step = kw.pop("warpx_end_moving_window_step", None) # Geometry # Set these as soon as the information is available # (since these are needed to determine which shared object to load) - pywarpx.geometry.dims = '3' + pywarpx.geometry.dims = "3" pywarpx.geometry.prob_lo = self.lower_bound # physical domain pywarpx.geometry.prob_hi = self.upper_bound # if a thermal boundary is used for particles, get the thermal speeds - self.thermal_boundary_u_th = kw.pop('warpx_boundary_u_th', None) + self.thermal_boundary_u_th = kw.pop("warpx_boundary_u_th", None) def grid_initialize_inputs(self): pywarpx.amr.n_cell = self.number_of_cells @@ -1103,33 +1364,49 @@ def grid_initialize_inputs(self): pywarpx.amr.blocking_factor_z = self.blocking_factor_z # Boundary conditions - pywarpx.boundary.field_lo = [BC_map[bc] for bc in self.lower_boundary_conditions] - pywarpx.boundary.field_hi = [BC_map[bc] for bc in self.upper_boundary_conditions] + pywarpx.boundary.field_lo = [ + BC_map[bc] for bc in self.lower_boundary_conditions + ] + pywarpx.boundary.field_hi = [ + BC_map[bc] for bc in self.upper_boundary_conditions + ] pywarpx.boundary.particle_lo = self.lower_boundary_conditions_particles pywarpx.boundary.particle_hi = self.upper_boundary_conditions_particles if self.thermal_boundary_u_th is not None: for name, val in self.thermal_boundary_u_th.items(): - pywarpx.boundary.__setattr__(f'{name}.u_th', val) + pywarpx.boundary.__setattr__(f"{name}.u_th", val) - if self.moving_window_velocity is not None and np.any(np.not_equal(self.moving_window_velocity, 0.)): + if self.moving_window_velocity is not None and np.any( + np.not_equal(self.moving_window_velocity, 0.0) + ): pywarpx.warpx.do_moving_window = 1 - if self.moving_window_velocity[0] != 0.: - pywarpx.warpx.moving_window_dir = 'x' - pywarpx.warpx.moving_window_v = self.moving_window_velocity[0]/constants.c # in units of the speed of light - if 
self.moving_window_velocity[1] != 0.: - pywarpx.warpx.moving_window_dir = 'y' - pywarpx.warpx.moving_window_v = self.moving_window_velocity[1]/constants.c # in units of the speed of light - if self.moving_window_velocity[2] != 0.: - pywarpx.warpx.moving_window_dir = 'z' - pywarpx.warpx.moving_window_v = self.moving_window_velocity[2]/constants.c # in units of the speed of light + if self.moving_window_velocity[0] != 0.0: + pywarpx.warpx.moving_window_dir = "x" + pywarpx.warpx.moving_window_v = ( + self.moving_window_velocity[0] / constants.c + ) # in units of the speed of light + if self.moving_window_velocity[1] != 0.0: + pywarpx.warpx.moving_window_dir = "y" + pywarpx.warpx.moving_window_v = ( + self.moving_window_velocity[1] / constants.c + ) # in units of the speed of light + if self.moving_window_velocity[2] != 0.0: + pywarpx.warpx.moving_window_dir = "z" + pywarpx.warpx.moving_window_v = ( + self.moving_window_velocity[2] / constants.c + ) # in units of the speed of light pywarpx.warpx.start_moving_window_step = self.start_moving_window_step pywarpx.warpx.end_moving_window_step = self.end_moving_window_step if self.refined_regions: - assert len(self.refined_regions) == 1, Exception('WarpX only supports one refined region.') - assert self.refined_regions[0][0] == 1, Exception('The one refined region can only be level 1') + assert len(self.refined_regions) == 1, Exception( + "WarpX only supports one refined region." 
+ ) + assert self.refined_regions[0][0] == 1, Exception( + "The one refined region can only be level 1" + ) pywarpx.amr.max_level = 1 pywarpx.warpx.fine_tag_lo = self.refined_regions[0][1] pywarpx.warpx.fine_tag_hi = self.refined_regions[0][2] @@ -1180,30 +1457,37 @@ class ElectromagneticSolver(picmistandard.PICMI_ElectromagneticSolver): warpx_do_pml_j_damping: bool, default=False Whether to do damping of J in the PML """ - def init(self, kw): - assert self.method is None or self.method in ['Yee', 'CKC', 'PSATD', 'ECT'], Exception("Only 'Yee', 'CKC', 'PSATD', and 'ECT' are supported") - - self.pml_ncell = kw.pop('warpx_pml_ncell', None) - if self.method == 'PSATD': - self.psatd_periodic_single_box_fft = kw.pop('warpx_periodic_single_box_fft', None) - self.psatd_current_correction = kw.pop('warpx_current_correction', None) - self.psatd_update_with_rho = kw.pop('warpx_psatd_update_with_rho', None) - self.psatd_do_time_averaging = kw.pop('warpx_psatd_do_time_averaging', None) - self.psatd_J_in_time = kw.pop('warpx_psatd_J_in_time', None) - self.psatd_rho_in_time = kw.pop('warpx_psatd_rho_in_time', None) + def init(self, kw): + assert self.method is None or self.method in [ + "Yee", + "CKC", + "PSATD", + "ECT", + ], Exception("Only 'Yee', 'CKC', 'PSATD', and 'ECT' are supported") + + self.pml_ncell = kw.pop("warpx_pml_ncell", None) + + if self.method == "PSATD": + self.psatd_periodic_single_box_fft = kw.pop( + "warpx_periodic_single_box_fft", None + ) + self.psatd_current_correction = kw.pop("warpx_current_correction", None) + self.psatd_update_with_rho = kw.pop("warpx_psatd_update_with_rho", None) + self.psatd_do_time_averaging = kw.pop("warpx_psatd_do_time_averaging", None) + self.psatd_J_in_time = kw.pop("warpx_psatd_J_in_time", None) + self.psatd_rho_in_time = kw.pop("warpx_psatd_rho_in_time", None) - self.do_pml_in_domain = kw.pop('warpx_do_pml_in_domain', None) - self.pml_has_particles = kw.pop('warpx_pml_has_particles', None) - self.do_pml_j_damping = 
kw.pop('warpx_do_pml_j_damping', None) + self.do_pml_in_domain = kw.pop("warpx_do_pml_in_domain", None) + self.pml_has_particles = kw.pop("warpx_pml_has_particles", None) + self.do_pml_j_damping = kw.pop("warpx_do_pml_j_damping", None) def solver_initialize_inputs(self): - self.grid.grid_initialize_inputs() pywarpx.warpx.pml_ncell = self.pml_ncell - if self.method == 'PSATD': + if self.method == "PSATD": pywarpx.psatd.periodic_single_box_fft = self.psatd_periodic_single_box_fft pywarpx.psatd.current_correction = self.psatd_current_correction pywarpx.psatd.update_with_rho = self.psatd_update_with_rho @@ -1225,8 +1509,14 @@ def solver_initialize_inputs(self): if self.galilean_velocity is not None: if self.grid.number_of_dimensions == 2: - self.galilean_velocity = [self.galilean_velocity[0], 0., self.galilean_velocity[1]] - pywarpx.psatd.v_galilean = np.array(self.galilean_velocity)/constants.c + self.galilean_velocity = [ + self.galilean_velocity[0], + 0.0, + self.galilean_velocity[1], + ] + pywarpx.psatd.v_galilean = ( + np.array(self.galilean_velocity) / constants.c + ) # --- Same method names are used, though mapped to lower case. 
pywarpx.algo.maxwell_solver = self.method @@ -1252,8 +1542,9 @@ class ExplicitEvolveScheme(picmistandard.base._ClassWithInit): """ Sets up the explicit evolve scheme """ + def solver_scheme_initialize_inputs(self): - pywarpx.algo.evolve_scheme = 'explicit' + pywarpx.algo.evolve_scheme = "explicit" class ThetaImplicitEMEvolveScheme(picmistandard.base._ClassWithInit): @@ -1268,13 +1559,14 @@ class ThetaImplicitEMEvolveScheme(picmistandard.base._ClassWithInit): theta: float, optional The "theta" parameter, determining the level of implicitness """ - def __init__(self, nonlinear_solver, theta = None): + + def __init__(self, nonlinear_solver, theta=None): self.nonlinear_solver = nonlinear_solver self.theta = theta def solver_scheme_initialize_inputs(self): - pywarpx.algo.evolve_scheme = 'theta_implicit_em' - implicit_evolve = pywarpx.warpx.get_bucket('implicit_evolve') + pywarpx.algo.evolve_scheme = "theta_implicit_em" + implicit_evolve = pywarpx.warpx.get_bucket("implicit_evolve") implicit_evolve.theta = self.theta self.nonlinear_solver.nonlinear_solver_initialize_inputs() @@ -1289,11 +1581,12 @@ class SemiImplicitEMEvolveScheme(picmistandard.base._ClassWithInit): nonlinear_solver: nonlinear solver instance The nonlinear solver to use for the iterations """ + def __init__(self, nonlinear_solver): self.nonlinear_solver = nonlinear_solver def solver_scheme_initialize_inputs(self): - pywarpx.algo.evolve_scheme = 'semi_implicit_em' + pywarpx.algo.evolve_scheme = "semi_implicit_em" self.nonlinear_solver.nonlinear_solver_initialize_inputs() @@ -1319,8 +1612,15 @@ class PicardNonlinearSolver(picmistandard.base._ClassWithInit): require_convergence: bool, default True Whether convergence is required. If True and convergence is not obtained, the code will exit. 
""" - def __init__(self, verbose=None, absolute_tolerance=None, relative_tolerance=None, - max_iterations=None, require_convergence=None): + + def __init__( + self, + verbose=None, + absolute_tolerance=None, + relative_tolerance=None, + max_iterations=None, + require_convergence=None, + ): self.verbose = verbose self.absolute_tolerance = absolute_tolerance self.relative_tolerance = relative_tolerance @@ -1328,10 +1628,10 @@ def __init__(self, verbose=None, absolute_tolerance=None, relative_tolerance=Non self.require_convergence = require_convergence def nonlinear_solver_initialize_inputs(self): - implicit_evolve = pywarpx.warpx.get_bucket('implicit_evolve') - implicit_evolve.nonlinear_solver = 'picard' + implicit_evolve = pywarpx.warpx.get_bucket("implicit_evolve") + implicit_evolve.nonlinear_solver = "picard" - picard = pywarpx.warpx.get_bucket('picard') + picard = pywarpx.warpx.get_bucket("picard") picard.verbose = self.verbose picard.absolute_tolerance = self.absolute_tolerance picard.relative_tolerance = self.relative_tolerance @@ -1370,9 +1670,18 @@ class NewtonNonlinearSolver(picmistandard.base._ClassWithInit): The tolerance of parrticle quantities for convergence """ - def __init__(self, verbose=None, absolute_tolerance=None, relative_tolerance=None, - max_iterations=None, require_convergence=None, linear_solver=None, - max_particle_iterations=None, particle_tolerance=None): + + def __init__( + self, + verbose=None, + absolute_tolerance=None, + relative_tolerance=None, + max_iterations=None, + require_convergence=None, + linear_solver=None, + max_particle_iterations=None, + particle_tolerance=None, + ): self.verbose = verbose self.absolute_tolerance = absolute_tolerance self.relative_tolerance = relative_tolerance @@ -1383,12 +1692,12 @@ def __init__(self, verbose=None, absolute_tolerance=None, relative_tolerance=Non self.particle_tolerance = particle_tolerance def nonlinear_solver_initialize_inputs(self): - implicit_evolve = 
pywarpx.warpx.get_bucket('implicit_evolve') - implicit_evolve.nonlinear_solver = 'newton' + implicit_evolve = pywarpx.warpx.get_bucket("implicit_evolve") + implicit_evolve.nonlinear_solver = "newton" implicit_evolve.max_particle_iterations = self.max_particle_iterations implicit_evolve.particle_tolerance = self.particle_tolerance - newton = pywarpx.warpx.get_bucket('newton') + newton = pywarpx.warpx.get_bucket("newton") newton.verbose = self.verbose newton.absolute_tolerance = self.absolute_tolerance newton.relative_tolerance = self.relative_tolerance @@ -1419,8 +1728,15 @@ class GMRESLinearSolver(picmistandard.base._ClassWithInit): max_iterations: integer, default=1000 Maximum number of iterations """ - def __init__(self, verbose_int=None, restart_length=None, absolute_tolerance=None, relative_tolerance=None, - max_iterations=None): + + def __init__( + self, + verbose_int=None, + restart_length=None, + absolute_tolerance=None, + relative_tolerance=None, + max_iterations=None, + ): self.verbose_int = verbose_int self.restart_length = restart_length self.absolute_tolerance = absolute_tolerance @@ -1428,7 +1744,7 @@ def __init__(self, verbose_int=None, restart_length=None, absolute_tolerance=Non self.max_iterations = max_iterations def linear_solver_initialize_inputs(self): - gmres = pywarpx.warpx.get_bucket('gmres') + gmres = pywarpx.warpx.get_bucket("gmres") gmres.verbose_int = self.verbose_int gmres.restart_length = self.restart_length gmres.absolute_tolerance = self.absolute_tolerance @@ -1467,11 +1783,22 @@ class HybridPICSolver(picmistandard.base._ClassWithInit): Jx/y/z_external_function: str Function of space and time specifying external (non-plasma) currents. 
""" - def __init__(self, grid, Te=None, n0=None, gamma=None, - n_floor=None, plasma_resistivity=None, - plasma_hyper_resistivity=None, substeps=None, - Jx_external_function=None, Jy_external_function=None, - Jz_external_function=None, **kw): + + def __init__( + self, + grid, + Te=None, + n0=None, + gamma=None, + n_floor=None, + plasma_resistivity=None, + plasma_hyper_resistivity=None, + substeps=None, + Jx_external_function=None, + Jy_external_function=None, + Jz_external_function=None, + **kw, + ): self.grid = grid self.method = "hybrid" @@ -1497,7 +1824,6 @@ def __init__(self, grid, Te=None, n0=None, gamma=None, self.handle_init(kw) def solver_initialize_inputs(self): - # Add the user defined keywords to my_constants # The keywords are mangled if there is a conflicting variable already # defined in my_constants with the same name but different value. @@ -1512,22 +1838,30 @@ def solver_initialize_inputs(self): pywarpx.hybridpicmodel.gamma = self.gamma pywarpx.hybridpicmodel.n_floor = self.n_floor pywarpx.hybridpicmodel.__setattr__( - 'plasma_resistivity(rho,J)', - pywarpx.my_constants.mangle_expression(self.plasma_resistivity, self.mangle_dict) + "plasma_resistivity(rho,J)", + pywarpx.my_constants.mangle_expression( + self.plasma_resistivity, self.mangle_dict + ), ) pywarpx.hybridpicmodel.plasma_hyper_resistivity = self.plasma_hyper_resistivity pywarpx.hybridpicmodel.substeps = self.substeps pywarpx.hybridpicmodel.__setattr__( - 'Jx_external_grid_function(x,y,z,t)', - pywarpx.my_constants.mangle_expression(self.Jx_external_function, self.mangle_dict) + "Jx_external_grid_function(x,y,z,t)", + pywarpx.my_constants.mangle_expression( + self.Jx_external_function, self.mangle_dict + ), ) pywarpx.hybridpicmodel.__setattr__( - 'Jy_external_grid_function(x,y,z,t)', - pywarpx.my_constants.mangle_expression(self.Jy_external_function, self.mangle_dict) + "Jy_external_grid_function(x,y,z,t)", + pywarpx.my_constants.mangle_expression( + self.Jy_external_function, 
self.mangle_dict + ), ) pywarpx.hybridpicmodel.__setattr__( - 'Jz_external_grid_function(x,y,z,t)', - pywarpx.my_constants.mangle_expression(self.Jz_external_function, self.mangle_dict) + "Jz_external_grid_function(x,y,z,t)", + pywarpx.my_constants.mangle_expression( + self.Jz_external_function, self.mangle_dict + ), ) @@ -1546,26 +1880,26 @@ class ElectrostaticSolver(picmistandard.PICMI_ElectrostaticSolver): warpx_self_fields_verbosity: integer, default=2 Level of verbosity for the lab frame solver """ + def init(self, kw): - self.relativistic = kw.pop('warpx_relativistic', False) - self.absolute_tolerance = kw.pop('warpx_absolute_tolerance', None) - self.self_fields_verbosity = kw.pop('warpx_self_fields_verbosity', None) - self.magnetostatic = kw.pop('warpx_magnetostatic', False) + self.relativistic = kw.pop("warpx_relativistic", False) + self.absolute_tolerance = kw.pop("warpx_absolute_tolerance", None) + self.self_fields_verbosity = kw.pop("warpx_self_fields_verbosity", None) + self.magnetostatic = kw.pop("warpx_magnetostatic", False) def solver_initialize_inputs(self): - # Open BC means FieldBoundaryType::Open for electrostatic sims, rather than perfectly-matched layer - BC_map['open'] = 'open' + BC_map["open"] = "open" self.grid.grid_initialize_inputs() if self.relativistic: - pywarpx.warpx.do_electrostatic = 'relativistic' + pywarpx.warpx.do_electrostatic = "relativistic" else: if self.magnetostatic: - pywarpx.warpx.do_electrostatic = 'labframe-electromagnetostatic' + pywarpx.warpx.do_electrostatic = "labframe-electromagnetostatic" else: - pywarpx.warpx.do_electrostatic = 'labframe' + pywarpx.warpx.do_electrostatic = "labframe" pywarpx.warpx.self_fields_required_precision = self.required_precision pywarpx.warpx.self_fields_absolute_tolerance = self.absolute_tolerance pywarpx.warpx.self_fields_max_iters = self.maximum_iterations @@ -1584,16 +1918,22 @@ class GaussianLaser(picmistandard.PICMI_GaussianLaser): def laser_initialize_inputs(self): self.laser_number 
= len(pywarpx.lasers.names) + 1 if self.name is None: - self.name = 'laser{}'.format(self.laser_number) + self.name = "laser{}".format(self.laser_number) self.laser = pywarpx.Lasers.newlaser(self.name) self.laser.profile = "Gaussian" - self.laser.wavelength = self.wavelength # The wavelength of the laser (in meters) + self.laser.wavelength = ( + self.wavelength + ) # The wavelength of the laser (in meters) self.laser.e_max = self.E0 # Maximum amplitude of the laser field (in V/m) - self.laser.polarization = self.polarization_direction # The main polarization vector + self.laser.polarization = ( + self.polarization_direction + ) # The main polarization vector self.laser.profile_waist = self.waist # The waist of the laser (in meters) - self.laser.profile_duration = self.duration # The duration of the laser (in seconds) + self.laser.profile_duration = ( + self.duration + ) # The duration of the laser (in seconds) self.laser.direction = self.propagation_direction self.laser.zeta = self.zeta self.laser.beta = self.beta @@ -1602,6 +1942,7 @@ def laser_initialize_inputs(self): self.laser.do_continuous_injection = self.fill_in + class AnalyticLaser(picmistandard.PICMI_AnalyticLaser): def init(self, kw): self.mangle_dict = None @@ -1609,14 +1950,18 @@ def init(self, kw): def laser_initialize_inputs(self): self.laser_number = len(pywarpx.lasers.names) + 1 if self.name is None: - self.name = 'laser{}'.format(self.laser_number) + self.name = "laser{}".format(self.laser_number) self.laser = pywarpx.Lasers.newlaser(self.name) self.laser.profile = "parse_field_function" - self.laser.wavelength = self.wavelength # The wavelength of the laser (in meters) + self.laser.wavelength = ( + self.wavelength + ) # The wavelength of the laser (in meters) self.laser.e_max = self.Emax # Maximum amplitude of the laser field (in V/m) - self.laser.polarization = self.polarization_direction # The main polarization vector + self.laser.polarization = ( + self.polarization_direction + ) # The main 
polarization vector self.laser.direction = self.propagation_direction self.laser.do_continuous_injection = self.fill_in @@ -1624,50 +1969,54 @@ def laser_initialize_inputs(self): # Only do this once so that the same variables are used in this distribution # is used multiple times self.mangle_dict = pywarpx.my_constants.add_keywords(self.user_defined_kw) - expression = pywarpx.my_constants.mangle_expression(self.field_expression, self.mangle_dict) - self.laser.__setattr__('field_function(X,Y,t)', expression) + expression = pywarpx.my_constants.mangle_expression( + self.field_expression, self.mangle_dict + ) + self.laser.__setattr__("field_function(X,Y,t)", expression) class LaserAntenna(picmistandard.PICMI_LaserAntenna): def laser_antenna_initialize_inputs(self, laser): laser.laser.position = self.position # This point is on the laser plane - if ( - self.normal_vector is not None - and not np.allclose(laser.laser.direction, self.normal_vector) + if self.normal_vector is not None and not np.allclose( + laser.laser.direction, self.normal_vector ): raise AttributeError( - 'The specified laser direction does not match the ' - 'specified antenna normal.' + "The specified laser direction does not match the " + "specified antenna normal." 
) - self.normal_vector = laser.laser.direction # The plane normal direction + self.normal_vector = laser.laser.direction # The plane normal direction if isinstance(laser, GaussianLaser): # Focal distance from the antenna (in meters) laser.laser.profile_focal_distance = np.sqrt( - (laser.focal_position[0] - self.position[0])**2 + - (laser.focal_position[1] - self.position[1])**2 + - (laser.focal_position[2] - self.position[2])**2 + (laser.focal_position[0] - self.position[0]) ** 2 + + (laser.focal_position[1] - self.position[1]) ** 2 + + (laser.focal_position[2] - self.position[2]) ** 2 ) # The time at which the laser reaches its peak (in seconds) - laser.laser.profile_t_peak = np.sqrt( - (self.position[0] - laser.centroid_position[0])**2 + - (self.position[1] - laser.centroid_position[1])**2 + - (self.position[2] - laser.centroid_position[2])**2 - ) / constants.c + laser.laser.profile_t_peak = ( + np.sqrt( + (self.position[0] - laser.centroid_position[0]) ** 2 + + (self.position[1] - laser.centroid_position[1]) ** 2 + + (self.position[2] - laser.centroid_position[2]) ** 2 + ) + / constants.c + ) class LoadInitialField(picmistandard.PICMI_LoadGriddedField): def applied_field_initialize_inputs(self): pywarpx.warpx.read_fields_from_path = self.read_fields_from_path if self.load_E: - pywarpx.warpx.E_ext_grid_init_style = 'read_from_file' + pywarpx.warpx.E_ext_grid_init_style = "read_from_file" if self.load_B: - pywarpx.warpx.B_ext_grid_init_style = 'read_from_file' + pywarpx.warpx.B_ext_grid_init_style = "read_from_file" class AnalyticInitialField(picmistandard.PICMI_AnalyticAppliedField): def init(self, kw): self.mangle_dict = None - self.maxlevel_extEMfield_init = kw.pop('warpx_maxlevel_extEMfield_init', None) + self.maxlevel_extEMfield_init = kw.pop("warpx_maxlevel_extEMfield_init", None) def applied_field_initialize_inputs(self): # Note that lower and upper_bound are not used by WarpX @@ -1678,45 +2027,69 @@ def applied_field_initialize_inputs(self): # is used 
multiple times self.mangle_dict = pywarpx.my_constants.add_keywords(self.user_defined_kw) - if (self.Ex_expression is not None or - self.Ey_expression is not None or - self.Ez_expression is not None): - pywarpx.warpx.E_ext_grid_init_style = 'parse_e_ext_grid_function' - for sdir, expression in zip(['x', 'y', 'z'], [self.Ex_expression, self.Ey_expression, self.Ez_expression]): - expression = pywarpx.my_constants.mangle_expression(expression, self.mangle_dict) - pywarpx.warpx.__setattr__(f'E{sdir}_external_grid_function(x,y,z)', expression) - - if (self.Bx_expression is not None or - self.By_expression is not None or - self.Bz_expression is not None): - pywarpx.warpx.B_ext_grid_init_style = 'parse_b_ext_grid_function' - for sdir, expression in zip(['x', 'y', 'z'], [self.Bx_expression, self.By_expression, self.Bz_expression]): - expression = pywarpx.my_constants.mangle_expression(expression, self.mangle_dict) - pywarpx.warpx.__setattr__(f'B{sdir}_external_grid_function(x,y,z)', expression) + if ( + self.Ex_expression is not None + or self.Ey_expression is not None + or self.Ez_expression is not None + ): + pywarpx.warpx.E_ext_grid_init_style = "parse_e_ext_grid_function" + for sdir, expression in zip( + ["x", "y", "z"], + [self.Ex_expression, self.Ey_expression, self.Ez_expression], + ): + expression = pywarpx.my_constants.mangle_expression( + expression, self.mangle_dict + ) + pywarpx.warpx.__setattr__( + f"E{sdir}_external_grid_function(x,y,z)", expression + ) + + if ( + self.Bx_expression is not None + or self.By_expression is not None + or self.Bz_expression is not None + ): + pywarpx.warpx.B_ext_grid_init_style = "parse_b_ext_grid_function" + for sdir, expression in zip( + ["x", "y", "z"], + [self.Bx_expression, self.By_expression, self.Bz_expression], + ): + expression = pywarpx.my_constants.mangle_expression( + expression, self.mangle_dict + ) + pywarpx.warpx.__setattr__( + f"B{sdir}_external_grid_function(x,y,z)", expression + ) + class 
LoadAppliedField(picmistandard.PICMI_LoadAppliedField): def applied_field_initialize_inputs(self): pywarpx.particles.read_fields_from_path = self.read_fields_from_path if self.load_E: - pywarpx.particles.E_ext_particle_init_style = 'read_from_file' + pywarpx.particles.E_ext_particle_init_style = "read_from_file" if self.load_B: - pywarpx.particles.B_ext_particle_init_style = 'read_from_file' + pywarpx.particles.B_ext_particle_init_style = "read_from_file" + class ConstantAppliedField(picmistandard.PICMI_ConstantAppliedField): def applied_field_initialize_inputs(self): # Note that lower and upper_bound are not used by WarpX - if (self.Ex is not None or - self.Ey is not None or - self.Ez is not None): - pywarpx.particles.E_ext_particle_init_style = 'constant' - pywarpx.particles.E_external_particle = [self.Ex or 0., self.Ey or 0., self.Ez or 0.] + if self.Ex is not None or self.Ey is not None or self.Ez is not None: + pywarpx.particles.E_ext_particle_init_style = "constant" + pywarpx.particles.E_external_particle = [ + self.Ex or 0.0, + self.Ey or 0.0, + self.Ez or 0.0, + ] - if (self.Bx is not None or - self.By is not None or - self.Bz is not None): - pywarpx.particles.B_ext_particle_init_style = 'constant' - pywarpx.particles.B_external_particle = [self.Bx or 0., self.By or 0., self.Bz or 0.] 
+ if self.Bx is not None or self.By is not None or self.Bz is not None: + pywarpx.particles.B_ext_particle_init_style = "constant" + pywarpx.particles.B_external_particle = [ + self.Bx or 0.0, + self.By or 0.0, + self.Bz or 0.0, + ] class AnalyticAppliedField(picmistandard.PICMI_AnalyticAppliedField): @@ -1731,21 +2104,43 @@ def applied_field_initialize_inputs(self): # is used multiple times self.mangle_dict = pywarpx.my_constants.add_keywords(self.user_defined_kw) - if (self.Ex_expression is not None or - self.Ey_expression is not None or - self.Ez_expression is not None): - pywarpx.particles.E_ext_particle_init_style = 'parse_e_ext_particle_function' - for sdir, expression in zip(['x', 'y', 'z'], [self.Ex_expression, self.Ey_expression, self.Ez_expression]): - expression = pywarpx.my_constants.mangle_expression(expression, self.mangle_dict) - pywarpx.particles.__setattr__(f'E{sdir}_external_particle_function(x,y,z,t)', expression) + if ( + self.Ex_expression is not None + or self.Ey_expression is not None + or self.Ez_expression is not None + ): + pywarpx.particles.E_ext_particle_init_style = ( + "parse_e_ext_particle_function" + ) + for sdir, expression in zip( + ["x", "y", "z"], + [self.Ex_expression, self.Ey_expression, self.Ez_expression], + ): + expression = pywarpx.my_constants.mangle_expression( + expression, self.mangle_dict + ) + pywarpx.particles.__setattr__( + f"E{sdir}_external_particle_function(x,y,z,t)", expression + ) - if (self.Bx_expression is not None or - self.By_expression is not None or - self.Bz_expression is not None): - pywarpx.particles.B_ext_particle_init_style = 'parse_b_ext_particle_function' - for sdir, expression in zip(['x', 'y', 'z'], [self.Bx_expression, self.By_expression, self.Bz_expression]): - expression = pywarpx.my_constants.mangle_expression(expression, self.mangle_dict) - pywarpx.particles.__setattr__(f'B{sdir}_external_particle_function(x,y,z,t)', expression) + if ( + self.Bx_expression is not None + or self.By_expression 
is not None + or self.Bz_expression is not None + ): + pywarpx.particles.B_ext_particle_init_style = ( + "parse_b_ext_particle_function" + ) + for sdir, expression in zip( + ["x", "y", "z"], + [self.Bx_expression, self.By_expression, self.Bz_expression], + ): + expression = pywarpx.my_constants.mangle_expression( + expression, self.mangle_dict + ) + pywarpx.particles.__setattr__( + f"B{sdir}_external_particle_function(x,y,z,t)", expression + ) class Mirror(picmistandard.PICMI_Mirror): @@ -1768,13 +2163,20 @@ class FieldIonization(picmistandard.PICMI_FieldIonization): """ WarpX only has ADK ionization model implemented. """ + def interaction_initialize_inputs(self): - assert self.model == 'ADK', 'WarpX only has ADK ionization model implemented' + assert self.model == "ADK", "WarpX only has ADK ionization model implemented" self.ionized_species.species.do_field_ionization = 1 - self.ionized_species.species.physical_element = self.ionized_species.particle_type - self.ionized_species.species.ionization_product_species = self.product_species.name - self.ionized_species.species.ionization_initial_level = self.ionized_species.charge_state - self.ionized_species.species.charge = 'q_e' + self.ionized_species.species.physical_element = ( + self.ionized_species.particle_type + ) + self.ionized_species.species.ionization_product_species = ( + self.product_species.name + ) + self.ionized_species.species.ionization_initial_level = ( + self.ionized_species.charge_state + ) + self.ionized_species.species.charge = "q_e" class CoulombCollisions(picmistandard.base._ClassWithInit): @@ -1798,6 +2200,7 @@ class CoulombCollisions(picmistandard.base._ClassWithInit): ndt: integer, optional The collisions will be applied every "ndt" steps. Must be 1 or larger. 
""" + def __init__(self, name, species, CoulombLog=None, ndt=None, **kw): self.name = name self.species = species @@ -1808,7 +2211,7 @@ def __init__(self, name, species, CoulombLog=None, ndt=None, **kw): def collision_initialize_inputs(self): collision = pywarpx.Collisions.newcollision(self.name) - collision.type = 'pairwisecoulomb' + collision.type = "pairwisecoulomb" collision.species = [species.name for species in self.species] collision.CoulombLog = self.CoulombLog collision.ndt = self.ndt @@ -1849,9 +2252,18 @@ class MCCCollisions(picmistandard.base._ClassWithInit): The collisions will be applied every "ndt" steps. Must be 1 or larger. """ - def __init__(self, name, species, background_density, - background_temperature, scattering_processes, - background_mass=None, max_background_density=None, ndt=None, **kw): + def __init__( + self, + name, + species, + background_density, + background_temperature, + scattering_processes, + background_mass=None, + max_background_density=None, + ndt=None, + **kw, + ): self.name = name self.species = species self.background_density = background_density @@ -1865,14 +2277,18 @@ def __init__(self, name, species, background_density, def collision_initialize_inputs(self): collision = pywarpx.Collisions.newcollision(self.name) - collision.type = 'background_mcc' + collision.type = "background_mcc" collision.species = self.species.name if isinstance(self.background_density, str): - collision.__setattr__('background_density(x,y,z,t)', self.background_density) + collision.__setattr__( + "background_density(x,y,z,t)", self.background_density + ) else: collision.background_density = self.background_density if isinstance(self.background_temperature, str): - collision.__setattr__('background_temperature(x,y,z,t)', self.background_temperature) + collision.__setattr__( + "background_temperature(x,y,z,t)", self.background_temperature + ) else: collision.background_temperature = self.background_temperature collision.background_mass = 
self.background_mass @@ -1882,9 +2298,9 @@ def collision_initialize_inputs(self): collision.scattering_processes = self.scattering_processes.keys() for process, kw in self.scattering_processes.items(): for key, val in kw.items(): - if key == 'species': + if key == "species": val = val.name - collision.add_new_attr(process+'_'+key, val) + collision.add_new_attr(process + "_" + key, val) class DSMCCollisions(picmistandard.base._ClassWithInit): @@ -1918,16 +2334,16 @@ def __init__(self, name, species, scattering_processes, ndt=None, **kw): def collision_initialize_inputs(self): collision = pywarpx.Collisions.newcollision(self.name) - collision.type = 'dsmc' + collision.type = "dsmc" collision.species = [species.name for species in self.species] collision.ndt = self.ndt collision.scattering_processes = self.scattering_processes.keys() for process, kw in self.scattering_processes.items(): for key, val in kw.items(): - if key == 'species': + if key == "species": val = val.name - collision.add_new_attr(process+'_'+key, val) + collision.add_new_attr(process + "_" + key, val) class EmbeddedBoundary(picmistandard.base._ClassWithInit): @@ -1966,19 +2382,35 @@ class EmbeddedBoundary(picmistandard.base._ClassWithInit): Parameters used in the analytic expressions should be given as additional keyword arguments. 
""" - def __init__(self, implicit_function=None, stl_file=None, stl_scale=None, stl_center=None, stl_reverse_normal=False, - potential=None, cover_multiple_cuts=None, **kw): - assert stl_file is None or implicit_function is None, Exception('Only one between implicit_function and ' - 'stl_file can be specified') + def __init__( + self, + implicit_function=None, + stl_file=None, + stl_scale=None, + stl_center=None, + stl_reverse_normal=False, + potential=None, + cover_multiple_cuts=None, + **kw, + ): + assert stl_file is None or implicit_function is None, Exception( + "Only one between implicit_function and " "stl_file can be specified" + ) self.implicit_function = implicit_function self.stl_file = stl_file if stl_file is None: - assert stl_scale is None, Exception('EB can only be scaled only when using an stl file') - assert stl_center is None, Exception('EB can only be translated only when using an stl file') - assert stl_reverse_normal is False, Exception('EB can only be reversed only when using an stl file') + assert stl_scale is None, Exception( + "EB can only be scaled only when using an stl file" + ) + assert stl_center is None, Exception( + "EB can only be translated only when using an stl file" + ) + assert stl_reverse_normal is False, Exception( + "EB can only be reversed only when using an stl file" + ) self.stl_scale = stl_scale self.stl_center = stl_center @@ -1991,22 +2423,26 @@ def __init__(self, implicit_function=None, stl_file=None, stl_scale=None, stl_ce # Handle keyword arguments used in expressions self.user_defined_kw = {} for k in list(kw.keys()): - if (implicit_function is not None and re.search(r'\b%s\b'%k, implicit_function) or - (potential is not None and re.search(r'\b%s\b'%k, potential))): + if ( + implicit_function is not None + and re.search(r"\b%s\b" % k, implicit_function) + or (potential is not None and re.search(r"\b%s\b" % k, potential)) + ): self.user_defined_kw[k] = kw[k] del kw[k] self.handle_init(kw) def 
embedded_boundary_initialize_inputs(self, solver): - # Add the user defined keywords to my_constants # The keywords are mangled if there is a conflicting variable already # defined in my_constants with the same name but different value. self.mangle_dict = pywarpx.my_constants.add_keywords(self.user_defined_kw) if self.implicit_function is not None: - expression = pywarpx.my_constants.mangle_expression(self.implicit_function, self.mangle_dict) + expression = pywarpx.my_constants.mangle_expression( + self.implicit_function, self.mangle_dict + ) pywarpx.warpx.eb_implicit_function = expression if self.stl_file is not None: @@ -2019,8 +2455,10 @@ def embedded_boundary_initialize_inputs(self, solver): pywarpx.eb2.cover_multiple_cuts = self.cover_multiple_cuts if self.potential is not None: - expression = pywarpx.my_constants.mangle_expression(self.potential, self.mangle_dict) - pywarpx.warpx.__setattr__('eb_potential(x,y,z,t)', expression) + expression = pywarpx.my_constants.mangle_expression( + self.potential, self.mangle_dict + ) + pywarpx.warpx.__setattr__("eb_potential(x,y,z,t)", expression) class PlasmaLens(picmistandard.base._ClassWithInit): @@ -2057,22 +2495,25 @@ class PlasmaLens(picmistandard.base._ClassWithInit): - By = -x*strengths_B """ - def __init__(self, period, starts, lengths, strengths_E=None, strengths_B=None, **kw): + + def __init__( + self, period, starts, lengths, strengths_E=None, strengths_B=None, **kw + ): self.period = period self.starts = starts self.lengths = lengths self.strengths_E = strengths_E self.strengths_B = strengths_B - assert (self.strengths_E is not None) or (self.strengths_B is not None),\ - Exception('One of strengths_E or strengths_B must be supplied') + assert (self.strengths_E is not None) or ( + self.strengths_B is not None + ), Exception("One of strengths_E or strengths_B must be supplied") self.handle_init(kw) def applied_field_initialize_inputs(self): - - pywarpx.particles.E_ext_particle_init_style = 'repeated_plasma_lens' 
- pywarpx.particles.B_ext_particle_init_style = 'repeated_plasma_lens' + pywarpx.particles.E_ext_particle_init_style = "repeated_plasma_lens" + pywarpx.particles.B_ext_particle_init_style = "repeated_plasma_lens" pywarpx.particles.repeated_plasma_lens_period = self.period pywarpx.particles.repeated_plasma_lens_starts = self.starts pywarpx.particles.repeated_plasma_lens_lengths = self.lengths @@ -2251,49 +2692,62 @@ class Simulation(picmistandard.PICMI_Simulation): extension = pywarpx.libwarpx def init(self, kw): + self.evolve_scheme = kw.pop("warpx_evolve_scheme", None) + self.current_deposition_algo = kw.pop("warpx_current_deposition_algo", None) + self.charge_deposition_algo = kw.pop("warpx_charge_deposition_algo", None) + self.field_gathering_algo = kw.pop("warpx_field_gathering_algo", None) + self.particle_pusher_algo = kw.pop("warpx_particle_pusher_algo", None) + self.use_filter = kw.pop("warpx_use_filter", None) + self.do_multi_J = kw.pop("warpx_do_multi_J", None) + self.do_multi_J_n_depositions = kw.pop("warpx_do_multi_J_n_depositions", None) + self.grid_type = kw.pop("warpx_grid_type", None) + self.do_current_centering = kw.pop("warpx_do_current_centering", None) + self.field_centering_order = kw.pop("warpx_field_centering_order", None) + self.current_centering_order = kw.pop("warpx_current_centering_order", None) + self.serialize_initial_conditions = kw.pop( + "warpx_serialize_initial_conditions", None + ) + self.random_seed = kw.pop("warpx_random_seed", None) + self.do_dynamic_scheduling = kw.pop("warpx_do_dynamic_scheduling", None) + self.load_balance_intervals = kw.pop("warpx_load_balance_intervals", None) + self.load_balance_efficiency_ratio_threshold = kw.pop( + "warpx_load_balance_efficiency_ratio_threshold", None + ) + self.load_balance_with_sfc = kw.pop("warpx_load_balance_with_sfc", None) + self.load_balance_knapsack_factor = kw.pop( + "warpx_load_balance_knapsack_factor", None + ) + self.load_balance_costs_update = 
kw.pop("warpx_load_balance_costs_update", None) + self.costs_heuristic_particles_wt = kw.pop( + "warpx_costs_heuristic_particles_wt", None + ) + self.costs_heuristic_cells_wt = kw.pop("warpx_costs_heuristic_cells_wt", None) + self.use_fdtd_nci_corr = kw.pop("warpx_use_fdtd_nci_corr", None) + self.amr_check_input = kw.pop("warpx_amr_check_input", None) + self.amr_restart = kw.pop("warpx_amr_restart", None) + self.amrex_the_arena_is_managed = kw.pop( + "warpx_amrex_the_arena_is_managed", None + ) + self.amrex_the_arena_init_size = kw.pop("warpx_amrex_the_arena_init_size", None) + self.amrex_use_gpu_aware_mpi = kw.pop("warpx_amrex_use_gpu_aware_mpi", None) + self.zmax_plasma_to_compute_max_step = kw.pop( + "warpx_zmax_plasma_to_compute_max_step", None + ) + self.compute_max_step_from_btd = kw.pop("warpx_compute_max_step_from_btd", None) + self.sort_intervals = kw.pop("warpx_sort_intervals", None) + self.sort_particles_for_deposition = kw.pop( + "warpx_sort_particles_for_deposition", None + ) + self.sort_idx_type = kw.pop("warpx_sort_idx_type", None) + self.sort_bin_size = kw.pop("warpx_sort_bin_size", None) + self.used_inputs_file = kw.pop("warpx_used_inputs_file", None) + + self.collisions = kw.pop("warpx_collisions", None) + self.embedded_boundary = kw.pop("warpx_embedded_boundary", None) - self.evolve_scheme = kw.pop('warpx_evolve_scheme', None) - self.current_deposition_algo = kw.pop('warpx_current_deposition_algo', None) - self.charge_deposition_algo = kw.pop('warpx_charge_deposition_algo', None) - self.field_gathering_algo = kw.pop('warpx_field_gathering_algo', None) - self.particle_pusher_algo = kw.pop('warpx_particle_pusher_algo', None) - self.use_filter = kw.pop('warpx_use_filter', None) - self.do_multi_J = kw.pop('warpx_do_multi_J', None) - self.do_multi_J_n_depositions = kw.pop('warpx_do_multi_J_n_depositions', None) - self.grid_type = kw.pop('warpx_grid_type', None) - self.do_current_centering = kw.pop('warpx_do_current_centering', None) - 
self.field_centering_order = kw.pop('warpx_field_centering_order', None) - self.current_centering_order = kw.pop('warpx_current_centering_order', None) - self.serialize_initial_conditions = kw.pop('warpx_serialize_initial_conditions', None) - self.random_seed = kw.pop('warpx_random_seed', None) - self.do_dynamic_scheduling = kw.pop('warpx_do_dynamic_scheduling', None) - self.load_balance_intervals = kw.pop('warpx_load_balance_intervals', None) - self.load_balance_efficiency_ratio_threshold = kw.pop('warpx_load_balance_efficiency_ratio_threshold', None) - self.load_balance_with_sfc = kw.pop('warpx_load_balance_with_sfc', None) - self.load_balance_knapsack_factor = kw.pop('warpx_load_balance_knapsack_factor', None) - self.load_balance_costs_update = kw.pop('warpx_load_balance_costs_update', None) - self.costs_heuristic_particles_wt = kw.pop('warpx_costs_heuristic_particles_wt', None) - self.costs_heuristic_cells_wt = kw.pop('warpx_costs_heuristic_cells_wt', None) - self.use_fdtd_nci_corr = kw.pop('warpx_use_fdtd_nci_corr', None) - self.amr_check_input = kw.pop('warpx_amr_check_input', None) - self.amr_restart = kw.pop('warpx_amr_restart', None) - self.amrex_the_arena_is_managed = kw.pop('warpx_amrex_the_arena_is_managed', None) - self.amrex_the_arena_init_size = kw.pop('warpx_amrex_the_arena_init_size', None) - self.amrex_use_gpu_aware_mpi = kw.pop('warpx_amrex_use_gpu_aware_mpi', None) - self.zmax_plasma_to_compute_max_step = kw.pop('warpx_zmax_plasma_to_compute_max_step', None) - self.compute_max_step_from_btd = kw.pop('warpx_compute_max_step_from_btd', None) - self.sort_intervals = kw.pop('warpx_sort_intervals', None) - self.sort_particles_for_deposition = kw.pop('warpx_sort_particles_for_deposition', None) - self.sort_idx_type = kw.pop('warpx_sort_idx_type', None) - self.sort_bin_size = kw.pop('warpx_sort_bin_size', None) - self.used_inputs_file = kw.pop('warpx_used_inputs_file', None) - - self.collisions = kw.pop('warpx_collisions', None) - 
self.embedded_boundary = kw.pop('warpx_embedded_boundary', None) - - self.break_signals = kw.pop('warpx_break_signals', None) - self.checkpoint_signals = kw.pop('warpx_checkpoint_signals', None) - self.numprocs = kw.pop('warpx_numprocs', None) + self.break_signals = kw.pop("warpx_break_signals", None) + self.checkpoint_signals = kw.pop("warpx_checkpoint_signals", None) + self.numprocs = kw.pop("warpx_numprocs", None) self.inputs_initialized = False self.warpx_initialized = False @@ -2310,9 +2764,11 @@ def initialize_inputs(self): if self.gamma_boost is not None: pywarpx.warpx.gamma_boost = self.gamma_boost - pywarpx.warpx.boost_direction = 'z' + pywarpx.warpx.boost_direction = "z" - pywarpx.warpx.zmax_plasma_to_compute_max_step = self.zmax_plasma_to_compute_max_step + pywarpx.warpx.zmax_plasma_to_compute_max_step = ( + self.zmax_plasma_to_compute_max_step + ) pywarpx.warpx.compute_max_step_from_btd = self.compute_max_step_from_btd pywarpx.warpx.sort_intervals = self.sort_intervals @@ -2328,7 +2784,9 @@ def initialize_inputs(self): pywarpx.algo.field_gathering = self.field_gathering_algo pywarpx.algo.particle_pusher = self.particle_pusher_algo pywarpx.algo.load_balance_intervals = self.load_balance_intervals - pywarpx.algo.load_balance_efficiency_ratio_threshold = self.load_balance_efficiency_ratio_threshold + pywarpx.algo.load_balance_efficiency_ratio_threshold = ( + self.load_balance_efficiency_ratio_threshold + ) pywarpx.algo.load_balance_with_sfc = self.load_balance_with_sfc pywarpx.algo.load_balance_knapsack_factor = self.load_balance_knapsack_factor pywarpx.algo.load_balance_costs_update = self.load_balance_costs_update @@ -2358,13 +2816,22 @@ def initialize_inputs(self): particle_shape = self.particle_shape for s in self.species: if s.particle_shape is not None: - assert particle_shape is None or particle_shape == s.particle_shape, Exception('WarpX only supports one particle shape for all species') + assert ( + particle_shape is None or particle_shape == 
s.particle_shape + ), Exception("WarpX only supports one particle shape for all species") # --- If this was set for any species, use that value. particle_shape = s.particle_shape - if particle_shape is not None and (len(self.species) > 0 or len(self.lasers) > 0): + if particle_shape is not None and ( + len(self.species) > 0 or len(self.lasers) > 0 + ): if isinstance(particle_shape, str): - interpolation_order = {'NGP':0, 'linear':1, 'quadratic':2, 'cubic':3}[particle_shape] + interpolation_order = { + "NGP": 0, + "linear": 1, + "quadratic": 2, + "cubic": 3, + }[particle_shape] else: interpolation_order = particle_shape pywarpx.algo.particle_shape = interpolation_order @@ -2387,13 +2854,15 @@ def initialize_inputs(self): pywarpx.warpx.current_centering_noz = self.current_centering_order[-1] for i in range(len(self.species)): - self.species[i].species_initialize_inputs(self.layouts[i], - self.initialize_self_fields[i], - self.injection_plane_positions[i], - self.injection_plane_normal_vectors[i]) + self.species[i].species_initialize_inputs( + self.layouts[i], + self.initialize_self_fields[i], + self.injection_plane_positions[i], + self.injection_plane_normal_vectors[i], + ) for interaction in self.interactions: - assert(isinstance(interaction, FieldIonization)) + assert isinstance(interaction, FieldIonization) interaction.interaction_initialize_inputs() if self.collisions is not None: @@ -2407,7 +2876,9 @@ def initialize_inputs(self): for i in range(len(self.lasers)): self.lasers[i].laser_initialize_inputs() - self.laser_injection_methods[i].laser_antenna_initialize_inputs(self.lasers[i]) + self.laser_injection_methods[i].laser_antenna_initialize_inputs( + self.lasers[i] + ) for applied_field in self.applied_fields: applied_field.applied_field_initialize_inputs() @@ -2434,9 +2905,11 @@ def initialize_warpx(self, mpi_comm=None): self.warpx_initialized = True pywarpx.warpx.init(mpi_comm, max_step=self.max_steps, stop_time=self.max_time) - def write_input_file(self, 
file_name='inputs'): + def write_input_file(self, file_name="inputs"): self.initialize_inputs() - pywarpx.warpx.write_inputs(file_name, max_step=self.max_steps, stop_time=self.max_time) + pywarpx.warpx.write_inputs( + file_name, max_step=self.max_steps, stop_time=self.max_time + ) def step(self, nsteps=None, mpi_comm=None): self.initialize_inputs() @@ -2458,24 +2931,26 @@ def finalize(self): # Simulation frame diagnostics # ---------------------------- + class WarpXDiagnosticBase(object): """ Base class for all WarpX diagnostic containing functionality shared by all WarpX diagnostic installations. """ + def add_diagnostic(self): # reduced diagnostics go in a different bucket than regular diagnostics if isinstance(self, ReducedDiagnostic): bucket = pywarpx.reduced_diagnostics - name_template = 'reduced_diag' + name_template = "reduced_diag" else: bucket = pywarpx.diagnostics - name_template = 'diag' + name_template = "diag" - name = getattr(self, 'name', None) + name = getattr(self, "name", None) if name is None: - diagnostics_number = (len(bucket._diagnostics_dict) + 1) - self.name = f'{name_template}{diagnostics_number}' + diagnostics_number = len(bucket._diagnostics_dict) + 1 + self.name = f"{name_template}{diagnostics_number}" try: self.diagnostic = bucket._diagnostics_dict[self.name] @@ -2487,8 +2962,8 @@ def add_diagnostic(self): def set_write_dir(self): if self.write_dir is not None or self.file_prefix is not None: - write_dir = (self.write_dir or 'diags') - file_prefix = (self.file_prefix or self.name) + write_dir = self.write_dir or "diags" + file_prefix = self.file_prefix or self.name self.diagnostic.file_prefix = os.path.join(write_dir, file_prefix) @@ -2515,6 +2990,7 @@ class ParticleFieldDiagnostic: If not specified, all particles will be included. The function arguments are the same as the `func` above. 
""" + name: str func: str do_average: int = 1 @@ -2565,27 +3041,26 @@ class FieldDiagnostic(picmistandard.PICMI_FieldDiagnostic, WarpXDiagnosticBase): be calculated separately for each specified species. If not passed, default is all of the available particle species. """ - def init(self, kw): - self.plot_raw_fields = kw.pop('warpx_plot_raw_fields', None) - self.plot_raw_fields_guards = kw.pop('warpx_plot_raw_fields_guards', None) - self.plot_finepatch = kw.pop('warpx_plot_finepatch', None) - self.plot_crsepatch = kw.pop('warpx_plot_crsepatch', None) - self.format = kw.pop('warpx_format', 'plotfile') - self.openpmd_backend = kw.pop('warpx_openpmd_backend', None) - self.openpmd_encoding = kw.pop('warpx_openpmd_encoding', None) - self.file_prefix = kw.pop('warpx_file_prefix', None) - self.file_min_digits = kw.pop('warpx_file_min_digits', None) - self.dump_rz_modes = kw.pop('warpx_dump_rz_modes', None) - self.dump_last_timestep = kw.pop('warpx_dump_last_timestep', None) - self.particle_fields_to_plot = kw.pop('warpx_particle_fields_to_plot', []) - self.particle_fields_species = kw.pop('warpx_particle_fields_species', None) + def init(self, kw): + self.plot_raw_fields = kw.pop("warpx_plot_raw_fields", None) + self.plot_raw_fields_guards = kw.pop("warpx_plot_raw_fields_guards", None) + self.plot_finepatch = kw.pop("warpx_plot_finepatch", None) + self.plot_crsepatch = kw.pop("warpx_plot_crsepatch", None) + self.format = kw.pop("warpx_format", "plotfile") + self.openpmd_backend = kw.pop("warpx_openpmd_backend", None) + self.openpmd_encoding = kw.pop("warpx_openpmd_encoding", None) + self.file_prefix = kw.pop("warpx_file_prefix", None) + self.file_min_digits = kw.pop("warpx_file_min_digits", None) + self.dump_rz_modes = kw.pop("warpx_dump_rz_modes", None) + self.dump_last_timestep = kw.pop("warpx_dump_last_timestep", None) + self.particle_fields_to_plot = kw.pop("warpx_particle_fields_to_plot", []) + self.particle_fields_species = kw.pop("warpx_particle_fields_species", 
None) def diagnostic_initialize_inputs(self): - self.add_diagnostic() - self.diagnostic.diag_type = 'Full' + self.diagnostic.diag_type = "Full" self.diagnostic.format = self.format self.diagnostic.openpmd_backend = self.openpmd_backend self.diagnostic.openpmd_encoding = self.openpmd_encoding @@ -2596,38 +3071,48 @@ def diagnostic_initialize_inputs(self): self.diagnostic.diag_lo = self.lower_bound self.diagnostic.diag_hi = self.upper_bound if self.number_of_cells is not None: - self.diagnostic.coarsening_ratio = (np.array(self.grid.number_of_cells)/np.array(self.number_of_cells)).astype(int) + self.diagnostic.coarsening_ratio = ( + np.array(self.grid.number_of_cells) / np.array(self.number_of_cells) + ).astype(int) # --- Use a set to ensure that fields don't get repeated. fields_to_plot = set() - if pywarpx.geometry.dims == 'RZ': - E_fields_list = ['Er', 'Et', 'Ez'] - B_fields_list = ['Br', 'Bt', 'Bz'] - J_fields_list = ['Jr', 'Jt', 'Jz'] - J_displacement_fields_list = ['Jr_displacement', 'Jt_displacement', 'Jz_displacement'] - A_fields_list = ['Ar', 'At', 'Az'] + if pywarpx.geometry.dims == "RZ": + E_fields_list = ["Er", "Et", "Ez"] + B_fields_list = ["Br", "Bt", "Bz"] + J_fields_list = ["Jr", "Jt", "Jz"] + J_displacement_fields_list = [ + "Jr_displacement", + "Jt_displacement", + "Jz_displacement", + ] + A_fields_list = ["Ar", "At", "Az"] else: - E_fields_list = ['Ex', 'Ey', 'Ez'] - B_fields_list = ['Bx', 'By', 'Bz'] - J_fields_list = ['Jx', 'Jy', 'Jz'] - J_displacement_fields_list = ['Jx_displacement', 'Jy_displacement', 'Jz_displacement'] - A_fields_list = ['Ax', 'Ay', 'Az'] + E_fields_list = ["Ex", "Ey", "Ez"] + B_fields_list = ["Bx", "By", "Bz"] + J_fields_list = ["Jx", "Jy", "Jz"] + J_displacement_fields_list = [ + "Jx_displacement", + "Jy_displacement", + "Jz_displacement", + ] + A_fields_list = ["Ax", "Ay", "Az"] if self.data_list is not None: for dataname in self.data_list: - if dataname == 'E': + if dataname == "E": for field_name in E_fields_list: 
fields_to_plot.add(field_name) - elif dataname == 'B': + elif dataname == "B": for field_name in B_fields_list: fields_to_plot.add(field_name) - elif dataname == 'J': + elif dataname == "J": for field_name in J_fields_list: fields_to_plot.add(field_name.lower()) - elif dataname == 'J_displacement': + elif dataname == "J_displacement": for field_name in J_displacement_fields_list: fields_to_plot.add(field_name.lower()) - elif dataname == 'A': + elif dataname == "A": for field_name in A_fields_list: fields_to_plot.add(field_name) elif dataname in E_fields_list: @@ -2636,52 +3121,61 @@ def diagnostic_initialize_inputs(self): fields_to_plot.add(dataname) elif dataname in A_fields_list: fields_to_plot.add(dataname) - elif dataname in ['rho', 'phi', 'F', 'G', 'divE', 'divB', 'proc_number', 'part_per_cell']: + elif dataname in [ + "rho", + "phi", + "F", + "G", + "divE", + "divB", + "proc_number", + "part_per_cell", + ]: fields_to_plot.add(dataname) elif dataname in J_fields_list: fields_to_plot.add(dataname.lower()) elif dataname in J_displacement_fields_list: fields_to_plot.add(dataname.lower()) - elif dataname.startswith('rho_'): + elif dataname.startswith("rho_"): # Adds rho_species diagnostic fields_to_plot.add(dataname) - elif dataname.startswith('T_'): + elif dataname.startswith("T_"): # Adds T_species diagnostic fields_to_plot.add(dataname) - elif dataname == 'dive': - fields_to_plot.add('divE') - elif dataname == 'divb': - fields_to_plot.add('divB') - elif dataname == 'raw_fields': + elif dataname == "dive": + fields_to_plot.add("divE") + elif dataname == "divb": + fields_to_plot.add("divB") + elif dataname == "raw_fields": self.plot_raw_fields = 1 - elif dataname == 'raw_fields_guards': + elif dataname == "raw_fields_guards": self.plot_raw_fields_guards = 1 - elif dataname == 'finepatch': + elif dataname == "finepatch": self.plot_finepatch = 1 - elif dataname == 'crsepatch': + elif dataname == "crsepatch": self.plot_crsepatch = 1 - elif dataname == 'none': - 
fields_to_plot = set(('none',)) + elif dataname == "none": + fields_to_plot = set(("none",)) # --- Convert the set to a sorted list so that the order # --- is the same on all processors. fields_to_plot = list(fields_to_plot) fields_to_plot.sort() - self.diagnostic.set_or_replace_attr('fields_to_plot', fields_to_plot) + self.diagnostic.set_or_replace_attr("fields_to_plot", fields_to_plot) particle_fields_to_plot_names = list() for pfd in self.particle_fields_to_plot: if pfd.name in particle_fields_to_plot_names: - raise Exception('A particle fields name can not be repeated.') + raise Exception("A particle fields name can not be repeated.") particle_fields_to_plot_names.append(pfd.name) self.diagnostic.__setattr__( - f'particle_fields.{pfd.name}(x,y,z,ux,uy,uz)', pfd.func + f"particle_fields.{pfd.name}(x,y,z,ux,uy,uz)", pfd.func ) self.diagnostic.__setattr__( - f'particle_fields.{pfd.name}.do_average', pfd.do_average + f"particle_fields.{pfd.name}.do_average", pfd.do_average ) self.diagnostic.__setattr__( - f'particle_fields.{pfd.name}.filter(x,y,z,ux,uy,uz)', pfd.filter + f"particle_fields.{pfd.name}.filter(x,y,z,ux,uy,uz)", pfd.filter ) # --- Convert to a sorted list so that the order @@ -2694,7 +3188,7 @@ def diagnostic_initialize_inputs(self): self.diagnostic.plot_raw_fields_guards = self.plot_raw_fields_guards self.diagnostic.plot_finepatch = self.plot_finepatch self.diagnostic.plot_crsepatch = self.plot_crsepatch - if 'write_species' not in self.diagnostic.argvattrs: + if "write_species" not in self.diagnostic.argvattrs: self.diagnostic.write_species = False self.set_write_dir() @@ -2718,26 +3212,24 @@ class Checkpoint(picmistandard.base._ClassWithInit, WarpXDiagnosticBase): directory name. 
""" - def __init__(self, period = 1, write_dir = None, name = None, **kw): - + def __init__(self, period=1, write_dir=None, name=None, **kw): self.period = period self.write_dir = write_dir - self.file_prefix = kw.pop('warpx_file_prefix', None) - self.file_min_digits = kw.pop('warpx_file_min_digits', None) + self.file_prefix = kw.pop("warpx_file_prefix", None) + self.file_min_digits = kw.pop("warpx_file_min_digits", None) self.name = name if self.name is None: - self.name = 'chkpoint' + self.name = "chkpoint" self.handle_init(kw) def diagnostic_initialize_inputs(self): - self.add_diagnostic() self.diagnostic.intervals = self.period - self.diagnostic.diag_type = 'Full' - self.diagnostic.format = 'checkpoint' + self.diagnostic.diag_type = "Full" + self.diagnostic.format = "checkpoint" self.diagnostic.file_min_digits = self.file_min_digits self.set_write_dir() @@ -2784,43 +3276,44 @@ class ParticleDiagnostic(picmistandard.PICMI_ParticleDiagnostic, WarpXDiagnostic warpx_plot_filter_function: string, optional Analytic expression to down select the particles to in the diagnostic """ - def init(self, kw): - self.format = kw.pop('warpx_format', 'plotfile') - self.openpmd_backend = kw.pop('warpx_openpmd_backend', None) - self.openpmd_encoding = kw.pop('warpx_openpmd_encoding', None) - self.file_prefix = kw.pop('warpx_file_prefix', None) - self.file_min_digits = kw.pop('warpx_file_min_digits', None) - self.random_fraction = kw.pop('warpx_random_fraction', None) - self.uniform_stride = kw.pop('warpx_uniform_stride', None) - self.plot_filter_function = kw.pop('warpx_plot_filter_function', None) - self.dump_last_timestep = kw.pop('warpx_dump_last_timestep', None) + def init(self, kw): + self.format = kw.pop("warpx_format", "plotfile") + self.openpmd_backend = kw.pop("warpx_openpmd_backend", None) + self.openpmd_encoding = kw.pop("warpx_openpmd_encoding", None) + self.file_prefix = kw.pop("warpx_file_prefix", None) + self.file_min_digits = kw.pop("warpx_file_min_digits", None) + 
self.random_fraction = kw.pop("warpx_random_fraction", None) + self.uniform_stride = kw.pop("warpx_uniform_stride", None) + self.plot_filter_function = kw.pop("warpx_plot_filter_function", None) + self.dump_last_timestep = kw.pop("warpx_dump_last_timestep", None) self.user_defined_kw = {} if self.plot_filter_function is not None: # This allows variables to be used in the plot_filter_function, but # in order not to break other codes, the variables must begin with "warpx_" for k in list(kw.keys()): - if k.startswith('warpx_') and re.search(r'\b%s\b'%k, self.plot_filter_function): + if k.startswith("warpx_") and re.search( + r"\b%s\b" % k, self.plot_filter_function + ): self.user_defined_kw[k] = kw[k] del kw[k] self.mangle_dict = None def diagnostic_initialize_inputs(self): - self.add_diagnostic() - self.diagnostic.diag_type = 'Full' + self.diagnostic.diag_type = "Full" self.diagnostic.format = self.format self.diagnostic.openpmd_backend = self.openpmd_backend self.diagnostic.openpmd_encoding = self.openpmd_encoding self.diagnostic.file_min_digits = self.file_min_digits self.diagnostic.dump_last_timestep = self.dump_last_timestep self.diagnostic.intervals = self.period - self.diagnostic.set_or_replace_attr('write_species', True) - if 'fields_to_plot' not in self.diagnostic.argvattrs: - self.diagnostic.fields_to_plot = 'none' + self.diagnostic.set_or_replace_attr("write_species", True) + if "fields_to_plot" not in self.diagnostic.argvattrs: + self.diagnostic.fields_to_plot = "none" self.set_write_dir() # --- Use a set to ensure that fields don't get repeated. 
@@ -2828,39 +3321,59 @@ def diagnostic_initialize_inputs(self): if self.data_list is not None: for dataname in self.data_list: - if dataname == 'position': - if pywarpx.geometry.dims != '1': # because then it's WARPX_DIM_1D_Z - variables.add('x') - if pywarpx.geometry.dims == '3': - variables.add('y') - variables.add('z') - if pywarpx.geometry.dims == 'RZ': - variables.add('theta') - elif dataname == 'momentum': - variables.add('ux') - variables.add('uy') - variables.add('uz') - elif dataname == 'weighting': - variables.add('w') - elif dataname == 'fields': - variables.add('Ex') - variables.add('Ey') - variables.add('Ez') - variables.add('Bx') - variables.add('By') - variables.add('Bz') - elif dataname in ['x', 'y', 'z', 'theta', 'ux', 'uy', 'uz', 'Ex', 'Ey', 'Ez', 'Bx', 'By', 'Bz', 'Er', 'Et', 'Br', 'Bt']: - if pywarpx.geometry.dims == '1' and (dataname == 'x' or dataname == 'y'): + if dataname == "position": + if pywarpx.geometry.dims != "1": # because then it's WARPX_DIM_1D_Z + variables.add("x") + if pywarpx.geometry.dims == "3": + variables.add("y") + variables.add("z") + if pywarpx.geometry.dims == "RZ": + variables.add("theta") + elif dataname == "momentum": + variables.add("ux") + variables.add("uy") + variables.add("uz") + elif dataname == "weighting": + variables.add("w") + elif dataname == "fields": + variables.add("Ex") + variables.add("Ey") + variables.add("Ez") + variables.add("Bx") + variables.add("By") + variables.add("Bz") + elif dataname in [ + "x", + "y", + "z", + "theta", + "ux", + "uy", + "uz", + "Ex", + "Ey", + "Ez", + "Bx", + "By", + "Bz", + "Er", + "Et", + "Br", + "Bt", + ]: + if pywarpx.geometry.dims == "1" and ( + dataname == "x" or dataname == "y" + ): raise RuntimeError( f"The attribute {dataname} is not available in mode WARPX_DIM_1D_Z" f"chosen by dim={pywarpx.geometry.dims} in pywarpx." 
) - elif pywarpx.geometry.dims != '3' and dataname == 'y': + elif pywarpx.geometry.dims != "3" and dataname == "y": raise RuntimeError( f"The attribute {dataname} is not available outside of mode WARPX_DIM_3D" f"The chosen value was dim={pywarpx.geometry.dims} in pywarpx." ) - elif pywarpx.geometry.dims != 'RZ' and dataname == 'theta': + elif pywarpx.geometry.dims != "RZ" and dataname == "theta": raise RuntimeError( f"The attribute {dataname} is not available outside of mode WARPX_DIM_RZ." f"The chosen value was dim={pywarpx.geometry.dims} in pywarpx." @@ -2906,21 +3419,27 @@ def diagnostic_initialize_inputs(self): self.mangle_dict = pywarpx.my_constants.add_keywords(self.user_defined_kw) for name in species_names: - diag = pywarpx.Bucket.Bucket(self.name + '.' + name, - variables = variables, - random_fraction = random_fraction.get(name, random_fraction_default), - uniform_stride = uniform_stride.get(name, uniform_stride_default)) - expression = pywarpx.my_constants.mangle_expression(self.plot_filter_function, self.mangle_dict) - diag.__setattr__('plot_filter_function(t,x,y,z,ux,uy,uz)', expression) + diag = pywarpx.Bucket.Bucket( + self.name + "." + name, + variables=variables, + random_fraction=random_fraction.get(name, random_fraction_default), + uniform_stride=uniform_stride.get(name, uniform_stride_default), + ) + expression = pywarpx.my_constants.mangle_expression( + self.plot_filter_function, self.mangle_dict + ) + diag.__setattr__("plot_filter_function(t,x,y,z,ux,uy,uz)", expression) self.diagnostic._species_dict[name] = diag + # ---------------------------- # Lab frame diagnostics # ---------------------------- -class LabFrameFieldDiagnostic(picmistandard.PICMI_LabFrameFieldDiagnostic, - WarpXDiagnosticBase): +class LabFrameFieldDiagnostic( + picmistandard.PICMI_LabFrameFieldDiagnostic, WarpXDiagnosticBase +): """ See `Input Parameters `__ for more information. 
@@ -2957,24 +3476,24 @@ class LabFrameFieldDiagnostic(picmistandard.PICMI_LabFrameFieldDiagnostic, warpx_upper_bound: vector of floats, optional Passed to .upper_bound """ + def init(self, kw): """The user is using the new BTD""" - self.format = kw.pop('warpx_format', None) - self.openpmd_backend = kw.pop('warpx_openpmd_backend', None) - self.openpmd_encoding = kw.pop('warpx_openpmd_encoding', None) - self.file_prefix = kw.pop('warpx_file_prefix', None) - self.intervals = kw.pop('warpx_intervals', None) - self.file_min_digits = kw.pop('warpx_file_min_digits', None) - self.buffer_size = kw.pop('warpx_buffer_size', None) - self.lower_bound = kw.pop('warpx_lower_bound', None) - self.upper_bound = kw.pop('warpx_upper_bound', None) + self.format = kw.pop("warpx_format", None) + self.openpmd_backend = kw.pop("warpx_openpmd_backend", None) + self.openpmd_encoding = kw.pop("warpx_openpmd_encoding", None) + self.file_prefix = kw.pop("warpx_file_prefix", None) + self.intervals = kw.pop("warpx_intervals", None) + self.file_min_digits = kw.pop("warpx_file_min_digits", None) + self.buffer_size = kw.pop("warpx_buffer_size", None) + self.lower_bound = kw.pop("warpx_lower_bound", None) + self.upper_bound = kw.pop("warpx_upper_bound", None) def diagnostic_initialize_inputs(self): - self.add_diagnostic() - self.diagnostic.diag_type = 'BackTransformed' + self.diagnostic.diag_type = "BackTransformed" self.diagnostic.format = self.format self.diagnostic.openpmd_backend = self.openpmd_backend self.diagnostic.openpmd_encoding = self.openpmd_encoding @@ -2995,23 +3514,23 @@ def diagnostic_initialize_inputs(self): # --- Use a set to ensure that fields don't get repeated. 
fields_to_plot = set() - if pywarpx.geometry.dims == 'RZ': - E_fields_list = ['Er', 'Et', 'Ez'] - B_fields_list = ['Br', 'Bt', 'Bz'] - J_fields_list = ['Jr', 'Jt', 'Jz'] + if pywarpx.geometry.dims == "RZ": + E_fields_list = ["Er", "Et", "Ez"] + B_fields_list = ["Br", "Bt", "Bz"] + J_fields_list = ["Jr", "Jt", "Jz"] else: - E_fields_list = ['Ex', 'Ey', 'Ez'] - B_fields_list = ['Bx', 'By', 'Bz'] - J_fields_list = ['Jx', 'Jy', 'Jz'] + E_fields_list = ["Ex", "Ey", "Ez"] + B_fields_list = ["Bx", "By", "Bz"] + J_fields_list = ["Jx", "Jy", "Jz"] if self.data_list is not None: for dataname in self.data_list: - if dataname == 'E': + if dataname == "E": for field_name in E_fields_list: fields_to_plot.add(field_name) - elif dataname == 'B': + elif dataname == "B": for field_name in B_fields_list: fields_to_plot.add(field_name) - elif dataname == 'J': + elif dataname == "J": for field_name in J_fields_list: fields_to_plot.add(field_name.lower()) elif dataname in E_fields_list: @@ -3020,7 +3539,7 @@ def diagnostic_initialize_inputs(self): fields_to_plot.add(dataname) elif dataname in J_fields_list: fields_to_plot.add(dataname.lower()) - elif dataname.startswith('rho_'): + elif dataname.startswith("rho_"): # Adds rho_species diagnostic fields_to_plot.add(dataname) @@ -3028,15 +3547,16 @@ def diagnostic_initialize_inputs(self): # --- is the same on all processors. 
fields_to_plot = list(fields_to_plot) fields_to_plot.sort() - self.diagnostic.set_or_replace_attr('fields_to_plot', fields_to_plot) + self.diagnostic.set_or_replace_attr("fields_to_plot", fields_to_plot) - if 'write_species' not in self.diagnostic.argvattrs: + if "write_species" not in self.diagnostic.argvattrs: self.diagnostic.write_species = False self.set_write_dir() -class LabFrameParticleDiagnostic(picmistandard.PICMI_LabFrameParticleDiagnostic, - WarpXDiagnosticBase): +class LabFrameParticleDiagnostic( + picmistandard.PICMI_LabFrameParticleDiagnostic, WarpXDiagnosticBase +): """ See `Input Parameters `__ for more information. @@ -3067,20 +3587,20 @@ class LabFrameParticleDiagnostic(picmistandard.PICMI_LabFrameParticleDiagnostic, warpx_buffer_size: integer, optional Passed to .buffer_size """ + def init(self, kw): - self.format = kw.pop('warpx_format', None) - self.openpmd_backend = kw.pop('warpx_openpmd_backend', None) - self.openpmd_encoding = kw.pop('warpx_openpmd_encoding', None) - self.file_prefix = kw.pop('warpx_file_prefix', None) - self.intervals = kw.pop('warpx_intervals', None) - self.file_min_digits = kw.pop('warpx_file_min_digits', None) - self.buffer_size = kw.pop('warpx_buffer_size', None) + self.format = kw.pop("warpx_format", None) + self.openpmd_backend = kw.pop("warpx_openpmd_backend", None) + self.openpmd_encoding = kw.pop("warpx_openpmd_encoding", None) + self.file_prefix = kw.pop("warpx_file_prefix", None) + self.intervals = kw.pop("warpx_intervals", None) + self.file_min_digits = kw.pop("warpx_file_min_digits", None) + self.buffer_size = kw.pop("warpx_buffer_size", None) def diagnostic_initialize_inputs(self): - self.add_diagnostic() - self.diagnostic.diag_type = 'BackTransformed' + self.diagnostic.diag_type = "BackTransformed" self.diagnostic.format = self.format self.diagnostic.openpmd_backend = self.openpmd_backend self.diagnostic.openpmd_encoding = self.openpmd_encoding @@ -3098,9 +3618,9 @@ def diagnostic_initialize_inputs(self): 
self.diagnostic.do_back_transformed_fields = False - self.diagnostic.set_or_replace_attr('write_species', True) - if 'fields_to_plot' not in self.diagnostic.argvattrs: - self.diagnostic.fields_to_plot = 'none' + self.diagnostic.set_or_replace_attr("write_species", True) + if "fields_to_plot" not in self.diagnostic.argvattrs: + self.diagnostic.fields_to_plot = "none" self.set_write_dir() @@ -3109,39 +3629,59 @@ def diagnostic_initialize_inputs(self): if self.data_list is not None: for dataname in self.data_list: - if dataname == 'position': - if pywarpx.geometry.dims != '1': # because then it's WARPX_DIM_1D_Z - variables.add('x') - if pywarpx.geometry.dims == '3': - variables.add('y') - variables.add('z') - if pywarpx.geometry.dims == 'RZ': - variables.add('theta') - elif dataname == 'momentum': - variables.add('ux') - variables.add('uy') - variables.add('uz') - elif dataname == 'weighting': - variables.add('w') - elif dataname == 'fields': - variables.add('Ex') - variables.add('Ey') - variables.add('Ez') - variables.add('Bx') - variables.add('By') - variables.add('Bz') - elif dataname in ['x', 'y', 'z', 'theta', 'ux', 'uy', 'uz', 'Ex', 'Ey', 'Ez', 'Bx', 'By', 'Bz', 'Er', 'Et', 'Br', 'Bt']: - if pywarpx.geometry.dims == '1' and (dataname == 'x' or dataname == 'y'): + if dataname == "position": + if pywarpx.geometry.dims != "1": # because then it's WARPX_DIM_1D_Z + variables.add("x") + if pywarpx.geometry.dims == "3": + variables.add("y") + variables.add("z") + if pywarpx.geometry.dims == "RZ": + variables.add("theta") + elif dataname == "momentum": + variables.add("ux") + variables.add("uy") + variables.add("uz") + elif dataname == "weighting": + variables.add("w") + elif dataname == "fields": + variables.add("Ex") + variables.add("Ey") + variables.add("Ez") + variables.add("Bx") + variables.add("By") + variables.add("Bz") + elif dataname in [ + "x", + "y", + "z", + "theta", + "ux", + "uy", + "uz", + "Ex", + "Ey", + "Ez", + "Bx", + "By", + "Bz", + "Er", + "Et", + 
"Br", + "Bt", + ]: + if pywarpx.geometry.dims == "1" and ( + dataname == "x" or dataname == "y" + ): raise RuntimeError( f"The attribute {dataname} is not available in mode WARPX_DIM_1D_Z" f"chosen by dim={pywarpx.geometry.dims} in pywarpx." ) - elif pywarpx.geometry.dims != '3' and dataname == 'y': + elif pywarpx.geometry.dims != "3" and dataname == "y": raise RuntimeError( f"The attribute {dataname} is not available outside of mode WARPX_DIM_3D" f"The chosen value was dim={pywarpx.geometry.dims} in pywarpx." ) - elif pywarpx.geometry.dims != 'RZ' and dataname == 'theta': + elif pywarpx.geometry.dims != "RZ" and dataname == "theta": raise RuntimeError( f"The attribute {dataname} is not available outside of mode WARPX_DIM_RZ." f"The chosen value was dim={pywarpx.geometry.dims} in pywarpx." @@ -3163,8 +3703,7 @@ def diagnostic_initialize_inputs(self): species_names = [self.species.name] for name in species_names: - diag = pywarpx.Bucket.Bucket(self.name + '.' + name, - variables = variables) + diag = pywarpx.Bucket.Bucket(self.name + "." 
+ name, variables=variables) self.diagnostic._species_dict[name] = diag @@ -3260,9 +3799,16 @@ class ReducedDiagnostic(picmistandard.base._ClassWithInit, WarpXDiagnosticBase): For diagnostic type 'FieldProbe', the vector specifying up in the 'Plane' """ - def __init__(self, diag_type, name=None, period=1, path=None, - extension=None, separator=None, **kw): - + def __init__( + self, + diag_type, + name=None, + period=1, + path=None, + extension=None, + separator=None, + **kw, + ): self.name = name self.type = diag_type self.intervals = period @@ -3277,21 +3823,29 @@ def __init__(self, diag_type, name=None, period=1, path=None, # The simple diagnostics do not require any additional arguments self._simple_reduced_diagnostics = [ - 'ParticleEnergy', 'ParticleMomentum', 'FieldEnergy', - 'FieldMomentum', 'FieldMaximum', 'RhoMaximum', 'ParticleNumber', - 'LoadBalanceCosts', 'LoadBalanceEfficiency' + "ParticleEnergy", + "ParticleMomentum", + "FieldEnergy", + "FieldMomentum", + "FieldMaximum", + "RhoMaximum", + "ParticleNumber", + "LoadBalanceCosts", + "LoadBalanceEfficiency", ] # The species diagnostics require a species to be provided self._species_reduced_diagnostics = [ - 'BeamRelevant', 'ParticleHistogram', 'ParticleExtrema' + "BeamRelevant", + "ParticleHistogram", + "ParticleExtrema", ] if self.type in self._simple_reduced_diagnostics: pass elif self.type in self._species_reduced_diagnostics: - species = kw.pop('species') + species = kw.pop("species") self.species = species.name - if self.type == 'ParticleHistogram': + if self.type == "ParticleHistogram": kw = self._handle_particle_histogram(**kw) elif self.type == "FieldProbe": kw = self._handle_field_probe(**kw) @@ -3301,8 +3855,7 @@ def __init__(self, diag_type, name=None, period=1, path=None, kw = self._handle_charge_on_eb(**kw) else: raise RuntimeError( - f"{self.type} reduced diagnostic is not yet supported " - "in pywarpx." + f"{self.type} reduced diagnostic is not yet supported " "in pywarpx." 
) self.handle_init(kw) @@ -3318,15 +3871,15 @@ def _handle_field_probe(self, **kw): self.integrate = kw.pop("integrate", None) self.do_moving_window_FP = kw.pop("do_moving_window_FP", None) - if self.probe_geometry.lower() != 'point': + if self.probe_geometry.lower() != "point": self.resolution = kw.pop("resolution") - if self.probe_geometry.lower() == 'line': + if self.probe_geometry.lower() == "line": self.x1_probe = kw.pop("x1_probe", None) self.y1_probe = kw.pop("y1_probe", None) self.z1_probe = kw.pop("z1_probe") - if self.probe_geometry.lower() == 'plane': + if self.probe_geometry.lower() == "plane": self.detector_radius = kw.pop("detector_radius") self.target_normal_x = kw.pop("target_normal_x", None) @@ -3344,9 +3897,15 @@ def _handle_particle_histogram(self, **kw): self.bin_max = kw.pop("bin_max") self.bin_min = kw.pop("bin_min") self.normalization = kw.pop("normalization", None) - if self.normalization not in [None, "unity_particle_weight", "max_to_unity", "area_to_unity"]: + if self.normalization not in [ + None, + "unity_particle_weight", + "max_to_unity", + "area_to_unity", + ]: raise AttributeError( - "The ParticleHistogram normalization must be one of 'unity_particle_weight', 'max_to_unity', or 'area_to_unity'") + "The ParticleHistogram normalization must be one of 'unity_particle_weight', 'max_to_unity', or 'area_to_unity'" + ) histogram_function = kw.pop("histogram_function") filter_function = kw.pop("filter_function", None) @@ -3356,8 +3915,10 @@ def _handle_particle_histogram(self, **kw): # Check the reduced function expressions for constants for k in list(kw.keys()): - if (re.search(r'\b%s\b'%k, histogram_function) or - (filter_function is not None and re.search(r'\b%s\b'%k, filter_function))): + if re.search(r"\b%s\b" % k, histogram_function) or ( + filter_function is not None + and re.search(r"\b%s\b" % k, filter_function) + ): self.user_defined_kw[k] = kw[k] del kw[k] @@ -3367,11 +3928,13 @@ def _handle_field_reduction(self, **kw): 
self.reduction_type = kw.pop("reduction_type") reduced_function = kw.pop("reduced_function") - self.__setattr__("reduced_function(x,y,z,Ex,Ey,Ez,Bx,By,Bz,jx,jy,jz)", reduced_function) + self.__setattr__( + "reduced_function(x,y,z,Ex,Ey,Ez,Bx,By,Bz,jx,jy,jz)", reduced_function + ) # Check the reduced function expression for constants for k in list(kw.keys()): - if re.search(r'\b%s\b'%k, reduced_function): + if re.search(r"\b%s\b" % k, reduced_function): self.user_defined_kw[k] = kw[k] del kw[k] @@ -3384,23 +3947,24 @@ def _handle_charge_on_eb(self, **kw): # Check the reduced function expression for constants for k in list(kw.keys()): - if re.search(r'\b%s\b'%k, weighting_function): + if re.search(r"\b%s\b" % k, weighting_function): self.user_defined_kw[k] = kw[k] del kw[k] return kw def diagnostic_initialize_inputs(self): - self.add_diagnostic() self.mangle_dict = pywarpx.my_constants.add_keywords(self.user_defined_kw) for key, value in self.__dict__.items(): - if not key.startswith('_') and key not in ['name', 'diagnostic']: + if not key.startswith("_") and key not in ["name", "diagnostic"]: if key.endswith(")"): # Analytic expressions require processing to deal with constants - expression = pywarpx.my_constants.mangle_expression(value, self.mangle_dict) + expression = pywarpx.my_constants.mangle_expression( + value, self.mangle_dict + ) self.diagnostic.__setattr__(key, expression) else: self.diagnostic.__setattr__(key, value) diff --git a/Python/setup.py b/Python/setup.py index ed0d4ce29e0..7768813a244 100644 --- a/Python/setup.py +++ b/Python/setup.py @@ -19,8 +19,18 @@ from setuptools import setup argparser = argparse.ArgumentParser(add_help=False) -argparser.add_argument('--with-libwarpx', type=str, default=None, help='Install libwarpx with the given value as DIM. 
This option is only used by the GNU makefile build system.') -argparser.add_argument('--with-lib-dir', type=str, default=None, help='Install with all libwarpx* binaries found in a directory.') +argparser.add_argument( + "--with-libwarpx", + type=str, + default=None, + help="Install libwarpx with the given value as DIM. This option is only used by the GNU makefile build system.", +) +argparser.add_argument( + "--with-lib-dir", + type=str, + default=None, + help="Install with all libwarpx* binaries found in a directory.", +) args, unknown = argparser.parse_known_args() sys.argv = [sys.argv[0]] + unknown @@ -28,38 +38,39 @@ # Allow to control options via environment vars. # Work-around for https://github.com/pypa/setuptools/issues/1712 -PYWARPX_LIB_DIR = os.environ.get('PYWARPX_LIB_DIR') +PYWARPX_LIB_DIR = os.environ.get("PYWARPX_LIB_DIR") if args.with_libwarpx: # GNUmake if args.with_libwarpx not in allowed_dims: print("WARNING: '%s' is not an allowed WarpX DIM" % args.with_libwarpx) - package_data = {'pywarpx' : ['libwarpx.%s.so' % args.with_libwarpx]} + package_data = {"pywarpx": ["libwarpx.%s.so" % args.with_libwarpx]} data_files = [] elif args.with_lib_dir or PYWARPX_LIB_DIR: # CMake and Package Managers - package_data = {'pywarpx' : []} + package_data = {"pywarpx": []} lib_dir = args.with_lib_dir if args.with_lib_dir else PYWARPX_LIB_DIR my_path = os.path.dirname(os.path.realpath(__file__)) for dim in allowed_dims: - lib_name = 'libwarpx.%s.so' % dim + lib_name = "libwarpx.%s.so" % dim lib_path = os.path.join(lib_dir, lib_name) link_name = os.path.join(my_path, "pywarpx", lib_name) if os.path.isfile(link_name): os.remove(link_name) if os.path.isfile(lib_path) and os.access(lib_path, os.R_OK): os.symlink(lib_path, link_name) - package_data['pywarpx'].append(lib_name) + package_data["pywarpx"].append(lib_name) else: package_data = {} -setup(name = 'pywarpx', - version = '24.08', - packages = ['pywarpx'], - package_dir = {'pywarpx': 'pywarpx'}, - description = 
"""Wrapper of WarpX""", - package_data = package_data, - install_requires = ['numpy', 'picmistandard==0.29.0', 'periodictable'], - python_requires = '>=3.8', - zip_safe=False +setup( + name="pywarpx", + version="24.08", + packages=["pywarpx"], + package_dir={"pywarpx": "pywarpx"}, + description="""Wrapper of WarpX""", + package_data=package_data, + install_requires=["numpy", "picmistandard==0.29.0", "periodictable"], + python_requires=">=3.8", + zip_safe=False, ) diff --git a/Regression/Checksum/benchmark.py b/Regression/Checksum/benchmark.py index fbbe44f98b0..549900da628 100644 --- a/Regression/Checksum/benchmark.py +++ b/Regression/Checksum/benchmark.py @@ -34,8 +34,9 @@ def __init__(self, test_name, data=None): """ self.test_name = test_name - self.json_file = os.path.join(config.benchmark_location, - self.test_name + '.json') + self.json_file = os.path.join( + config.benchmark_location, self.test_name + ".json" + ) if data is None: self.data = self.get() else: @@ -45,7 +46,7 @@ def reset(self): """ Update the benchmark (overwrites reference json file). """ - with open(self.json_file, 'w') as outfile: + with open(self.json_file, "w") as outfile: json.dump(self.data, outfile, sort_keys=True, indent=2) def get(self): diff --git a/Regression/Checksum/checksum.py b/Regression/Checksum/checksum.py index 727c8beb7f7..4133d882a41 100644 --- a/Regression/Checksum/checksum.py +++ b/Regression/Checksum/checksum.py @@ -1,10 +1,10 @@ """ - Copyright 2020 +Copyright 2020 - This file is part of WarpX. +This file is part of WarpX. - License: BSD-3-Clause-LBNL - """ +License: BSD-3-Clause-LBNL +""" import json import sys @@ -19,10 +19,16 @@ class Checksum: - """Class for checksum comparison of one test. 
- """ + """Class for checksum comparison of one test.""" - def __init__(self, test_name, output_file, output_format='plotfile', do_fields=True, do_particles=True): + def __init__( + self, + test_name, + output_file, + output_format="plotfile", + do_fields=True, + do_particles=True, + ): """ Checksum constructor. Store test_name, output file name and format, compute checksum @@ -49,8 +55,9 @@ def __init__(self, test_name, output_file, output_format='plotfile', do_fields=T self.test_name = test_name self.output_file = output_file self.output_format = output_format - self.data = self.read_output_file(do_fields=do_fields, - do_particles=do_particles) + self.data = self.read_output_file( + do_fields=do_fields, do_particles=do_particles + ) def read_output_file(self, do_fields=True, do_particles=True): """ @@ -68,38 +75,45 @@ def read_output_file(self, do_fields=True, do_particles=True): Whether to read particles from the output file. """ - if self.output_format == 'plotfile': + if self.output_format == "plotfile": ds = yt.load(self.output_file) # yt 4.0+ has rounding issues with our domain data: # RuntimeError: yt attempted to read outside the boundaries # of a non-periodic domain along dimension 0. 
- if 'force_periodicity' in dir(ds): ds.force_periodicity() - grid_fields = [item for item in ds.field_list if item[0] == 'boxlib'] + if "force_periodicity" in dir(ds): + ds.force_periodicity() + grid_fields = [item for item in ds.field_list if item[0] == "boxlib"] # "fields"/"species" we remove: # - nbody: added by yt by default, unused by us - species_list = set([item[0] for item in ds.field_list if - item[1][:9] == 'particle_' and - item[0] != 'all' and - item[0] != 'nbody']) + species_list = set( + [ + item[0] + for item in ds.field_list + if item[1][:9] == "particle_" + and item[0] != "all" + and item[0] != "nbody" + ] + ) data = {} # Compute checksum for field quantities if do_fields: - for lev in range(ds.max_level+1): + for lev in range(ds.max_level + 1): data_lev = {} - lev_grids = [grid for grid in ds.index.grids - if grid.Level == lev] + lev_grids = [grid for grid in ds.index.grids if grid.Level == lev] # Warning: For now, we assume all levels are rectangular LeftEdge = np.min( - np.array([grid.LeftEdge.v for grid in lev_grids]), axis=0) + np.array([grid.LeftEdge.v for grid in lev_grids]), axis=0 + ) all_data_level = ds.covering_grid( - level=lev, left_edge=LeftEdge, dims=ds.domain_dimensions) + level=lev, left_edge=LeftEdge, dims=ds.domain_dimensions + ) for field in grid_fields: Q = all_data_level[field].v.squeeze() data_lev[field[1]] = np.sum(np.abs(Q)) - data['lev=' + str(lev)] = data_lev + data["lev=" + str(lev)] = data_lev # Compute checksum for particle quantities if do_particles: @@ -109,49 +123,63 @@ def read_output_file(self, do_fields=True, do_particles=True): # - particle_cpu/id: they depend on the parallelism: MPI-ranks and # on-node acceleration scheme, thus not portable # and irrelevant for physics benchmarking - part_fields = [item[1] for item in ds.field_list - if item[0] == species and - item[1] != 'particle_cpu' and - item[1] != 'particle_id' - ] + part_fields = [ + item[1] + for item in ds.field_list + if item[0] == species + and 
item[1] != "particle_cpu" + and item[1] != "particle_id" + ] data_species = {} for field in part_fields: Q = ad[(species, field)].v data_species[field] = np.sum(np.abs(Q)) data[species] = data_species - elif self.output_format == 'openpmd': + elif self.output_format == "openpmd": # Load time series ts = OpenPMDTimeSeries(self.output_file) data = {} # Compute number of MR levels # TODO This calculation of nlevels assumes that the last element # of level_fields is by default on the highest MR level. - level_fields = [field for field in ts.avail_fields if 'lvl' in field] + level_fields = [field for field in ts.avail_fields if "lvl" in field] nlevels = 0 if level_fields == [] else int(level_fields[-1][-1]) # Compute checksum for field quantities if do_fields: - for lev in range(nlevels+1): + for lev in range(nlevels + 1): # Create list of fields specific to level lev grid_fields = [] if lev == 0: - grid_fields = [field for field in ts.avail_fields if 'lvl' not in field] + grid_fields = [ + field for field in ts.avail_fields if "lvl" not in field + ] else: - grid_fields = [field for field in ts.avail_fields if f'lvl{lev}' in field] + grid_fields = [ + field for field in ts.avail_fields if f"lvl{lev}" in field + ] data_lev = {} for field in grid_fields: - vector_components = ts.fields_metadata[field]['avail_components'] + vector_components = ts.fields_metadata[field][ + "avail_components" + ] if vector_components != []: for coord in vector_components: - Q, info = ts.get_field(field=field, iteration=ts.iterations[-1], coord=coord) + Q, info = ts.get_field( + field=field, + iteration=ts.iterations[-1], + coord=coord, + ) # key stores strings composed of field name and vector components # (e.g., field='B' or field='B_lvl1' + coord='y' results in key='By') - key = field.replace(f'_lvl{lev}', '') + coord + key = field.replace(f"_lvl{lev}", "") + coord data_lev[key] = np.sum(np.abs(Q)) - else: # scalar field - Q, info = ts.get_field(field=field, iteration=ts.iterations[-1]) + 
else: # scalar field + Q, info = ts.get_field( + field=field, iteration=ts.iterations[-1] + ) data_lev[field] = np.sum(np.abs(Q)) - data[f'lev={lev}'] = data_lev + data[f"lev={lev}"] = data_lev # Compute checksum for particle quantities if do_particles: species_list = [] @@ -159,27 +187,36 @@ def read_output_file(self, do_fields=True, do_particles=True): species_list = ts.avail_record_components.keys() for species in species_list: data_species = {} - part_fields = [item for item in ts.avail_record_components[species] - if item != 'id' and item != 'charge' and item != 'mass'] + part_fields = [ + item + for item in ts.avail_record_components[species] + if item != "id" and item != "charge" and item != "mass" + ] # Convert the field name to the name used in plotfiles for field in part_fields: - Q = ts.get_particle(var_list=[field], species=species, iteration=ts.iterations[-1]) - if field in ['x', 'y', 'z']: - field_name = 'particle_position_' + field - elif field in ['ux', 'uy', 'uz']: - field_name = 'particle_momentum_' + field[-1] - m, = ts.get_particle(['mass'], species=species, iteration=ts.iterations[-1]) - Q *= m*c - elif field in ['w']: - field_name = 'particle_weight' + Q = ts.get_particle( + var_list=[field], + species=species, + iteration=ts.iterations[-1], + ) + if field in ["x", "y", "z"]: + field_name = "particle_position_" + field + elif field in ["ux", "uy", "uz"]: + field_name = "particle_momentum_" + field[-1] + (m,) = ts.get_particle( + ["mass"], species=species, iteration=ts.iterations[-1] + ) + Q *= m * c + elif field in ["w"]: + field_name = "particle_weight" else: - field_name = 'particle_' + field + field_name = "particle_" + field data_species[field_name] = np.sum(np.abs(Q)) data[species] = data_species return data - def evaluate(self, rtol=1.e-9, atol=1.e-40): + def evaluate(self, rtol=1.0e-9, atol=1.0e-40): """ Compare output file checksum with benchmark. 
Read checksum from output file, read benchmark @@ -199,9 +236,11 @@ def evaluate(self, rtol=1.e-9, atol=1.e-40): ref_benchmark = Benchmark(self.test_name) # Dictionaries have same outer keys (levels, species)? - if (self.data.keys() != ref_benchmark.data.keys()): - print("ERROR: Benchmark and output file checksum " - "have different outer keys:") + if self.data.keys() != ref_benchmark.data.keys(): + print( + "ERROR: Benchmark and output file checksum " + "have different outer keys:" + ) print("Benchmark: %s" % ref_benchmark.data.keys()) print("Test file: %s" % self.data.keys()) print("\n----------------\nNew file for " + self.test_name + ":") @@ -211,14 +250,17 @@ def evaluate(self, rtol=1.e-9, atol=1.e-40): # Dictionaries have same inner keys (field and particle quantities)? for key1 in ref_benchmark.data.keys(): - if (self.data[key1].keys() != ref_benchmark.data[key1].keys()): - print("ERROR: Benchmark and output file checksum have " - "different inner keys:") + if self.data[key1].keys() != ref_benchmark.data[key1].keys(): + print( + "ERROR: Benchmark and output file checksum have " + "different inner keys:" + ) print("Common outer keys: %s" % ref_benchmark.data.keys()) - print("Benchmark inner keys in %s: %s" - % (key1, ref_benchmark.data[key1].keys())) - print("Test file inner keys in %s: %s" - % (key1, self.data[key1].keys())) + print( + "Benchmark inner keys in %s: %s" + % (key1, ref_benchmark.data[key1].keys()) + ) + print("Test file inner keys in %s: %s" % (key1, self.data[key1].keys())) print("\n----------------\nNew file for " + self.test_name + ":") print(json.dumps(self.data, indent=2)) print("----------------") @@ -228,23 +270,31 @@ def evaluate(self, rtol=1.e-9, atol=1.e-40): checksums_differ = False for key1 in ref_benchmark.data.keys(): for key2 in ref_benchmark.data[key1].keys(): - passed = np.isclose(self.data[key1][key2], - ref_benchmark.data[key1][key2], - rtol=rtol, atol=atol) + passed = np.isclose( + self.data[key1][key2], + 
ref_benchmark.data[key1][key2], + rtol=rtol, + atol=atol, + ) if not passed: - print("ERROR: Benchmark and output file checksum have " - "different value for key [%s,%s]" % (key1, key2)) - print("Benchmark: [%s,%s] %.15e" - % (key1, key2, ref_benchmark.data[key1][key2])) - print("Test file: [%s,%s] %.15e" - % (key1, key2, self.data[key1][key2])) + print( + "ERROR: Benchmark and output file checksum have " + "different value for key [%s,%s]" % (key1, key2) + ) + print( + "Benchmark: [%s,%s] %.15e" + % (key1, key2, ref_benchmark.data[key1][key2]) + ) + print( + "Test file: [%s,%s] %.15e" % (key1, key2, self.data[key1][key2]) + ) checksums_differ = True # Print absolute and relative error for each failing key x = ref_benchmark.data[key1][key2] y = self.data[key1][key2] abs_err = np.abs(x - y) print("Absolute error: {:.2e}".format(abs_err)) - if (np.abs(x) != 0.): + if np.abs(x) != 0.0: rel_err = abs_err / np.abs(x) print("Relative error: {:.2e}".format(rel_err)) if checksums_differ: diff --git a/Regression/Checksum/checksumAPI.py b/Regression/Checksum/checksumAPI.py index 11adae5b5e1..85aac38c3d4 100755 --- a/Regression/Checksum/checksumAPI.py +++ b/Regression/Checksum/checksumAPI.py @@ -1,12 +1,12 @@ #! /usr/bin/env python3 """ - Copyright 2020 +Copyright 2020 - This file is part of WarpX. +This file is part of WarpX. - License: BSD-3-Clause-LBNL - """ +License: BSD-3-Clause-LBNL +""" import argparse import glob @@ -35,8 +35,15 @@ """ -def evaluate_checksum(test_name, output_file, output_format='plotfile', rtol=1.e-9, atol=1.e-40, - do_fields=True, do_particles=True): +def evaluate_checksum( + test_name, + output_file, + output_format="plotfile", + rtol=1.0e-9, + atol=1.0e-40, + do_fields=True, + do_particles=True, +): """ Compare output file checksum with benchmark. Read checksum from output file, read benchmark @@ -69,19 +76,34 @@ def evaluate_checksum(test_name, output_file, output_format='plotfile', rtol=1.e Whether to compare particles in the checksum. 
""" # Reset benchmark? - reset = ( os.getenv('CHECKSUM_RESET', 'False').lower() in - ['true', '1', 't', 'y', 'yes', 'on'] ) + reset = os.getenv("CHECKSUM_RESET", "False").lower() in [ + "true", + "1", + "t", + "y", + "yes", + "on", + ] if reset: - print(f"Environment variable CHECKSUM_RESET is set, resetting benchmark for {test_name}") + print( + f"Environment variable CHECKSUM_RESET is set, resetting benchmark for {test_name}" + ) reset_benchmark(test_name, output_file, output_format, do_fields, do_particles) else: - test_checksum = Checksum(test_name, output_file, output_format, - do_fields=do_fields, do_particles=do_particles) + test_checksum = Checksum( + test_name, + output_file, + output_format, + do_fields=do_fields, + do_particles=do_particles, + ) test_checksum.evaluate(rtol=rtol, atol=atol) -def reset_benchmark(test_name, output_file, output_format='plotfile', do_fields=True, do_particles=True): +def reset_benchmark( + test_name, output_file, output_format="plotfile", do_fields=True, do_particles=True +): """ Update the benchmark (overwrites reference json file). Overwrite value of benchmark corresponding to @@ -104,13 +126,18 @@ def reset_benchmark(test_name, output_file, output_format='plotfile', do_fields= do_particles: bool, default=True Whether to write particles checksums in the benchmark. 
""" - ref_checksum = Checksum(test_name, output_file, output_format, - do_fields=do_fields, do_particles=do_particles) + ref_checksum = Checksum( + test_name, + output_file, + output_format, + do_fields=do_fields, + do_particles=do_particles, + ) ref_benchmark = Benchmark(test_name, ref_checksum.data) ref_benchmark.reset() -def reset_all_benchmarks(path_to_all_output_files, output_format='plotfile'): +def reset_all_benchmarks(path_to_all_output_files, output_format="plotfile"): """ Update all benchmarks (overwrites reference json files) found in path_to_all_output_files @@ -127,8 +154,9 @@ def reset_all_benchmarks(path_to_all_output_files, output_format='plotfile'): """ # Get list of output files in path_to_all_output_files - output_file_list = glob.glob(path_to_all_output_files + '*_plt*[0-9]', - recursive=True) + output_file_list = glob.glob( + path_to_all_output_files + "*_plt*[0-9]", recursive=True + ) output_file_list.sort() # Loop over output files and reset the corresponding benchmark @@ -137,68 +165,122 @@ def reset_all_benchmarks(path_to_all_output_files, output_format='plotfile'): reset_benchmark(test_name, output_file, output_format) -if __name__ == '__main__': - +if __name__ == "__main__": parser = argparse.ArgumentParser() # Options relevant to evaluate a checksum or reset a benchmark - parser.add_argument('--evaluate', dest='evaluate', action='store_true', - default=False, help='Evaluate a checksum.') - parser.add_argument('--reset-benchmark', dest='reset_benchmark', - default=False, - action='store_true', help='Reset a benchmark.') - parser.add_argument('--test-name', dest='test_name', type=str, default='', - required='--evaluate' in sys.argv or - '--reset-benchmark' in sys.argv, - help='Name of the test (as in WarpX-tests.ini)') - parser.add_argument('--output-file', dest='output_file', type=str, default='', - required='--evaluate' in sys.argv or - '--reset-benchmark' in sys.argv, - help='Name of WarpX output file') - 
parser.add_argument('--output-format', dest='output_format', type=str, default='plotfile', - required='--evaluate' in sys.argv or - '--reset-benchmark' in sys.argv, - help='Format of the output file (plotfile, openpmd)') - parser.add_argument('--skip-fields', dest='do_fields', - default=True, action='store_false', - help='If used, do not read/write field checksums') - parser.add_argument('--skip-particles', dest='do_particles', - default=True, action='store_false', - help='If used, do not read/write particle checksums') + parser.add_argument( + "--evaluate", + dest="evaluate", + action="store_true", + default=False, + help="Evaluate a checksum.", + ) + parser.add_argument( + "--reset-benchmark", + dest="reset_benchmark", + default=False, + action="store_true", + help="Reset a benchmark.", + ) + parser.add_argument( + "--test-name", + dest="test_name", + type=str, + default="", + required="--evaluate" in sys.argv or "--reset-benchmark" in sys.argv, + help="Name of the test (as in WarpX-tests.ini)", + ) + parser.add_argument( + "--output-file", + dest="output_file", + type=str, + default="", + required="--evaluate" in sys.argv or "--reset-benchmark" in sys.argv, + help="Name of WarpX output file", + ) + parser.add_argument( + "--output-format", + dest="output_format", + type=str, + default="plotfile", + required="--evaluate" in sys.argv or "--reset-benchmark" in sys.argv, + help="Format of the output file (plotfile, openpmd)", + ) + parser.add_argument( + "--skip-fields", + dest="do_fields", + default=True, + action="store_false", + help="If used, do not read/write field checksums", + ) + parser.add_argument( + "--skip-particles", + dest="do_particles", + default=True, + action="store_false", + help="If used, do not read/write particle checksums", + ) # Fields and/or particles are read from output file/written to benchmark? 
- parser.add_argument('--rtol', dest='rtol', - type=float, default=1.e-9, - help='relative tolerance for comparison') - parser.add_argument('--atol', dest='atol', - type=float, default=1.e-40, - help='absolute tolerance for comparison') + parser.add_argument( + "--rtol", + dest="rtol", + type=float, + default=1.0e-9, + help="relative tolerance for comparison", + ) + parser.add_argument( + "--atol", + dest="atol", + type=float, + default=1.0e-40, + help="absolute tolerance for comparison", + ) # Option to reset all benchmarks present in a folder. - parser.add_argument('--reset-all-benchmarks', dest='reset_all_benchmarks', - action='store_true', default=False, - help='Reset all benchmarks.') - parser.add_argument('--path-to-all-output-files', - dest='path_to_all_output_files', type=str, default='', - required='--reset-all-benchmarks' in sys.argv, - help='Directory containing all benchmark output files, \ + parser.add_argument( + "--reset-all-benchmarks", + dest="reset_all_benchmarks", + action="store_true", + default=False, + help="Reset all benchmarks.", + ) + parser.add_argument( + "--path-to-all-output-files", + dest="path_to_all_output_files", + type=str, + default="", + required="--reset-all-benchmarks" in sys.argv, + help="Directory containing all benchmark output files, \ typically WarpX-benchmarks generated by \ - regression_testing/regtest.py') + regression_testing/regtest.py", + ) args = parser.parse_args() if args.reset_benchmark: - reset_benchmark(args.test_name, args.output_file, args.output_format, - do_fields=args.do_fields, - do_particles=args.do_particles) + reset_benchmark( + args.test_name, + args.output_file, + args.output_format, + do_fields=args.do_fields, + do_particles=args.do_particles, + ) if args.evaluate: - evaluate_checksum(args.test_name, args.output_file, args.output_format, - rtol=args.rtol, atol=args.atol, - do_fields=args.do_fields, do_particles=args.do_particles) + evaluate_checksum( + args.test_name, + args.output_file, + 
args.output_format, + rtol=args.rtol, + atol=args.atol, + do_fields=args.do_fields, + do_particles=args.do_particles, + ) if args.reset_all_benchmarks: - if args.output_format == 'openpmd': - sys.exit('Option --reset-all-benchmarks does not work with openPMD format') + if args.output_format == "openpmd": + sys.exit("Option --reset-all-benchmarks does not work with openPMD format") # WARNING: this mode does not support skip-fields/particles and tolerances reset_all_benchmarks(args.path_to_all_output_files, args.output_format) diff --git a/Regression/Checksum/config.py b/Regression/Checksum/config.py index fb9dbb223c8..8d19d82beaa 100644 --- a/Regression/Checksum/config.py +++ b/Regression/Checksum/config.py @@ -1,11 +1,11 @@ """ - Copyright 2020 +Copyright 2020 - This file is part of WarpX. +This file is part of WarpX. - License: BSD-3-Clause-LBNL - """ +License: BSD-3-Clause-LBNL +""" import os -benchmark_location = os.path.split(__file__)[0] + '/benchmarks_json' +benchmark_location = os.path.split(__file__)[0] + "/benchmarks_json" diff --git a/Regression/PostProcessingUtils/post_processing_utils.py b/Regression/PostProcessingUtils/post_processing_utils.py index 50026ca1076..55bc357c28b 100644 --- a/Regression/PostProcessingUtils/post_processing_utils.py +++ b/Regression/PostProcessingUtils/post_processing_utils.py @@ -13,118 +13,157 @@ ## This is a generic function to test a particle filter. We reproduce the filter in python and ## verify that the results are the same as with the WarpX filtered diagnostic. 
def check_particle_filter(fn, filtered_fn, filter_expression, dim, species_name): - ds = yt.load( fn ) - ds_filtered = yt.load( filtered_fn ) - ad = ds.all_data() - ad_filtered = ds_filtered.all_data() + ds = yt.load(fn) + ds_filtered = yt.load(filtered_fn) + ad = ds.all_data() + ad_filtered = ds_filtered.all_data() ## Load arrays from the unfiltered diagnostic - ids = ad[species_name, 'particle_id'].to_ndarray() - cpus = ad[species_name, 'particle_cpu'].to_ndarray() - px = ad[species_name, 'particle_momentum_x'].to_ndarray() - pz = ad[species_name, 'particle_momentum_z'].to_ndarray() - py = ad[species_name, 'particle_momentum_y'].to_ndarray() - w = ad[species_name, 'particle_weight'].to_ndarray() - if (dim == "2d"): - x = ad[species_name, 'particle_position_x'].to_ndarray() - z = ad[species_name, 'particle_position_y'].to_ndarray() - elif (dim == "3d"): - x = ad[species_name, 'particle_position_x'].to_ndarray() - y = ad[species_name, 'particle_position_y'].to_ndarray() - z = ad[species_name, 'particle_position_z'].to_ndarray() - elif (dim == "rz"): - r = ad[species_name, 'particle_position_x'].to_ndarray() - z = ad[species_name, 'particle_position_y'].to_ndarray() - theta = ad[species_name, 'particle_theta'].to_ndarray() + ids = ad[species_name, "particle_id"].to_ndarray() + cpus = ad[species_name, "particle_cpu"].to_ndarray() + px = ad[species_name, "particle_momentum_x"].to_ndarray() + pz = ad[species_name, "particle_momentum_z"].to_ndarray() + py = ad[species_name, "particle_momentum_y"].to_ndarray() + w = ad[species_name, "particle_weight"].to_ndarray() + if dim == "2d": + x = ad[species_name, "particle_position_x"].to_ndarray() + z = ad[species_name, "particle_position_y"].to_ndarray() + elif dim == "3d": + x = ad[species_name, "particle_position_x"].to_ndarray() + y = ad[species_name, "particle_position_y"].to_ndarray() + z = ad[species_name, "particle_position_z"].to_ndarray() + elif dim == "rz": + r = ad[species_name, "particle_position_x"].to_ndarray() + 
z = ad[species_name, "particle_position_y"].to_ndarray() + theta = ad[species_name, "particle_theta"].to_ndarray() ## Load arrays from the filtered diagnostic - ids_filtered_warpx = ad_filtered[species_name, 'particle_id'].to_ndarray() - cpus_filtered_warpx = ad_filtered[species_name, 'particle_cpu'].to_ndarray() - px_filtered_warpx = ad_filtered[species_name, 'particle_momentum_x'].to_ndarray() - pz_filtered_warpx = ad_filtered[species_name, 'particle_momentum_z'].to_ndarray() - py_filtered_warpx = ad_filtered[species_name, 'particle_momentum_y'].to_ndarray() - w_filtered_warpx = ad_filtered[species_name, 'particle_weight'].to_ndarray() - if (dim == "2d"): - x_filtered_warpx = ad_filtered[species_name, 'particle_position_x'].to_ndarray() - z_filtered_warpx = ad_filtered[species_name, 'particle_position_y'].to_ndarray() - elif (dim == "3d"): - x_filtered_warpx = ad_filtered[species_name, 'particle_position_x'].to_ndarray() - y_filtered_warpx = ad_filtered[species_name, 'particle_position_y'].to_ndarray() - z_filtered_warpx = ad_filtered[species_name, 'particle_position_z'].to_ndarray() - elif (dim == "rz"): - r_filtered_warpx = ad_filtered[species_name, 'particle_position_x'].to_ndarray() - z_filtered_warpx = ad_filtered[species_name, 'particle_position_y'].to_ndarray() - theta_filtered_warpx = ad_filtered[species_name, 'particle_theta'].to_ndarray() + ids_filtered_warpx = ad_filtered[species_name, "particle_id"].to_ndarray() + cpus_filtered_warpx = ad_filtered[species_name, "particle_cpu"].to_ndarray() + px_filtered_warpx = ad_filtered[species_name, "particle_momentum_x"].to_ndarray() + pz_filtered_warpx = ad_filtered[species_name, "particle_momentum_z"].to_ndarray() + py_filtered_warpx = ad_filtered[species_name, "particle_momentum_y"].to_ndarray() + w_filtered_warpx = ad_filtered[species_name, "particle_weight"].to_ndarray() + if dim == "2d": + x_filtered_warpx = ad_filtered[species_name, "particle_position_x"].to_ndarray() + z_filtered_warpx = 
ad_filtered[species_name, "particle_position_y"].to_ndarray() + elif dim == "3d": + x_filtered_warpx = ad_filtered[species_name, "particle_position_x"].to_ndarray() + y_filtered_warpx = ad_filtered[species_name, "particle_position_y"].to_ndarray() + z_filtered_warpx = ad_filtered[species_name, "particle_position_z"].to_ndarray() + elif dim == "rz": + r_filtered_warpx = ad_filtered[species_name, "particle_position_x"].to_ndarray() + z_filtered_warpx = ad_filtered[species_name, "particle_position_y"].to_ndarray() + theta_filtered_warpx = ad_filtered[species_name, "particle_theta"].to_ndarray() ## Reproduce the filter in python: this returns the indices of the filtered particles in the ## unfiltered arrays. - ind_filtered_python, = np.where(eval(filter_expression)) + (ind_filtered_python,) = np.where(eval(filter_expression)) ## Sort the indices of the filtered arrays by particle id. - sorted_ind_filtered_python = ind_filtered_python[np.argsort(ids[ind_filtered_python])] + sorted_ind_filtered_python = ind_filtered_python[ + np.argsort(ids[ind_filtered_python]) + ] sorted_ind_filtered_warpx = np.argsort(ids_filtered_warpx) ## Check that the sorted ids are exactly the same with the warpx filter and the filter ## reproduced in python - assert(np.array_equal(ids[sorted_ind_filtered_python], - ids_filtered_warpx[sorted_ind_filtered_warpx])) - assert(np.array_equal(cpus[sorted_ind_filtered_python], - cpus_filtered_warpx[sorted_ind_filtered_warpx])) + assert np.array_equal( + ids[sorted_ind_filtered_python], ids_filtered_warpx[sorted_ind_filtered_warpx] + ) + assert np.array_equal( + cpus[sorted_ind_filtered_python], cpus_filtered_warpx[sorted_ind_filtered_warpx] + ) ## Finally, we check that the sum of the particles quantities are the same to machine precision - tolerance_checksum = 1.e-12 - check_array_sum(px[sorted_ind_filtered_python], - px_filtered_warpx[sorted_ind_filtered_warpx], tolerance_checksum) - check_array_sum(pz[sorted_ind_filtered_python], - 
pz_filtered_warpx[sorted_ind_filtered_warpx], tolerance_checksum) - check_array_sum(py[sorted_ind_filtered_python], - py_filtered_warpx[sorted_ind_filtered_warpx], tolerance_checksum) - check_array_sum(w[sorted_ind_filtered_python], - w_filtered_warpx[sorted_ind_filtered_warpx], tolerance_checksum) - check_array_sum(z[sorted_ind_filtered_python], - z_filtered_warpx[sorted_ind_filtered_warpx], tolerance_checksum) - if (dim == "2d"): - check_array_sum(x[sorted_ind_filtered_python], - x_filtered_warpx[sorted_ind_filtered_warpx], tolerance_checksum) - elif (dim == "3d"): - check_array_sum(x[sorted_ind_filtered_python], - x_filtered_warpx[sorted_ind_filtered_warpx], tolerance_checksum) - check_array_sum(y[sorted_ind_filtered_python], - y_filtered_warpx[sorted_ind_filtered_warpx], tolerance_checksum) - elif (dim == "rz"): - check_array_sum(r[sorted_ind_filtered_python], - r_filtered_warpx[sorted_ind_filtered_warpx], tolerance_checksum) - check_array_sum(theta[sorted_ind_filtered_python], - theta_filtered_warpx[sorted_ind_filtered_warpx], tolerance_checksum) + tolerance_checksum = 1.0e-12 + check_array_sum( + px[sorted_ind_filtered_python], + px_filtered_warpx[sorted_ind_filtered_warpx], + tolerance_checksum, + ) + check_array_sum( + pz[sorted_ind_filtered_python], + pz_filtered_warpx[sorted_ind_filtered_warpx], + tolerance_checksum, + ) + check_array_sum( + py[sorted_ind_filtered_python], + py_filtered_warpx[sorted_ind_filtered_warpx], + tolerance_checksum, + ) + check_array_sum( + w[sorted_ind_filtered_python], + w_filtered_warpx[sorted_ind_filtered_warpx], + tolerance_checksum, + ) + check_array_sum( + z[sorted_ind_filtered_python], + z_filtered_warpx[sorted_ind_filtered_warpx], + tolerance_checksum, + ) + if dim == "2d": + check_array_sum( + x[sorted_ind_filtered_python], + x_filtered_warpx[sorted_ind_filtered_warpx], + tolerance_checksum, + ) + elif dim == "3d": + check_array_sum( + x[sorted_ind_filtered_python], + x_filtered_warpx[sorted_ind_filtered_warpx], + 
tolerance_checksum, + ) + check_array_sum( + y[sorted_ind_filtered_python], + y_filtered_warpx[sorted_ind_filtered_warpx], + tolerance_checksum, + ) + elif dim == "rz": + check_array_sum( + r[sorted_ind_filtered_python], + r_filtered_warpx[sorted_ind_filtered_warpx], + tolerance_checksum, + ) + check_array_sum( + theta[sorted_ind_filtered_python], + theta_filtered_warpx[sorted_ind_filtered_warpx], + tolerance_checksum, + ) + ## This function checks that the absolute sums of two arrays are the same to a required precision def check_array_sum(array1, array2, tolerance_checksum): sum1 = np.sum(np.abs(array1)) sum2 = np.sum(np.abs(array2)) - assert(abs(sum2-sum1)/sum1 < tolerance_checksum) + assert abs(sum2 - sum1) / sum1 < tolerance_checksum + ## This function is specifically used to test the random filter. First, we check that the number of ## dumped particles is as expected. Next, we call the generic check_particle_filter function. def check_random_filter(fn, filtered_fn, random_fraction, dim, species_name): - ds = yt.load( fn ) - ds_filtered = yt.load( filtered_fn ) - ad = ds.all_data() - ad_filtered = ds_filtered.all_data() + ds = yt.load(fn) + ds_filtered = yt.load(filtered_fn) + ad = ds.all_data() + ad_filtered = ds_filtered.all_data() ## Check that the number of particles is as expected - numparts = ad[species_name, 'particle_id'].to_ndarray().shape[0] - numparts_filtered = ad_filtered['particle_id'].to_ndarray().shape[0] - expected_numparts_filtered = random_fraction*numparts + numparts = ad[species_name, "particle_id"].to_ndarray().shape[0] + numparts_filtered = ad_filtered["particle_id"].to_ndarray().shape[0] + expected_numparts_filtered = random_fraction * numparts # 5 sigma test that has an intrinsic probability to fail of 1 over ~2 millions std_numparts_filtered = np.sqrt(expected_numparts_filtered) - error = abs(numparts_filtered-expected_numparts_filtered) - print("Random filter: difference between expected and actual number of dumped particles: " \ - + 
str(error)) - print("tolerance: " + str(5*std_numparts_filtered)) - assert(error<5*std_numparts_filtered) + error = abs(numparts_filtered - expected_numparts_filtered) + print( + "Random filter: difference between expected and actual number of dumped particles: " + + str(error) + ) + print("tolerance: " + str(5 * std_numparts_filtered)) + assert error < 5 * std_numparts_filtered ## Dirty trick to find particles with the same ID + same CPU (does not work with more than 10 ## MPI ranks) - random_filter_expression = 'np.isin(ids + 0.1*cpus,' \ - 'ids_filtered_warpx + 0.1*cpus_filtered_warpx)' + random_filter_expression = ( + "np.isin(ids + 0.1*cpus," "ids_filtered_warpx + 0.1*cpus_filtered_warpx)" + ) check_particle_filter(fn, filtered_fn, random_filter_expression, dim, species_name) diff --git a/Regression/prepare_file_ci.py b/Regression/prepare_file_ci.py index 16f78e074ea..cb9bf0304f3 100644 --- a/Regression/prepare_file_ci.py +++ b/Regression/prepare_file_ci.py @@ -15,147 +15,163 @@ import re # Get relevant environment variables -arch = os.environ.get('WARPX_TEST_ARCH', 'CPU') - -ci_regular_cartesian_1d = os.environ.get('WARPX_CI_REGULAR_CARTESIAN_1D') == 'TRUE' -ci_regular_cartesian_2d = os.environ.get('WARPX_CI_REGULAR_CARTESIAN_2D') == 'TRUE' -ci_regular_cartesian_3d = os.environ.get('WARPX_CI_REGULAR_CARTESIAN_3D') == 'TRUE' -ci_psatd = os.environ.get('WARPX_CI_PSATD', 'TRUE') == 'TRUE' -ci_single_precision = os.environ.get('WARPX_CI_SINGLE_PRECISION') == 'TRUE' -ci_rz_or_nompi = os.environ.get('WARPX_CI_RZ_OR_NOMPI') == 'TRUE' -ci_qed = os.environ.get('WARPX_CI_QED') == 'TRUE' -ci_eb = os.environ.get('WARPX_CI_EB') == 'TRUE' -ci_openpmd = os.environ.get('WARPX_CI_OPENPMD') == 'TRUE' -ci_ccache = os.environ.get('WARPX_CI_CCACHE') == 'TRUE' -ci_num_make_jobs = os.environ.get('WARPX_CI_NUM_MAKE_JOBS', None) +arch = os.environ.get("WARPX_TEST_ARCH", "CPU") + +ci_regular_cartesian_1d = os.environ.get("WARPX_CI_REGULAR_CARTESIAN_1D") == "TRUE" 
+ci_regular_cartesian_2d = os.environ.get("WARPX_CI_REGULAR_CARTESIAN_2D") == "TRUE"
+ci_regular_cartesian_3d = os.environ.get("WARPX_CI_REGULAR_CARTESIAN_3D") == "TRUE"
+ci_psatd = os.environ.get("WARPX_CI_PSATD", "TRUE") == "TRUE"
+ci_single_precision = os.environ.get("WARPX_CI_SINGLE_PRECISION") == "TRUE"
+ci_rz_or_nompi = os.environ.get("WARPX_CI_RZ_OR_NOMPI") == "TRUE"
+ci_qed = os.environ.get("WARPX_CI_QED") == "TRUE"
+ci_eb = os.environ.get("WARPX_CI_EB") == "TRUE"
+ci_openpmd = os.environ.get("WARPX_CI_OPENPMD") == "TRUE"
+ci_ccache = os.environ.get("WARPX_CI_CCACHE") == "TRUE"
+ci_num_make_jobs = os.environ.get("WARPX_CI_NUM_MAKE_JOBS", None)
 
 # Find the directory in which the tests should be run
 current_dir = os.getcwd()
-test_dir = re.sub('warpx/Regression', '', current_dir )
+test_dir = re.sub("warpx/Regression", "", current_dir)
 
-with open('WarpX-tests.ini') as f:
+with open("WarpX-tests.ini") as f:
     text = f.read()
 
 # Replace default folder name
-text = re.sub('/home/regtester/AMReX_RegTesting', test_dir, text)
+text = re.sub("/home/regtester/AMReX_RegTesting", test_dir, text)
 
 # Remove the web directory
-text = re.sub('[\w\-\/]*/web', '', text)
+text = re.sub("[\w\-\/]*/web", "", text)
 
 # Add doComparison = 0 for each test
-text = re.sub( '\[(?P<name>.*)\]\nbuildDir = ',
-               '[\g<name>]\ndoComparison = 0\nbuildDir = ', text )
+text = re.sub(
+    "\[(?P<name>.*)\]\nbuildDir = ", "[\g<name>]\ndoComparison = 0\nbuildDir = ", text
+)
 
 # Change compile options when running on GPU
-if arch == 'GPU':
-    text = re.sub( 'addToCompileString =',
-                   'addToCompileString = USE_GPU=TRUE USE_OMP=FALSE ', text)
-print('Compiling for %s' %arch)
+if arch == "GPU":
+    text = re.sub(
+        "addToCompileString =", "addToCompileString = USE_GPU=TRUE USE_OMP=FALSE ", text
+    )
+print("Compiling for %s" % arch)
 
 # Extra dependencies
 if ci_openpmd:
-    text = re.sub('addToCompileString =',
-                  'addToCompileString = USE_OPENPMD=TRUE ', text)
+    text = re.sub(
+        "addToCompileString =", "addToCompileString = USE_OPENPMD=TRUE 
", text + ) # always build with PSATD support (runtime controlled if used) if ci_psatd: - text = re.sub('addToCompileString =', - 'addToCompileString = USE_FFT=TRUE ', text) - text = re.sub('USE_FFT=FALSE', - '', text) + text = re.sub("addToCompileString =", "addToCompileString = USE_FFT=TRUE ", text) + text = re.sub("USE_FFT=FALSE", "", text) # CCache if ci_ccache: - text = re.sub('addToCompileString =', - 'addToCompileString = USE_CCACHE=TRUE ', text) + text = re.sub("addToCompileString =", "addToCompileString = USE_CCACHE=TRUE ", text) # Add runtime options: # > crash for unused variables # > trap NaNs, divisions by zero, and overflows # > abort upon any warning message by default -text = re.sub('runtime_params =', - 'runtime_params = amrex.abort_on_unused_inputs=1 '+ - 'amrex.fpe_trap_invalid=1 amrex.fpe_trap_zero=1 amrex.fpe_trap_overflow=1 '+ - 'warpx.always_warn_immediately=1 warpx.abort_on_warning_threshold=low', - text) +text = re.sub( + "runtime_params =", + "runtime_params = amrex.abort_on_unused_inputs=1 " + + "amrex.fpe_trap_invalid=1 amrex.fpe_trap_zero=1 amrex.fpe_trap_overflow=1 " + + "warpx.always_warn_immediately=1 warpx.abort_on_warning_threshold=low", + text, +) # Add runtime options for CPU: # > serialize initial conditions and no dynamic scheduling in OpenMP -if arch == 'CPU': - text = re.sub('runtime_params =', - 'runtime_params = '+ - 'warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1', - text) +if arch == "CPU": + text = re.sub( + "runtime_params =", + "runtime_params = " + + "warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1", + text, + ) # Use less/more cores for compiling, e.g. 
public CI only provides 2 cores if ci_num_make_jobs is not None: - text = re.sub( 'numMakeJobs = \d+', 'numMakeJobs = {}'.format(ci_num_make_jobs), text ) + text = re.sub( + "numMakeJobs = \d+", "numMakeJobs = {}".format(ci_num_make_jobs), text + ) # Prevent emails from being sent -text = re.sub( 'sendEmailWhenFail = 1', 'sendEmailWhenFail = 0', text ) +text = re.sub("sendEmailWhenFail = 1", "sendEmailWhenFail = 0", text) # Select the tests to be run # -------------------------- # - Extract test blocks (they are identified by the fact that they contain "inputFile") -select_test_regex = r'(\[(.+\n)*inputFile(.+\n)*)' -test_blocks = [ match[0] for match in re.findall(select_test_regex, text) ] +select_test_regex = r"(\[(.+\n)*inputFile(.+\n)*)" +test_blocks = [match[0] for match in re.findall(select_test_regex, text)] # - Remove the test blocks from `text` (only the selected ones will be added back) -text = re.sub( select_test_regex, '', text ) +text = re.sub(select_test_regex, "", text) + def select_tests(blocks, match_string_list, do_test): """Remove or keep tests from list in WarpX-tests.ini according to do_test variable""" if do_test not in [True, False]: raise ValueError("do_test must be True or False") - if (do_test is False): + if do_test is False: for match_string in match_string_list: - print('Selecting tests without ' + match_string) - blocks = [ block for block in blocks if match_string not in block ] + print("Selecting tests without " + match_string) + blocks = [block for block in blocks if match_string not in block] else: for match_string in match_string_list: - print('Selecting tests with ' + match_string) - blocks = [ block for block in blocks if match_string in block ] + print("Selecting tests with " + match_string) + blocks = [block for block in blocks if match_string in block] return blocks + if ci_regular_cartesian_1d: - test_blocks = select_tests(test_blocks, ['dim = 1'], True) - test_blocks = select_tests(test_blocks, ['USE_RZ=TRUE'], False) - 
test_blocks = select_tests(test_blocks, ['PRECISION=FLOAT', 'USE_SINGLE_PRECISION_PARTICLES=TRUE'], False) - test_blocks = select_tests(test_blocks, ['useMPI = 0'], False) - test_blocks = select_tests(test_blocks, ['QED=TRUE'], False) - test_blocks = select_tests(test_blocks, ['USE_EB=TRUE'], False) + test_blocks = select_tests(test_blocks, ["dim = 1"], True) + test_blocks = select_tests(test_blocks, ["USE_RZ=TRUE"], False) + test_blocks = select_tests( + test_blocks, ["PRECISION=FLOAT", "USE_SINGLE_PRECISION_PARTICLES=TRUE"], False + ) + test_blocks = select_tests(test_blocks, ["useMPI = 0"], False) + test_blocks = select_tests(test_blocks, ["QED=TRUE"], False) + test_blocks = select_tests(test_blocks, ["USE_EB=TRUE"], False) if ci_regular_cartesian_2d: - test_blocks = select_tests(test_blocks, ['dim = 2'], True) - test_blocks = select_tests(test_blocks, ['USE_RZ=TRUE'], False) - test_blocks = select_tests(test_blocks, ['PRECISION=FLOAT', 'USE_SINGLE_PRECISION_PARTICLES=TRUE'], False) - test_blocks = select_tests(test_blocks, ['useMPI = 0'], False) - test_blocks = select_tests(test_blocks, ['QED=TRUE'], False) - test_blocks = select_tests(test_blocks, ['USE_EB=TRUE'], False) + test_blocks = select_tests(test_blocks, ["dim = 2"], True) + test_blocks = select_tests(test_blocks, ["USE_RZ=TRUE"], False) + test_blocks = select_tests( + test_blocks, ["PRECISION=FLOAT", "USE_SINGLE_PRECISION_PARTICLES=TRUE"], False + ) + test_blocks = select_tests(test_blocks, ["useMPI = 0"], False) + test_blocks = select_tests(test_blocks, ["QED=TRUE"], False) + test_blocks = select_tests(test_blocks, ["USE_EB=TRUE"], False) if ci_regular_cartesian_3d: - test_blocks = select_tests(test_blocks, ['dim = 3'], True) - test_blocks = select_tests(test_blocks, ['PRECISION=FLOAT', 'USE_SINGLE_PRECISION_PARTICLES=TRUE'], False) - test_blocks = select_tests(test_blocks, ['useMPI = 0'], False) - test_blocks = select_tests(test_blocks, ['QED=TRUE'], False) - test_blocks = select_tests(test_blocks, 
['USE_EB=TRUE'], False) + test_blocks = select_tests(test_blocks, ["dim = 3"], True) + test_blocks = select_tests( + test_blocks, ["PRECISION=FLOAT", "USE_SINGLE_PRECISION_PARTICLES=TRUE"], False + ) + test_blocks = select_tests(test_blocks, ["useMPI = 0"], False) + test_blocks = select_tests(test_blocks, ["QED=TRUE"], False) + test_blocks = select_tests(test_blocks, ["USE_EB=TRUE"], False) if ci_single_precision: - test_blocks = select_tests(test_blocks, ['PRECISION=FLOAT', 'USE_SINGLE_PRECISION_PARTICLES=TRUE'], True) + test_blocks = select_tests( + test_blocks, ["PRECISION=FLOAT", "USE_SINGLE_PRECISION_PARTICLES=TRUE"], True + ) if ci_rz_or_nompi: - block1 = select_tests(test_blocks, ['USE_RZ=TRUE'], True) - block2 = select_tests(test_blocks, ['useMPI = 0'], True) + block1 = select_tests(test_blocks, ["USE_RZ=TRUE"], True) + block2 = select_tests(test_blocks, ["useMPI = 0"], True) test_blocks = block1 + block2 if ci_qed: - test_blocks = select_tests(test_blocks, ['QED=TRUE'], True) + test_blocks = select_tests(test_blocks, ["QED=TRUE"], True) if ci_eb: - test_blocks = select_tests(test_blocks, ['USE_RZ=TRUE'], False) - test_blocks = select_tests(test_blocks, ['USE_EB=TRUE'], True) + test_blocks = select_tests(test_blocks, ["USE_RZ=TRUE"], False) + test_blocks = select_tests(test_blocks, ["USE_EB=TRUE"], True) # - Add the selected test blocks to the text -text = text + '\n' + '\n'.join(test_blocks) +text = text + "\n" + "\n".join(test_blocks) -with open('ci-tests.ini', 'w') as f: +with open("ci-tests.ini", "w") as f: f.write(text) diff --git a/Source/Utils/Physics/write_atomic_data_cpp.py b/Source/Utils/Physics/write_atomic_data_cpp.py index 11cd3b2c0c5..e1572871ada 100644 --- a/Source/Utils/Physics/write_atomic_data_cpp.py +++ b/Source/Utils/Physics/write_atomic_data_cpp.py @@ -7,84 +7,93 @@ # # License: BSD-3-Clause-LBNL -''' +""" This python script reads ionization tables in atomic_data.txt (generated from the NIST website) and extracts ionization levels into 
C++ file IonizationEnergiesTable.H, which contains tables + metadata. -''' +""" import os import re import numpy as np -filename = os.path.join( '.', 'atomic_data.txt' ) +filename = os.path.join(".", "atomic_data.txt") with open(filename) as f: text_data = f.read() # Read full table from file and get names, atomic numbers and offsets # position in table of ionization energies for all species -regex_command = '\n\s+(\d+)\s+\|\s+([A-Z]+[a-z]*)\s+\w+\s+\|\s+\+*(\d+)\s+\|\s+\(*\[*(\d+\.*\d*)' -list_of_tuples = re.findall( regex_command, text_data ) -ion_atom_numbers = [int(i) for i in list(dict.fromkeys( [x[0] for x in list_of_tuples] ))] -ion_names = list(dict.fromkeys( [x[1] for x in list_of_tuples] )) +regex_command = ( + "\n\s+(\d+)\s+\|\s+([A-Z]+[a-z]*)\s+\w+\s+\|\s+\+*(\d+)\s+\|\s+\(*\[*(\d+\.*\d*)" +) +list_of_tuples = re.findall(regex_command, text_data) +ion_atom_numbers = [int(i) for i in list(dict.fromkeys([x[0] for x in list_of_tuples]))] +ion_names = list(dict.fromkeys([x[1] for x in list_of_tuples])) ion_offsets = np.concatenate(([0], np.cumsum(np.array(ion_atom_numbers)[:-1])), axis=0) # Head of CPP file -cpp_string = '// This script was automatically generated!\n' -cpp_string += '// Edit dev/Source/Utils/Physics/write_atomic_data_cpp.py instead!\n\n' -cpp_string += '#ifndef WARPX_UTILS_PHYSICS_IONIZATION_TABLE_H_\n' -cpp_string += '#define WARPX_UTILS_PHYSICS_IONIZATION_TABLE_H_\n\n' -cpp_string += '#include \n\n' -cpp_string += '#include \n' -cpp_string += '#include \n\n' -cpp_string += 'namespace utils::physics\n' -cpp_string += '{\n' +cpp_string = "// This script was automatically generated!\n" +cpp_string += "// Edit dev/Source/Utils/Physics/write_atomic_data_cpp.py instead!\n\n" +cpp_string += "#ifndef WARPX_UTILS_PHYSICS_IONIZATION_TABLE_H_\n" +cpp_string += "#define WARPX_UTILS_PHYSICS_IONIZATION_TABLE_H_\n\n" +cpp_string += "#include \n\n" +cpp_string += "#include \n" +cpp_string += "#include \n\n" +cpp_string += "namespace utils::physics\n" 
+cpp_string += "{\n" # Map each element to ID in table -cpp_string += ' static std::map const ion_map_ids = {' +cpp_string += " static std::map const ion_map_ids = {" for count, name in enumerate(ion_names): - cpp_string += '\n {"' + name + '", ' + str(count) + '},' + cpp_string += '\n {"' + name + '", ' + str(count) + "}," cpp_string = cpp_string[:-1] -cpp_string += ' };\n\n' +cpp_string += " };\n\n" # Atomic number of each species -cpp_string += ' constexpr int nelements = ' + str(len(ion_names)) + ';\n\n' -cpp_string += ' constexpr int ion_atomic_numbers[nelements] = {\n ' +cpp_string += " constexpr int nelements = " + str(len(ion_names)) + ";\n\n" +cpp_string += " constexpr int ion_atomic_numbers[nelements] = {\n " for count, atom_num in enumerate(ion_atom_numbers): - if count%10==0 and count>0: cpp_string = cpp_string[:-2] + ',\n ' - cpp_string += str(atom_num) + ', ' + if count % 10 == 0 and count > 0: + cpp_string = cpp_string[:-2] + ",\n " + cpp_string += str(atom_num) + ", " cpp_string = cpp_string[:-2] -cpp_string += '};\n\n' +cpp_string += "};\n\n" # Offset of each element in table of ionization energies -cpp_string += ' constexpr int ion_energy_offsets[nelements] = {\n ' +cpp_string += " constexpr int ion_energy_offsets[nelements] = {\n " for count, offset in enumerate(ion_offsets): - if count%10==0 and count>0: cpp_string = cpp_string[:-2] + ',\n ' - cpp_string += str(offset) + ', ' + if count % 10 == 0 and count > 0: + cpp_string = cpp_string[:-2] + ",\n " + cpp_string += str(offset) + ", " cpp_string = cpp_string[:-2] -cpp_string += '};\n\n' +cpp_string += "};\n\n" # Table of ionization energies -cpp_string += ' constexpr int energies_tab_length = ' + str(len(list_of_tuples)) + ';\n\n' -cpp_string += ' constexpr amrex::Real table_ionization_energies[energies_tab_length]{' +cpp_string += ( + " constexpr int energies_tab_length = " + str(len(list_of_tuples)) + ";\n\n" +) +cpp_string += ( + " constexpr amrex::Real 
table_ionization_energies[energies_tab_length]{" +) for element in ion_names: - cpp_string += '\n // ' + element + '\n ' - regex_command = \ - '\n\s+(\d+)\s+\|\s+%s\s+\w+\s+\|\s+\+*(\d+)\s+\|\s+\(*\[*(\d+\.*\d*)' \ - %element - list_of_tuples = re.findall( regex_command, text_data ) + cpp_string += "\n // " + element + "\n " + regex_command = ( + "\n\s+(\d+)\s+\|\s+%s\s+\w+\s+\|\s+\+*(\d+)\s+\|\s+\(*\[*(\d+\.*\d*)" % element + ) + list_of_tuples = re.findall(regex_command, text_data) for count, energy in enumerate([x[2] for x in list_of_tuples]): - if count%3==0 and count>0: cpp_string = cpp_string[:-2] + ',\n ' - cpp_string += "amrex::Real(" + energy + '), ' + if count % 3 == 0 and count > 0: + cpp_string = cpp_string[:-2] + ",\n " + cpp_string += "amrex::Real(" + energy + "), " cpp_string = cpp_string[:-1] cpp_string = cpp_string[:-1] -cpp_string += '\n };\n\n' +cpp_string += "\n };\n\n" -cpp_string += '}\n\n' +cpp_string += "}\n\n" # Write the string to file -cpp_string += '#endif // #ifndef WARPX_UTILS_PHYSICS_IONIZATION_TABLE_H_\n' -f= open("IonizationEnergiesTable.H","w") +cpp_string += "#endif // #ifndef WARPX_UTILS_PHYSICS_IONIZATION_TABLE_H_\n" +f = open("IonizationEnergiesTable.H", "w") f.write(cpp_string) f.close() diff --git a/Source/Utils/check_interp_points_and_weights.py b/Source/Utils/check_interp_points_and_weights.py index 8bf2cf08490..2f5a6e13b96 100644 --- a/Source/Utils/check_interp_points_and_weights.py +++ b/Source/Utils/check_interp_points_and_weights.py @@ -4,7 +4,7 @@ # # License: BSD-3-Clause-LBNL -#------------------------------------------------------------------------------- +# ------------------------------------------------------------------------------- # Compute interpolation points and weights for coarsening and refinement in IO # and MR applications in 1D (extensions to 2D and 3D are trivial). 
Weights are # computed in order to guarantee total charge conservation for both cell-centered @@ -23,7 +23,7 @@ # while terms multiplied by sf*sc are ON for nodal data and OFF for cell-centered # data. C++ implementation in Source/ablastr/coarsen/average.(H/.cpp) and # Source/ablastr/coarsen/sample.(H/.cpp) -#------------------------------------------------------------------------------- +# ------------------------------------------------------------------------------- import sys @@ -31,57 +31,67 @@ # Fine grid limits (without ghost cells) -def fine_grid_limits( sf ): - if ( sf == 0 ): # cell-centered - iimin = 0 - iimax = 7 - elif ( sf == 1 ): # nodal - iimin = 0 - iimax = 8 - return [ iimin, iimax ] +def fine_grid_limits(sf): + if sf == 0: # cell-centered + iimin = 0 + iimax = 7 + elif sf == 1: # nodal + iimin = 0 + iimax = 8 + return [iimin, iimax] + # Coarse grid limits (without ghost cells) -def coarse_grid_limits( sc, sf, iimin, iimax ): - imin = int( iimin/cr ) - imax = int( iimax/cr )-(1-sc)*sf+(1-sf)*sc - return [ imin, imax ] +def coarse_grid_limits(sc, sf, iimin, iimax): + imin = int(iimin / cr) + imax = int(iimax / cr) - (1 - sc) * sf + (1 - sf) * sc + return [imin, imax] + # Coarsening for MR: interpolation points and weights -def coarsening_points_and_weights( i, sc, sf, cr ): - if ( cr==1 ): +def coarsening_points_and_weights(i, sc, sf, cr): + if cr == 1: numpts = 1 idxmin = i - elif ( cr>=2 ): - numpts = cr*(1-sf)*(1-sc)+(2*(cr-1)+1)*sf*sc - idxmin = i*cr*(1-sf)*(1-sc)+(i*cr-cr+1)*sf*sc - weights = np.zeros( numpts ) - for ir in range( numpts ): - ii = idxmin+ir - weights[ir] = (1/cr)*(1-sf)*(1-sc)+((abs(cr-abs(ii-i*cr)))/(cr*cr))*sf*sc - return [ numpts, idxmin, weights ] + elif cr >= 2: + numpts = cr * (1 - sf) * (1 - sc) + (2 * (cr - 1) + 1) * sf * sc + idxmin = i * cr * (1 - sf) * (1 - sc) + (i * cr - cr + 1) * sf * sc + weights = np.zeros(numpts) + for ir in range(numpts): + ii = idxmin + ir + weights[ir] = (1 / cr) * (1 - sf) * (1 - sc) + ( + 
(abs(cr - abs(ii - i * cr))) / (cr * cr) + ) * sf * sc + return [numpts, idxmin, weights] + # Refinement for MR: interpolation points and weights -def refinement_points_and_weights( ii, sc, sf, cr ): - if ( cr==1 ): +def refinement_points_and_weights(ii, sc, sf, cr): + if cr == 1: numpts = 1 idxmin = ii - elif ( cr>=2 ): - if ( ii%cr==0 ): - numpts = (1-sf)*(1-sc)+sf*sc - elif ( ii%cr!=0 ): - numpts = (1-sf)*(1-sc)+2*sf*sc - idxmin = (ii//cr)*(1-sf)*(1-sc)+(ii//cr)*sf*sc - weights = np.zeros( numpts ) - for ir in range( numpts ): - i = idxmin+ir - if ( ii==iimin or ii==iimax ): - weights[ir] = (1-sf)*(1-sc)+((abs(cr-abs(ii-i*cr)))/(cr)+(cr/2-0.5))*sf*sc + elif cr >= 2: + if ii % cr == 0: + numpts = (1 - sf) * (1 - sc) + sf * sc + elif ii % cr != 0: + numpts = (1 - sf) * (1 - sc) + 2 * sf * sc + idxmin = (ii // cr) * (1 - sf) * (1 - sc) + (ii // cr) * sf * sc + weights = np.zeros(numpts) + for ir in range(numpts): + i = idxmin + ir + if ii == iimin or ii == iimax: + weights[ir] = (1 - sf) * (1 - sc) + ( + (abs(cr - abs(ii - i * cr))) / (cr) + (cr / 2 - 0.5) + ) * sf * sc else: - weights[ir] = (1-sf)*(1-sc)+((abs(cr-abs(ii-i*cr)))/(cr))*sf*sc - return [ numpts, idxmin, weights ] + weights[ir] = (1 - sf) * (1 - sc) + ( + (abs(cr - abs(ii - i * cr))) / (cr) + ) * sf * sc + return [numpts, idxmin, weights] + ## TODO Coarsening for IO: interpolation points and weights -#def coarsening_points_and_weights_for_IO( i, sf, sc, cr ): +# def coarsening_points_and_weights_for_IO( i, sf, sc, cr ): # if ( cr==1 ): # numpts = 1+abs(sf-sc) # idxmin = i-sc*(1-sf) @@ -93,98 +103,113 @@ def refinement_points_and_weights( ii, sc, sf, cr ): # weights[ir] = (1/numpts)*(1-sf)*(1-sc)+(1/numpts)*sf*sc # return [ numpts, idxmin, weights ] -#------------------------------------------------------------------------------- +# ------------------------------------------------------------------------------- # Main -#------------------------------------------------------------------------------- +# 
------------------------------------------------------------------------------- # Input coarsening ratio -cr = int( input( "\n Select coarsening ratio (cr=1,2,4): cr=" ) ) -if ( cr!=1 and cr!=2 and cr!=4 ): +cr = int(input("\n Select coarsening ratio (cr=1,2,4): cr=")) +if cr != 1 and cr != 2 and cr != 4: print() - sys.exit( 'coarsening ratio cr={} is not valid'.format( cr ) ) + sys.exit("coarsening ratio cr={} is not valid".format(cr)) # Loop over possible staggering of coarse and fine grid (cell-centered or nodal) -for sc in [0,1]: - for sf in [0,1]: - - print( '\n **************************************************' ) - print( ' * Staggering of coarse grid: sc={}'.format( sc ), end='' ) - if ( sc == 0 ): - print( ' cell-centered *' ) - elif ( sc == 1 ): - print( ' nodal *' ) - print( ' * Staggering of fine grid: sf={}'.format( sf ), end='' ) - if ( sf == 0 ): - print( ' cell-centered *' ) - elif ( sf == 1 ): - print( ' nodal *' ) - print( ' **************************************************' ) - - iimin,iimax = fine_grid_limits( sf ) - imin ,imax = coarse_grid_limits( sc, sf, iimin, iimax ) - - print( '\n Min and max index on coarse grid: imin={} imax={}'.format( imin, imax ) ) - print( ' Min and max index on fine grid: iimin={} iimax={}'.format( iimin, iimax ) ) +for sc in [0, 1]: + for sf in [0, 1]: + print("\n **************************************************") + print(" * Staggering of coarse grid: sc={}".format(sc), end="") + if sc == 0: + print(" cell-centered *") + elif sc == 1: + print(" nodal *") + print(" * Staggering of fine grid: sf={}".format(sf), end="") + if sf == 0: + print(" cell-centered *") + elif sf == 1: + print(" nodal *") + print(" **************************************************") + + iimin, iimax = fine_grid_limits(sf) + imin, imax = coarse_grid_limits(sc, sf, iimin, iimax) + + print( + "\n Min and max index on coarse grid: imin={} imax={}".format(imin, imax) + ) + print( + " Min and max index on fine grid: iimin={} 
iimax={}".format(iimin, iimax) + ) # Number of grid points - nc = imax-imin+1 - nf = iimax-iimin+1 - - print( '\n Number of points on coarse grid: nc={}'.format( nc ) ) - print( ' Number of points on fine grid: nf={}'.format( nf ) ) - - if ( sf!=sc ): - print( '\n WARNING: sc={} not equal to sf={}, not implemented for MR, continue ...'.format( sc, sf ) ) + nc = imax - imin + 1 + nf = iimax - iimin + 1 + + print("\n Number of points on coarse grid: nc={}".format(nc)) + print(" Number of points on fine grid: nf={}".format(nf)) + + if sf != sc: + print( + "\n WARNING: sc={} not equal to sf={}, not implemented for MR, continue ...".format( + sc, sf + ) + ) continue - print( '\n Coarsening for MR: check interpolation points and weights' ) - print( ' ---------------------------------------------------------' ) + print("\n Coarsening for MR: check interpolation points and weights") + print(" ---------------------------------------------------------") # Coarsening for MR: interpolation points and weights - for i in range ( nc ): # index on coarse grid - numpts,idxmin,weights = coarsening_points_and_weights( i, sc, sf, cr ) - print( '\n Find value at i={} by interpolating over the following points and weights:'.format( i ) ) - for ir in range( numpts ): # interpolation points and weights - ii = idxmin+ir - print( ' ({},{})'.format( ii, weights[ir] ), end='' ) - if not ( ir == numpts-1 ): - print( ' ', end='' ) + for i in range(nc): # index on coarse grid + numpts, idxmin, weights = coarsening_points_and_weights(i, sc, sf, cr) + print( + "\n Find value at i={} by interpolating over the following points and weights:".format( + i + ) + ) + for ir in range(numpts): # interpolation points and weights + ii = idxmin + ir + print(" ({},{})".format(ii, weights[ir]), end="") + if not (ir == numpts - 1): + print(" ", end="") print() # Coarsening for MR: check conservation properties - for ii in range( nf ): # index on fine grid + for ii in range(nf): # index on fine grid ws = 0.0 - 
for i in range( nc ): # index on coarse grid - numpts,idxmin,weights = coarsening_points_and_weights( i, sc, sf, cr ) - for ir in range( numpts ): # interpolation points and weights - jj = idxmin+ir - if ( jj==ii ): # interpolation point matches point on fine grid - ws += weights[ir] - if ( ws!=1.0/cr ): - print( '\n ERROR: sum of weights ws={} should be 1/cr'.format( ws ) ) - - print( '\n Refinement for MR: check interpolation points and weights' ) - print( ' ---------------------------------------------------------' ) + for i in range(nc): # index on coarse grid + numpts, idxmin, weights = coarsening_points_and_weights(i, sc, sf, cr) + for ir in range(numpts): # interpolation points and weights + jj = idxmin + ir + if jj == ii: # interpolation point matches point on fine grid + ws += weights[ir] + if ws != 1.0 / cr: + print("\n ERROR: sum of weights ws={} should be 1/cr".format(ws)) + + print("\n Refinement for MR: check interpolation points and weights") + print(" ---------------------------------------------------------") # Refinement for MR: interpolation points and weights - for ii in range ( nf ): # index on fine grid - numpts,idxmin,weights = refinement_points_and_weights( ii, sc, sf, cr ) - print( '\n Find value at ii={} by interpolating over the following points and weights:'.format( ii ) ) - for ir in range( numpts ): # interpolation points and weights - i = idxmin+ir - print( ' ({},{})'.format( i, weights[ir] ), end='' ) - if not ( ir == numpts-1 ): - print( ' ', end='' ) + for ii in range(nf): # index on fine grid + numpts, idxmin, weights = refinement_points_and_weights(ii, sc, sf, cr) + print( + "\n Find value at ii={} by interpolating over the following points and weights:".format( + ii + ) + ) + for ir in range(numpts): # interpolation points and weights + i = idxmin + ir + print(" ({},{})".format(i, weights[ir]), end="") + if not (ir == numpts - 1): + print(" ", end="") print() # Refinement for MR: check conservation properties - for i in range( 
nc ): # index on coarse grid + for i in range(nc): # index on coarse grid ws = 0.0 - for ii in range( nf ): # index on fine grid - numpts,idxmin,weights = refinement_points_and_weights( ii, sc, sf, cr ) - for ir in range( numpts ): # interpolation points and weights - jj = idxmin+ir - if ( jj==i ): # interpolation point matches point on coarse grid - ws += weights[ir] - if ( ws!=cr ): - print( '\n ERROR: sum of weights ws={} should be cr'.format( ws ) ) + for ii in range(nf): # index on fine grid + numpts, idxmin, weights = refinement_points_and_weights(ii, sc, sf, cr) + for ir in range(numpts): # interpolation points and weights + jj = idxmin + ir + if jj == i: # interpolation point matches point on coarse grid + ws += weights[ir] + if ws != cr: + print("\n ERROR: sum of weights ws={} should be cr".format(ws)) diff --git a/Tools/Algorithms/stencil.py b/Tools/Algorithms/stencil.py index dde7398daaa..2fe67d1c681 100644 --- a/Tools/Algorithms/stencil.py +++ b/Tools/Algorithms/stencil.py @@ -15,18 +15,19 @@ import os import sys -sys.path.append('../Parser/') +sys.path.append("../Parser/") import matplotlib.pyplot as plt import numpy as np from input_file_parser import parse_input_file from scipy.constants import c -plt.style.use('tableau-colorblind10') -plt.rcParams.update({'font.size': 14}) +plt.style.use("tableau-colorblind10") +plt.rcParams.update({"font.size": 14}) sp = np.finfo(np.float32).eps dp = np.finfo(np.float64).eps + def get_Fornberg_coeffs(order, staggered): """ Compute the centered or staggered Fornberg coefficients at finite order. @@ -43,24 +44,29 @@ def get_Fornberg_coeffs(order, staggered): coeffs : numpy.ndarray Array of centered or staggered Fornberg coefficients. """ - m = order//2 - coeffs = np.zeros(m+1) + m = order // 2 + coeffs = np.zeros(m + 1) # Compute Fornberg coefficients by recurrence if staggered: - prod = 1. 
- for k in range(1, m+1): - prod = prod*(m+k)/(4*k) - coeffs[0] = 4*m*prod**2 - for n in range(1, m+1): - coeffs[n] = -(((2*n-3)*(m+1-n))/((2*n-1)*(m-1+n))*coeffs[n-1]) + prod = 1.0 + for k in range(1, m + 1): + prod = prod * (m + k) / (4 * k) + coeffs[0] = 4 * m * prod**2 + for n in range(1, m + 1): + coeffs[n] = -( + ((2 * n - 3) * (m + 1 - n)) + / ((2 * n - 1) * (m - 1 + n)) + * coeffs[n - 1] + ) else: - coeffs[0] = -2. - for n in range(1, m+1): - coeffs[n] = -(m+1-n)/(m+n)*coeffs[n-1] + coeffs[0] = -2.0 + for n in range(1, m + 1): + coeffs[n] = -(m + 1 - n) / (m + n) * coeffs[n - 1] return coeffs + def modified_k(kx, dx, order, staggered): """ Compute the centered or staggered modified wave vector at finite order. @@ -81,24 +87,29 @@ def modified_k(kx, dx, order, staggered): k_mod : numpy.ndarray Centered or staggered modified wave vector. """ - m = order//2 + m = order // 2 coeffs = get_Fornberg_coeffs(order, staggered) # Array of values for n: from 1 to m - n = np.arange(1, m+1) + n = np.arange(1, m + 1) # Array of values of sin # (first axis corresponds to k and second axis to n) if staggered: - sin_kn = (np.sin(kx[:,np.newaxis]*(n[np.newaxis,:]-0.5)*dx)/((n[np.newaxis,:]-0.5)*dx)) + sin_kn = np.sin(kx[:, np.newaxis] * (n[np.newaxis, :] - 0.5) * dx) / ( + (n[np.newaxis, :] - 0.5) * dx + ) else: - sin_kn = (np.sin(kx[:,np.newaxis]*n[np.newaxis,:]*dx)/(n[np.newaxis,:]*dx)) + sin_kn = np.sin(kx[:, np.newaxis] * n[np.newaxis, :] * dx) / ( + n[np.newaxis, :] * dx + ) # Modified k - k_mod = np.tensordot(sin_kn, coeffs[1:], axes=(-1,-1)) + k_mod = np.tensordot(sin_kn, coeffs[1:], axes=(-1, -1)) return k_mod + def func_cosine(om, w_c, dt): """ Compute the leading spectral coefficient of the general PSATD equations: @@ -120,10 +131,11 @@ def func_cosine(om, w_c, dt): coeff : numpy.ndarray Leading spectral coefficient of the general PSATD equations. 
""" - theta_c = np.exp(1.j*w_c*dt*0.5) - coeff = theta_c**2*np.cos(om*dt) + theta_c = np.exp(1.0j * w_c * dt * 0.5) + coeff = theta_c**2 * np.cos(om * dt) return coeff + def compute_stencils(coeff_nodal, coeff_stagg, axis): """ Compute nodal and staggered stencils along a given direction. @@ -159,17 +171,20 @@ def compute_stencils(coeff_nodal, coeff_stagg, axis): # Average over the other two directions i1 = (axis + 1) % 3 i2 = (axis + 2) % 3 - stencil_avg_nodal = (stencil_nodal.sum(axis=(i1,i2)) / - (stencil_nodal.shape[i1]*stencil_nodal.shape[i2])) - stencil_avg_stagg = (stencil_stagg.sum(axis=(i1,i2)) / - (stencil_stagg.shape[i1]*stencil_stagg.shape[i2])) + stencil_avg_nodal = stencil_nodal.sum(axis=(i1, i2)) / ( + stencil_nodal.shape[i1] * stencil_nodal.shape[i2] + ) + stencil_avg_stagg = stencil_stagg.sum(axis=(i1, i2)) / ( + stencil_stagg.shape[i1] * stencil_stagg.shape[i2] + ) stencils = dict() - stencils['nodal'] = abs(stencil_avg_nodal) - stencils['stagg'] = abs(stencil_avg_stagg) + stencils["nodal"] = abs(stencil_avg_nodal) + stencils["stagg"] = abs(stencil_avg_stagg) return stencils + def compute_all(dx_boosted, dt, psatd_order, v_gal, nx=None): """ Compute nodal and staggered stencils along all directions. 
@@ -200,12 +215,12 @@ def compute_all(dx_boosted, dt, psatd_order, v_gal, nx=None): nx = np.full(shape=dims, fill_value=256) # k vectors and modified k vectors - k_arr = [] + k_arr = [] k_arr_c = [] k_arr_s = [] for i in range(dims): - k_arr.append(2*np.pi*np.fft.fftfreq(nx[i], dx_boosted[i])) - if psatd_order[i] != 'inf': + k_arr.append(2 * np.pi * np.fft.fftfreq(nx[i], dx_boosted[i])) + if psatd_order[i] != "inf": k_arr_c.append(modified_k(k_arr[i], dx_boosted[i], psatd_order[i], False)) k_arr_s.append(modified_k(k_arr[i], dx_boosted[i], psatd_order[i], True)) else: @@ -219,9 +234,9 @@ def compute_all(dx_boosted, dt, psatd_order, v_gal, nx=None): kk_s = np.sqrt(sum(k**2 for k in k_s)) # Frequencies - om_c = c*kk_c - om_s = c*kk_s - w_c = v_gal*k_c[-1] + om_c = c * kk_c + om_s = c * kk_s + w_c = v_gal * k_c[-1] # Spectral coefficient coeff_nodal = func_cosine(om_c, w_c, dt) @@ -234,6 +249,7 @@ def compute_all(dx_boosted, dt, psatd_order, v_gal, nx=None): return stencils + def compute_guard_cells(errmin, errmax, stencil): """ Compute the minimum number of guard cells for a given error threshold @@ -254,15 +270,16 @@ def compute_guard_cells(errmin, errmax, stencil): """ diff = stencil - errmin v = next(d for d in diff if d < 0) - gcmin = np.argwhere(diff == v)[0,0] + gcmin = np.argwhere(diff == v)[0, 0] diff = stencil - errmax try: v = next(d for d in diff if d < 0) - gcmax = np.argwhere(diff == v)[0,0] - 1 + gcmax = np.argwhere(diff == v)[0, 0] - 1 except StopIteration: - gcmin, gcmax = compute_guard_cells(errmin, errmax*10, stencil) + gcmin, gcmax = compute_guard_cells(errmin, errmax * 10, stencil) return (gcmin, gcmax) + def plot_stencil(cells, stencil_nodal, stencil_stagg, label, path, name): """ Plot stencil extent for nodal and staggered/hybrid solver, @@ -281,34 +298,37 @@ def plot_stencil(cells, stencil_nodal, stencil_stagg, label, path, name): name : str Label for figure name. 
""" - fig = plt.figure(figsize=[10,6]) + fig = plt.figure(figsize=[10, 6]) ax = fig.add_subplot(111) - ax.plot(cells, stencil_nodal, linestyle='-', label='nodal') - ax.plot(cells, stencil_stagg, linestyle='-', label='staggered or hybrid') + ax.plot(cells, stencil_nodal, linestyle="-", label="nodal") + ax.plot(cells, stencil_stagg, linestyle="-", label="staggered or hybrid") # Plot single and double precision machine epsilons - ax.axhline(y=sp, c='grey', ls='dashed', label='machine epsilon (single precision)') - ax.axhline(y=dp, c='grey', ls='dotted', label='machine epsilon (double precision)') + ax.axhline(y=sp, c="grey", ls="dashed", label="machine epsilon (single precision)") + ax.axhline(y=dp, c="grey", ls="dotted", label="machine epsilon (double precision)") # Shade regions between single and double precision machine epsilons xmin, xmax = compute_guard_cells(sp, dp, stencil_nodal) - ax.fill_between(cells[xmin:xmax+1], stencil_nodal[xmin:xmax+1], alpha=0.5) + ax.fill_between(cells[xmin : xmax + 1], stencil_nodal[xmin : xmax + 1], alpha=0.5) xmin, xmax = compute_guard_cells(sp, dp, stencil_stagg) - ax.fill_between(cells[xmin:xmax+1], stencil_stagg[xmin:xmax+1], alpha=0.5) + ax.fill_between(cells[xmin : xmax + 1], stencil_stagg[xmin : xmax + 1], alpha=0.5) # - ax.set_yscale('log') + ax.set_yscale("log") ax.set_xticks(cells, minor=True) - ax.grid(which='minor', linewidth=0.2) - ax.grid(which='major', linewidth=0.4) + ax.grid(which="minor", linewidth=0.2) + ax.grid(which="major", linewidth=0.4) ax.legend() - ax.set_xlabel('number of cells') - ax.set_ylabel('signal to be truncated') - ax.set_title(r'Stencil extent along ${:s}$'.format(label)) + ax.set_xlabel("number of cells") + ax.set_ylabel("signal to be truncated") + ax.set_title(r"Stencil extent along ${:s}$".format(label)) fig.tight_layout() - fig_name = os.path.join(path, 'figure_stencil_' + label) + fig_name = os.path.join(path, "figure_stencil_" + label) if name: - fig_name += '_' + name - 
fig.savefig(fig_name + '.png', dpi=150) + fig_name += "_" + name + fig.savefig(fig_name + ".png", dpi=150) + -def run_main(dims, dx_boosted, dt, psatd_order, gamma=1., galilean=False, path='.', name=''): +def run_main( + dims, dx_boosted, dt, psatd_order, gamma=1.0, galilean=False, path=".", name="" +): """ Main function. @@ -333,44 +353,44 @@ def run_main(dims, dx_boosted, dt, psatd_order, gamma=1., galilean=False, path=' """ # Galilean velocity (default = 0.) - v_gal = 0. + v_gal = 0.0 if galilean: - v_gal = -np.sqrt(1.-1./gamma**2)*c + v_gal = -np.sqrt(1.0 - 1.0 / gamma**2) * c # Display some output - print('\nCell size:') - print(f'- dx = {dx_boosted}') + print("\nCell size:") + print(f"- dx = {dx_boosted}") if dims > 1: - print(f'- dx[1:]/dx[0] = {dx_boosted[1:]/dx_boosted[0]}') - print('\nTime step:') - print(f'- dt = {dt}') - print(f'- c*dt/dx = {c*dt/dx_boosted}') - print('\nSpectral order:') - print(f'- order = {psatd_order}') - print('\nLorentz boost, Galilean velocity:') - print(f'- gamma = {gamma}') - print(f'- v_gal = {v_gal}') + print(f"- dx[1:]/dx[0] = {dx_boosted[1:]/dx_boosted[0]}") + print("\nTime step:") + print(f"- dt = {dt}") + print(f"- c*dt/dx = {c*dt/dx_boosted}") + print("\nSpectral order:") + print(f"- order = {psatd_order}") + print("\nLorentz boost, Galilean velocity:") + print(f"- gamma = {gamma}") + print(f"- v_gal = {v_gal}") stencils = compute_all(dx_boosted, dt, psatd_order, v_gal) # Maximum number of cells - nc = dims*[65] + nc = dims * [65] # Arrays of stencils for i, s in enumerate(stencils): - s['nodal'] = s['nodal'][:nc[i]] - s['stagg'] = s['stagg'][:nc[i]] + s["nodal"] = s["nodal"][: nc[i]] + s["stagg"] = s["stagg"][: nc[i]] # Axis labels - label = ['x'] + label = ["x"] if dims == 3: - label.append('y') + label.append("y") if dims > 1: - label.append('z') + label.append("z") # Plot stencils for i, s in enumerate(stencils): - plot_stencil(np.arange(nc[i]), s['nodal'], s['stagg'], label[i], path, name) + 
plot_stencil(np.arange(nc[i]), s["nodal"], s["stagg"], label[i], path, name) # Compute min and max numbers of guard cells gcmin_nodal = [] @@ -378,38 +398,46 @@ def run_main(dims, dx_boosted, dt, psatd_order, gamma=1., galilean=False, path=' gcmin_stagg = [] gcmax_stagg = [] for s in stencils: - gcmin, gcmax = compute_guard_cells(sp, dp, s['nodal']) + gcmin, gcmax = compute_guard_cells(sp, dp, s["nodal"]) gcmin_nodal.append(gcmin) gcmax_nodal.append(gcmax) - gcmin, gcmax = compute_guard_cells(sp, dp, s['stagg']) + gcmin, gcmax = compute_guard_cells(sp, dp, s["stagg"]) gcmin_stagg.append(gcmin) gcmax_stagg.append(gcmax) fig_path = os.path.abspath(path) - print(f'\nFigures saved in {fig_path}/.') - print('\nThe plots show the extent of the signal to be truncated (y-axis)' - + '\nby choosing a given number of cells (x-axis) for the ghost regions' - + '\nof each simulation grid, along x, y, and z.') - print('\nIt is recommended to choose a number of ghost cells that corresponds to' - + '\na truncation of the signal between single and double machine precision.' - + '\nThe more ghost cells, the more accurate, yet expensive, results.' - + '\nFor each stencil the region of accuracy between single and double precision' - + '\nis shaded to help you identify a suitable number of ghost cells.') - print('\nFor a nodal simulation, choose:') + print(f"\nFigures saved in {fig_path}/.") + print( + "\nThe plots show the extent of the signal to be truncated (y-axis)" + + "\nby choosing a given number of cells (x-axis) for the ghost regions" + + "\nof each simulation grid, along x, y, and z." + ) + print( + "\nIt is recommended to choose a number of ghost cells that corresponds to" + + "\na truncation of the signal between single and double machine precision." + + "\nThe more ghost cells, the more accurate, yet expensive, results." 
+ + "\nFor each stencil the region of accuracy between single and double precision" + + "\nis shaded to help you identify a suitable number of ghost cells." + ) + print("\nFor a nodal simulation, choose:") for i in range(dims): - print(f'- between {gcmin_nodal[i]} and {gcmax_nodal[i]} ghost cells along {label[i]}') - print('\nFor a staggered or hybrid simulation, choose:') + print( + f"- between {gcmin_nodal[i]} and {gcmax_nodal[i]} ghost cells along {label[i]}" + ) + print("\nFor a staggered or hybrid simulation, choose:") for i in range(dims): - print(f'- between {gcmin_stagg[i]} and {gcmax_stagg[i]} ghost cells along {label[i]}') + print( + f"- between {gcmin_stagg[i]} and {gcmax_stagg[i]} ghost cells along {label[i]}" + ) print() return -if __name__ == '__main__': +if __name__ == "__main__": # Parse path to input file from command line parser = argparse.ArgumentParser() - parser.add_argument('--input_file', help='path to input file to be parsed') + parser.add_argument("--input_file", help="path to input file to be parsed") args = parser.parse_args() input_file = args.input_file @@ -417,45 +445,45 @@ def run_main(dims, dx_boosted, dt, psatd_order, gamma=1., galilean=False, path=' input_dict = parse_input_file(input_file) # TODO Handle RZ - dims = int(input_dict['geometry.dims'][0]) + dims = int(input_dict["geometry.dims"][0]) # Notation considering x as vector of coordinates (x,y,z) - nx = np.array([int(w) for w in input_dict['amr.n_cell']]) - xmin = np.array([float(w) for w in input_dict['geometry.prob_lo']]) - xmax = np.array([float(w) for w in input_dict['geometry.prob_hi']]) + nx = np.array([int(w) for w in input_dict["amr.n_cell"]]) + xmin = np.array([float(w) for w in input_dict["geometry.prob_lo"]]) + xmax = np.array([float(w) for w in input_dict["geometry.prob_hi"]]) # Cell size in the lab frame and boosted frame (boost along z) ## lab frame - dx = (xmax-xmin) / nx + dx = (xmax - xmin) / nx ## boosted frame - gamma = 1. 
- if 'warpx.gamma_boost' in input_dict: - gamma = float(input_dict['warpx.gamma_boost'][0]) - beta = np.sqrt(1. - 1./gamma**2) + gamma = 1.0 + if "warpx.gamma_boost" in input_dict: + gamma = float(input_dict["warpx.gamma_boost"][0]) + beta = np.sqrt(1.0 - 1.0 / gamma**2) dx_boosted = np.copy(dx) - dx_boosted[-1] = (1. + beta) * gamma * dx[-1] + dx_boosted[-1] = (1.0 + beta) * gamma * dx[-1] # Time step for pseudo-spectral scheme cfl = 0.999 - if 'warpx.cfl' in input_dict: - cfl = float(input_dict['warpx.cfl'][0]) + if "warpx.cfl" in input_dict: + cfl = float(input_dict["warpx.cfl"][0]) dt = cfl * np.min(dx_boosted) / c # Pseudo-spectral order psatd_order = np.full(shape=dims, fill_value=16) - if 'psatd.nox' in input_dict: - psatd_order[0] = int(input_dict['psatd.nox'][0]) - if 'psatd.noy' in input_dict: - psatd_order[1] = int(input_dict['psatd.noy'][0]) - if 'psatd.noz' in input_dict: - psatd_order[-1] = int(input_dict['psatd.noz'][0]) + if "psatd.nox" in input_dict: + psatd_order[0] = int(input_dict["psatd.nox"][0]) + if "psatd.noy" in input_dict: + psatd_order[1] = int(input_dict["psatd.noy"][0]) + if "psatd.noz" in input_dict: + psatd_order[-1] = int(input_dict["psatd.noz"][0]) # Galilean flag galilean = False - if 'psatd.use_default_v_galilean' in input_dict: - galilean = bool(input_dict['psatd.use_default_v_galilean'][0]) - if 'psatd.v_galilean' in input_dict: - galilean = bool(input_dict['psatd.v_galilean'][-1]) + if "psatd.use_default_v_galilean" in input_dict: + galilean = bool(input_dict["psatd.use_default_v_galilean"][0]) + if "psatd.v_galilean" in input_dict: + galilean = bool(input_dict["psatd.v_galilean"][-1]) # Run main function (some arguments are optional, # see definition of run_main function for help) diff --git a/Tools/DevUtils/compute_domain.py b/Tools/DevUtils/compute_domain.py index b54412639bd..d54ef2abad4 100644 --- a/Tools/DevUtils/compute_domain.py +++ b/Tools/DevUtils/compute_domain.py @@ -6,7 +6,7 @@ import numpy as np -''' +""" This 
Python script helps a user to parallelize a WarpX simulation. The user specifies the minimal size of the physical domain and the resolution @@ -22,96 +22,106 @@ Note that the script has no notion of blocking_factor. It is assumed that blocking_factor = max_grid_size, and that all boxes have the same size. -''' +""" # Update the lines below for your simulation # ------------------------------------------ # 2 elements for 2D, 3 elements for 3D # Lower corner of the box -box_lo0 = np.array([-25.e-6, -25.e-6, -15.e-6]) +box_lo0 = np.array([-25.0e-6, -25.0e-6, -15.0e-6]) # Upper corner of the box -box_hi0 = np.array([ 25.e-6, 25.e-6, 60.e-6]) +box_hi0 = np.array([25.0e-6, 25.0e-6, 60.0e-6]) # Cell size -dx = 1.e-6 +dx = 1.0e-6 dz = dx cell_size = np.array([dx, dx, dz]) # Use this for simulations in a boosted frame if you # want to enforce dz < dx / dx_over_dz_boosted_frame compute_dz_boosted_frame = True -gamma_boost = 30. -dx_over_dz_boosted_frame = 1.1 # >1. is usually more stable +gamma_boost = 30.0 +dx_over_dz_boosted_frame = 1.1 # >1. is usually more stable # ------------------------------------------ + # similar to numpy.ceil, except the output data type is int def intceil(num): return np.ceil(num).astype(int) + # Enlarge simulation boundaries to satisfy three conditions: # - The resolution must be exactly the one provided by the user # - The physical domain must cover the domain specified by box_lo0, box_hi0 # - The number of cells must be a multiple of mgs (max_grid_size). 
def adjust_bounds(box_lo0, box_hi0, box_ncell0, mgs): - cell_size = (box_hi0-box_lo0) / box_ncell0 - box_ncell = intceil(box_ncell0/mgs)*mgs + cell_size = (box_hi0 - box_lo0) / box_ncell0 + box_ncell = intceil(box_ncell0 / mgs) * mgs box_lo = box_ncell * cell_size * box_lo0 / (box_hi0 - box_lo0) box_hi = box_ncell * cell_size * box_hi0 / (box_hi0 - box_lo0) return box_lo, box_hi, box_ncell + # Calculate parallelization for the simulation, given numerical parameters # (number of cells, max_grid_size, number of threads per node etc.) -def nb_nodes_mpi(box_ncell,mgs,threadspernode,ompnumthreads,ngridpernode, ndim): - nmpipernode = threadspernode/ompnumthreads - ngridpermpi = ngridpernode/nmpipernode - box_ngrids = box_ncell/mgs +def nb_nodes_mpi(box_ncell, mgs, threadspernode, ompnumthreads, ngridpernode, ndim): + nmpipernode = threadspernode / ompnumthreads + ngridpermpi = ngridpernode / nmpipernode + box_ngrids = box_ncell / mgs if ndim == 2: ngrids = box_ngrids[0] * box_ngrids[1] elif ndim == 3: ngrids = np.prod(box_ngrids) - n_mpi = intceil( ngrids/ngridpermpi ) - n_node = intceil( n_mpi/nmpipernode ) + n_mpi = intceil(ngrids / ngridpermpi) + n_node = intceil(n_mpi / nmpipernode) return n_node, n_mpi + # Get number of dimensions (2 or 3) ndim = box_lo0.size if compute_dz_boosted_frame: # Adjust dz so that dx/dz = dx_over_dz_boosted_frame in simulation frame - cell_size[-1] = cell_size[0] / dx_over_dz_boosted_frame / 2. / gamma_boost + cell_size[-1] = cell_size[0] / dx_over_dz_boosted_frame / 2.0 / gamma_boost # Given the resolution, compute number of cells a priori -box_ncell0 = ( box_hi0 - box_lo0 ) / cell_size +box_ncell0 = (box_hi0 - box_lo0) / cell_size if ndim == 2: # Set of parameters suitable for a 2D simulation on Cori KNL - ngridpernode = 16. - ompnumthreads = 8. - mgs = 1024. - threadspernode = 64. 
# HyperThreading level = 1: no hyperthreading - distance_between_threads = int(68*4/threadspernode) - c_option = int( ompnumthreads*distance_between_threads ) + ngridpernode = 16.0 + ompnumthreads = 8.0 + mgs = 1024.0 + threadspernode = 64.0 # HyperThreading level = 1: no hyperthreading + distance_between_threads = int(68 * 4 / threadspernode) + c_option = int(ompnumthreads * distance_between_threads) elif ndim == 3: # Set of parameters suitable for a 3D simulation on Cori KNL - ngridpernode = 8. - ompnumthreads = 8. - mgs = 64. - threadspernode = 64. # HyperThreading level = 1: no hyperthreading - distance_between_threads = int(68*4/threadspernode) - c_option = int( ompnumthreads*distance_between_threads ) + ngridpernode = 8.0 + ompnumthreads = 8.0 + mgs = 64.0 + threadspernode = 64.0 # HyperThreading level = 1: no hyperthreading + distance_between_threads = int(68 * 4 / threadspernode) + c_option = int(ompnumthreads * distance_between_threads) # Adjust simulation bounds box_lo, box_hi, box_ncell = adjust_bounds(box_lo0, box_hi0, box_ncell0, mgs) # Calculate parallelization -n_node,n_mpi = nb_nodes_mpi(box_ncell, mgs, threadspernode, ompnumthreads, ngridpernode, ndim) +n_node, n_mpi = nb_nodes_mpi( + box_ncell, mgs, threadspernode, ompnumthreads, ngridpernode, ndim +) # Print results -string_output = ' ### Parameters used ### \n' -string_output += 'ngridpernode = ' + str(ngridpernode) + '\n' -string_output += 'ompnumthreads = ' + str(ompnumthreads) + '\n' -string_output += 'mgs (max_grid_size) = ' + str(mgs) + '\n' -string_output += 'threadspernode ( = # MPI ranks per node * OMP_NUM_THREADS) = ' + str(threadspernode) + '\n' -string_output += 'ndim = ' + str(ndim) + '\n\n' -string_output += 'box_lo = ' + str(box_lo) + '\n' -string_output += 'box_hi = ' + str(box_hi) + '\n' -string_output += 'box_ncell = ' + str(box_ncell) + '\n' -string_output += 'n_node = ' + str(n_node) + '\n' -string_output += 'n_mpi = ' + str(n_mpi) + '\n' +string_output = " ### Parameters used 
### \n" +string_output += "ngridpernode = " + str(ngridpernode) + "\n" +string_output += "ompnumthreads = " + str(ompnumthreads) + "\n" +string_output += "mgs (max_grid_size) = " + str(mgs) + "\n" +string_output += ( + "threadspernode ( = # MPI ranks per node * OMP_NUM_THREADS) = " + + str(threadspernode) + + "\n" +) +string_output += "ndim = " + str(ndim) + "\n\n" +string_output += "box_lo = " + str(box_lo) + "\n" +string_output += "box_hi = " + str(box_hi) + "\n" +string_output += "box_ncell = " + str(box_ncell) + "\n" +string_output += "n_node = " + str(n_node) + "\n" +string_output += "n_mpi = " + str(n_mpi) + "\n" print(string_output) diff --git a/Tools/DevUtils/update_benchmarks_from_azure_output.py b/Tools/DevUtils/update_benchmarks_from_azure_output.py index ec344988b81..b2be4d17a7b 100644 --- a/Tools/DevUtils/update_benchmarks_from_azure_output.py +++ b/Tools/DevUtils/update_benchmarks_from_azure_output.py @@ -8,7 +8,7 @@ import re import sys -''' +""" This Python script updates the Azure benchmarks automatically using a raw Azure output textfile that is given as the first and only argument of the script. 
@@ -17,12 +17,12 @@ and the next occurrence of "'----------------'" And use these lines to update the benchmarks -''' +""" azure_output_filename = sys.argv[1] -pattern_test_name = 'New file for (?P[\w\-]*)' -closing_string = '----------------' +pattern_test_name = "New file for (?P[\w\-]*)" +closing_string = "----------------" benchmark_path = "../../Regression/Checksum/benchmarks_json/" benchmark_suffix = ".json" @@ -31,14 +31,13 @@ with open(azure_output_filename, "r") as f: for line in f: - if current_test == "": # Here we search lines that read, for example, # "New file for LaserAcceleration_BTD" # and we set current_test = "LaserAcceleration_BTD" match_test_name = re.search(pattern_test_name, line) if match_test_name: - current_test = match_test_name.group('testname') + current_test = match_test_name.group("testname") new_file_string = "" else: @@ -52,14 +51,14 @@ # not need here. The first line that we will read is the prefix followed by the # "{" character, so we determine how long the prefix is by finding the last # occurrence of the "{" character in this line. - azure_indent = line.rfind('{') + azure_indent = line.rfind("{") first_line_read = True new_file_string += line[azure_indent:] else: # We have read the new file entirely. Dump it in the json file. 
new_file_json = json.loads(new_file_string) - json_filepath = benchmark_path+current_test+benchmark_suffix + json_filepath = benchmark_path + current_test + benchmark_suffix with open(json_filepath, "w") as f_json: json.dump(new_file_json, f_json, indent=2) current_test = "" diff --git a/Tools/Parser/input_file_parser.py b/Tools/Parser/input_file_parser.py index 0ab134f6222..9aeba6bbacf 100644 --- a/Tools/Parser/input_file_parser.py +++ b/Tools/Parser/input_file_parser.py @@ -16,16 +16,18 @@ def parse_input_file(input_file): input_dict = dict() with open(input_file) as ff: for line in ff: - sline = line.split('=') + sline = line.split("=") # skip lines that are commented out, blank, or continuation of previous parameters - skip_line = sline[0].startswith('#') or sline[0].startswith('\n') or len(sline) == 1 + skip_line = ( + sline[0].startswith("#") or sline[0].startswith("\n") or len(sline) == 1 + ) if not skip_line: key = sline[0].strip() val = sline[1].split() # The value corresponding to a given key of input_dict is a list # of strings, from which we remove any leftover comments for i in range(len(val)): - if val[i].startswith('#'): + if val[i].startswith("#"): val = val[:i] break input_dict[key] = val diff --git a/Tools/PostProcessing/plot_distribution_mapping.py b/Tools/PostProcessing/plot_distribution_mapping.py index db95c862bd5..899ea4678c4 100644 --- a/Tools/PostProcessing/plot_distribution_mapping.py +++ b/Tools/PostProcessing/plot_distribution_mapping.py @@ -11,6 +11,7 @@ class SimData: """ Structure for easy access to load costs reduced diagnostics """ + def __init__(self, directory, prange, is_3D): """ Set data-containing dir, data range; load data @@ -40,7 +41,6 @@ def __call__(self, i): # Data_fields index currently set self.idx = i - def _get_costs_reduced_diagnostics(self, directory, prange): """ Read costs reduced diagnostics @@ -58,20 +58,21 @@ def _get_costs_reduced_diagnostics(self, directory, prange): if len(data.shape) == 1: data = 
data.reshape(-1, data.shape[0]) - steps = data[:,0].astype(int) + steps = data[:, 0].astype(int) - times = data[:,1] - data = data[:,2:] + times = data[:, 1] + data = data[:, 2:] # Compute the number of datafields saved per box n_data_fields = 0 with open(directory) as f: h = f.readlines()[0] - unique_headers=[''.join([l for l in w if not l.isdigit()]) - for w in h.split()][2::] + unique_headers = [ + "".join([ln for ln in w if not ln.isdigit()]) for w in h.split() + ][2::] # Either 9 or 10 depending if GPU - n_data_fields = 9 if len(set(unique_headers))%9 == 0 else 10 + n_data_fields = 9 if len(set(unique_headers)) % 9 == 0 else 10 f.close() # From data header, data layout is: @@ -86,9 +87,11 @@ def _get_costs_reduced_diagnostics(self, directory, prange): # cost_box_n, proc_box_n, lev_box_n, i_low_box_n, j_low_box_n, # k_low_box_n, num_cells_n, num_macro_particles_n, # (, gpu_ID_box_n if GPU run), hostname_box_n - i, j, k = (data[0,3::n_data_fields], - data[0,4::n_data_fields], - data[0,5::n_data_fields]) + i, j, k = ( + data[0, 3::n_data_fields], + data[0, 4::n_data_fields], + data[0, 5::n_data_fields], + ) i_blocks = np.diff(np.array(sorted(i.astype(int)))) j_blocks = np.diff(np.array(sorted(j.astype(int)))) @@ -103,21 +106,23 @@ def _get_costs_reduced_diagnostics(self, directory, prange): j_blocking_factor = 1 if len(j_non_zero) == 0 else j_non_zero.min() k_blocking_factor = 1 if len(k_non_zero) == 0 else k_non_zero.min() - imax = i.astype(int).max()//i_blocking_factor - jmax = j.astype(int).max()//j_blocking_factor - kmax = k.astype(int).max()//k_blocking_factor + imax = i.astype(int).max() // i_blocking_factor + jmax = j.astype(int).max() // j_blocking_factor + kmax = k.astype(int).max() // k_blocking_factor for key in self.keys: row = np.where(key == steps)[0][0] costs = data[row, 0::n_data_fields].astype(float) ranks = data[row, 1::n_data_fields].astype(int) - icoords = i.astype(int)//i_blocking_factor - jcoords = j.astype(int)//j_blocking_factor - kcoords 
= k.astype(int)//k_blocking_factor + icoords = i.astype(int) // i_blocking_factor + jcoords = j.astype(int) // j_blocking_factor + kcoords = k.astype(int) // k_blocking_factor # Fill in cost array - shape = (kmax+1, jmax+1, imax+1)[:2+self.is_3D] - coords = [coord[:2+self.is_3D] for coord in zip(kcoords, jcoords, icoords)] + shape = (kmax + 1, jmax + 1, imax + 1)[: 2 + self.is_3D] + coords = [ + coord[: 2 + self.is_3D] for coord in zip(kcoords, jcoords, icoords) + ] cost_arr = np.full(shape, 0.0) rank_arr = np.full(shape, -1) @@ -127,43 +132,56 @@ def _get_costs_reduced_diagnostics(self, directory, prange): rank_arr[coord] = ranks[nc] # For non-uniform blocks: fill with the corresponding cost/rank - visited = np.full(shape, False) + visited = np.full(shape, False) + def dfs(corner, pos, prev): # Exit conditions - if any([pos[i]>=shape[i] for i in range(len(shape))]): return - edges = list(rank_arr[corner[0]:pos[0]+1, pos[1], pos[2]]) \ - + list(rank_arr[pos[0], corner[1]:pos[1]+1, pos[2]]) \ - + list(rank_arr[pos[0], pos[1], corner[2]:pos[2]+1]) \ - if self.is_3D else \ - list(rank_arr[corner[0]:pos[0]+1, pos[1]]) \ - + list(rank_arr[pos[0], corner[1]:pos[1]+1]) - if visited[pos] or not set(edges).issubset(set([prev, -1])): return + if any([pos[i] >= shape[i] for i in range(len(shape))]): + return + edges = ( + list(rank_arr[corner[0] : pos[0] + 1, pos[1], pos[2]]) + + list(rank_arr[pos[0], corner[1] : pos[1] + 1, pos[2]]) + + list(rank_arr[pos[0], pos[1], corner[2] : pos[2] + 1]) + if self.is_3D + else list(rank_arr[corner[0] : pos[0] + 1, pos[1]]) + + list(rank_arr[pos[0], corner[1] : pos[1] + 1]) + ) + if visited[pos] or not set(edges).issubset(set([prev, -1])): + return visited[pos] = True - if rank_arr[pos] not in [-1, prev]: prev, corner = rank_arr[pos], pos - else: rank_arr[pos] = prev + if rank_arr[pos] not in [-1, prev]: + prev, corner = rank_arr[pos], pos + else: + rank_arr[pos] = prev args = [[0, 1] for _ in range(len(shape))] - neighbors = 
[tuple(np.array(pos) + np.array(p)) for p in product(*args) - if not p == (0,)*len(shape)] - for n in neighbors: dfs(corner, n, prev) + neighbors = [ + tuple(np.array(pos) + np.array(p)) + for p in product(*args) + if not p == (0,) * len(shape) + ] + for n in neighbors: + dfs(corner, n, prev) - for corner in coords: dfs(corner, corner, rank_arr[corner]) + for corner in coords: + dfs(corner, corner, rank_arr[corner]) - self.data_fields[key]['cost_arr'] = cost_arr - self.data_fields[key]['rank_arr'] = rank_arr + self.data_fields[key]["cost_arr"] = cost_arr + self.data_fields[key]["rank_arr"] = rank_arr # Compute load balance efficiency - rank_to_cost_map = {r:0. for r in set(ranks)} - for c, r in zip(costs, ranks): rank_to_cost_map[r] += c + rank_to_cost_map = {r: 0.0 for r in set(ranks)} + for c, r in zip(costs, ranks): + rank_to_cost_map[r] += c efficiencies = np.array(list(rank_to_cost_map.values())) efficiencies /= efficiencies.max() - self.data_fields[key]['ranks'] = np.array(list(rank_to_cost_map.keys())) - self.data_fields[key]['lb_efficiencies'] = efficiencies - self.data_fields[key]['lb_efficiency'] = efficiencies.mean() - self.data_fields[key]['lb_efficiency_max'] = efficiencies.max() - self.data_fields[key]['lb_efficiency_min'] = efficiencies.min() - self.data_fields[key]['t'] = times[row] - self.data_fields[key]['step'] = steps[row] + self.data_fields[key]["ranks"] = np.array(list(rank_to_cost_map.keys())) + self.data_fields[key]["lb_efficiencies"] = efficiencies + self.data_fields[key]["lb_efficiency"] = efficiencies.mean() + self.data_fields[key]["lb_efficiency_max"] = efficiencies.max() + self.data_fields[key]["lb_efficiency_min"] = efficiencies.min() + self.data_fields[key]["t"] = times[row] + self.data_fields[key]["step"] = steps[row] # ... 
diff --git a/Tools/PostProcessing/plot_parallel.py b/Tools/PostProcessing/plot_parallel.py index 9719b7006c3..2bdac5d0177 100644 --- a/Tools/PostProcessing/plot_parallel.py +++ b/Tools/PostProcessing/plot_parallel.py @@ -16,7 +16,7 @@ import matplotlib.pyplot as plt import numpy as np -''' +""" This script loops over all WarpX plotfiles in a directory and, for each plotfile, saves an image showing the field and particles. @@ -41,33 +41,71 @@ To get help, run > python plot_parallel --help -''' +""" # Parse command line for options. parser = argparse.ArgumentParser() -parser.add_argument('--path', default=None, - help='path to plotfiles, defaults to diags/plotfiles. Plotfiles names must be plt?????') -parser.add_argument('--image_dir', default=None, - help='path where images are placed, defaults to diags/plotfiles or path if specified.') -parser.add_argument('--plotlib', default='yt', - choices=['yt','matplotlib'], - help='Plotting library to use') -parser.add_argument('--field', default='Ez', - help='Which field to plot, e.g., Ez, By, jx or rho. 
The central slice in y is plotted') -parser.add_argument('--pjump', default=20, - help='When plotlib=matplotlib, we plot every pjump particle') -parser.add_argument('--vmax', type=float, default=None, - help='If specified, the colormap will have bounds [-vmax, vmax]') -parser.add_argument('--slicewidth', default=10.e-6, - help='Only particles with -slicewidth/2 1: @@ -232,10 +310,11 @@ def reduce_evolved_quantity(z, q): else: return z, q + ### Analysis ### # Get list of plotfiles -file_list = glob.glob(os.path.join(path, 'plt?????')) +file_list = glob.glob(os.path.join(path, "plt?????")) file_list.sort() nfiles = len(file_list) @@ -247,6 +326,7 @@ def reduce_evolved_quantity(z, q): if not args.serial: try: from mpi4py import MPI + comm_world = MPI.COMM_WORLD rank = comm_world.Get_rank() size = comm_world.Get_size() @@ -254,9 +334,9 @@ def reduce_evolved_quantity(z, q): pass if rank == 0: - print('number of MPI ranks: %d'%size) - print('Number of plotfiles: %s'%nfiles) - print('list of species: ', pslist) + print("number of MPI ranks: %d" % size) + print("Number of plotfiles: %s" % nfiles) + print("list of species: ", pslist) if plot_evolution is not None: # Fill with a value less than any possible value @@ -271,14 +351,16 @@ def reduce_evolved_quantity(z, q): # - plot field snapshot # - store window position and field max in arrays for count, filename in enumerate(file_list): - if count%size != rank: + if count % size != rank: continue - plot_snapshot( filename ) + plot_snapshot(filename) if plot_evolution is not None: - zwin[count], quantity[count] = get_evolution_quantity( filename, plot_evolution ) + zwin[count], quantity[count] = get_evolution_quantity(filename, plot_evolution) if plot_particle_evolution is not None: - zbar[count], xstd[count] = get_particle_evolution_quantity(filename, plot_particle_evolution) + zbar[count], xstd[count] = get_particle_evolution_quantity( + filename, plot_particle_evolution + ) if plot_evolution is not None: zwin, quantity = 
reduce_evolved_quantity(zwin, quantity) diff --git a/Tools/PostProcessing/plot_particle_path.py b/Tools/PostProcessing/plot_particle_path.py index 9bf7f896c10..af29dfc0e11 100644 --- a/Tools/PostProcessing/plot_particle_path.py +++ b/Tools/PostProcessing/plot_particle_path.py @@ -9,7 +9,7 @@ class AMReXParticleHeader(object): - ''' + """ This class is designed to parse and store the information contained in an AMReX particle header file. @@ -22,19 +22,18 @@ class AMReXParticleHeader(object): etc... - ''' + """ def __init__(self, header_filename): - self.real_component_names = [] self.int_component_names = [] with open(header_filename, "r") as f: self.version_string = f.readline().strip() - particle_real_type = self.version_string.split('_')[-1] - if particle_real_type == 'double': + particle_real_type = self.version_string.split("_")[-1] + if particle_real_type == "double": self.real_type = np.float64 - elif particle_real_type == 'single': + elif particle_real_type == "single": self.real_type = np.float32 else: raise RuntimeError("Did not recognize particle real type.") @@ -62,7 +61,7 @@ def __init__(self, header_filename): self.num_int_extra = 0 self.num_int = 0 - self.grids_per_level = np.zeros(self.num_levels, dtype='int64') + self.grids_per_level = np.zeros(self.num_levels, dtype="int64") self.grids = [] for level_num in range(self.num_levels): self.grids_per_level[level_num] = int(f.readline().strip()) @@ -75,7 +74,7 @@ def __init__(self, header_filename): def read_particle_data(fn, ptype="particle0"): - ''' + """ This function returns the particle data stored in a particular plot file and particle type. 
It returns two numpy arrays, the @@ -89,7 +88,7 @@ def read_particle_data(fn, ptype="particle0"): idata, rdata = read_particle_data("plt00000", "particle0") - ''' + """ base_fn = fn + "/" + ptype header = AMReXParticleHeader(base_fn + "/Header") @@ -99,22 +98,23 @@ def read_particle_data(fn, ptype="particle0"): elif header.real_type == np.float32: fdtype = "(%d,)f4" % header.num_real - idata = np.empty((header.num_particles, header.num_int )) + idata = np.empty((header.num_particles, header.num_int)) rdata = np.empty((header.num_particles, header.num_real)) ip = 0 for lvl, level_grids in enumerate(header.grids): - for (which, count, where) in level_grids: - if count == 0: continue + for which, count, where in level_grids: + if count == 0: + continue fn = base_fn + "/Level_%d/DATA_%04d" % (lvl, which) - with open(fn, 'rb') as f: + with open(fn, "rb") as f: f.seek(where) - ints = np.fromfile(f, dtype = idtype, count=count) - floats = np.fromfile(f, dtype = fdtype, count=count) + ints = np.fromfile(f, dtype=idtype, count=count) + floats = np.fromfile(f, dtype=fdtype, count=count) - idata[ip:ip+count] = ints - rdata[ip:ip+count] = floats + idata[ip : ip + count] = ints + rdata[ip : ip + count] = floats ip += count return idata, rdata @@ -143,10 +143,10 @@ def read_particle_data(fn, ptype="particle0"): fig = plt.gcf() fig.set_size_inches(8, 8) - plt.plot(x0, y0, 'r.') - plt.plot(x1, y1, 'b.') - plt.axis((-2., 2., -2., 2.)) + plt.plot(x0, y0, "r.") + plt.plot(x1, y1, "b.") + plt.axis((-2.0, 2.0, -2.0, 2.0)) ax = plt.gca() - ax.set_xlabel(r'$x$') - ax.set_ylabel(r'$y$') - plt.savefig('particles.png') + ax.set_xlabel(r"$x$") + ax.set_ylabel(r"$y$") + plt.savefig("particles.png") diff --git a/Tools/PostProcessing/plot_timestep_duration.py b/Tools/PostProcessing/plot_timestep_duration.py index 9858eb6a422..7b893f1ad1e 100755 --- a/Tools/PostProcessing/plot_timestep_duration.py +++ b/Tools/PostProcessing/plot_timestep_duration.py @@ -8,25 +8,24 @@ def extract_data(filename): 
- regex_step = re.compile( - r"STEP [0-9]* ends.*\n.* Avg\. per step = ([0-9]*[.])?[0-9]+ s", re.MULTILINE) + r"STEP [0-9]* ends.*\n.* Avg\. per step = ([0-9]*[.])?[0-9]+ s", re.MULTILINE + ) string_data = [] - print("Processing " + filename + " ...", end='') + print("Processing " + filename + " ...", end="") with open(filename) as f: text = f.read() string_data = [s.group(0) for s in regex_step.finditer(text)] - regex_real = re.compile( - r" -?[\d.]+(?:e-?\d+)?", re.MULTILINE) + regex_real = re.compile(r" -?[\d.]+(?:e-?\d+)?", re.MULTILINE) time_data = np.zeros([len(string_data), 6]) for i, ss in enumerate(string_data): numbers = regex_real.findall(ss) - time_data[i,:] = np.array(numbers) + time_data[i, :] = np.array(numbers) print("...done!") return time_data @@ -34,22 +33,22 @@ def extract_data(filename): def plot_timestep_duration(time_data, name): fig_name = name + "_ts_duration.png" - print("Generating " + fig_name + "...", end='') + print("Generating " + fig_name + "...", end="") - plt.rcParams.update({'font.size': 20}) - plt.rcParams['axes.linewidth'] = 3 + plt.rcParams.update({"font.size": 20}) + plt.rcParams["axes.linewidth"] = 3 - f, ax = plt.subplots(figsize=(12,6)) + f, ax = plt.subplots(figsize=(12, 6)) ax.set_ylabel("timestep duration [s]") ax.set_xlabel("step [#]") - ax.semilogy(time_data[:,0], time_data[:,4]) + ax.semilogy(time_data[:, 0], time_data[:, 4]) - ax.spines['bottom'].set_color('gray') - ax.spines['top'].set_visible(False) - ax.spines['left'].set_color('gray') - ax.spines['right'].set_visible(False) + ax.spines["bottom"].set_color("gray") + ax.spines["top"].set_visible(False) + ax.spines["left"].set_color("gray") + ax.spines["right"].set_visible(False) plt.tight_layout() @@ -59,22 +58,22 @@ def plot_timestep_duration(time_data, name): def plot_cumulative_duration(time_data, name): fig_name = name + "_cumulative_duration.png" - print("Generating " + fig_name + "...", end='') + print("Generating " + fig_name + "...", end="") - 
plt.rcParams.update({'font.size': 20}) - plt.rcParams['axes.linewidth'] = 3 + plt.rcParams.update({"font.size": 20}) + plt.rcParams["axes.linewidth"] = 3 - f, ax = plt.subplots(figsize=(12,6)) + f, ax = plt.subplots(figsize=(12, 6)) ax.set_ylabel("cumulative duration [s]") ax.set_xlabel("step [#]") - ax.plot(time_data[:,0], np.cumsum(time_data[:,4])) + ax.plot(time_data[:, 0], np.cumsum(time_data[:, 4])) - ax.spines['bottom'].set_color('gray') - ax.spines['top'].set_visible(False) - ax.spines['left'].set_color('gray') - ax.spines['right'].set_visible(False) + ax.spines["bottom"].set_color("gray") + ax.spines["top"].set_visible(False) + ax.spines["left"].set_color("gray") + ax.spines["right"].set_visible(False) plt.tight_layout() @@ -83,9 +82,16 @@ def plot_cumulative_duration(time_data, name): def do_plot_timestep_duration(): - parser = argparse.ArgumentParser(description='Generates plots of timestep duration from WarpX standard output logs') - parser.add_argument('file_name', metavar='file_name', type=str, nargs=1, - help='the name of the WarpX output log to process') + parser = argparse.ArgumentParser( + description="Generates plots of timestep duration from WarpX standard output logs" + ) + parser.add_argument( + "file_name", + metavar="file_name", + type=str, + nargs=1, + help="the name of the WarpX output log to process", + ) args = parser.parse_args() log_file_name = args.file_name[0] @@ -95,5 +101,6 @@ def do_plot_timestep_duration(): plot_timestep_duration(time_data, log_file_name) plot_cumulative_duration(time_data, log_file_name) + if __name__ == "__main__": do_plot_timestep_duration() diff --git a/Tools/PostProcessing/read_raw_data.py b/Tools/PostProcessing/read_raw_data.py index c34ea11d301..a180cad18e0 100644 --- a/Tools/PostProcessing/read_raw_data.py +++ b/Tools/PostProcessing/read_raw_data.py @@ -10,10 +10,11 @@ import numpy as np -HeaderInfo = namedtuple('HeaderInfo', ['version', 'how', 'ncomp', 'nghost']) +HeaderInfo = namedtuple("HeaderInfo", 
["version", "how", "ncomp", "nghost"]) + def read_data(plt_file): - ''' + """ This function reads the raw (i.e. not averaged to cell centers) data from a WarpX plt file. The plt file must have been written with the @@ -33,9 +34,9 @@ def read_data(plt_file): >>> data = read_data("plt00016") >>> print(data.keys()) - >>> print(data['Ex'].shape) + >>> print(data["Ex"].shape) - ''' + """ all_data = [] raw_files = sorted(glob(plt_file + "/raw_fields/Level_*/")) for raw_file in raw_files: @@ -69,33 +70,35 @@ def _line_to_numpy_arrays(line): def _read_local_Header(header_file, dim): with open(header_file, "r") as f: t_snapshot = float(f.readline()) - if dim==2: + if dim == 2: nx, nz = [int(x) for x in f.readline().split()] ny = 1 xmin, zmin = [float(x) for x in f.readline().split()] ymin = 0 xmax, zmax = [float(x) for x in f.readline().split()] ymax = 0 - if dim==3: + if dim == 3: nx, ny, nz = [int(x) for x in f.readline().split()] xmin, ymin, zmin = [float(x) for x in f.readline().split()] xmax, ymax, zmax = [float(x) for x in f.readline().split()] field_names = f.readline().split() local_info = { - 't_snapshot' : t_snapshot, - 'field_names' : field_names, - 'xmin' : xmin, - 'ymin' : ymin, - 'zmin' : zmin, - 'xmax' : xmax, - 'ymax' : ymax, - 'zmax' : zmax, - 'nx' : nx, - 'ny' : ny, - 'nz' : nz - } + "t_snapshot": t_snapshot, + "field_names": field_names, + "xmin": xmin, + "ymin": ymin, + "zmin": zmin, + "xmax": xmax, + "ymax": ymax, + "zmax": zmax, + "nx": nx, + "ny": ny, + "nz": nz, + } return local_info + + ## ------------------------------------------------------------ ## USE THIS INSTEAD OF THE PREVIOUS FUNCTION IF Header contains ## (x,y,z) min and max vectors instead of zmin and zmax @@ -115,25 +118,23 @@ def _read_local_Header(header_file, dim): def _read_global_Header(header_file): with open(header_file, "r") as f: - nshapshots = int(f.readline()) dt_between_snapshots = float(f.readline()) gamma_boost = float(f.readline()) beta_boost = float(f.readline()) 
global_info = { - 'nshapshots' : nshapshots, - 'dt_between_snapshots' : dt_between_snapshots, - 'gamma_boost' : gamma_boost, - 'beta_boost' : beta_boost - } + "nshapshots": nshapshots, + "dt_between_snapshots": dt_between_snapshots, + "gamma_boost": gamma_boost, + "beta_boost": beta_boost, + } return global_info def _read_header(header_file): with open(header_file, "r") as f: - version = int(f.readline()) how = int(f.readline()) ncomp = int(f.readline()) @@ -142,9 +143,11 @@ def _read_header(header_file): # If the number of ghost cells varies depending on the direction, # s is a string of the form '(9,8)\n' in 2D or '(9,8,9)\n' in 3D. s = f.readline() - s = s.replace('(', '') # remove left parenthesis '(', if any - s = s.replace(')', '') # remove right parenthesis ')', if any - nghost = np.fromstring(s, dtype = int, sep = ',') # convert from string to numpy array + s = s.replace("(", "") # remove left parenthesis '(', if any + s = s.replace(")", "") # remove right parenthesis ')', if any + nghost = np.fromstring( + s, dtype=int, sep="," + ) # convert from string to numpy array header = HeaderInfo(version, how, ncomp, nghost) @@ -155,12 +158,10 @@ def _read_header(header_file): boxes = [] for line in f: clean_line = line.strip().split() - if clean_line == [')']: + if clean_line == [")"]: break lo_corner, hi_corner, node_type = _line_to_numpy_arrays(clean_line) - boxes.append((lo_corner - nghost, - hi_corner + nghost, - node_type)) + boxes.append((lo_corner - nghost, hi_corner + nghost, node_type)) # read the file and offset position for the corresponding box file_names = [] @@ -182,7 +183,6 @@ def _combine_boxes(boxes): def _read_field(raw_file, field_name): - header_file = raw_file + field_name + "_H" boxes, file_names, offsets, header = _read_header(header_file) @@ -200,11 +200,11 @@ def _read_field(raw_file, field_name): shape = np.append(shape, header.ncomp) with open(raw_file + fn, "rb") as f: f.seek(offset) - if (header.version == 1): + if header.version == 1: 
f.readline() # skip the first line - arr = np.fromfile(f, 'float64', np.product(shape)) - arr = arr.reshape(shape, order='F') - box_shape = [slice(l,h+1) for l, h in zip(lo, hi)] + arr = np.fromfile(f, "float64", np.product(shape)) + arr = arr.reshape(shape, order="F") + box_shape = [slice(low, hig + 1) for low, hig in zip(lo, hi)] if header.ncomp > 1: box_shape += [slice(None)] data[tuple(box_shape)] = arr @@ -212,9 +212,7 @@ def _read_field(raw_file, field_name): return data - def _read_buffer(snapshot, header_fn, _component_names): - boxes, file_names, offsets, header = _read_header(header_fn) dom_lo, dom_hi = _combine_boxes(boxes) @@ -230,18 +228,21 @@ def _read_buffer(snapshot, header_fn, _component_names): size = np.product(shape) with open(snapshot + "/Level_0/" + fn, "rb") as f: f.seek(offset) - if (header.version == 1): + if header.version == 1: f.readline() # skip the first line - arr = np.fromfile(f, 'float64', header.ncomp*size) + arr = np.fromfile(f, "float64", header.ncomp * size) for i in range(header.ncomp): - comp_data = arr[i*size:(i+1)*size].reshape(shape, order='F') + comp_data = arr[i * size : (i + 1) * size].reshape(shape, order="F") data = all_data[_component_names[i]] - data[tuple([slice(l,h+1) for l, h in zip(lo, hi)])] = comp_data + data[tuple([slice(low, hig + 1) for low, hig in zip(lo, hi)])] = ( + comp_data + ) all_data[_component_names[i]] = data return all_data -def read_reduced_diags(filename, delimiter=' '): - ''' + +def read_reduced_diags(filename, delimiter=" "): + """ Read data written by WarpX Reduced Diagnostics, and return them into Python objects input: - filename name of file to open @@ -249,54 +250,67 @@ def read_reduced_diags(filename, delimiter=' '): output: - metadata_dict dictionary where first key is the type of metadata, second is the field - data dictionary with data - ''' + """ # Read header line - unformatted_header = list( np.genfromtxt( filename, comments="@", max_rows=1, dtype="str", delimiter=delimiter) ) + 
unformatted_header = list( + np.genfromtxt( + filename, comments="@", max_rows=1, dtype="str", delimiter=delimiter + ) + ) # From header line, get field name, units and column number - field_names = [s[s.find("]")+1:s.find("(")] for s in unformatted_header] - field_units = [s[s.find("(")+1:s.find(")")] for s in unformatted_header] - field_column = [s[s.find("[")+1:s.find("]")] for s in unformatted_header] + field_names = [s[s.find("]") + 1 : s.find("(")] for s in unformatted_header] + field_units = [s[s.find("(") + 1 : s.find(")")] for s in unformatted_header] + field_column = [s[s.find("[") + 1 : s.find("]")] for s in unformatted_header] # Load data and re-format to a dictionary - data = np.loadtxt( filename, delimiter=delimiter ) + data = np.loadtxt(filename, delimiter=delimiter) if data.ndim == 1: data_dict = {key: np.atleast_1d(data[i]) for i, key in enumerate(field_names)} else: - data_dict = {key: data[:,i] for i, key in enumerate(field_names)} + data_dict = {key: data[:, i] for i, key in enumerate(field_names)} # Put header data into a dictionary metadata_dict = {} - metadata_dict['units'] = {key: field_units[i] for i, key in enumerate(field_names)} - metadata_dict['column'] = {key: field_column[i] for i, key in enumerate(field_names)} + metadata_dict["units"] = {key: field_units[i] for i, key in enumerate(field_names)} + metadata_dict["column"] = { + key: field_column[i] for i, key in enumerate(field_names) + } return metadata_dict, data_dict -def read_reduced_diags_histogram(filename, delimiter=' '): - ''' + +def read_reduced_diags_histogram(filename, delimiter=" "): + """ Modified based on read_reduced_diags Two extra return objects: - bin_value: the values of bins - bin_data: the histogram data values of bins - ''' + """ # Read header line - unformatted_header = list( np.genfromtxt( filename, comments="@", max_rows=1, dtype="str", delimiter=delimiter) ) + unformatted_header = list( + np.genfromtxt( + filename, comments="@", max_rows=1, dtype="str", 
delimiter=delimiter + ) + ) # From header line, get field name, units and column number - field_names = [s[s.find("]")+1:s.find("(")] for s in unformatted_header] - field_names[2:] = [s[s.find("b"):s.find("=")] for s in field_names[2:]] - field_units = [s[s.find("(")+1:s.find(")")] for s in unformatted_header] - field_column = [s[s.find("[")+1:s.find("]")] for s in unformatted_header] - field_bin = [s[s.find("=")+1:s.find("(")] for s in unformatted_header] + field_names = [s[s.find("]") + 1 : s.find("(")] for s in unformatted_header] + field_names[2:] = [s[s.find("b") : s.find("=")] for s in field_names[2:]] + field_units = [s[s.find("(") + 1 : s.find(")")] for s in unformatted_header] + field_column = [s[s.find("[") + 1 : s.find("]")] for s in unformatted_header] + field_bin = [s[s.find("=") + 1 : s.find("(")] for s in unformatted_header] # Load data and re-format to a dictionary - data = np.loadtxt( filename, delimiter=delimiter ) + data = np.loadtxt(filename, delimiter=delimiter) if data.ndim == 1: data_dict = {key: data[i] for i, key in enumerate(field_names)} else: - data_dict = {key: data[:,i] for i, key in enumerate(field_names)} + data_dict = {key: data[:, i] for i, key in enumerate(field_names)} # Put header data into a dictionary metadata_dict = {} - metadata_dict['units'] = {key: field_units[i] for i, key in enumerate(field_names)} - metadata_dict['column'] = {key: field_column[i] for i, key in enumerate(field_names)} + metadata_dict["units"] = {key: field_units[i] for i, key in enumerate(field_names)} + metadata_dict["column"] = { + key: field_column[i] for i, key in enumerate(field_names) + } # Save bin values - bin_value = np.asarray(field_bin[2:], dtype=np.float64, order='C') + bin_value = np.asarray(field_bin[2:], dtype=np.float64, order="C") if data.ndim == 1: - bin_data = data[2:] + bin_data = data[2:] else: - bin_data = data[:,2:] + bin_data = data[:, 2:] return metadata_dict, data_dict, bin_value, bin_data diff --git 
a/Tools/PostProcessing/video_yt.py b/Tools/PostProcessing/video_yt.py index 61046b3c074..90aad9f8d17 100644 --- a/Tools/PostProcessing/video_yt.py +++ b/Tools/PostProcessing/video_yt.py @@ -5,7 +5,7 @@ # # License: BSD-3-Clause-LBNL -''' +""" This script loops over 3D plotfiles plt*****, generates a 3D rendering of the data with fields and particles, and saves one image per plotfile to plt_****_img.png. It was written for a laser-wakefield acceleration @@ -18,7 +18,7 @@ > mpirun -np 4 python video_yt.py to generate the images. It can be quite slow for even moderately large plotfiles. -''' +""" import glob @@ -28,37 +28,43 @@ yt.enable_parallelism() import numpy as np -field = 'Ez' -my_max = int(5.e9) # Field maximum amplitude +field = "Ez" +my_max = int(5.0e9) # Field maximum amplitude do_particles = True -species0 = 'beam' -species1 = 'electrons' -do_patch = False # if want to plot an MR patch +species0 = "beam" +species1 = "electrons" +do_patch = False # if want to plot an MR patch resolution = (512, 512) -camera_position = np.array([15., 20., -5.])*yt.units.micrometer -file_list = glob.glob('./diags/plotfiles/plt?????') +camera_position = np.array([15.0, 20.0, -5.0]) * yt.units.micrometer +file_list = glob.glob("./diags/plotfiles/plt?????") + +clight = 299792458.0 # must be the same value as in WarpX -clight = 299792458.0 # must be the same value as in WarpX def plot_species(species, ad, radii, transparency, abs_xmax): # Color for each of these particles - colors_vect = [1., 1., 1., .05] # the last value is overwritten later - x = ad[species,'particle_position_x'].v - y = ad[species,'particle_position_y'].v - z = ad[species,'particle_position_z'].v + colors_vect = [1.0, 1.0, 1.0, 0.05] # the last value is overwritten later + x = ad[species, "particle_position_x"].v + y = ad[species, "particle_position_y"].v + z = ad[species, "particle_position_z"].v selector = np.abs(x) < abs_xmax - x = x[selector] ; y = y[selector] ; z = z[selector] - vertices = 
np.column_stack((x,y,z)) - colors = np.tile(colors_vect,(vertices.shape[0], 1)) - colors[:,3] = transparency - point = yt.visualization.volume_rendering.render_source.PointSource(vertices, colors=colors, radii=radii) + x = x[selector] + y = y[selector] + z = z[selector] + vertices = np.column_stack((x, y, z)) + colors = np.tile(colors_vect, (vertices.shape[0], 1)) + colors[:, 3] = transparency + point = yt.visualization.volume_rendering.render_source.PointSource( + vertices, colors=colors, radii=radii + ) return point + # Create the 3d image for 1 timestep # filename is the name of the folder (e.g. plt00000) def img_onestep(filename): # Load the data - ds = yt.load( filename ) + ds = yt.load(filename) ad = ds.all_data() # Calculate the z position of the box. @@ -66,30 +72,48 @@ def img_onestep(filename): # was used in the simulation, the rendering shows some jitter. # This is because a cell is added in z at some iterations but not all. # These lines calculate this jitter z_shift and remove it from the camera position and focus - iteration=int(filename[-5:]) - dt = 1./clight * 1./np.sqrt((1./ad['dx'][-1]**2 + 1./ad['dy'][-1]**2 + 1./ad['dz'][-1]**2)) + iteration = int(filename[-5:]) + dt = ( + 1.0 + / clight + * 1.0 + / np.sqrt( + ( + 1.0 / ad["dx"][-1] ** 2 + + 1.0 / ad["dy"][-1] ** 2 + + 1.0 / ad["dz"][-1] ** 2 + ) + ) + ) z_front = dt * float(iteration) * clight - z_shift = z_front-ds.domain_right_edge[2] + z_shift = z_front - ds.domain_right_edge[2] # Create a yt source object for the level1 patch if do_patch: box_patch = yt.visualization.volume_rendering.render_source.BoxSource( - left_edge=ds.index.grids[1].LeftEdge+np.array([0., 0., z_shift])*yt.units.meter, - right_edge=ds.index.grids[1].RightEdge+np.array([0., 0., z_shift])*yt.units.meter, - color=[1.,0.1,0.1,.01]) + left_edge=ds.index.grids[1].LeftEdge + + np.array([0.0, 0.0, z_shift]) * yt.units.meter, + right_edge=ds.index.grids[1].RightEdge + + np.array([0.0, 0.0, z_shift]) * yt.units.meter, + 
color=[1.0, 0.1, 0.1, 0.01], + ) # Handle 2 populations of particles: beam and plasma electrons if do_particles: - point0 = plot_species(species0, ad, 2, .01, 1.) - point1 = plot_species(species1, ad, 1, .002, 20.e-6) + point0 = plot_species(species0, ad, 2, 0.01, 1.0) + point1 = plot_species(species1, ad, 1, 0.002, 20.0e-6) sc = yt.create_scene(ds, field=field) # Set camera properties cam = sc.camera dom_length = ds.domain_width[2].v cam.set_width(ds.quan(dom_length, yt.units.meter)) - cam.position = ds.domain_center + camera_position + np.array([0., 0., z_shift])*yt.units.meter - cam.focus = ds.domain_center + np.array([0., 0., z_shift])*yt.units.meter + cam.position = ( + ds.domain_center + + camera_position + + np.array([0.0, 0.0, z_shift]) * yt.units.meter + ) + cam.focus = ds.domain_center + np.array([0.0, 0.0, z_shift]) * yt.units.meter cam.resolution = resolution # Field rendering properties source = sc[0] @@ -98,17 +122,17 @@ def img_onestep(filename): source.use_ghost_zones = True bounds = (-my_max, my_max) tf = yt.ColorTransferFunction(bounds) - w = (.01*my_max)**2 + w = (0.01 * my_max) ** 2 # Define the transfer function for 3d rendering # 3 isocontours for negative field values # The sharpness of the contour is controlled by argument width - tf.add_gaussian(-.04 *my_max, width=8*w, height=[0.1, 0.1, 1.0, 0.02]) - tf.add_gaussian(-.2 *my_max, width=5*w, height=[0.1, 0.1, 1.0, 0.05]) - tf.add_gaussian(-.6 *my_max, width=w, height=[0.0, 0.0, 1.0, 0.3]) + tf.add_gaussian(-0.04 * my_max, width=8 * w, height=[0.1, 0.1, 1.0, 0.02]) + tf.add_gaussian(-0.2 * my_max, width=5 * w, height=[0.1, 0.1, 1.0, 0.05]) + tf.add_gaussian(-0.6 * my_max, width=w, height=[0.0, 0.0, 1.0, 0.3]) # 3 isocontours for positive field values - tf.add_gaussian(.04 *my_max, width=8*w, height=[1.0, 1.0, 0.2, 0.02]) - tf.add_gaussian(.2 *my_max, width=5*w, height=[1.0, 1.0, 0.2, 0.05]) - tf.add_gaussian(.6 *my_max, width=w, height=[1.0, 1.0, 0.0, 0.3]) + tf.add_gaussian(0.04 * my_max, 
width=8 * w, height=[1.0, 1.0, 0.2, 0.02]) + tf.add_gaussian(0.2 * my_max, width=5 * w, height=[1.0, 1.0, 0.2, 0.05]) + tf.add_gaussian(0.6 * my_max, width=w, height=[1.0, 1.0, 0.0, 0.3]) source.tfh.tf = tf source.tfh.bounds = bounds source.tfh.set_log(False) @@ -118,7 +142,8 @@ def img_onestep(filename): sc.add_source(point1) if do_patch: sc.add_source(box_patch) - sc.save('./img_' + filename[-8:] + '.png', sigma_clip=1.) + sc.save("./img_" + filename[-8:] + ".png", sigma_clip=1.0) + # Get plt folders in current folder and loop over them. file_list.sort() diff --git a/Tools/PostProcessing/yt3d_mpi.py b/Tools/PostProcessing/yt3d_mpi.py index 655327aff3d..10734494280 100644 --- a/Tools/PostProcessing/yt3d_mpi.py +++ b/Tools/PostProcessing/yt3d_mpi.py @@ -4,7 +4,7 @@ # # License: BSD-3-Clause-LBNL -''' +""" This script loops over 3D plotfiles plt*****, generates a 3D rendering of the data with fields and particles, and saves one image per plotfile to img_*****.png. It was written for a beam-driven wakefield acceleration @@ -15,7 +15,7 @@ > mpirun -np 12 python yt3d_mpi.py to generate the images. It can be quite slow for even moderately large plotfiles. -''' +""" import glob @@ -27,60 +27,79 @@ yt.funcs.mylog.setLevel(50) # my_max = 1.e11 # for smooth rendering -my_max = 5.e10 # for layered rendering -species_to_plot = ['plasma_e', 'beam', 'driver'] +my_max = 5.0e10 # for layered rendering +species_to_plot = ["plasma_e", "beam", "driver"] # For each species, provide [red, green, blue, alpha] between 0. and 1. 
-species_colors = { 'plasma_e': [1., 1., 1., .15], - 'beam' : [1., 1., 1., .2 ], - 'driver' : [1., 1., 1., .2 ] } +species_colors = { + "plasma_e": [1.0, 1.0, 1.0, 0.15], + "beam": [1.0, 1.0, 1.0, 0.2], + "driver": [1.0, 1.0, 1.0, 0.2], +} # provide these to avoid jitter when using a moving window use_moving_window = True plot_mr_patch = False -rendering_type = 'layers' # 'layers' or 'smooth' -maxwell_solver = 'ckc' # 'ckc' or 'yee' +rendering_type = "layers" # 'layers' or 'smooth' +maxwell_solver = "ckc" # 'ckc' or 'yee' cfl = 0.99 -file_list = glob.glob('plotfiles/plt?????') +file_list = glob.glob("plotfiles/plt?????") + +bounds = (-my_max, my_max) +z_shift = 0.0 +w = (0.01 * my_max) ** 2 -bounds = ( -my_max, my_max ) -z_shift = 0. -w = (.01*my_max)**2 def jitter_shift(ds, ad, cfl, iteration): - if maxwell_solver == 'yee': - dt = 1./scc.c * 1./np.sqrt((1./ad['dx'][-1]**2 + 1./ad['dy'][-1]**2 + 1./ad['dz'][-1]**2)) - elif maxwell_solver == 'ckc': - dt = cfl * min( [ ad['dx'][-1], ad['dy'][-1], ad['dz'][-1] ] ) / scc.c - z_front = dt * float(iteration) * scc.c + 7.5e-6*yt.units.meter - z_shift = z_front-ds.domain_right_edge[2] + if maxwell_solver == "yee": + dt = ( + 1.0 + / scc.c + * 1.0 + / np.sqrt( + ( + 1.0 / ad["dx"][-1] ** 2 + + 1.0 / ad["dy"][-1] ** 2 + + 1.0 / ad["dz"][-1] ** 2 + ) + ) + ) + elif maxwell_solver == "ckc": + dt = cfl * min([ad["dx"][-1], ad["dy"][-1], ad["dz"][-1]]) / scc.c + z_front = dt * float(iteration) * scc.c + 7.5e-6 * yt.units.meter + z_shift = z_front - ds.domain_right_edge[2] return z_shift + def get_species_ytpoints(ad, species, color_vec): - xp = ad[species,'particle_position_x'].v - yp = ad[species,'particle_position_y'].v - zp = ad[species,'particle_position_z'].v - if species == 'plasma_e': - selection = np.abs(xp)<2.e-6 + xp = ad[species, "particle_position_x"].v + yp = ad[species, "particle_position_y"].v + zp = ad[species, "particle_position_z"].v + if species == "plasma_e": + selection = np.abs(xp) < 2.0e-6 zp = 
zp[selection] yp = yp[selection] xp = xp[selection] - vertices = np.column_stack((xp,yp,zp)) - colors = np.tile(color_vec,(vertices.shape[0], 1)) - points = yt.visualization.volume_rendering.render_source.PointSource(vertices, colors=colors, radii=1) + vertices = np.column_stack((xp, yp, zp)) + colors = np.tile(color_vec, (vertices.shape[0], 1)) + points = yt.visualization.volume_rendering.render_source.PointSource( + vertices, colors=colors, radii=1 + ) return points + def img_onestep(filename): - ds = yt.load( filename ) + ds = yt.load(filename) ad = ds.all_data() - iteration=int(filename[-5:]) - sc = yt.create_scene(ds, field='Ez') + iteration = int(filename[-5:]) + sc = yt.create_scene(ds, field="Ez") if use_moving_window: - z_shift = jitter_shift( ds, ad, cfl, iteration ) - array_shift = z_shift * np.array([0., 0., 1.]) + z_shift = jitter_shift(ds, ad, cfl, iteration) + array_shift = z_shift * np.array([0.0, 0.0, 1.0]) if plot_mr_patch: box_patch = yt.visualization.volume_rendering.render_source.BoxSource( - left_edge =ds.index.grids[1].LeftEdge +array_shift, - right_edge=ds.index.grids[1].RightEdge+array_shift, - color=[1.,0.1,0.1,.01] ) + left_edge=ds.index.grids[1].LeftEdge + array_shift, + right_edge=ds.index.grids[1].RightEdge + array_shift, + color=[1.0, 0.1, 0.1, 0.01], + ) sc.add_source(box_patch) ######################## ### volume rendering ### @@ -90,27 +109,28 @@ def img_onestep(filename): source.grey_opacity = True source.set_log(False) tf = yt.ColorTransferFunction(bounds) - if rendering_type == 'smooth': - tf.add_gaussian(-my_max/4, width=15**2*w, height=[0.0, 0.0, 1.0, 1]) - tf.add_gaussian( my_max/4, width=15**2*w, height=[1.0, 0.0, 0.0, 1]) - if rendering_type == 'layers': + if rendering_type == "smooth": + tf.add_gaussian(-my_max / 4, width=15**2 * w, height=[0.0, 0.0, 1.0, 1]) + tf.add_gaussian(my_max / 4, width=15**2 * w, height=[1.0, 0.0, 0.0, 1]) + if rendering_type == "layers": # NEGATIVE - tf.add_gaussian(-.04 *my_max, width=8*w, 
height=[0.1, 0.1, 1.0, 0.2]) - tf.add_gaussian(-.2 *my_max, width=5*w, height=[0.1, 0.1, 1.0, 0.5]) - tf.add_gaussian(-.6 *my_max, width=w, height=[0.0, 0.0, 1.0, 1.]) + tf.add_gaussian(-0.04 * my_max, width=8 * w, height=[0.1, 0.1, 1.0, 0.2]) + tf.add_gaussian(-0.2 * my_max, width=5 * w, height=[0.1, 0.1, 1.0, 0.5]) + tf.add_gaussian(-0.6 * my_max, width=w, height=[0.0, 0.0, 1.0, 1.0]) # POSITIVE - tf.add_gaussian(.04 *my_max, width=8*w, height=[1.0, 1.0, 0.2, 0.2]) - tf.add_gaussian(.2 *my_max, width=5*w, height=[1.0, 1.0, 0.2, 0.5]) - tf.add_gaussian(.6 *my_max, width=w, height=[1.0, 1.0, 0.0, 1.]) + tf.add_gaussian(0.04 * my_max, width=8 * w, height=[1.0, 1.0, 0.2, 0.2]) + tf.add_gaussian(0.2 * my_max, width=5 * w, height=[1.0, 1.0, 0.2, 0.5]) + tf.add_gaussian(0.6 * my_max, width=w, height=[1.0, 1.0, 0.0, 1.0]) ###################### ### plot particles ### ###################### species_points = {} for species in species_to_plot: - species_points[ species ] = get_species_ytpoints(ad, - species, species_colors[species]) - sc.add_source( species_points[ species ] ) + species_points[species] = get_species_ytpoints( + ad, species, species_colors[species] + ) + sc.add_source(species_points[species]) source.tfh.tf = tf source.tfh.bounds = bounds ######################### @@ -118,20 +138,23 @@ def img_onestep(filename): ######################### cam = sc.camera cam.resolution = (2048, 2048) - cam.width = .00018*yt.units.meter - cam.focus = ds.domain_center + \ - np.array([0., 0., 10.e-6 ])*yt.units.meter + \ - array_shift - cam.position = ds.domain_center + \ - np.array([15., 15., -5. 
])*yt.units.micrometer + \ - array_shift - cam.normal_vector = [-0.3, -0.3, -.2] + cam.width = 0.00018 * yt.units.meter + cam.focus = ( + ds.domain_center + np.array([0.0, 0.0, 10.0e-6]) * yt.units.meter + array_shift + ) + cam.position = ( + ds.domain_center + + np.array([15.0, 15.0, -5.0]) * yt.units.micrometer + + array_shift + ) + cam.normal_vector = [-0.3, -0.3, -0.2] cam.switch_orientation() # save image - if rendering_type == 'smooth': - sc.save('img_' + str(my_number_list[count]).zfill(5), sigma_clip=5.) - if rendering_type == 'layers': - sc.save('img_' + str(my_number_list[count]).zfill(5), sigma_clip=2.) + if rendering_type == "smooth": + sc.save("img_" + str(my_number_list[count]).zfill(5), sigma_clip=5.0) + if rendering_type == "layers": + sc.save("img_" + str(my_number_list[count]).zfill(5), sigma_clip=2.0) + file_list.sort() # Total number of files @@ -142,9 +165,9 @@ def img_onestep(filename): me = comm_world.Get_rank() nrank = comm_world.Get_size() # List of files to process for current proc -my_list = file_list[ (me*nfiles)/nrank : ((me+1)*nfiles)/nrank ] +my_list = file_list[(me * nfiles) / nrank : ((me + 1) * nfiles) / nrank] # List if file numbers for current proc -my_number_list = number_list[ (me*nfiles)/nrank : ((me+1)*nfiles)/nrank ] +my_number_list = number_list[(me * nfiles) / nrank : ((me + 1) * nfiles) / nrank] for count, filename in enumerate(my_list): - print('processing ' + filename) + print("processing " + filename) img_onestep(filename) diff --git a/Tools/Release/updateAMReX.py b/Tools/Release/updateAMReX.py index b01014852d4..7ba3bca8357 100755 --- a/Tools/Release/updateAMReX.py +++ b/Tools/Release/updateAMReX.py @@ -46,7 +46,9 @@ # Current Versions ############################################################ # AMReX development HEAD -amrex_gh = requests.get('https://api.github.com/repos/AMReX-Codes/amrex/commits/development') +amrex_gh = requests.get( + "https://api.github.com/repos/AMReX-Codes/amrex/commits/development" +) 
amrex_HEAD = amrex_gh.json()["sha"] # WarpX references to AMReX: cmake/dependencies/AMReX.cmake @@ -54,18 +56,20 @@ # branch/commit/tag (git fetcher) version # set(WarpX_amrex_branch "development" ... amrex_branch = f"unknown (format issue in {amrex_cmake_path})" -with open(amrex_cmake_path, encoding='utf-8') as f: - r_minimal = re.findall(r'.*set\(WarpX_amrex_branch\s+"(.+)"\s+.*', - f.read(), re.MULTILINE) +with open(amrex_cmake_path, encoding="utf-8") as f: + r_minimal = re.findall( + r'.*set\(WarpX_amrex_branch\s+"(.+)"\s+.*', f.read(), re.MULTILINE + ) if len(r_minimal) >= 1: amrex_branch = r_minimal[0] # minimal (external) version # find_package(AMReX YY.MM CONFIG ... amrex_minimal = f"unknown (format issue in {amrex_cmake_path})" -with open(amrex_cmake_path, encoding='utf-8') as f: - r_minimal = re.findall(r'.*find_package\(AMReX\s+(.+)\s+CONFIG\s+.*', - f.read(), re.MULTILINE) +with open(amrex_cmake_path, encoding="utf-8") as f: + r_minimal = re.findall( + r".*find_package\(AMReX\s+(.+)\s+CONFIG\s+.*", f.read(), re.MULTILINE + ) if len(r_minimal) >= 1: amrex_minimal = r_minimal[0] @@ -84,7 +88,9 @@ print(f"--> Nothing entered, will keep: {amrex_branch}") print() -print(f"Currently, a pre-installed AMReX is required at least at version: {amrex_minimal}") +print( + f"Currently, a pre-installed AMReX is required at least at version: {amrex_minimal}" +) today = datetime.date.today().strftime("%y.%m") amrex_new_minimal = input(f"New minimal AMReX version (e.g. {today})? 
").strip() if not amrex_new_minimal: @@ -106,30 +112,34 @@ # run_test.sh (used also for Azure Pipelines) run_test_path = str(REPO_DIR.joinpath("run_test.sh")) -with open(run_test_path, encoding='utf-8') as f: +with open(run_test_path, encoding="utf-8") as f: run_test_content = f.read() # branch/commit/tag (git fetcher) version # cd amrex && git checkout COMMIT_TAG_OR_BRANCH && cd - run_test_content = re.sub( - r'(.*cd\s+amrex.+git checkout\s+--detach\s+)(.+)(\s+&&\s.*)', - r'\g<1>{}\g<3>'.format(amrex_new_branch), - run_test_content, flags = re.MULTILINE) + r"(.*cd\s+amrex.+git checkout\s+--detach\s+)(.+)(\s+&&\s.*)", + r"\g<1>{}\g<3>".format(amrex_new_branch), + run_test_content, + flags=re.MULTILINE, + ) -with open(run_test_path, "w", encoding='utf-8') as f: +with open(run_test_path, "w", encoding="utf-8") as f: f.write(run_test_content) # CI: legacy build check in .github/workflows/cuda.yml ci_gnumake_path = str(REPO_DIR.joinpath(".github/workflows/cuda.yml")) -with open(ci_gnumake_path, encoding='utf-8') as f: +with open(ci_gnumake_path, encoding="utf-8") as f: ci_gnumake_content = f.read() # branch/commit/tag (git fetcher) version # cd ../amrex && git checkout COMMIT_TAG_OR_BRANCH && cd - ci_gnumake_content = re.sub( - r'(.*cd\s+\.\./amrex.+git checkout\s+--detach\s+)(.+)(\s+&&\s.*)', - r'\g<1>{}\g<3>'.format(amrex_new_branch), - ci_gnumake_content, flags = re.MULTILINE) + r"(.*cd\s+\.\./amrex.+git checkout\s+--detach\s+)(.+)(\s+&&\s.*)", + r"\g<1>{}\g<3>".format(amrex_new_branch), + ci_gnumake_content, + flags=re.MULTILINE, + ) -with open(ci_gnumake_path, "w", encoding='utf-8') as f: +with open(ci_gnumake_path, "w", encoding="utf-8") as f: f.write(ci_gnumake_content) if ConfigUpdater is not None: @@ -138,7 +148,7 @@ cp = ConfigUpdater() cp.optionxform = str cp.read(tests_ini_path) - cp['AMReX']['branch'].value = amrex_new_branch + cp["AMReX"]["branch"].value = amrex_new_branch cp.update_file() # WarpX-GPU-tests.ini @@ -146,28 +156,32 @@ cp = ConfigUpdater() 
cp.optionxform = str cp.read(tests_gpu_ini_path) - cp['AMReX']['branch'].value = amrex_new_branch + cp["AMReX"]["branch"].value = amrex_new_branch cp.update_file() # WarpX references to AMReX: cmake/dependencies/AMReX.cmake -with open(amrex_cmake_path, encoding='utf-8') as f: +with open(amrex_cmake_path, encoding="utf-8") as f: amrex_cmake_content = f.read() # branch/commit/tag (git fetcher) version # set(WarpX_amrex_branch "development" ... amrex_cmake_content = re.sub( r'(.*set\(WarpX_amrex_branch\s+")(.+)("\s+.*)', - r'\g<1>{}\g<3>'.format(amrex_new_branch), - amrex_cmake_content, flags = re.MULTILINE) + r"\g<1>{}\g<3>".format(amrex_new_branch), + amrex_cmake_content, + flags=re.MULTILINE, + ) # minimal (external) version # find_package(AMReX YY.MM CONFIG ... amrex_cmake_content = re.sub( - r'(.*find_package\(AMReX\s+)(.+)(\s+CONFIG\s+.*)', - r'\g<1>{}\g<3>'.format(amrex_new_minimal), - amrex_cmake_content, flags = re.MULTILINE) + r"(.*find_package\(AMReX\s+)(.+)(\s+CONFIG\s+.*)", + r"\g<1>{}\g<3>".format(amrex_new_minimal), + amrex_cmake_content, + flags=re.MULTILINE, + ) -with open(amrex_cmake_path, "w", encoding='utf-8') as f: +with open(amrex_cmake_path, "w", encoding="utf-8") as f: f.write(amrex_cmake_content) diff --git a/Tools/Release/updatePICSAR.py b/Tools/Release/updatePICSAR.py index fe15e5b120e..5148c16727e 100755 --- a/Tools/Release/updatePICSAR.py +++ b/Tools/Release/updatePICSAR.py @@ -37,7 +37,9 @@ # Current Versions ############################################################ # PICSAR development HEAD -PICSAR_gh = requests.get('https://api.github.com/repos/ECP-WarpX/picsar/commits/development') +PICSAR_gh = requests.get( + "https://api.github.com/repos/ECP-WarpX/picsar/commits/development" +) PICSAR_HEAD = PICSAR_gh.json()["sha"] # WarpX references to PICSAR: cmake/dependencies/PICSAR.cmake @@ -45,18 +47,20 @@ # branch/commit/tag (git fetcher) version # set(WarpX_picsar_branch "development" ... 
PICSAR_branch = f"unknown (format issue in {PICSAR_cmake_path})" -with open(PICSAR_cmake_path, encoding='utf-8') as f: - r_minimal = re.findall(r'.*set\(WarpX_picsar_branch\s+"(.+)"\s+.*', - f.read(), re.MULTILINE) +with open(PICSAR_cmake_path, encoding="utf-8") as f: + r_minimal = re.findall( + r'.*set\(WarpX_picsar_branch\s+"(.+)"\s+.*', f.read(), re.MULTILINE + ) if len(r_minimal) >= 1: PICSAR_branch = r_minimal[0] # minimal (external) version # find_package(PICSAR YY.MM CONFIG ... PICSAR_minimal = f"unknown (format issue in {PICSAR_cmake_path})" -with open(PICSAR_cmake_path, encoding='utf-8') as f: - r_minimal = re.findall(r'.*find_package\(PICSAR\s+(.+)\s+CONFIG\s+.*', - f.read(), re.MULTILINE) +with open(PICSAR_cmake_path, encoding="utf-8") as f: + r_minimal = re.findall( + r".*find_package\(PICSAR\s+(.+)\s+CONFIG\s+.*", f.read(), re.MULTILINE + ) if len(r_minimal) >= 1: PICSAR_minimal = r_minimal[0] @@ -75,7 +79,9 @@ print(f"--> Nothing entered, will keep: {PICSAR_branch}") print() -print(f"Currently, a pre-installed PICSAR is required at least at version: {PICSAR_minimal}") +print( + f"Currently, a pre-installed PICSAR is required at least at version: {PICSAR_minimal}" +) today = datetime.date.today().strftime("%y.%m") PICSAR_new_minimal = input(f"New minimal PICSAR version (e.g. {today})? ").strip() if not PICSAR_new_minimal: @@ -96,24 +102,28 @@ # Updates ##################################################################### # WarpX references to PICSAR: cmake/dependencies/PICSAR.cmake -with open(PICSAR_cmake_path, encoding='utf-8') as f: +with open(PICSAR_cmake_path, encoding="utf-8") as f: PICSAR_cmake_content = f.read() # branch/commit/tag (git fetcher) version # set(WarpX_picsar_branch "development" ... 
PICSAR_cmake_content = re.sub( r'(.*set\(WarpX_picsar_branch\s+")(.+)("\s+.*)', - r'\g<1>{}\g<3>'.format(PICSAR_new_branch), - PICSAR_cmake_content, flags = re.MULTILINE) + r"\g<1>{}\g<3>".format(PICSAR_new_branch), + PICSAR_cmake_content, + flags=re.MULTILINE, + ) # minimal (external) version # find_package(PICSAR YY.MM CONFIG ... PICSAR_cmake_content = re.sub( - r'(.*find_package\(PICSAR\s+)(.+)(\s+CONFIG\s+.*)', - r'\g<1>{}\g<3>'.format(PICSAR_new_minimal), - PICSAR_cmake_content, flags = re.MULTILINE) + r"(.*find_package\(PICSAR\s+)(.+)(\s+CONFIG\s+.*)", + r"\g<1>{}\g<3>".format(PICSAR_new_minimal), + PICSAR_cmake_content, + flags=re.MULTILINE, + ) -with open(PICSAR_cmake_path, "w", encoding='utf-8') as f: +with open(PICSAR_cmake_path, "w", encoding="utf-8") as f: f.write(PICSAR_cmake_content) diff --git a/Tools/Release/updatepyAMReX.py b/Tools/Release/updatepyAMReX.py index 04887dc4988..68001222241 100755 --- a/Tools/Release/updatepyAMReX.py +++ b/Tools/Release/updatepyAMReX.py @@ -37,7 +37,9 @@ # Current Versions ############################################################ # pyAMReX development HEAD -pyamrex_gh = requests.get('https://api.github.com/repos/AMReX-Codes/pyamrex/commits/development') +pyamrex_gh = requests.get( + "https://api.github.com/repos/AMReX-Codes/pyamrex/commits/development" +) pyamrex_HEAD = pyamrex_gh.json()["sha"] # WarpX references to pyAMReX: cmake/dependencies/pyAMReX.cmake @@ -45,18 +47,20 @@ # branch/commit/tag (git fetcher) version # set(WarpX_pyamrex_branch "development" ... 
pyamrex_branch = f"unknown (format issue in {pyamrex_cmake_path})" -with open(pyamrex_cmake_path, encoding='utf-8') as f: - r_minimal = re.findall(r'.*set\(WarpX_pyamrex_branch\s+"(.+)"\s+.*', - f.read(), re.MULTILINE) +with open(pyamrex_cmake_path, encoding="utf-8") as f: + r_minimal = re.findall( + r'.*set\(WarpX_pyamrex_branch\s+"(.+)"\s+.*', f.read(), re.MULTILINE + ) if len(r_minimal) >= 1: pyamrex_branch = r_minimal[0] # minimal (external) version # find_package(AMReX YY.MM CONFIG ... pyamrex_minimal = f"unknown (format issue in {pyamrex_cmake_path})" -with open(pyamrex_cmake_path, encoding='utf-8') as f: - r_minimal = re.findall(r'.*find_package\(pyAMReX\s+(.+)\s+CONFIG\s+.*', - f.read(), re.MULTILINE) +with open(pyamrex_cmake_path, encoding="utf-8") as f: + r_minimal = re.findall( + r".*find_package\(pyAMReX\s+(.+)\s+CONFIG\s+.*", f.read(), re.MULTILINE + ) if len(r_minimal) >= 1: pyamrex_minimal = r_minimal[0] @@ -67,7 +71,9 @@ Please answer the following questions about the version number you want to require from pyAMReX:\n""") -print(f"Currently, WarpX builds against this pyAMReX commit/branch/sha: {pyamrex_branch}") +print( + f"Currently, WarpX builds against this pyAMReX commit/branch/sha: {pyamrex_branch}" +) print(f"pyAMReX HEAD commit (development branch): {pyamrex_HEAD}") pyamrex_new_branch = input("Update pyAMReX commit/branch/sha: ").strip() if not pyamrex_new_branch: @@ -75,7 +81,9 @@ print(f"--> Nothing entered, will keep: {pyamrex_branch}") print() -print(f"Currently, a pre-installed pyAMReX is required at least at version: {pyamrex_minimal}") +print( + f"Currently, a pre-installed pyAMReX is required at least at version: {pyamrex_minimal}" +) today = datetime.date.today().strftime("%y.%m") pyamrex_new_minimal = input(f"New minimal pyAMReX version (e.g. {today})? 
").strip() if not pyamrex_new_minimal: @@ -96,24 +104,28 @@ # Updates ##################################################################### # WarpX references to pyAMReX: cmake/dependencies/pyAMReX.cmake -with open(pyamrex_cmake_path, encoding='utf-8') as f: +with open(pyamrex_cmake_path, encoding="utf-8") as f: pyAMReX_cmake_content = f.read() # branch/commit/tag (git fetcher) version # set(WarpX_pyamrex_branch "development" ... pyAMReX_cmake_content = re.sub( r'(.*set\(WarpX_pyamrex_branch\s+")(.+)("\s+.*)', - r'\g<1>{}\g<3>'.format(pyamrex_new_branch), - pyAMReX_cmake_content, flags = re.MULTILINE) + r"\g<1>{}\g<3>".format(pyamrex_new_branch), + pyAMReX_cmake_content, + flags=re.MULTILINE, + ) # minimal (external) version # find_package(AMReX YY.MM CONFIG ... pyAMReX_cmake_content = re.sub( - r'(.*find_package\(pyAMReX\s+)(.+)(\s+CONFIG\s+.*)', - r'\g<1>{}\g<3>'.format(pyamrex_new_minimal), - pyAMReX_cmake_content, flags = re.MULTILINE) + r"(.*find_package\(pyAMReX\s+)(.+)(\s+CONFIG\s+.*)", + r"\g<1>{}\g<3>".format(pyamrex_new_minimal), + pyAMReX_cmake_content, + flags=re.MULTILINE, + ) -with open(pyamrex_cmake_path, "w", encoding='utf-8') as f: +with open(pyamrex_cmake_path, "w", encoding="utf-8") as f: f.write(pyAMReX_cmake_content) diff --git a/pyproject.toml b/pyproject.toml index f9e615f9b83..9d5e78a6cc4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,6 +7,16 @@ requires = [ ] build-backend = "setuptools.build_meta" +[tool.ruff.format] +docstring-code-format = true + +[tool.ruff.lint] +select = ["E", "F", "I"] +ignore = ["E402", "E501", "F405"] + +[tool.ruff.lint.isort] +known-first-party = ["amrex", "picmistandard", "pywarpx", "warpx"] + [tool.isort] known_first_party = ["amrex", "picmistandard", "pywarpx", "warpx"] profile = "black" diff --git a/setup.py b/setup.py index ecce858518d..acf61165e98 100644 --- a/setup.py +++ b/setup.py @@ -38,8 +38,10 @@ def run(self): lib_path = os.path.join(PYWARPX_LIB_DIR, lib_name) libs_found.append(lib_path) if 
len(libs_found) == 0: - raise RuntimeError("Error: no pre-build WarpX modules found in " - "PYWARPX_LIB_DIR='{}'".format(PYWARPX_LIB_DIR)) + raise RuntimeError( + "Error: no pre-build WarpX modules found in " + "PYWARPX_LIB_DIR='{}'".format(PYWARPX_LIB_DIR) + ) # copy external libs into collection of files in a temporary build dir dst_path = os.path.join(self.build_lib, "pywarpx") @@ -48,7 +50,7 @@ def run(self): class CMakeExtension(Extension): - def __init__(self, name, sourcedir=''): + def __init__(self, name, sourcedir=""): Extension.__init__(self, name, sources=[]) self.sourcedir = os.path.abspath(sourcedir) @@ -58,12 +60,13 @@ def run(self): from packaging.version import parse try: - out = subprocess.check_output(['cmake', '--version']) + out = subprocess.check_output(["cmake", "--version"]) except OSError: raise RuntimeError( - "CMake 3.20.0+ must be installed to build the following " + - "extensions: " + - ", ".join(e.name for e in self.extensions)) + "CMake 3.20.0+ must be installed to build the following " + + "extensions: " + + ", ".join(e.name for e in self.extensions) + ) cmake_version = parse(re.search(r"version\s*([\d.]+)", out.decode()).group(1)) if cmake_version < parse("3.20.0"): @@ -73,125 +76,115 @@ def run(self): self.build_extension(ext) def build_extension(self, ext): - extdir = os.path.abspath(os.path.dirname( - self.get_ext_fullpath(ext.name) - )) + extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name))) # required for auto-detection of auxiliary "native" libs if not extdir.endswith(os.path.sep): extdir += os.path.sep - r_dim = re.search(r'warpx_(1|2|rz|3)(?:d*)', ext.name) + r_dim = re.search(r"warpx_(1|2|rz|3)(?:d*)", ext.name) dims = r_dim.group(1).upper() cmake_args = [ - '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + - os.path.join(extdir, "pywarpx"), - '-DCMAKE_RUNTIME_OUTPUT_DIRECTORY=' + extdir, - '-DWarpX_DIMS=' + dims, - '-DWarpX_APP:BOOL=OFF', + "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + os.path.join(extdir, "pywarpx"), + 
"-DCMAKE_RUNTIME_OUTPUT_DIRECTORY=" + extdir, + "-DWarpX_DIMS=" + dims, + "-DWarpX_APP:BOOL=OFF", ## variants - '-DWarpX_COMPUTE=' + WARPX_COMPUTE, - '-DWarpX_MPI:BOOL=' + WARPX_MPI, - '-DWarpX_EB:BOOL=' + WARPX_EB, - '-DWarpX_OPENPMD:BOOL=' + WARPX_OPENPMD, - '-DWarpX_PRECISION=' + WARPX_PRECISION, - '-DWarpX_PARTICLE_PRECISION=' + WARPX_PARTICLE_PRECISION, - '-DWarpX_FFT:BOOL=' + WARPX_FFT, - '-DWarpX_HEFFTE:BOOL=' + WARPX_HEFFTE, - '-DWarpX_PYTHON:BOOL=ON', - '-DWarpX_PYTHON_IPO:BOOL=' + WARPX_PYTHON_IPO, - '-DWarpX_QED:BOOL=' + WARPX_QED, - '-DWarpX_QED_TABLE_GEN:BOOL=' + WARPX_QED_TABLE_GEN, + "-DWarpX_COMPUTE=" + WARPX_COMPUTE, + "-DWarpX_MPI:BOOL=" + WARPX_MPI, + "-DWarpX_EB:BOOL=" + WARPX_EB, + "-DWarpX_OPENPMD:BOOL=" + WARPX_OPENPMD, + "-DWarpX_PRECISION=" + WARPX_PRECISION, + "-DWarpX_PARTICLE_PRECISION=" + WARPX_PARTICLE_PRECISION, + "-DWarpX_FFT:BOOL=" + WARPX_FFT, + "-DWarpX_HEFFTE:BOOL=" + WARPX_HEFFTE, + "-DWarpX_PYTHON:BOOL=ON", + "-DWarpX_PYTHON_IPO:BOOL=" + WARPX_PYTHON_IPO, + "-DWarpX_QED:BOOL=" + WARPX_QED, + "-DWarpX_QED_TABLE_GEN:BOOL=" + WARPX_QED_TABLE_GEN, ## dependency control (developers & package managers) - '-DWarpX_amrex_internal=' + WARPX_AMREX_INTERNAL, + "-DWarpX_amrex_internal=" + WARPX_AMREX_INTERNAL, # PEP-440 conformant version from package "-DpyWarpX_VERSION_INFO=" + self.distribution.get_version(), # see PICSAR and openPMD below ## static/shared libs - '-DBUILD_SHARED_LIBS:BOOL=' + BUILD_SHARED_LIBS, + "-DBUILD_SHARED_LIBS:BOOL=" + BUILD_SHARED_LIBS, ## Unix: rpath to current dir when packaged ## needed for shared (here non-default) builds and ADIOS1 ## wrapper libraries - '-DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=ON', - '-DCMAKE_INSTALL_RPATH_USE_LINK_PATH:BOOL=OFF', + "-DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=ON", + "-DCMAKE_INSTALL_RPATH_USE_LINK_PATH:BOOL=OFF", # Windows: has no RPath concept, all `.dll`s must be in %PATH% # or same dir as calling executable ] - if WARPX_QED.upper() in ['1', 'ON', 'TRUE', 'YES']: - 
cmake_args.append('-DWarpX_picsar_internal=' + WARPX_PICSAR_INTERNAL) - if WARPX_OPENPMD.upper() in ['1', 'ON', 'TRUE', 'YES']: + if WARPX_QED.upper() in ["1", "ON", "TRUE", "YES"]: + cmake_args.append("-DWarpX_picsar_internal=" + WARPX_PICSAR_INTERNAL) + if WARPX_OPENPMD.upper() in ["1", "ON", "TRUE", "YES"]: cmake_args += [ - '-DHDF5_USE_STATIC_LIBRARIES:BOOL=' + HDF5_USE_STATIC_LIBRARIES, - '-DADIOS_USE_STATIC_LIBS:BOOL=' + ADIOS_USE_STATIC_LIBS, - '-DWarpX_openpmd_internal=' + WARPX_OPENPMD_INTERNAL, + "-DHDF5_USE_STATIC_LIBRARIES:BOOL=" + HDF5_USE_STATIC_LIBRARIES, + "-DADIOS_USE_STATIC_LIBS:BOOL=" + ADIOS_USE_STATIC_LIBS, + "-DWarpX_openpmd_internal=" + WARPX_OPENPMD_INTERNAL, ] # further dependency control (developers & package managers) if WARPX_AMREX_SRC: - cmake_args.append('-DWarpX_amrex_src=' + WARPX_AMREX_SRC) + cmake_args.append("-DWarpX_amrex_src=" + WARPX_AMREX_SRC) if WARPX_AMREX_REPO: - cmake_args.append('-DWarpX_amrex_repo=' + WARPX_AMREX_REPO) + cmake_args.append("-DWarpX_amrex_repo=" + WARPX_AMREX_REPO) if WARPX_AMREX_BRANCH: - cmake_args.append('-DWarpX_amrex_branch=' + WARPX_AMREX_BRANCH) + cmake_args.append("-DWarpX_amrex_branch=" + WARPX_AMREX_BRANCH) if WARPX_OPENPMD_SRC: - cmake_args.append('-DWarpX_openpmd_src=' + WARPX_OPENPMD_SRC) + cmake_args.append("-DWarpX_openpmd_src=" + WARPX_OPENPMD_SRC) if WARPX_PICSAR_SRC: - cmake_args.append('-DWarpX_picsar_src=' + WARPX_PICSAR_SRC) + cmake_args.append("-DWarpX_picsar_src=" + WARPX_PICSAR_SRC) if WARPX_PYAMREX_SRC: - cmake_args.append('-DWarpX_pyamrex_src=' + WARPX_PYAMREX_SRC) + cmake_args.append("-DWarpX_pyamrex_src=" + WARPX_PYAMREX_SRC) if WARPX_PYAMREX_INTERNAL: - cmake_args.append('-DWarpX_pyamrex_internal=' + WARPX_PYAMREX_INTERNAL) + cmake_args.append("-DWarpX_pyamrex_internal=" + WARPX_PYAMREX_INTERNAL) if WARPX_PYBIND11_SRC: - cmake_args.append('-DWarpX_pybind11_src=' + WARPX_PYBIND11_SRC) + cmake_args.append("-DWarpX_pybind11_src=" + WARPX_PYBIND11_SRC) if WARPX_PYBIND11_INTERNAL: - 
cmake_args.append('-DWarpX_pybind11_internal=' + WARPX_PYBIND11_INTERNAL) + cmake_args.append("-DWarpX_pybind11_internal=" + WARPX_PYBIND11_INTERNAL) if WARPX_CCACHE_PROGRAM is not None: - cmake_args.append('-DCCACHE_PROGRAM=' + WARPX_CCACHE_PROGRAM) + cmake_args.append("-DCCACHE_PROGRAM=" + WARPX_CCACHE_PROGRAM) if sys.platform == "darwin": - cmake_args.append('-DCMAKE_INSTALL_RPATH=@loader_path') + cmake_args.append("-DCMAKE_INSTALL_RPATH=@loader_path") else: # values: linux*, aix, freebsd, ... # just as well win32 & cygwin (although Windows has no RPaths) - cmake_args.append('-DCMAKE_INSTALL_RPATH=$ORIGIN') + cmake_args.append("-DCMAKE_INSTALL_RPATH=$ORIGIN") - cfg = 'Debug' if self.debug else 'Release' - build_args = ['--config', cfg] + cfg = "Debug" if self.debug else "Release" + build_args = ["--config", cfg] if platform.system() == "Windows": cmake_args += [ - '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format( - cfg.upper(), - os.path.join(extdir, "pywarpx") + "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}".format( + cfg.upper(), os.path.join(extdir, "pywarpx") ) ] if sys.maxsize > 2**32: - cmake_args += ['-A', 'x64'] + cmake_args += ["-A", "x64"] else: - cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg] + cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg] - build_args += ['--parallel', BUILD_PARALLEL] + build_args += ["--parallel", BUILD_PARALLEL] build_dir = os.path.join(self.build_temp, dims) os.makedirs(build_dir, exist_ok=True) - subprocess.check_call( - ['cmake', ext.sourcedir] + cmake_args, - cwd=build_dir - ) - subprocess.check_call( - ['cmake', '--build', '.'] + build_args, - cwd=build_dir - ) + subprocess.check_call(["cmake", ext.sourcedir] + cmake_args, cwd=build_dir) + subprocess.check_call(["cmake", "--build", "."] + build_args, cwd=build_dir) # note that this does not call install; # we pick up artifacts directly from the build output dirs -with open('./README.md', encoding='utf-8') as f: +with open("./README.md", encoding="utf-8") as f: long_description = f.read() 
# Allow to control options via environment vars. # Work-around for https://github.com/pypa/setuptools/issues/1712 # Pick up existing WarpX libraries or... -PYWARPX_LIB_DIR = os.environ.get('PYWARPX_LIB_DIR') +PYWARPX_LIB_DIR = os.environ.get("PYWARPX_LIB_DIR") WARPX_PYTHON_IPO = os.environ.get("WARPX_PYTHON_IPO", "ON") @@ -200,143 +193,152 @@ def build_extension(self, ext): # note: changed default for SHARED, MPI, TESTING and EXAMPLES # note: we use all-uppercase variable names for environment control to be # consistent across platforms (especially Windows) -WARPX_COMPUTE = env.pop('WARPX_COMPUTE', 'OMP') -WARPX_MPI = env.pop('WARPX_MPI', 'OFF') -WARPX_EB = env.pop('WARPX_EB', 'OFF') -WARPX_OPENPMD = env.pop('WARPX_OPENPMD', 'ON') -WARPX_PRECISION = env.pop('WARPX_PRECISION', 'DOUBLE') -WARPX_PARTICLE_PRECISION = env.pop('WARPX_PARTICLE_PRECISION', WARPX_PRECISION) -WARPX_FFT = env.pop('WARPX_FFT', 'OFF') -WARPX_HEFFTE = env.pop('WARPX_HEFFTE', 'OFF') -WARPX_QED = env.pop('WARPX_QED', 'ON') -WARPX_QED_TABLE_GEN = env.pop('WARPX_QED_TABLE_GEN', 'OFF') -WARPX_DIMS = env.pop('WARPX_DIMS', '1;2;RZ;3') -BUILD_PARALLEL = env.pop('BUILD_PARALLEL', '2') -BUILD_SHARED_LIBS = env.pop('WARPX_BUILD_SHARED_LIBS', - 'OFF') -#BUILD_TESTING = env.pop('WARPX_BUILD_TESTING', +WARPX_COMPUTE = env.pop("WARPX_COMPUTE", "OMP") +WARPX_MPI = env.pop("WARPX_MPI", "OFF") +WARPX_EB = env.pop("WARPX_EB", "OFF") +WARPX_OPENPMD = env.pop("WARPX_OPENPMD", "ON") +WARPX_PRECISION = env.pop("WARPX_PRECISION", "DOUBLE") +WARPX_PARTICLE_PRECISION = env.pop("WARPX_PARTICLE_PRECISION", WARPX_PRECISION) +WARPX_FFT = env.pop("WARPX_FFT", "OFF") +WARPX_HEFFTE = env.pop("WARPX_HEFFTE", "OFF") +WARPX_QED = env.pop("WARPX_QED", "ON") +WARPX_QED_TABLE_GEN = env.pop("WARPX_QED_TABLE_GEN", "OFF") +WARPX_DIMS = env.pop("WARPX_DIMS", "1;2;RZ;3") +BUILD_PARALLEL = env.pop("BUILD_PARALLEL", "2") +BUILD_SHARED_LIBS = env.pop("WARPX_BUILD_SHARED_LIBS", "OFF") +# BUILD_TESTING = env.pop('WARPX_BUILD_TESTING', # 'OFF') 
-#BUILD_EXAMPLES = env.pop('WARPX_BUILD_EXAMPLES', +# BUILD_EXAMPLES = env.pop('WARPX_BUILD_EXAMPLES', # 'OFF') # openPMD-api sub-control -HDF5_USE_STATIC_LIBRARIES = env.pop('HDF5_USE_STATIC_LIBRARIES', 'OFF') -ADIOS_USE_STATIC_LIBS = env.pop('ADIOS_USE_STATIC_LIBS', 'OFF') +HDF5_USE_STATIC_LIBRARIES = env.pop("HDF5_USE_STATIC_LIBRARIES", "OFF") +ADIOS_USE_STATIC_LIBS = env.pop("ADIOS_USE_STATIC_LIBS", "OFF") # CMake dependency control (developers & package managers) -WARPX_AMREX_SRC = env.pop('WARPX_AMREX_SRC', '') -WARPX_AMREX_REPO = env.pop('WARPX_AMREX_REPO', '') -WARPX_AMREX_BRANCH = env.pop('WARPX_AMREX_BRANCH', '') -WARPX_AMREX_INTERNAL = env.pop('WARPX_AMREX_INTERNAL', 'ON') -WARPX_OPENPMD_SRC = env.pop('WARPX_OPENPMD_SRC', '') -WARPX_OPENPMD_INTERNAL = env.pop('WARPX_OPENPMD_INTERNAL', 'ON') -WARPX_PICSAR_SRC = env.pop('WARPX_PICSAR_SRC', '') -WARPX_PICSAR_INTERNAL = env.pop('WARPX_PICSAR_INTERNAL', 'ON') -WARPX_PYAMREX_SRC = env.pop('WARPX_PYAMREX_SRC', '') -WARPX_PYAMREX_INTERNAL = env.pop('WARPX_PYAMREX_INTERNAL', 'ON') -WARPX_PYBIND11_SRC = env.pop('WARPX_PYBIND11_SRC', '') -WARPX_PYBIND11_INTERNAL = env.pop('WARPX_PYBIND11_INTERNAL', 'ON') -WARPX_CCACHE_PROGRAM = env.pop('WARPX_CCACHE_PROGRAM', None) +WARPX_AMREX_SRC = env.pop("WARPX_AMREX_SRC", "") +WARPX_AMREX_REPO = env.pop("WARPX_AMREX_REPO", "") +WARPX_AMREX_BRANCH = env.pop("WARPX_AMREX_BRANCH", "") +WARPX_AMREX_INTERNAL = env.pop("WARPX_AMREX_INTERNAL", "ON") +WARPX_OPENPMD_SRC = env.pop("WARPX_OPENPMD_SRC", "") +WARPX_OPENPMD_INTERNAL = env.pop("WARPX_OPENPMD_INTERNAL", "ON") +WARPX_PICSAR_SRC = env.pop("WARPX_PICSAR_SRC", "") +WARPX_PICSAR_INTERNAL = env.pop("WARPX_PICSAR_INTERNAL", "ON") +WARPX_PYAMREX_SRC = env.pop("WARPX_PYAMREX_SRC", "") +WARPX_PYAMREX_INTERNAL = env.pop("WARPX_PYAMREX_INTERNAL", "ON") +WARPX_PYBIND11_SRC = env.pop("WARPX_PYBIND11_SRC", "") +WARPX_PYBIND11_INTERNAL = env.pop("WARPX_PYBIND11_INTERNAL", "ON") +WARPX_CCACHE_PROGRAM = env.pop("WARPX_CCACHE_PROGRAM", None) 
for key in env.keys(): - if key.lower().startswith('warpx'): - print(f"\nWARNING: Found environment variable '{key}', which is not a recognized WarpX option\n") + if key.lower().startswith("warpx"): + print( + f"\nWARNING: Found environment variable '{key}', which is not a recognized WarpX option\n" + ) # https://cmake.org/cmake/help/v3.0/command/if.html -if WARPX_MPI.upper() in ['1', 'ON', 'TRUE', 'YES']: +if WARPX_MPI.upper() in ["1", "ON", "TRUE", "YES"]: WARPX_MPI = "ON" else: WARPX_MPI = "OFF" # Include embedded boundary functionality -if WARPX_EB.upper() in ['1', 'ON', 'TRUE', 'YES']: +if WARPX_EB.upper() in ["1", "ON", "TRUE", "YES"]: WARPX_EB = "ON" else: WARPX_EB = "OFF" # for CMake -cxx_modules = [] # values: warpx_1d, warpx_2d, warpx_rz, warpx_3d -cmdclass = {} # build extensions +cxx_modules = [] # values: warpx_1d, warpx_2d, warpx_rz, warpx_3d +cmdclass = {} # build extensions # externally pre-built: pick up pre-built WarpX libraries if PYWARPX_LIB_DIR: - cmdclass=dict(build=CopyPreBuild) + cmdclass = dict(build=CopyPreBuild) # CMake: build WarpX libraries ourselves else: cmdclass = dict(build_ext=CMakeBuild) - for dim in [x.lower() for x in WARPX_DIMS.split(';')]: + for dim in [x.lower() for x in WARPX_DIMS.split(";")]: name = dim if dim == "rz" else dim + "d" cxx_modules.append(CMakeExtension("warpx_" + name)) # Get the package requirements from the requirements.txt file install_requires = [] -with open('./requirements.txt') as f: - install_requires = [line.strip('\n') for line in f.readlines()] +with open("./requirements.txt") as f: + install_requires = [line.strip("\n") for line in f.readlines()] if WARPX_MPI == "ON": - install_requires.append('mpi4py>=2.1.0') + install_requires.append("mpi4py>=2.1.0") # keyword reference: # https://packaging.python.org/guides/distributing-packages-using-setuptools setup( - name='pywarpx', + name="pywarpx", # note PEP-440 syntax: x.y.zaN but x.y.z.devN - version = '24.08', - packages = ['pywarpx'], - package_dir = 
{'pywarpx': 'Python/pywarpx'}, - author='Jean-Luc Vay, David P. Grote, Maxence Thévenet, Rémi Lehe, Andrew Myers, Weiqun Zhang, Axel Huebl, et al.', - author_email='jlvay@lbl.gov, grote1@llnl.gov, maxence.thevenet@desy.de, rlehe@lbl.gov, atmyers@lbl.gov, WeiqunZhang@lbl.gov, axelhuebl@lbl.gov', - maintainer='Axel Huebl, David P. Grote, Rémi Lehe', # wheel/pypi packages - maintainer_email='axelhuebl@lbl.gov, grote1@llnl.gov, rlehe@lbl.gov', - description='WarpX is an advanced electromagnetic Particle-In-Cell code.', + version="24.08", + packages=["pywarpx"], + package_dir={"pywarpx": "Python/pywarpx"}, + author="Jean-Luc Vay, David P. Grote, Maxence Thévenet, Rémi Lehe, Andrew Myers, Weiqun Zhang, Axel Huebl, et al.", + author_email="jlvay@lbl.gov, grote1@llnl.gov, maxence.thevenet@desy.de, rlehe@lbl.gov, atmyers@lbl.gov, WeiqunZhang@lbl.gov, axelhuebl@lbl.gov", + maintainer="Axel Huebl, David P. Grote, Rémi Lehe", # wheel/pypi packages + maintainer_email="axelhuebl@lbl.gov, grote1@llnl.gov, rlehe@lbl.gov", + description="WarpX is an advanced electromagnetic Particle-In-Cell code.", long_description=long_description, - long_description_content_type='text/markdown', - keywords=('WarpX openscience mpi hpc research pic particle-in-cell ' - 'plasma laser-plasma accelerator modeling simulation'), - url='https://ecp-warpx.github.io', + long_description_content_type="text/markdown", + keywords=( + "WarpX openscience mpi hpc research pic particle-in-cell " + "plasma laser-plasma accelerator modeling simulation" + ), + url="https://ecp-warpx.github.io", project_urls={ - 'Documentation': 'https://warpx.readthedocs.io', - 'Doxygen': 'https://warpx.readthedocs.io/en/latest/_static/doxyhtml/index.html', + "Documentation": "https://warpx.readthedocs.io", + "Doxygen": "https://warpx.readthedocs.io/en/latest/_static/doxyhtml/index.html", #'Reference': 'https://doi.org/...', (Paper and/or Zenodo) - 'Source': 'https://github.com/ECP-WarpX/WarpX', - 'Tracker': 
'https://github.com/ECP-WarpX/WarpX/issues', + "Source": "https://github.com/ECP-WarpX/WarpX", + "Tracker": "https://github.com/ECP-WarpX/WarpX/issues", }, # CMake: self-built as extension module ext_modules=cxx_modules, cmdclass=cmdclass, # scripts=['warpx_1d', 'warpx_2d', 'warpx_rz', 'warpx_3d'], zip_safe=False, - python_requires='>=3.8', + python_requires=">=3.8", # tests_require=['pytest'], install_requires=install_requires, # see: src/bindings/python/cli - #entry_points={ + # entry_points={ # 'console_scripts': [ # 'warpx_3d = warpx.3d.__main__:main' # ] - #}, + # }, extras_require={ - 'all': ['openPMD-api~=0.15.1', 'openPMD-viewer~=1.1', 'yt>=4.1.0', 'matplotlib'], + "all": [ + "openPMD-api~=0.15.1", + "openPMD-viewer~=1.1", + "yt>=4.1.0", + "matplotlib", + ], }, # cmdclass={'test': PyTest}, # platforms='any', classifiers=[ - 'Development Status :: 5 - Production/Stable', - 'Natural Language :: English', - 'Environment :: Console', - 'Intended Audience :: Science/Research', - 'Operating System :: OS Independent', - 'Topic :: Scientific/Engineering', - 'Topic :: Scientific/Engineering :: Physics', - 'Programming Language :: C++', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Programming Language :: Python :: 3.10', - 'Programming Language :: Python :: 3.11', - ('License :: OSI Approved :: ' - 'BSD License'), # TODO: use real SPDX: BSD-3-Clause-LBNL + "Development Status :: 5 - Production/Stable", + "Natural Language :: English", + "Environment :: Console", + "Intended Audience :: Science/Research", + "Operating System :: OS Independent", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Physics", + "Programming Language :: C++", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + ( + 
"License :: OSI Approved :: " "BSD License" + ), # TODO: use real SPDX: BSD-3-Clause-LBNL ], # new PEP 639 format - license='BSD-3-Clause-LBNL', - license_files = ['LICENSE.txt', 'LEGAL.txt'], + license="BSD-3-Clause-LBNL", + license_files=["LICENSE.txt", "LEGAL.txt"], )