diff --git a/.docs/Notebooks/array_output_tutorial.py b/.docs/Notebooks/array_output_tutorial.py
index ba35fa49e2..641cf66c47 100644
--- a/.docs/Notebooks/array_output_tutorial.py
+++ b/.docs/Notebooks/array_output_tutorial.py
@@ -29,12 +29,15 @@
# + pycharm={"name": "#%%\n"}
import os
import sys
+from pathlib import Path
from pprint import pformat
from tempfile import TemporaryDirectory
+import git
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
+import pooch
import flopy
@@ -44,8 +47,40 @@
exe_name = "mf2005"
mfexe = exe_name
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except Exception:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
+sim_name = "freyberg"
+
+file_names = {
+ "freyberg.bas": "63266024019fef07306b8b639c6c67d5e4b22f73e42dcaa9db18b5e0f692c097",
+ "freyberg.dis": "62d0163bf36c7ee9f7ee3683263e08a0abcdedf267beedce6dd181600380b0a2",
+ "freyberg.githds": "abe92497b55e6f6c73306e81399209e1cada34cf794a7867d776cfd18303673b",
+ "freyberg.gitlist": "aef02c664344a288264d5f21e08a748150e43bb721a16b0e3f423e6e3e293056",
+ "freyberg.lpf": "06500bff979424f58e5e4fbd07a7bdeb0c78f31bd08640196044b6ccefa7a1fe",
+ "freyberg.nam": "e66321007bb603ef55ed2ba41f4035ba6891da704a4cbd3967f0c66ef1532c8f",
+ "freyberg.oc": "532905839ccbfce01184980c230b6305812610b537520bf5a4abbcd3bd703ef4",
+ "freyberg.pcg": "0d1686fac4680219fffdb56909296c5031029974171e25d4304e70fa96ebfc38",
+ "freyberg.rch": "37a1e113a7ec16b61417d1fa9710dd111a595de738a367bd34fd4a359c480906",
+ "freyberg.riv": "7492a1d5eb23d6812ec7c8227d0ad4d1e1b35631a765c71182b71e3bd6a6d31d",
+ "freyberg.wel": "00aa55f59797c02f0be5318a523b36b168fc6651f238f34e8b0938c04292d3e7",
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/{fname}",
+ fname=fname,
+ path=data_path / sim_name,
+ known_hash=fhash,
+ )
+
# Set the paths
-loadpth = os.path.join("..", "..", "examples", "data", "freyberg")
+loadpth = data_path / sim_name
temp_dir = TemporaryDirectory()
modelpth = temp_dir.name
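+# The hex strings passed as known_hash above are SHA-256 digests; pooch
+# recomputes the digest of each download and raises if it does not match.
+# A minimal sketch of producing such a digest for a local file:
+
+import hashlib
+
+def sha256_of(path):
+    h = hashlib.sha256()
+    with open(path, "rb") as f:
+        for chunk in iter(lambda: f.read(1 << 16), b""):  # stream in 64 KiB chunks
+            h.update(chunk)
+    return h.hexdigest()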
diff --git a/.docs/Notebooks/dis_triangle_example.py b/.docs/Notebooks/dis_triangle_example.py
index 3d0699e694..1a43b94d4d 100644
--- a/.docs/Notebooks/dis_triangle_example.py
+++ b/.docs/Notebooks/dis_triangle_example.py
@@ -53,7 +53,7 @@
radius = 100.0
x = radius * np.cos(theta)
y = radius * np.sin(theta)
-circle_poly = [(x, y) for x, y in zip(x, y)]
+circle_poly = list(zip(x, y))
fig = plt.figure(figsize=(10, 10))
ax = plt.subplot(1, 1, 1, aspect="equal")
ax.plot(x, y, "bo-")
@@ -94,7 +94,7 @@
radius = 30.0
x = radius * np.cos(theta) + 25.0
y = radius * np.sin(theta) + 25.0
-inner_circle_poly = [(x, y) for x, y in zip(x, y)]
+inner_circle_poly = list(zip(x, y))
# The hole is created by passing in another polygon and
# then passing a point inside the hole polygon with the
@@ -198,9 +198,7 @@
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=workspace
)
-tdis = flopy.mf6.ModflowTdis(
- sim, time_units="DAYS", perioddata=[[1.0, 1, 1.0]]
-)
+tdis = flopy.mf6.ModflowTdis(sim, time_units="DAYS", perioddata=[[1.0, 1, 1.0]])
gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
ims = flopy.mf6.ModflowIms(
sim,
@@ -227,9 +225,7 @@
vertices=vertices,
cell2d=cell2d,
)
-npf = flopy.mf6.ModflowGwfnpf(
- gwf, xt3doptions=[(True)], save_specific_discharge=None
-)
+npf = flopy.mf6.ModflowGwfnpf(gwf, xt3doptions=[(True)], save_specific_discharge=None)
ic = flopy.mf6.ModflowGwfic(gwf)
diff --git a/.docs/Notebooks/dis_voronoi_example.py b/.docs/Notebooks/dis_voronoi_example.py
index 73d81fef33..8278e1d6ae 100644
--- a/.docs/Notebooks/dis_voronoi_example.py
+++ b/.docs/Notebooks/dis_voronoi_example.py
@@ -130,9 +130,7 @@
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=sim_ws
)
-tdis = flopy.mf6.ModflowTdis(
- sim, time_units="DAYS", perioddata=[[1.0, 1, 1.0]]
-)
+tdis = flopy.mf6.ModflowTdis(sim, time_units="DAYS", perioddata=[[1.0, 1, 1.0]])
gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
ims = flopy.mf6.ModflowIms(
sim,
@@ -145,9 +143,7 @@
nlay = 1
top = 1.0
botm = [0.0]
-disv = flopy.mf6.ModflowGwfdisv(
- gwf, nlay=nlay, **disv_gridprops, top=top, botm=botm
-)
+disv = flopy.mf6.ModflowGwfdisv(gwf, nlay=nlay, **disv_gridprops, top=top, botm=botm)
npf = flopy.mf6.ModflowGwfnpf(
gwf,
xt3doptions=[(True)],
@@ -209,9 +205,7 @@
nlay = 1
top = 1.0
botm = [0.0]
-disv = flopy.mf6.ModflowGwtdisv(
- gwt, nlay=nlay, **disv_gridprops, top=top, botm=botm
-)
+disv = flopy.mf6.ModflowGwtdisv(gwt, nlay=nlay, **disv_gridprops, top=top, botm=botm)
ic = flopy.mf6.ModflowGwtic(gwt, strt=0.0)
sto = flopy.mf6.ModflowGwtmst(gwt, porosity=0.2)
adv = flopy.mf6.ModflowGwtadv(gwt, scheme="TVD")
@@ -320,7 +314,7 @@
radius = 100.0
x = radius * np.cos(theta)
y = radius * np.sin(theta)
-circle_poly = [(x, y) for x, y in zip(x, y)]
+circle_poly = list(zip(x, y))
tri = Triangle(maximum_area=5, angle=30, model_ws=workspace)
tri.add_polygon(circle_poly)
tri.build(verbose=False)
@@ -342,7 +336,7 @@
radius = 30.0
x = radius * np.cos(theta) + 25.0
y = radius * np.sin(theta) + 25.0
-inner_circle_poly = [(x, y) for x, y in zip(x, y)]
+inner_circle_poly = list(zip(x, y))
tri = Triangle(maximum_area=10, angle=30, model_ws=workspace)
tri.add_polygon(circle_poly)
@@ -402,7 +396,7 @@
radius = 10.0
x = radius * np.cos(theta) + 50.0
y = radius * np.sin(theta) + 70.0
-circle_poly0 = [(x, y) for x, y in zip(x, y)]
+circle_poly0 = list(zip(x, y))
tri.add_polygon(circle_poly0)
tri.add_hole((50, 70))
@@ -411,7 +405,7 @@
radius = 10.0
x = radius * np.cos(theta) + 70.0
y = radius * np.sin(theta) + 20.0
-circle_poly1 = [(x, y) for x, y in zip(x, y)]
+circle_poly1 = list(zip(x, y))
tri.add_polygon(circle_poly1)
# tri.add_hole((70, 20))
diff --git a/.docs/Notebooks/export_tutorial.py b/.docs/Notebooks/export_tutorial.py
index 9751b3f1a2..7c1b29a2a3 100644
--- a/.docs/Notebooks/export_tutorial.py
+++ b/.docs/Notebooks/export_tutorial.py
@@ -20,8 +20,12 @@
# +
import os
import sys
+from pathlib import Path
from tempfile import TemporaryDirectory
+import git
+import pooch
+
import flopy
print(sys.version)
@@ -30,10 +34,42 @@
# Load our old friend...the Freyberg model
+sim_name = "freyberg_multilayer_transient"
+
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except Exception:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
+file_names = {
+ "freyberg.bas": None,
+ "freyberg.cbc": None,
+ "freyberg.ddn": None,
+ "freyberg.dis": None,
+ "freyberg.drn": None,
+ "freyberg.hds": None,
+ "freyberg.list": None,
+ "freyberg.nam": None,
+ "freyberg.nwt": None,
+ "freyberg.oc": None,
+ "freyberg.rch": None,
+ "freyberg.upw": None,
+ "freyberg.wel": None,
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/{fname}",
+ fname=fname,
+ path=data_path / sim_name,
+ known_hash=fhash,
+ )
+
nam_file = "freyberg.nam"
-model_ws = os.path.join(
- "..", "..", "examples", "data", "freyberg_multilayer_transient"
-)
+model_ws = data_path / sim_name
ml = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, check=False)
# We can see the ``Modelgrid`` instance has generic entries, as does ``start_datetime``
@@ -44,9 +80,7 @@
# Setting the attributes of the ``ml.modelgrid`` is easy:
-ml.modelgrid.set_coord_info(
- xoff=123456.7, yoff=765432.1, angrot=15.0, crs=3070
-)
+ml.modelgrid.set_coord_info(xoff=123456.7, yoff=765432.1, angrot=15.0, crs=3070)
ml.dis.start_datetime = "7/4/1776"
ml.modeltime.start_datetime
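+# A quick check that the georeference took effect (xoffset, yoffset, and
+# angrot are standard modelgrid properties):
+
+print(ml.modelgrid.xoffset, ml.modelgrid.yoffset, ml.modelgrid.angrot)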
@@ -125,9 +159,7 @@
export_dict = {"hds": hds, "cbc": cbc}
# export head and cell budget outputs to netcdf
-fnc = flopy.export.utils.output_helper(
- os.path.join(pth, "output.nc"), ml, export_dict
-)
+fnc = flopy.export.utils.output_helper(os.path.join(pth, "output.nc"), ml, export_dict)
# -
try:
diff --git a/.docs/Notebooks/export_vtk_tutorial.py b/.docs/Notebooks/export_vtk_tutorial.py
index c21d40919a..2719da25d2 100644
--- a/.docs/Notebooks/export_vtk_tutorial.py
+++ b/.docs/Notebooks/export_vtk_tutorial.py
@@ -33,7 +33,9 @@
from pprint import pformat
from tempfile import TemporaryDirectory
+import git
import numpy as np
+import pooch
import flopy
from flopy.export import vtk
@@ -42,13 +44,39 @@
print(f"flopy version: {flopy.__version__}")
# -
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except Exception:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+sim_name = "freyberg_multilayer_transient"
+file_names = {
+ "freyberg.bas": None,
+ "freyberg.cbc": None,
+ "freyberg.ddn": None,
+ "freyberg.dis": None,
+ "freyberg.drn": None,
+ "freyberg.hds": None,
+ "freyberg.list": None,
+ "freyberg.nam": None,
+ "freyberg.nwt": None,
+ "freyberg.oc": None,
+ "freyberg.rch": None,
+ "freyberg.upw": None,
+ "freyberg.wel": None,
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/{fname}",
+ fname=fname,
+ path=data_path / sim_name,
+ known_hash=fhash,
+ )
+
# load model for examples
nam_file = "freyberg.nam"
-model_ws = Path(
- os.path.join(
- "..", "..", "examples", "data", "freyberg_multilayer_transient"
- )
-)
+model_ws = data_path / sim_name
ml = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, check=False)
# Create a temporary workspace.
@@ -107,9 +135,7 @@
# 3D Array export
# hk export, with points
model_hk_dir = output_dir / "HK"
-ml.upw.hk.export(
- model_hk_dir, smooth=True, fmt="vtk", name="HK", point_scalars=True
-)
+ml.upw.hk.export(model_hk_dir, smooth=True, fmt="vtk", name="HK", point_scalars=True)
# ### Package export to .vtu files
#
@@ -220,13 +246,7 @@
## add recharge to the VTK object
recharge = ml.rch.rech.transient_2ds
-vtkobj.add_transient_array(
- recharge,
- "recharge",
- masked_values=[
- 0,
- ],
-)
+vtkobj.add_transient_array(recharge, "recharge", masked_values=[0])
## write vtk files
vtkobj.write(output_dir / "tr_array_example" / "recharge.vtu")
@@ -246,12 +266,7 @@
## add well fluxes to the VTK object
spd = ml.wel.stress_period_data
-vtkobj.add_transient_list(
- spd,
- masked_values=[
- 0,
- ],
-)
+vtkobj.add_transient_list(spd, masked_values=[0])
## write vtk files
vtkobj.write(output_dir / "tr_list_example" / "wel_flux.vtu")
@@ -312,9 +327,7 @@
# +
# export heads as point scalars
-vtkobj = vtk.Vtk(
- ml, xml=True, pvd=True, point_scalars=True, vertical_exageration=10
-)
+vtkobj = vtk.Vtk(ml, xml=True, pvd=True, point_scalars=True, vertical_exageration=10)
# export heads for time step 1, stress periods 1, 50, 100, 1000
vtkobj.add_heads(hds, kstpkper=[(0, 0), (0, 49), (0, 99), (0, 999)])
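+# The same object-oriented pattern applies to any output: create one Vtk
+# instance, add data, write once. A minimal sketch reusing only calls shown
+# in this tutorial (the output path is illustrative):
+
+vtk_sketch = vtk.Vtk(ml, xml=True, pvd=True, vertical_exageration=10)
+vtk_sketch.add_heads(hds, kstpkper=[(0, 0)])
+vtk_sketch.write(output_dir / "heads_sketch" / "heads.vtu")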
@@ -418,17 +431,7 @@ def run_vertex_grid_example(ws):
xmax = 12 * delr
ymin = 8 * delc
ymax = 13 * delc
- rfpoly = [
- [
- [
- (xmin, ymin),
- (xmax, ymin),
- (xmax, ymax),
- (xmin, ymax),
- (xmin, ymin),
- ]
- ]
- ]
+ rfpoly = [[[(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax), (xmin, ymin)]]]
g.add_refinement_features(rfpoly, "polygon", 1, range(nlay))
rf1shp = os.path.join(gridgen_ws, "rf1")
@@ -436,17 +439,7 @@ def run_vertex_grid_example(ws):
xmax = 11 * delr
ymin = 9 * delc
ymax = 12 * delc
- rfpoly = [
- [
- [
- (xmin, ymin),
- (xmax, ymin),
- (xmax, ymax),
- (xmin, ymax),
- (xmin, ymin),
- ]
- ]
- ]
+ rfpoly = [[[(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax), (xmin, ymin)]]]
g.add_refinement_features(rfpoly, "polygon", 2, range(nlay))
rf2shp = os.path.join(gridgen_ws, "rf2")
@@ -454,17 +447,7 @@ def run_vertex_grid_example(ws):
xmax = 10 * delr
ymin = 10 * delc
ymax = 11 * delc
- rfpoly = [
- [
- [
- (xmin, ymin),
- (xmax, ymin),
- (xmax, ymax),
- (xmin, ymax),
- (xmin, ymin),
- ]
- ]
- ]
+ rfpoly = [[[(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax), (xmin, ymin)]]]
g.add_refinement_features(rfpoly, "polygon", 3, range(nlay))
g.build(verbose=False)
@@ -559,9 +542,7 @@ def run_vertex_grid_example(ws):
# riv
riverline = [[(Lx - 1.0, Ly), (Lx - 1.0, 0.0)]]
rivcells = g.intersect(riverline, "line", 0)
- rivspd = [
- [(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"]
- ]
+ rivspd = [[(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"]]
riv = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=rivspd)
# output control
diff --git a/.docs/Notebooks/external_file_handling_tutorial.py b/.docs/Notebooks/external_file_handling_tutorial.py
index f8ee402be5..fecb1073d8 100644
--- a/.docs/Notebooks/external_file_handling_tutorial.py
+++ b/.docs/Notebooks/external_file_handling_tutorial.py
@@ -100,11 +100,7 @@
# list the files in model_ws that have 'hk' in the name
print(
"\n".join(
- [
- name
- for name in os.listdir(ml.model_ws)
- if "hk" in name or "impor" in name
- ]
+ [name for name in os.listdir(ml.model_ws) if "hk" in name or "impor" in name]
)
)
diff --git a/.docs/Notebooks/feat_working_stack_examples.py b/.docs/Notebooks/feat_working_stack_examples.py
index bf11f6afe8..f4dd1a9044 100644
--- a/.docs/Notebooks/feat_working_stack_examples.py
+++ b/.docs/Notebooks/feat_working_stack_examples.py
@@ -23,17 +23,14 @@
from pprint import pformat
from tempfile import TemporaryDirectory
+import git
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
-
-# +
+import pooch
from IPython.display import clear_output, display
-proj_root = Path.cwd().parent.parent
-
-# run installed version of flopy or add local path
import flopy
print(sys.version)
@@ -41,15 +38,53 @@
print(f"matplotlib version: {mpl.__version__}")
print(f"pandas version: {pd.__version__}")
print(f"flopy version: {flopy.__version__}")
-# -
+# First create a temporary workspace.
+
+sim_name = "freyberg_multilayer_transient"
+temp_dir = TemporaryDirectory()
+workspace = Path(temp_dir.name)
+
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except Exception:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
+# Download files if needed.
+
+file_names = {
+ "freyberg.bas": "781585c140d40a27bce9369baee262c621bcf969de82361ad8d6b4d8c253ee02",
+ "freyberg.cbc": "d4e18e968cabde8470fcb7cb8a1c4cc57fcd643bd63b23e7751460bfdb651ea4",
+ "freyberg.ddn": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "freyberg.dis": "1ef61a467a219c036e58902ce11297e06b4eeb5f2f9d2ea40245b421a248a471",
+ "freyberg.drn": "93c22ab27d599938a8c2fc5b420ec03e5251b11b050d6ae1cb23ce2aa1b77997",
+ "freyberg.hds": "0b3e911ef35f625d2d046e05a20bc1300341b41028220c5b25ace6f5a267ceef",
+ "freyberg.list": "14ec36c22b48d253d6b82c44f36c5bad4f0785b3a3384b386f6b69c4ee2e31bf",
+ "freyberg.nam": "9e3747ce6d6229caec55a9357285a96cb4608dae11d90dd165a23e0bb394a2bd",
+ "freyberg.nwt": "d66c5cc255d050a0f871639af4af0cef8d48fa59c1c64217de65fc6e7fd78cb1",
+ "freyberg.oc": "faefd462d11b9a21c4579420b2156fb616ca642bc1e66fc5eb5e1b9046449e43",
+ "freyberg.rch": "93a12742a2d37961d53df0405e39cbecf0e6f14d45b5ca8cbba84a2d90828258",
+ "freyberg.upw": "80838be7af2f97c92965bad1d121c252b69d9c66e4885c5f3f49a6e99582deac",
+ "freyberg.wel": "dd322655eadff3f618f0835c9277af30720197bd48328aae2d6772f26eef2686",
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/{fname}",
+ fname=fname,
+ path=data_path / sim_name,
+ known_hash=fhash,
+ )
+
+# -
# ### Model Inputs
-# first lets load an existing model
-model_ws = proj_root / "examples" / "data" / "freyberg_multilayer_transient"
ml = flopy.modflow.Modflow.load(
"freyberg.nam",
- model_ws=model_ws,
+ model_ws=data_path / sim_name,
verbose=False,
check=False,
exe_name="mfnwt",
@@ -66,11 +101,6 @@
ml.drn.plot(key="cond")
ml.drn.plot(key="elev")
-# First create a temporary workspace.
-
-# create a temporary workspace
-temp_dir = TemporaryDirectory()
-workspace = Path(temp_dir.name)
# Write a shapefile of the DIS package.
@@ -96,7 +126,7 @@
#
# First, let's look at the list file. The list file summarizes the model's results.
-mfl = flopy.utils.MfListBudget(model_ws / "freyberg.list")
+mfl = flopy.utils.MfListBudget(workspace / "freyberg.list")
df_flux, df_vol = mfl.get_dataframes(start_datetime="10-21-2015")
df_flux
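+# One way to visualize the flux budget over time (pandas plotting; the exact
+# budget columns depend on the packages in the model):
+
+df_flux.plot(figsize=(10, 4))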
@@ -116,7 +146,7 @@
# Now let's look at the simulated head.
# if you pass the model instance, then the plots will be offset and rotated
-h = flopy.utils.HeadFile(model_ws / "freyberg.hds", model=ml)
+h = flopy.utils.HeadFile(workspace / "freyberg.hds", model=ml)
h.times
h.plot(totim=900, contour=True, grid=True, colorbar=True, figsize=(10, 10))
diff --git a/.docs/Notebooks/get_transmissivities_example.py b/.docs/Notebooks/get_transmissivities_example.py
index dbb7307935..be2faefb3c 100644
--- a/.docs/Notebooks/get_transmissivities_example.py
+++ b/.docs/Notebooks/get_transmissivities_example.py
@@ -76,9 +76,7 @@
model_ws = temp_dir.name
m = flopy.modflow.Modflow("junk", version="mfnwt", model_ws=model_ws)
-dis = flopy.modflow.ModflowDis(
- m, nlay=nl, nrow=nr, ncol=nc, botm=botm, top=top
-)
+dis = flopy.modflow.ModflowDis(m, nlay=nl, nrow=nr, ncol=nc, botm=botm, top=top)
upw = flopy.modflow.ModflowUpw(m, hk=hk)
# -
@@ -88,9 +86,7 @@
# (cells that are partially within the open interval have reduced thickness, cells outside of the open interval have transmissivities of 0). If no `sctop` or `scbot` arguments are supplied, transmissivities reflect the full saturated thickness in each column of cells (see plot below, which shows different open intervals relative to the model layering)
r, c = np.arange(nr), np.arange(nc)
-T = flopy.utils.get_transmissivities(
- heads, m, r=r, c=c, sctop=sctop, scbot=scbot
-)
+T = flopy.utils.get_transmissivities(heads, m, r=r, c=c, sctop=sctop, scbot=scbot)
np.round(T, 2)
m.dis.botm.array[:, r, c]
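+# Conceptually, each cell's transmissivity is hk times the saturated thickness
+# that falls inside the open interval. A minimal sketch of that clipping for a
+# single cell (illustrative names; not the library implementation):
+
+def cell_transmissivity(hk_cell, top, bot, head, sctop_w, scbot_w):
+    upper = min(head, top, sctop_w)  # water table and screen top cap the cell top
+    lower = max(bot, scbot_w)  # screen bottom caps the cell bottom
+    return hk_cell * max(upper - lower, 0.0)  # zero outside the open interval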
@@ -110,11 +106,7 @@
plt.plot(heads[0], label="piezometric surface", color="b", linestyle=":")
for iw in range(len(sctop)):
ax.fill_between(
- [iw - 0.25, iw + 0.25],
- scbot[iw],
- sctop[iw],
- facecolor="None",
- edgecolor="k",
+ [iw - 0.25, iw + 0.25], scbot[iw], sctop[iw], facecolor="None", edgecolor="k"
)
ax.legend(loc=2)
diff --git a/.docs/Notebooks/grid_intersection_example.py b/.docs/Notebooks/grid_intersection_example.py
index e90a5b8a0d..74218c315a 100644
--- a/.docs/Notebooks/grid_intersection_example.py
+++ b/.docs/Notebooks/grid_intersection_example.py
@@ -44,13 +44,7 @@
import matplotlib.pyplot as plt
import numpy as np
import shapely
-from shapely.geometry import (
- LineString,
- MultiLineString,
- MultiPoint,
- Point,
- Polygon,
-)
+from shapely.geometry import LineString, MultiLineString, MultiPoint, Point, Polygon
import flopy
import flopy.discretization as fgrid
@@ -60,8 +54,8 @@
print(sys.version)
print(f"numpy version: {np.__version__}")
print(f"matplotlib version: {mpl.__version__}")
-print(f"flopy version: {flopy.__version__}")
print(f"shapely version: {shapely.__version__}")
+print(f"flopy version: {flopy.__version__}")
# -
# ## [GridIntersect Class](#top)
@@ -70,23 +64,14 @@
# the constructor. There are options users can select to change how the
# intersection is calculated.
#
-# - `method`: derived from model grid type or defined by the user: can be either `"vertex"` or
-# `"structured"`. If `"structured"` is passed, the intersections are performed
-# using structured methods. These methods use information about the regular grid
-# to limit the search space for intersection calculations. Note that `method="vertex"`
-# also works for structured grids.
-# - `rtree`: either `True` (default) or `False`, only read when
-# `method="vertex"`. When True, an STR-tree is built, which allows for fast
-# spatial queries. Building the STR-tree does take some time however. Setting the
-# option to False avoids building the STR-tree but requires the intersection
-# calculation to loop through all grid cells.
-#
-# In general the "vertex" option is robust and fast and is therefore recommended
-# in most situations. In some rare cases building the STR-tree might not be worth
-# the time, in which case it can be avoided by passing `rtree=False`. If you are
-# working with a structured grid, then the `method="structured"` can speed up
-# intersection operations in some situations (e.g. for (multi)points) with the added
-# advantage of not having to build an STR-tree.
+# - `rtree`: either `True` (default) or `False`. When True, an STR-tree is built,
+# which allows for fast spatial queries. Building the STR-tree does take some
+# time, however. Setting the option to False avoids building the STR-tree but
+# requires the intersection calculation to loop through all grid cells. It is
+# generally recommended to leave this option set to True.
+# - `local`: either `False` (default) or `True`. When True, local model coordinates
+# are used; when False, real-world coordinates are used. This can be useful if
+# shapes are defined in local coordinates.
#
# The important methods in the GridIntersect object are:
#
@@ -96,9 +81,7 @@
# - `intersect()`: for intersecting the modelgrid with point, linestrings, and
# polygon geometries (accepts shapely geometry objects, flopy geometry object,
# shapefile.Shape objects, and geojson objects)
-# - `plot_point()`: for plotting point intersection results
-# - `plot_linestring()`: for plotting linestring intersection results
-# - `plot_polygon()`: for plotting polygon intersection results
+# - `plot_intersection_result()`: for plotting intersection results
#
# In the following sections examples of intersections are shown for structured
# and vertex grids for different types of shapes (Polygon, LineString and Point).
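+# A minimal sketch of the two query styles on a small regular grid (the grid
+# here is illustrative; the tutorial builds its own grids below):
+
+sketch_grid = fgrid.StructuredGrid(delc=10.0 * np.ones(10), delr=10.0 * np.ones(10))
+ix_sketch = GridIntersect(sketch_grid)
+print(ix_sketch.intersects(Point(35.0, 25.0)))  # cellids only
+print(ix_sketch.intersect(Point(35.0, 25.0)))  # cellids plus geometries/areas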
@@ -121,20 +104,12 @@
# Polygon to intersect with:
p = Polygon(
- shell=[
- (15, 15),
- (20, 50),
- (35, 80.0),
- (80, 50),
- (80, 40),
- (40, 5),
- (15, 12),
- ],
+ shell=[(15, 15), (20, 50), (35, 80.0), (80, 50), (80, 40), (40, 5), (15, 12)],
holes=[[(25, 25), (25, 45), (45, 45), (45, 25)]],
)
-# Create the GridIntersect class for our modelgrid. The `method` kwarg is passed to force GridIntersect to use the `"vertex"` intersection methods.
-
+# Create the GridIntersect class for our modelgrid.
+# TODO: remove method kwarg in v3.9.0
ix = GridIntersect(sgr, method="vertex")
# Do the intersect operation for a polygon
@@ -151,7 +126,7 @@
# Looking at the first few entries of the results of the polygon intersection (convert to pandas.DataFrame for prettier formatting)
result[:5]
-# pd.DataFrame(result) # recommended for prettier formatting and working with result
+# pd.DataFrame(result).head()
# The cellids can be easily obtained
@@ -165,18 +140,14 @@
ix.intersects(p)
-# The results of an intersection can be visualized with the plotting methods in the `GridIntersect` object:
-# - `plot_polygon`
-# - `plot_linestring`
-# - `plot_point`
+# The results of an intersection can be visualized with the `GridIntersect.plot_intersection_result()` method.
# +
# create a figure and plot the grid
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
-sgr.plot(ax=ax)
# the intersection object contains some helpful plotting commands
-ix.plot_polygon(result, ax=ax)
+ix.plot_intersection_result(result, ax=ax)
# add black x at cell centers
for irow, icol in result.cellids:
@@ -205,12 +176,8 @@
result2 = ix.intersect(p, contains_centroid=True)
-# create a figure and plot the grid
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
-sgr.plot(ax=ax)
-
-# the intersection object contains some helpful plotting commands
-ix.plot_polygon(result2, ax=ax)
+ix.plot_intersection_result(result2, ax=ax)
# add black x at cell centers
for irow, icol in result2.cellids:
@@ -232,12 +199,8 @@
result3 = ix.intersect(p, min_area_fraction=0.35)
-# create a figure and plot the grid
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
-sgr.plot(ax=ax)
-
-# the intersection object contains some helpful plotting commands
-ix.plot_polygon(result3, ax=ax)
+ix.plot_intersection_result(result3, ax=ax)
# add black x at cell centers
for irow, icol in result3.cellids:
@@ -247,35 +210,6 @@
"kx",
label="centroids of intersected gridcells",
)
-
-# add legend
-ax.legend([h2], [i.get_label() for i in [h2]], loc="best")
-# -
-
-# Alternatively, the intersection can be calculated using special methods optimized for structured grids. Access these methods by instantiating the GridIntersect class with the `method="structured"` keyword argument.
-
-ixs = GridIntersect(sgr, method="structured")
-result4 = ixs.intersect(p)
-
-# The result is the same as before:
-
-# +
-# create a figure and plot the grid
-fig, ax = plt.subplots(1, 1, figsize=(8, 8))
-sgr.plot(ax=ax)
-
-# the intersection object contains some helpful plotting commands
-ix.plot_polygon(result4, ax=ax)
-
-# add black x at cell centers
-for irow, icol in result4.cellids:
- (h2,) = ax.plot(
- sgr.xcellcenters[0, icol],
- sgr.ycellcenters[irow, 0],
- "kx",
- label="centroids of intersected gridcells",
- )
-
# add legend
ax.legend([h2], [i.get_label() for i in [h2]], loc="best")
# -
@@ -295,7 +229,7 @@
# +
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
-ix.plot_linestring(result, ax=ax, cmap="viridis")
+ix.plot_intersection_result(result, ax=ax, cmap="viridis")
for irow, icol in result.cellids:
(h2,) = ax.plot(
@@ -308,32 +242,12 @@
ax.legend([h2], [i.get_label() for i in [h2]], loc="best")
# -
-# Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
-
-ixs = GridIntersect(sgr, method="structured")
-
-# +
-result2 = ixs.intersect(mls)
-
-# ordering is different so compare sets to check equality
-check = len(set(result2.cellids) - set(result.cellids)) == 0
-print(
- "Intersection result with method='structured' and "
- f"method='vertex' are equal: {check}"
-)
-# -
-
# ### [MultiPoint with regular grid](#top)
#
# MultiPoint to intersect with
mp = MultiPoint(
- points=[
- Point(50.0, 0.0),
- Point(45.0, 45.0),
- Point(10.0, 10.0),
- Point(150.0, 100.0),
- ]
+ points=[Point(50.0, 0.0), Point(45.0, 45.0), Point(10.0, 10.0), Point(150.0, 100.0)]
)
# For points and linestrings there is a keyword argument `return_all_intersections` which will return multiple intersection results for points or (parts of) linestrings on cell boundaries. As an example, the difference is shown with the MultiPoint intersection. Note the number of red "+" symbols indicating the centroids of intersected cells; in the bottom-left case there are 4 results because the point lies exactly on the corner shared by 4 grid cells.
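+# A minimal sketch of the keyword's effect, reusing `ix` from above with a
+# point placed (hypothetically) on a corner shared by four cells:
+
+corner_pt = Point(50.0, 50.0)
+r_first = ix.intersect(corner_pt, return_all_intersections=False)
+r_all = ix.intersect(corner_pt, return_all_intersections=True)
+print(len(r_first.cellids), len(r_all.cellids))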
@@ -368,21 +282,6 @@
ax.legend([h2, h3], [i.get_label() for i in [h2, h3]], loc="best")
# -
-# Same as before, the intersect for structured grids can also be performed with a different method written specifically for structured grids.
-
-ixs = GridIntersect(sgr, method="structured")
-
-# +
-result2 = ixs.intersect(mp, return_all_intersections=False)
-
-# ordering is different so compare sets to check equality
-check = len(set(result2.cellids) - set(result.cellids)) == 0
-print(
- "Intersection result with method='structured' and "
- f"method='vertex' are equal: {check}"
-)
-# -
-
# ## [Vertex Grid](#top)
cell2d = [
@@ -420,9 +319,7 @@
# +
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
-pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
-pmv.plot_grid()
-ix.plot_polygon(result, ax=ax)
+ix.plot_intersection_result(result, ax=ax)
# only cells that intersect with shape
for cellid in result.cellids:
@@ -442,9 +339,7 @@
# +
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
-pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
-pmv.plot_grid()
-ix2.plot_linestring(result, ax=ax, lw=3)
+ix2.plot_intersection_result(result, ax=ax, lw=3)
for cellid in result.cellids:
(h2,) = ax.plot(
@@ -464,9 +359,7 @@
# +
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
-pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
-pmv.plot_grid()
-ix2.plot_point(result, ax=ax, color="k", zorder=5, s=80)
+ix2.plot_intersection_result(result, ax=ax, color="k", zorder=5, s=80)
for cellid in result.cellids:
(h2,) = ax.plot(
diff --git a/.docs/Notebooks/gridgen_example.py b/.docs/Notebooks/gridgen_example.py
index a9d944bfa6..59af49f20f 100644
--- a/.docs/Notebooks/gridgen_example.py
+++ b/.docs/Notebooks/gridgen_example.py
@@ -55,9 +55,7 @@
)
print(msg)
else:
- print(
- f"gridgen executable was found at: {flopy_io.relpath_safe(gridgen_exe)}"
- )
+ print(f"gridgen executable was found at: {flopy_io.relpath_safe(gridgen_exe)}")
# +
# temporary directory
@@ -264,9 +262,7 @@
gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
disv = flopy.mf6.ModflowGwfdisv(gwf, **disv_gridprops)
ic = flopy.mf6.ModflowGwfic(gwf)
-npf = flopy.mf6.ModflowGwfnpf(
- gwf, xt3doptions=True, save_specific_discharge=True
-)
+npf = flopy.mf6.ModflowGwfnpf(gwf, xt3doptions=True, save_specific_discharge=True)
chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chdspd)
budget_file = f"{name}.bud"
head_file = f"{name}.hds"
@@ -367,9 +363,7 @@
gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
disu = flopy.mf6.ModflowGwfdisu(gwf, **disu_gridprops)
ic = flopy.mf6.ModflowGwfic(gwf)
-npf = flopy.mf6.ModflowGwfnpf(
- gwf, xt3doptions=True, save_specific_discharge=True
-)
+npf = flopy.mf6.ModflowGwfnpf(gwf, xt3doptions=True, save_specific_discharge=True)
chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chdspd)
budget_file = f"{name}.bud"
head_file = f"{name}.hds"
diff --git a/.docs/Notebooks/groundwater2023_watershed_example.py b/.docs/Notebooks/groundwater2023_watershed_example.py
index 8621505dd4..b25da41e6d 100644
--- a/.docs/Notebooks/groundwater2023_watershed_example.py
+++ b/.docs/Notebooks/groundwater2023_watershed_example.py
@@ -25,10 +25,12 @@
import pathlib as pl
import sys
+import git
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
+import pooch
import shapely
import yaml
from shapely.geometry import LineString, Polygon
@@ -70,9 +72,7 @@ def densify_geometry(line, step, keep_internal_nodes=True):
lines_strings = []
if keep_internal_nodes:
for idx in range(1, len(line)):
- lines_strings.append(
- shapely.geometry.LineString(line[idx - 1 : idx + 1])
- )
+ lines_strings.append(shapely.geometry.LineString(line[idx - 1 : idx + 1]))
else:
lines_strings = [shapely.geometry.LineString(line)]
@@ -96,7 +96,7 @@ def densify_geometry(line, step, keep_internal_nodes=True):
def set_idomain(grid, boundary):
ix = GridIntersect(grid, method="vertex", rtree=True)
result = ix.intersect(Polygon(boundary))
- idx = [coords for coords in result.cellids]
+ idx = list(result.cellids)
idx = np.array(idx, dtype=int)
nr = idx.shape[0]
if idx.ndim == 1:
@@ -108,10 +108,25 @@ def set_idomain(grid, boundary):
grid.idomain = idomain
-geometries = yaml.safe_load(
- open(pl.Path("../../examples/data/groundwater2023/geometries.yml"))
+# Check if we are in the repository and define the data path.
+
+try:
+ root = pl.Path(git.Repo(".", search_parent_directories=True).working_dir)
+except Exception:
+ root = None
+
+data_path = root / "examples" / "data" if root else pl.Path.cwd()
+folder_name = "groundwater2023"
+fname = "geometries.yml"
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{folder_name}/{fname}",
+ fname=fname,
+ path=data_path / folder_name,
+ known_hash=None,
)
+with open(data_path / folder_name / fname) as f:
+    geometries = yaml.safe_load(f)
+
# basic figure size
figwidth = 180 # 90 # mm
figwidth = figwidth / 10 / 2.54 # inches
@@ -163,7 +178,13 @@ def set_idomain(grid, boundary):
os.mkdir(temp_path)
# Load the fine topography that will be sampled
-ascii_file = pl.Path("../../examples/data/geospatial/fine_topo.asc")
+fname = "fine_topo.asc"
+ascii_file = pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/geospatial/{fname}",
+ fname=fname,
+ path=data_path / "geospatial",
+ known_hash=None,
+)
fine_topo = flopy.utils.Raster.load(ascii_file)
# Define the problem size and extents
@@ -235,14 +256,7 @@ def set_idomain(grid, boundary):
pmv = flopy.plot.PlotMapView(modelgrid=struct_grid)
ax.set_aspect("equal")
pmv.plot_array(top_sg)
-pmv.plot_array(
- intersection_sg,
- masked_values=[
- 0,
- ],
- alpha=0.2,
- cmap="Reds_r",
-)
+pmv.plot_array(intersection_sg, masked_values=[0], alpha=0.2, cmap="Reds_r")
pmv.plot_grid(lw=0.25, color="0.5")
cg = pmv.contour_array(top_sg, levels=levels, linewidths=0.3, colors="0.75")
pmv.plot_inactive()
@@ -262,9 +276,7 @@ def set_idomain(grid, boundary):
multiplier = 1.175
transition = 20000.0
ncells = 7
-smoothr = [
- transition * (multiplier - 1.0) / (multiplier ** float(ncells) - 1.0)
-]
+smoothr = [transition * (multiplier - 1.0) / (multiplier ** float(ncells) - 1.0)]
for i in range(ncells - 1):
smoothr.append(smoothr[i] * multiplier)
smooth = smoothr.copy()
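+# The first entry above solves a * (m**n - 1) / (m - 1) == transition, so the
+# ncells geometrically growing cells sum exactly to the transition width:
+
+assert np.isclose(sum(smoothr), transition)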
@@ -318,17 +330,8 @@ def set_idomain(grid, boundary):
pmv = flopy.plot.PlotMapView(modelgrid=struct_vrc_grid)
ax.set_aspect("equal")
pmv.plot_array(top_sg_vrc)
-pmv.plot_array(
- intersection_sg_vrc,
- masked_values=[
- 0,
- ],
- alpha=0.2,
- cmap="Reds_r",
-)
-cg = pmv.contour_array(
- top_sg_vrc, levels=levels, linewidths=0.3, colors="0.75"
-)
+pmv.plot_array(intersection_sg_vrc, masked_values=[0], alpha=0.2, cmap="Reds_r")
+cg = pmv.contour_array(top_sg_vrc, levels=levels, linewidths=0.3, colors="0.75")
pmv.plot_inactive()
ax.plot(bp[:, 0], bp[:, 1], "k-")
@@ -443,14 +446,7 @@ def set_idomain(grid, boundary):
pmv = flopy.plot.PlotMapView(modelgrid=struct_gridp, extent=extent)
pmv.plot_inactive()
pmv.plot_array(top_ngp, vmin=vmin, vmax=vmax)
-pmv.plot_array(
- intersection_nested_grid[0],
- masked_values=[
- 0,
- ],
- alpha=0.2,
- cmap="Reds_r",
-)
+pmv.plot_array(intersection_nested_grid[0], masked_values=[0], alpha=0.2, cmap="Reds_r")
cgp = pmv.contour_array(top_ngp, levels=levels, linewidths=0.3, colors="0.75")
pmv.plot_inactive(zorder=100)
ax.set_aspect("equal")
@@ -459,12 +455,7 @@ def set_idomain(grid, boundary):
# pmvc.plot_grid()
pmvc.plot_array(top_ngc, vmin=vmin, vmax=vmax)
pmvc.plot_array(
- intersection_nested_grid[1],
- masked_values=[
- 0,
- ],
- alpha=0.2,
- cmap="Reds_r",
+ intersection_nested_grid[1], masked_values=[0], alpha=0.2, cmap="Reds_r"
)
cgc = pmvc.contour_array(top_ngc, levels=levels, linewidths=0.3, colors="0.75")
@@ -503,9 +494,7 @@ def set_idomain(grid, boundary):
delc=dx,
)
g = Gridgen(gwf.modelgrid, model_ws=temp_path)
-adpoly = [
- [[(1000, 1000), (3000, 1000), (3000, 2000), (1000, 2000), (1000, 1000)]]
-]
+adpoly = [[[(1000, 1000), (3000, 1000), (3000, 2000), (1000, 2000), (1000, 1000)]]]
adpoly = boundary_polygon + [boundary_polygon[0]]
adpoly = [[adpoly]]
g.add_refinement_features([lgr_poly], "polygon", 2, range(1))
@@ -540,14 +529,7 @@ def set_idomain(grid, boundary):
ax = fig.add_subplot()
pmv = flopy.plot.PlotMapView(modelgrid=quadtree_grid)
pmv.plot_array(top_qg, ec="0.75")
-pmv.plot_array(
- intersection_qg,
- masked_values=[
- 0,
- ],
- alpha=0.2,
- cmap="Reds_r",
-)
+pmv.plot_array(intersection_qg, masked_values=[0], alpha=0.2, cmap="Reds_r")
cg = pmv.contour_array(top_qg, levels=levels, linewidths=0.3, colors="white")
pmv.plot_inactive(zorder=100)
ax.set_aspect("equal")
@@ -570,9 +552,7 @@ def set_idomain(grid, boundary):
nodes = np.array(nodes)
# +
-tri = Triangle(
- maximum_area=5000 * 5000, angle=30, nodes=nodes, model_ws=temp_path
-)
+tri = Triangle(maximum_area=5000 * 5000, angle=30, nodes=nodes, model_ws=temp_path)
poly = bp
tri.add_polygon(poly)
tri.build(verbose=False)
@@ -625,14 +605,7 @@ def set_idomain(grid, boundary):
pmv = flopy.plot.PlotMapView(modelgrid=triangular_grid)
pmv.plot_array(top_tg, ec="0.75")
-pmv.plot_array(
- intersection_tg,
- masked_values=[
- 0,
- ],
- alpha=0.2,
- cmap="Reds_r",
-)
+pmv.plot_array(intersection_tg, masked_values=[0], alpha=0.2, cmap="Reds_r")
cg = pmv.contour_array(top_tg, levels=levels, linewidths=0.3, colors="white")
ax.clabel(cg, cg.levels, inline=True, fmt="%1.0f", fontsize=10)
@@ -680,14 +653,7 @@ def set_idomain(grid, boundary):
pmv = flopy.plot.PlotMapView(modelgrid=voronoi_grid)
ax.set_aspect("equal")
pmv.plot_array(top_vg)
-pmv.plot_array(
- intersection_vg,
- masked_values=[
- 0,
- ],
- alpha=0.2,
- cmap="Reds_r",
-)
+pmv.plot_array(intersection_vg, masked_values=[0], alpha=0.2, cmap="Reds_r")
pmv.plot_inactive()
ax.plot(bp[:, 0], bp[:, 1], "k-")
for sg in sgs:
@@ -753,9 +719,7 @@ def set_idomain(grid, boundary):
gg = grids[idx]
tt = topo_grids[idx]
for g, t in zip(gg[1:], tt[1:]):
- pmvc = flopy.plot.PlotMapView(
- modelgrid=g, ax=ax, extent=extent
- )
+ pmvc = flopy.plot.PlotMapView(modelgrid=g, ax=ax, extent=extent)
pmvc.plot_array(top_ngc, vmin=vmin, vmax=vmax)
pmvc.plot_grid(**grid_dict)
cgc = pmvc.contour_array(top_ngc, **contour_dict)
@@ -801,29 +765,10 @@ def set_idomain(grid, boundary):
ax.set_ylim(0, 1)
ax.set_axis_off()
- ax.axhline(
- xy0[0],
- color="black",
- lw=2,
- label="Basin boundary",
- )
- ax.axhline(
- xy0[0],
- **river_dict,
- label="River",
- )
- ax.axhline(
- xy0[0],
- color=contour_color,
- lw=0.5,
- ls="--",
- label="Elevation contour",
- )
- ax.axhline(
- xy0[0],
- label="Grid refinement area",
- **refinement_dict,
- )
+ ax.axhline(xy0[0], color="black", lw=2, label="Basin boundary")
+ ax.axhline(xy0[0], **river_dict, label="River")
+ ax.axhline(xy0[0], color=contour_color, lw=0.5, ls="--", label="Elevation contour")
+ ax.axhline(xy0[0], label="Grid refinement area", **refinement_dict)
ax.axhline(
xy0[0],
marker="s",
@@ -856,26 +801,11 @@ def set_idomain(grid, boundary):
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_axis_off()
- cax = ax.inset_axes(
- cbar_axis,
- )
+ cax = ax.inset_axes(cbar_axis)
# cax.set_axisbelow(False)
- cbar = plt.colorbar(
- v,
- orientation="vertical",
- cax=cax,
- ticks=[25, 50, 75, 100],
- )
- cbar.ax.tick_params(
- labelsize=5,
- labelcolor="black",
- color="black",
- length=9,
- pad=2,
- )
- cbar.ax.set_title(
- "Elevation (m)", pad=2.5, loc="center", fontdict=font_dict
- )
+ cbar = plt.colorbar(v, orientation="vertical", cax=cax, ticks=[25, 50, 75, 100])
+ cbar.ax.tick_params(labelsize=5, labelcolor="black", color="black", length=9, pad=2)
+ cbar.ax.set_title("Elevation (m)", pad=2.5, loc="center", fontdict=font_dict)
# -
# ### Plot the river intersection for the six grids
@@ -939,12 +869,8 @@ def set_idomain(grid, boundary):
gg = grids[idx]
tt = intersections[idx]
for g, t in zip(gg[1:], tt[1:]):
- pmvc = flopy.plot.PlotMapView(
- modelgrid=g, ax=ax, extent=extent
- )
- pmvc.plot_array(
- t, masked_values=(0,), cmap=intersection_cmap
- )
+ pmvc = flopy.plot.PlotMapView(modelgrid=g, ax=ax, extent=extent)
+ pmvc.plot_array(t, masked_values=(0,), cmap=intersection_cmap)
pmvc.plot_grid(**grid_dict)
# plot lgr polyline
@@ -989,11 +915,7 @@ def set_idomain(grid, boundary):
ax.set_axis_off()
ax.axhline(xy0[0], **river_dict, label="River")
- ax.axhline(
- xy0[0],
- label="Grid refinement area",
- **refinement_dict,
- )
+ ax.axhline(xy0[0], label="Grid refinement area", **refinement_dict)
ax.axhline(
xy0[0],
marker="s",
diff --git a/.docs/Notebooks/groundwater_paper_example_1.py b/.docs/Notebooks/groundwater_paper_example_1.py
index 4afe99585a..3fe0b12a15 100644
--- a/.docs/Notebooks/groundwater_paper_example_1.py
+++ b/.docs/Notebooks/groundwater_paper_example_1.py
@@ -17,7 +17,7 @@
# ## Basic Flopy example
#
# From:
-# Bakker, Mark, Post, Vincent, Langevin, C. D., Hughes, J. D., White, J. T., Starn, J. J. and Fienen, M. N., 2016, Scripting MODFLOW Model Development Using Python and FloPy: Groundwater, v. 54, p. 733–739, https://doi.org/10.1111/gwat.12413.
+# Bakker, Mark, Post, Vincent, Langevin, C. D., Hughes, J. D., White, J. T., Starn, J. J. and Fienen, M. N., 2016, Scripting MODFLOW Model Development Using Python and FloPy: Groundwater, v. 54, p. 733-739, https://doi.org/10.1111/gwat.12413.
# Import the `modflow` and `utils` subpackages of FloPy and give them the aliases `fpm` and `fpu`, respectively
@@ -48,9 +48,7 @@
# The discretization of the model is specified with the discretization file (DIS) of MODFLOW. The aquifer is divided into 201 cells of length 10 m and width 1 m. The first input of the discretization package is the name of the model object. All other input arguments are self-explanatory.
-fpm.ModflowDis(
- model, nlay=1, nrow=1, ncol=201, delr=10, delc=1, top=50, botm=0
-)
+fpm.ModflowDis(model, nlay=1, nrow=1, ncol=201, delr=10, delc=1, top=50, botm=0)
# Active cells and the like are defined with the Basic package (BAS), which is required for every MODFLOW model. It contains the `ibound` array, which is used to specify which cells are active (value is positive), inactive (value is 0), or fixed head (value is negative). The `numpy` package (aliased as `np`) can be used to quickly initialize the `ibound` array with values of 1, and then set the `ibound` value for the first and last columns to -1. The `numpy` package (and Python, in general) uses zero-based indexing and supports negative indexing so that row 1 and column 1, and row 1 and column 201, can be referenced as [0, 0] and [0, -1], respectively. Although this simulation is for steady flow, starting heads still need to be specified. They are used as the head for fixed-head cells (where `ibound` is negative), and as a starting point to compute the saturated thickness for cases of unconfined flow.
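+# A minimal sketch of the ibound setup just described (the starting head
+# value is illustrative):
+
+import numpy as np
+
+ibound = np.ones((1, 1, 201), dtype=int)
+ibound[0, 0, 0] = ibound[0, 0, -1] = -1  # fixed-head cells in the first and last columns
+fpm.ModflowBas(model, ibound=ibound, strt=20.0)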
diff --git a/.docs/Notebooks/groundwater_paper_uspb_example.py b/.docs/Notebooks/groundwater_paper_uspb_example.py
index 1fa202af4a..367939b8a6 100644
--- a/.docs/Notebooks/groundwater_paper_uspb_example.py
+++ b/.docs/Notebooks/groundwater_paper_uspb_example.py
@@ -17,16 +17,19 @@
# # Capture fraction example
#
# From:
-# Bakker, Mark, Post, Vincent, Langevin, C. D., Hughes, J. D., White, J. T., Starn, J. J. and Fienen, M. N., 2016, Scripting MODFLOW Model Development Using Python and FloPy: Groundwater, v. 54, p. 733–739, https://doi.org/10.1111/gwat.12413.
+# Bakker, Mark, Post, Vincent, Langevin, C. D., Hughes, J. D., White, J. T., Starn, J. J. and Fienen, M. N., 2016, Scripting MODFLOW Model Development Using Python and FloPy: Groundwater, v. 54, p. 733-739, https://doi.org/10.1111/gwat.12413.
# +
import os
import sys
+from pathlib import Path
from pprint import pformat
+import git
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
+import pooch
import scipy.ndimage
import flopy
@@ -41,13 +44,23 @@
if not os.path.exists(ws):
os.makedirs(ws)
-fn = os.path.join(
- "..",
- "groundwater_paper",
- "uspb",
- "results",
- "USPB_capture_fraction_04_01.dat",
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except Exception:
+ root = None
+
+data_path = root / ".docs" / "groundwater_paper" if root else Path.cwd()
+
+fname = "USPB_capture_fraction_04_01.dat"
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/.docs/groundwater_paper/uspb/results/{fname}",
+ fname=fname,
+ path=data_path / "uspb" / "results",
+ known_hash=None,
)
+fn = data_path / "uspb" / "results" / fname
cf = np.loadtxt(fn)
print(cf.shape)
@@ -57,7 +70,7 @@
c = plt.imshow(cf2, cmap="jet")
plt.colorbar(c)
-wsl = os.path.join("..", "groundwater_paper", "uspb", "flopy")
+wsl = data_path / "uspb" / "flopy"
ml = flopy.modflow.Modflow.load("DG.nam", model_ws=wsl, verbose=False)
nlay, nrow, ncol = ml.nlay, ml.dis.nrow, ml.dis.ncol
@@ -86,11 +99,7 @@
label="Maximum active model extent",
)
plt.plot(
- [-10000, 0],
- [-10000, 0],
- color="purple",
- lw=0.75,
- label="STR reaches (all layers)",
+ [-10000, 0], [-10000, 0], color="purple", lw=0.75, label="STR reaches (all layers)"
)
leg = plt.legend(loc="upper left", numpoints=1, prop={"size": 6})
leg.draw_frame(False)
@@ -109,21 +118,15 @@
# +
hedObj = flopy.utils.HeadFile(os.path.join(ws, "DG.hds"), precision="double")
h = hedObj.get_data(kstpkper=(0, 0))
-cbcObj = flopy.utils.CellBudgetFile(
- os.path.join(ws, "DG.cbc"), precision="double"
-)
+cbcObj = flopy.utils.CellBudgetFile(os.path.join(ws, "DG.cbc"), precision="double")
frf = cbcObj.get_data(kstpkper=(0, 0), text="FLOW RIGHT FACE")[0]
fff = cbcObj.get_data(kstpkper=(0, 0), text="FLOW FRONT FACE")[0]
-qx, qy, qz = flopy.utils.postprocessing.get_specific_discharge(
- (frf, fff, None), ml
-)
+qx, qy, qz = flopy.utils.postprocessing.get_specific_discharge((frf, fff, None), ml)
# +
cnt = np.arange(1200, 1700, 100)
-f, (ax1, ax2) = plt.subplots(
- 1, 2, figsize=(6.75, 4.47), constrained_layout=True
-)
+f, (ax1, ax2) = plt.subplots(1, 2, figsize=(6.75, 4.47), constrained_layout=True)
ax1.set_xlim(0, xmax)
ax1.set_ylim(0, ymax)
ax2.set_xlim(0, xmax)
@@ -177,9 +180,7 @@
cb = plt.colorbar(h2, cax=ax3)
cb.ax.set_ylabel("Simulated head, m")
-ax1.plot(
- [-10000, 0], [-10000, 0], color="purple", lw=0.75, label="STR reaches"
-)
+ax1.plot([-10000, 0], [-10000, 0], color="purple", lw=0.75, label="STR reaches")
ax1.plot(
[-10000],
[-10000],
@@ -193,9 +194,7 @@
leg = ax1.legend(loc="upper left", numpoints=1, prop={"size": 6})
leg.draw_frame(False)
-ax1.text(
- 0.0, 1.01, "Model layer 4", ha="left", va="bottom", transform=ax1.transAxes
-)
+ax1.text(0.0, 1.01, "Model layer 4", ha="left", va="bottom", transform=ax1.transAxes)
ax2.text(
0.98,
0.02,
@@ -204,20 +203,19 @@
va="bottom",
transform=ax2.transAxes,
)
-ax2.text(
- 0.0, 1.01, "Model layer 5", ha="left", va="bottom", transform=ax2.transAxes
-)
+ax2.text(0.0, 1.01, "Model layer 5", ha="left", va="bottom", transform=ax2.transAxes)
plt.savefig(os.path.join(ws, "uspb_heads.png"), dpi=300)
# -
-fn = os.path.join(
- "..",
- "groundwater_paper",
- "uspb",
- "results",
- "USPB_capture_fraction_04_10.dat",
+fname = "USPB_capture_fraction_04_10.dat"
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/.docs/groundwater_paper/uspb/results/{fname}",
+ fname=fname,
+ path=data_path / "uspb" / "results",
+ known_hash=None,
)
+fn = data_path / "uspb" / "results" / fname
cf = np.loadtxt(fn)
cf2 = scipy.ndimage.zoom(cf, 4, order=0)
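+# With order=0, zoom resamples by nearest neighbor, so each capture-fraction
+# cell is replicated into a 4x4 block rather than interpolated. A tiny check:
+
+demo = np.array([[1, 2], [3, 4]])
+print(scipy.ndimage.zoom(demo, 2, order=0))  # each value becomes a 2x2 block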
diff --git a/.docs/Notebooks/load_swr_binary_data_example.py b/.docs/Notebooks/load_swr_binary_data_example.py
index cd07ff0883..e21689e344 100644
--- a/.docs/Notebooks/load_swr_binary_data_example.py
+++ b/.docs/Notebooks/load_swr_binary_data_example.py
@@ -20,10 +20,13 @@
import os
import sys
+from pathlib import Path
+import git
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
+import pooch
# +
from IPython.display import Image
@@ -35,9 +38,37 @@
print(f"matplotlib version: {mpl.__version__}")
print(f"flopy version: {flopy.__version__}")
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except Exception:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+folder_name = "swr_test"
+
# +
# Set the paths
-datapth = os.path.join("..", "..", "examples", "data", "swr_test")
+datapth = data_path / folder_name
+
+file_names = [
+ "SWR004.dis.ref",
+ "SWR004.flow",
+ "SWR004.obs",
+ "SWR004.stg",
+ "SWR004.str",
+ "SWR004.vel",
+ "swr005.qaq",
+ "swr005.str",
+]
+for fname in file_names:
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{folder_name}/{fname}",
+ fname=fname,
+ path=datapth,
+ known_hash=None,
+ )
# SWR Process binary files
files = ("SWR004.obs", "SWR004.vel", "SWR004.str", "SWR004.stg", "SWR004.flow")
@@ -182,10 +213,7 @@
stage = np.extract(iprof, s["stage"])
xs = flopy.plot.PlotCrossSection(model=ml, line={"Row": 0})
xs.plot_fill_between(
- stage.reshape(1, 1, 12),
- colors=["none", "blue"],
- ax=ax,
- edgecolors="none",
+ stage.reshape(1, 1, 12), colors=["none", "blue"], ax=ax, edgecolors="none"
)
linecollection = xs.plot_grid(ax=ax, zorder=10)
ax.fill_between(
diff --git a/.docs/Notebooks/mf6_complex_model_example.py b/.docs/Notebooks/mf6_complex_model_example.py
index f999b00d96..d5c10c05bf 100644
--- a/.docs/Notebooks/mf6_complex_model_example.py
+++ b/.docs/Notebooks/mf6_complex_model_example.py
@@ -21,15 +21,18 @@
# ### Setup the Notebook Environment
import os
+import sys
# +
-import sys
+from pathlib import Path
from pprint import pformat
from tempfile import TemporaryDirectory
+import git
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
+import pooch
import flopy
@@ -39,20 +42,63 @@
print(f"flopy version: {flopy.__version__}")
# -
+
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except Exception:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+sim_name = "test005_advgw_tidal"
+file_names = [
+ "AdvGW_tidal.dis",
+ "AdvGW_tidal.evt",
+ "AdvGW_tidal.ghb",
+ "AdvGW_tidal.ghb.obs",
+ "AdvGW_tidal.head.cont.opncls",
+ "AdvGW_tidal.ic",
+ "AdvGW_tidal.nam",
+ "AdvGW_tidal.npf",
+ "AdvGW_tidal.obs",
+ "AdvGW_tidal.oc",
+ "AdvGW_tidal.riv",
+ "AdvGW_tidal.riv.obs",
+ "AdvGW_tidal.riv.single.opncls",
+ "AdvGW_tidal.sto",
+ "AdvGW_tidal.wel",
+ "AdvGW_tidal_1.rch",
+ "AdvGW_tidal_2.rch",
+ "AdvGW_tidal_3.rch",
+ "advgw_tidal.dis.grb",
+ "mfsim.nam",
+ "model.ims",
+ "recharge_rates.ts",
+ "recharge_rates_1.ts",
+ "recharge_rates_2.ts",
+ "recharge_rates_3.ts",
+ "river_stages.ts",
+ "simulation.tdis",
+ "tides.ts",
+ "tides.txt",
+ "well_rates.ts",
+]
+for fname in file_names:
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/mf6/{sim_name}/{fname}",
+ fname=fname,
+ path=data_path / "mf6" / sim_name,
+ known_hash=None,
+ )
+
# For this example, we will set up a temporary workspace.
# Model input files and output files will reside here.
temp_dir = TemporaryDirectory()
model_name = "advgw_tidal"
workspace = os.path.join(temp_dir.name, model_name)
-data_pth = os.path.join(
- "..",
- "..",
- "examples",
- "data",
- "mf6",
- "test005_advgw_tidal",
-)
+data_pth = data_path / "mf6" / sim_name
assert os.path.isdir(data_pth)
# +
@@ -112,9 +158,7 @@
)
# initial conditions
-ic = flopy.mf6.ModflowGwfic(
- gwf, pname="ic", strt=50.0, filename=f"{model_name}.ic"
-)
+ic = flopy.mf6.ModflowGwfic(gwf, pname="ic", strt=50.0, filename=f"{model_name}.ic")
# node property flow
npf = flopy.mf6.ModflowGwfnpf(
@@ -143,9 +187,7 @@
for layer in range(0, 3):
sy[layer]["data"] = 0.2
-ss = flopy.mf6.ModflowGwfsto.ss.empty(
- gwf, layered=True, default_value=0.000001
-)
+ss = flopy.mf6.ModflowGwfsto.ss.empty(gwf, layered=True, default_value=0.000001)
sto = flopy.mf6.ModflowGwfsto(
gwf,
@@ -162,30 +204,18 @@
# well package
# test empty with aux vars, bound names, and time series
period_two = flopy.mf6.ModflowGwfwel.stress_period_data.empty(
- gwf,
- maxbound=3,
- aux_vars=["var1", "var2", "var3"],
- boundnames=True,
- timeseries=True,
+ gwf, maxbound=3, aux_vars=["var1", "var2", "var3"], boundnames=True, timeseries=True
)
period_two[0][0] = ((0, 11, 2), -50.0, -1, -2, -3, None)
period_two[0][1] = ((2, 4, 7), "well_1_rate", 1, 2, 3, "well_1")
period_two[0][2] = ((2, 3, 2), "well_2_rate", 4, 5, 6, "well_2")
period_three = flopy.mf6.ModflowGwfwel.stress_period_data.empty(
- gwf,
- maxbound=2,
- aux_vars=["var1", "var2", "var3"],
- boundnames=True,
- timeseries=True,
+ gwf, maxbound=2, aux_vars=["var1", "var2", "var3"], boundnames=True, timeseries=True
)
period_three[0][0] = ((2, 3, 2), "well_2_rate", 1, 2, 3, "well_2")
period_three[0][1] = ((2, 4, 7), "well_1_rate", 4, 5, 6, "well_1")
period_four = flopy.mf6.ModflowGwfwel.stress_period_data.empty(
- gwf,
- maxbound=5,
- aux_vars=["var1", "var2", "var3"],
- boundnames=True,
- timeseries=True,
+ gwf, maxbound=5, aux_vars=["var1", "var2", "var3"], boundnames=True, timeseries=True
)
period_four[0][0] = ((2, 4, 7), "well_1_rate", 1, 2, 3, "well_1")
period_four[0][1] = ((2, 3, 2), "well_2_rate", 4, 5, 6, "well_2")
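+# Each template is a recarray keyed by 0; handing them to the well package
+# keyed by stress period is the next step. A sketch (keyword names assumed
+# from the ModflowGwfwel constructor; the "well_*_rate" entries still need a
+# time-series file attached):
+
+wel = flopy.mf6.ModflowGwfwel(
+    gwf,
+    boundnames=True,
+    auxiliary=[("var1", "var2", "var3")],
+    maxbound=5,
+    stress_period_data={1: period_two[0], 2: period_three[0], 3: period_four[0]},
+)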
@@ -296,9 +326,7 @@
obs_recarray = {
"head_obs.csv": [("h1_13_8", "HEAD", (2, 12, 7))],
- "intercell_flow_obs1.csv": [
- ("ICF1_1.0", "FLOW-JA-FACE", (0, 4, 5), (0, 5, 5))
- ],
+ "intercell_flow_obs1.csv": [("ICF1_1.0", "FLOW-JA-FACE", (0, 4, 5), (0, 5, 5))],
"head-hydrographs.csv": [
("h3-13-9", "HEAD", (2, 12, 8)),
("h3-12-8", "HEAD", (2, 11, 7)),
@@ -379,15 +407,7 @@
("rv2-upper", "RIV", "riv2_upper"),
("rv-2-7-4", "RIV", (0, 6, 3)),
("rv2-8-5", "RIV", (0, 6, 4)),
- (
- "rv-2-9-6",
- "RIV",
- (
- 0,
- 5,
- 5,
- ),
- ),
+ ("rv-2-9-6", "RIV", (0, 5, 5)),
],
"riv_flowsA.csv": [
("riv1-3-1", "RIV", (0, 2, 0)),
diff --git a/.docs/Notebooks/mf6_data_tutorial01.py b/.docs/Notebooks/mf6_data_tutorial01.py
index 54e3d58a96..989b3f56d9 100644
--- a/.docs/Notebooks/mf6_data_tutorial01.py
+++ b/.docs/Notebooks/mf6_data_tutorial01.py
@@ -51,18 +51,14 @@
# set up simulation and basic packages
sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=workspace)
-flopy.mf6.ModflowTdis(
- sim, nper=10, perioddata=[[365.0, 1, 1.0] for _ in range(10)]
-)
+flopy.mf6.ModflowTdis(sim, nper=10, perioddata=[[365.0, 1, 1.0] for _ in range(10)])
flopy.mf6.ModflowIms(sim)
gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
botm = [30.0, 20.0, 10.0]
flopy.mf6.ModflowGwfdis(gwf, nlay=3, nrow=4, ncol=5, top=50.0, botm=botm)
flopy.mf6.ModflowGwfic(gwf)
flopy.mf6.ModflowGwfnpf(gwf, save_specific_discharge=True)
-flopy.mf6.ModflowGwfchd(
- gwf, stress_period_data=[[(0, 0, 0), 1.0], [(2, 3, 4), 0.0]]
-)
+flopy.mf6.ModflowGwfchd(gwf, stress_period_data=[[(0, 0, 0), 1.0], [(2, 3, 4), 0.0]])
budget_file = f"{name}.bud"
head_file = f"{name}.hds"
flopy.mf6.ModflowGwfoc(
diff --git a/.docs/Notebooks/mf6_data_tutorial03.py b/.docs/Notebooks/mf6_data_tutorial03.py
index 359696489d..a4ec2ebef3 100644
--- a/.docs/Notebooks/mf6_data_tutorial03.py
+++ b/.docs/Notebooks/mf6_data_tutorial03.py
@@ -240,20 +240,17 @@
print(
"{} is using {} interpolation".format(
- ghb.ts[0].filename,
- ghb.ts[0].interpolation_methodrecord.get_data()[0][0],
+ ghb.ts[0].filename, ghb.ts[0].interpolation_methodrecord.get_data()[0][0]
)
)
print(
"{} is using {} interpolation".format(
- ghb.ts[1].filename,
- ghb.ts[1].interpolation_methodrecord.get_data()[0][0],
+ ghb.ts[1].filename, ghb.ts[1].interpolation_methodrecord.get_data()[0][0]
)
)
print(
"{} is using {} interpolation".format(
- ghb.ts[2].filename,
- ghb.ts[2].interpolation_methodrecord.get_data()[0][0],
+ ghb.ts[2].filename, ghb.ts[2].interpolation_methodrecord.get_data()[0][0]
)
)
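+# The three prints above can be collapsed into a loop (assuming ghb.ts
+# iterates the same child packages indexed above):
+
+for ts_pkg in ghb.ts:
+    method = ts_pkg.interpolation_methodrecord.get_data()[0][0]
+    print(f"{ts_pkg.filename} is using {method} interpolation")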
diff --git a/.docs/Notebooks/mf6_data_tutorial08.py b/.docs/Notebooks/mf6_data_tutorial08.py
index 678347c56e..6d36db95ea 100644
--- a/.docs/Notebooks/mf6_data_tutorial08.py
+++ b/.docs/Notebooks/mf6_data_tutorial08.py
@@ -243,10 +243,7 @@
# These options can also be turned off when loading an existing simulation
# or creating a new simulation by setting lazy_io to True.
-sim2 = flopy.mf6.MFSimulation.load(
- sim_ws=workspace,
- lazy_io=True,
-)
+sim2 = flopy.mf6.MFSimulation.load(sim_ws=workspace, lazy_io=True)
sim3 = flopy.mf6.MFSimulation(lazy_io=True)
diff --git a/.docs/Notebooks/mf6_data_tutorial09.py b/.docs/Notebooks/mf6_data_tutorial09.py
index e2e3466232..30dbea39f1 100644
--- a/.docs/Notebooks/mf6_data_tutorial09.py
+++ b/.docs/Notebooks/mf6_data_tutorial09.py
@@ -55,9 +55,7 @@
# set up first groundwater flow model
name_1 = "ex_1_mod_1"
model_nam_file = f"{name_1}.nam"
-gwf = flopy.mf6.ModflowGwf(
- sim, modelname=name_1, model_nam_file=model_nam_file
-)
+gwf = flopy.mf6.ModflowGwf(sim, modelname=name_1, model_nam_file=model_nam_file)
# create the discretization package
bot = [-10.0, -50.0, -200.0]
delrow = delcol = 4.0
@@ -116,9 +114,7 @@
# set up second groundwater flow model with a finer grid
name_1 = "ex_1_mod_2"
model_nam_file = f"{name_1}.nam"
-gwf_2 = flopy.mf6.ModflowGwf(
- sim, modelname=name_1, model_nam_file=model_nam_file
-)
+gwf_2 = flopy.mf6.ModflowGwf(sim, modelname=name_1, model_nam_file=model_nam_file)
# create the flopy iterative model solver (ims) package object
# by default flopy will register both models with the ims package.
ims = flopy.mf6.modflow.mfims.ModflowIms(
diff --git a/.docs/Notebooks/mf6_data_tutorial10.py b/.docs/Notebooks/mf6_data_tutorial10.py
index f1e50dfe5f..4a74df6760 100644
--- a/.docs/Notebooks/mf6_data_tutorial10.py
+++ b/.docs/Notebooks/mf6_data_tutorial10.py
@@ -567,20 +567,17 @@
# retrieve information from each time series
print(
"{} is using {} interpolation".format(
- ghb.ts[0].filename,
- ghb.ts[0].interpolation_methodrecord.get_data()[0][0],
+ ghb.ts[0].filename, ghb.ts[0].interpolation_methodrecord.get_data()[0][0]
)
)
print(
"{} is using {} interpolation".format(
- ghb.ts[1].filename,
- ghb.ts[1].interpolation_methodrecord.get_data()[0][0],
+ ghb.ts[1].filename, ghb.ts[1].interpolation_methodrecord.get_data()[0][0]
)
)
print(
"{} is using {} interpolation".format(
- ghb.ts[2].filename,
- ghb.ts[2].interpolation_methodrecord.get_data()[0][0],
+ ghb.ts[2].filename, ghb.ts[2].interpolation_methodrecord.get_data()[0][0]
)
)
diff --git a/.docs/Notebooks/mf6_lgr_tutorial01.py b/.docs/Notebooks/mf6_lgr_tutorial01.py
index 172756ccfe..b30608c788 100644
--- a/.docs/Notebooks/mf6_lgr_tutorial01.py
+++ b/.docs/Notebooks/mf6_lgr_tutorial01.py
@@ -551,10 +551,7 @@
# +
# load and store the head arrays from the parent and child models
head = [gwfp.output.head().get_data(), gwfc.output.head().get_data()]
-conc = [
- gwtp.output.concentration().get_data(),
- gwtc.output.concentration().get_data(),
-]
+conc = [gwtp.output.concentration().get_data(), gwtc.output.concentration().get_data()]
# load and store the specific discharge results for the parent and child models
bud = gwfp.output.budget()
@@ -577,12 +574,8 @@
# pmvc.plot_array(head[1], vmin=0., vmax=1.)
# contour head
-cs = pmvp.contour_array(
- head[0], levels=np.linspace(0, 1), masked_values=[1.0e30]
-)
-cs = pmvc.contour_array(
- head[1], levels=np.linspace(0, 1), masked_values=[1.0e30]
-)
+cs = pmvp.contour_array(head[0], levels=np.linspace(0, 1), masked_values=[1.0e30])
+cs = pmvc.contour_array(head[1], levels=np.linspace(0, 1), masked_values=[1.0e30])
# color flood concentrations
a1 = conc[0]
diff --git a/.docs/Notebooks/mf6_mnw2_tutorial01.py b/.docs/Notebooks/mf6_mnw2_tutorial01.py
index 6f09a523a6..f81d6ddbc8 100644
--- a/.docs/Notebooks/mf6_mnw2_tutorial01.py
+++ b/.docs/Notebooks/mf6_mnw2_tutorial01.py
@@ -17,13 +17,16 @@
# # Working with the Multi-node Well (MNW2) Package
import os
+import sys
# +
-import sys
+from pathlib import Path
from tempfile import TemporaryDirectory
+import git
import numpy as np
import pandas as pd
+import pooch
import flopy
@@ -33,6 +36,15 @@
print(f"flopy version: {flopy.__version__}")
# -
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
# ### Make an MNW2 package from scratch
# +
@@ -41,9 +53,7 @@
model_ws = temp_dir.name
m = flopy.modflow.Modflow("mnw2example", model_ws=model_ws)
-dis = flopy.modflow.ModflowDis(
- nrow=5, ncol=5, nlay=3, nper=3, top=10, botm=0, model=m
-)
+dis = flopy.modflow.ModflowDis(nrow=5, ncol=5, nlay=3, nper=3, top=10, botm=0, model=m)
# -
# ### MNW2 information by node
@@ -178,13 +188,35 @@
mnw2.write_file(os.path.join(model_ws, "test.mnw2"))
junk = [
- print(l.strip("\n"))
- for l in open(os.path.join(model_ws, "test.mnw2")).readlines()
+ print(l.strip("\n")) for l in open(os.path.join(model_ws, "test.mnw2")).readlines()
]
# ### Load some example MNW2 packages
-path = os.path.join("..", "..", "examples", "data", "mnw2_examples")
+folder_name = "mnw2_examples"
+
+file_names = {
+ "BadRiver_cal.mnw2": None,
+ "MNW2-Fig28.bas": None,
+ "MNW2-Fig28.dis": None,
+ "MNW2-Fig28.lpf": None,
+ "MNW2-Fig28.mnw2": None,
+ "MNW2-Fig28.mnwi": None,
+ "MNW2-Fig28.nam": None,
+ "MNW2-Fig28.oc": None,
+ "MNW2-Fig28.pcg": None,
+ "MNW2-Fig28.rch": None,
+ "MNW2-Fig28.wel": None,
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{folder_name}/{fname}",
+ fname=fname,
+ path=data_path / folder_name,
+ known_hash=fhash,
+ )
+
+path = data_path / folder_name
m = flopy.modflow.Modflow("MNW2-Fig28", model_ws=model_ws)
dis = flopy.modflow.ModflowDis.load(os.path.join(path, "MNW2-Fig28.dis"), m)
@@ -201,11 +233,9 @@
pd.DataFrame(mnw2.mnw["well-a"].stress_period_data)
-path = os.path.join("..", "..", "examples", "data", "mnw2_examples")
+path = data_path / "mnw2_examples"
m = flopy.modflow.Modflow("br", model_ws=model_ws)
-mnw2 = flopy.modflow.ModflowMnw2.load(
- os.path.join(path, "BadRiver_cal.mnw2"), m
-)
+mnw2 = flopy.modflow.ModflowMnw2.load(os.path.join(path, "BadRiver_cal.mnw2"), m)
df = pd.DataFrame(mnw2.node_data)
df.loc[:, df.sum(axis=0) != 0]
diff --git a/.docs/Notebooks/mf6_output_tutorial01.py b/.docs/Notebooks/mf6_output_tutorial01.py
index 96bc0d1f8a..368cddba31 100644
--- a/.docs/Notebooks/mf6_output_tutorial01.py
+++ b/.docs/Notebooks/mf6_output_tutorial01.py
@@ -20,33 +20,68 @@
# by using the built in `.output` attribute on any MODFLOW 6 model or
# package object
-import os
from pathlib import Path
+from shutil import copytree
from tempfile import TemporaryDirectory
+import git
import numpy as np
+import pooch
-# ## Package import
import flopy
-# ## Load a simple demonstration model
+# ## Loading a model
+
+# Start by creating a temporary workspace and defining some names.
exe_name = "mf6"
-project_root_path = Path.cwd().parent.parent
-ws = os.path.abspath(os.path.dirname(""))
-sim_ws = str(
- project_root_path / "examples" / "data" / "mf6" / "test001e_UZF_3lay"
-)
+sim_name = "test001e_UZF_3lay"
+temp_dir = TemporaryDirectory()
+sim_ws = Path(temp_dir.name)
+
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
+# Download files if needed.
+
+files = {
+ "chd_spd.txt": "4d87f60022832372981caa2bd162681d5c4b8b3fcf8bc7f5de533c96ad1ed03c",
+ "mfsim.nam": "2f7889dedb9e7befb45251f08f015bd5531a4952f4141295ebad9e550be365fd",
+ "simulation.tdis": "d466787698c88b7f229cf244f2c9f226a87628c0a5748819e4e34fd4edc48d4c",
+ "test001e_UZF_3lay.chd": "96a00121e7004b152a03d0759bf4abfd70f8a1ea21cbce6b9441f18ce4d89b45",
+ "test001e_UZF_3lay.dis": "d2f879dcba84ec4be8883d6e29ea9197dd0e67c4058fdde7b9e1de737d1e0639",
+ "test001e_UZF_3lay.ic": "6e434a9d42ffe1b126b26890476f6893e9ab526f3a4ee96e63d443fd9008e1df",
+ "test001e_UZF_3lay.ims": "c4ef9ebe359def38f0e9ed810b61af0aae9a437c57d54b1db00b8dda20e5b67d",
+ "test001e_UZF_3lay.nam": "078ea7b0a774546a93c2cedfb98dc686395332ece7df6493653b072d43b4b834",
+ "test001e_UZF_3lay.npf": "89181af1fd91fe59ea931aae02fe64f855f27c705ee9200c8a9c23831aa7bace",
+ "test001e_UZF_3lay.obs": "b9857f604c0594a466255f040bd5a47a1687a69ae3be749488bd8736ead7d106",
+ "test001e_UZF_3lay.oc": "5eb327ead17588a1faa8b5c7dd37844f7a63d98351ef8bb41df1162c87a94d02",
+ "test001e_UZF_3lay.sto": "8d808d0c2ae4edc114455db3f1766446f9f9d6d3775c46a70369a57509bff811",
+ "test001e_UZF_3lay.uzf": "97624f1102abef4985bb40f432523df76bd94e069ac8a4aa17455d0d5b8f146e",
+ "test001e_UZF_3lay_obs.hed": "78c67035fc6f0c5c1d6090c1ce1e50dcab75b361e4ed44dc951f11fd3915a388",
+}
+
+for fname, fhash in files.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/mf6/{sim_name}/{fname}",
+ fname=fname,
+ path=data_path / "mf6" / sim_name,
+ known_hash=fhash,
+ )
# load the model
sim = flopy.mf6.MFSimulation.load(
- sim_ws=sim_ws,
+ sim_ws=data_path / "mf6" / sim_name,
exe_name=exe_name,
verbosity_level=0,
)
# change the simulation path, rewrite the files, and run the model
-temp_dir = TemporaryDirectory()
-sim_ws = temp_dir.name
sim.set_sim_path(sim_ws)
sim.write_simulation(silent=True)
sim.run_simulation(silent=True)
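
# A minimal sketch (editor's illustration, not part of this diff) of the
# `.output` interface introduced above; `sim` is the simulation loaded and
# run in the preceding cells:
gwf = sim.get_model()
head = gwf.output.head().get_data()
budget = gwf.output.budget()
print(budget.get_unique_record_names())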
diff --git a/.docs/Notebooks/mf6_parallel_model_splitting_example.py b/.docs/Notebooks/mf6_parallel_model_splitting_example.py
index a753f1fcb5..4c97d4a49d 100644
--- a/.docs/Notebooks/mf6_parallel_model_splitting_example.py
+++ b/.docs/Notebooks/mf6_parallel_model_splitting_example.py
@@ -22,10 +22,13 @@
import sys
from pathlib import Path
+from shutil import copy, copytree
from tempfile import TemporaryDirectory
+import git
import matplotlib.pyplot as plt
import numpy as np
+import pooch
import yaml
import flopy
@@ -33,12 +36,9 @@
from flopy.plot import styles
from flopy.utils.geometry import LineString, Polygon
-geometries = yaml.safe_load(
- open(Path("../../examples/data/groundwater2023/geometries.yml"))
-)
+# Define a few utility functions.
-# define a few utility functions
def string2geom(geostring, conversion=None):
if conversion is None:
multiplier = 1.0
@@ -56,24 +56,72 @@ def string2geom(geostring, conversion=None):
return res
-# ## Example 1: splitting a simple structured grid model
-#
-# This example shows the basics of using the `Mf6Splitter()` class and applies the method to the Freyberg (1988) model.
-
-simulation_ws = Path("../../examples/data/mf6-freyberg")
-sim = flopy.mf6.MFSimulation.load(sim_ws=simulation_ws)
-
-# Create a temporary directory for this example and run the Freyberg (1988) model.
+# Create a temporary directory for this example.
temp_dir = TemporaryDirectory()
workspace = Path(temp_dir.name)
-#
+# Check if we are in the repository and define the data path.
-sim.set_sim_path(workspace)
-sim.write_simulation()
-success, buff = sim.run_simulation(silent=True)
-assert success
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
+# Download and load geometries.
+
+geometries_fname = "geometries.yml"
+geometries_fpath = pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/groundwater2023/{geometries_fname}",
+ fname=geometries_fname,
+ path=workspace,
+ known_hash="4fb491f9dbd09ef04d6d067458e9866ac79d96448f70910e78c552131a12b6be",
+)
+geometries = yaml.safe_load(open(geometries_fpath))
+
+# Download the Freyberg 1988 model.
+
+sim_name = "mf6-freyberg"
+file_names = {
+ "bot.asc": "3107f907cb027460fd40ffc16cb797a78babb31988c7da326c9f500fba855b62",
+ "description.txt": "94093335eec6a24711f86d4d217ccd5a7716dd9e01cb6b732bc7757d41675c09",
+ "freyberg.cbc": "c8ad843b1da753eb58cf6c462ac782faf0ca433d6dcb067742d8bd698db271e3",
+ "freyberg.chd": "d8b8ada8d3978daea1758b315be983b5ca892efc7d69bf6b367ceec31e0dd156",
+ "freyberg.dis": "cac230a207cc8483693f7ba8ae29ce40c049036262eac4cebe17a4e2347a8b30",
+ "freyberg.dis.grb": "c8c26fb1fa4b210208134b286d895397cf4b3131f66e1d9dda76338502c7e96a",
+ "freyberg.hds": "926a06411ca658a89db6b5686f51ddeaf5b74ced81239cab1d43710411ba5f5b",
+ "freyberg.ic": "6efb56ee9cdd704b9a76fb9efd6dae750facc5426b828713f2d2cf8d35194120",
+ "freyberg.ims": "6dddae087d85417e3cdaa13e7b24165afb7f9575ab68586f3adb6c1b2d023781",
+ "freyberg.nam": "cee9b7b000fe35d2df26e878d09d465250a39504f87516c897e3fa14dcda081e",
+ "freyberg.npf": "81104d3546045fff0eddf5059465e560b83b492fa5a5acad1907ce18c2b9c15f",
+ "freyberg.oc": "c0715acd75eabcc42c8c47260a6c1abd6c784350983f7e2e6009ddde518b80b8",
+ "freyberg.rch": "a6ec1e0eda14fd2cdf618a5c0243a9caf82686c69242b783410d5abbcf971954",
+ "freyberg.riv": "a8cafc8c317cbe2acbb43e2f0cfe1188cb2277a7a174aeb6f3e6438013de8088",
+ "freyberg.sto": "74d748c2f0adfa0a32ee3f2912115c8f35b91011995b70c1ec6ae1c627242c41",
+ "freyberg.tdis": "9965cbb17caf5b865ea41a4ec04bcb695fe15a38cb539425fdc00abbae385cbe",
+ "freyberg.wel": "f19847de455598de52c05a4be745698c8cb589e5acfb0db6ab1f06ded5ff9310",
+ "k11.asc": "b6a8aa46ef17f7f096d338758ef46e32495eb9895b25d687540d676744f02af5",
+ "mfsim.nam": "6b8d6d7a56c52fb2bff884b3979e3d2201c8348b4bbfd2b6b9752863cbc9975e",
+ "top.asc": "3ad2b131671b9faca7f74c1dd2b2f41875ab0c15027764021a89f9c95dccaa6a",
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/{fname}",
+ fname=fname,
+ path=data_path / sim_name,
+ known_hash=fhash,
+ )
+
+copytree(data_path / sim_name, workspace / sim_name)
+
+# Load the simulation, switch the workspace, and run the simulation.
+
+sim = flopy.mf6.MFSimulation.load(sim_ws=data_path / sim_name)
+sim.set_sim_path(workspace / sim_name)
+success, buff = sim.run_simulation(silent=True, report=True)
+assert success, buff
# Visualize the head results and boundary conditions from this model.
@@ -234,7 +282,15 @@ def string2geom(geostring, conversion=None):
#
# Load an ASCII raster file
-ascii_file = Path("../../examples/data/geospatial/fine_topo.asc")
+ascii_file_name = "fine_topo.asc"
+ascii_file = pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/geospatial/{ascii_file_name}",
+ fname=ascii_file_name,
+ path=data_path / "geospatial",
+ known_hash=None,
+)
+
+copy(data_path / "geospatial" / ascii_file_name, workspace / ascii_file_name)
fine_topo = flopy.utils.Raster.load(ascii_file)
fine_topo.plot()
@@ -328,14 +384,7 @@ def string2geom(geostring, conversion=None):
pmv = flopy.plot.PlotMapView(modelgrid=modelgrid)
ax.set_aspect("equal")
pmv.plot_array(modelgrid.top)
- pmv.plot_array(
- intersection_rg,
- masked_values=[
- 0,
- ],
- alpha=0.2,
- cmap="Reds_r",
- )
+ pmv.plot_array(intersection_rg, masked_values=[0], alpha=0.2, cmap="Reds_r")
pmv.plot_inactive()
ax.plot(bp[:, 0], bp[:, 1], "r-")
for seg in segs:
@@ -384,9 +433,7 @@ def string2geom(geostring, conversion=None):
if idomain[r, c] < 1:
continue
conductance = leakance * dx * dy
- gw_discharge_data.append(
- (0, r, c, modelgrid.top[r, c] - 0.5, conductance, 1.0)
- )
+ gw_discharge_data.append((0, r, c, modelgrid.top[r, c] - 0.5, conductance, 1.0))
gw_discharge_data[:10]
# -
@@ -498,9 +545,7 @@ def string2geom(geostring, conversion=None):
# Plot the model results
# +
-water_table = flopy.utils.postprocessing.get_water_table(
- gwf.output.head().get_data()
-)
+water_table = flopy.utils.postprocessing.get_water_table(gwf.output.head().get_data())
heads = gwf.output.head().get_data()
hmin, hmax = water_table.min(), water_table.max()
contours = np.arange(0, 100, 10)
@@ -517,11 +562,7 @@ def string2geom(geostring, conversion=None):
pmv = flopy.plot.PlotMapView(modelgrid=gwf.modelgrid, ax=ax)
h = pmv.plot_array(heads, vmin=hmin, vmax=hmax)
c = pmv.contour_array(
- water_table,
- levels=contours,
- colors="white",
- linewidths=0.75,
- linestyles=":",
+ water_table, levels=contours, colors="white", linewidths=0.75, linestyles=":"
)
plt.clabel(c, fontsize=8)
pmv.plot_inactive()
@@ -637,11 +678,7 @@ def string2geom(geostring, conversion=None):
h = pmv.plot_array(hv[idx], vmin=vmin, vmax=vmax)
if levels is not None:
c = pmv.contour_array(
- hv[idx],
- levels=levels,
- colors="white",
- linewidths=0.75,
- linestyles=":",
+ hv[idx], levels=levels, colors="white", linewidths=0.75, linestyles=":"
)
plt.clabel(c, fontsize=8)
pmv.plot_inactive()
@@ -676,7 +713,7 @@ def string2geom(geostring, conversion=None):
new_sim = mfsplit.split_model(split_array)
temp_dir = TemporaryDirectory()
-workspace = Path("temp")
+workspace = Path(temp_dir.name)
new_ws = workspace / "opt_split_models"
new_sim.set_sim_path(new_ws)
@@ -720,11 +757,7 @@ def string2geom(geostring, conversion=None):
h = pmv.plot_array(hv[idx], vmin=vmin, vmax=vmax)
if levels is not None:
c = pmv.contour_array(
- hv[idx],
- levels=levels,
- colors="white",
- linewidths=0.75,
- linestyles=":",
+ hv[idx], levels=levels, colors="white", linewidths=0.75, linestyles=":"
)
plt.clabel(c, fontsize=8)
pmv.plot_inactive()
diff --git a/.docs/Notebooks/mf6_sfr_tutorial01.py b/.docs/Notebooks/mf6_sfr_tutorial01.py
index 69d65bc400..774b172187 100644
--- a/.docs/Notebooks/mf6_sfr_tutorial01.py
+++ b/.docs/Notebooks/mf6_sfr_tutorial01.py
@@ -17,9 +17,13 @@
# # SFR2 package loading and querying
import os
+import sys
# +
-import sys
+from pathlib import Path
+
+import git
+import pooch
import flopy
@@ -31,17 +35,34 @@
m = flopy.modflow.Modflow()
-# Read the SFR2 file
-f = os.path.join(
- "..", "..", "examples", "data", "mf2005_test", "testsfr2_tab_ICALC2.sfr"
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
+# Retrieve the SFR2 file
+sim_name = "mf2005_test"
+fname = "testsfr2_tab_ICALC2.sfr"
+fpath = pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/{fname}",
+ fname=fname,
+ path=data_path / sim_name,
+ known_hash=None,
)
-stuff = open(f).readlines()
+
+# Read the SFR2 file
+
+stuff = open(fpath).readlines()
stuff
# Load the SFR2 file
-sfr = flopy.modflow.ModflowSfr2.load(f, m, nper=50)
+sfr = flopy.modflow.ModflowSfr2.load(fpath, m, nper=50)
sfr.segment_data.keys()
diff --git a/.docs/Notebooks/mf6_simple_model_example.py b/.docs/Notebooks/mf6_simple_model_example.py
index aecc57ed3e..6e941a9b27 100644
--- a/.docs/Notebooks/mf6_simple_model_example.py
+++ b/.docs/Notebooks/mf6_simple_model_example.py
@@ -198,8 +198,6 @@
# ### Post-Process Head Results
#
-# Post-processing MODFLOW 6 results is still a work in progress. There aren't any Flopy plotting functions built in yet, like they are for other MODFLOW versions. So we need to plot the results using general Flopy capabilities. We can also use some of the Flopy ModelMap capabilities for MODFLOW 6, but in order to do so, we need to manually create a SpatialReference object, that is needed for the plotting. Examples of both approaches are shown below.
-#
# First, a link to the heads file is created with `HeadFile`. The link can then be accessed with the `get_data` function, by specifying, in this case, the step number and period number for which we want to retrieve data. A three-dimensional array is returned of size `nlay, nrow, ncol`. Matplotlib contouring functions are used to make contours of the layers or a cross-section.
# Read the binary head file and plot the results
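
# A minimal sketch of that pattern (editor's illustration; `workspace` and
# `name` are assumed to match this notebook's earlier cells):
hds = flopy.utils.HeadFile(os.path.join(workspace, f"{name}.hds"))
head = hds.get_data(kstpkper=(0, 0))  # array of shape (nlay, nrow, ncol)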
diff --git a/.docs/Notebooks/mf6_support_example.py b/.docs/Notebooks/mf6_support_example.py
index 9a23b2b344..52f5ca746b 100644
--- a/.docs/Notebooks/mf6_support_example.py
+++ b/.docs/Notebooks/mf6_support_example.py
@@ -54,8 +54,20 @@
from shutil import copyfile
from tempfile import TemporaryDirectory
+import git
+import pooch
+
proj_root = Path.cwd().parent.parent
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
import flopy
# temporary directory
@@ -172,22 +184,9 @@
flopy.mf6.data.mfdatastorage.DataStorageType.internal_array,
flopy.mf6.data.mfdatastorage.DataStorageType.internal_constant,
]
-k_template = flopy.mf6.ModflowGwfnpf.k.empty(
- model, True, layer_storage_types, 100.0
-)
+k_template = flopy.mf6.ModflowGwfnpf.k.empty(model, True, layer_storage_types, 100.0)
# set the array data for the first layer and apply a multiplication factor
-k_template[0]["data"] = [
- 65.0,
- 60.0,
- 55.0,
- 50.0,
- 45.0,
- 40.0,
- 35.0,
- 30.0,
- 25.0,
- 20.0,
-]
+k_template[0]["data"] = [65.0, 60.0, 55.0, 50.0, 45.0, 40.0, 35.0, 30.0, 25.0, 20.0]
k_template[0]["factor"] = 1.5
print(k_template)
# create npf package using the k template to define k
@@ -257,9 +256,14 @@
model, pname="ic", strt=strt, filename=f"{model_name}.ic"
)
# move external file data into model folder
-icv_data_path = os.path.join(
- "..", "..", "examples", "data", "mf6", "notebooks", "iconvert.txt"
+fname = "iconvert.txt"
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/mf6/notebooks/{fname}",
+ fname=fname,
+ path=data_path / "mf6" / "notebooks",
+ known_hash=None,
)
+icv_data_path = data_path / "mf6" / "notebooks" / fname
copyfile(icv_data_path, os.path.join(sim_path, "iconvert.txt"))
# create storage package
sto_package = flopy.mf6.ModflowGwfsto(
@@ -392,9 +396,7 @@
0: {"filename": "chd_sp1.dat", "data": [[(0, 0, 0), 70.0]]},
1: [[(0, 0, 0), 60.0]],
}
-chd = flopy.mf6.ModflowGwfchd(
- model, maxbound=1, stress_period_data=stress_period_data
-)
+chd = flopy.mf6.ModflowGwfchd(model, maxbound=1, stress_period_data=stress_period_data)
# ## Packages that Support both List-based and Array-based Data
#
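
# Editor's sketch of the distinction this section covers: recharge supplied
# as a list-based RCH package versus an array-based RCHA package (the rate
# values are illustrative only):
rch = flopy.mf6.ModflowGwfrch(
    model, stress_period_data={0: [[(0, 0, 0), 0.001]]}
)
rcha = flopy.mf6.ModflowGwfrcha(model, recharge=0.001)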
@@ -568,9 +570,7 @@
# Data can be modified in several ways. One way is to set data for a given layer within a LayerStorage object, like the one accessed in the code above. Another way is to set the data attribute to the new data. Yet another way is to call the data object's set_data method.
# set data within a LayerStorage object
-hk_layer_one.set_data(
- [120.0, 100.0, 80.0, 70.0, 60.0, 50.0, 40.0, 30.0, 25.0, 20.0]
-)
+hk_layer_one.set_data([120.0, 100.0, 80.0, 70.0, 60.0, 50.0, 40.0, 30.0, 25.0, 20.0])
print(f"New HK data no factor:\n{hk.get_data()}\n")
# set data attribute to new data
ic_package.strt = 150.0
diff --git a/.docs/Notebooks/mf6_tutorial01.py b/.docs/Notebooks/mf6_tutorial01.py
index b765d7f963..718ed3bdd6 100644
--- a/.docs/Notebooks/mf6_tutorial01.py
+++ b/.docs/Notebooks/mf6_tutorial01.py
@@ -126,11 +126,7 @@
# ### Create the node property flow (`NPF`) Package
-npf = flopy.mf6.ModflowGwfnpf(
- gwf,
- icelltype=1,
- k=k,
-)
+npf = flopy.mf6.ModflowGwfnpf(gwf, icelltype=1, k=k)
# ### Create the constant head (`CHD`) Package
#
@@ -147,10 +143,7 @@
if row_col != 0 and row_col != N - 1:
chd_rec.append(((layer, 0, row_col), h1))
chd_rec.append(((layer, N - 1, row_col), h1))
-chd = flopy.mf6.ModflowGwfchd(
- gwf,
- stress_period_data=chd_rec,
-)
+chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chd_rec)
# The `CHD` Package stored the constant heads in a structured array,
# also called a `numpy.recarray`. We can get a pointer to the recarray
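
# Editor's sketch of fetching that recarray through the mf6 data interface
# (assumes the `chd` package created above):
chd_spd = chd.stress_period_data.get_data(key=0)
print(chd_spd.dtype.names)  # field names of the structured array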
@@ -165,10 +158,7 @@
# Add a well in model layer 10.
wel_rec = [(Nlay - 1, int(N / 4), int(N / 4), q)]
-wel = flopy.mf6.ModflowGwfwel(
- gwf,
- stress_period_data=wel_rec,
-)
+wel = flopy.mf6.ModflowGwfwel(gwf, stress_period_data=wel_rec)
# ### Create the output control (`OC`) Package
#
@@ -263,11 +253,7 @@
pa = modelmap.plot_array(h, vmin=vmin, vmax=vmax)
quadmesh = modelmap.plot_bc("CHD")
linecollection = modelmap.plot_grid(lw=0.5, color="0.5")
-contours = modelmap.contour_array(
- h,
- levels=contour_intervals,
- colors="black",
-)
+contours = modelmap.contour_array(h, levels=contour_intervals, colors="black")
ax.clabel(contours, fmt="%2.1f")
cb = plt.colorbar(pa, shrink=0.5, ax=ax)
# second subplot
@@ -277,11 +263,7 @@
linecollection = modelmap.plot_grid(lw=0.5, color="0.5")
pa = modelmap.plot_array(h, vmin=vmin, vmax=vmax)
quadmesh = modelmap.plot_bc("CHD")
-contours = modelmap.contour_array(
- h,
- levels=contour_intervals,
- colors="black",
-)
+contours = modelmap.contour_array(h, levels=contour_intervals, colors="black")
ax.clabel(contours, fmt="%2.1f")
cb = plt.colorbar(pa, shrink=0.5, ax=ax)
@@ -292,19 +274,11 @@
fig, ax = plt.subplots(1, 1, figsize=(9, 3), constrained_layout=True)
# first subplot
ax.set_title("Row 25")
-modelmap = flopy.plot.PlotCrossSection(
- model=gwf,
- ax=ax,
- line={"row": int(N / 4)},
-)
+modelmap = flopy.plot.PlotCrossSection(model=gwf, ax=ax, line={"row": int(N / 4)})
pa = modelmap.plot_array(h, vmin=vmin, vmax=vmax)
quadmesh = modelmap.plot_bc("CHD")
linecollection = modelmap.plot_grid(lw=0.5, color="0.5")
-contours = modelmap.contour_array(
- h,
- levels=contour_intervals,
- colors="black",
-)
+contours = modelmap.contour_array(h, levels=contour_intervals, colors="black")
ax.clabel(contours, fmt="%2.1f")
cb = plt.colorbar(pa, shrink=0.5, ax=ax)
@@ -318,9 +292,7 @@
#
# First extract the `FLOW-JA-FACE` array from the cell-by-cell budget file
-flowja = gwf.oc.output.budget().get_data(text="FLOW-JA-FACE", kstpkper=(0, 0))[
- 0
-]
+flowja = gwf.oc.output.budget().get_data(text="FLOW-JA-FACE", kstpkper=(0, 0))[0]
# Next extract the flow residual. The MODFLOW 6 binary grid file is passed
# into the function because it contains the ia array that defines
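
# Editor's sketch of the call this leads into (flopy's mf6 post-processing
# helper; `workspace` and `name` are assumed from earlier cells, and the
# .grb file name follows the DIS package):
grb_file = os.path.join(workspace, f"{name}.dis.grb")
residual = flopy.mf6.utils.get_residuals(flowja, grb_file=grb_file)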
@@ -337,11 +309,7 @@
pa = modelmap.plot_array(residual)
quadmesh = modelmap.plot_bc("CHD")
linecollection = modelmap.plot_grid(lw=0.5, color="0.5")
-contours = modelmap.contour_array(
- h,
- levels=contour_intervals,
- colors="black",
-)
+contours = modelmap.contour_array(h, levels=contour_intervals, colors="black")
ax.clabel(contours, fmt="%2.1f")
plt.colorbar(pa, shrink=0.5)
diff --git a/.docs/Notebooks/mf_error_tutorial01.py b/.docs/Notebooks/mf_error_tutorial01.py
index 367b5775fc..57a28922af 100644
--- a/.docs/Notebooks/mf_error_tutorial01.py
+++ b/.docs/Notebooks/mf_error_tutorial01.py
@@ -19,17 +19,47 @@
# +
import os
import sys
+from pathlib import Path
from tempfile import TemporaryDirectory
+import git
+import pooch
+
import flopy
print(sys.version)
print(f"flopy version: {flopy.__version__}")
# -
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
+file_names = [
+ "bcf2ss.ba6",
+ "bcf2ss.bc6",
+ "bcf2ss.dis",
+ "bcf2ss.nam",
+ "bcf2ss.oc",
+ "bcf2ss.pcg",
+ "bcf2ss.rch",
+ "bcf2ss.riv",
+ "bcf2ss.wel",
+]
+for fname in file_names:
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/mf2005_test/{fname}",
+ fname=fname,
+ path=data_path / "mf2005_test",
+ known_hash=None,
+ )
+
# #### Set the working directory
-path = os.path.join("..", "..", "examples", "data", "mf2005_test")
+path = data_path / "mf2005_test"
# #### Load example dataset and change the model work space
diff --git a/.docs/Notebooks/mf_load_tutorial.py b/.docs/Notebooks/mf_load_tutorial.py
index 91db0b0db0..4ea9077c28 100644
--- a/.docs/Notebooks/mf_load_tutorial.py
+++ b/.docs/Notebooks/mf_load_tutorial.py
@@ -23,6 +23,10 @@
# +
import os
import sys
+from pathlib import Path
+
+import git
+import pooch
import flopy
@@ -30,6 +34,14 @@
print(f"flopy version: {flopy.__version__}")
# -
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
+
# ## The `load()` method
#
# To load a MODFLOW 2005 model, use the `Modflow.load()` method. The method's first argument is the path or name of the model namefile. Other parameters include:
@@ -38,7 +50,25 @@
# - `verbose`: whether to write diagnostic information useful for troubleshooting
# - `check`: whether to check for model configuration errors
-model_ws = os.path.join("..", "..", "examples", "data", "mf2005_test")
+file_names = [
+ "bcf2ss.ba6",
+ "bcf2ss.bc6",
+ "bcf2ss.dis",
+ "bcf2ss.nam",
+ "bcf2ss.oc",
+ "bcf2ss.pcg",
+ "bcf2ss.rch",
+ "bcf2ss.riv",
+ "bcf2ss.wel",
+]
+for fname in file_names:
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/mf2005_test/{fname}",
+ fname=fname,
+ path=data_path / "mf2005_test",
+ known_hash=None,
+ )
+model_ws = data_path / "mf2005_test"
ml = flopy.modflow.Modflow.load(
"bcf2ss.nam",
model_ws=model_ws,
@@ -51,7 +81,31 @@
#
# Below we load a model containing auxiliary variables, then access them.
-model_ws = os.path.join("..", "..", "examples", "data", "mp6")
+file_names = [
+ "EXAMPLE.BA6",
+ "EXAMPLE.BUD",
+ "EXAMPLE.DIS",
+ "EXAMPLE.DIS.metadata",
+ "EXAMPLE.HED",
+ "EXAMPLE.LPF",
+ "EXAMPLE.LST",
+ "EXAMPLE.MPBAS",
+ "EXAMPLE.OC",
+ "EXAMPLE.PCG",
+ "EXAMPLE.RCH",
+ "EXAMPLE.RIV",
+ "EXAMPLE.WEL",
+ "EXAMPLE.mpnam",
+ "EXAMPLE.nam",
+]
+for fname in file_names:
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/mp6/{fname}",
+ fname=fname,
+ path=data_path / "mp6",
+ known_hash=None,
+ )
+model_ws = data_path / "mp6"
ml = flopy.modflow.Modflow.load(
"EXAMPLE.nam",
model_ws=model_ws,
diff --git a/.docs/Notebooks/mf_tutorial02.py b/.docs/Notebooks/mf_tutorial02.py
index 96a3fcd589..345d329aab 100644
--- a/.docs/Notebooks/mf_tutorial02.py
+++ b/.docs/Notebooks/mf_tutorial02.py
@@ -183,9 +183,7 @@
"print head",
"print budget",
]
-oc = flopy.modflow.ModflowOc(
- mf, stress_period_data=stress_period_data, compact=True
-)
+oc = flopy.modflow.ModflowOc(mf, stress_period_data=stress_period_data, compact=True)
# ## Running the Model
#
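
# Editor's sketch of the step this section performs (standard flopy API for
# a MODFLOW-2005 model object `mf`):
mf.write_input()
success, buff = mf.run_model(silent=True, report=True)
assert success, "MODFLOW did not terminate normally"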
diff --git a/.docs/Notebooks/mfusg_conduit_examples.py b/.docs/Notebooks/mfusg_conduit_examples.py
index 187cabd628..4d50423829 100644
--- a/.docs/Notebooks/mfusg_conduit_examples.py
+++ b/.docs/Notebooks/mfusg_conduit_examples.py
@@ -20,11 +20,14 @@
# +
import os
import shutil
+from pathlib import Path
from pprint import pformat
from tempfile import TemporaryDirectory
+import git
import matplotlib.pyplot as plt
import numpy as np
+import pooch
import flopy
@@ -42,9 +45,35 @@
# A vertical conduit well is located at the center of the domain and has a radius of 0.5 m. The well pumps 62,840 m3/d and is fully open to both aquifers from top to bottom. The CLN Process was used with a circular conduit geometry type to discretize the well bore with two conduit cells, one in each layer. The WEL Package was used to pump from the bottom CLN cell.
#
-model_ws = os.path.join(
- "../../examples/data/mfusg_test", "03_conduit_confined"
-)
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
+file_names = [
+ "ex3.bas",
+ "ex3.bcf",
+ "ex3.cln",
+ "ex3.dis",
+ "ex3.nam",
+ "ex3.oc",
+ "ex3.sms",
+ "ex3.wel",
+ "run.bat",
+]
+for fname in file_names:
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/mfusg_test/03_conduit_confined/{fname}",
+ fname=fname,
+ path=data_path / "mfusg_test" / "03_conduit_confined",
+ known_hash=None,
+ )
+
+model_ws = data_path / "mfusg_test" / "03_conduit_confined"
mf = flopy.mfusg.MfUsg.load(
"ex3.nam", model_ws=model_ws, exe_name="mfusg", check=False, verbose=True
)
@@ -58,9 +87,7 @@
for j in range(mf.dis.nstp[i]):
spd[(i, j)] = ["save head", "save budget"]
-oc = flopy.modflow.ModflowOc(
- mf, stress_period_data=spd, unitnumber=[22, 30, 31, 50]
-)
+oc = flopy.modflow.ModflowOc(mf, stress_period_data=spd, unitnumber=[22, 30, 31, 50])
# +
model_ws = os.path.join(cln_ws, "ex03")
@@ -109,9 +136,7 @@
# +
simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0]
for i in range(nper - 1):
- simflow = np.append(
- simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0]
- )
+ simflow = np.append(simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0])
simflow1 = simflow[simflow["node"] == 1]["q"]
simflow2 = simflow[simflow["node"] == 2]["q"]
@@ -303,9 +328,7 @@
# +
simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0]
for i in range(nper - 1):
- simflow = np.append(
- simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0]
- )
+ simflow = np.append(simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0])
flow_case1 = simflow
# -
@@ -399,9 +422,7 @@
# +
simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0]
for i in range(nper - 1):
- simflow = np.append(
- simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0]
- )
+ simflow = np.append(simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0])
flow_case2 = simflow
# -
@@ -498,9 +519,7 @@
# +
simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0]
for i in range(nper - 1):
- simflow = np.append(
- simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0]
- )
+ simflow = np.append(simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0])
flow_case3 = simflow
# -
@@ -584,9 +603,7 @@
# +
simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0]
for i in range(nper - 1):
- simflow = np.append(
- simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0]
- )
+ simflow = np.append(simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0])
flow_case4 = simflow
# -
@@ -608,55 +625,19 @@
# +
fig = plt.figure(figsize=(8, 11), dpi=150)
ax1 = fig.add_subplot(211)
-ax1.plot(
- simtimes,
- flow_case1[::2,]["q"],
- label="Case A",
-)
-ax1.plot(
- simtimes,
- flow_case2[::2,]["q"],
- label="Case B",
-)
-ax1.plot(
- simtimes,
- flow_case3[::2,]["q"],
- dashes=[6, 2],
- label="Case C",
-)
-ax1.plot(
- simtimes,
- flow_case4[::2,]["q"],
- dashes=[6, 2],
- label="Case D",
-)
+ax1.plot(simtimes, flow_case1[::2,]["q"], label="Case A")
+ax1.plot(simtimes, flow_case2[::2,]["q"], label="Case B")
+ax1.plot(simtimes, flow_case3[::2,]["q"], dashes=[6, 2], label="Case C")
+ax1.plot(simtimes, flow_case4[::2,]["q"], dashes=[6, 2], label="Case D")
ax1.set_xlabel("Time, in days")
ax1.set_ylabel("Layer 1 flow to well")
ax1.legend()
ax2 = fig.add_subplot(212)
-ax2.plot(
- simtimes,
- flow_case1[1::2,]["q"],
- label="Case A",
-)
-ax2.plot(
- simtimes,
- flow_case2[1::2,]["q"],
- label="Case B",
-)
-ax2.plot(
- simtimes,
- flow_case3[1::2,]["q"],
- dashes=[6, 2],
- label="Case C",
-)
-ax2.plot(
- simtimes,
- flow_case4[1::2,]["q"],
- dashes=[6, 2],
- label="Case D",
-)
+ax2.plot(simtimes, flow_case1[1::2,]["q"], label="Case A")
+ax2.plot(simtimes, flow_case2[1::2,]["q"], label="Case B")
+ax2.plot(simtimes, flow_case3[1::2,]["q"], dashes=[6, 2], label="Case C")
+ax2.plot(simtimes, flow_case4[1::2,]["q"], dashes=[6, 2], label="Case D")
ax2.set_xlabel("Time, in days")
ax2.set_ylabel("Layer 2 flow to well")
ax2.legend()
diff --git a/.docs/Notebooks/mfusg_freyberg_example.py b/.docs/Notebooks/mfusg_freyberg_example.py
index 7f92e40f8f..bd1ee73d5d 100644
--- a/.docs/Notebooks/mfusg_freyberg_example.py
+++ b/.docs/Notebooks/mfusg_freyberg_example.py
@@ -28,10 +28,47 @@
# +
from pprint import pformat
+import git
+import pooch
+
import flopy
root_name = "freyberg.usg"
-model_ws = Path.cwd().parent / "../examples/data" / root_name.replace(".", "_")
+
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
+file_names = {
+ "freyberg.usg.bas": None,
+ "freyberg.usg.disu": None,
+ "freyberg.usg.ghb": None,
+ "freyberg.usg.gnc": None,
+ "freyberg.usg.gsf": None,
+ "freyberg.usg.gsf.with_comment": None,
+ "freyberg.usg.lpf": None,
+ "freyberg.usg.nam": None,
+ "freyberg.usg.oc": None,
+ "freyberg.usg.rch": None,
+ "freyberg.usg.sfr": None,
+ "freyberg.usg.sms": None,
+ "freyberg.usg.wel": None,
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{root_name.replace('.', '_')}/{fname}",
+ fname=fname,
+ path=data_path / root_name.replace(".", "_"),
+ known_hash=None,
+ )
+
+
+model_ws = data_path / root_name.replace(".", "_")
# -
# Now construct an `UnstructuredGrid` from a grid specification file.
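
# Editor's sketch of that construction; `UnstructuredGrid.from_gridspec`
# reads a .gsf grid specification file (the path assumes the files
# downloaded above):
mfgrid = flopy.discretization.UnstructuredGrid.from_gridspec(
    model_ws / f"{root_name}.gsf"
)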
@@ -144,21 +181,13 @@
ax = fig.add_subplot(1, len(lines), i + 1)
ax.set_title(f"Freyberg head cross-section (line {i})")
xsect = flopy.plot.PlotCrossSection(
- modelgrid=mfgrid,
- ax=ax,
- line={"line": lines[i]},
- geographic_coords=True,
+ modelgrid=mfgrid, ax=ax, line={"line": lines[i]}, geographic_coords=True
)
xsect.plot_array(head, head=head, alpha=0.4)
xsect.plot_ibound(ibound=ibound, head=head)
xsect.plot_inactive(ibound=ibound)
contours = xsect.contour_array(
- head,
- masked_values=[999.0],
- head=head,
- levels=levels,
- alpha=1.0,
- colors="blue",
+ head, masked_values=[999.0], head=head, levels=levels, alpha=1.0, colors="blue"
)
plt.clabel(contours, fmt="%.0f", colors="blue", fontsize=12)
xsect.plot_grid(alpha=0.2)
@@ -180,14 +209,8 @@
xsect = flopy.plot.PlotCrossSection(
modelgrid=mfgrid, ax=ax, line={"line": line}, geographic_coords=True
)
- cmap = xsect.plot_array(
- head2,
- masked_values=[-999.99],
- alpha=0.4,
- )
- contours = xsect.contour_array(
- head2, levels=levels, alpha=1.0, colors="blue"
- )
+ cmap = xsect.plot_array(head2, masked_values=[-999.99], alpha=0.4)
+ contours = xsect.contour_array(head2, levels=levels, alpha=1.0, colors="blue")
xsect.plot_inactive(ibound=ibound, color_noflow=(0.8, 0.8, 0.8))
xsect.plot_grid(alpha=0.2)
ax.set_ylim([0, 40]) # set y axis range to ignore low elevations
@@ -199,12 +222,7 @@
xsect = flopy.plot.PlotCrossSection(
modelgrid=mfgrid, ax=ax, line={"line": line}, geographic_coords=True
)
- cmap = xsect.plot_array(
- head,
- masked_values=[-999.99],
- head=head,
- alpha=0.4,
- )
+ cmap = xsect.plot_array(head, masked_values=[-999.99], head=head, alpha=0.4)
contours = xsect.contour_array(
head, head=head, levels=levels, alpha=1.0, colors="blue"
)
@@ -219,12 +237,7 @@
xsect = flopy.plot.PlotCrossSection(
modelgrid=mfgrid, ax=ax, line={"line": line}, geographic_coords=True
)
- cmap = xsect.plot_array(
- head2,
- masked_values=[-999.99],
- head=head2,
- alpha=0.4,
- )
+ cmap = xsect.plot_array(head2, masked_values=[-999.99], head=head2, alpha=0.4)
contours = xsect.contour_array(
head2, head=head2, levels=levels, alpha=1.0, colors="blue"
)
diff --git a/.docs/Notebooks/mfusg_zaidel_example.py b/.docs/Notebooks/mfusg_zaidel_example.py
index 61d961412b..240fd6b0c2 100644
--- a/.docs/Notebooks/mfusg_zaidel_example.py
+++ b/.docs/Notebooks/mfusg_zaidel_example.py
@@ -21,7 +21,7 @@
#
# One of the most challenging numerical cases for MODFLOW arises from drying-rewetting problems, often associated with abrupt changes in the elevation of the impervious base of a thin unconfined aquifer. This problem simulates a discontinuous water table configuration over a stairway impervious base and flow between constant-head boundaries in columns 1 and 200. This problem is based on
#
-# [Zaidel, J. (2013), Discontinuous Steady-State Analytical Solutions of the Boussinesq Equation and Their Numerical Representation by Modflow. Groundwater, 51: 952–959. doi: 10.1111/gwat.12019](https://doi.org/10.1111/gwat.12019)
+# [Zaidel, J. (2013), Discontinuous Steady-State Analytical Solutions of the Boussinesq Equation and Their Numerical Representation by Modflow. Groundwater, 51: 952-959. doi: 10.1111/gwat.12019](https://doi.org/10.1111/gwat.12019)
#
# The model consists of a grid of 200 columns, 1 row, and 1 layer; a bottom altitude ranging from 20 to 0 m; constant heads of 23 and 5 m in columns 1 and 200, respectively; and a horizontal hydraulic conductivity of $1 \times 10^{-4}$ m/d. The discretization is 5 m in the row direction for all cells.
#
@@ -167,9 +167,7 @@
left=None, bottom=None, right=None, top=None, wspace=0.25, hspace=0.25
)
ax = fig.add_subplot(1, 1, 1)
-ax.plot(
- x, mfusghead[0, 0, :], linewidth=0.75, color="blue", label="MODFLOW-USG"
-)
+ax.plot(x, mfusghead[0, 0, :], linewidth=0.75, color="blue", label="MODFLOW-USG")
ax.fill_between(x, y1=botm[1, 0, :], y2=-5, color="0.5", alpha=0.5)
leg = ax.legend(loc="upper right")
leg.draw_frame(False)
diff --git a/.docs/Notebooks/modelgrid_examples.py b/.docs/Notebooks/modelgrid_examples.py
index 53f711cb63..e9afe08cbf 100644
--- a/.docs/Notebooks/modelgrid_examples.py
+++ b/.docs/Notebooks/modelgrid_examples.py
@@ -29,14 +29,17 @@
# 3) __Useful methods and features__
import os
+import sys
# +
-import sys
+from pathlib import Path
from tempfile import TemporaryDirectory
+import git
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
+import pooch
import flopy
from flopy.discretization import StructuredGrid, UnstructuredGrid, VertexGrid
@@ -52,15 +55,80 @@
mf6_exe = "mf6"
gridgen_exe = "gridgen"
+
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
+sim_data = {
+ "freyberg_multilayer_transient": {
+ "freyberg.bas": None,
+ "freyberg.cbc": None,
+ "freyberg.ddn": None,
+ "freyberg.dis": None,
+ "freyberg.drn": None,
+ "freyberg.hds": None,
+ "freyberg.list": None,
+ "freyberg.nam": None,
+ "freyberg.nwt": None,
+ "freyberg.oc": None,
+ "freyberg.rch": None,
+ "freyberg.upw": None,
+ "freyberg.wel": None,
+ },
+ "mf6-freyberg": {
+ "bot.asc": "3107f907cb027460fd40ffc16cb797a78babb31988c7da326c9f500fba855b62",
+ "description.txt": "94093335eec6a24711f86d4d217ccd5a7716dd9e01cb6b732bc7757d41675c09",
+ "freyberg.cbc": "c8ad843b1da753eb58cf6c462ac782faf0ca433d6dcb067742d8bd698db271e3",
+ "freyberg.chd": "d8b8ada8d3978daea1758b315be983b5ca892efc7d69bf6b367ceec31e0dd156",
+ "freyberg.dis": "cac230a207cc8483693f7ba8ae29ce40c049036262eac4cebe17a4e2347a8b30",
+ "freyberg.dis.grb": "c8c26fb1fa4b210208134b286d895397cf4b3131f66e1d9dda76338502c7e96a",
+ "freyberg.hds": "926a06411ca658a89db6b5686f51ddeaf5b74ced81239cab1d43710411ba5f5b",
+ "freyberg.ic": "6efb56ee9cdd704b9a76fb9efd6dae750facc5426b828713f2d2cf8d35194120",
+ "freyberg.ims": "6dddae087d85417e3cdaa13e7b24165afb7f9575ab68586f3adb6c1b2d023781",
+ "freyberg.nam": "cee9b7b000fe35d2df26e878d09d465250a39504f87516c897e3fa14dcda081e",
+ "freyberg.npf": "81104d3546045fff0eddf5059465e560b83b492fa5a5acad1907ce18c2b9c15f",
+ "freyberg.oc": "c0715acd75eabcc42c8c47260a6c1abd6c784350983f7e2e6009ddde518b80b8",
+ "freyberg.rch": "a6ec1e0eda14fd2cdf618a5c0243a9caf82686c69242b783410d5abbcf971954",
+ "freyberg.riv": "a8cafc8c317cbe2acbb43e2f0cfe1188cb2277a7a174aeb6f3e6438013de8088",
+ "freyberg.sto": "74d748c2f0adfa0a32ee3f2912115c8f35b91011995b70c1ec6ae1c627242c41",
+ "freyberg.tdis": "9965cbb17caf5b865ea41a4ec04bcb695fe15a38cb539425fdc00abbae385cbe",
+ "freyberg.wel": "f19847de455598de52c05a4be745698c8cb589e5acfb0db6ab1f06ded5ff9310",
+ "k11.asc": "b6a8aa46ef17f7f096d338758ef46e32495eb9895b25d687540d676744f02af5",
+ "mfsim.nam": "6b8d6d7a56c52fb2bff884b3979e3d2201c8348b4bbfd2b6b9752863cbc9975e",
+ "top.asc": "3ad2b131671b9faca7f74c1dd2b2f41875ab0c15027764021a89f9c95dccaa6a",
+ },
+ "unstructured": {
+ "TriMesh_local.exp": None,
+ "TriMesh_usg.exp": None,
+ "Trimesh_circle.exp": None,
+ "headu.githds": None,
+ "ugrid_iverts.dat": None,
+ "ugrid_verts.dat": None,
+ },
+}
+
+for sim_name in sim_data:
+ for fname, fhash in sim_data[sim_name].items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/{fname}",
+ fname=fname,
+ path=data_path / sim_name,
+ known_hash=fhash,
+ )
+
# +
# set paths to each of our model types for this example notebook
-spth = os.path.join(
- "..", "..", "examples", "data", "freyberg_multilayer_transient"
-)
-spth6 = os.path.join("..", "..", "examples", "data", "mf6-freyberg")
-vpth = os.path.join("..", "..", "examples", "data")
-upth = os.path.join("..", "..", "examples", "data")
-u_data_ws = os.path.join("..", "..", "examples", "data", "unstructured")
+spth = data_path / "freyberg_multilayer_transient"
+spth6 = data_path / "mf6-freyberg"
+vpth = data_path
+upth = data_path
+u_data_ws = data_path / "unstructured"
# temporary workspace
temp_dir = TemporaryDirectory()
@@ -137,9 +205,7 @@
epsg = modelgrid.epsg
proj4 = modelgrid.proj4
-print(
- f"xoff: {xoff}\nyoff: {yoff}\nangrot: {angrot}\nepsg: {epsg}\nproj4: {proj4}"
-)
+print(f"xoff: {xoff}\nyoff: {yoff}\nangrot: {angrot}\nepsg: {epsg}\nproj4: {proj4}")
# -
# #### Setting modelgrid reference information
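
# Editor's sketch of the setter counterpart to the getters shown above
# (Grid.set_coord_info; only the keywords shown here are assumed):
modelgrid.set_coord_info(xoff=xoff, yoff=yoff, angrot=angrot)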
@@ -422,9 +488,7 @@
# +
# simple functions to load vertex and index lists
def load_verts(fname):
- verts = np.genfromtxt(
- fname, dtype=[int, float, float], names=["iv", "x", "y"]
- )
+ verts = np.genfromtxt(fname, dtype=[int, float, float], names=["iv", "x", "y"])
verts["iv"] -= 1 # zero based
return verts
@@ -554,9 +618,7 @@ def load_iverts(fname):
# +
# load a modflow-6 freyberg simulation
-sim = flopy.mf6.MFSimulation.load(
- sim_ws=spth6, verbosity_level=0, exe_name=mf6_exe
-)
+sim = flopy.mf6.MFSimulation.load(sim_ws=spth6, verbosity_level=0, exe_name=mf6_exe)
# get a model object from the simulation
ml = sim.get_model("freyberg")
@@ -661,9 +723,7 @@ def load_iverts(fname):
label="Grid cell centers",
ms=4,
)
-plt.legend(
- loc=0,
-)
+plt.legend(loc=0)
plt.title("modelgrid cell vertices and centers")
# +
@@ -679,9 +739,7 @@ def load_iverts(fname):
# plot arrays
for ix, ax in enumerate(axs):
pmv = flopy.plot.PlotMapView(modelgrid=modelgrid, ax=ax)
- pc = pmv.plot_array(
- arrays[ix], masked_values=[1e30], vmin=0, vmax=35, alpha=0.5
- )
+ pc = pmv.plot_array(arrays[ix], masked_values=[1e30], vmin=0, vmax=35, alpha=0.5)
pmv.plot_grid()
pmv.plot_inactive()
ax.set_title(f"Modelgrid: {labels[ix]}")
@@ -818,9 +876,7 @@ def load_iverts(fname):
# create a mf6 WEL package and add it to the existing model
stress_period_data = {0: pdata}
-wel = flopy.mf6.modflow.ModflowGwfwel(
- ml, stress_period_data=stress_period_data
-)
+wel = flopy.mf6.modflow.ModflowGwfwel(ml, stress_period_data=stress_period_data)
# plot the locations from the new WEL package on the modelgrid
fig, ax = plt.subplots(figsize=(10, 10), subplot_kw={"aspect": "equal"})
diff --git a/.docs/Notebooks/modflow_postprocessing_example.py b/.docs/Notebooks/modflow_postprocessing_example.py
index 656aef5d44..12e7984c3d 100644
--- a/.docs/Notebooks/modflow_postprocessing_example.py
+++ b/.docs/Notebooks/modflow_postprocessing_example.py
@@ -22,11 +22,14 @@
# +
import os
import sys
+from pathlib import Path
from tempfile import TemporaryDirectory
+import git
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
+import pooch
import flopy
import flopy.utils.binaryfile as bf
@@ -43,19 +46,89 @@
# +
mfnam = "EXAMPLE.nam"
-model_ws = "../../examples/data/mp6/"
heads_file = "EXAMPLE.HED"
# temporary directory
temp_dir = TemporaryDirectory()
workspace = temp_dir.name
+
+
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+file_names = {
+ "EXAMPLE-1.endpoint": None,
+ "EXAMPLE-1.mpsim": None,
+ "EXAMPLE-2.endpoint": None,
+ "EXAMPLE-2.mplist": None,
+ "EXAMPLE-2.mpsim": None,
+ "EXAMPLE-3.endpoint": None,
+ "EXAMPLE-3.mplist": None,
+ "EXAMPLE-3.mpsim": None,
+ "EXAMPLE-3.pathline": None,
+ "EXAMPLE-4.endpoint": None,
+ "EXAMPLE-4.mplist": None,
+ "EXAMPLE-4.mpsim": None,
+ "EXAMPLE-4.timeseries": None,
+ "EXAMPLE-5.endpoint": None,
+ "EXAMPLE-5.mplist": None,
+ "EXAMPLE-5.mpsim": None,
+ "EXAMPLE-6.endpoint": None,
+ "EXAMPLE-6.mplist": None,
+ "EXAMPLE-6.mpsim": None,
+ "EXAMPLE-6.timeseries": None,
+ "EXAMPLE-7.endpoint": None,
+ "EXAMPLE-7.mplist": None,
+ "EXAMPLE-7.mpsim": None,
+ "EXAMPLE-7.timeseries": None,
+ "EXAMPLE-8.endpoint": None,
+ "EXAMPLE-8.mplist": None,
+ "EXAMPLE-8.mpsim": None,
+ "EXAMPLE-8.timeseries": None,
+ "EXAMPLE-9.endpoint": None,
+ "EXAMPLE-9.mplist": None,
+ "EXAMPLE-9.mpsim": None,
+ "EXAMPLE.BA6": None,
+ "EXAMPLE.BUD": None,
+ "EXAMPLE.DIS": None,
+ "EXAMPLE.DIS.metadata": None,
+ "EXAMPLE.HED": None,
+ "EXAMPLE.LPF": None,
+ "EXAMPLE.LST": None,
+ "EXAMPLE.MPBAS": None,
+ "EXAMPLE.OC": None,
+ "EXAMPLE.PCG": None,
+ "EXAMPLE.RCH": None,
+ "EXAMPLE.RIV": None,
+ "EXAMPLE.WEL": None,
+ "EXAMPLE.mpnam": None,
+ "EXAMPLE.nam": None,
+ "example-1.mplist": None,
+ "example-6.locations": None,
+ "example-7.locations": None,
+ "example-8.locations": None,
+ "example.basemap": None,
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/mp6/{fname}",
+ fname=fname,
+ path=data_path / "mp6",
+ known_hash=fhash,
+ )
+
# -
# ### Load example model and head results
-m = flopy.modflow.Modflow.load(mfnam, model_ws=model_ws)
+m = flopy.modflow.Modflow.load(mfnam, model_ws=data_path / "mp6")
-hdsobj = bf.HeadFile(model_ws + heads_file)
+hdsobj = bf.HeadFile(data_path / "mp6" / heads_file)
hds = hdsobj.get_data(kstpkper=(0, 2))
hds.shape
diff --git a/.docs/Notebooks/modpath6_example.py b/.docs/Notebooks/modpath6_example.py
index f0bfbc6f00..093e67adf2 100644
--- a/.docs/Notebooks/modpath6_example.py
+++ b/.docs/Notebooks/modpath6_example.py
@@ -29,10 +29,12 @@
from pprint import pformat
from tempfile import TemporaryDirectory
+import git
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
+import pooch
import flopy
@@ -48,16 +50,87 @@
# +
from pathlib import Path
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
# temporary directory
temp_dir = TemporaryDirectory()
-model_ws = temp_dir.name
+model_ws = Path(temp_dir.name)
+
+file_names = {
+ "EXAMPLE-1.endpoint": None,
+ "EXAMPLE-1.mpsim": None,
+ "EXAMPLE-2.endpoint": None,
+ "EXAMPLE-2.mplist": None,
+ "EXAMPLE-2.mpsim": None,
+ "EXAMPLE-3.endpoint": None,
+ "EXAMPLE-3.mplist": None,
+ "EXAMPLE-3.mpsim": None,
+ "EXAMPLE-3.pathline": None,
+ "EXAMPLE-4.endpoint": None,
+ "EXAMPLE-4.mplist": None,
+ "EXAMPLE-4.mpsim": None,
+ "EXAMPLE-4.timeseries": None,
+ "EXAMPLE-5.endpoint": None,
+ "EXAMPLE-5.mplist": None,
+ "EXAMPLE-5.mpsim": None,
+ "EXAMPLE-6.endpoint": None,
+ "EXAMPLE-6.mplist": None,
+ "EXAMPLE-6.mpsim": None,
+ "EXAMPLE-6.timeseries": None,
+ "EXAMPLE-7.endpoint": None,
+ "EXAMPLE-7.mplist": None,
+ "EXAMPLE-7.mpsim": None,
+ "EXAMPLE-7.timeseries": None,
+ "EXAMPLE-8.endpoint": None,
+ "EXAMPLE-8.mplist": None,
+ "EXAMPLE-8.mpsim": None,
+ "EXAMPLE-8.timeseries": None,
+ "EXAMPLE-9.endpoint": None,
+ "EXAMPLE-9.mplist": None,
+ "EXAMPLE-9.mpsim": None,
+ "EXAMPLE.BA6": None,
+ "EXAMPLE.BUD": None,
+ "EXAMPLE.DIS": None,
+ "EXAMPLE.DIS.metadata": None,
+ "EXAMPLE.HED": None,
+ "EXAMPLE.LPF": None,
+ "EXAMPLE.LST": None,
+ "EXAMPLE.MPBAS": None,
+ "EXAMPLE.OC": None,
+ "EXAMPLE.PCG": None,
+ "EXAMPLE.RCH": None,
+ "EXAMPLE.RIV": None,
+ "EXAMPLE.WEL": None,
+ "EXAMPLE.mpnam": None,
+ "EXAMPLE.nam": None,
+ "example-1.mplist": None,
+ "example-6.locations": None,
+ "example-7.locations": None,
+ "example-8.locations": None,
+ "example.basemap": None,
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/mp6/{fname}",
+ fname=fname,
+ path=data_path / "mp6",
+ known_hash=fhash,
+ )
+
+shutil.copytree(data_path / "mp6", model_ws, dirs_exist_ok=True)
-model_path = Path.cwd().parent.parent / "examples" / "data" / "mp6"
-mffiles = list(model_path.glob("EXAMPLE.*"))
+mffiles = list(model_ws.glob("EXAMPLE.*"))
-m = flopy.modflow.Modflow.load("EXAMPLE.nam", model_ws=model_path)
+m = flopy.modflow.Modflow.load("EXAMPLE.nam", model_ws=model_ws)
-hdsfile = flopy.utils.HeadFile(os.path.join(model_path, "EXAMPLE.HED"))
+hdsfile = flopy.utils.HeadFile(os.path.join(model_ws, "EXAMPLE.HED"))
hdsfile.get_kstpkper()
hds = hdsfile.get_data(kstpkper=(0, 2))
@@ -93,7 +166,7 @@
modelname="ex6",
exe_name="mp6",
modflowmodel=m,
- model_ws=str(model_path),
+ model_ws=str(model_ws),
)
mpb = flopy.modpath.Modpath6Bas(
@@ -109,10 +182,6 @@
start_time=(2, 0, 1.0),
)
-shutil.copy(model_path / "EXAMPLE.DIS", join(model_ws, "EXAMPLE.DIS"))
-shutil.copy(model_path / "EXAMPLE.HED", join(model_ws, "EXAMPLE.HED"))
-shutil.copy(model_path / "EXAMPLE.BUD", join(model_ws, "EXAMPLE.BUD"))
-
mp.change_model_ws(model_ws)
mp.write_name_file()
mp.write_input()
@@ -146,9 +215,7 @@
fpth = os.path.join(model_ws, "starting_locs.shp")
print(type(fpth))
-epobj.write_shapefile(
- well_epd, direction="starting", shpname=fpth, mg=m.modelgrid
-)
+epobj.write_shapefile(well_epd, direction="starting", shpname=fpth, mg=m.modelgrid)
# Read in the pathline file and subset to pathlines that terminated in the well.
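
# Editor's sketch of that step (PathlineFile and its destination filter;
# the output file name and the `nodew` well-cell list are assumptions
# drawn from this notebook's setup):
pthobj = flopy.utils.PathlineFile(os.path.join(model_ws, "ex6.mppth"))
well_pathlines = pthobj.get_destination_pathline_data(dest_cells=nodew)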
@@ -205,7 +272,7 @@
# Replace the WEL package with MNW2, and create a backward tracking simulation using particles released at the MNW well.
m2 = flopy.modflow.Modflow.load(
- "EXAMPLE.nam", model_ws=str(model_path), exe_name="mf2005"
+ "EXAMPLE.nam", model_ws=str(model_ws), exe_name="mf2005"
)
m2.get_package_list()
@@ -294,9 +361,7 @@
pthobj = flopy.utils.PathlineFile(os.path.join(model_ws, "ex6mnw.mppth"))
epdobj = flopy.utils.EndpointFile(os.path.join(model_ws, "ex6mnw.mpend"))
well_epd = epdobj.get_alldata()
-well_pathlines = (
- pthobj.get_alldata()
-) # returns a list of recarrays; one per pathline
+well_pathlines = pthobj.get_alldata() # returns a list of recarrays; one per pathline
# +
fig = plt.figure(figsize=(8, 8))
@@ -311,9 +376,7 @@
)
plt.clabel(contour_set, inline=1, fontsize=14)
-mapview.plot_pathline(
- well_pathlines, travel_time="<10000", layer="all", colors="red"
-)
+mapview.plot_pathline(well_pathlines, travel_time="<10000", layer="all", colors="red")
# -
try:
diff --git a/.docs/Notebooks/modpath7_create_simulation_example.py b/.docs/Notebooks/modpath7_create_simulation_example.py
index f5f0975dde..7fd621bcc6 100644
--- a/.docs/Notebooks/modpath7_create_simulation_example.py
+++ b/.docs/Notebooks/modpath7_create_simulation_example.py
@@ -81,9 +81,7 @@ def get_nodes(locs):
nm = "ex01_mf6"
# Create the Flopy simulation object
-sim = flopy.mf6.MFSimulation(
- sim_name=nm, exe_name=mfexe, version="mf6", sim_ws=ws
-)
+sim = flopy.mf6.MFSimulation(sim_name=nm, exe_name=mfexe, version="mf6", sim_ws=ws)
# Create the Flopy temporal discretization object
pd = (perlen, nstp, tsmult)
@@ -133,9 +131,7 @@ def get_nodes(locs):
flopy.mf6.modflow.mfgwfrcha.ModflowGwfrcha(gwf, recharge=rch)
# wel
wd = [(wel_loc, wel_q)]
-flopy.mf6.modflow.mfgwfwel.ModflowGwfwel(
- gwf, maxbound=1, stress_period_data={0: wd}
-)
+flopy.mf6.modflow.mfgwfwel.ModflowGwfwel(gwf, maxbound=1, stress_period_data={0: wd})
# river
rd = []
for i in range(nrow):
@@ -258,9 +254,7 @@ def get_nodes(locs):
colors = ["green", "orange", "red"]
# +
-f, axes = plt.subplots(
- ncols=3, nrows=2, sharey=True, sharex=True, figsize=(15, 10)
-)
+f, axes = plt.subplots(ncols=3, nrows=2, sharey=True, sharex=True, figsize=(15, 10))
axes = axes.flatten()
idax = 0
@@ -333,12 +327,7 @@ def get_nodes(locs):
mm = flopy.plot.PlotMapView(model=gwf, ax=ax)
mm.plot_grid(lw=0.5)
mm.plot_pathline(
- pwb,
- layer="all",
- colors="blue",
- lw=0.5,
- linestyle=":",
- label="captured by wells",
+ pwb, layer="all", colors="blue", lw=0.5, linestyle=":", label="captured by wells"
)
mm.plot_endpoint(ewb, direction="ending") # , colorbar=True, shrink=0.5);
@@ -348,12 +337,7 @@ def get_nodes(locs):
mm = flopy.plot.PlotMapView(model=gwf, ax=ax)
mm.plot_grid(lw=0.5)
mm.plot_pathline(
- prb,
- layer="all",
- colors="green",
- lw=0.5,
- linestyle=":",
- label="captured by rivers",
+ prb, layer="all", colors="green", lw=0.5, linestyle=":", label="captured by rivers"
)
plt.tight_layout()
diff --git a/.docs/Notebooks/mt3d-usgs_example.py b/.docs/Notebooks/mt3d-usgs_example.py
index 008e285b36..dac13647a5 100644
--- a/.docs/Notebooks/mt3d-usgs_example.py
+++ b/.docs/Notebooks/mt3d-usgs_example.py
@@ -35,12 +35,15 @@
# +
import sys
+from pathlib import Path
from pprint import pformat
from tempfile import TemporaryDirectory
+import git
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
+import pooch
import flopy
@@ -56,6 +59,15 @@
mfexe = "mfnwt"
mtexe = "mt3dusgs"
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
# Make sure modelpth directory exists
if not os.path.isdir(modelpth):
os.makedirs(modelpth, exist_ok=True)
@@ -389,9 +401,7 @@ def calc_strtElev(X, Y):
"CrnkNic.gag5",
"CrnkNic.gag6",
]
-gage = flopy.modflow.ModflowGage(
- mf, numgage=6, gage_data=gages, filenames=files
-)
+gage = flopy.modflow.ModflowGage(mf, numgage=6, gage_data=gages, filenames=files)
# Instantiate linkage with mass transport routing (LMT) package for MODFLOW-NWT (generates linker file)
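
# Editor's sketch of that instantiation (flopy.modflow.ModflowLmt; the
# linker file name is illustrative):
lmt = flopy.modflow.ModflowLmt(mf, output_file_name="CrnkNic.ftl")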
@@ -589,7 +599,14 @@ def load_ts_from_otis(fname, iobs=1):
ts5_mt3d = load_ts_from_SFT_output(fname_SFTout, nd=619)
# OTIS results located here
-fname_OTIS = "../../examples/data/mt3d_test/mfnwt_mt3dusgs/sft_crnkNic/OTIS_solution.out"
+fname = "OTIS_solution.out"
+fname_OTIS = data_path / "mt3d_test" / "mfnwt_mt3dusgs" / "sft_crnkNic" / fname
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/mt3d_test/mfnwt_mt3dusgs/sft_crnkNic/{fname}",
+ fname=fname,
+ path=data_path / "mt3d_test" / "mfnwt_mt3dusgs" / "sft_crnkNic",
+ known_hash=None,
+)
# Loading OTIS output
ts1_Otis = load_ts_from_otis(fname_OTIS, 1)
@@ -676,37 +693,17 @@ def set_sizeyaxis(a, fmt, sz):
ax.plot(ts5_Otis[:, 0], ts5_Otis[:, 1], "c-", linewidth=1.0)
ax.plot(
- (ts1_mt3d[:, 0]) / 3600,
- ts1_mt3d[:, 1],
- "kD",
- markersize=2.0,
- mfc="none",
- mec="k",
+ (ts1_mt3d[:, 0]) / 3600, ts1_mt3d[:, 1], "kD", markersize=2.0, mfc="none", mec="k"
)
ax.plot(
- (ts2_mt3d[:, 0]) / 3600,
- ts2_mt3d[:, 1],
- "b*",
- markersize=3.0,
- mfc="none",
- mec="b",
+ (ts2_mt3d[:, 0]) / 3600, ts2_mt3d[:, 1], "b*", markersize=3.0, mfc="none", mec="b"
)
ax.plot((ts3_mt3d[:, 0]) / 3600, ts3_mt3d[:, 1], "r+", markersize=3.0)
ax.plot(
- (ts4_mt3d[:, 0]) / 3600,
- ts4_mt3d[:, 1],
- "g^",
- markersize=2.0,
- mfc="none",
- mec="g",
+ (ts4_mt3d[:, 0]) / 3600, ts4_mt3d[:, 1], "g^", markersize=2.0, mfc="none", mec="g"
)
ax.plot(
- (ts5_mt3d[:, 0]) / 3600,
- ts5_mt3d[:, 1],
- "co",
- markersize=2.0,
- mfc="none",
- mec="c",
+ (ts5_mt3d[:, 0]) / 3600, ts5_mt3d[:, 1], "co", markersize=2.0, mfc="none", mec="c"
)
# customize plot
diff --git a/.docs/Notebooks/mt3dms_examples.py b/.docs/Notebooks/mt3dms_examples.py
index 77996876db..66fac65510 100644
--- a/.docs/Notebooks/mt3dms_examples.py
+++ b/.docs/Notebooks/mt3dms_examples.py
@@ -34,15 +34,18 @@
# 10. Three-Dimensional Field Case Study
import os
+import sys
# +
-import sys
+from pathlib import Path
from pprint import pformat
from tempfile import TemporaryDirectory
+import git
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
+import pooch
import flopy
from flopy.utils.util_array import read1d
@@ -51,7 +54,6 @@
exe_name_mf = "mf2005"
exe_name_mt = "mt3dms"
-datadir = os.path.join("..", "..", "examples", "data", "mt3d_test", "mt3dms")
# temporary directory
temp_dir = TemporaryDirectory()
@@ -62,6 +64,28 @@
print(f"matplotlib version: {mpl.__version__}")
print(f"flopy version: {flopy.__version__}")
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+datadir = data_path / "mt3d_test" / "mt3dms"
+
+file_names = {
+ "p08shead.dat": None,
+ "p10cinit.dat": None,
+ "p10shead.dat": None,
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/mt3d_test/mt3dms/{fname}",
+ fname=fname,
+ path=data_path / "mt3d_test" / "mt3dms",
+ known_hash=None,
+ )
# -
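Note that `known_hash=None` skips integrity checking. A sketch of one way to pin these downloads later: after one trusted fetch, print SHA-256 digests with `pooch.file_hash` and paste them into `file_names` in place of `None`:

for fname in file_names:
    # compute the digest of the locally cached copy
    digest = pooch.file_hash(str(data_path / "mt3d_test" / "mt3dms" / fname))
    print(f'"{fname}": "{digest}",')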
@@ -1314,9 +1338,7 @@ def p08(dirname, mixelm):
mx.plot_array(hk, masked_values=[hk[0, 0, 0]], alpha=0.2)
mx.plot_ibound()
mx.plot_grid(color="0.5", alpha=0.2)
-cs = mx.contour_array(
- conc[3], levels=[0.05, 0.1, 0.15, 0.19], masked_values=[1.0e30]
-)
+cs = mx.contour_array(conc[3], levels=[0.05, 0.1, 0.15, 0.19], masked_values=[1.0e30])
ax.set_title("TIME = 20 YEARS")
@@ -1525,9 +1547,7 @@ def p10(dirname, mixelm, perlen=1000, isothm=1, sp2=0.0, ttsmult=1.2):
nrow = 61
ncol = 40
delr = (
- [2000, 1600, 800, 400, 200, 100]
- + 28 * [50]
- + [100, 200, 400, 800, 1600, 2000]
+ [2000, 1600, 800, 400, 200, 100] + 28 * [50] + [100, 200, 400, 800, 1600, 2000]
)
delc = (
[2000, 2000, 2000, 1600, 800, 400, 200, 100]
@@ -1661,9 +1681,7 @@ def p10(dirname, mixelm, perlen=1000, isothm=1, sp2=0.0, ttsmult=1.2):
)
dsp = flopy.mt3d.Mt3dDsp(mt, al=al, trpt=trpt, trpv=trpv)
ssm = flopy.mt3d.Mt3dSsm(mt, crch=0.0)
- rct = flopy.mt3d.Mt3dRct(
- mt, isothm=isothm, igetsc=0, rhob=1.7, sp1=0.176, sp2=sp2
- )
+ rct = flopy.mt3d.Mt3dRct(mt, isothm=isothm, igetsc=0, rhob=1.7, sp1=0.176, sp2=sp2)
mxiter = 1
if isothm == 4:
mxiter = 50
@@ -1774,12 +1792,8 @@ def p10(dirname, mixelm, perlen=1000, isothm=1, sp2=0.0, ttsmult=1.2):
mf, mt, conctvd, cvttvd, mvt0 = p10("p10", 0, perlen=2000, isothm=0)
mf, mt, conctvd, cvttvd, mvt1 = p10("p10", 0, perlen=2000, isothm=1)
mf, mt, conctvd, cvttvd, mvt2 = p10("p10", 0, perlen=2000, isothm=4, sp2=0.1)
-mf, mt, conctvd, cvttvd, mvt3 = p10(
- "p10", 0, perlen=2000, isothm=4, sp2=1.5e-4
-)
-mf, mt, conctvd, cvttvd, mvt4 = p10(
- "p10", 0, perlen=2000, isothm=4, sp2=1.0e-6
-)
+mf, mt, conctvd, cvttvd, mvt3 = p10("p10", 0, perlen=2000, isothm=4, sp2=1.5e-4)
+mf, mt, conctvd, cvttvd, mvt4 = p10("p10", 0, perlen=2000, isothm=4, sp2=1.0e-6)
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1)
diff --git a/.docs/Notebooks/mt3dms_sft_lkt_uzt_tutorial.py b/.docs/Notebooks/mt3dms_sft_lkt_uzt_tutorial.py
index a743fb31c4..35fdc2d670 100644
--- a/.docs/Notebooks/mt3dms_sft_lkt_uzt_tutorial.py
+++ b/.docs/Notebooks/mt3dms_sft_lkt_uzt_tutorial.py
@@ -31,11 +31,14 @@
# +
import os
import sys
+from pathlib import Path
from tempfile import TemporaryDirectory
+import git
import matplotlib as mpl
import numpy as np
import pandas as pd
+import pooch
import flopy
@@ -55,6 +58,15 @@
temp_dir = TemporaryDirectory()
model_ws = temp_dir.name
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
modelpth = os.path.join(model_ws, "no3")
modelname = "no3"
mfexe = "mfnwt"
@@ -124,29 +136,29 @@
# ### Instantiate discretization (DIS) package for MODFLOW-NWT
# +
-elv_pth = os.path.join(
- "..",
- "..",
- "examples",
- "data",
- "mt3d_example_sft_lkt_uzt",
- "dis_arrays",
- "grnd_elv.txt",
-)
# Top of Layer 1 elevation determined using GW Vistas and stored locally
+fname = "grnd_elv.txt"
+folder_name = "mt3d_example_sft_lkt_uzt"
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{folder_name}/dis_arrays/{fname}",
+ fname=fname,
+ path=data_path / folder_name / "dis_arrays",
+ known_hash=None,
+)
+elv_pth = data_path / folder_name / "dis_arrays" / fname
grndElv = np.loadtxt(elv_pth)
# Bottom of layer 1 elevation also determined from use of GUI and stored locally
-bt1_pth = os.path.join(
- "..",
- "..",
- "examples",
- "data",
- "mt3d_example_sft_lkt_uzt",
- "dis_arrays",
- "bot1.txt",
-)
+fname = "bot1.txt"
+folder_name = "mt3d_example_sft_lkt_uzt"
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{folder_name}/dis_arrays/{fname}",
+ fname=fname,
+ path=data_path / folder_name / "dis_arrays",
+ known_hash=None,
+)
+bt1_pth = data_path / folder_name / "dis_arrays" / fname
bot1Elv = np.loadtxt(bt1_pth)
bot2Elv = np.ones(bot1Elv.shape) * 100
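The retrieve-then-load pattern above repeats many times in this notebook. A hypothetical `fetch` wrapper (not part of this change) could deduplicate it, again using the local path that `pooch.retrieve` returns:

def fetch(subdir, fname):
    # Download one array file for this example and return its local path.
    return pooch.retrieve(
        url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{folder_name}/{subdir}/{fname}",
        fname=fname,
        path=data_path / folder_name / subdir,
        known_hash=None,
    )

grndElv = np.loadtxt(fetch("dis_arrays", "grnd_elv.txt"))
bot1Elv = np.loadtxt(fetch("dis_arrays", "bot1.txt"))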
@@ -226,14 +238,13 @@
# ### Instantiate basic (BAS or BA6) package for MODFLOW-NWT
# +
-ibnd1_pth = os.path.join(
- "..",
- "..",
- "examples",
- "data",
- "mt3d_example_sft_lkt_uzt",
- "bas_arrays",
- "ibnd_lay1.txt",
+fname = "ibnd_lay1.txt"
+ibnd1_pth = data_path / folder_name / "bas_arrays" / fname
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{folder_name}/bas_arrays/{fname}",
+ fname=fname,
+ path=data_path / folder_name / "bas_arrays",
+ known_hash=None,
)
ibnd1 = np.loadtxt(ibnd1_pth)
ibnd2 = np.ones(ibnd1.shape)
@@ -242,36 +253,33 @@
ibnd = [ibnd1, ibnd2, ibnd3]
ibnd = np.array(ibnd)
-StHd1_pth = os.path.join(
- "..",
- "..",
- "examples",
- "data",
- "mt3d_example_sft_lkt_uzt",
- "bas_arrays",
- "strthd1.txt",
+fname = "strthd1.txt"
+StHd1_pth = data_path / folder_name / "bas_arrays" / fname
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{folder_name}/bas_arrays/{fname}",
+ fname=fname,
+ path=data_path / folder_name / "bas_arrays",
+ known_hash=None,
)
StHd1 = np.loadtxt(StHd1_pth)
-StHd2_pth = os.path.join(
- "..",
- "..",
- "examples",
- "data",
- "mt3d_example_sft_lkt_uzt",
- "bas_arrays",
- "strthd2.txt",
+fname = "strthd2.txt"
+StHd2_pth = data_path / folder_name / "bas_arrays" / fname
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{folder_name}/bas_arrays/{fname}",
+ fname=fname,
+ path=data_path / folder_name / "bas_arrays",
+ known_hash=None,
)
StHd2 = np.loadtxt(StHd2_pth)
-StHd3_pth = os.path.join(
- "..",
- "..",
- "examples",
- "data",
- "mt3d_example_sft_lkt_uzt",
- "bas_arrays",
- "strthd3.txt",
+fname = "strthd3.txt"
+StHd3_pth = data_path / folder_name / "bas_arrays" / fname
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{folder_name}/bas_arrays/{fname}",
+ fname=fname,
+ path=data_path / folder_name / "bas_arrays",
+ known_hash=None,
)
StHd3 = np.loadtxt(StHd3_pth)
@@ -295,36 +303,16 @@
sp = []
for k in [0, 1, 2]:  # These indices need to be adjusted for 0-based indexing
- for i in [
- 0,
- 299,
- ]: # These indices need to be adjusted for 0-based silliness
- for j in np.arange(
- 0, 300, 1
- ): # These indices need to be adjusted for 0-based foolishness
- # Skipping cells not satisfying the conditions below
- if (i == 1 and (j < 27 or j > 31)) or (
- i == 299 and (j < 26 or j > 31)
- ):
+    for i in [0, 299]:  # These indices are likewise adjusted for 0-based indexing
+        # Skipping cells not satisfying the conditions below
+        for j in np.arange(0, 300, 1):
+ if (i == 1 and (j < 27 or j > 31)) or (i == 299 and (j < 26 or j > 31)):
if i % 2 == 0:
- sp.append(
- [
- k,
- i,
- j,
- elev_stpt_row1 - (elev_slp * (j - 1)),
- 11.3636,
- ]
- )
+ sp.append([k, i, j, elev_stpt_row1 - (elev_slp * (j - 1)), 11.3636])
else:
sp.append(
- [
- k,
- i,
- j,
- elev_stpt_row300 - (elev_slp * (j - 1)),
- 11.3636,
- ]
+ [k, i, j, elev_stpt_row300 - (elev_slp * (j - 1)), 11.3636]
)
@@ -342,28 +330,26 @@
# Remember that the cell indices stored in the pre-prepared NO3_ReachInput.csv file are based on 0-based indexing.
# Flopy will convert to 1-based when it writes the files
-rpth = os.path.join(
- "..",
- "..",
- "examples",
- "data",
- "mt3d_example_sft_lkt_uzt",
- "sfr_data",
- "no3_reachinput.csv",
+fname = "no3_reachinput.csv"
+rpth = data_path / folder_name / "sfr_data" / fname
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{folder_name}/sfr_data/{fname}",
+ fname=fname,
+ path=data_path / folder_name / "sfr_data",
+ known_hash=None,
)
reach_data = np.genfromtxt(rpth, delimiter=",", names=True)
reach_data
# Read pre-prepared segment data into numpy recarrays using numpy.genfromtxt()
-spth = os.path.join(
- "..",
- "..",
- "examples",
- "data",
- "mt3d_example_sft_lkt_uzt",
- "sfr_data",
- "no3_segmentdata.csv",
+fname = "no3_segmentdata.csv"
+spth = data_path / folder_name / "sfr_data" / fname
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{folder_name}/sfr_data/{fname}",
+ fname=fname,
+ path=data_path / folder_name / "sfr_data",
+ known_hash=None,
)
ss_segment_data = np.genfromtxt(spth, delimiter=",", names=True)
segment_data = {0: ss_segment_data, 1: ss_segment_data}
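As a quick illustration of what `numpy.genfromtxt(..., names=True)` produces here: the result is a structured array whose fields are the CSV header names, so columns can be inspected by name before handing the recarray to FloPy (which converts the 0-based indices to 1-based on write):

print(reach_data.dtype.names)  # field names taken from the CSV header row
print(segment_data[0].dtype.names)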
@@ -404,14 +390,13 @@
# +
# Read pre-prepared lake arrays
-LakArr_pth = os.path.join(
- "..",
- "..",
- "examples",
- "data",
- "mt3d_example_sft_lkt_uzt",
- "lak_arrays",
- "lakarr1.txt",
+fname = "lakarr1.txt"
+LakArr_pth = data_path / folder_name / "lak_arrays" / fname
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{folder_name}/lak_arrays/{fname}",
+ fname=fname,
+ path=data_path / folder_name / "lak_arrays",
+ known_hash=None,
)
LakArr_lyr1 = np.loadtxt(LakArr_pth)
LakArr_lyr2 = np.zeros(LakArr_lyr1.shape)
@@ -423,7 +408,7 @@
nlakes = int(np.max(LakArr))
ipakcb = ipakcb # From above
theta = -1.0 # Implicit
-nssitr = 10 # Maximum number of iterations for Newton’s method
+nssitr = 10 # Maximum number of iterations for Newton's method
sscncr = 1.000e-03 # Convergence criterion for equilibrium lake stage solution
surfdep = 2.000e00  # Height of small topographical variations in lake-bottom
stages = 268.00 # Initial stage of each lake at the beginning of the run
@@ -515,9 +500,7 @@
]
numgage = len(gages)
-gage = flopy.modflow.ModflowGage(
- mf, numgage=numgage, gage_data=gages, filenames=files
-)
+gage = flopy.modflow.ModflowGage(mf, numgage=numgage, gage_data=gages, filenames=files)
# -
# ### Instantiate Unsaturated-Zone Flow (UZF) package for MODFLOW-NWT
@@ -538,23 +521,21 @@
thts = 0.30
thti = 0.13079
-fname_uzbnd = os.path.join(
- "..",
- "..",
- "examples",
- "data",
- "mt3d_example_sft_lkt_uzt",
- "uzf_arrays",
- "iuzbnd.txt",
+fname = "iuzbnd.txt"
+fname_uzbnd = data_path / folder_name / "uzf_arrays" / fname
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{folder_name}/uzf_arrays/{fname}",
+ fname=fname,
+ path=data_path / folder_name / "uzf_arrays",
+ known_hash=None,
)
-fname_runbnd = os.path.join(
- "..",
- "..",
- "examples",
- "data",
- "mt3d_example_sft_lkt_uzt",
- "uzf_arrays",
- "irunbnd.txt",
+fname = "irunbnd.txt"
+fname_runbnd = data_path / folder_name / "uzf_arrays" / fname
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{folder_name}/uzf_arrays/{fname}",
+ fname=fname,
+ path=data_path / folder_name / "uzf_arrays",
+ known_hash=None,
)
iuzfbnd = np.loadtxt(fname_uzbnd)
@@ -589,23 +570,21 @@
# ### Instantiate Drain (DRN) package for MODFLOW-NWT
# +
-fname_drnElv = os.path.join(
- "..",
- "..",
- "examples",
- "data",
- "mt3d_example_sft_lkt_uzt",
- "drn_arrays",
- "elv.txt",
-)
-fname_drnCond = os.path.join(
- "..",
- "..",
- "examples",
- "data",
- "mt3d_example_sft_lkt_uzt",
- "drn_arrays",
- "cond.txt",
+fname = "elv.txt"
+fname_drnElv = data_path / folder_name / "drn_arrays" / fname
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{folder_name}/drn_arrays/{fname}",
+ fname=fname,
+ path=data_path / folder_name / "drn_arrays",
+ known_hash=None,
+)
+fname = "cond.txt"
+fname_drnCond = data_path / folder_name / "drn_arrays" / fname
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{folder_name}/drn_arrays/{fname}",
+ fname=fname,
+ path=data_path / folder_name / "drn_arrays",
+ known_hash=None,
)
drnElv = np.loadtxt(fname_drnElv)
@@ -628,9 +607,7 @@
# Create a dictionary, 1 entry for each of the two stress periods.
stress_period_data = {0: stress_period_data, 1: stress_period_data}
-drn = flopy.modflow.ModflowDrn(
- mf, ipakcb=ipakcb, stress_period_data=stress_period_data
-)
+drn = flopy.modflow.ModflowDrn(mf, ipakcb=ipakcb, stress_period_data=stress_period_data)
# -
# ### Instantiate linkage with mass transport routing (LMT) package for MODFLOW-NWT (generates linker file)
@@ -712,9 +689,7 @@
mxpart = 5000
nadvfd = 1 # (1 = Upstream weighting)
-adv = flopy.mt3d.Mt3dAdv(
- mt, mixelm=mixelm, percel=percel, mxpart=mxpart, nadvfd=nadvfd
-)
+adv = flopy.mt3d.Mt3dAdv(mt, mixelm=mixelm, percel=percel, mxpart=mxpart, nadvfd=nadvfd)
# -
# ### Instantiate generalized conjugate gradient solver (GCG) package for MT3D-USGS
@@ -748,9 +723,7 @@
trpv = 0.1  # ratio of the vertical transverse dispersivity to 'AL'
dmcoef = 1.0000e-10
-dsp = flopy.mt3d.Mt3dDsp(
- mt, al=al, trpt=trpt, trpv=trpv, dmcoef=dmcoef, multiDiff=True
-)
+dsp = flopy.mt3d.Mt3dDsp(mt, al=al, trpt=trpt, trpv=trpv, dmcoef=dmcoef, multiDiff=True)
# -
# ### Instantiate source-sink mixing (SSM) package for MT3D-USGS
diff --git a/.docs/Notebooks/nwt_option_blocks_tutorial.py b/.docs/Notebooks/nwt_option_blocks_tutorial.py
index 0bd0d45645..ac9f33b665 100644
--- a/.docs/Notebooks/nwt_option_blocks_tutorial.py
+++ b/.docs/Notebooks/nwt_option_blocks_tutorial.py
@@ -26,8 +26,12 @@
# +
import os
import sys
+from pathlib import Path
from tempfile import TemporaryDirectory
+import git
+import pooch
+
import flopy
from flopy.utils import OptionBlock
@@ -35,13 +39,40 @@
print(f"flopy version: {flopy.__version__}")
# +
-load_ws = os.path.join("..", "..", "examples", "data", "options", "sagehen")
# temporary directory
temp_dir = TemporaryDirectory()
model_ws = os.path.join(temp_dir.name, "nwt_options", "output")
# -
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
+file_names = {
+ "sagehen.bas": None,
+ "sagehen.dis": None,
+ "sagehen.lpf": None,
+ "sagehen.nam": None,
+ "sagehen.nwt": None,
+ "sagehen.oc": None,
+ "sagehen.sfr": None,
+ "sagehen.uzf": None,
+ "sagehen.wel": None,
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/options/sagehen/{fname}",
+ fname=fname,
+ path=data_path / "options" / "sagehen",
+ known_hash=None,
+ )
+
# ## Loading a MODFLOW-NWT model that has option block options
#
# It is critical to set the `version` flag in `flopy.modflow.Modflow.load()` to `version='mfnwt'`
@@ -52,7 +83,10 @@
mfexe = "mfnwt"
ml = flopy.modflow.Modflow.load(
- "sagehen.nam", model_ws=load_ws, exe_name=mfexe, version="mfnwt"
+ "sagehen.nam",
+ model_ws=data_path / "options" / "sagehen",
+ exe_name=mfexe,
+ version="mfnwt",
)
ml.change_model_ws(new_pth=model_ws)
ml.write_input()
@@ -103,9 +137,7 @@
# And let's load the new UZF file
-uzf2 = flopy.modflow.ModflowUzf1.load(
- os.path.join(model_ws, uzf_name), ml, check=False
-)
+uzf2 = flopy.modflow.ModflowUzf1.load(os.path.join(model_ws, uzf_name), ml, check=False)
# ### Now we can look at the options object, and check if it's block or line format
#
@@ -121,9 +153,7 @@
uzf2.write_file(os.path.join(model_ws, uzf_name))
ml.remove_package("UZF")
-uzf3 = flopy.modflow.ModflowUzf1.load(
- os.path.join(model_ws, uzf_name), ml, check=False
-)
+uzf3 = flopy.modflow.ModflowUzf1.load(os.path.join(model_ws, uzf_name), ml, check=False)
print("\n")
print(uzf3.options)
print(uzf3.options.block)
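A sketch of switching a package between the two formats, assuming `OptionBlock` exposes a writable boolean `block` attribute, as the checks above suggest:

uzf3.options.block = not uzf3.options.block  # toggle block vs. line format
uzf3.write_file(os.path.join(model_ws, uzf_name))  # rewrite in the new format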
@@ -193,10 +223,7 @@
# +
wel3 = flopy.modflow.ModflowWel(
- ml,
- stress_period_data=wel.stress_period_data,
- options=options,
- unitnumber=99,
+ ml, stress_period_data=wel.stress_period_data, options=options, unitnumber=99
)
wel3.write_file(os.path.join(model_ws, wel_name))
diff --git a/.docs/Notebooks/pest_tutorial01.py b/.docs/Notebooks/pest_tutorial01.py
index da39e76560..8b591c5f75 100644
--- a/.docs/Notebooks/pest_tutorial01.py
+++ b/.docs/Notebooks/pest_tutorial01.py
@@ -69,9 +69,7 @@
ubound = 1000.0
transform = "log"
-p = flopy.pest.Params(
- mfpackage, partype, parname, startvalue, lbound, ubound, span
-)
+p = flopy.pest.Params(mfpackage, partype, parname, startvalue, lbound, ubound, span)
# -
# At this point, we have enough information to write a PEST template file for the LPF package. We can do this using the following statement:
@@ -101,9 +99,7 @@
ubound = 1000.0
transform = "log"
-p = flopy.pest.Params(
- mfpackage, partype, parname, startvalue, lbound, ubound, span
-)
+p = flopy.pest.Params(mfpackage, partype, parname, startvalue, lbound, ubound, span)
tw = flopy.pest.templatewriter.TemplateWriter(m, [p])
tw.write_template()
# -
@@ -193,9 +189,7 @@
# For a recharge multiplier, span['idx'] must be None
idx = None
span = {"kpers": [0, 1, 2], "idx": idx}
-p = flopy.pest.Params(
- mfpackage, partype, parname, startvalue, lbound, ubound, span
-)
+p = flopy.pest.Params(mfpackage, partype, parname, startvalue, lbound, ubound, span)
plist.append(p)
# -
@@ -224,9 +218,7 @@
# For a recharge multiplier, span['idx'] must be None
span = {"kpers": [1, 2], "idx": None}
-p = flopy.pest.Params(
- mfpackage, partype, parname, startvalue, lbound, ubound, span
-)
+p = flopy.pest.Params(mfpackage, partype, parname, startvalue, lbound, ubound, span)
plist.append(p)
# +
@@ -243,9 +235,7 @@
idx = np.empty((nrow, ncol), dtype=bool)
idx[0:3, 0:3] = True
span = {"kpers": [1], "idx": idx}
-p = flopy.pest.Params(
- mfpackage, partype, parname, startvalue, lbound, ubound, span
-)
+p = flopy.pest.Params(mfpackage, partype, parname, startvalue, lbound, ubound, span)
plist.append(p)
# +
diff --git a/.docs/Notebooks/plot_array_example.py b/.docs/Notebooks/plot_array_example.py
index e97435a09f..f3608ce56e 100644
--- a/.docs/Notebooks/plot_array_example.py
+++ b/.docs/Notebooks/plot_array_example.py
@@ -23,10 +23,13 @@
import os
import sys
+from pathlib import Path
from tempfile import TemporaryDirectory
+import git
import matplotlib as mpl
import numpy as np
+import pooch
# +
from IPython.display import Image
@@ -44,12 +47,67 @@
version = "mf2005"
exe_name = "mf2005"
-# Set the paths
-loadpth = os.path.join("..", "..", "examples", "data", "secp")
+
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
+file_names = {
+ "secp.ba6": None,
+ "secp.chd": None,
+ "secp.dis": None,
+ "secp.gmg": None,
+ "secp.lpf": None,
+ "secp.mlt": None,
+ "secp.nam": None,
+ "secp.oc": None,
+ "secp.rch": None,
+ "secp.riv": None,
+ "secp.wel": None,
+ "secp.zon": None,
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/secp/{fname}",
+ fname=fname,
+ path=data_path / "secp",
+ known_hash=None,
+ )
+
+file_names = {
+ "HK1.DAT": None,
+ "HK10.DAT": None,
+ "HK11.DAT": None,
+ "HK12.DAT": None,
+ "HK13.DAT": None,
+ "HK14.DAT": None,
+ "HK15.DAT": None,
+ "HK16.DAT": None,
+ "HK2.DAT": None,
+ "HK3.DAT": None,
+ "HK4.DAT": None,
+ "HK5.DAT": None,
+ "HK6.DAT": None,
+ "HK7.DAT": None,
+ "HK8.DAT": None,
+ "HK9.DAT": None,
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/secp/ref/{fname}",
+ fname=fname,
+ path=data_path / "secp" / "ref",
+ known_hash=None,
+ )
# temporary directory
temp_dir = TemporaryDirectory()
-modelpth = temp_dir.name
+modelpth = Path(temp_dir.name)
# make sure modelpth directory exists
if not os.path.isdir(modelpth):
@@ -63,14 +121,13 @@
# +
ml = flopy.modflow.Modflow.load(
- "secp.nam", model_ws=loadpth, exe_name=exe_name, version=version
+ "secp.nam", model_ws=data_path / "secp", exe_name=exe_name, version=version
)
ml.change_model_ws(new_pth=modelpth)
ml.write_input()
success, buff = ml.run_model(silent=True)
-if not success:
- print("Something bad happened.")
+assert success
# confirm that the model files have been created
for f in files:
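The bare `assert success` above discards the model's output on failure. A slightly more informative variant, mirroring the `pformat(buff)` idiom used by other notebooks in this diff (it assumes `from pprint import pformat` is added to the imports):

success, buff = ml.run_model(silent=True, report=True)
assert success, pformat(buff)  # show the captured MODFLOW output on failure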
diff --git a/.docs/Notebooks/plot_cross_section_example.py b/.docs/Notebooks/plot_cross_section_example.py
index d5d2ef381b..68f4c61516 100644
--- a/.docs/Notebooks/plot_cross_section_example.py
+++ b/.docs/Notebooks/plot_cross_section_example.py
@@ -27,12 +27,16 @@
# + pycharm={"name": "#%%\n"}
import os
import sys
+from pathlib import Path
from pprint import pformat
+from shutil import copytree
from tempfile import TemporaryDirectory
+import git
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
+import pooch
import flopy
@@ -49,10 +53,41 @@
vmf6 = "mf6"
exe_name_mf6 = "mf6"
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+sim_name = "freyberg"
+
+file_names = {
+ "freyberg.bas": "63266024019fef07306b8b639c6c67d5e4b22f73e42dcaa9db18b5e0f692c097",
+ "freyberg.dis": "62d0163bf36c7ee9f7ee3683263e08a0abcdedf267beedce6dd181600380b0a2",
+ "freyberg.githds": "abe92497b55e6f6c73306e81399209e1cada34cf794a7867d776cfd18303673b",
+ "freyberg.gitlist": "aef02c664344a288264d5f21e08a748150e43bb721a16b0e3f423e6e3e293056",
+ "freyberg.lpf": "06500bff979424f58e5e4fbd07a7bdeb0c78f31bd08640196044b6ccefa7a1fe",
+ "freyberg.nam": "e66321007bb603ef55ed2ba41f4035ba6891da704a4cbd3967f0c66ef1532c8f",
+ "freyberg.oc": "532905839ccbfce01184980c230b6305812610b537520bf5a4abbcd3bd703ef4",
+ "freyberg.pcg": "0d1686fac4680219fffdb56909296c5031029974171e25d4304e70fa96ebfc38",
+ "freyberg.rch": "37a1e113a7ec16b61417d1fa9710dd111a595de738a367bd34fd4a359c480906",
+ "freyberg.riv": "7492a1d5eb23d6812ec7c8227d0ad4d1e1b35631a765c71182b71e3bd6a6d31d",
+ "freyberg.wel": "00aa55f59797c02f0be5318a523b36b168fc6651f238f34e8b0938c04292d3e7",
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/{fname}",
+ fname=fname,
+ path=data_path / sim_name,
+ known_hash=fhash,
+ )
+
+
# Set the paths
-loadpth = os.path.join("..", "..", "examples", "data", "freyberg")
tempdir = TemporaryDirectory()
-modelpth = tempdir.name
+modelpth = Path(tempdir.name)
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Load and Run an Existing MODFLOW-2005 Model
@@ -60,7 +95,7 @@
# +
ml = flopy.modflow.Modflow.load(
- "freyberg.nam", model_ws=loadpth, exe_name=exe_name_2005, version=v2005
+ "freyberg.nam", model_ws=data_path / sim_name, exe_name=exe_name_2005, version=v2005
)
ml.change_model_ws(new_pth=str(modelpth))
ml.write_input()
@@ -181,9 +216,7 @@
csa = xsect.plot_array(a)
patches = xsect.plot_ibound()
linecollection = xsect.plot_grid()
-t = ax.set_title(
- "Column 6 Cross-Section with Horizontal hydraulic conductivity"
-)
+t = ax.set_title("Column 6 Cross-Section with Horizontal hydraulic conductivity")
cb = plt.colorbar(csa, shrink=0.75)
# + [markdown] pycharm={"name": "#%% md\n"}
@@ -321,6 +354,60 @@
#
# Let's plot the shapefiles and the Freyberg model using `PlotMapView` for visualization purposes and then plot the cross-section.
+file_names = {
+ "bedrock_outcrop_hole.dbf": "c48510bc0b04405e4d3433e6cd892351c8342a7c46215f48332a7e6292249da6",
+ "bedrock_outcrop_hole.sbn": "48fd1496d84822c9637d7f3065edf4dfa2038406be8fa239cb451b1a3b28127c",
+ "bedrock_outcrop_hole.sbx": "9a36aee5f3a4bcff0a453ab743a7523ea19acb8841e8273bbda34f27d7237ea5",
+ "bedrock_outcrop_hole.shp": "25c241ac90dd47be28f761ba60ba94a511744f5219600e35a80a93f19ec99f97",
+ "bedrock_outcrop_hole.shx": "88b06395fa4c58ea04d300e10e6f6ea81e17fb0baa20d8ac78470d19101430be",
+ "bedrock_outcrop_hole_rotate14.dbf": "e05bbfc826fc069666a05e949acc833b54de51b14267c9c54b1c129b4a8ab82d",
+ "bedrock_outcrop_hole_rotate14.sbn": "136d8f86b8a13abc8f0386108228ca398037cf8c28ba6077086fd7e1fd54abf7",
+ "bedrock_outcrop_hole_rotate14.sbx": "1c2f2f2791db9c752fb1b355f13e46a8740ccd66654ae34d130172a3bdcda805",
+ "bedrock_outcrop_hole_rotate14.shp": "3e722d8fa9331ab498dbf9544085b30f60d2e38cc82a0955792d11a4e6a4419d",
+ "bedrock_outcrop_hole_rotate14.shp.xml": "ff6a3e80d10d9e68863ffe224e8130b862c13c2265d3a604342eb20a700d38fd",
+ "bedrock_outcrop_hole_rotate14.shx": "32a75461fab39b21769c474901254e7cbd24073c53d62b494fd70080cfcd3383",
+ "cross_section.cpg": "3ad3031f5503a4404af825262ee8232cc04d4ea6683d42c5dd0a2f2a27ac9824",
+ "cross_section.dbf": "3b050b1d296a7efe1b4f001c78030d5c81f79d3cd101d459e4426944fbd4e8e7",
+ "cross_section.sbn": "3b6a8f72f78f7b0d12e5823d6e8307040cfd5af88a8fb9427687d027aa805126",
+ "cross_section.sbx": "72e33139aaa99a8d12922af3774bd6b1a73613fc1bc852d1a1d1426ef48a832a",
+ "cross_section.shp": "0eb9e37dcbdbb5d932101c4c5bcb971271feb2c1d81d2a5f8dbc0fbf8d799ee5",
+ "cross_section.shp.xml": "ff99002ecd63a843fe628c107dfb02926b6838132c6f503db38b792644fb368e",
+ "cross_section.shx": "c6fa1307e1c32c535842796b24b2a0a07865065ace3324b0f6b1b71e9c1a8e1e",
+ "cross_section_rotate14.cpg": "3ad3031f5503a4404af825262ee8232cc04d4ea6683d42c5dd0a2f2a27ac9824",
+ "cross_section_rotate14.dbf": "72f8ed25c45a92822fe593862e543ae4167357cbc8fba4f24b889aa2bbf2729a",
+ "cross_section_rotate14.sbn": "3f7a3b66cf58be8c979353d2c75777303035e19ff58d96a089dde5c95fa8b597",
+ "cross_section_rotate14.sbx": "7d40bc92b42fde2af01a2805c9205c18c0fe89ae7cf1ba88ac6627b7c6a69b89",
+ "cross_section_rotate14.shp": "5f0ea7a65b5ddc9a43c874035969e30d58ae578aec9feb6b0e8538b68d5bd0d2",
+ "cross_section_rotate14.shp.xml": "79e38d9542ce764ace47883c673cf1d9aab16cd7851ae62a8e9bf27ce1091e13",
+ "cross_section_rotate14.shx": "b750b9d44ef31e0c593e2f78acfc08813667bb73733e6524f1b417e605cae65d",
+ "model_extent.cpg": "3ad3031f5503a4404af825262ee8232cc04d4ea6683d42c5dd0a2f2a27ac9824",
+ "model_extent.dbf": "72f8ed25c45a92822fe593862e543ae4167357cbc8fba4f24b889aa2bbf2729a",
+ "model_extent.sbn": "622376387ac9686e54acc6c57ace348c217d3a82e626274f32911a1d0006a164",
+ "model_extent.sbx": "2957bc1b5c918e20089fb6f6998d60d4488995d174bac21afa8e3a2af90b3489",
+ "model_extent.shp": "c72d5a4c703100e98c356c7645ad4b0bcc124c55e0757e55c8cd8663c7bf15c6",
+ "model_extent.shx": "e8d3b5618f0c248b59284f4f795f5de8207aec5b15ed60ce8da5a021c1043e2f",
+ "wells_locations.dbf": "965c846ec0b8f0d27570ef0bdaadfbcb6e718ed70ab89c8dda01d3b819e7a7de",
+ "wells_locations.sbn": "63f8ad670c6ba53ddec13069e42cfd86f27b6d47c5d0b3f2c25dfd6fb6b55825",
+ "wells_locations.sbx": "8420907d426c44c38315a5bdc0b24fe07a8cd2cc9a7fc60b817500b8cda79a34",
+ "wells_locations.shp": "ee53a4532b513f5b8bcd37ee3468dc4b2c8f6afab6cfc5110d74362c79e52287",
+ "wells_locations.shx": "6e816e96ed0726c2acc61392d2a82df5e9265ab5f5b00dd12f765b139840be79",
+ "wells_locations_rotate14.dbf": "d9b3636b4312c2f76c837e698bcb0d8ef9f4bbaa1765c484787a9f9d7f8bbaae",
+ "wells_locations_rotate14.sbn": "b436e34b8f145966b18d571b47ebc18e35671ec73fca1abbc737d9e1aa984bfb",
+ "wells_locations_rotate14.sbx": "24911f8905155882ce76b0330c9ba5ed449ca985d46833ebc45eee11faabbdaf",
+ "wells_locations_rotate14.shp": "695894af4678358320fb914e872cadb2613fae2e54c2d159f40c02fa558514cf",
+ "wells_locations_rotate14.shp.xml": "288183eb273c1fc2facb49d51c34bcafb16710189242da48f7717c49412f3e29",
+ "wells_locations_rotate14.shx": "da3374865cbf864f81dd69192ab616d1093d2159ac3c682fe2bfc4c295a28e42",
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/gis/{fname}",
+ fname=fname,
+ path=data_path / sim_name / "gis",
+ known_hash=fhash,
+ )
+
+copytree(data_path / sim_name / "gis", modelpth / "gis")
+
# + pycharm={"name": "#%%\n"}
# Set up the figure and PlotMapView. Show a very faint map of ibound and
# model grid by specifying a transparency alpha value.
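One caveat on the `copytree` call above: it raises `FileExistsError` if the destination already exists, so re-executing the cell fails. On Python 3.8+ the idempotent form used later in this diff for the unstructured-grid data would avoid that:

copytree(data_path / sim_name / "gis", modelpth / "gis", dirs_exist_ok=True)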
@@ -335,7 +422,7 @@
mapview = flopy.plot.PlotMapView(model=ml)
# Plot a shapefile of
-shp = os.path.join(loadpth, "gis", "bedrock_outcrop_hole_rotate14")
+shp = os.path.join(modelpth, "gis", "bedrock_outcrop_hole_rotate14")
patch_collection = mapview.plot_shapefile(
shp,
edgecolor="green",
@@ -343,13 +430,13 @@
alpha=0.5, # facecolor='none',
)
# Plot a shapefile of a cross-section line
-shp = os.path.join(loadpth, "gis", "cross_section_rotate14")
+shp = os.path.join(modelpth, "gis", "cross_section_rotate14")
patch_collection = mapview.plot_shapefile(
shp, radius=0, lw=3, edgecolor="red", facecolor="None"
)
# Plot a shapefile of well locations
-shp = os.path.join(loadpth, "gis", "wells_locations_rotate14")
+shp = os.path.join(modelpth, "gis", "wells_locations_rotate14")
patch_collection = mapview.plot_shapefile(shp, radius=100, facecolor="red")
# Plot the grid and boundary conditions over the top
@@ -363,7 +450,7 @@
# + pycharm={"name": "#%%\n"}
# get the vertices for cross-section lines in a shapefile
-fpth = os.path.join(loadpth, "gis", "cross_section_rotate14")
+fpth = os.path.join(modelpth, "gis", "cross_section_rotate14")
line = flopy.plot.plotutil.shapefile_get_vertices(fpth)
# Set up the figure
@@ -387,7 +474,7 @@
# + pycharm={"name": "#%%\n"}
# get the vertices for cross-section lines in a shapefile
-fpth = os.path.join(loadpth, "gis", "cross_section_rotate14")
+fpth = os.path.join(modelpth, "gis", "cross_section_rotate14")
line = flopy.plot.plotutil.shapefile_get_vertices(fpth)
# Set up the figure
@@ -409,12 +496,45 @@
#
# `PlotCrossSection` has support for MODFLOW-6 models and operates in the same fashion for Structured Grids, Vertex Grids, and Unstructured Grids. Here is a short example of how to plot with MODFLOW-6 structured grids using a version of the Freyberg model created for MODFLOW-6
+sim_name = "mf6-freyberg"
+sim_path = modelpth / "mf6"
+file_names = {
+ "bot.asc": "3107f907cb027460fd40ffc16cb797a78babb31988c7da326c9f500fba855b62",
+ "description.txt": "94093335eec6a24711f86d4d217ccd5a7716dd9e01cb6b732bc7757d41675c09",
+ "freyberg.cbc": "c8ad843b1da753eb58cf6c462ac782faf0ca433d6dcb067742d8bd698db271e3",
+ "freyberg.chd": "d8b8ada8d3978daea1758b315be983b5ca892efc7d69bf6b367ceec31e0dd156",
+ "freyberg.dis": "cac230a207cc8483693f7ba8ae29ce40c049036262eac4cebe17a4e2347a8b30",
+ "freyberg.dis.grb": "c8c26fb1fa4b210208134b286d895397cf4b3131f66e1d9dda76338502c7e96a",
+ "freyberg.hds": "926a06411ca658a89db6b5686f51ddeaf5b74ced81239cab1d43710411ba5f5b",
+ "freyberg.ic": "6efb56ee9cdd704b9a76fb9efd6dae750facc5426b828713f2d2cf8d35194120",
+ "freyberg.ims": "6dddae087d85417e3cdaa13e7b24165afb7f9575ab68586f3adb6c1b2d023781",
+ "freyberg.nam": "cee9b7b000fe35d2df26e878d09d465250a39504f87516c897e3fa14dcda081e",
+ "freyberg.npf": "81104d3546045fff0eddf5059465e560b83b492fa5a5acad1907ce18c2b9c15f",
+ "freyberg.oc": "c0715acd75eabcc42c8c47260a6c1abd6c784350983f7e2e6009ddde518b80b8",
+ "freyberg.rch": "a6ec1e0eda14fd2cdf618a5c0243a9caf82686c69242b783410d5abbcf971954",
+ "freyberg.riv": "a8cafc8c317cbe2acbb43e2f0cfe1188cb2277a7a174aeb6f3e6438013de8088",
+ "freyberg.sto": "74d748c2f0adfa0a32ee3f2912115c8f35b91011995b70c1ec6ae1c627242c41",
+ "freyberg.tdis": "9965cbb17caf5b865ea41a4ec04bcb695fe15a38cb539425fdc00abbae385cbe",
+ "freyberg.wel": "f19847de455598de52c05a4be745698c8cb589e5acfb0db6ab1f06ded5ff9310",
+ "k11.asc": "b6a8aa46ef17f7f096d338758ef46e32495eb9895b25d687540d676744f02af5",
+ "mfsim.nam": "6b8d6d7a56c52fb2bff884b3979e3d2201c8348b4bbfd2b6b9752863cbc9975e",
+ "top.asc": "3ad2b131671b9faca7f74c1dd2b2f41875ab0c15027764021a89f9c95dccaa6a",
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/{fname}",
+ fname=fname,
+ path=data_path / sim_name,
+ known_hash=fhash,
+ )
+
# +
# load the Freyberg model into mf6-flopy and run the simulation
-sim_name = "mfsim.nam"
-sim_path = os.path.join("..", "..", "examples", "data", "mf6-freyberg")
sim = flopy.mf6.MFSimulation.load(
- sim_name=sim_name, version=vmf6, exe_name=exe_name_mf6, sim_ws=sim_path
+ sim_name="mfsim.nam",
+ version=vmf6,
+ exe_name=exe_name_mf6,
+ sim_ws=data_path / sim_name,
)
sim.set_sim_path(modelpth)
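The idiom above — load with `sim_ws` pointing at the cached download, then `set_sim_path` to a scratch area — keeps the retrieved input files pristine. A condensed sketch of the remaining steps, mirroring the MODFLOW-2005 workflow earlier in this notebook (`pformat` is already imported here):

sim.write_simulation()  # write the input files to the scratch workspace
success, buff = sim.run_simulation(silent=True, report=True)
assert success, pformat(buff)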
@@ -459,9 +579,7 @@
csa = xsect.plot_array(a)
patches = xsect.plot_ibound()
linecollection = xsect.plot_grid()
-t = ax.set_title(
- "Column 6 Cross-Section with Horizontal hydraulic conductivity"
-)
+t = ax.set_title("Column 6 Cross-Section with Horizontal hydraulic conductivity")
cb = plt.colorbar(csa, shrink=0.75)
# + [markdown] pycharm={"name": "#%% md\n"}
@@ -479,9 +597,7 @@
cbc_file = os.path.join(modelpth, "freyberg.cbc")
cbc = flopy.utils.CellBudgetFile(cbc_file, precision="double")
spdis = cbc.get_data(text="SPDIS")[-1]
-qx, qy, qz = flopy.utils.postprocessing.get_specific_discharge(
- spdis, ml6, head=head
-)
+qx, qy, qz = flopy.utils.postprocessing.get_specific_discharge(spdis, ml6, head=head)
fig = plt.figure(figsize=(18, 5))
ax = fig.add_subplot(1, 1, 1)
@@ -555,17 +671,7 @@ def run_vertex_grid_example(ws):
xmax = 12 * delr
ymin = 8 * delc
ymax = 13 * delc
- rfpoly = [
- [
- [
- (xmin, ymin),
- (xmax, ymin),
- (xmax, ymax),
- (xmin, ymax),
- (xmin, ymin),
- ]
- ]
- ]
+ rfpoly = [[[(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax), (xmin, ymin)]]]
g.add_refinement_features(rfpoly, "polygon", 1, range(nlay))
rf1shp = os.path.join(gridgen_ws, "rf1")
@@ -573,17 +679,7 @@ def run_vertex_grid_example(ws):
xmax = 11 * delr
ymin = 9 * delc
ymax = 12 * delc
- rfpoly = [
- [
- [
- (xmin, ymin),
- (xmax, ymin),
- (xmax, ymax),
- (xmin, ymax),
- (xmin, ymin),
- ]
- ]
- ]
+ rfpoly = [[[(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax), (xmin, ymin)]]]
g.add_refinement_features(rfpoly, "polygon", 2, range(nlay))
rf2shp = os.path.join(gridgen_ws, "rf2")
@@ -591,17 +687,7 @@ def run_vertex_grid_example(ws):
xmax = 10 * delr
ymin = 10 * delc
ymax = 11 * delc
- rfpoly = [
- [
- [
- (xmin, ymin),
- (xmax, ymin),
- (xmax, ymax),
- (xmin, ymax),
- (xmin, ymin),
- ]
- ]
- ]
+ rfpoly = [[[(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax), (xmin, ymin)]]]
g.add_refinement_features(rfpoly, "polygon", 3, range(nlay))
g.build(verbose=False)
@@ -682,10 +768,7 @@ def run_vertex_grid_example(ws):
# welspd = flopy.mf6.ModflowGwfwel.stress_period_data.empty(gwf, maxbound=1, aux_vars=['iface'])
welspd = [[(2, icpl), -150000, 0] for icpl in welcells["nodenumber"]]
wel = flopy.mf6.ModflowGwfwel(
- gwf,
- print_input=True,
- auxiliary=[("iface",)],
- stress_period_data=welspd,
+ gwf, print_input=True, auxiliary=[("iface",)], stress_period_data=welspd
)
# rch
@@ -696,9 +779,7 @@ def run_vertex_grid_example(ws):
# riv
riverline = [[(Lx - 1.0, Ly), (Lx - 1.0, 0.0)]]
rivcells = g.intersect(riverline, "line", 0)
- rivspd = [
- [(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"]
- ]
+ rivspd = [[(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"]]
riv = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=rivspd)
# output control
@@ -1130,10 +1211,7 @@ def build_mf6gwf(sim_folder):
pname="CHD-1",
)
flopy.mf6.ModflowGwfrch(
- gwf,
- stress_period_data=rchspd,
- auxiliary=["concentration"],
- pname="RCH-1",
+ gwf, stress_period_data=rchspd, auxiliary=["concentration"], pname="RCH-1"
)
head_filerecord = f"{name}.hds"
@@ -1198,9 +1276,7 @@ def build_mf6gwt(sim_folder):
("GWFHEAD", "../mf6gwf/flow.hds"),
("GWFBUDGET", "../mf6gwf/flow.bud"),
]
- flopy.mf6.ModflowGwtfmi(
- gwt, flow_imbalance_correction=True, packagedata=pd
- )
+ flopy.mf6.ModflowGwtfmi(gwt, flow_imbalance_correction=True, packagedata=pd)
sourcerecarray = [
("RCH-1", "AUX", "CONCENTRATION"),
]
@@ -1227,10 +1303,7 @@ def build_mf6gwt(sim_folder):
saverecord=saverecord,
printrecord=[
("CONCENTRATION", "LAST"),
- (
- "BUDGET",
- "ALL",
- ),
+ ("BUDGET", "ALL"),
],
)
obs_data = {
@@ -1239,9 +1312,7 @@ def build_mf6gwt(sim_folder):
("obs2", "CONCENTRATION", obs2),
],
}
- flopy.mf6.ModflowUtlobs(
- gwt, digits=10, print_input=True, continuous=obs_data
- )
+ flopy.mf6.ModflowUtlobs(gwt, digits=10, print_input=True, continuous=obs_data)
return sim
@@ -1322,9 +1393,7 @@ def run_keating_model(ws=example_name, silent=True):
# set labels using styles
styles.xlabel(label="x-position (m)")
styles.ylabel(label="elevation (m)")
- styles.heading(
- letter="A.", heading="Simulated hydraulic head", fontsize=10
- )
+ styles.heading(letter="A.", heading="Simulated hydraulic head", fontsize=10)
ax.set_aspect(1.0)
# + [markdown] pycharm={"name": "#%% md\n"}
diff --git a/.docs/Notebooks/plot_map_view_example.py b/.docs/Notebooks/plot_map_view_example.py
index bb79a16e99..ef3a3e8dc6 100644
--- a/.docs/Notebooks/plot_map_view_example.py
+++ b/.docs/Notebooks/plot_map_view_example.py
@@ -28,12 +28,16 @@
# +
import os
import sys
+from pathlib import Path
from pprint import pformat
+from shutil import copytree
from tempfile import TemporaryDirectory
+import git
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
+import pooch
import shapefile
import flopy
@@ -51,21 +55,51 @@
vmf6 = "mf6"
exe_name_mf6 = "mf6"
exe_mp = "mp6"
+sim_name = "freyberg"
# Set the paths
-loadpth = os.path.join("..", "..", "examples", "data", "freyberg")
tempdir = TemporaryDirectory()
-modelpth = tempdir.name
+modelpth = Path(tempdir.name)
+
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Load and Run an Existing MODFLOW-2005 Model
-# A model called the "Freyberg Model" is located in the loadpth folder. In the following code block, we load that model, then change into a new workspace (modelpth) where we recreate and run the model. For this to work properly, the MODFLOW-2005 executable (mf2005) must be in the path. We verify that it worked correctly by checking for the presence of freyberg.hds and freyberg.cbc.
+# A model called the "Freyberg Model" is located in the data_path folder. In the following code block, we load that model, then change into a new workspace (modelpth) where we recreate and run the model. For this to work properly, the MODFLOW-2005 executable (mf2005) must be in the path. We verify that it worked correctly by checking for the presence of freyberg.hds and freyberg.cbc.
+
+file_names = {
+ "freyberg.bas": "63266024019fef07306b8b639c6c67d5e4b22f73e42dcaa9db18b5e0f692c097",
+ "freyberg.dis": "62d0163bf36c7ee9f7ee3683263e08a0abcdedf267beedce6dd181600380b0a2",
+ "freyberg.githds": "abe92497b55e6f6c73306e81399209e1cada34cf794a7867d776cfd18303673b",
+ "freyberg.gitlist": "aef02c664344a288264d5f21e08a748150e43bb721a16b0e3f423e6e3e293056",
+ "freyberg.lpf": "06500bff979424f58e5e4fbd07a7bdeb0c78f31bd08640196044b6ccefa7a1fe",
+ "freyberg.nam": "e66321007bb603ef55ed2ba41f4035ba6891da704a4cbd3967f0c66ef1532c8f",
+ "freyberg.oc": "532905839ccbfce01184980c230b6305812610b537520bf5a4abbcd3bd703ef4",
+ "freyberg.pcg": "0d1686fac4680219fffdb56909296c5031029974171e25d4304e70fa96ebfc38",
+ "freyberg.rch": "37a1e113a7ec16b61417d1fa9710dd111a595de738a367bd34fd4a359c480906",
+ "freyberg.riv": "7492a1d5eb23d6812ec7c8227d0ad4d1e1b35631a765c71182b71e3bd6a6d31d",
+ "freyberg.wel": "00aa55f59797c02f0be5318a523b36b168fc6651f238f34e8b0938c04292d3e7",
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/{fname}",
+ fname=fname,
+ path=data_path / sim_name,
+ known_hash=fhash,
+ )
# +
ml = flopy.modflow.Modflow.load(
- "freyberg.nam", model_ws=loadpth, exe_name=exe_name_2005, version=v2005
+ "freyberg.nam", model_ws=data_path / sim_name, exe_name=exe_name_2005, version=v2005
)
-ml.change_model_ws(new_pth=modelpth)
+ml.change_model_ws(modelpth)
ml.write_input()
success, buff = ml.run_model(silent=True, report=True)
assert success, pformat(buff)
@@ -378,9 +412,7 @@
mapview = flopy.plot.PlotMapView(model=ml)
quadmesh = mapview.plot_ibound()
quadmesh = mapview.plot_array(head, alpha=0.5)
-quiver = mapview.plot_vector(
- sqx, sqy
-) # include the head array for specific discharge
+quiver = mapview.plot_vector(sqx, sqy) # include the head array for specific discharge
linecollection = mapview.plot_grid()
# + [markdown] pycharm={"name": "#%% md\n"}
@@ -434,6 +466,60 @@
#
# The shapefile must be in the same geographic coordinates as the `PlotMapView` object in order for it to overlay correctly on the plot. The `plot_shapefile()` method and function do not use any of the projection information that may be stored with the shapefile. If you reset `xoff`, `yoff`, and `angrot` in the `ml.modelgrid.set_coord_info()` call below, you will see that the grid will no longer overlay correctly with the shapefile.
+file_names = {
+ "bedrock_outcrop_hole.dbf": "c48510bc0b04405e4d3433e6cd892351c8342a7c46215f48332a7e6292249da6",
+ "bedrock_outcrop_hole.sbn": "48fd1496d84822c9637d7f3065edf4dfa2038406be8fa239cb451b1a3b28127c",
+ "bedrock_outcrop_hole.sbx": "9a36aee5f3a4bcff0a453ab743a7523ea19acb8841e8273bbda34f27d7237ea5",
+ "bedrock_outcrop_hole.shp": "25c241ac90dd47be28f761ba60ba94a511744f5219600e35a80a93f19ec99f97",
+ "bedrock_outcrop_hole.shx": "88b06395fa4c58ea04d300e10e6f6ea81e17fb0baa20d8ac78470d19101430be",
+ "bedrock_outcrop_hole_rotate14.dbf": "e05bbfc826fc069666a05e949acc833b54de51b14267c9c54b1c129b4a8ab82d",
+ "bedrock_outcrop_hole_rotate14.sbn": "136d8f86b8a13abc8f0386108228ca398037cf8c28ba6077086fd7e1fd54abf7",
+ "bedrock_outcrop_hole_rotate14.sbx": "1c2f2f2791db9c752fb1b355f13e46a8740ccd66654ae34d130172a3bdcda805",
+ "bedrock_outcrop_hole_rotate14.shp": "3e722d8fa9331ab498dbf9544085b30f60d2e38cc82a0955792d11a4e6a4419d",
+ "bedrock_outcrop_hole_rotate14.shp.xml": "ff6a3e80d10d9e68863ffe224e8130b862c13c2265d3a604342eb20a700d38fd",
+ "bedrock_outcrop_hole_rotate14.shx": "32a75461fab39b21769c474901254e7cbd24073c53d62b494fd70080cfcd3383",
+ "cross_section.cpg": "3ad3031f5503a4404af825262ee8232cc04d4ea6683d42c5dd0a2f2a27ac9824",
+ "cross_section.dbf": "3b050b1d296a7efe1b4f001c78030d5c81f79d3cd101d459e4426944fbd4e8e7",
+ "cross_section.sbn": "3b6a8f72f78f7b0d12e5823d6e8307040cfd5af88a8fb9427687d027aa805126",
+ "cross_section.sbx": "72e33139aaa99a8d12922af3774bd6b1a73613fc1bc852d1a1d1426ef48a832a",
+ "cross_section.shp": "0eb9e37dcbdbb5d932101c4c5bcb971271feb2c1d81d2a5f8dbc0fbf8d799ee5",
+ "cross_section.shp.xml": "ff99002ecd63a843fe628c107dfb02926b6838132c6f503db38b792644fb368e",
+ "cross_section.shx": "c6fa1307e1c32c535842796b24b2a0a07865065ace3324b0f6b1b71e9c1a8e1e",
+ "cross_section_rotate14.cpg": "3ad3031f5503a4404af825262ee8232cc04d4ea6683d42c5dd0a2f2a27ac9824",
+ "cross_section_rotate14.dbf": "72f8ed25c45a92822fe593862e543ae4167357cbc8fba4f24b889aa2bbf2729a",
+ "cross_section_rotate14.sbn": "3f7a3b66cf58be8c979353d2c75777303035e19ff58d96a089dde5c95fa8b597",
+ "cross_section_rotate14.sbx": "7d40bc92b42fde2af01a2805c9205c18c0fe89ae7cf1ba88ac6627b7c6a69b89",
+ "cross_section_rotate14.shp": "5f0ea7a65b5ddc9a43c874035969e30d58ae578aec9feb6b0e8538b68d5bd0d2",
+ "cross_section_rotate14.shp.xml": "79e38d9542ce764ace47883c673cf1d9aab16cd7851ae62a8e9bf27ce1091e13",
+ "cross_section_rotate14.shx": "b750b9d44ef31e0c593e2f78acfc08813667bb73733e6524f1b417e605cae65d",
+ "model_extent.cpg": "3ad3031f5503a4404af825262ee8232cc04d4ea6683d42c5dd0a2f2a27ac9824",
+ "model_extent.dbf": "72f8ed25c45a92822fe593862e543ae4167357cbc8fba4f24b889aa2bbf2729a",
+ "model_extent.sbn": "622376387ac9686e54acc6c57ace348c217d3a82e626274f32911a1d0006a164",
+ "model_extent.sbx": "2957bc1b5c918e20089fb6f6998d60d4488995d174bac21afa8e3a2af90b3489",
+ "model_extent.shp": "c72d5a4c703100e98c356c7645ad4b0bcc124c55e0757e55c8cd8663c7bf15c6",
+ "model_extent.shx": "e8d3b5618f0c248b59284f4f795f5de8207aec5b15ed60ce8da5a021c1043e2f",
+ "wells_locations.dbf": "965c846ec0b8f0d27570ef0bdaadfbcb6e718ed70ab89c8dda01d3b819e7a7de",
+ "wells_locations.sbn": "63f8ad670c6ba53ddec13069e42cfd86f27b6d47c5d0b3f2c25dfd6fb6b55825",
+ "wells_locations.sbx": "8420907d426c44c38315a5bdc0b24fe07a8cd2cc9a7fc60b817500b8cda79a34",
+ "wells_locations.shp": "ee53a4532b513f5b8bcd37ee3468dc4b2c8f6afab6cfc5110d74362c79e52287",
+ "wells_locations.shx": "6e816e96ed0726c2acc61392d2a82df5e9265ab5f5b00dd12f765b139840be79",
+ "wells_locations_rotate14.dbf": "d9b3636b4312c2f76c837e698bcb0d8ef9f4bbaa1765c484787a9f9d7f8bbaae",
+ "wells_locations_rotate14.sbn": "b436e34b8f145966b18d571b47ebc18e35671ec73fca1abbc737d9e1aa984bfb",
+ "wells_locations_rotate14.sbx": "24911f8905155882ce76b0330c9ba5ed449ca985d46833ebc45eee11faabbdaf",
+ "wells_locations_rotate14.shp": "695894af4678358320fb914e872cadb2613fae2e54c2d159f40c02fa558514cf",
+ "wells_locations_rotate14.shp.xml": "288183eb273c1fc2facb49d51c34bcafb16710189242da48f7717c49412f3e29",
+ "wells_locations_rotate14.shx": "da3374865cbf864f81dd69192ab616d1093d2159ac3c682fe2bfc4c295a28e42",
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/gis/{fname}",
+ fname=fname,
+ path=data_path / sim_name / "gis",
+ known_hash=fhash,
+ )
+
+copytree(data_path / sim_name / "gis", modelpth / "gis")
+
# + pycharm={"name": "#%%\n"}
# Set up the figure and PlotMapView. Show a very faint map of ibound and
# model grid by specifying a transparency alpha value.
@@ -446,7 +532,7 @@
mapview = flopy.plot.PlotMapView(model=ml, ax=ax)
# Plot a shapefile of
-shp = os.path.join(loadpth, "gis", "bedrock_outcrop_hole")
+shp = os.path.join(modelpth, "gis", "bedrock_outcrop_hole")
patch_collection = mapview.plot_shapefile(
shp,
edgecolor="green",
@@ -454,13 +540,13 @@
alpha=0.5, # facecolor='none',
)
# Plot a shapefile of a cross-section line
-shp = os.path.join(loadpth, "gis", "cross_section")
+shp = os.path.join(modelpth, "gis", "cross_section")
patch_collection = mapview.plot_shapefile(
shp, radius=0, lw=[3, 1.5], edgecolor=["red", "green"], facecolor="None"
)
# Plot a shapefile of well locations
-shp = os.path.join(loadpth, "gis", "wells_locations")
+shp = os.path.join(modelpth, "gis", "wells_locations")
patch_collection = mapview.plot_shapefile(shp, radius=100, facecolor="red")
# Plot the grid and boundary conditions over the top
@@ -485,7 +571,7 @@
mapview = flopy.plot.PlotMapView(model=ml)
# Plot a shapefile of
-shp = os.path.join(loadpth, "gis", "bedrock_outcrop_hole_rotate14")
+shp = os.path.join(modelpth, "gis", "bedrock_outcrop_hole_rotate14")
patch_collection = mapview.plot_shapefile(
shp,
edgecolor="green",
@@ -493,11 +579,11 @@
alpha=0.5, # facecolor='none',
)
# Plot a shapefile of a cross-section line
-shp = os.path.join(loadpth, "gis", "cross_section_rotate14")
+shp = os.path.join(modelpth, "gis", "cross_section_rotate14")
patch_collection = mapview.plot_shapefile(shp, lw=3, edgecolor="red")
# Plot a shapefile of well locations
-shp = os.path.join(loadpth, "gis", "wells_locations_rotate14")
+shp = os.path.join(modelpth, "gis", "wells_locations_rotate14")
patch_collection = mapview.plot_shapefile(shp, radius=100, facecolor="red")
# Plot the grid and boundary conditions over the top
@@ -529,18 +615,16 @@
# + pycharm={"name": "#%%\n"}
# let's extract some shapes from our shapefiles
-shp = os.path.join(loadpth, "gis", "bedrock_outcrop_hole_rotate14")
+shp = os.path.join(modelpth, "gis", "bedrock_outcrop_hole_rotate14")
with shapefile.Reader(shp) as r:
- polygon_w_hole = [
- r.shape(0),
- ]
+ polygon_w_hole = [r.shape(0)]
-shp = os.path.join(loadpth, "gis", "cross_section_rotate14")
+shp = os.path.join(modelpth, "gis", "cross_section_rotate14")
with shapefile.Reader(shp) as r:
cross_section = r.shapes()
# Plot a shapefile of well locations
-shp = os.path.join(loadpth, "gis", "wells_locations_rotate14")
+shp = os.path.join(modelpth, "gis", "wells_locations_rotate14")
with shapefile.Reader(shp) as r:
wells = r.shapes()
@@ -570,9 +654,7 @@
patch_collection1 = mapview.plot_shapes(cross_section, lw=3, edgecolor="red")
# plot_point(s)
-patch_collection3 = mapview.plot_shapes(
- wells, radius=100, facecolor="k", edgecolor="k"
-)
+patch_collection3 = mapview.plot_shapes(wells, radius=100, facecolor="k", edgecolor="k")
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Working with MODFLOW-6 models
@@ -581,14 +663,46 @@
# + pycharm={"name": "#%%\n"}
# load the Freyberg model into mf6-flopy and run the simulation
-sim_name = "mfsim.nam"
-sim_path = os.path.join("..", "..", "examples", "data", "mf6-freyberg")
+
+sim_name = "mf6-freyberg"
+sim_path = modelpth / "mf6"
+file_names = {
+ "bot.asc": "3107f907cb027460fd40ffc16cb797a78babb31988c7da326c9f500fba855b62",
+ "description.txt": "94093335eec6a24711f86d4d217ccd5a7716dd9e01cb6b732bc7757d41675c09",
+ "freyberg.cbc": "c8ad843b1da753eb58cf6c462ac782faf0ca433d6dcb067742d8bd698db271e3",
+ "freyberg.chd": "d8b8ada8d3978daea1758b315be983b5ca892efc7d69bf6b367ceec31e0dd156",
+ "freyberg.dis": "cac230a207cc8483693f7ba8ae29ce40c049036262eac4cebe17a4e2347a8b30",
+ "freyberg.dis.grb": "c8c26fb1fa4b210208134b286d895397cf4b3131f66e1d9dda76338502c7e96a",
+ "freyberg.hds": "926a06411ca658a89db6b5686f51ddeaf5b74ced81239cab1d43710411ba5f5b",
+ "freyberg.ic": "6efb56ee9cdd704b9a76fb9efd6dae750facc5426b828713f2d2cf8d35194120",
+ "freyberg.ims": "6dddae087d85417e3cdaa13e7b24165afb7f9575ab68586f3adb6c1b2d023781",
+ "freyberg.nam": "cee9b7b000fe35d2df26e878d09d465250a39504f87516c897e3fa14dcda081e",
+ "freyberg.npf": "81104d3546045fff0eddf5059465e560b83b492fa5a5acad1907ce18c2b9c15f",
+ "freyberg.oc": "c0715acd75eabcc42c8c47260a6c1abd6c784350983f7e2e6009ddde518b80b8",
+ "freyberg.rch": "a6ec1e0eda14fd2cdf618a5c0243a9caf82686c69242b783410d5abbcf971954",
+ "freyberg.riv": "a8cafc8c317cbe2acbb43e2f0cfe1188cb2277a7a174aeb6f3e6438013de8088",
+ "freyberg.sto": "74d748c2f0adfa0a32ee3f2912115c8f35b91011995b70c1ec6ae1c627242c41",
+ "freyberg.tdis": "9965cbb17caf5b865ea41a4ec04bcb695fe15a38cb539425fdc00abbae385cbe",
+ "freyberg.wel": "f19847de455598de52c05a4be745698c8cb589e5acfb0db6ab1f06ded5ff9310",
+ "k11.asc": "b6a8aa46ef17f7f096d338758ef46e32495eb9895b25d687540d676744f02af5",
+ "mfsim.nam": "6b8d6d7a56c52fb2bff884b3979e3d2201c8348b4bbfd2b6b9752863cbc9975e",
+ "top.asc": "3ad2b131671b9faca7f74c1dd2b2f41875ab0c15027764021a89f9c95dccaa6a",
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/{fname}",
+ fname=fname,
+ path=data_path / sim_name,
+ known_hash=fhash,
+ )
+
sim = flopy.mf6.MFSimulation.load(
- sim_name=sim_name, version=vmf6, exe_name=exe_name_mf6, sim_ws=sim_path
+ sim_name="mfsim.nam",
+ version=vmf6,
+ exe_name=exe_name_mf6,
+ sim_ws=data_path / sim_name,
)
-
-newpth = os.path.join(modelpth)
-sim.set_sim_path(newpth)
+sim.set_sim_path(sim_path)
sim.write_simulation()
success, buff = sim.run_simulation()
if not success:
@@ -668,14 +782,14 @@
# + pycharm={"name": "#%%\n"}
# get the specific discharge from the cell budget file
-cbc_file = os.path.join(newpth, "freyberg.cbc")
+cbc_file = os.path.join(sim_path, "freyberg.cbc")
cbc = flopy.utils.CellBudgetFile(cbc_file)
spdis = cbc.get_data(text="SPDIS")[0]
qx, qy, qz = flopy.utils.postprocessing.get_specific_discharge(spdis, ml6)
# get the head from the head file
-head_file = os.path.join(newpth, "freyberg.hds")
+head_file = os.path.join(sim_path, "freyberg.hds")
head = flopy.utils.HeadFile(head_file)
hdata = head.get_alldata()[0]
@@ -739,17 +853,7 @@ def run_vertex_grid_example(ws):
xmax = 12 * delr
ymin = 8 * delc
ymax = 13 * delc
- rfpoly = [
- [
- [
- (xmin, ymin),
- (xmax, ymin),
- (xmax, ymax),
- (xmin, ymax),
- (xmin, ymin),
- ]
- ]
- ]
+ rfpoly = [[[(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax), (xmin, ymin)]]]
g.add_refinement_features(rfpoly, "polygon", 1, range(nlay))
rf1shp = os.path.join(gridgen_ws, "rf1")
@@ -757,17 +861,7 @@ def run_vertex_grid_example(ws):
xmax = 11 * delr
ymin = 9 * delc
ymax = 12 * delc
- rfpoly = [
- [
- [
- (xmin, ymin),
- (xmax, ymin),
- (xmax, ymax),
- (xmin, ymax),
- (xmin, ymin),
- ]
- ]
- ]
+ rfpoly = [[[(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax), (xmin, ymin)]]]
g.add_refinement_features(rfpoly, "polygon", 2, range(nlay))
rf2shp = os.path.join(gridgen_ws, "rf2")
@@ -775,17 +869,7 @@ def run_vertex_grid_example(ws):
xmax = 10 * delr
ymin = 10 * delc
ymax = 11 * delc
- rfpoly = [
- [
- [
- (xmin, ymin),
- (xmax, ymin),
- (xmax, ymax),
- (xmin, ymax),
- (xmin, ymin),
- ]
- ]
- ]
+ rfpoly = [[[(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax), (xmin, ymin)]]]
g.add_refinement_features(rfpoly, "polygon", 3, range(nlay))
g.build(verbose=False)
@@ -880,9 +964,7 @@ def run_vertex_grid_example(ws):
# riv
riverline = [[(Lx - 1.0, Ly), (Lx - 1.0, 0.0)]]
rivcells = g.intersect(riverline, "line", 0)
- rivspd = [
- [(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"]
- ]
+ rivspd = [[(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"]]
riv = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=rivspd)
# output control
@@ -1023,10 +1105,10 @@ def run_vertex_grid_example(ws):
run_vertex_grid_example(modelpth)
# check if model ran properly
-modelpth = os.path.join(modelpth, "mp7_ex2", "mf6")
+mp7modelpth = os.path.join(modelpth, "mp7_ex2", "mf6")
files = ["mp7p2.hds", "mp7p2.cbb"]
for f in files:
- if os.path.isfile(os.path.join(modelpth, f)):
+ if os.path.isfile(os.path.join(mp7modelpth, f)):
msg = f"Output file located: {f}"
print(msg)
else:
@@ -1040,7 +1122,7 @@ def run_vertex_grid_example(ws):
sim_name=vertex_sim_name,
version=vmf6,
exe_name=exe_name_mf6,
- sim_ws=modelpth,
+ sim_ws=mp7modelpth,
)
vertex_ml6 = vertex_sim.get_model("mp7p2")
@@ -1090,7 +1172,7 @@ def run_vertex_grid_example(ws):
# + pycharm={"name": "#%%\n"}
# get the head output for stress period 1 from the modflow6 head file
-head = flopy.utils.HeadFile(os.path.join(modelpth, "mp7p2.hds"))
+head = flopy.utils.HeadFile(os.path.join(mp7modelpth, "mp7p2.hds"))
hdata = head.get_alldata()[0, :, :, :]
fig = plt.figure(figsize=(12, 12))
@@ -1131,11 +1213,11 @@ def run_vertex_grid_example(ws):
# + pycharm={"name": "#%%\n"}
# load the MODPATH-7 results
mp_namea = "mp7p2a_mp"
-fpth = os.path.join(modelpth, f"{mp_namea}.mppth")
+fpth = os.path.join(mp7modelpth, f"{mp_namea}.mppth")
p = flopy.utils.PathlineFile(fpth)
p0 = p.get_alldata()
-fpth = os.path.join(modelpth, f"{mp_namea}.timeseries")
+fpth = os.path.join(mp7modelpth, f"{mp_namea}.timeseries")
ts = flopy.utils.TimeseriesFile(fpth)
ts0 = ts.get_alldata()
@@ -1158,9 +1240,7 @@ def run_vertex_grid_example(ws):
pline = mapview.plot_pathline(p0, layer="all", color="blue", lw=0.75)
colors = ["green", "orange", "red"]
for k in range(3):
- tseries = mapview.plot_timeseries(
- ts0, layer=k, marker="o", lw=0, color=colors[k]
- )
+ tseries = mapview.plot_timeseries(ts0, layer=k, marker="o", lw=0, color=colors[k])
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Plotting specific discharge vectors for DISV
@@ -1168,12 +1248,10 @@ def run_vertex_grid_example(ws):
# + pycharm={"name": "#%%\n"}
cbb = flopy.utils.CellBudgetFile(
- os.path.join(modelpth, "mp7p2.cbb"), precision="double"
+ os.path.join(mp7modelpth, "mp7p2.cbb"), precision="double"
)
spdis = cbb.get_data(text="SPDIS")[0]
-qx, qy, qz = flopy.utils.postprocessing.get_specific_discharge(
- spdis, vertex_ml6
-)
+qx, qy, qz = flopy.utils.postprocessing.get_specific_discharge(spdis, vertex_ml6)
fig = plt.figure(figsize=(12, 12))
ax = fig.add_subplot(1, 1, 1, aspect="equal")
@@ -1196,15 +1274,29 @@ def run_vertex_grid_example(ws):
# set up the notebook for unstructured grid plotting
from flopy.discretization import UnstructuredGrid
-# this is a folder containing some unstructured grids
-datapth = os.path.join("..", "..", "examples", "data", "unstructured")
+datapth = modelpth / "unstructured"
+file_names = {
+ "TriMesh_local.exp": "0be6a1a1743972ba98c9d9e63ac2e457813c0809bfbda120e09a97b04411a65e",
+ "TriMesh_usg.exp": "0b450f2b306253a7b2889796e7a4eea52159f509c7b28a1f65929008dd854e08",
+ "Trimesh_circle.exp": "1efb86bb77060dcec20e752e242076e3bd23046f5e47d20d948bcf4623b3deb7",
+ "headu.githds": "cbe94655d471470d931923f70c7548b161ea4c5a22333b7fab6e2255450cda89",
+ "ugrid_iverts.dat": "7e33ec7f7d1fdbeb6cb7bc8dbcdf35f262c82aaa38dc79b4fb3fe7b53f7c7c1b",
+ "ugrid_verts.dat": "59493b26c8969789bb5a06d999db7a2dac324bffee280925e123007c81e689c7",
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/unstructured/{fname}",
+ fname=fname,
+ path=data_path / "unstructured",
+ known_hash=fhash,
+ )
+
+copytree(data_path / "unstructured", datapth, dirs_exist_ok=True)
# simple functions to load vertices and incidence lists
def load_verts(fname):
- verts = np.genfromtxt(
- fname, dtype=[int, float, float], names=["iv", "x", "y"]
- )
+ verts = np.genfromtxt(fname, dtype=[int, float, float], names=["iv", "x", "y"])
verts["iv"] -= 1 # zero based
return verts
@@ -1303,14 +1395,14 @@ def load_iverts(fname):
# + pycharm={"name": "#%%\n"}
# get the specific discharge from the cell budget file
-cbc_file = os.path.join(newpth, "freyberg.cbc")
+cbc_file = os.path.join(sim_path, "freyberg.cbc")
cbc = flopy.utils.CellBudgetFile(cbc_file)
spdis = cbc.get_data(text="SPDIS")[0]
qx, qy, qz = flopy.utils.postprocessing.get_specific_discharge(spdis, ml6)
# get the head from the head file
-head_file = os.path.join(newpth, "freyberg.hds")
+head_file = os.path.join(sim_path, "freyberg.hds")
head = flopy.utils.HeadFile(head_file)
hdata = head.get_alldata()[0]
@@ -1326,9 +1418,7 @@ def load_iverts(fname):
plt.colorbar(quadmesh, shrink=0.75)
# use styles to add a heading, xlabel, ylabel
- styles.heading(
- letter="A.", heading="Specific Discharge (" + r"$L/T$" + ")"
- )
+ styles.heading(letter="A.", heading="Specific Discharge (" + r"$L/T$" + ")")
styles.xlabel(label="Easting")
styles.ylabel(label="Northing")
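Note on the pattern introduced above: `pooch.retrieve` caches each file under `path` and verifies it against `known_hash` (verification is skipped when the hash is `None`), and `copytree` then mirrors the cached folder into the notebook's scratch workspace. A minimal, self-contained sketch of that fetch-then-stage idiom, with an illustrative file name:

    from pathlib import Path
    from shutil import copytree
    from tempfile import TemporaryDirectory

    import pooch

    cache = Path.cwd() / "examples" / "data" / "unstructured"
    # cache the file locally; pooch skips the download when a copy with a
    # matching digest is already present
    pooch.retrieve(
        url="https://github.com/modflowpy/flopy/raw/develop/examples/data/unstructured/ugrid_verts.dat",
        fname="ugrid_verts.dat",
        path=cache,
        known_hash=None,  # no pinned digest, so integrity checking is skipped
    )
    # stage the cached folder into a throwaway workspace for this run
    tmp = TemporaryDirectory()
    copytree(cache, Path(tmp.name) / "unstructured", dirs_exist_ok=True)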
diff --git a/.docs/Notebooks/raster_intersection_example.py b/.docs/Notebooks/raster_intersection_example.py
index 447581faad..3ea9a71cbc 100644
--- a/.docs/Notebooks/raster_intersection_example.py
+++ b/.docs/Notebooks/raster_intersection_example.py
@@ -29,12 +29,15 @@
import os
import sys
import time
+from pathlib import Path
from tempfile import TemporaryDirectory
+import git
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
+import pooch
import shapefile
import shapely
@@ -53,12 +56,29 @@
temp_dir = TemporaryDirectory()
workspace = temp_dir.name
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
# ### Raster files can be loaded using the `Raster.load` method
# +
-raster_ws = os.path.join("..", "..", "examples", "data", "options", "dem")
+raster_ws = data_path / "options" / "dem"
raster_name = "dem.img"
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/options/dem/{raster_name}",
+ fname=raster_name,
+ path=raster_ws,
+ known_hash=None,
+)
+
+
rio = Raster.load(os.path.join(raster_ws, raster_name))
# -
@@ -91,9 +111,27 @@
# The structured grid example uses the DIS file from the GSFLOW Sagehen example problem to create a modelgrid
# +
-model_ws = os.path.join("..", "..", "examples", "data", "options", "sagehen")
+file_names = {
+ "sagehen.bas": None,
+ "sagehen.dis": None,
+ "sagehen.lpf": None,
+ "sagehen.nam": None,
+ "sagehen.nwt": None,
+ "sagehen.oc": None,
+ "sagehen.sfr": None,
+ "sagehen.uzf": None,
+ "sagehen.wel": None,
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/options/sagehen/{fname}",
+ fname=fname,
+ path=data_path / "options" / "sagehen",
+ known_hash=None,
+ )
+
ml = flopy.modflow.Modflow.load(
- "sagehen.nam", version="mfnwt", model_ws=model_ws
+ "sagehen.nam", version="mfnwt", model_ws=data_path / "options" / "sagehen"
)
xoff = 214110
@@ -132,9 +170,7 @@
# + `"mean"`, `"median"`, `"min"`, `"max"`, and `"mode"` are a function of the number of grid cells.
t0 = time.time()
-dem_data = rio.resample_to_grid(
- ml.modelgrid, band=rio.bands[0], method="nearest"
-)
+dem_data = rio.resample_to_grid(ml.modelgrid, band=rio.bands[0], method="nearest")
resample_time = time.time() - t0
# +
@@ -143,9 +179,7 @@
ax = fig.add_subplot(1, 1, 1, aspect="equal")
pmv = flopy.plot.PlotMapView(modelgrid=ml.modelgrid, ax=ax)
-ax = pmv.plot_array(
- dem_data, masked_values=rio.nodatavals, vmin=vmin, vmax=vmax
-)
+ax = pmv.plot_array(dem_data, masked_values=rio.nodatavals, vmin=vmin, vmax=vmax)
plt.title(f"Resample time, nearest neighbor: {resample_time:.3f} sec")
plt.colorbar(ax, shrink=0.7)
# -
@@ -162,9 +196,7 @@
ax = fig.add_subplot(1, 1, 1, aspect="equal")
pmv = flopy.plot.PlotMapView(modelgrid=ml.modelgrid, ax=ax)
-ax = pmv.plot_array(
- dem_data, masked_values=rio.nodatavals, vmin=vmin, vmax=vmax
-)
+ax = pmv.plot_array(dem_data, masked_values=rio.nodatavals, vmin=vmin, vmax=vmax)
plt.title(f"Resample time, bi-linear: {resample_time:.3f} sec")
plt.colorbar(ax, shrink=0.7)
# -
@@ -181,19 +213,14 @@
ax = fig.add_subplot(1, 1, 1, aspect="equal")
pmv = flopy.plot.PlotMapView(modelgrid=ml.modelgrid, ax=ax)
-ax = pmv.plot_array(
- dem_data, masked_values=rio.nodatavals, vmin=vmin, vmax=vmax
-)
+ax = pmv.plot_array(dem_data, masked_values=rio.nodatavals, vmin=vmin, vmax=vmax)
plt.title(f"Resample time, bi-cubic: {resample_time:.3f} sec")
plt.colorbar(ax, shrink=0.7)
# -
t0 = time.time()
dem_data = rio.resample_to_grid(
- ml.modelgrid,
- band=rio.bands[0],
- method="median",
- extrapolate_edges=True,
+ ml.modelgrid, band=rio.bands[0], method="median", extrapolate_edges=True
)
resample_time = time.time() - t0
@@ -203,9 +230,7 @@
ax = fig.add_subplot(1, 1, 1, aspect="equal")
pmv = flopy.plot.PlotMapView(modelgrid=ml.modelgrid, ax=ax)
-ax = pmv.plot_array(
- dem_data, masked_values=rio.nodatavals, vmin=vmin, vmax=vmax
-)
+ax = pmv.plot_array(dem_data, masked_values=rio.nodatavals, vmin=vmin, vmax=vmax)
plt.title(f"Resample time, median: {resample_time:.3f} sec")
plt.colorbar(ax, shrink=0.7)
# -
@@ -255,9 +280,7 @@
# +
t0 = time.time()
-dem_data = rio.resample_to_grid(
- mg_unstruct, band=rio.bands[0], method="nearest"
-)
+dem_data = rio.resample_to_grid(mg_unstruct, band=rio.bands[0], method="nearest")
resample_time = time.time() - t0
@@ -268,20 +291,14 @@
pmv = flopy.plot.PlotMapView(modelgrid=mg_unstruct, ax=ax)
ax = pmv.plot_array(
- dem_data,
- masked_values=rio.nodatavals,
- cmap="viridis",
- vmin=vmin,
- vmax=vmax,
+ dem_data, masked_values=rio.nodatavals, cmap="viridis", vmin=vmin, vmax=vmax
)
plt.title(f"Resample time, nearest neighbor: {resample_time:.3f} sec")
plt.colorbar(ax, shrink=0.7)
# +
t0 = time.time()
-dem_data = rio.resample_to_grid(
- mg_unstruct, band=rio.bands[0], method="linear"
-)
+dem_data = rio.resample_to_grid(mg_unstruct, band=rio.bands[0], method="linear")
resample_time = time.time() - t0
@@ -292,22 +309,14 @@
pmv = flopy.plot.PlotMapView(modelgrid=mg_unstruct, ax=ax)
ax = pmv.plot_array(
- dem_data,
- masked_values=rio.nodatavals,
- cmap="viridis",
- vmin=vmin,
- vmax=vmax,
+ dem_data, masked_values=rio.nodatavals, cmap="viridis", vmin=vmin, vmax=vmax
)
plt.title(f"Resample time, bi-linear: {resample_time:.3f} sec")
plt.colorbar(ax, shrink=0.7)
# +
t0 = time.time()
-dem_data = rio.resample_to_grid(
- mg_unstruct,
- band=rio.bands[0],
- method="median",
-)
+dem_data = rio.resample_to_grid(mg_unstruct, band=rio.bands[0], method="median")
resample_time = time.time() - t0
@@ -318,11 +327,7 @@
pmv = flopy.plot.PlotMapView(modelgrid=mg_unstruct, ax=ax)
ax = pmv.plot_array(
- dem_data,
- masked_values=rio.nodatavals,
- cmap="viridis",
- vmin=vmin,
- vmax=vmax,
+ dem_data, masked_values=rio.nodatavals, cmap="viridis", vmin=vmin, vmax=vmax
)
plt.title(f"Resample time, median: {resample_time:.3f} sec")
plt.colorbar(ax, shrink=0.7)
@@ -434,9 +439,7 @@
# +
t0 = time.time()
-dem_data = rio.resample_to_grid(
- mg_unstruct, band=rio.bands[0], method="nearest"
-)
+dem_data = rio.resample_to_grid(mg_unstruct, band=rio.bands[0], method="nearest")
resample_time = time.time() - t0
@@ -447,11 +450,7 @@
pmv = flopy.plot.PlotMapView(modelgrid=mg_unstruct, ax=ax)
ax = pmv.plot_array(
- dem_data,
- masked_values=rio.nodatavals,
- cmap="viridis",
- vmin=vmin,
- vmax=vmax,
+ dem_data, masked_values=rio.nodatavals, cmap="viridis", vmin=vmin, vmax=vmax
)
plt.plot(shape.T[0], shape.T[1], "r-")
plt.title(f"Resample time, nearest neighbor: {resample_time:.3f} sec")
@@ -459,9 +458,7 @@
# +
t0 = time.time()
-dem_data = rio.resample_to_grid(
- mg_unstruct, band=rio.bands[0], method="linear"
-)
+dem_data = rio.resample_to_grid(mg_unstruct, band=rio.bands[0], method="linear")
resample_time = time.time() - t0
@@ -472,11 +469,7 @@
pmv = flopy.plot.PlotMapView(modelgrid=mg_unstruct, ax=ax)
ax = pmv.plot_array(
- dem_data,
- masked_values=rio.nodatavals,
- cmap="viridis",
- vmin=vmin,
- vmax=vmax,
+ dem_data, masked_values=rio.nodatavals, cmap="viridis", vmin=vmin, vmax=vmax
)
plt.plot(shape.T[0], shape.T[1], "r-")
plt.title(f"Resample time, bi-linear: {resample_time:.3f} sec")
@@ -492,6 +485,23 @@
# +
rio = Raster.load(os.path.join(raster_ws, raster_name))
+file_names = [
+ "model_boundary.CPG",
+ "model_boundary.dbf",
+ "model_boundary.prj",
+ "model_boundary.sbn",
+ "model_boundary.sbx",
+ "model_boundary.shp",
+ "model_boundary.shx",
+]
+for fname in file_names:
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/options/dem/{fname}",
+ fname=fname,
+ path=data_path / "options" / "dem",
+ known_hash=None,
+ )
+
shp_name = os.path.join(raster_ws, "model_boundary.shp")
# read in the shapefile
@@ -551,21 +561,11 @@
ax = fig.add_subplot(1, 1, 1, aspect="equal")
pmv = flopy.plot.PlotMapView(modelgrid=mg_unstruct, ax=ax)
-ax = pmv.plot_array(
- top,
- masked_values=[
- 3500,
- ],
- cmap="viridis",
- vmin=vmin,
- vmax=vmax,
-)
+ax = pmv.plot_array(top, masked_values=[3500], cmap="viridis", vmin=vmin, vmax=vmax)
ib = pmv.plot_ibound(ibound)
pmv.plot_grid(linewidth=0.3)
plt.plot(shape[0], shape[1], "r-")
-plt.title(
- "Model top and ibound arrays created using bi-linear raster resampling"
-)
+plt.title("Model top and ibound arrays created using bi-linear raster resampling")
plt.colorbar(ax, shrink=0.7)
# -
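All of the resampling calls reformatted above go through `Raster.resample_to_grid`; a short sketch, assuming the DEM and Sagehen files fetched earlier sit in the current directory:

    import flopy
    from flopy.utils import Raster

    # load the DEM raster and the model whose grid it is resampled onto
    rio = Raster.load("dem.img")
    ml = flopy.modflow.Modflow.load("sagehen.nam", version="mfnwt", model_ws=".")

    # one call per interpolation scheme; statistic-based methods such as
    # "median" can also extrapolate values into edge cells
    dem_nearest = rio.resample_to_grid(ml.modelgrid, band=rio.bands[0], method="nearest")
    dem_median = rio.resample_to_grid(
        ml.modelgrid, band=rio.bands[0], method="median", extrapolate_edges=True
    )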
diff --git a/.docs/Notebooks/save_binary_data_file_example.py b/.docs/Notebooks/save_binary_data_file_example.py
index b75469f8ce..032685cdc4 100644
--- a/.docs/Notebooks/save_binary_data_file_example.py
+++ b/.docs/Notebooks/save_binary_data_file_example.py
@@ -51,9 +51,7 @@
dtype = np.float32 # or np.float64
mf = flopy.modflow.Modflow(model_ws=model_ws)
-dis = flopy.modflow.ModflowDis(
- mf, nlay=nlay, nrow=nrow, ncol=ncol, delr=20, delc=10
-)
+dis = flopy.modflow.ModflowDis(mf, nlay=nlay, nrow=nrow, ncol=ncol, delr=20, delc=10)
# -
# Create a linear data array
diff --git a/.docs/Notebooks/seawat_henry_example.py b/.docs/Notebooks/seawat_henry_example.py
index 7307f9015c..3d4de1b0ec 100644
--- a/.docs/Notebooks/seawat_henry_example.py
+++ b/.docs/Notebooks/seawat_henry_example.py
@@ -185,9 +185,7 @@
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1, aspect="equal")
-ax.imshow(
- concentration[:, 0, :], interpolation="nearest", extent=(0, Lx, 0, Lz)
-)
+ax.imshow(concentration[:, 0, :], interpolation="nearest", extent=(0, Lx, 0, Lz))
y, x, z = dis.get_node_coordinates()
X, Z = np.meshgrid(x, z[:, 0, 0])
iskip = 3
diff --git a/.docs/Notebooks/sfrpackage_example.py b/.docs/Notebooks/sfrpackage_example.py
index 6ce4273e9d..c69b19b4a9 100644
--- a/.docs/Notebooks/sfrpackage_example.py
+++ b/.docs/Notebooks/sfrpackage_example.py
@@ -34,13 +34,16 @@
import os
import shutil
import sys
+from pathlib import Path
from pprint import pformat
from tempfile import TemporaryDirectory
+import git
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
+import pooch
import flopy
import flopy.utils.binaryfile as bf
@@ -59,19 +62,58 @@
# assumes executable is in users path statement
exe_name = "mf2005"
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
# #### copy over the example files to the working directory
# +
# temporary directory
temp_dir = TemporaryDirectory()
-path = temp_dir.name
-
-gpth = os.path.join("..", "..", "examples", "data", "mf2005_test", "test1ss.*")
-for f in glob.glob(gpth):
- shutil.copy(f, path)
-gpth = os.path.join("..", "..", "examples", "data", "mf2005_test", "test1tr.*")
-for f in glob.glob(gpth):
- shutil.copy(f, path)
+workspace = Path(temp_dir.name)
+
+file_names = [
+ "test1ss.ba6",
+ "test1ss.dis",
+ "test1ss.evt",
+ "test1ss.gag",
+ "test1ss.ghb",
+ "test1ss.lpf",
+ "test1ss.nam",
+ "test1ss.oc",
+ "test1ss.rch",
+ "test1ss.sfr",
+ "test1ss.sip",
+ "test1tr.ba6",
+ "test1tr.dis",
+ "test1tr.evt",
+ "test1tr.gag",
+ "test1tr.ghb",
+ "test1tr.gitcbc",
+ "test1tr.githds",
+ "test1tr.lpf",
+ "test1tr.nam",
+ "test1tr.oc",
+ "test1tr.rch",
+ "test1tr.sfr",
+ "test1tr.sip",
+ "test1tr.wel",
+]
+for fname in file_names:
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/mf2005_test/{fname}",
+ fname=fname,
+ path=data_path / "mf2005_test",
+ known_hash=None,
+ )
+
+shutil.copytree(data_path / "mf2005_test", workspace, dirs_exist_ok=True)
# -
# ### Load example dataset, skipping the SFR package
@@ -80,7 +122,7 @@
"test1ss.nam",
version="mf2005",
exe_name=exe_name,
- model_ws=path,
+ model_ws=workspace,
load_only=["ghb", "evt", "rch", "dis", "bas6", "oc", "sip", "lpf"],
)
@@ -95,18 +137,23 @@
# For more information on Item 2, see the Online Guide to MODFLOW:
#
-rpth = os.path.join(
- "..", "..", "examples", "data", "sfr_examples", "test1ss_reach_data.csv"
-)
+file_names = ["test1ss_reach_data.csv", "test1ss_segment_data.csv", "test1ss.flw"]
+for fname in file_names:
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/sfr_examples/{fname}",
+ fname=fname,
+ path=data_path / "sfr_examples",
+ known_hash=None,
+ )
+
+rpth = data_path / "sfr_examples" / file_names[0]
reach_data = np.genfromtxt(rpth, delimiter=",", names=True)
reach_data
# ### Segment Data structure
# Segment data are input and stored in a dictionary of record arrays, which
-spth = os.path.join(
- "..", "..", "examples", "data", "sfr_examples", "test1ss_segment_data.csv"
-)
+spth = data_path / "sfr_examples" / file_names[1]
ss_segment_data = np.genfromtxt(spth, delimiter=",", names=True)
segment_data = {0: ss_segment_data}
segment_data[0][0:1]["width1"]
@@ -200,9 +247,7 @@
# ### Load SFR formatted water balance output into a pandas dataframe using the `SfrFile` class
-sfr_outfile = os.path.join(
- "..", "..", "examples", "data", "sfr_examples", "test1ss.flw"
-)
+sfr_outfile = data_path / "sfr_examples" / file_names[2]
sfrout = SfrFile(sfr_outfile)
df = sfrout.get_dataframe()
df.head()
@@ -233,7 +278,7 @@
# ### Get SFR leakage results from cell budget file
-bpth = os.path.join(path, "test1ss.cbc")
+bpth = os.path.join(workspace, "test1ss.cbc")
cbbobj = bf.CellBudgetFile(bpth)
cbbobj.headers
@@ -242,18 +287,14 @@
# ### Plot leakage in plan view
-im = plt.imshow(
- sfrleak[0], interpolation="none", cmap="coolwarm", vmin=-3, vmax=3
-)
+im = plt.imshow(sfrleak[0], interpolation="none", cmap="coolwarm", vmin=-3, vmax=3)
cb = plt.colorbar(im, label="SFR Leakage, in cubic feet per second")
# ### Plot total streamflow
sfrQ = sfrleak[0].copy()
sfrQ[sfrQ == 0] = np.nan
-sfrQ[df.row.values - 1, df.column.values - 1] = (
- df[["Qin", "Qout"]].mean(axis=1).values
-)
+sfrQ[df.row.values - 1, df.column.values - 1] = df[["Qin", "Qout"]].mean(axis=1).values
im = plt.imshow(sfrQ, interpolation="none")
plt.colorbar(im, label="Streamflow, in cubic feet per second")
@@ -267,9 +308,9 @@
# >mf2005 test1tr.nam
# ```
-flopy.run_model(exe_name, "test1tr.nam", model_ws=path, silent=True)
+flopy.run_model(exe_name, "test1tr.nam", model_ws=workspace, silent=True)
-sfrout_tr = SfrFile(os.path.join(path, "test1tr.flw"))
+sfrout_tr = SfrFile(os.path.join(workspace, "test1tr.flw"))
dftr = sfrout_tr.get_dataframe()
dftr.head()
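The `SfrFile` reader used above parses the SFR-formatted water-balance listing into a pandas DataFrame; a minimal sketch (the output file is assumed to be in the working directory):

    from flopy.utils.sfroutputfile import SfrFile

    sfrout = SfrFile("test1ss.flw")
    df = sfrout.get_dataframe()
    # per-reach mean of inflow and outflow, as used for the streamflow plot
    q_mean = df[["Qin", "Qout"]].mean(axis=1).values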
diff --git a/.docs/Notebooks/shapefile_export_example.py b/.docs/Notebooks/shapefile_export_example.py
index 8026c86d20..99f3b21518 100644
--- a/.docs/Notebooks/shapefile_export_example.py
+++ b/.docs/Notebooks/shapefile_export_example.py
@@ -25,15 +25,18 @@
# * general exporting and importing of geographic data from other sources
import os
+import sys
# +
-import sys
+from pathlib import Path
from tempfile import TemporaryDirectory
+import git
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
+import pooch
import flopy
@@ -47,8 +50,40 @@
temp_dir = TemporaryDirectory()
outdir = os.path.join(temp_dir.name, "shapefile_export")
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
+sim_name = "freyberg"
+
+file_names = {
+ "freyberg.bas": "63266024019fef07306b8b639c6c67d5e4b22f73e42dcaa9db18b5e0f692c097",
+ "freyberg.dis": "62d0163bf36c7ee9f7ee3683263e08a0abcdedf267beedce6dd181600380b0a2",
+ "freyberg.githds": "abe92497b55e6f6c73306e81399209e1cada34cf794a7867d776cfd18303673b",
+ "freyberg.gitlist": "aef02c664344a288264d5f21e08a748150e43bb721a16b0e3f423e6e3e293056",
+ "freyberg.lpf": "06500bff979424f58e5e4fbd07a7bdeb0c78f31bd08640196044b6ccefa7a1fe",
+ "freyberg.nam": "e66321007bb603ef55ed2ba41f4035ba6891da704a4cbd3967f0c66ef1532c8f",
+ "freyberg.oc": "532905839ccbfce01184980c230b6305812610b537520bf5a4abbcd3bd703ef4",
+ "freyberg.pcg": "0d1686fac4680219fffdb56909296c5031029974171e25d4304e70fa96ebfc38",
+ "freyberg.rch": "37a1e113a7ec16b61417d1fa9710dd111a595de738a367bd34fd4a359c480906",
+ "freyberg.riv": "7492a1d5eb23d6812ec7c8227d0ad4d1e1b35631a765c71182b71e3bd6a6d31d",
+ "freyberg.wel": "00aa55f59797c02f0be5318a523b36b168fc6651f238f34e8b0938c04292d3e7",
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/{fname}",
+ fname=fname,
+ path=data_path / sim_name,
+ known_hash=fhash,
+ )
+
# load an existing model
-model_ws = "../../examples/data/freyberg"
+model_ws = data_path / sim_name
m = flopy.modflow.Modflow.load(
"freyberg.nam",
model_ws=model_ws,
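The `file_names` dictionaries above pin SHA-256 digests so `pooch` can verify each download. When pinning a new file, the digest can be computed locally; a small sketch (the helper name is hypothetical):

    import hashlib
    from pathlib import Path

    def sha256_of(path):
        # bare-hex digest, the form the file_names dicts use
        return hashlib.sha256(Path(path).read_bytes()).hexdigest()

    print(sha256_of("freyberg.nam"))  # paste the output as the known_hash value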
diff --git a/.docs/Notebooks/shapefile_feature_examples.py b/.docs/Notebooks/shapefile_feature_examples.py
index 4d0bf9ff76..b745a24df0 100644
--- a/.docs/Notebooks/shapefile_feature_examples.py
+++ b/.docs/Notebooks/shapefile_feature_examples.py
@@ -31,11 +31,14 @@
import shutil
import sys
import warnings
+from pathlib import Path
from tempfile import TemporaryDirectory
+import git
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
+import pooch
import flopy
from flopy.export.shapefile_utils import recarray2shp, shp2recarray
@@ -58,6 +61,15 @@
temp_dir = TemporaryDirectory()
workspace = temp_dir.name
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
m = flopy.modflow.Modflow("toy_model", model_ws=workspace)
botm = np.zeros((2, 10, 10))
botm[0, :, :] = 1.5
@@ -99,9 +111,7 @@
# +
from pathlib import Path
-recarray2shp(
- chk.summary_array, geoms, os.path.join(workspace, "test.shp"), crs=26715
-)
+recarray2shp(chk.summary_array, geoms, os.path.join(workspace, "test.shp"), crs=26715)
shape_path = os.path.join(workspace, "test.prj")
# + pycharm={"name": "#%%\n"}
@@ -131,7 +141,14 @@
# * create geometry objects for pathlines from a MODPATH simulation
# * plot the paths using the built in plotting method
-pthfile = PathlineFile("../../examples/data/mp6/EXAMPLE-3.pathline")
+fname = "EXAMPLE-3.pathline"
+pthfile = pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/mp6/{fname}",
+ fname=fname,
+ path=data_path / "mp6",
+ known_hash=None,
+)
+pthfile = PathlineFile(pthfile)
pthdata = pthfile._data.view(np.recarray)
# +
@@ -164,7 +181,14 @@
# ## Points
-eptfile = EndpointFile("../../examples/data/mp6/EXAMPLE-3.endpoint")
+fname = "EXAMPLE-3.endpoint"
+eptfile = pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/mp6/{fname}",
+ fname=fname,
+ path=data_path / "mp6",
+ known_hash=None,
+)
+eptfile = EndpointFile(eptfile)
eptdata = eptfile.get_alldata()
# +
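`pooch.retrieve` returns the absolute path of the cached copy, which is why the hunks above can rebind `pthfile`/`eptfile` from the return value and hand it straight to a reader. A sketch of that round trip:

    import pooch
    from flopy.utils.modpathfile import PathlineFile

    fpath = pooch.retrieve(
        url="https://github.com/modflowpy/flopy/raw/develop/examples/data/mp6/EXAMPLE-3.pathline",
        fname="EXAMPLE-3.pathline",
        path="mp6",
        known_hash=None,
    )
    # retrieve() hands back the local path, so no os.path.join is needed
    pthfile = PathlineFile(fpath)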
diff --git a/.docs/Notebooks/swi2package_example1.py b/.docs/Notebooks/swi2package_example1.py
index 4241716e6d..791aba248f 100644
--- a/.docs/Notebooks/swi2package_example1.py
+++ b/.docs/Notebooks/swi2package_example1.py
@@ -150,9 +150,7 @@
hfile = flopy.utils.HeadFile(os.path.join(ml.model_ws, f"{modelname}.hds"))
head = hfile.get_alldata()
# read model zeta
-zfile = flopy.utils.CellBudgetFile(
- os.path.join(ml.model_ws, f"{modelname}.zta")
-)
+zfile = flopy.utils.CellBudgetFile(os.path.join(ml.model_ws, f"{modelname}.zta"))
kstpkper = zfile.get_kstpkper()
zeta = []
for kk in kstpkper:
@@ -164,12 +162,7 @@
plt.figure(figsize=(16, 6))
# define x-values of xcells and plot interface
x = np.arange(0, ncol * delr, delr) + delr / 2.0
-label = [
- "SWI2",
- "_",
- "_",
- "_",
-] # labels with an underscore are not added to legend
+label = ["SWI2", "_", "_", "_"] # labels with an underscore are not added to legend
for i in range(4):
zt = np.ma.masked_outside(zeta[i, 0, 0, :], -39.99999, -0.00001)
plt.plot(x, zt, "r-", lw=1, zorder=10, label=label[i])
@@ -212,11 +205,7 @@
label = ["SWI2", "_", "_", "_"]
for k in range(zeta.shape[0]):
modelxsect.plot_surface(
- zeta[k, :, :, :],
- masked_values=[0, -40.0],
- color="red",
- lw=1,
- label=label[k],
+ zeta[k, :, :, :], masked_values=[0, -40.0], color="red", lw=1, label=label[k]
)
linecollection = modelxsect.plot_grid()
ax.set_title("ModelCrossSection.plot_surface()")
diff --git a/.docs/Notebooks/swi2package_example2.py b/.docs/Notebooks/swi2package_example2.py
index 4820f321f5..c1b505d1a7 100644
--- a/.docs/Notebooks/swi2package_example2.py
+++ b/.docs/Notebooks/swi2package_example2.py
@@ -291,9 +291,7 @@
steady=False,
)
bas = flopy.modflow.ModflowBas(m, ibound=swt_ibound, strt=0.05)
-lpf = flopy.modflow.ModflowLpf(
- m, hk=2.0, vka=2.0, ss=0.0, sy=0.0, laytyp=0, layavg=0
-)
+lpf = flopy.modflow.ModflowLpf(m, hk=2.0, vka=2.0, ss=0.0, sy=0.0, laytyp=0, layavg=0)
oc = flopy.modflow.ModflowOc(m, save_every=1, save_types=["save head"])
pcg = flopy.modflow.ModflowPcg(m)
# Create the MT3DMS model files
@@ -331,9 +329,7 @@
mxstrn=1e8,
)
dsp = flopy.mt3d.Mt3dDsp(m, al=0.0, trpt=1.0, trpv=1.0, dmcoef=0.0)
-gcg = flopy.mt3d.Mt3dGcg(
- m, mxiter=1, iter1=50, isolve=3, cclose=1e-6, iprgcg=5
-)
+gcg = flopy.mt3d.Mt3dGcg(m, mxiter=1, iter1=50, isolve=3, cclose=1e-6, iprgcg=5)
ssm = flopy.mt3d.Mt3dSsm(m, stress_period_data=ssm_data)
# Create the SEAWAT model files
vdf = flopy.seawat.SeawatVdf(
@@ -386,15 +382,7 @@
)
# plot initial conditions
ax = axes[0]
-ax.text(
- -0.075,
- 1.05,
- "A",
- transform=ax.transAxes,
- va="center",
- ha="center",
- size="8",
-)
+ax.text(-0.075, 1.05, "A", transform=ax.transAxes, va="center", ha="center", size="8")
# text(.975, .1, '(a)', transform = ax.transAxes, va = 'center', ha = 'center')
ax.plot([110, 150], [0, -40], "k")
ax.plot([150, 190], [0, -40], "k")
@@ -407,15 +395,7 @@
ax.set_ylabel("Elevation, in meters")
# plot stratified swi2 and seawat results
ax = axes[1]
-ax.text(
- -0.075,
- 1.05,
- "B",
- transform=ax.transAxes,
- va="center",
- ha="center",
- size="8",
-)
+ax.text(-0.075, 1.05, "B", transform=ax.transAxes, va="center", ha="center", size="8")
#
zp = zeta[0, 0, :]
p = (zp < 0.0) & (zp > -40.0)
@@ -447,15 +427,7 @@
ax.set_ylabel("Elevation, in meters")
# plot vd model
ax = axes[2]
-ax.text(
- -0.075,
- 1.05,
- "C",
- transform=ax.transAxes,
- va="center",
- ha="center",
- size="8",
-)
+ax.text(-0.075, 1.05, "C", transform=ax.transAxes, va="center", ha="center", size="8")
dr = zeta[0, 0, :]
ax.plot(x, dr, "b", linewidth=1.5, drawstyle="steps-mid")
dr = zeta2[0, 0, :]
@@ -466,18 +438,10 @@
ax.plot(x, dr, "r", linewidth=0.75, drawstyle="steps-mid")
# fake figures
ax.plot(
- [-100.0, -100],
- [-100.0, -100],
- "b",
- linewidth=1.5,
- label="SWI2 stratified option",
+ [-100.0, -100], [-100.0, -100], "b", linewidth=1.5, label="SWI2 stratified option"
)
ax.plot(
- [-100.0, -100],
- [-100.0, -100],
- "r",
- linewidth=0.75,
- label="SWI2 continuous option",
+ [-100.0, -100], [-100.0, -100], "r", linewidth=0.75, label="SWI2 continuous option"
)
# legend
leg = ax.legend(loc="lower left", numpoints=1)
diff --git a/.docs/Notebooks/swi2package_example3.py b/.docs/Notebooks/swi2package_example3.py
index 4b0486d14f..49428d2e12 100644
--- a/.docs/Notebooks/swi2package_example3.py
+++ b/.docs/Notebooks/swi2package_example3.py
@@ -130,9 +130,9 @@ def LegBar(ax, x0, y0, t0, dx, dy, dt, cc):
# Define SWI2 data.
-zini = np.hstack(
- (-9 * np.ones(24), np.arange(-9, -50, -0.5), -50 * np.ones(94))
-)[np.newaxis, :]
+zini = np.hstack((-9 * np.ones(24), np.arange(-9, -50, -0.5), -50 * np.ones(94)))[
+ np.newaxis, :
+]
iso = np.zeros((1, 200), dtype=int)
iso[:, :30] = -2
@@ -224,15 +224,7 @@ def LegBar(ax, x0, y0, t0, dx, dy, dt, cc):
)
ax = fig.add_subplot(311)
-ax.text(
- -0.075,
- 1.05,
- "A",
- transform=ax.transAxes,
- va="center",
- ha="center",
- size="8",
-)
+ax.text(-0.075, 1.05, "A", transform=ax.transAxes, va="center", ha="center", size="8")
# confining unit
ax.fill(
[-600, 3400, 3400, -600],
@@ -247,9 +239,7 @@ def LegBar(ax, x0, y0, t0, dx, dy, dt, cc):
ax.plot(x[p], zr[p], color=cc[0], linewidth=lw, drawstyle="steps-mid")
#
for i in range(5):
- zt = MergeData(
- ncol, [zeta[i, 0, 0, :], zeta[i, 1, 0, :], zeta[i, 2, 0, :]], zedge
- )
+ zt = MergeData(ncol, [zeta[i, 0, 0, :], zeta[i, 1, 0, :], zeta[i, 2, 0, :]], zedge)
dr = zt.copy()
ax.plot(x, dr, color=cc[i + 1], linewidth=lw, drawstyle="steps-mid")
# Manufacture a legend bar
@@ -260,15 +250,7 @@ def LegBar(ax, x0, y0, t0, dx, dy, dt, cc):
ax.set_xlim(-250.0, 2500.0)
ax = fig.add_subplot(312)
-ax.text(
- -0.075,
- 1.05,
- "B",
- transform=ax.transAxes,
- va="center",
- ha="center",
- size="8",
-)
+ax.text(-0.075, 1.05, "B", transform=ax.transAxes, va="center", ha="center", size="8")
# confining unit
ax.fill(
[-600, 3400, 3400, -600],
@@ -278,9 +260,7 @@ def LegBar(ax, x0, y0, t0, dx, dy, dt, cc):
)
#
for i in range(4, 10):
- zt = MergeData(
- ncol, [zeta[i, 0, 0, :], zeta[i, 1, 0, :], zeta[i, 2, 0, :]], zedge
- )
+ zt = MergeData(ncol, [zeta[i, 0, 0, :], zeta[i, 1, 0, :], zeta[i, 2, 0, :]], zedge)
dr = zt.copy()
ax.plot(x, dr, color=cc[i + 1], linewidth=lw, drawstyle="steps-mid")
# Manufacture a legend bar
@@ -291,15 +271,7 @@ def LegBar(ax, x0, y0, t0, dx, dy, dt, cc):
ax.set_xlim(-250.0, 2500.0)
ax = fig.add_subplot(313)
-ax.text(
- -0.075,
- 1.05,
- "C",
- transform=ax.transAxes,
- va="center",
- ha="center",
- size="8",
-)
+ax.text(-0.075, 1.05, "C", transform=ax.transAxes, va="center", ha="center", size="8")
# confining unit
ax.fill(
[-600, 3400, 3400, -600],
@@ -308,9 +280,7 @@ def LegBar(ax, x0, y0, t0, dx, dy, dt, cc):
ec=[0.8, 0.8, 0.8],
)
#
-zt = MergeData(
- ncol, [zeta[4, 0, 0, :], zeta[4, 1, 0, :], zeta[4, 2, 0, :]], zedge
-)
+zt = MergeData(ncol, [zeta[4, 0, 0, :], zeta[4, 1, 0, :], zeta[4, 2, 0, :]], zedge)
ax.plot(
x,
zt,
diff --git a/.docs/Notebooks/swi2package_example4.py b/.docs/Notebooks/swi2package_example4.py
index 5ecac1b63c..b9c973a98e 100644
--- a/.docs/Notebooks/swi2package_example4.py
+++ b/.docs/Notebooks/swi2package_example4.py
@@ -236,9 +236,7 @@
iswizt=55,
)
oc = flopy.modflow.ModflowOc(ml, stress_period_data=spd)
-pcg = flopy.modflow.ModflowPcg(
- ml, hclose=1.0e-6, rclose=3.0e-3, mxiter=100, iter1=50
-)
+pcg = flopy.modflow.ModflowPcg(ml, hclose=1.0e-6, rclose=3.0e-3, mxiter=100, iter1=50)
# -
# Write the simulation 1 MODFLOW input files and run the model
@@ -293,9 +291,7 @@
iswizt=55,
)
oc = flopy.modflow.ModflowOc(ml2, stress_period_data=spd)
-pcg = flopy.modflow.ModflowPcg(
- ml2, hclose=1.0e-6, rclose=3.0e-3, mxiter=100, iter1=50
-)
+pcg = flopy.modflow.ModflowPcg(ml2, hclose=1.0e-6, rclose=3.0e-3, mxiter=100, iter1=50)
# -
# Write the simulation 2 MODFLOW input files and run the model
@@ -306,34 +302,26 @@
# Load the simulation 1 `ZETA` data and `ZETA` observations.
# read base model zeta
-zfile = flopy.utils.CellBudgetFile(
- os.path.join(ml.model_ws, f"{modelname}.zta")
-)
+zfile = flopy.utils.CellBudgetFile(os.path.join(ml.model_ws, f"{modelname}.zta"))
kstpkper = zfile.get_kstpkper()
zeta = []
for kk in kstpkper:
zeta.append(zfile.get_data(kstpkper=kk, text="ZETASRF 1")[0])
zeta = np.array(zeta)
# read swi obs
-zobs = np.genfromtxt(
- os.path.join(ml.model_ws, f"{modelname}.zobs.out"), names=True
-)
+zobs = np.genfromtxt(os.path.join(ml.model_ws, f"{modelname}.zobs.out"), names=True)
# Load the simulation 2 `ZETA` data and `ZETA` observations.
# read saltwater well model zeta
-zfile2 = flopy.utils.CellBudgetFile(
- os.path.join(ml2.model_ws, f"{modelname2}.zta")
-)
+zfile2 = flopy.utils.CellBudgetFile(os.path.join(ml2.model_ws, f"{modelname2}.zta"))
kstpkper = zfile2.get_kstpkper()
zeta2 = []
for kk in kstpkper:
zeta2.append(zfile2.get_data(kstpkper=kk, text="ZETASRF 1")[0])
zeta2 = np.array(zeta2)
# read swi obs
-zobs2 = np.genfromtxt(
- os.path.join(ml2.model_ws, f"{modelname2}.zobs.out"), names=True
-)
+zobs2 = np.genfromtxt(os.path.join(ml2.model_ws, f"{modelname2}.zobs.out"), names=True)
# Create arrays for the x-coordinates and the output years
@@ -397,22 +385,10 @@
ax.set_xlabel("Horizontal distance, in meters")
ax.set_ylabel("Elevation, in meters")
ax.text(
- 0.025,
- 0.55,
- "Layer 1",
- transform=ax.transAxes,
- va="center",
- ha="left",
- size="7",
+ 0.025, 0.55, "Layer 1", transform=ax.transAxes, va="center", ha="left", size="7"
)
ax.text(
- 0.025,
- 0.45,
- "Layer 2",
- transform=ax.transAxes,
- va="center",
- ha="left",
- size="7",
+ 0.025, 0.45, "Layer 2", transform=ax.transAxes, va="center", ha="left", size="7"
)
ax.text(
0.975,
@@ -455,22 +431,10 @@
ax.set_xlabel("Horizontal distance, in meters")
ax.set_ylabel("Elevation, in meters")
ax.text(
- 0.025,
- 0.55,
- "Layer 1",
- transform=ax.transAxes,
- va="center",
- ha="left",
- size="7",
+ 0.025, 0.55, "Layer 1", transform=ax.transAxes, va="center", ha="left", size="7"
)
ax.text(
- 0.025,
- 0.45,
- "Layer 2",
- transform=ax.transAxes,
- va="center",
- ha="left",
- size="7",
+ 0.025, 0.45, "Layer 2", transform=ax.transAxes, va="center", ha="left", size="7"
)
ax.text(
0.975,
@@ -513,22 +477,10 @@
ax.set_xlabel("Horizontal distance, in meters")
ax.set_ylabel("Elevation, in meters")
ax.text(
- 0.025,
- 0.55,
- "Layer 1",
- transform=ax.transAxes,
- va="center",
- ha="left",
- size="7",
+ 0.025, 0.55, "Layer 1", transform=ax.transAxes, va="center", ha="left", size="7"
)
ax.text(
- 0.025,
- 0.45,
- "Layer 2",
- transform=ax.transAxes,
- va="center",
- ha="left",
- size="7",
+ 0.025, 0.45, "Layer 2", transform=ax.transAxes, va="center", ha="left", size="7"
)
ax.text(
0.975,
@@ -553,14 +505,7 @@
tz2[i] = zobs["layer2_001"][i + 999]
if zobs2["layer2_001"][i + 999] < 20.0 - 0.1:
tz3[i] = zobs2["layer2_001"][i + 999]
-ax.plot(
- t,
- tz2,
- linestyle="solid",
- color="r",
- linewidth=0.75,
- label="Freshwater well",
-)
+ax.plot(t, tz2, linestyle="solid", color="r", linewidth=0.75, label="Freshwater well")
ax.plot(
t,
tz3,
@@ -576,22 +521,10 @@
ax.set_xlabel("Time, in years")
ax.set_ylabel("Elevation, in meters")
ax.text(
- 0.025,
- 0.55,
- "Layer 1",
- transform=ax.transAxes,
- va="center",
- ha="left",
- size="7",
+ 0.025, 0.55, "Layer 1", transform=ax.transAxes, va="center", ha="left", size="7"
)
ax.text(
- 0.025,
- 0.45,
- "Layer 2",
- transform=ax.transAxes,
- va="center",
- ha="left",
- size="7",
+ 0.025, 0.45, "Layer 2", transform=ax.transAxes, va="center", ha="left", size="7"
)
# -
@@ -609,9 +542,7 @@
modelxsect = flopy.plot.PlotCrossSection(
model=ml, line={"Row": 30}, extent=(0, 3050, -50, -10)
)
-modelxsect.plot_fill_between(
- zeta[4, :, :, :], colors=colors, ax=ax, edgecolors="none"
-)
+modelxsect.plot_fill_between(zeta[4, :, :, :], colors=colors, ax=ax, edgecolors="none")
linecollection = modelxsect.plot_grid(ax=ax)
ax.set_title(f"Recharge year {years[4]}")
diff --git a/.docs/Notebooks/swi2package_example5.py b/.docs/Notebooks/swi2package_example5.py
index 1482352bb6..2a54f70f4c 100644
--- a/.docs/Notebooks/swi2package_example5.py
+++ b/.docs/Notebooks/swi2package_example5.py
@@ -249,9 +249,7 @@
solver2params=solver2params,
)
oc = flopy.modflow.ModflowOc(ml, stress_period_data=ocspd)
-pcg = flopy.modflow.ModflowPcg(
- ml, hclose=1.0e-6, rclose=3.0e-3, mxiter=100, iter1=50
-)
+pcg = flopy.modflow.ModflowPcg(ml, hclose=1.0e-6, rclose=3.0e-3, mxiter=100, iter1=50)
# Write input files and run the SWI2 model.
@@ -382,9 +380,7 @@
)
wel = flopy.modflow.ModflowWel(m, stress_period_data=well_data)
oc = flopy.modflow.ModflowOc(m, save_every=365, save_types=["save head"])
-pcg = flopy.modflow.ModflowPcg(
- m, hclose=1.0e-5, rclose=3.0e-3, mxiter=100, iter1=50
-)
+pcg = flopy.modflow.ModflowPcg(m, hclose=1.0e-5, rclose=3.0e-3, mxiter=100, iter1=50)
# Create the basic MT3DMS model data
adv = flopy.mt3d.Mt3dAdv(
m,
@@ -496,24 +492,10 @@
# withdrawal and recovery titles
ax = axes.flatten()[0]
ax.text(
- 0.0,
- 1.03,
- "Withdrawal",
- transform=ax.transAxes,
- va="bottom",
- ha="left",
- size="8",
+ 0.0, 1.03, "Withdrawal", transform=ax.transAxes, va="bottom", ha="left", size="8"
)
ax = axes.flatten()[1]
-ax.text(
- 0.0,
- 1.03,
- "Recovery",
- transform=ax.transAxes,
- va="bottom",
- ha="left",
- size="8",
-)
+ax.text(0.0, 1.03, "Recovery", transform=ax.transAxes, va="bottom", ha="left", size="8")
# dummy items for legend
ax = axes.flatten()[2]
ax.plot(
@@ -608,11 +590,7 @@
zorder=30,
)
cc = ax.contourf(
- X,
- Z,
- conc[itime, :, :],
- levels=[0.0, 1.75, 33.250],
- colors=["w", "0.75", "w"],
+ X, Z, conc[itime, :, :], levels=[0.0, 1.75, 33.250], colors=["w", "0.75", "w"]
)
# set graph limits
ax.set_xlim(0, 500)
@@ -641,15 +619,7 @@
ctxt = f"{iyr} years"
else:
ctxt = f"{iyr} year"
- ax.text(
- 0.95,
- 0.925,
- ctxt,
- transform=ax.transAxes,
- va="top",
- ha="right",
- size="8",
- )
+ ax.text(0.95, 0.925, ctxt, transform=ax.transAxes, va="top", ha="right", size="8")
plt.show()
# -
diff --git a/.docs/Notebooks/uzf_example.py b/.docs/Notebooks/uzf_example.py
index ab206e0c39..00128f6a2b 100644
--- a/.docs/Notebooks/uzf_example.py
+++ b/.docs/Notebooks/uzf_example.py
@@ -19,7 +19,7 @@
# # Unsaturated Zone Flow (UZF) Package demo
# Demonstrates functionality of the flopy UZF module using the example from [Niswonger and others (2006)](https://pubs.usgs.gov/tm/2006/tm6a19/). This is the same as the SFR example problem from Prudic and others (2004;
-# p. 13–19), except the UZF package replaces the ET and RCH packages.
+# p. 13-19), except the UZF package replaces the ET and RCH packages.
#
# #### Problem description:
#
@@ -41,10 +41,12 @@
from pathlib import Path
from tempfile import TemporaryDirectory
+import git
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
+import pooch
proj_root = Path.cwd().parent.parent
@@ -62,13 +64,42 @@
# assumes executable is in users path statement
exe_name = "mf2005"
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
# Set up a temporary workspace.
# +
temp_dir = TemporaryDirectory()
path = Path(temp_dir.name)
-gpth = proj_root / "examples" / "data" / "mf2005_test" / "UZFtest2.*"
+file_names = [
+ "UZFtest2.ba6",
+ "UZFtest2.dis",
+ "UZFtest2.gag",
+ "UZFtest2.ghb",
+ "UZFtest2.lpf",
+ "UZFtest2.nam",
+ "UZFtest2.oc",
+ "UZFtest2.sfr",
+ "UZFtest2.sip",
+ "UZFtest2.uzf",
+ "UZFtest2.wel",
+]
+for fname in file_names:
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/mf2005_test/{fname}",
+ fname=fname,
+ path=data_path / "mf2005_test",
+ known_hash=None,
+ )
+gpth = data_path / "mf2005_test" / "UZFtest2.*"
for f in glob.glob(str(gpth)):
shutil.copy(f, path)
# -
@@ -102,7 +133,14 @@
# Read the ```irunbnd``` array from an external file.
# +
-irnbndpth = proj_root / "examples" / "data" / "uzf_examples" / "irunbnd.dat"
+fname = "irunbnd.dat"
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/uzf_examples/{fname}",
+ fname=fname,
+ path=data_path / "uzf_examples",
+ known_hash=None,
+)
+irnbndpth = data_path / "uzf_examples" / fname
irunbnd = np.loadtxt(irnbndpth)
fig = plt.figure(figsize=(8, 8))
@@ -116,7 +154,14 @@
# Define the ``vks`` (unsaturated zone vertical hydraulic conductivity) array.
# +
-vksbndpth = proj_root / "examples" / "data" / "uzf_examples" / "vks.dat"
+fname = "vks.dat"
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/uzf_examples/{fname}",
+ fname=fname,
+ path=data_path / "uzf_examples",
+ known_hash=None,
+)
+vksbndpth = data_path / "uzf_examples" / fname
vks = np.loadtxt(vksbndpth)
fig = plt.figure(figsize=(8, 8))
@@ -134,9 +179,14 @@
m.nrow_ncol_nlay_nper
# +
-finf = np.loadtxt(
- proj_root / "examples" / "data" / "uzf_examples" / "finf.dat"
+fname = "finf.dat"
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/uzf_examples/{fname}",
+ fname=fname,
+ path=data_path / "uzf_examples",
+ known_hash=None,
)
+finf = np.loadtxt(data_path / "uzf_examples" / "finf.dat")
finf = np.reshape(finf, (m.nper, m.nrow, m.ncol))
finf = {i: finf[i] for i in range(finf.shape[0])}
@@ -160,9 +210,14 @@
# Define `extwc` (extinction water content) array.
# +
-extwc = np.loadtxt(
- proj_root / "examples" / "data" / "uzf_examples" / "extwc.dat"
+fname = "extwc.dat"
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/uzf_examples/{fname}",
+ fname=fname,
+ path=data_path / "uzf_examples",
+ known_hash=None,
)
+extwc = np.loadtxt(data_path / "uzf_examples" / "extwc.dat")
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1, aspect="equal")
@@ -314,8 +369,7 @@
if avail:
df3 = pd.DataFrame(
- data,
- columns=["layer", "time", "head", "uzthick", "depth", "watercontent"],
+ data, columns=["layer", "time", "head", "uzthick", "depth", "watercontent"]
)
df3.head(41)
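The `finf` hunk above follows a common FloPy idiom: load a flat external array, reshape it to one 2-D array per stress period, and key the result by period number. A sketch with assumed dimensions (the notebook takes them from the loaded model via `m.nper`, `m.nrow`, `m.ncol`):

    import numpy as np

    nper, nrow, ncol = 12, 15, 10  # assumed dimensions

    finf = np.loadtxt("finf.dat")
    finf = np.reshape(finf, (nper, nrow, ncol))
    # per-period dictionary, the form ModflowUzf1 accepts for transient input
    finf = {per: finf[per] for per in range(nper)}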
diff --git a/.docs/Notebooks/vtk_pathlines_example.py b/.docs/Notebooks/vtk_pathlines_example.py
index 647c8d56f1..231ff3f6fc 100644
--- a/.docs/Notebooks/vtk_pathlines_example.py
+++ b/.docs/Notebooks/vtk_pathlines_example.py
@@ -26,6 +26,9 @@
# +
import sys
+import git
+import pooch
+
import flopy
print(sys.version)
@@ -41,7 +44,45 @@
mdl_name = "freyberg"
sim_name = f"mf6-{mdl_name}-vtk-pathlines"
-sim_path = Path.cwd().parent / "../examples/data" / f"mf6-{mdl_name}"
+
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+sim_path = data_path / f"mf6-{mdl_name}"
+file_names = {
+ "bot.asc": "3107f907cb027460fd40ffc16cb797a78babb31988c7da326c9f500fba855b62",
+ "description.txt": "94093335eec6a24711f86d4d217ccd5a7716dd9e01cb6b732bc7757d41675c09",
+ "freyberg.cbc": "c8ad843b1da753eb58cf6c462ac782faf0ca433d6dcb067742d8bd698db271e3",
+ "freyberg.chd": "d8b8ada8d3978daea1758b315be983b5ca892efc7d69bf6b367ceec31e0dd156",
+ "freyberg.dis": "cac230a207cc8483693f7ba8ae29ce40c049036262eac4cebe17a4e2347a8b30",
+ "freyberg.dis.grb": "c8c26fb1fa4b210208134b286d895397cf4b3131f66e1d9dda76338502c7e96a",
+ "freyberg.hds": "926a06411ca658a89db6b5686f51ddeaf5b74ced81239cab1d43710411ba5f5b",
+ "freyberg.ic": "6efb56ee9cdd704b9a76fb9efd6dae750facc5426b828713f2d2cf8d35194120",
+ "freyberg.ims": "6dddae087d85417e3cdaa13e7b24165afb7f9575ab68586f3adb6c1b2d023781",
+ "freyberg.nam": "cee9b7b000fe35d2df26e878d09d465250a39504f87516c897e3fa14dcda081e",
+ "freyberg.npf": "81104d3546045fff0eddf5059465e560b83b492fa5a5acad1907ce18c2b9c15f",
+ "freyberg.oc": "c0715acd75eabcc42c8c47260a6c1abd6c784350983f7e2e6009ddde518b80b8",
+ "freyberg.rch": "a6ec1e0eda14fd2cdf618a5c0243a9caf82686c69242b783410d5abbcf971954",
+ "freyberg.riv": "a8cafc8c317cbe2acbb43e2f0cfe1188cb2277a7a174aeb6f3e6438013de8088",
+ "freyberg.sto": "74d748c2f0adfa0a32ee3f2912115c8f35b91011995b70c1ec6ae1c627242c41",
+ "freyberg.tdis": "9965cbb17caf5b865ea41a4ec04bcb695fe15a38cb539425fdc00abbae385cbe",
+ "freyberg.wel": "f19847de455598de52c05a4be745698c8cb589e5acfb0db6ab1f06ded5ff9310",
+ "k11.asc": "b6a8aa46ef17f7f096d338758ef46e32495eb9895b25d687540d676744f02af5",
+ "mfsim.nam": "6b8d6d7a56c52fb2bff884b3979e3d2201c8348b4bbfd2b6b9752863cbc9975e",
+ "top.asc": "3ad2b131671b9faca7f74c1dd2b2f41875ab0c15027764021a89f9c95dccaa6a",
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/{fname}",
+ fname=fname,
+ path=sim_path,
+ known_hash=fhash,
+ )
sim = MFSimulation.load(sim_name=sim_name, sim_ws=sim_path)
# -
@@ -56,6 +97,7 @@
sim.set_sim_path(workspace)
# -
+
# Write the input files to the temporary workspace.
sim.write_simulation()
@@ -101,9 +143,7 @@
# +
import numpy as np
-wel_locs = [
- (rec[0][1], rec[0][2]) for rec in (gwf.wel.stress_period_data.data[0])
-]
+wel_locs = [(rec[0][1], rec[0][2]) for rec in (gwf.wel.stress_period_data.data[0])]
print(wel_locs)
# -
@@ -223,7 +263,7 @@ def fill_zone_1():
# +
tracks = {}
particle_ids = set()
-release_locs = list()
+release_locs = []
for i, t in enumerate(pathlines["time"]):
pid = str(round(float(pathlines["particleid"][i])))
@@ -263,11 +303,7 @@ def fill_zone_1():
start_labels.append(f"Particle {pid}")
p.add_point_labels(
- label_coords,
- start_labels,
- font_size=10,
- point_size=15,
- point_color="black",
+ label_coords, start_labels, font_size=10, point_size=15, point_color="black"
)
# zoom in and show the plot
@@ -323,7 +359,7 @@ def fill_zone_1():
# Show the GIF.
# +
-from IPython.core.display import Image
+from IPython.display import Image, display
display(Image(data=open(gif_path, "rb").read(), format="gif"))
# -
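The load-then-relocate pattern above keeps the fetched input set read-only: the simulation is loaded from the pooch cache, pointed at a scratch workspace, and only then written and run. A condensed sketch (paths assumed):

    from pathlib import Path
    from tempfile import TemporaryDirectory

    from flopy.mf6 import MFSimulation

    sim = MFSimulation.load(sim_name="mf6-freyberg", sim_ws="mf6-freyberg")
    tmp = TemporaryDirectory()
    sim.set_sim_path(Path(tmp.name))  # redirect all output away from the cache
    sim.write_simulation()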
diff --git a/.docs/Notebooks/zonebudget_example.py b/.docs/Notebooks/zonebudget_example.py
index ea9ee0bc04..0dada66711 100644
--- a/.docs/Notebooks/zonebudget_example.py
+++ b/.docs/Notebooks/zonebudget_example.py
@@ -28,10 +28,12 @@
from pathlib import Path
from tempfile import TemporaryDirectory
+import git
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
+import pooch
proj_root = Path.cwd().parent.parent
@@ -48,9 +50,28 @@
temp_dir = TemporaryDirectory()
workspace = Path(temp_dir.name)
+# Check if we are in the repository and define the data path.
+
+try:
+ root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+ root = None
+
+data_path = root / "examples" / "data" if root else Path.cwd()
+
+folder_name = "zonbud_examples"
+
+fname = "freyberg.gitcbc"
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{folder_name}/{fname}",
+ fname=fname,
+ path=data_path / folder_name,
+ known_hash=None,
+)
+
# Set path to example datafiles
-loadpth = proj_root / "examples" / "data" / "zonbud_examples"
-cbc_f = loadpth / "freyberg.gitcbc"
+loadpth = data_path / "zonbud_examples"
+cbc_f = loadpth / fname
# -
# ### Read File Containing Zones
@@ -59,6 +80,14 @@
# +
from flopy.utils import ZoneBudget
+fname = "zonef_mlt.zbr"
+pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{folder_name}/{fname}",
+ fname=fname,
+ path=data_path / folder_name,
+ known_hash=None,
+)
+
zone_file = loadpth / "zonef_mlt.zbr"
zon = ZoneBudget.read_zone_file(zone_file)
nlay, nrow, ncol = zon.shape
@@ -219,9 +248,7 @@
zb.get_budget(names=["STORAGE", "WELLS"], zones=["SURF", "UFA"], net=True)
# -
-df = zb.get_dataframes(
- names=["STORAGE", "WELLS"], zones=["SURF", "UFA"], net=True
-)
+df = zb.get_dataframes(names=["STORAGE", "WELLS"], zones=["SURF", "UFA"], net=True)
df.head(6)
@@ -270,12 +297,7 @@ def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
vertical_alignment = "bottom"
horizontal_alignment = "center"
ax.text(
- x,
- y,
- label,
- ha=horizontal_alignment,
- va=vertical_alignment,
- rotation=90,
+ x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90
)
for i, rect in enumerate(rects_out):
@@ -286,12 +308,7 @@ def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
vertical_alignment = "top"
horizontal_alignment = "center"
ax.text(
- x,
- y,
- label,
- ha=horizontal_alignment,
- va=vertical_alignment,
- rotation=90,
+ x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90
)
# horizontal line indicating zero
@@ -315,9 +332,7 @@ def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
for idx, t in enumerate(times):
ax = fig.add_subplot(1, len(times), idx + 1)
- zb = flopy.utils.ZoneBudget(
- cbc_f, zon, kstpkper=None, totim=t, aliases=aliases
- )
+ zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=None, totim=t, aliases=aliases)
recname = "STORAGE"
values_in = zb.get_dataframes(names=f"FROM_{recname}").T.squeeze()
@@ -345,7 +360,39 @@ def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
mf6_exe = "mf6"
zb6_exe = "zbud6"
-sim_ws = proj_root / "examples" / "data" / "mf6-freyberg"
+sim_name = "mf6-freyberg"
+sim_ws = data_path / sim_name
+file_names = {
+ "bot.asc": "3107f907cb027460fd40ffc16cb797a78babb31988c7da326c9f500fba855b62",
+ "description.txt": "94093335eec6a24711f86d4d217ccd5a7716dd9e01cb6b732bc7757d41675c09",
+ "freyberg.cbc": "c8ad843b1da753eb58cf6c462ac782faf0ca433d6dcb067742d8bd698db271e3",
+ "freyberg.chd": "d8b8ada8d3978daea1758b315be983b5ca892efc7d69bf6b367ceec31e0dd156",
+ "freyberg.dis": "cac230a207cc8483693f7ba8ae29ce40c049036262eac4cebe17a4e2347a8b30",
+ "freyberg.dis.grb": "c8c26fb1fa4b210208134b286d895397cf4b3131f66e1d9dda76338502c7e96a",
+ "freyberg.hds": "926a06411ca658a89db6b5686f51ddeaf5b74ced81239cab1d43710411ba5f5b",
+ "freyberg.ic": "6efb56ee9cdd704b9a76fb9efd6dae750facc5426b828713f2d2cf8d35194120",
+ "freyberg.ims": "6dddae087d85417e3cdaa13e7b24165afb7f9575ab68586f3adb6c1b2d023781",
+ "freyberg.nam": "cee9b7b000fe35d2df26e878d09d465250a39504f87516c897e3fa14dcda081e",
+ "freyberg.npf": "81104d3546045fff0eddf5059465e560b83b492fa5a5acad1907ce18c2b9c15f",
+ "freyberg.oc": "c0715acd75eabcc42c8c47260a6c1abd6c784350983f7e2e6009ddde518b80b8",
+ "freyberg.rch": "a6ec1e0eda14fd2cdf618a5c0243a9caf82686c69242b783410d5abbcf971954",
+ "freyberg.riv": "a8cafc8c317cbe2acbb43e2f0cfe1188cb2277a7a174aeb6f3e6438013de8088",
+ "freyberg.sto": "74d748c2f0adfa0a32ee3f2912115c8f35b91011995b70c1ec6ae1c627242c41",
+ "freyberg.tdis": "9965cbb17caf5b865ea41a4ec04bcb695fe15a38cb539425fdc00abbae385cbe",
+ "freyberg.wel": "f19847de455598de52c05a4be745698c8cb589e5acfb0db6ab1f06ded5ff9310",
+ "k11.asc": "b6a8aa46ef17f7f096d338758ef46e32495eb9895b25d687540d676744f02af5",
+ "mfsim.nam": "6b8d6d7a56c52fb2bff884b3979e3d2201c8348b4bbfd2b6b9752863cbc9975e",
+ "top.asc": "3ad2b131671b9faca7f74c1dd2b2f41875ab0c15027764021a89f9c95dccaa6a",
+}
+for fname, fhash in file_names.items():
+ pooch.retrieve(
+ url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/{fname}",
+ fname=fname,
+ path=sim_ws,
+ known_hash=fhash,
+ )
+
+
cpth = workspace / "zbud6"
cpth.mkdir()
@@ -395,9 +442,7 @@ def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
mt = ml.modeltime
# budget recarray must be pivoted to get volumetric budget!
-zonbud.get_volumetric_budget(
- mt, recarray=zonbud.get_budget(net=True, pivot=True)
-)
+zonbud.get_volumetric_budget(mt, recarray=zonbud.get_budget(net=True, pivot=True))
# -
try:
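As the comment in the hunk above notes, `get_volumetric_budget` needs the pivoted form of the budget recarray; a condensed sketch, assuming the zone file, budget file, and model files are in the working directory:

    import flopy
    from flopy.utils import ZoneBudget

    zon = ZoneBudget.read_zone_file("zonef_mlt.zbr")
    zb = ZoneBudget("freyberg.gitcbc", zon)
    # pivot=True reshapes the recarray so each record holds one zone/term pair
    recarray = zb.get_budget(net=True, pivot=True)
    # modeltime supplies the period/step lengths for the volumetric conversion
    ml = flopy.modflow.Modflow.load("freyberg.nam", model_ws=".")
    vol = zb.get_volumetric_budget(ml.modeltime, recarray=recarray)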
diff --git a/.docs/create_rstfiles.py b/.docs/create_rstfiles.py
index 6f705b092d..fc37bdfcc9 100644
--- a/.docs/create_rstfiles.py
+++ b/.docs/create_rstfiles.py
@@ -34,11 +34,7 @@ def create_tutorials_rst():
rst_path = project_root_path / ".docs" / "tutorials.rst"
nbs_path = project_root_path / ".docs" / "Notebooks"
filenames = sorted(
- [
- path.name
- for path in nbs_path.rglob("*.py")
- if "tutorial" in path.name
- ]
+ [path.name for path in nbs_path.rglob("*.py") if "tutorial" in path.name]
)
print(f"Creating {rst_path}")
@@ -77,11 +73,7 @@ def create_examples_rst():
rst_path = project_root_path / ".docs" / "examples.rst"
nbs_path = project_root_path / ".docs" / "Notebooks"
filenames = sorted(
- [
- path.name
- for path in nbs_path.rglob("*.py")
- if "example" in path.name
- ]
+ [path.name for path in nbs_path.rglob("*.py") if "example" in path.name]
)
print(f"Creating {rst_path}")
@@ -98,15 +90,12 @@ def create_examples_rst():
"flopy": {"title": "Other FloPy features", "files": []},
"mf6": {"title": "MODFLOW 6 examples", "files": []},
"mfusg": {"title": "MODFLOW USG examples", "files": []},
- "mf2005": {
- "title": "MODFLOW-2005/MODFLOW-NWT examples",
- "files": [],
- },
+ "mf2005": {"title": "MODFLOW-2005/MODFLOW-NWT examples", "files": []},
"modpath": {"title": "MODPATH examples", "files": []},
"mt3d": {"title": "MT3D and SEAWAT examples", "files": []},
"2016gw-paper": {
"title": "Examples from Bakker and others (2016)",
- "description": "Bakker, Mark, Post, Vincent, Langevin, C. D., Hughes, J. D., White, J. T., Starn, J. J. and Fienen, M. N., 2016, Scripting MODFLOW Model Development Using Python and FloPy: Groundwater, v. 54, p. 733–739, https://doi.org/10.1111/gwat.12413.",
+ "description": "Bakker, Mark, Post, Vincent, Langevin, C. D., Hughes, J. D., White, J. T., Starn, J. J. and Fienen, M. N., 2016, Scripting MODFLOW Model Development Using Python and FloPy: Groundwater, v. 54, p. 733–739, https://doi.org/10.1111/gwat.12413.", # noqa: RUF001
"files": [],
},
"2023gw-paper": {
diff --git a/.docs/examples.rst b/.docs/examples.rst
index 14284e0d76..9666112cef 100644
--- a/.docs/examples.rst
+++ b/.docs/examples.rst
@@ -3,6 +3,11 @@ Examples gallery
The following examples illustrate the functionality of FloPy. After the `tutorials `_, the examples are the best resource for learning the underlying capabilities of FloPy.
+Both the basic set of MODFLOW executables and the `optional` dependency group are required to run the tutorials.
+
+If the tutorial/example scripts detect that they are running within the repository, they will use local example data.
+Otherwise they will download example data files from GitHub where necessary.
+
Preprocessing and Discretization
--------------------------------
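The detection described above is the try/except block repeated at the top of each notebook; consolidated, it reads:

    from pathlib import Path

    import git

    try:
        # resolve the repository root when running inside a clone
        root = Path(git.Repo(".", search_parent_directories=True).working_dir)
    except Exception:  # not inside a git repository
        root = None

    # inside the repo, use the bundled example data; otherwise pooch
    # downloads anything missing relative to the current directory
    data_path = root / "examples" / "data" if root else Path.cwd()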
diff --git a/.docs/groundwater_paper/scripts/uspb_capture.py b/.docs/groundwater_paper/scripts/uspb_capture.py
index 591ce5277e..8830e68a5b 100644
--- a/.docs/groundwater_paper/scripts/uspb_capture.py
+++ b/.docs/groundwater_paper/scripts/uspb_capture.py
@@ -18,9 +18,7 @@ def cf_model(model, k, i, j, base, Q=-100):
wel.write_file()
model.run_model(silent=True)
# get the results
- hedObj = flopy.utils.HeadFile(
- os.path.join(cf_pth, "DG.hds"), precision="double"
- )
+ hedObj = flopy.utils.HeadFile(os.path.join(cf_pth, "DG.hds"), precision="double")
cbcObj = flopy.utils.CellBudgetFile(
os.path.join(cf_pth, "DG.cbc"), precision="double"
)
@@ -32,9 +30,7 @@ def cf_model(model, k, i, j, base, Q=-100):
v[idx] = np.nan
else:
v1 = cbcObj.get_data(kstpkper=kon, text="DRAINS", full3D=True)[0]
- v2 = cbcObj.get_data(
- kstpkper=kon, text="STREAM LEAKAGE", full3D=True
- )[0]
+ v2 = cbcObj.get_data(kstpkper=kon, text="STREAM LEAKAGE", full3D=True)[0]
v3 = cbcObj.get_data(kstpkper=kon, text="ET", full3D=True)[0]
v[idx] = ((v1.sum() + v2.sum() + v3.sum()) - base) / (-Q)
return v
@@ -58,9 +54,7 @@ def cf_model(model, k, i, j, base, Q=-100):
ml.run_model()
# get base model results
-cbcObj = flopy.utils.CellBudgetFile(
- os.path.join(cf_pth, "DG.cbc"), precision="double"
-)
+cbcObj = flopy.utils.CellBudgetFile(os.path.join(cf_pth, "DG.cbc"), precision="double")
v1 = cbcObj.get_data(kstpkper=(0, 0), text="DRAINS", full3D=True)[0]
v2 = cbcObj.get_data(kstpkper=(0, 0), text="STREAM LEAKAGE", full3D=True)[0]
v3 = cbcObj.get_data(kstpkper=(0, 0), text="ET", full3D=True)[0]
@@ -103,16 +97,8 @@ def cf_model(model, k, i, j, base, Q=-100):
# write some summary information
fs.write(f"Problem size: {nrow} rows and {ncol} columns.\n")
-fs.write(
- "Capture fraction analysis performed every {} rows and columns.\n".format(
- nstep
- )
-)
-fs.write(
- "Maximum number of analyses: {} rows and {} columns.\n".format(
- nrow2, ncol2
- )
-)
+fs.write(f"Capture fraction analysis performed every {nstep} rows and columns.\n")
+fs.write(f"Maximum number of analyses: {nrow2} rows and {ncol2} columns.\n")
# create array to store capture fraction data (subset of model)
cf_array = np.empty((10, nrow2, ncol2), dtype=float)
@@ -131,9 +117,7 @@ def cf_model(model, k, i, j, base, Q=-100):
if ibound[i, j] < 1:
sys.stdout.write(".")
else:
- line = "\nrow {} of {} - col {} of {}\n".format(
- icnt + 1, nrow2, jcnt + 1, ncol2
- )
+ line = f"\nrow {icnt + 1} of {nrow2} - col {jcnt + 1} of {ncol2}\n"
fs.write(line)
sys.stdout.write(line)
s0 = time.time()
@@ -167,7 +151,7 @@ def cf_model(model, k, i, j, base, Q=-100):
fs.close()
# clean up working directory
-filelist = [f for f in os.listdir(cf_pth)]
+filelist = list(os.listdir(cf_pth))
for f in filelist:
os.remove(os.path.join(cf_pth, f))
@@ -175,9 +159,6 @@ def cf_model(model, k, i, j, base, Q=-100):
if not os.path.exists(res_pth):
os.makedirs(res_pth)
for idx in range(10):
- fn = os.path.join(
- res_pth,
- f"USPB_capture_fraction_{nstep:02d}_{idx + 1:02d}.dat",
- )
+ fn = os.path.join(res_pth, f"USPB_capture_fraction_{nstep:02d}_{idx + 1:02d}.dat")
print(f"saving capture fraction data to...{os.path.basename(fn)}")
np.savetxt(fn, cf_array[idx, :, :], delimiter=" ")
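Most of the churn in the two capture scripts is the same mechanical rewrite: multi-line `str.format` calls collapsed into f-strings that fit the line-length limit. One representative pair, with an illustrative value:

    nstep = 5

    # before
    msg = "Capture fraction analysis performed every {} rows and columns.\n".format(nstep)
    # after: the equivalent f-string
    msg = f"Capture fraction analysis performed every {nstep} rows and columns.\n"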
diff --git a/.docs/groundwater_paper/scripts/uspb_capture_par.py b/.docs/groundwater_paper/scripts/uspb_capture_par.py
index 1803ba5fe2..8f90ddccde 100644
--- a/.docs/groundwater_paper/scripts/uspb_capture_par.py
+++ b/.docs/groundwater_paper/scripts/uspb_capture_par.py
@@ -43,9 +43,7 @@ def load_base_model(klay):
def get_baseQ(model):
- sys.stdout.write(
- "\nrunning base model to get base head-dependent flow\n\n"
- )
+ sys.stdout.write("\nrunning base model to get base head-dependent flow\n\n")
success, report = model.run_model(silent=True, report=True)
sys.stdout.write(f"Base model run: {report[-3]}\n")
@@ -54,9 +52,7 @@ def get_baseQ(model):
os.path.join(model.model_ws, "DG.cbc"), precision=precision
)
v1 = cbcObj.get_data(kstpkper=(0, 0), text="DRAINS", full3D=True)[0]
- v2 = cbcObj.get_data(kstpkper=(0, 0), text="STREAM LEAKAGE", full3D=True)[
- 0
- ]
+ v2 = cbcObj.get_data(kstpkper=(0, 0), text="STREAM LEAKAGE", full3D=True)[0]
v3 = cbcObj.get_data(kstpkper=(0, 0), text="ET", full3D=True)[0]
return v1.sum() + v2.sum() + v3.sum()
@@ -96,20 +92,14 @@ def copy_files(ml, nproc):
(1, 99): ["save head", "save budget", "print budget"],
(1, 100): [],
}
- oc = flopy.modflow.ModflowOc(
- ml, stress_period_data=stress_period_data
- )
+ oc = flopy.modflow.ModflowOc(ml, stress_period_data=stress_period_data)
# write the input files
ml.write_input()
else:
if not os.path.exists(cf_pths[idx]):
os.makedirs(cf_pths[idx])
- filelist = [f for f in os.listdir(cf_pths[0])]
- sys.stdout.write(
- "copying files from {} to {}\n".format(
- cf_pths[0], cf_pths[idx]
- )
- )
+ filelist = list(os.listdir(cf_pths[0]))
+ sys.stdout.write(f"copying files from {cf_pths[0]} to {cf_pths[idx]}\n")
for f in filelist:
if os.path.splitext(f)[1].lower() in exclude:
continue
@@ -179,9 +169,7 @@ def cf_model(imod, ion, nmax, k, i, j, Qt, base, hdry):
sys.stdout.write(f" model number {imod} working directory: {pth}\n")
make_well(pth, k, i, j, Qt)
success, elt = run_model(pth)
- line = "\nModel run: {} of {} (model number {})\n".format(
- ion + 1, nmax, imod
- )
+ line = f"\nModel run: {ion + 1} of {nmax} (model number {imod})\n"
line += f" row {i + 1} - col {j + 1}\n"
line += f" {elt}\n"
# get the results
@@ -200,26 +188,18 @@ def cf_model(imod, ion, nmax, k, i, j, Qt, base, hdry):
if h[idx, 1] == hdry:
v[idx] = np.nan
else:
- v1 = cbcObj.get_data(
- kstpkper=kon, text="DRAINS", full3D=True
- )[0]
+ v1 = cbcObj.get_data(kstpkper=kon, text="DRAINS", full3D=True)[0]
v2 = cbcObj.get_data(
kstpkper=kon, text="STREAM LEAKAGE", full3D=True
)[0]
- v3 = cbcObj.get_data(kstpkper=kon, text="ET", full3D=True)[
- 0
- ]
+ v3 = cbcObj.get_data(kstpkper=kon, text="ET", full3D=True)[0]
v[idx] = ((v1.sum() + v2.sum() + v3.sum()) - base) / (-Qt)
except:
- line += " Error: Model run: {} of {} (model number {}) - ".format(
- ion + 1, nmax, imod
- )
+ line += f" Error: Model run: {ion + 1} of {nmax} (model number {imod}) - "
line += "could not process model results.\n"
v[:] = np.nan
else:
- line += " Error: Model run: {} of {} (model number {}) ".format(
- ion + 1, nmax, imod
- )
+ line += f" Error: Model run: {ion + 1} of {nmax} (model number {imod}) "
line += "did not execute successfully\n"
v[:] = np.nan
sys.stdout.write(line)
@@ -232,15 +212,11 @@ def doit():
ncores = mp.cpu_count()
if nproc > ncores:
sys.stdout.write(
- "Requested {} cores but only {} cores are available.\n\n\n".format(
- nproc, ncores
- )
+ f"Requested {nproc} cores but only {ncores} cores are available.\n\n\n"
)
else:
sys.stdout.write(
- "Requested {} cores and {} cores are available.\n\n\n".format(
- nproc, ncores
- )
+ f"Requested {nproc} cores and {ncores} cores are available.\n\n\n"
)
# paths
@@ -266,24 +242,12 @@ def doit():
ncol2 = ncol // nstep
# open summary file
- fs = open(
- os.path.join("data", "uspb", f"uspb_capture_{nstep}.out"),
- "w",
- 0,
- )
+ fs = open(os.path.join("data", "uspb", f"uspb_capture_{nstep}.out"), "w", 1)  # line buffering; 0 (unbuffered) is invalid for text mode in Python 3
# write some summary information
fs.write(f"Problem size: {nrow} rows and {ncol} columns.\n")
- fs.write(
- "Capture fraction analysis performed every {} rows and columns.\n".format(
- nstep
- )
- )
- fs.write(
- "Maximum number of analyses: {} rows and {} columns.\n".format(
- nrow2, ncol2
- )
- )
+ fs.write(f"Capture fraction analysis performed every {nstep} rows and columns.\n")
+ fs.write(f"Maximum number of analyses: {nrow2} rows and {ncol2} columns.\n")
# create array to store capture fraction data (subset of model)
cf_array = np.empty((10, nrow2, ncol2), dtype=float)
@@ -346,7 +310,7 @@ def doit():
# clean up working directories
for idx in range(nproc):
- filelist = [f for f in os.listdir(cf_pths[idx])]
+ filelist = list(os.listdir(cf_pths[idx]))
for f in filelist:
os.remove(os.path.join(cf_pths[idx], f))
@@ -355,14 +319,9 @@ def doit():
os.makedirs(res_pth)
for idx in range(10):
fn = os.path.join(
- res_pth,
- f"USPB_capture_fraction_{nstep:02d}_{idx + 1:02d}.dat",
- )
- sys.stdout.write(
- "saving capture fraction data to...{}\n".format(
- os.path.basename(fn)
- )
+ res_pth, f"USPB_capture_fraction_{nstep:02d}_{idx + 1:02d}.dat"
)
+ sys.stdout.write(f"saving capture fraction data to...{os.path.basename(fn)}\n")
np.savetxt(fn, cf_array[idx, :, :], delimiter=" ")
diff --git a/.docs/md/version_changes.md b/.docs/md/version_changes.md
index 42fe779c7d..d4213be081 100644
--- a/.docs/md/version_changes.md
+++ b/.docs/md/version_changes.md
@@ -1,4 +1,34 @@
# Changelog
+### Version 3.9.0
+
+#### New features
+
+* [feat(plot_centers)](https://github.com/modflowpy/flopy/commit/c6a41abb496039b65fbe52fd6c3f2df011e492be): Add plot_centers support to PlotMapView and PlotCrossSection (#2318). Committed by Joshua Larsen on 2024-10-07.
+* [feat(get-modflow)](https://github.com/modflowpy/flopy/commit/d800ce5638e7a0983985881f2fa5d37207e3560b): Support windows extended build (#2356). Committed by mjreno on 2024-11-06.
+* [feat(binaryfile)](https://github.com/modflowpy/flopy/commit/4eac63176f1328a22a55e1cf131db55b8ca929a8): Add head/budget file reversal script (#2383). Committed by wpbonelli on 2024-11-27.
+
+#### Bug fixes
+
+* [fix(ZoneFile6.load)](https://github.com/modflowpy/flopy/commit/99d57e6fd0d0d41c364f9c76f7800ec3be0d179a): Add split statement to input read (#2330). Committed by Joshua Larsen on 2024-10-09.
+* [fix(resample_to_grid)](https://github.com/modflowpy/flopy/commit/15f1b94a45487e42aa36afdd2abb2b111c2197a6): Fix unintended extrapolation (#2331). Committed by Joshua Larsen on 2024-10-09.
+* [fix(utils)](https://github.com/modflowpy/flopy/commit/558f4a8c9d17241e21e7d41c3a75a9a6380a9fb1): Exclude ncf from mf6 output utils (#2336). Committed by mjreno on 2024-10-16.
+* [fix(masked_4D_arrays)](https://github.com/modflowpy/flopy/commit/332a310ef0b43130fd2f37cf7cd4abe954484524): Allow re-use of preceding spd data if empty (#2314). Committed by martclanor on 2024-10-20.
+* [fix(gridintersect)](https://github.com/modflowpy/flopy/commit/34043ab58eb254feee0c2f47cb63e057905b49d7): Fix multiple issues (#2343). Committed by Davíd Brakenhoff on 2024-10-25.
+
+#### Refactoring
+
+* [refactor(PackageContainer)](https://github.com/modflowpy/flopy/commit/f378f84a5677b99191ce178bb1c5b67ac1d1bd66): Compose not inherit, deprecate methods (#2324). Committed by Marnix on 2024-10-14.
+* [refactor(Modpath7.create_mp7)](https://github.com/modflowpy/flopy/commit/3a2c4946a047e20e35341d7e4266f8dae3ac5316): Expose porosity parameter of Modpath7Bas (#2340). Committed by martclanor on 2024-10-20.
+* [refactor(gridintersect)](https://github.com/modflowpy/flopy/commit/f8810c20097004a940e96ee0f2bc0229366a3899): Clean up gridintersect (#2346). Committed by Davíd Brakenhoff on 2024-10-24.
+* [refactor(Mf6Splitter)](https://github.com/modflowpy/flopy/commit/acc5a5b6580bdd6e93a24970db7d0b62d54e2485): Added split_multi_model method (#2352). Committed by Joshua Larsen on 2024-11-06.
+* [refactor(mf6)](https://github.com/modflowpy/flopy/commit/4e0906426ef70235a7546c31d263904fa3249a9d): Deprecate mf6 checks (#2357). Committed by wpbonelli on 2024-11-06.
+* [refactor](https://github.com/modflowpy/flopy/commit/a5545c6fc23c2ea1476f5b5650ce294ae9d2509f): Apply suggestions from pyupgrade (#2361). Committed by Mike Taves on 2024-11-11.
+* [refactor](https://github.com/modflowpy/flopy/commit/bb9824e8aaea04a43d911724445cf0610301d236): Fix long lines to resolve check E501 (#2368). Committed by Mike Taves on 2024-11-14.
+* [refactor](https://github.com/modflowpy/flopy/commit/373b82d3b864fe54bf5675e2a1aabbe4b6eee58e): Resolve ruff check F821 for undefined names (#2374). Committed by Mike Taves on 2024-11-18.
+* [refactor](https://github.com/modflowpy/flopy/commit/22b5992ccd0e8280ee831611e8823bdb0d22ae3b): Apply fixes for flake8 comprehensions (C4) (#2376). Committed by Mike Taves on 2024-11-18.
+* [refactor(deprecations)](https://github.com/modflowpy/flopy/commit/1993af155f9db97f5e4fb1a0090926e4cb65cf19): Deprecate flopy.mf6.utils.reference module (#2375). Committed by wpbonelli on 2024-11-19.
+* [refactor](https://github.com/modflowpy/flopy/commit/4c1bf6cb486f8bd5bd9d25c5c7cf159fc65ecd4b): Apply Ruff-specific rule checks (#2377). Committed by Mike Taves on 2024-11-22.
+
### Version 3.8.2
#### Bug fixes
diff --git a/.docs/pysrc/tutorial2.py b/.docs/pysrc/tutorial2.py
index b045ff9f7e..4c08fd3853 100644
--- a/.docs/pysrc/tutorial2.py
+++ b/.docs/pysrc/tutorial2.py
@@ -108,9 +108,7 @@
"print head",
"print budget",
]
-oc = flopy.modflow.ModflowOc(
- mf, stress_period_data=stress_period_data, compact=True
-)
+oc = flopy.modflow.ModflowOc(mf, stress_period_data=stress_period_data, compact=True)
# Write the model input files
mf.write_input()
diff --git a/.docs/tutorials.rst b/.docs/tutorials.rst
index 7b25255048..56a7b37733 100644
--- a/.docs/tutorials.rst
+++ b/.docs/tutorials.rst
@@ -3,6 +3,11 @@ Tutorials
The following tutorials demonstrate basic FloPy features and usage with MODFLOW 2005, MODFLOW 6, and related programs.
+Both the basic set of MODFLOW executables and the `optional` dependency group are required to run the tutorials.
+
+If the tutorial/example scripts detect that they are running within the repository, they will use local example data.
+Otherwise, they will download example data files from GitHub as needed.
+
FloPy
-----
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 0000000000..1d0306afdc
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,20 @@
+# Introduce end-of-line normalization (#913)
+de38a5bb9559a4acb14317a9038ce8267c432c03
+
+# Initial GitHub actions for linting (#954)
+675d12b07b1c7f93c2bce9da29790cf364d52505
+
+# apply consistent multi-line string formatting (#1166)
+2912b1dfc3fbfc926128702f222b7b1ca9f15c6c
+
+# use f-strings to format str (#1212)
+5d4324ddb6ed5abad231295d63e28cdf779e5e07
+
+# consistently use relative imports (#1330)
+16b84e8e7933b98c63eaa4fbeb47f6e28a45d611
+
+# switch to ruff, ignore known test warnings, numpy compat fixes (#2124)
+b142b081516e6ac78e426257c7472dd4a9994b89
+
+# reformat codebase with longer line length (#2362)
+12a3bcd1ba8b229e00e2da8a448e7866c16fdb29
diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
index a3de02b4df..b62f9a185a 100644
--- a/.github/workflows/benchmark.yml
+++ b/.github/workflows/benchmark.yml
@@ -12,7 +12,7 @@ jobs:
fail-fast: false
matrix:
os: [ ubuntu-latest, macos-latest, windows-latest ]
- python-version: [ 3.8, 3.9, "3.10", "3.11", "3.12" ]
+ python-version: [ 3.9, "3.10", "3.11", "3.12" ]
defaults:
run:
shell: bash -l {0}
@@ -23,7 +23,7 @@ jobs:
uses: actions/checkout@v4
- name: Setup Micromamba
- uses: mamba-org/setup-micromamba@v1
+ uses: mamba-org/setup-micromamba@v2
with:
environment-file: etc/environment.yml
cache-environment: true
diff --git a/.github/workflows/commit.yml b/.github/workflows/commit.yml
index 07e7b73bcc..68f310fa10 100644
--- a/.github/workflows/commit.yml
+++ b/.github/workflows/commit.yml
@@ -70,6 +70,9 @@ jobs:
- name: Check format
run: ruff format . --check
+ - name: Check spelling
+ run: codespell
+
- name: Check CITATION.cff
run: |
cffconvert --validate
@@ -121,7 +124,7 @@ jobs:
- name: Upload coverage
if: github.repository_owner == 'modflowpy' && (github.event_name == 'push' || github.event_name == 'pull_request')
- uses: codecov/codecov-action@v3
+ uses: codecov/codecov-action@v5
with:
files: autotest/coverage.xml
@@ -133,11 +136,11 @@ jobs:
fail-fast: false
matrix:
os: [ ubuntu-latest, macos-latest, windows-latest ]
- python-version: [ 3.8, 3.9, "3.10", "3.11", "3.12" ]
+ python-version: [ 3.9, "3.10", "3.11", "3.12" ]
defaults:
run:
shell: bash -l {0}
- timeout-minutes: 45
+ timeout-minutes: 60
steps:
- name: Checkout repo
@@ -181,10 +184,13 @@ jobs:
working-directory: autotest
run: |
pytest -v -m="not example" -n=auto --cov=flopy --cov-append --cov-report=xml --durations=0 --keep-failed=.failed --dist loadfile
- coverage report
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ - name: Report coverage
+ working-directory: autotest
+ run: coverage report
+
- name: Upload failed test outputs
uses: actions/upload-artifact@v4
if: failure()
@@ -194,6 +200,6 @@ jobs:
- name: Upload coverage
if: github.repository_owner == 'modflowpy' && (github.event_name == 'push' || github.event_name == 'pull_request')
- uses: codecov/codecov-action@v3
+ uses: codecov/codecov-action@v5
with:
files: autotest/coverage.xml
diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml
index 590a1578f3..04ccd791ff 100644
--- a/.github/workflows/examples.yml
+++ b/.github/workflows/examples.yml
@@ -12,7 +12,7 @@ jobs:
fail-fast: false
matrix:
os: [ ubuntu-latest, macos-latest, windows-latest ]
- python-version: [ 3.8, 3.9, "3.10", "3.11", "3.12" ]
+ python-version: [ 3.9, "3.10", "3.11", "3.12" ]
defaults:
run:
shell: bash -l {0}
@@ -22,7 +22,7 @@ jobs:
uses: actions/checkout@v4
- name: Setup Micromamba
- uses: mamba-org/setup-micromamba@v1
+ uses: mamba-org/setup-micromamba@v2
with:
environment-file: etc/environment.yml
cache-environment: true
diff --git a/.github/workflows/mf6.yml b/.github/workflows/mf6.yml
index 72011cdfc4..50a3ccce12 100644
--- a/.github/workflows/mf6.yml
+++ b/.github/workflows/mf6.yml
@@ -80,7 +80,7 @@ jobs:
- name: Upload coverage to Codecov
if:
github.repository_owner == 'modflowpy' && (github.event_name == 'push' || github.event_name == 'pull_request')
- uses: codecov/codecov-action@v3
+ uses: codecov/codecov-action@v5
with:
files: ./modflow6/autotest/coverage.xml
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 0dd186bedc..4b482bb4ee 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -173,7 +173,7 @@ jobs:
# actions/download-artifact won't look at previous workflow runs but we need to in order to get changelog
- name: Download artifacts
- uses: dawidd6/action-download-artifact@v6
+ uses: dawidd6/action-download-artifact@v7
- name: Draft release
env:
diff --git a/CITATION.cff b/CITATION.cff
index 316641a089..4509967430 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -3,8 +3,8 @@ message: If you use this software, please cite both the article from preferred-c
references, and the software itself.
type: software
title: FloPy
-version: 3.8.2
-date-released: '2024-10-03'
+version: 3.9.0
+date-released: '2024-12-20'
doi: 10.5066/F7BK19FH
abstract: A Python package to create, run, and post-process MODFLOW-based models.
repository-artifact: https://pypi.org/project/flopy
diff --git a/DEVELOPER.md b/DEVELOPER.md
index dcb2f9a113..0782ca4787 100644
--- a/DEVELOPER.md
+++ b/DEVELOPER.md
@@ -5,34 +5,37 @@ This document describes how to set up a FloPy development environment, run the e
-- [Requirements & installation](#requirements--installation)
- - [Git](#git)
- - [Python](#python)
- - [Python IDEs](#python-ides)
- - [Visual Studio Code](#visual-studio-code)
- - [PyCharm](#pycharm)
- - [MODFLOW executables](#modflow-executables)
- - [Scripted installation](#scripted-installation)
- - [Manually installing executables](#manually-installing-executables)
- - [Linux](#linux)
- - [Mac](#mac)
- - [Updating FloPy packages](#updating-flopy-packages)
-- [Examples](#examples)
- - [Developing new examples](#developing-new-examples)
-- [Tests](#tests)
- - [Configuring tests](#configuring-tests)
- - [Running tests](#running-tests)
- - [Selecting tests with markers](#selecting-tests-with-markers)
- - [Writing tests](#writing-tests)
- - [Debugging tests](#debugging-tests)
- - [Performance testing](#performance-testing)
- - [Benchmarking](#benchmarking)
- - [Profiling](#profiling)
- - [Snapshot testing](#snapshot-testing)
-- [Branching model](#branching-model)
-- [Deprecation policy](#deprecation-policy)
-- [Miscellaneous](#miscellaneous)
- - [Locating the root](#locating-the-root)
+- [Developing FloPy](#developing-flopy)
+ - [Requirements \& installation](#requirements--installation)
+ - [Git](#git)
+ - [Python](#python)
+ - [Python IDEs](#python-ides)
+ - [Visual Studio Code](#visual-studio-code)
+ - [PyCharm](#pycharm)
+ - [MODFLOW executables](#modflow-executables)
+ - [Scripted installation](#scripted-installation)
+ - [Manually installing executables](#manually-installing-executables)
+ - [Linux](#linux)
+ - [Mac](#mac)
+ - [Updating FloPy packages](#updating-flopy-packages)
+ - [Examples](#examples)
+ - [Developing new examples](#developing-new-examples)
+ - [Tests](#tests)
+ - [Configuring tests](#configuring-tests)
+ - [Running tests](#running-tests)
+ - [Selecting tests with markers](#selecting-tests-with-markers)
+ - [Writing tests](#writing-tests)
+ - [Debugging tests](#debugging-tests)
+ - [Debugging tests in VS Code](#debugging-tests-in-vs-code)
+ - [Performance testing](#performance-testing)
+ - [Benchmarking](#benchmarking)
+ - [Profiling](#profiling)
+ - [Snapshot testing](#snapshot-testing)
+ - [Branching model](#branching-model)
+ - [Deprecation policy](#deprecation-policy)
+ - [Miscellaneous](#miscellaneous)
+ - [Locating the root](#locating-the-root)
+ - [Dependency analysis](#dependency-analysis)
@@ -49,24 +52,42 @@ To develop FloPy you must have the following software installed on your machine:
You will need [Git](https://git-scm.com) and/or the **GitHub app** (for [Mac](https://mac.github.com) or [Windows](https://windows.github.com)).
GitHub's [Guide to Installing Git](https://help.github.com/articles/set-up-git) is a good source of information.
+Optionally, [`git blame`](https://git-scm.com/docs/git-blame) can be configured to skip the bulk formatting and refactoring commits listed in `.git-blame-ignore-revs`:
+
+```sh
+git config blame.ignoreRevsFile .git-blame-ignore-revs
+```
+
### Python
FloPy supports several recent versions of Python, loosely following [NEP 29](https://numpy.org/neps/nep-0029-deprecation_policy.html#implementation).
-Install Python >=3.8.1, via [standalone download](https://www.python.org/downloads/) or a distribution like [Anaconda](https://www.anaconda.com/products/individual) or [miniconda](https://docs.conda.io/en/latest/miniconda.html). (An [infinite recursion bug](https://github.com/python/cpython/pull/17098) in 3.8.0's [`shutil.copytree`](https://github.com/python/cpython/commit/65c92c5870944b972a879031abd4c20c4f0d7981) can cause test failures if the destination is a subdirectory of the source.)
+Install Python >=3.9 via [standalone download](https://www.python.org/downloads/) or a distribution like [Anaconda](https://www.anaconda.com/products/individual) or [miniconda](https://docs.conda.io/en/latest/miniconda.html).
Then install FloPy and core dependencies from the project root:
- pip install .
+```sh
+pip install .
+```
The FloPy package has a number of [optional dependencies](.docs/optional_dependencies.md), as well as extra dependencies required for linting, testing, and building documentation. Extra dependencies are listed in the `test`, `lint`, `optional`, and `doc` groups under the `[project.optional-dependencies]` section in `pyproject.toml`. Core, linting, testing and optional dependencies are included in the Conda environment in `etc/environment.yml`. Only core dependencies are included in the PyPI package; to install extra dependency groups with pip, use `pip install ".[<group>]"`. For instance, to install all development dependencies:
- pip install ".[dev]"
+```sh
+pip install ".[dev]"
+```
Alternatively, with Anaconda or Miniconda:
- conda env create -f etc/environment.yml
- conda activate flopy
+```sh
+conda env create -f etc/environment.yml
+conda activate flopy
+```
+
+For the tests to work, FloPy must also be installed in the "flopy" environment, e.g. in editable mode:
+
+```sh
+pip install -e .
+```
#### Python IDEs
@@ -83,6 +104,10 @@ VSCode users on Windows may need to run `conda init`, then open a fresh terminal
To locate a Conda environment's Python executable, run `where python` with the environment activated.
+The [Debugging tests in VS Code](#debugging-tests-in-vs-code) section below has additional tips for using VS Code to debug tests interactively.
+
+See also this [VS Code Tutorial](https://doi-usgs.github.io/python-for-hydrology/latest/notebooks/part0_python_intro/07b_VSCode.html) from the USGS Python for Hydrology course.
+
##### PyCharm
To configure a Python interpreter in PyCharm, navigate to `Settings -> Project -> Python Interpreter`, click the gear icon, then select `Add Interpreter`. This presents a wizard to create a new virtual environment or select an existing one.
@@ -101,35 +126,41 @@ A utility script is provided to easily download and install executables: after i
To download and extract all executables for Linux (e.g., Ubuntu):
-```shell
+```sh
wget https://github.com/MODFLOW-USGS/executables/releases/download/8.0/linux.zip && \
unzip linux.zip -d /path/to/your/install/location
```
Then add the install location to the `PATH`
- export PATH="/path/to/install/location:$PATH"
+```sh
+export PATH="/path/to/install/location:$PATH"
+```
##### Mac
The same commands should work to download and extract executables for OSX:
-```shell
+```sh
wget https://github.com/MODFLOW-USGS/executables/releases/download/8.0/mac.zip && \
unzip mac.zip -d /path/to/your/install/location
```
Then add the install location to your `PATH`
- export PATH="/path/to/your/install/location:$PATH"
+```sh
+export PATH="/path/to/your/install/location:$PATH"
+```
On OSX you may see unidentified developer warnings upon running the executables. To disable warnings and enable permissions for all binaries at once, navigate to the install directory and run
- `for f in *; do xattr -d com.apple.quarantine "$f" && chmod +x "$f"; done;`
+```sh
+for f in *; do xattr -d com.apple.quarantine "$f" && chmod +x "$f"; done
+```
When run on OSX, certain tests (e.g., `t032_test.py::test_polygon_from_ij`) may produce errors like
-```shell
+```sh
URLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1129)'))
```
@@ -145,19 +176,19 @@ A number of examples demonstrating FloPy features are located in `.docs/Notebook
To convert a Python example script to an `.ipynb` notebook, run:
-```
+```sh
jupytext --from py --to ipynb path/to/script.py
```
To work with `.ipynb` notebooks from a browser interface, you will need `jupyter` installed (`jupyter` is included with the `test` optional dependency group in `pyproject.toml`). Some of the notebooks use testing dependencies and [optional dependencies](.docs/optional_dependencies.md) as well. The conda environment provided in `etc/environment.yml` already includes all dependencies needed to run the examples. To install all development dependencies at once using `pip`:
-```shell
+```sh
pip install ".[dev]"
```
To start a local Jupyter notebook server, run:
-```shell
+```sh
jupyter notebook
```
@@ -165,7 +196,7 @@ jupyter notebook
Submissions of high-quality examples that demonstrate the use of FloPy are encouraged, as are edits to existing examples to improve the code quality, performance, or clarity of presentation.
-There are two kinds of examples: tutorials and full-fledged examples.
+There are two kinds of examples: tutorials and full-fledged examples.
If a script's filename contains "tutorial", it will automatically be assigned to the [Tutorials](https://flopy.readthedocs.io/en/latest/tutorials.html) page on the documentation site.
@@ -175,7 +206,7 @@ If a script's filename contains "example", it is considered a full-fledged examp
All tutorials and examples should include a header with the following format:
-```
+```py
# ---
# jupyter
# jupytext:
@@ -191,7 +222,7 @@ All tutorials and examples should include a header with the following format:
Contents above the `metadata` attribute can be auto-generated with `jupytext` by first converting an example script to a notebook, and then back to a script (i.e. a round-trip conversion). For instance:
-```shell
+```sh
jupytext --from py --to ipynb .docs/Notebooks/your_example.py
jupytext --from ipynb --to py .docs/Notebooks/your_example.ipynb
```
@@ -205,7 +236,7 @@ The `section` attribute assigns the example to a group within the rendered docum
**Note**: Examples are rendered into a thumbnail gallery view by [nbsphinx](https://github.com/spatialaudio/nbsphinx) when the [online documentation](https://flopy.readthedocs.io/en/latest/) is built. At least one plot/visualization is recommended in order to provide a thumbnail for each example notebook in the [Examples gallery](https://flopy.readthedocs.io/en/latest/notebooks.html).
-**Note**: Thumbnails for the examples gallery are generated automatically from the notebook header (typically the first line, begining with a single '#'), and by default, the last plot generated. Thumbnails can be customized to use any plot in the notebook, or an external image, as described [here](https://nbsphinx.readthedocs.io/en/0.9.1/subdir/gallery.html).
+**Note**: Thumbnails for the examples gallery are generated automatically from the notebook header (typically the first line, beginning with a single '#'), and by default, the last plot generated. Thumbnails can be customized to use any plot in the notebook, or an external image, as described [here](https://nbsphinx.readthedocs.io/en/0.9.1/subdir/gallery.html).
Each example should create and (attempt to) dispose of its own isolated temporary workspace. On Windows, Python's `TemporaryDirectory` can raise permissions errors, so cleanup is trapped with `try/except`. Some scripts also accept a `--quiet` flag, curtailing verbose output, and a `--keep` option to specify a working directory of the user's choice.
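To make that cleanup pattern concrete, here is a minimal sketch (illustrative only, not taken from any particular example script) of a workspace whose removal is trapped:

```python
# Minimal sketch of the trapped-cleanup pattern described above (illustrative).
from tempfile import TemporaryDirectory

temp_dir = TemporaryDirectory()
workspace = temp_dir.name

# ... build, write, and run a model in `workspace` ...

try:
    # attempt to remove the temporary workspace
    temp_dir.cleanup()
except (PermissionError, NotADirectoryError):
    # on Windows, lingering file handles can make removal fail; ignore
    pass
```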
@@ -231,17 +262,23 @@ Environment variables can be set as usual, but a more convenient way to store va
Tests must be run from the `autotest` directory. To run a single test script in verbose mode:
- pytest -v test_conftest.py
+```sh
+pytest -v test_conftest.py
+```
The `test_conftest.py` script tests the test suite's `pytest` configuration. This includes shared fixtures providing a single source of truth for the location of example data, as well as various other fixtures and utilities.
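For instance, a test might consume the shared example-data fixture like so (a hypothetical sketch; `example_data_path` and the `prj_test` directory appear elsewhere in the test suite, but this test body is illustrative):

```python
# Hypothetical sketch of a test consuming the shared example-data fixture.
def test_example_data_is_present(example_data_path):
    # `example_data_path` resolves to the repository's example data directory
    assert (example_data_path / "prj_test").is_dir()
```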
Tests matching a pattern can be run with `-k`, e.g.:
- pytest -v -k "export"
+```sh
+pytest -v -k "export"
+```
To run all tests in parallel, using however many cores your machine is willing to spare:
- pytest -v -n auto
+```sh
+pytest -v -n auto
+```
The `-n auto` option configures the `pytest-xdist` extension to query your computer for the number of processors available. To explicitly set the number of cores, substitute an integer for `auto` in the `-n` argument, e.g. `pytest -v -n 2`. (The space between `-n` and the number of processors can be replaced with `=`, e.g. `-n=2`.)
@@ -257,15 +294,21 @@ Markers are a `pytest` feature that can be used to select subsets of tests. Mark
Markers can be used with the `-m <marker>` option. For example, to run only fast tests:
- pytest -v -n auto -m "not slow"
+```sh
+pytest -v -n auto -m "not slow"
+```
Markers can be applied in boolean combinations with `and` and `not`. For instance, to run fast tests in parallel, excluding example scripts/notebooks and regression tests:
- pytest -v -n auto -m "not slow and not example and not regression"
+```sh
+pytest -v -n auto -m "not slow and not example and not regression"
+```
A CLI option `--smoke` (short form `-S`) is provided as an alias for the above. For instance:
- pytest -v -n auto -S
+```sh
+pytest -v -n auto -S
+```
This should complete in under a minute on most machines. Smoke testing aims to cover a reasonable fraction of the codebase while being fast enough to run often during development. (To preserve this ability, new tests should be marked as slow if they take longer than a second or two to complete.)
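New slow tests can opt in to the marker directly; a minimal sketch (the test name and body are hypothetical, but `slow` is the marker referenced above):

```python
import pytest


@pytest.mark.slow  # excluded by -m "not slow" and by the --smoke/-S alias
def test_long_running_simulation():
    ...  # hypothetical long-running test body
```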
@@ -273,18 +316,47 @@ This should complete in under a minute on most machines. Smoke testing aims to c
### Writing tests
-Test functions and files should be named informatively, with related tests grouped in the same file. The test suite runs on GitHub Actions in parallel, so tests should not access the working space of other tests, example scripts, tutorials or notebooks. A number of shared test fixtures are [imported](conftest.py) from [`modflow-devtools`](https://github.com/MODFLOW-USGS/modflow-devtools). These include keepable temporary directory fixtures and miscellanous utilities (see `modflow-devtools` repository README for more information on fixture usage). New tests should use these facilities where possible. See also the [contribution guidelines](CONTRIBUTING.md) before submitting a pull request.
+Test functions and files should be named informatively, with related tests grouped in the same file. The test suite runs on GitHub Actions in parallel, so tests should not access the working space of other tests, example scripts, tutorials or notebooks. A number of shared test fixtures are [imported](conftest.py) from [`modflow-devtools`](https://github.com/MODFLOW-USGS/modflow-devtools). These include keepable temporary directory fixtures and miscellaneous utilities (see `modflow-devtools` repository README for more information on fixture usage). New tests should use these facilities where possible. See also the [contribution guidelines](CONTRIBUTING.md) before submitting a pull request.
### Debugging tests
To debug a failed test it can be helpful to inspect its output, which is cleaned up automatically by default. `modflow-devtools` provides temporary directory fixtures that allow optionally keeping test outputs in a specified location. To run a test and keep its output, use the `--keep` option to provide a save location:
- pytest test_export.py --keep exports_scratch
+```sh
+pytest test_export.py --keep exports_scratch
+```
This will retain any files created by the test in `exports_scratch` in the current working directory. Any tests using the function-scoped `function_tmpdir` and related fixtures (e.g. `class_tmpdir`, `module_tmpdir`) defined in `modflow_devtools/fixtures` are compatible with this mechanism.
There is also a `--keep-failed <dir>` option, which preserves the outputs of failed tests in the given location; however, this option is only compatible with function-scoped temporary directories (the `function_tmpdir` fixture).
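A test is compatible with both options simply by writing its outputs under the fixture-provided directory; a minimal sketch (the test and file names are hypothetical):

```python
# Hypothetical sketch: outputs written under `function_tmpdir` can be retained
# with --keep (always) or --keep-failed (only when the test fails).
def test_export_creates_file(function_tmpdir):
    outfile = function_tmpdir / "output.shp"
    outfile.write_text("placeholder")  # stand-in for a real export step
    assert outfile.exists()
```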
+#### Debugging tests in VS Code
+
+When writing tests to develop a new feature or reproduce and fix a bug, it can often be helpful to debug tests interactively in an IDE. In addition to the [documentation](https://code.visualstudio.com/docs/python/testing), the following tips might be helpful for getting test debugging to work in VS Code:
+
+- Add the following to the `settings.json` file:
+
+ ```json
+ "python.testing.pytestArgs": ["."],
+ "python.testing.unittestEnabled": false,
+ "python.testing.pytestEnabled": true,
+ "python.testing.cwd": "${workspaceFolder}/autotest"
+ ```
+
+ Notes:
+ - The first three may already be set correctly by default, but the last item is needed for VS Code to discover and run the tests correctly from the `autotest` folder.
+ - The first three settings can also be set via the [Command Palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette) by entering `Python: Configure Tests`, and following the prompts.
+- Make sure the Python interpreter is set correctly.
+- If test discovery is taking too long or not working, it may be helpful to install the [Python Tests Explorer for Visual Studio Code](https://marketplace.visualstudio.com/items?itemName=LittleFoxTeam.vscode-python-test-adapter) extension.
+- Test discovery issues can often be diagnosed by running `pytest --collect-only` at the terminal, though this may be prohibitively slow with the FloPy test suite.
+- Note that the [debug console](https://code.visualstudio.com/docs/editor/debugging#_user-interface) can also be used for interactive plotting. If plots aren't showing up, try adding a `pyplot.pause()` call at the end, e.g. `import matplotlib.pyplot as plt; plt.imshow(array); plt.pause(1)`.
+- The number of columns displayed for a `pandas` `DataFrame` can be adjusted by executing these lines in the debug console:
+
+```python
+pd.options.display.max_columns = <n>  # e.g. None to show all columns
+pd.options.display.width = 0
+```
+
### Performance testing
Performance testing is accomplished with [`pytest-benchmark`](https://pytest-benchmark.readthedocs.io/en/latest/index.html).
@@ -373,7 +445,7 @@ See the linked article for more detail.
### Locating the root
-Python scripts and notebooks often need to reference files elsewhere in the project.
+Python scripts and notebooks often need to reference files elsewhere in the project.
To allow scripts to be run from anywhere in the project hierarchy, scripts should locate the project root relative to themselves, then use paths relative to the root for file access, rather than paths relative to the current working directory (e.g., `../some/path`).
@@ -381,4 +453,21 @@ For a script in a subdirectory of the root, for instance, the conventional appro
```Python
project_root_path = Path(__file__).parent.parent
-```
\ No newline at end of file
+```
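Once the root is located, file references hang off it; for example (a hypothetical layout, assuming the script lives one level below the root):

```python
# Hypothetical usage of the pattern above: resolve files from the project root
# so the script works regardless of the current working directory.
from pathlib import Path

project_root_path = Path(__file__).parent.parent
example_data_path = project_root_path / "examples" / "data"  # assumed layout
```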
+
+### Dependency analysis
+
+For dependency analysis between internal modules, the `dev` optional dependency group installs `tach`,
+a package that visualizes the imports defined in the project's Python files.
+More information on the usage of this package can be found on the [tach documentation page](https://docs.gauge.sh/usage/commands).
+
+The `tach.toml` file is already checked in and can simply be modified via `tach mod`.
+We have set `root_module = "ignore"` because including the root only cluttered the diagram.
+Run the following commands to generate a new overview:
+
+```sh
+tach sync
+tach show --mermaid
+```
+
+You can inspect the results in the [Mermaid Live Editor](https://mermaid.live/).
diff --git a/README.md b/README.md
index f145e49fb6..bfe9bfbe5e 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
-### Version 3.8.2
+### Version 3.9.0
[![flopy continuous integration](https://github.com/modflowpy/flopy/actions/workflows/commit.yml/badge.svg?branch=develop)](https://github.com/modflowpy/flopy/actions/workflows/commit.yml)
[![Read the Docs](https://github.com/modflowpy/flopy/actions/workflows/rtd.yml/badge.svg?branch=develop)](https://github.com/modflowpy/flopy/actions/workflows/rtd.yml)
@@ -32,7 +32,7 @@ Documentation
Installation
-----------------------------------------------
-FloPy requires **Python** 3.8+ with:
+FloPy requires **Python** 3.9+ with:
```
numpy >=1.20.3
@@ -150,7 +150,7 @@ How to Cite
##### ***Software/Code citation for FloPy:***
-[Bakker, Mark, Post, Vincent, Hughes, J. D., Langevin, C. D., White, J. T., Leaf, A. T., Paulinski, S. R., Bellino, J. C., Morway, E. D., Toews, M. W., Larsen, J. D., Fienen, M. N., Starn, J. J., Brakenhoff, D. A., and Bonelli, W. P., 2024, FloPy v3.8.2: U.S. Geological Survey Software Release, 03 October 2024, https://doi.org/10.5066/F7BK19FH](https://doi.org/10.5066/F7BK19FH)
+[Bakker, Mark, Post, Vincent, Hughes, J. D., Langevin, C. D., White, J. T., Leaf, A. T., Paulinski, S. R., Bellino, J. C., Morway, E. D., Toews, M. W., Larsen, J. D., Fienen, M. N., Starn, J. J., Brakenhoff, D. A., and Bonelli, W. P., 2024, FloPy v3.9.0: U.S. Geological Survey Software Release, 20 December 2024, https://doi.org/10.5066/F7BK19FH](https://doi.org/10.5066/F7BK19FH)
Additional FloPy Related Publications
diff --git a/autotest/conftest.py b/autotest/conftest.py
index 13e25a530a..327f518292 100644
--- a/autotest/conftest.py
+++ b/autotest/conftest.py
@@ -2,7 +2,6 @@
from importlib import metadata
from pathlib import Path
from platform import system
-from typing import List
import matplotlib.pyplot as plt
import pytest
@@ -52,7 +51,7 @@ def flopy_data_path() -> Path:
@pytest.fixture(scope="session")
-def example_shapefiles(example_data_path) -> List[Path]:
+def example_shapefiles(example_data_path) -> list[Path]:
return [f.resolve() for f in (example_data_path / "prj_test").glob("*")]
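The annotation change above relies on PEP 585, available from Python 3.9 (the new floor set in this release): built-in collection types are subscriptable in annotations, so the `typing.List` import can be dropped. A minimal sketch of the idiom (the function is hypothetical):

```python
# PEP 585 (Python 3.9+): subscript built-in types directly in annotations.
from pathlib import Path


def resolve_all(paths: list[Path]) -> list[Path]:  # no `from typing import List`
    return [p.resolve() for p in paths]
```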
@@ -72,8 +71,8 @@ def close_plot(request):
@pytest.fixture(scope="session", autouse=True)
def patch_macos_ci_matplotlib():
- # use noninteractive matplotlib backend if in Mac OS CI to avoid pytest-xdist node failure
- # e.g. https://github.com/modflowpy/flopy/runs/7748574375?check_suite_focus=true#step:9:57
+ # use noninteractive matplotlib backend if in Mac OS CI to avoid pytest-xdist
+ # node failure
if is_in_ci() and system().lower() == "darwin":
import matplotlib
@@ -87,7 +86,7 @@ def patch_macos_ci_matplotlib():
def pytest_runtest_makereport(item, call):
# this is necessary so temp dir fixtures can
# inspect test results and check for failure
- # (see https://doc.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures)
+ # (see https://doc.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures) # noqa
outcome = yield
rep = outcome.get_result()
@@ -102,8 +101,9 @@ def pytest_addoption(parser):
"--show-plots",
action="store_true",
default=False,
- help="Show any figure windows created by test cases. (Useful to display plots for visual inspection, "
- "but automated tests should probably also check patch collections or figure & axis properties.)",
+ help="Show any figure windows created by test cases. (Useful to display "
+ "plots for visual inspection, but automated tests should probably also "
+ "check patch collections or figure & axis properties.)",
)
# for test_generate_classes.py
@@ -111,7 +111,8 @@ def pytest_addoption(parser):
"--ref",
action="append",
type=str,
- help="Include extra refs to test. Useful for testing branches on a fork, e.g. <fork>/modflow6/<branch>.",
+ help="Include extra refs to test. Useful for testing branches on a fork, "
+ "e.g. <fork>/modflow6/<branch>.",
)
@@ -157,7 +158,5 @@ def pytest_report_header(config):
if installed:
lines.append(f"{optional} packages: {', '.join(installed)}")
if not_found:
- lines.append(
- f"{optional} packages not found: {', '.join(not_found)}"
- )
+ lines.append(f"{optional} packages not found: {', '.join(not_found)}")
return "\n".join(lines)
diff --git a/autotest/regression/test_mf6.py b/autotest/regression/test_mf6.py
index e1c5850887..4c7a9fb7dc 100644
--- a/autotest/regression/test_mf6.py
+++ b/autotest/regression/test_mf6.py
@@ -71,13 +71,9 @@ def test_ts(function_tmpdir, example_data_path):
)
# create the Flopy groundwater flow (gwf) model object
model_nam_file = f"{name}.nam"
- gwf = flopy.mf6.ModflowGwf(
- sim, modelname=name, model_nam_file=model_nam_file
- )
+ gwf = flopy.mf6.ModflowGwf(sim, modelname=name, model_nam_file=model_nam_file)
# create the flopy iterative model solver (ims) package object
- ims = flopy.mf6.modflow.mfims.ModflowIms(
- sim, pname="ims", complexity="SIMPLE"
- )
+ ims = flopy.mf6.modflow.mfims.ModflowIms(sim, pname="ims", complexity="SIMPLE")
# create the discretization package
bot = np.linspace(-3.0, -50.0 / 3.0, 3)
delrow = delcol = 4.0
@@ -160,9 +156,7 @@ def test_ts(function_tmpdir, example_data_path):
for layer, cond in zip(range(1, 3), [15.0, 1500.0]):
for row in range(0, 15):
if row < 10:
- ghb_period.append(
- ((layer, row, 9), "tides", cond, "Estuary-L2")
- )
+ ghb_period.append(((layer, row, 9), "tides", cond, "Estuary-L2"))
else:
ghb_period.append(((layer, row, 9), "wl", cond, "Estuary-L2"))
ghb_spd_ts[0] = ghb_period
@@ -326,9 +320,7 @@ def test_np001(function_tmpdir, example_data_path):
sim, time_units="DAYS", nper=1, perioddata=[(2.0, 1, 1.0)]
)
# specifying the tdis package twice should remove the old tdis package
- tdis_package = ModflowTdis(
- sim, time_units="DAYS", nper=2, perioddata=tdis_rc
- )
+ tdis_package = ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc)
# first ims file to be replaced
ims_package = ModflowIms(
sim,
@@ -364,9 +356,7 @@ def test_np001(function_tmpdir, example_data_path):
number_orthogonalizations=2,
)
- model = ModflowGwf(
- sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
- )
+ model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
# test case insensitive lookup
assert sim.get_model(model_name.upper()) is not None
@@ -498,9 +488,7 @@ def test_np001(function_tmpdir, example_data_path):
stress_period_data=well_spd,
)
wel_package.stress_period_data.add_transient_key(1)
- wel_package.stress_period_data.set_data(
- {1: {"filename": "wel.txt", "factor": 1.0}}
- )
+ wel_package.stress_period_data.set_data({1: {"filename": "wel.txt", "factor": 1.0}})
# test getting data from a binary file
well_data = wel_package.stress_period_data.get_data(0)
@@ -561,22 +549,7 @@ def test_np001(function_tmpdir, example_data_path):
ic_array = ic_data.get_data()
assert array_util.array_comp(
ic_array,
- [
- [
- [
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- ]
- ]
- ],
+ [[[100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0]]],
)
# make folder to save simulation
@@ -585,9 +558,7 @@ def test_np001(function_tmpdir, example_data_path):
# write simulation to new location
sim.set_all_data_external()
sim.write_simulation()
- assert (
- sim.simulation_data.max_columns_of_data == dis_package.ncol.get_data()
- )
+ assert sim.simulation_data.max_columns_of_data == dis_package.ncol.get_data()
# test package file with relative path to simulation path
wel_path = os.path.join(ws, "well_folder", f"{model_name}.wel")
assert os.path.exists(wel_path)
@@ -636,14 +607,10 @@ def test_np001(function_tmpdir, example_data_path):
wel_path = os.path.join(ws, md_folder, "well_folder", f"{model_name}.wel")
assert os.path.exists(wel_path)
# test data file was recreated by set_all_data_external
- riv_path = (
- function_tmpdir / "data" / "np001_mod.riv_stress_period_data_1.txt"
- )
+ riv_path = function_tmpdir / "data" / "np001_mod.riv_stress_period_data_1.txt"
assert os.path.exists(riv_path)
- assert (
- sim.simulation_data.max_columns_of_data == dis_package.ncol.get_data()
- )
+ assert sim.simulation_data.max_columns_of_data == dis_package.ncol.get_data()
# run simulation from new path with external files
sim.run_simulation()
@@ -872,12 +839,8 @@ def test_np002(function_tmpdir, example_data_path):
assert name.memory_print_option.get_data() is None
tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)]
- tdis_package = ModflowTdis(
- sim, time_units="DAYS", nper=2, perioddata=tdis_rc
- )
- model = ModflowGwf(
- sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
- )
+ tdis_package = ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc)
+ model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
ims_package = ModflowIms(
sim,
print_option="ALL",
@@ -903,18 +866,7 @@ def test_np002(function_tmpdir, example_data_path):
top = {
"filename": "top data.txt",
"factor": 1.0,
- "data": [
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- ],
+ "data": [100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0],
}
botm = {"filename": "botm.txt", "factor": 1.0}
dis_package = ModflowGwfdis(
@@ -933,18 +885,7 @@ def test_np002(function_tmpdir, example_data_path):
assert sim.simulation_data.max_columns_of_data == 22
sim.simulation_data.max_columns_of_data = dis_package.ncol.get_data()
- ic_vals = [
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- ]
+ ic_vals = [100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0]
ic_package = ModflowGwfic(model, strt=ic_vals, filename=f"{model_name}.ic")
ic_package.strt.store_as_external_file("initial_heads.txt")
npf_package = ModflowGwfnpf(model, save_flows=True, icelltype=1, k=100.0)
@@ -1144,12 +1085,8 @@ def test021_twri(function_tmpdir, example_data_path):
)
sim.set_sim_path(function_tmpdir)
tdis_rc = [(86400.0, 1, 1.0)]
- tdis_package = ModflowTdis(
- sim, time_units="SECONDS", nper=1, perioddata=tdis_rc
- )
- model = ModflowGwf(
- sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
- )
+ tdis_package = ModflowTdis(sim, time_units="SECONDS", nper=1, perioddata=tdis_rc)
+ model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
ims_package = ModflowIms(
sim,
print_option="SUMMARY",
@@ -1170,9 +1107,7 @@ def test021_twri(function_tmpdir, example_data_path):
fname = "top.bin"
nrow = 15
ncol = 15
- data_folder = os.path.join(
- sim.simulation_data.mfpath.get_sim_path(), fname
- )
+ data_folder = os.path.join(sim.simulation_data.mfpath.get_sim_path(), fname)
f = open(data_folder, "wb")
header = flopy.utils.BinaryHeader.create(
bintype="HEAD",
@@ -1193,13 +1128,7 @@ def test021_twri(function_tmpdir, example_data_path):
header_data=header,
)
f.close()
- top = {
- "factor": 1.0,
- "filename": fname,
- "data": None,
- "binary": True,
- "iprn": 1,
- }
+ top = {"factor": 1.0, "filename": fname, "data": None, "binary": True, "iprn": 1}
dis_package = ModflowGwfdis(
model,
@@ -1214,12 +1143,7 @@ def test021_twri(function_tmpdir, example_data_path):
)
strt = [
{"filename": "strt.txt", "factor": 1.0, "data": 0.0},
- {
- "filename": "strt2.bin",
- "factor": 1.0,
- "data": 1.0,
- "binary": "True",
- },
+ {"filename": "strt2.bin", "factor": 1.0, "data": 1.0, "binary": "True"},
2.0,
]
ic_package = ModflowGwfic(model, strt=strt, filename=f"{model_name}.ic")
@@ -1353,9 +1277,7 @@ def test005_create_tests_advgw_tidal(function_tmpdir, example_data_path):
expected_head_file = expected_output_folder / "AdvGW_tidal.hds"
# create simulation
- sim = MFSimulation(
- sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth
- )
+ sim = MFSimulation(sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth)
# test tdis package deletion
tdis_package = ModflowTdis(
sim, time_units="DAYS", nper=1, perioddata=[(2.0, 2, 1.0)]
@@ -1368,12 +1290,8 @@ def test005_create_tests_advgw_tidal(function_tmpdir, example_data_path):
(10.0, 120, 1.0),
(10.0, 120, 1.0),
]
- tdis_package = ModflowTdis(
- sim, time_units="DAYS", nper=4, perioddata=tdis_rc
- )
- model = ModflowGwf(
- sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
- )
+ tdis_package = ModflowTdis(sim, time_units="DAYS", nper=4, perioddata=tdis_rc)
+ model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
ims_package = ModflowIms(
sim,
print_option="SUMMARY",
@@ -1427,9 +1345,7 @@ def test005_create_tests_advgw_tidal(function_tmpdir, example_data_path):
DataStorageType.internal_constant,
DataStorageType.internal_array,
]
- ss_template = ModflowGwfsto.ss.empty(
- model, True, layer_storage_types, 0.000001
- )
+ ss_template = ModflowGwfsto.ss.empty(model, True, layer_storage_types, 0.000001)
sto_package = ModflowGwfsto(
model,
save_flows=True,
@@ -1489,9 +1405,7 @@ def test005_create_tests_advgw_tidal(function_tmpdir, example_data_path):
ts_dict = {
"filename": os.path.join("well-rates", "well-rates.ts"),
"timeseries": timeseries,
- "time_series_namerecord": [
- ("well_1_rate", "well_2_rate", "well_3_rate")
- ],
+ "time_series_namerecord": [("well_1_rate", "well_2_rate", "well_3_rate")],
"interpolation_methodrecord": [("stepwise", "stepwise", "stepwise")],
}
# test removing package with child packages
@@ -1571,9 +1485,7 @@ def test005_create_tests_advgw_tidal(function_tmpdir, example_data_path):
ghb_period_array = []
for layer, cond in zip(range(1, 3), [15.0, 1500.0]):
for row in range(0, 15):
- ghb_period_array.append(
- ((layer, row, 9), "tides", cond, "Estuary-L2")
- )
+ ghb_period_array.append(((layer, row, 9), "tides", cond, "Estuary-L2"))
ghb_period[0] = ghb_period_array
# build ts ghb
@@ -1671,15 +1583,7 @@ def test005_create_tests_advgw_tidal(function_tmpdir, example_data_path):
("rv2-upper", "RIV", "riv2_upper"),
("rv-2-7-4", "RIV", (0, 6, 3)),
("rv2-8-5", "RIV", (0, 6, 4)),
- (
- "rv-2-9-6",
- "RIV",
- (
- 0,
- 5,
- 5,
- ),
- ),
+ ("rv-2-9-6", "RIV", (0, 5, 5)),
],
"riv_flowsA.csv": [
("riv1-3-1", "RIV", (0, 2, 0)),
@@ -1989,16 +1893,10 @@ def test004_create_tests_bcfss(function_tmpdir, example_data_path):
expected_head_file = os.path.join(expected_output_folder, "bcf2ss.hds")
# create simulation
- sim = MFSimulation(
- sim_name=model_name, version="mf6", exe_name="mf6", sim_ws=pth
- )
+ sim = MFSimulation(sim_name=model_name, version="mf6", exe_name="mf6", sim_ws=pth)
tdis_rc = [(1.0, 1, 1.0), (1.0, 1, 1.0)]
- tdis_package = ModflowTdis(
- sim, time_units="DAYS", nper=2, perioddata=tdis_rc
- )
- model = ModflowGwf(
- sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
- )
+ tdis_package = ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc)
+ model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
ims_package = ModflowIms(
sim,
print_option="ALL",
@@ -2089,9 +1987,7 @@ def test004_create_tests_bcfss(function_tmpdir, example_data_path):
riv_period_array = []
aux_vals = [1.0, 5.0, 4.0, 8.0, 3.0, "bad value", 5.5, 6.3, 8.1, 18.3]
for row in range(0, 10):
- riv_period_array.append(
- ((1, row, 14), 0.0, 10000.0, -5.0, aux_vals[row], 10.0)
- )
+ riv_period_array.append(((1, row, 14), 0.0, 10000.0, -5.0, aux_vals[row], 10.0))
riv_period[0] = riv_period_array
riv_package = ModflowGwfriv(
model,
@@ -2102,25 +1998,17 @@ def test004_create_tests_bcfss(function_tmpdir, example_data_path):
)
chk = riv_package.check()
summary = ".".join(chk.summary_array.desc)
- assert (
- summary == "Invalid non-numeric value 'bad value' in auxiliary "
- "data."
- )
+ assert summary == "Invalid non-numeric value 'bad value' in auxiliary data."
# test with boundnames
riv_package.boundnames = True
riv_period_array = []
for row in range(0, 10):
- riv_period_array.append(
- ((1, row, 14), 0.0, 10000.0, -5.0, aux_vals[row], 10.0)
- )
+ riv_period_array.append(((1, row, 14), 0.0, 10000.0, -5.0, aux_vals[row], 10.0))
riv_period[0] = riv_period_array
riv_package.stress_period_data = riv_period
chk = riv_package.check()
summary = ".".join(chk.summary_array.desc)
- assert (
- summary == "Invalid non-numeric value 'bad value' in auxiliary "
- "data."
- )
+ assert summary == "Invalid non-numeric value 'bad value' in auxiliary data."
# fix aux variable
riv_package.boundnames = False
@@ -2128,9 +2016,7 @@ def test004_create_tests_bcfss(function_tmpdir, example_data_path):
riv_period_array = []
aux_vals = [1.0, 5.0, 4.0, 8.0, 3.0, 5.0, 5.5, 6.3, 8.1, 18.3]
for row in range(0, 10):
- riv_period_array.append(
- ((1, row, 14), 0.0, 10000.0, -5.0, aux_vals[row], 10.0)
- )
+ riv_period_array.append(((1, row, 14), 0.0, 10000.0, -5.0, aux_vals[row], 10.0))
riv_period[0] = riv_period_array
riv_package.stress_period_data = riv_period
# check again
@@ -2186,21 +2072,13 @@ def test035_create_tests_fhb(function_tmpdir, example_data_path):
model_name = "fhb2015"
pth = example_data_path / "mf6" / "create_tests" / test_ex_name
expected_output_folder = os.path.join(pth, "expected_output")
- expected_head_file = os.path.join(
- expected_output_folder, "fhb2015_fhb.hds"
- )
+ expected_head_file = os.path.join(expected_output_folder, "fhb2015_fhb.hds")
# create simulation
- sim = MFSimulation(
- sim_name=model_name, version="mf6", exe_name="mf6", sim_ws=pth
- )
+ sim = MFSimulation(sim_name=model_name, version="mf6", exe_name="mf6", sim_ws=pth)
tdis_rc = [(400.0, 10, 1.0), (200.0, 4, 1.0), (400.0, 6, 1.1)]
- tdis_package = ModflowTdis(
- sim, time_units="DAYS", nper=3, perioddata=tdis_rc
- )
- model = ModflowGwf(
- sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
- )
+ tdis_package = ModflowTdis(sim, time_units="DAYS", nper=3, perioddata=tdis_rc)
+ model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
ims_package = ModflowIms(
sim,
print_option="SUMMARY",
@@ -2230,9 +2108,7 @@ def test035_create_tests_fhb(function_tmpdir, example_data_path):
filename=f"{model_name}.dis",
)
ic_package = ModflowGwfic(model, strt=0.0, filename=f"{model_name}.ic")
- npf_package = ModflowGwfnpf(
- model, perched=True, icelltype=0, k=20.0, k33=1.0
- )
+ npf_package = ModflowGwfnpf(model, perched=True, icelltype=0, k=20.0, k33=1.0)
oc_package = ModflowGwfoc(
model,
head_filerecord="fhb2015_fhb.hds",
@@ -2247,9 +2123,7 @@ def test035_create_tests_fhb(function_tmpdir, example_data_path):
model, storagecoefficient=True, iconvert=0, ss=0.01, sy=0.0
)
time = model.modeltime
- assert not (
- time.steady_state[0] or time.steady_state[1] or time.steady_state[2]
- )
+ assert not (time.steady_state[0] or time.steady_state[1] or time.steady_state[2])
wel_period = {0: [((0, 1, 0), "flow")]}
wel_package = ModflowGwfwel(
model,
@@ -2272,9 +2146,7 @@ def test035_create_tests_fhb(function_tmpdir, example_data_path):
interpolation_methodrecord="linear",
)
- chd_period = {
- 0: [((0, 0, 9), "head"), ((0, 1, 9), "head"), ((0, 2, 9), "head")]
- }
+ chd_period = {0: [((0, 0, 9), "head"), ((0, 1, 9), "head"), ((0, 2, 9), "head")]}
chd_package = ModflowGwfchd(
model,
print_input=True,
@@ -2336,12 +2208,8 @@ def test006_create_tests_gwf3_disv(function_tmpdir, example_data_path):
)
sim.set_sim_path(function_tmpdir)
tdis_rc = [(1.0, 1, 1.0)]
- tdis_package = ModflowTdis(
- sim, time_units="DAYS", nper=1, perioddata=tdis_rc
- )
- model = ModflowGwf(
- sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
- )
+ tdis_package = ModflowTdis(sim, time_units="DAYS", nper=1, perioddata=tdis_rc)
+ model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
ims_package = ModflowIms(
sim,
print_option="SUMMARY",
@@ -2494,13 +2362,9 @@ def test006_create_tests_gwf3_disv(function_tmpdir, example_data_path):
0,
0,
]
- ic_package = ModflowGwfic(
- model, strt=strt_list, filename=f"{model_name}.ic"
- )
+ ic_package = ModflowGwfic(model, strt=strt_list, filename=f"{model_name}.ic")
k = {"filename": "k.bin", "factor": 1.0, "data": 1.0, "binary": "True"}
- npf_package = ModflowGwfnpf(
- model, save_flows=True, icelltype=0, k=k, k33=1.0
- )
+ npf_package = ModflowGwfnpf(model, save_flows=True, icelltype=0, k=k, k33=1.0)
k_data = npf_package.k.get_data()
assert k_data[0, 0] == 1.0
@@ -2622,13 +2486,9 @@ def test006_create_tests_2models_gnc(function_tmpdir, example_data_path):
expected_head_file_2 = os.path.join(expected_output_folder, "model2.hds")
# create simulation
- sim = MFSimulation(
- sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth
- )
+ sim = MFSimulation(sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth)
tdis_rc = [(1.0, 1, 1.0)]
- tdis_package = ModflowTdis(
- sim, time_units="DAYS", nper=1, perioddata=tdis_rc
- )
+ tdis_package = ModflowTdis(sim, time_units="DAYS", nper=1, perioddata=tdis_rc)
model_1 = ModflowGwf(
sim,
modelname=model_name_1,
@@ -2782,12 +2642,8 @@ def test006_create_tests_2models_gnc(function_tmpdir, example_data_path):
1.0,
0.0,
]
- ic_package_1 = ModflowGwfic(
- model_1, strt=strt_list, filename=f"{model_name_1}.ic"
- )
- ic_package_2 = ModflowGwfic(
- model_2, strt=1.0, filename=f"{model_name_2}.ic"
- )
+ ic_package_1 = ModflowGwfic(model_1, strt=strt_list, filename=f"{model_name_1}.ic")
+ ic_package_2 = ModflowGwfic(model_2, strt=1.0, filename=f"{model_name_2}.ic")
npf_package_1 = ModflowGwfnpf(
model_1, save_flows=True, perched=True, icelltype=0, k=1.0, k33=1.0
)
@@ -2853,11 +2709,7 @@ def test006_create_tests_2models_gnc(function_tmpdir, example_data_path):
)
sim.remove_package(exg_package.package_type)
- exg_data = {
- "filename": "exg_data.txt",
- "data": exgrecarray,
- "binary": True,
- }
+ exg_data = {"filename": "exg_data.txt", "data": exgrecarray, "binary": True}
exg_package = ModflowGwfgwf(
sim,
print_input=True,
@@ -2963,16 +2815,10 @@ def test050_create_tests_circle_island(function_tmpdir, example_data_path):
expected_head_file = expected_output_folder / "ci.output.hds"
# create simulation
- sim = MFSimulation(
- sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth
- )
+ sim = MFSimulation(sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth)
tdis_rc = [(1.0, 1, 1.0)]
- tdis_package = ModflowTdis(
- sim, time_units="DAYS", nper=1, perioddata=tdis_rc
- )
- model = ModflowGwf(
- sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
- )
+ tdis_package = ModflowTdis(sim, time_units="DAYS", nper=1, perioddata=tdis_rc)
+ model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
ims_package = ModflowIms(
sim,
print_option="SUMMARY",
@@ -3001,9 +2847,7 @@ def test050_create_tests_circle_island(function_tmpdir, example_data_path):
filename=f"{model_name}.disv",
)
ic_package = ModflowGwfic(model, strt=0.0, filename=f"{model_name}.ic")
- npf_package = ModflowGwfnpf(
- model, save_flows=True, icelltype=0, k=10.0, k33=0.2
- )
+ npf_package = ModflowGwfnpf(model, save_flows=True, icelltype=0, k=10.0, k33=0.2)
oc_package = ModflowGwfoc(
model,
budget_filerecord="ci.output.cbc",
@@ -3012,9 +2856,7 @@ def test050_create_tests_circle_island(function_tmpdir, example_data_path):
printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
)
- stress_period_data = testutils.read_ghbrecarray(
- os.path.join(pth, "ghb.txt"), 2
- )
+ stress_period_data = testutils.read_ghbrecarray(os.path.join(pth, "ghb.txt"), 2)
ghb_package = ModflowGwfghb(
model, maxbound=3173, stress_period_data=stress_period_data
)
@@ -3064,9 +2906,7 @@ def test028_create_tests_sfr(function_tmpdir, example_data_path):
expected_head_file = expected_output_folder / "test1tr.hds"
# create simulation
- sim = MFSimulation(
- sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth
- )
+ sim = MFSimulation(sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth)
sim.name_file.continue_.set_data(True)
tdis_rc = [(1577889000, 50, 1.1), (1577889000, 50, 1.1)]
tdis_package = ModflowTdis(
@@ -3076,9 +2916,7 @@ def test028_create_tests_sfr(function_tmpdir, example_data_path):
perioddata=tdis_rc,
filename="simulation.tdis",
)
- model = ModflowGwf(
- sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
- )
+ model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
model.name_file.save_flows.set_data(True)
ims_package = ModflowIms(
sim,
@@ -3122,9 +2960,7 @@ def test028_create_tests_sfr(function_tmpdir, example_data_path):
)
strt = testutils.read_std_array(os.path.join(pth, "strt.txt"), "float")
strt_int = ["internal", "factor", 1.0, "iprn", 0, strt]
- ic_package = ModflowGwfic(
- model, strt=strt_int, filename=f"{model_name}.ic"
- )
+ ic_package = ModflowGwfic(model, strt=strt_int, filename=f"{model_name}.ic")
k_vals = testutils.read_std_array(os.path.join(pth, "k.txt"), "float")
k = ["internal", "factor", 3.000e-03, "iprn", 0, k_vals]
@@ -3136,9 +2972,7 @@ def test028_create_tests_sfr(function_tmpdir, example_data_path):
budget_filerecord="test1tr.cbc",
head_filerecord="test1tr.hds",
saverecord={0: [("HEAD", "FREQUENCY", 5), ("BUDGET", "FREQUENCY", 5)]},
- printrecord={
- 0: [("HEAD", "FREQUENCY", 5), ("BUDGET", "FREQUENCY", 5)]
- },
+ printrecord={0: [("HEAD", "FREQUENCY", 5), ("BUDGET", "FREQUENCY", 5)]},
)
sy_vals = testutils.read_std_array(os.path.join(pth, "sy.txt"), "float")
@@ -3177,9 +3011,7 @@ def test028_create_tests_sfr(function_tmpdir, example_data_path):
filename="test028_sfr.evt.obs", print_input=True, continuous=obs_dict
)
- stress_period_data = {
- 0: [((0, 12, 0), 988.0, 0.038), ((0, 13, 8), 1045.0, 0.038)]
- }
+ stress_period_data = {0: [((0, 12, 0), 988.0, 0.038), ((0, 13, 8), 1045.0, 0.038)]}
ghb_package = ModflowGwfghb(
model, maxbound=2, stress_period_data=stress_period_data
)
@@ -3319,9 +3151,7 @@ def test028_create_tests_sfr(function_tmpdir, example_data_path):
# test hpc package
part = [("model1", 1), ("model2", 2)]
- hpc = ModflowUtlhpc(
- sim, dev_log_mpi=True, partitions=part, filename="test.hpc"
- )
+ hpc = ModflowUtlhpc(sim, dev_log_mpi=True, partitions=part, filename="test.hpc")
assert sim.hpc.dev_log_mpi.get_data()
assert hpc.filename == "test.hpc"
@@ -3479,9 +3309,7 @@ def test_create_tests_transport(function_tmpdir, example_data_path):
ic = ModflowGwfic(gwf, strt=strt)
# node property flow
- npf = ModflowGwfnpf(
- gwf, save_flows=False, icelltype=laytyp[idx], k=hk, k33=hk
- )
+ npf = ModflowGwfnpf(gwf, save_flows=False, icelltype=laytyp[idx], k=hk, k33=hk)
# storage
sto = ModflowGwfsto(
gwf,
@@ -3573,9 +3401,7 @@ def test_create_tests_transport(function_tmpdir, example_data_path):
gwt,
budget_filerecord=f"{gwtname}.cbc",
concentration_filerecord=f"{gwtname}.ucn",
- concentrationprintrecord=[
- ("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")
- ],
+ concentrationprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
saverecord=[("CONCENTRATION", "ALL")],
printrecord=[("CONCENTRATION", "ALL"), ("BUDGET", "ALL")],
)
@@ -3596,9 +3422,7 @@ def test_create_tests_transport(function_tmpdir, example_data_path):
sim.run_simulation()
# inspect cells
- cell_list = [
- (0, 0, 0),
- ]
+ cell_list = [(0, 0, 0)]
out_file = function_tmpdir / "inspect_transport_gwf.csv"
gwf.inspect_cells(cell_list, output_file_path=out_file)
out_file = function_tmpdir / "inspect_transport_gwt.csv"
@@ -3639,18 +3463,10 @@ def test001a_tharmonic(function_tmpdir, example_data_path):
pth = example_data_path / "mf6" / test_ex_name
expected_output_folder = os.path.join(pth, "expected_output")
- expected_head_file_a = os.path.join(
- expected_output_folder, "flow15_flow_unch.hds"
- )
- expected_head_file_b = os.path.join(
- expected_output_folder, "flow15_flow_adj.hds"
- )
- expected_cbc_file_a = os.path.join(
- expected_output_folder, "flow15_flow_unch.cbc"
- )
- expected_cbc_file_b = os.path.join(
- expected_output_folder, "flow15_flow_adj.cbc"
- )
+ expected_head_file_a = os.path.join(expected_output_folder, "flow15_flow_unch.hds")
+ expected_head_file_b = os.path.join(expected_output_folder, "flow15_flow_adj.hds")
+ expected_cbc_file_a = os.path.join(expected_output_folder, "flow15_flow_unch.cbc")
+ expected_cbc_file_b = os.path.join(expected_output_folder, "flow15_flow_adj.cbc")
array_util = PyListUtil()
@@ -3693,13 +3509,9 @@ def test001a_tharmonic(function_tmpdir, example_data_path):
# compare output to expected results
head_new = function_tmpdir / "flow15_flow.hds"
- assert compare_heads(
- None, None, files1=[expected_head_file_a], files2=[head_new]
- )
+ assert compare_heads(None, None, files1=[expected_head_file_a], files2=[head_new])
- budget_frf = sim.simulation_data.mfdata[
- (model_name, "CBC", "FLOW-JA-FACE")
- ]
+ budget_frf = sim.simulation_data.mfdata[(model_name, "CBC", "FLOW-JA-FACE")]
assert array_util.array_comp(budget_frf_valid, budget_frf)
# change some settings
@@ -3747,13 +3559,9 @@ def test001a_tharmonic(function_tmpdir, example_data_path):
# compare output to expected results
head_new = os.path.join(save_folder, "flow15_flow.hds")
- assert compare_heads(
- None, None, files1=[expected_head_file_b], files2=[head_new]
- )
+ assert compare_heads(None, None, files1=[expected_head_file_b], files2=[head_new])
- budget_frf = sim.simulation_data.mfdata[
- (model_name, "CBC", "FLOW-JA-FACE")
- ]
+ budget_frf = sim.simulation_data.mfdata[(model_name, "CBC", "FLOW-JA-FACE")]
assert array_util.array_comp(budget_frf_valid, budget_frf)
@@ -3773,9 +3581,7 @@ def test003_gwfs_disv(function_tmpdir, example_data_path):
array_util = PyListUtil()
# load simulation
- sim = MFSimulation.load(
- model_name, "mf6", "mf6", data_folder, verify_data=True
- )
+ sim = MFSimulation.load(model_name, "mf6", "mf6", data_folder, verify_data=True)
# make temp folder to save simulation
sim.set_sim_path(function_tmpdir)
@@ -3795,13 +3601,9 @@ def test003_gwfs_disv(function_tmpdir, example_data_path):
)
head_new = os.path.join(function_tmpdir, "model.hds")
- assert compare_heads(
- None, None, files1=[expected_head_file_a], files2=[head_new]
- )
+ assert compare_heads(None, None, files1=[expected_head_file_a], files2=[head_new])
- budget_frf = sim.simulation_data.mfdata[
- (model_name, "CBC", "FLOW-JA-FACE")
- ]
+ budget_frf = sim.simulation_data.mfdata[(model_name, "CBC", "FLOW-JA-FACE")]
assert array_util.array_comp(budget_fjf_valid, budget_frf)
model = sim.get_model(model_name)
@@ -3831,19 +3633,13 @@ def test003_gwfs_disv(function_tmpdir, example_data_path):
# get expected results
budget_obj = CellBudgetFile(expected_cbc_file_b, precision="double")
- budget_fjf_valid = np.array(
- budget_obj.get_data(text="FLOW JA FACE", full3D=True)
- )
+ budget_fjf_valid = np.array(budget_obj.get_data(text="FLOW JA FACE", full3D=True))
# compare output to expected results
head_new = os.path.join(save_folder, "model.hds")
- assert compare_heads(
- None, None, files1=[expected_head_file_b], files2=[head_new]
- )
+ assert compare_heads(None, None, files1=[expected_head_file_b], files2=[head_new])
- budget_frf = sim.simulation_data.mfdata[
- (model_name, "CBC", "FLOW-JA-FACE")
- ]
+ budget_frf = sim.simulation_data.mfdata[(model_name, "CBC", "FLOW-JA-FACE")]
assert array_util.array_comp(budget_fjf_valid, budget_frf)
@@ -3856,12 +3652,8 @@ def test005_advgw_tidal(function_tmpdir, example_data_path):
model_name = "gwf_1"
pth = example_data_path / "mf6" / test_ex_name
expected_output_folder = os.path.join(pth, "expected_output")
- expected_head_file_a = os.path.join(
- expected_output_folder, "AdvGW_tidal_unch.hds"
- )
- expected_head_file_b = os.path.join(
- expected_output_folder, "AdvGW_tidal_adj.hds"
- )
+ expected_head_file_a = os.path.join(expected_output_folder, "AdvGW_tidal_unch.hds")
+ expected_head_file_b = os.path.join(expected_output_folder, "AdvGW_tidal_adj.hds")
# load simulation
sim = MFSimulation.load(
@@ -3939,13 +3731,9 @@ def test006_2models_different_dis(function_tmpdir, example_data_path):
expected_head_file_2 = os.path.join(expected_output_folder, "model2.hds")
# create simulation
- sim = MFSimulation(
- sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth
- )
+ sim = MFSimulation(sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth)
tdis_rc = [(1.0, 1, 1.0)]
- tdis_package = ModflowTdis(
- sim, time_units="DAYS", nper=1, perioddata=tdis_rc
- )
+ tdis_package = ModflowTdis(sim, time_units="DAYS", nper=1, perioddata=tdis_rc)
model_1 = ModflowGwf(
sim,
modelname=model_name_1,
@@ -3999,12 +3787,8 @@ def test006_2models_different_dis(function_tmpdir, example_data_path):
cell2d=c2drecarray,
filename=f"{model_name_2}.disv",
)
- ic_package_1 = ModflowGwfic(
- model_1, strt=1.0, filename=f"{model_name_1}.ic"
- )
- ic_package_2 = ModflowGwfic(
- model_2, strt=1.0, filename=f"{model_name_2}.ic"
- )
+ ic_package_1 = ModflowGwfic(model_1, strt=1.0, filename=f"{model_name_1}.ic")
+ ic_package_2 = ModflowGwfic(model_2, strt=1.0, filename=f"{model_name_2}.ic")
npf_package_1 = ModflowGwfnpf(
model_1, save_flows=True, perched=True, icelltype=0, k=1.0, k33=1.0
)
@@ -4042,9 +3826,7 @@ def test006_2models_different_dis(function_tmpdir, example_data_path):
maxbound=30,
stress_period_data=stress_period_data,
)
- exgrecarray = testutils.read_exchangedata(
- os.path.join(pth, "exg.txt"), 3, 2
- )
+ exgrecarray = testutils.read_exchangedata(os.path.join(pth, "exg.txt"), 3, 2)
exg_data = {
"filename": "exg_data.bin",
"data": exgrecarray,
@@ -4074,9 +3856,7 @@ def test006_2models_different_dis(function_tmpdir, example_data_path):
)
gnc_path = os.path.join("gnc", "test006_2models_gnc.gnc")
- gncrecarray = testutils.read_gncrecarray(
- os.path.join(pth, "gnc.txt"), 3, 2
- )
+ gncrecarray = testutils.read_gncrecarray(os.path.join(pth, "gnc.txt"), 3, 2)
gnc_package = exg_package.gnc.initialize(
filename=gnc_path,
print_input=True,
@@ -4191,9 +3971,7 @@ def test006_gwf3(function_tmpdir, example_data_path):
budget_fjf = np.array(
sim.simulation_data.mfdata[(model_name, "CBC", "FLOW-JA-FACE")]
)
- assert array_util.array_comp(
- np.array(budget_fjf_valid), np.array(budget_fjf)
- )
+ assert array_util.array_comp(np.array(budget_fjf_valid), np.array(budget_fjf))
# change some settings
model = sim.get_model(model_name)
@@ -4238,16 +4016,15 @@ def test006_gwf3(function_tmpdir, example_data_path):
budget_fjf = np.array(
sim.simulation_data.mfdata[(model_name, "CBC", "FLOW-JA-FACE")]
)
- assert array_util.array_comp(
- np.array(budget_fjf_valid), np.array(budget_fjf)
- )
+ assert array_util.array_comp(np.array(budget_fjf_valid), np.array(budget_fjf))
# confirm that files did move
save_folder = function_tmpdir / "save02"
save_folder.mkdir()
sim.set_sim_path(save_folder)
- # write with "copy_external_files" turned off so external files do not get copied to new location
+ # write with "copy_external_files" turned off so external files
+ # do not get copied to new location
sim.write_simulation(ext_file_action=ExtFileAction.copy_none)
# store strt in an external binary file
@@ -4264,10 +4041,7 @@ def test006_gwf3(function_tmpdir, example_data_path):
assert success, f"simulation {sim.name} rerun(3) did not run"
# get expected results
- budget_obj = CellBudgetFile(
- expected_cbc_file_b,
- precision="double",
- )
+ budget_obj = CellBudgetFile(expected_cbc_file_b, precision="double")
budget_fjf_valid = np.array(
budget_obj.get_data(text=" FLOW JA FACE", full3D=True)
)
@@ -4286,9 +4060,7 @@ def test006_gwf3(function_tmpdir, example_data_path):
budget_fjf = np.array(
sim.simulation_data.mfdata[(model_name, "CBC", "FLOW-JA-FACE")]
)
- assert array_util.array_comp(
- np.array(budget_fjf_valid), np.array(budget_fjf)
- )
+ assert array_util.array_comp(np.array(budget_fjf_valid), np.array(budget_fjf))
# confirm that files did not move
assert not os.path.isfile(os.path.join(save_folder, "flow.disu.ja.dat"))
@@ -4311,12 +4083,8 @@ def test045_lake1ss_table(function_tmpdir, example_data_path):
model_name = "lakeex1b"
pth = example_data_path / "mf6" / test_ex_name
expected_output_folder = os.path.join(pth, "expected_output")
- expected_head_file_a = os.path.join(
- expected_output_folder, "lakeex1b_unch.hds"
- )
- expected_head_file_b = os.path.join(
- expected_output_folder, "lakeex1b_adj.hds"
- )
+ expected_head_file_a = os.path.join(expected_output_folder, "lakeex1b_unch.hds")
+ expected_head_file_b = os.path.join(expected_output_folder, "lakeex1b_adj.hds")
# load simulation
sim = MFSimulation.load(
@@ -4427,9 +4195,7 @@ def test006_2models_mvr(function_tmpdir, example_data_path):
expected_head_file_bb = expected_output_folder / "model2_adj.hds"
# load simulation
- sim = MFSimulation.load(
- sim_name, "mf6", "mf6", data_folder, verify_data=True
- )
+ sim = MFSimulation.load(sim_name, "mf6", "mf6", data_folder, verify_data=True)
# make temp folder to save simulation
sim.set_sim_path(ws)
@@ -4497,22 +4263,21 @@ def test006_2models_mvr(function_tmpdir, example_data_path):
exg_pkg.exchangedata.set_data(exg_data)
# test getting packages
- pkg_dict = parent_model.package_dict
- assert len(pkg_dict) == 6
- pkg_names = parent_model.package_names
- assert len(pkg_names) == 6
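+ # get_package() with no arguments returns a list of all packages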
+ pkg_list = parent_model.get_package()
+ assert len(pkg_list) == 6
- # confirm that this is a copy of the original dictionary with references
- # to the packages
+ # confirm that this is a copy of the original package list with
+ # references to the packages
- del pkg_dict[pkg_names[0]]
- assert len(pkg_dict) == 5
- pkg_dict = parent_model.package_dict
- assert len(pkg_dict) == 6
-
- old_val = pkg_dict["dis"].nlay.get_data()
- pkg_dict["dis"].nlay = 22
- pkg_dict = parent_model.package_dict
- assert pkg_dict["dis"].nlay.get_data() == 22
- pkg_dict["dis"].nlay = old_val
+ del pkg_list[0]
+ assert len(pkg_list) == 5
+ pkg_list = parent_model.get_package()
+ assert len(pkg_list) == 6
+
+ dis_pkg = parent_model.get_package("dis")
+ old_val = dis_pkg.nlay.get_data()
+ dis_pkg.nlay = 22
+ pkg_list = parent_model.get_package()
+ assert dis_pkg.nlay.get_data() == 22
+ dis_pkg.nlay = old_val
# write simulation again
save_folder = function_tmpdir / "save"
@@ -4560,8 +4325,8 @@ def test006_2models_mvr(function_tmpdir, example_data_path):
model = sim.get_model(model_name)
for package in model_package_check:
assert (
- package in model.package_type_dict
- or package in sim.package_type_dict
+ model.get_package(package, type_only=True) is not None
+ or sim.get_package(package, type_only=True) is not None
) == (package in load_only or f"{package}6" in load_only)
assert (len(sim._exchange_files) > 0) == (
"gwf6-gwf6" in load_only or "gwf-gwf" in load_only
@@ -4577,10 +4342,10 @@ def test006_2models_mvr(function_tmpdir, example_data_path):
)
model_parent = sim.get_model("parent")
model_child = sim.get_model("child")
- assert "oc" not in model_parent.package_type_dict
- assert "oc" in model_child.package_type_dict
- assert "npf" in model_parent.package_type_dict
- assert "npf" not in model_child.package_type_dict
+ assert model_parent.get_package("oc") is None
+ assert model_child.get_package("oc") is not None
+ assert model_parent.get_package("npf") is not None
+ assert model_child.get_package("npf") is None
# test running a runnable load_only case
sim = MFSimulation.load(
@@ -4646,19 +4411,15 @@ def test001e_uzf_3lay(function_tmpdir, example_data_path):
["ic6", "ims", "obs6", "oc6"],
]
for load_only in load_only_lists:
- sim = MFSimulation.load(
- model_name, "mf6", "mf6", pth, load_only=load_only
- )
+ sim = MFSimulation.load(model_name, "mf6", "mf6", pth, load_only=load_only)
sim.set_sim_path(function_tmpdir)
model = sim.get_model()
for package in model_package_check:
- assert (package in model.package_type_dict) == (
+ assert (model.get_package(package, type_only=True) is not None) == (
package in load_only or f"{package}6" in load_only
)
# test running a runnable load_only case
- sim = MFSimulation.load(
- model_name, "mf6", "mf6", pth, load_only=load_only_lists[0]
- )
+ sim = MFSimulation.load(model_name, "mf6", "mf6", pth, load_only=load_only_lists[0])
sim.set_sim_path(function_tmpdir)
success, buff = sim.run_simulation()
assert success, f"simulation {sim.name} from load did not run"
@@ -4675,12 +4436,7 @@ def test001e_uzf_3lay(function_tmpdir, example_data_path):
sim.remove_package(ims)
ims = ModflowIms(sim, print_option="SUMMARY", complexity="COMPLEX")
- sim.register_ims_package(
- ims,
- [
- "GwF_1",
- ],
- )
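+ # register the replacement IMS with the GwF_1 model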
+ sim.register_ims_package(ims, ["GwF_1"])
sim.write_simulation()
success, buff = sim.run_simulation()
@@ -4812,9 +4568,7 @@ def test036_twrihfb(function_tmpdir, example_data_path):
cond_data[0][index][2] = 2.1
cond.set_data(cond_data[0], 0)
- rch = sim.simulation_data.mfdata[
- (model_name, "rcha", "period", "recharge")
- ]
+ rch = sim.simulation_data.mfdata[(model_name, "rcha", "period", "recharge")]
rch_data = rch.get_data()
assert rch_data[0][5, 1] == 0.00000003
@@ -4865,9 +4619,7 @@ def test027_timeseriestest(function_tmpdir, example_data_path):
sim.write_simulation()
# reload sim
- sim = MFSimulation.load(
- model_name, "mf6", "mf6", function_tmpdir, verify_data=True
- )
+ sim = MFSimulation.load(model_name, "mf6", "mf6", function_tmpdir, verify_data=True)
sim.write_simulation()
# run simulation
@@ -4938,9 +4690,7 @@ def test099_create_tests_int_ext(function_tmpdir, example_data_path):
perioddata=tdis_rc,
filename="simulation.tdis",
)
- model = ModflowGwf(
- sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
- )
+ model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
model.name_file.save_flows.set_data(True)
ims_package = ModflowIms(
sim,
@@ -4984,9 +4734,7 @@ def test099_create_tests_int_ext(function_tmpdir, example_data_path):
)
strt = np.ones((15, 10), float) * 50.0
strt_int = {"filename": "strt.txt", "factor": 0.8, "iprn": 0, "data": strt}
- ic_package = ModflowGwfic(
- model, strt=strt_int, filename=f"{model_name}.ic"
- )
+ ic_package = ModflowGwfic(model, strt=strt_int, filename=f"{model_name}.ic")
k_vals = np.ones((15, 10), float) * 10.0
assert k_vals[0, 0] == 10.0
@@ -4999,9 +4747,7 @@ def test099_create_tests_int_ext(function_tmpdir, example_data_path):
budget_filerecord="test1tr.cbc",
head_filerecord="test1tr.hds",
saverecord={0: [("HEAD", "FREQUENCY", 5), ("BUDGET", "FREQUENCY", 5)]},
- printrecord={
- 0: [("HEAD", "FREQUENCY", 5), ("BUDGET", "FREQUENCY", 5)]
- },
+ printrecord={0: [("HEAD", "FREQUENCY", 5), ("BUDGET", "FREQUENCY", 5)]},
)
sy_vals = np.ones((15, 10), float) * 0.1
diff --git a/autotest/regression/test_mf6_pandas.py b/autotest/regression/test_mf6_pandas.py
index 7ef875ada4..703ea34e69 100644
--- a/autotest/regression/test_mf6_pandas.py
+++ b/autotest/regression/test_mf6_pandas.py
@@ -81,9 +81,7 @@ def test_pandas_001(function_tmpdir, example_data_path):
assert sim.simulation_data.use_pandas
tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)]
- tdis_package = ModflowTdis(
- sim, time_units="DAYS", nper=2, perioddata=tdis_rc
- )
+ tdis_package = ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc)
# replace with real ims file
ims_package = ModflowIms(
sim,
@@ -101,9 +99,7 @@ def test_pandas_001(function_tmpdir, example_data_path):
preconditioner_drop_tolerance=0.01,
number_orthogonalizations=2,
)
- model = ModflowGwf(
- sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
- )
+ model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
top = {"filename": "top.txt", "data": 100.0}
botm = {"filename": "botm.txt", "data": 50.0}
dis_package = ModflowGwfdis(
diff --git a/autotest/regression/test_modflow.py b/autotest/regression/test_modflow.py
index 792cef2f76..9fee80b695 100644
--- a/autotest/regression/test_modflow.py
+++ b/autotest/regression/test_modflow.py
@@ -74,9 +74,7 @@ def test_uzf_unit_numbers(function_tmpdir, uzf_example_path):
# compare budget terms
fsum = join(function_tmpdir, f"{splitext(mfnam)[0]}.budget.out")
- success = compare_budget(
- fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
- )
+ success = compare_budget(fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum)
assert success, "budget comparison failure"
@@ -92,10 +90,12 @@ def test_unitnums(function_tmpdir, mf2005_test_path):
assert m.load_fail is False, "failed to load all packages"
v = (m.nlay, m.nrow, m.ncol, m.nper)
- assert v == (1, 7, 100, 50), (
- "modflow-2005 testsfr2_tab does not have "
- "1 layer, 7 rows, and 100 columns"
- )
+ assert v == (
+ 1,
+ 7,
+ 100,
+ 50,
+ ), "modflow-2005 testsfr2_tab does not have 1 layer, 7 rows, and 100 columns"
success, buff = m.run_model(silent=False)
assert success, "base model run did not terminate successfully"
@@ -112,9 +112,7 @@ def test_unitnums(function_tmpdir, mf2005_test_path):
fn1 = join(model_ws2, mfnam)
fsum = join(ws, f"{splitext(mfnam)[0]}.budget.out")
- success = compare_budget(
- fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
- )
+ success = compare_budget(fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum)
assert success, "budget comparison failure"
@@ -131,9 +129,7 @@ def test_gage(function_tmpdir, example_data_path):
copytree(pth, ws)
# load the modflow model
- mf = Modflow.load(
- "testsfr2_tab.nam", verbose=True, model_ws=ws, exe_name="mf2005"
- )
+ mf = Modflow.load("testsfr2_tab.nam", verbose=True, model_ws=ws, exe_name="mf2005")
# run the modflow-2005 model
success, buff = mf.run_model()
@@ -168,10 +164,7 @@ def test_gage(function_tmpdir, example_data_path):
@pytest.mark.regression
@pytest.mark.parametrize(
"namfile",
- [
- __example_data_path / "pcgn_test" / nf
- for nf in ["twri.nam", "MNW2.nam"]
- ],
+ [__example_data_path / "pcgn_test" / nf for nf in ["twri.nam", "MNW2.nam"]],
)
def test_mf2005pcgn(function_tmpdir, namfile):
ws = function_tmpdir / "ws"
@@ -208,9 +201,7 @@ def test_mf2005pcgn(function_tmpdir, namfile):
assert success, "head comparison failure"
fsum = function_tmpdir / f"{Path(namfile).stem}.budget.out"
- success = compare_budget(
- fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
- )
+ success = compare_budget(fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum)
assert success, "budget comparison failure"
@@ -250,9 +241,7 @@ def test_mf2005gmg(function_tmpdir, namfile):
assert success, "head comparison failure"
fsum = function_tmpdir / f"{Path(namfile).stem}.budget.out"
- success = compare_budget(
- fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
- )
+ success = compare_budget(fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum)
assert success, "budget comparison failure"
@@ -318,9 +307,7 @@ def test_mf2005(function_tmpdir, namfile):
# compare budgets
fsum = ws / f"{Path(namfile).stem}.budget.out"
- success = compare_budget(
- fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
- )
+ success = compare_budget(fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum)
assert success, "budget comparison failure"
@@ -344,9 +331,7 @@ def test_mf2005fhb(function_tmpdir, namfile):
ws = function_tmpdir / "ws"
copytree(Path(namfile).parent, ws)
- m = Modflow.load(
- Path(namfile).name, model_ws=ws, verbose=True, exe_name="mf2005"
- )
+ m = Modflow.load(Path(namfile).name, model_ws=ws, verbose=True, exe_name="mf2005")
assert m.load_fail is False
success, buff = m.run_model(silent=False)
@@ -366,9 +351,7 @@ def test_mf2005fhb(function_tmpdir, namfile):
assert success, "head comparison failure"
fsum = join(ws, f"{Path(namfile).stem}.budget.out")
- success = compare_budget(
- fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
- )
+ success = compare_budget(fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum)
assert success, "budget comparison failure"
@@ -394,14 +377,14 @@ def test_mf2005_lake(function_tmpdir, namfile, mf2005_test_path):
fn0 = join(ws, Path(namfile).name)
- # write free format files - won't run without resetting to free format - evt external file issue
+ # write free format files; won't run without resetting to
+ # free format (evt external file issue)
m.free_format_input = True
# rewrite files
model_ws2 = join(ws, "external")
- m.change_model_ws(
- model_ws2, reset_external=True
- ) # l1b2k_bath won't run without this
+ # l1b2k_bath won't run without this
+ m.change_model_ws(model_ws2, reset_external=True)
m.write_input()
success, buff = m.run_model()
@@ -410,7 +393,5 @@ def test_mf2005_lake(function_tmpdir, namfile, mf2005_test_path):
fsum = join(ws, f"{Path(namfile).stem}.budget.out")
- success = compare_budget(
- fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
- )
+ success = compare_budget(fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum)
assert success, "budget comparison failure"
diff --git a/autotest/regression/test_str.py b/autotest/regression/test_str.py
index 9f9aab3c6c..50756aef10 100644
--- a/autotest/regression/test_str.py
+++ b/autotest/regression/test_str.py
@@ -88,9 +88,7 @@ def test_str_fixed_free(function_tmpdir, example_data_path):
except:
m2 = None
- assert (
- m2 is not None
- ), "could not load the fixed format model with aux variables"
+ assert m2 is not None, "could not load the fixed format model with aux variables"
for p in function_tmpdir.glob("*"):
p.unlink()
@@ -114,9 +112,7 @@ def test_str_fixed_free(function_tmpdir, example_data_path):
except:
m2 = None
- assert (
- m2 is not None
- ), "could not load the free format model with aux variables"
+ assert m2 is not None, "could not load the free format model with aux variables"
# compare the fixed and free format head files
fn1 = function_tmpdir / "str.nam"
diff --git a/autotest/regression/test_swi2.py b/autotest/regression/test_swi2.py
index 86dc3d93b8..496940282c 100644
--- a/autotest/regression/test_swi2.py
+++ b/autotest/regression/test_swi2.py
@@ -16,9 +16,7 @@ def swi_path(example_data_path):
@requires_exe("mf2005")
@pytest.mark.slow
@pytest.mark.regression
-@pytest.mark.parametrize(
- "namfile", ["swiex1.nam", "swiex2_strat.nam", "swiex3.nam"]
-)
+@pytest.mark.parametrize("namfile", ["swiex1.nam", "swiex2_strat.nam", "swiex3.nam"])
def test_mf2005swi2(function_tmpdir, swi_path, namfile):
name = namfile.replace(".nam", "")
ws = function_tmpdir / "ws"
@@ -37,9 +35,8 @@ def test_mf2005swi2(function_tmpdir, swi_path, namfile):
# rewrite files
model_ws2 = os.path.join(ws, "flopy")
- m.change_model_ws(
- model_ws2, reset_external=True
- ) # l1b2k_bath won't run without this
+ # models won't run without resetting external files
+ m.change_model_ws(model_ws2, reset_external=True)
m.write_input()
success, buff = m.run_model()
@@ -47,8 +44,6 @@ def test_mf2005swi2(function_tmpdir, swi_path, namfile):
fn1 = os.path.join(model_ws2, namfile)
fsum = os.path.join(ws, f"{os.path.splitext(namfile)[0]}.budget.out")
- success = compare_budget(
- fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
- )
+ success = compare_budget(fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum)
assert success, "budget comparison failure"
diff --git a/autotest/regression/test_wel.py b/autotest/regression/test_wel.py
index 0c4a6cc0fd..110bb4148e 100644
--- a/autotest/regression/test_wel.py
+++ b/autotest/regression/test_wel.py
@@ -84,9 +84,7 @@ def test_binary_well(function_tmpdir):
m.remove_package("WEL")
# recreate well package with binary output
- wel = ModflowWel(
- m, stress_period_data=wel_data, binary=True, dtype=wd.dtype
- )
+ wel = ModflowWel(m, stress_period_data=wel_data, binary=True, dtype=wd.dtype)
# write the model to the new path
m.write_input()
@@ -97,14 +95,10 @@ def test_binary_well(function_tmpdir):
fn1 = os.path.join(pth, f"{mfnam}.nam")
# compare the files
- fsum = os.path.join(
- function_tmpdir, f"{os.path.splitext(mfnam)[0]}.head.out"
- )
+ fsum = os.path.join(function_tmpdir, f"{os.path.splitext(mfnam)[0]}.head.out")
assert compare_heads(fn0, fn1, outfile=fsum), "head comparison failure"
- fsum = os.path.join(
- function_tmpdir, f"{os.path.splitext(mfnam)[0]}.budget.out"
- )
+ fsum = os.path.join(function_tmpdir, f"{os.path.splitext(mfnam)[0]}.budget.out")
assert compare_budget(
fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
), "budget comparison failure"
diff --git a/autotest/test_binaryfile.py b/autotest/test_binaryfile.py
index 09351f4bfd..dd5b8161b4 100644
--- a/autotest/test_binaryfile.py
+++ b/autotest/test_binaryfile.py
@@ -122,9 +122,7 @@ def test_headfile_build_index(example_data_path):
)
# check first and last recorddict
list_recordarray = hds.recordarray.tolist()
- assert list_recordarray[0] == (
- (1, 1, 1.0, 1.0, b" HEAD", 20, 40, 1)
- )
+ assert list_recordarray[0] == ((1, 1, 1.0, 1.0, b" HEAD", 20, 40, 1))
assert list_recordarray[-1] == (
(1, 1097, 1.0, 1097.0, b" HEAD", 20, 40, 3)
)
@@ -179,12 +177,8 @@ def test_concentration_build_index(example_data_path):
)
# check first and last recorddict
list_recordarray = ucn.recordarray.tolist()
- assert list_recordarray[0] == (
- (29, 1, 1, 100.0, b"CONCENTRATION ", 21, 15, 1)
- )
- assert list_recordarray[-1] == (
- (29, 1, 1, 100.0, b"CONCENTRATION ", 21, 15, 8)
- )
+ assert list_recordarray[0] == ((29, 1, 1, 100.0, b"CONCENTRATION ", 21, 15, 1))
+ assert list_recordarray[-1] == ((29, 1, 1, 100.0, b"CONCENTRATION ", 21, 15, 8))
assert ucn.times == [np.float32(100.0)]
assert ucn.kstpkper == [(1, 1)]
np.testing.assert_array_equal(ucn.iposarray, np.arange(8) * 1304 + 44)
@@ -212,9 +206,7 @@ def test_concentration_build_index(example_data_path):
def test_binaryfile_writeread(function_tmpdir, nwt_model_path):
model = "Pr3_MFNWT_lower.nam"
- ml = flopy.modflow.Modflow.load(
- model, version="mfnwt", model_ws=nwt_model_path
- )
+ ml = flopy.modflow.Modflow.load(model, version="mfnwt", model_ws=nwt_model_path)
# change the model work space
ml.change_model_ws(function_tmpdir)
#
@@ -442,9 +434,7 @@ def test_binaryfile_read(function_tmpdir, freyberg_model_path):
assert np.array_equal(
h0, h1
), "binary head read using totim != head read using kstpkper"
- assert np.array_equal(
- h0, h2
- ), "binary head read using totim != head read using idx"
+ assert np.array_equal(h0, h2), "binary head read using totim != head read using idx"
ts = h.get_ts((0, 7, 5))
expected = 26.00697135925293
@@ -478,9 +468,7 @@ def test_binaryfile_read_context(freyberg_model_path):
def test_binaryfile_reverse_mf6_dis(function_tmpdir):
name = "reverse_dis"
- sim = flopy.mf6.MFSimulation(
- sim_name=name, sim_ws=function_tmpdir, exe_name="mf6"
- )
+ sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=function_tmpdir, exe_name="mf6")
tdis_rc = [(1, 1, 1.0), (1, 1, 1.0)]
nper = len(tdis_rc)
tdis = flopy.mf6.ModflowTdis(sim, nper=nper, perioddata=tdis_rc)
@@ -514,7 +502,7 @@ def test_binaryfile_reverse_mf6_dis(function_tmpdir):
assert success, pformat(buff)
# reverse head file in place and check reversal
- head_file = flopy.utils.HeadFile(function_tmpdir / head_file, tdis=tdis)
+ head_file = flopy.utils.HeadFile(function_tmpdir / head_file)
heads = head_file.get_alldata()
assert heads.shape == (nper, 2, 10, 10)
head_file.reverse()
@@ -523,20 +511,14 @@ def test_binaryfile_reverse_mf6_dis(function_tmpdir):
# reverse budget and write to separate file
budget_file_rev_path = function_tmpdir / f"{budget_file}_rev"
- budget_file = flopy.utils.CellBudgetFile(
- function_tmpdir / budget_file, tdis=tdis
- )
+ budget_file = flopy.utils.CellBudgetFile(function_tmpdir / budget_file)
budget_file.reverse(budget_file_rev_path)
- budget_file_rev = flopy.utils.CellBudgetFile(
- budget_file_rev_path, tdis=tdis
- )
+ budget_file_rev = flopy.utils.CellBudgetFile(budget_file_rev_path)
for kper in range(nper):
assert np.allclose(heads[kper], heads_rev[-kper + 1])
budget = budget_file.get_data(text="FLOW-JA-FACE", totim=kper)[0]
- budget_rev = budget_file_rev.get_data(text="FLOW-JA-FACE", totim=kper)[
- 0
- ]
+ budget_rev = budget_file_rev.get_data(text="FLOW-JA-FACE", totim=kper)[0]
assert budget.shape == budget_rev.shape
assert np.allclose(budget, -budget_rev)
@@ -544,9 +526,7 @@ def test_binaryfile_reverse_mf6_dis(function_tmpdir):
@requires_pkg("shapely")
def test_binaryfile_reverse_mf6_disv(function_tmpdir):
name = "reverse_disv"
- sim = flopy.mf6.MFSimulation(
- sim_name=name, sim_ws=function_tmpdir, exe_name="mf6"
- )
+ sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=function_tmpdir, exe_name="mf6")
tdis_rc = [(1, 1, 1.0), (1, 1, 1.0)]
nper = len(tdis_rc)
tdis = flopy.mf6.ModflowTdis(sim, nper=nper, perioddata=tdis_rc)
@@ -574,7 +554,7 @@ def test_binaryfile_reverse_mf6_disv(function_tmpdir):
assert success, pformat(buff)
# reverse head file in place and check reversal
- head_file = flopy.utils.HeadFile(function_tmpdir / head_file, tdis=tdis)
+ head_file = flopy.utils.HeadFile(function_tmpdir / head_file)
heads = head_file.get_alldata()
assert heads.shape == (nper, 2, 1, 100)
head_file.reverse()
@@ -583,20 +563,14 @@ def test_binaryfile_reverse_mf6_disv(function_tmpdir):
# reverse budget and write to separate file
budget_file_rev_path = function_tmpdir / f"{budget_file}_rev"
- budget_file = flopy.utils.CellBudgetFile(
- function_tmpdir / budget_file, tdis=tdis
- )
+ budget_file = flopy.utils.CellBudgetFile(function_tmpdir / budget_file)
budget_file.reverse(budget_file_rev_path)
- budget_file_rev = flopy.utils.CellBudgetFile(
- budget_file_rev_path, tdis=tdis
- )
+ budget_file_rev = flopy.utils.CellBudgetFile(budget_file_rev_path)
for kper in range(nper):
assert np.allclose(heads[kper], heads_rev[-kper + 1])
budget = budget_file.get_data(text="FLOW-JA-FACE", totim=kper)[0]
- budget_rev = budget_file_rev.get_data(text="FLOW-JA-FACE", totim=kper)[
- 0
- ]
+ budget_rev = budget_file_rev.get_data(text="FLOW-JA-FACE", totim=kper)[0]
assert budget.shape == budget_rev.shape
assert np.allclose(budget, -budget_rev)
@@ -609,9 +583,7 @@ def test_binaryfile_reverse_mf6_disu(example_data_path, function_tmpdir):
)
tdis_rc = [(1, 1, 1.0), (1, 1, 1.0)]
nper = len(tdis_rc)
- tdis = flopy.mf6.ModflowTdis(
- sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
- )
+ tdis = flopy.mf6.ModflowTdis(sim, time_units="DAYS", nper=nper, perioddata=tdis_rc)
sim.set_sim_path(function_tmpdir)
sim.write_simulation()
sim.run_simulation()
@@ -623,7 +595,7 @@ def test_binaryfile_reverse_mf6_disu(example_data_path, function_tmpdir):
# reverse and write to a separate file
head_file_rev_path = function_tmpdir / "flow_rev.hds"
head_file.reverse(filename=head_file_rev_path)
- head_file_rev = HeadFile(head_file_rev_path, tdis=tdis)
+ head_file_rev = HeadFile(head_file_rev_path)
# load budget file
file_path = function_tmpdir / "flow.cbc"
@@ -632,7 +604,7 @@ def test_binaryfile_reverse_mf6_disu(example_data_path, function_tmpdir):
# reverse and write to a separate file
budget_file_rev_path = function_tmpdir / "flow_rev.cbc"
budget_file.reverse(filename=budget_file_rev_path)
- budget_file_rev = CellBudgetFile(budget_file_rev_path, tdis=tdis)
+ budget_file_rev = CellBudgetFile(budget_file_rev_path)
# check that data from both files have the same shape
assert head_file.get_alldata().shape == (nper, 1, 1, 121)
@@ -665,9 +637,7 @@ def test_binaryfile_reverse_mf6_disu(example_data_path, function_tmpdir):
assert np.array_equal(f_data[0][0], rf_data[0][0])
budget = budget_file.get_data(text="FLOW-JA-FACE", totim=idx)[0]
- budget_rev = budget_file_rev.get_data(text="FLOW-JA-FACE", totim=idx)[
- 0
- ]
+ budget_rev = budget_file_rev.get_data(text="FLOW-JA-FACE", totim=idx)[0]
assert budget.shape == budget_rev.shape
assert np.allclose(budget, -budget_rev)
@@ -791,9 +761,7 @@ def test_read_mf6_2sp(mf6_gwf_2sp_st_tr):
@pytest.mark.parametrize("compact", [True, False])
def test_read_mf2005_freyberg(example_data_path, function_tmpdir, compact):
- m = flopy.modflow.Modflow.load(
- example_data_path / "freyberg" / "freyberg.nam",
- )
+ m = flopy.modflow.Modflow.load(example_data_path / "freyberg" / "freyberg.nam")
m.change_model_ws(function_tmpdir)
oc = m.get_package("OC")
oc.compact = compact
diff --git a/autotest/test_binarygrid_util.py b/autotest/test_binarygrid_util.py
index 4c79aaf7d8..4750c3fef3 100644
--- a/autotest/test_binarygrid_util.py
+++ b/autotest/test_binarygrid_util.py
@@ -43,27 +43,20 @@ def test_mfgrddis_modelgrid(mfgrd_test_path):
plt.close()
extents = modelgrid.extent
- errmsg = (
- f"extents {extents} of {fn} does not equal (0.0, 8000.0, 0.0, 8000.0)"
- )
+ errmsg = f"extents {extents} of {fn} does not equal (0.0, 8000.0, 0.0, 8000.0)"
assert extents == (0.0, 8000.0, 0.0, 8000.0), errmsg
ncpl = modelgrid.ncol * modelgrid.nrow
- assert (
- modelgrid.ncpl == ncpl
- ), f"ncpl ({modelgrid.ncpl}) does not equal {ncpl}"
+ assert modelgrid.ncpl == ncpl, f"ncpl ({modelgrid.ncpl}) does not equal {ncpl}"
nvert = modelgrid.nvert
iverts = modelgrid.iverts
maxvertex = max([max(sublist[1:]) for sublist in iverts])
- assert (
- maxvertex + 1 == nvert
- ), f"nvert ({maxvertex + 1}) does not equal {nvert}"
+ assert maxvertex + 1 == nvert, f"nvert ({maxvertex + 1}) does not equal {nvert}"
verts = modelgrid.verts
- assert nvert == verts.shape[0], (
- f"number of vertex (x, y) pairs ({verts.shape[0]}) "
- f"does not equal {nvert}"
- )
+ assert (
+ nvert == verts.shape[0]
+ ), f"number of vertex (x, y) pairs ({verts.shape[0]}) does not equal {nvert}"
def test_mfgrddisv_MfGrdFile(mfgrd_test_path):
@@ -107,19 +100,14 @@ def test_mfgrddisv_modelgrid(mfgrd_test_path):
nvert = mg.nvert
iverts = mg.iverts
maxvertex = max([max(sublist[1:]) for sublist in iverts])
- assert (
- maxvertex + 1 == nvert
- ), f"nvert ({maxvertex + 1}) does not equal {nvert}"
+ assert maxvertex + 1 == nvert, f"nvert ({maxvertex + 1}) does not equal {nvert}"
verts = mg.verts
- assert nvert == verts.shape[0], (
- f"number of vertex (x, y) pairs ({verts.shape[0]}) "
- f"does not equal {nvert}"
- )
+ assert (
+ nvert == verts.shape[0]
+ ), f"number of vertex (x, y) pairs ({verts.shape[0]}) does not equal {nvert}"
cellxy = np.column_stack(mg.xyzcellcenters[:2])
- errmsg = (
- f"shape of flow.disv centroids {cellxy.shape} not equal to (218, 2)."
- )
+ errmsg = f"shape of flow.disv centroids {cellxy.shape} not equal to (218, 2)."
assert cellxy.shape == (218, 2), errmsg
@@ -166,11 +154,8 @@ def test_mfgrddisu_modelgrid(mfgrd_test_path):
nvert = mg.nvert
iverts = mg.iverts
maxvertex = max([max(sublist[1:]) for sublist in iverts])
- assert (
- maxvertex + 1 == nvert
- ), f"nvert ({maxvertex + 1}) does not equal {nvert}"
+ assert maxvertex + 1 == nvert, f"nvert ({maxvertex + 1}) does not equal {nvert}"
verts = mg.verts
- assert nvert == verts.shape[0], (
- f"number of vertex (x, y) pairs ({verts.shape[0]}) "
- f"does not equal {nvert}"
- )
+ assert (
+ nvert == verts.shape[0]
+ ), f"number of vertex (x, y) pairs ({verts.shape[0]}) does not equal {nvert}"
diff --git a/autotest/test_cbc_full3D.py b/autotest/test_cbc_full3D.py
index 54bad1064e..7183b380a9 100644
--- a/autotest/test_cbc_full3D.py
+++ b/autotest/test_cbc_full3D.py
@@ -73,14 +73,14 @@ def load_mf6(path, ws_out):
def cbc_eval_size(cbcobj, nnodes, shape3d):
cbc_pth = cbcobj.filename
- assert cbcobj.nnodes == nnodes, (
- f"{cbc_pth} nnodes ({cbcobj.nnodes}) " f"does not equal {nnodes}"
- )
+ assert (
+ cbcobj.nnodes == nnodes
+ ), f"{cbc_pth} nnodes ({cbcobj.nnodes}) does not equal {nnodes}"
a = np.squeeze(np.ones(cbcobj.shape, dtype=float))
b = np.squeeze(np.ones(shape3d, dtype=float))
- assert a.shape == b.shape, (
- f"{cbc_pth} shape {cbcobj.shape} " f"does not conform to {shape3d}"
- )
+ assert (
+ a.shape == b.shape
+ ), f"{cbc_pth} shape {cbcobj.shape} does not conform to {shape3d}"
def cbc_eval_data(cbcobj, shape3d):
@@ -92,9 +92,7 @@ def cbc_eval_data(cbcobj, shape3d):
times = cbcobj.get_times()
for name in names:
text = name.strip()
- arr = np.squeeze(
- cbcobj.get_data(text=text, totim=times[0], full3D=True)[0]
- )
+ arr = np.squeeze(cbcobj.get_data(text=text, totim=times[0], full3D=True)[0])
if text != "FLOW-JA-FACE":
b = np.squeeze(np.ones(shape3d, dtype=float))
assert arr.shape == b.shape, (
@@ -132,7 +130,7 @@ def test_cbc_full3D_mf6(function_tmpdir, path):
sim.run_simulation()
# get the groundwater model and determine the size of the model grid
- gwf_name = list(sim.model_names)[0]
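+ # take the first model name without materializing the full list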
+ gwf_name = next(iter(sim.model_names))
gwf = sim.get_model(gwf_name)
nnodes, shape3d = gwf.modelgrid.nnodes, gwf.modelgrid.shape
diff --git a/autotest/test_cellbudgetfile.py b/autotest/test_cellbudgetfile.py
index ebcaf15a94..dafff15bd3 100644
--- a/autotest/test_cellbudgetfile.py
+++ b/autotest/test_cellbudgetfile.py
@@ -114,7 +114,7 @@ def test_cellbudgetfile_build_index_compact(example_data_path):
52,
)
assert list_recorddict[-1] == (
- (1, 1097, b"FLOW LOWER FACE ", 20, 40, -3, 1, 1.0, 1.0, 1097.0, b"", b"", b"", b""),
+ (1, 1097, b"FLOW LOWER FACE ", 20, 40, -3, 1, 1.0, 1.0, 1097.0, b"", b"", b"", b""), # noqa
42648784,
)
# fmt: on
@@ -209,7 +209,7 @@ def test_cellbudgetfile_build_index_mf6(example_data_path):
assert list_recorddict[-1] == (
(120, 4, b" EVT", 10, 15, -3, 6,
0.08333333333333333, 10.000000000000002, 30.99999999999983,
- b"GWF_1 ", b"GWF_1 ", b"GWF_1 ", b"EVT "),
+ b"GWF_1 ", b"GWF_1 ", b"GWF_1 ", b"EVT "), # noqa
13414144,
)
# fmt: on
@@ -350,13 +350,7 @@ def test_budgetfile_detect_precision_single(path):
@pytest.mark.parametrize(
"path",
- [
- _example_data_path
- / "mf6"
- / "test006_gwf3"
- / "expected_output"
- / "flow_adj.cbc",
- ],
+ [_example_data_path / "mf6" / "test006_gwf3" / "expected_output" / "flow_adj.cbc"],
)
def test_budgetfile_detect_precision_double(path):
file = CellBudgetFile(path, precision="auto")
@@ -484,15 +478,13 @@ def test_cellbudgetfile_readrecord(example_data_path):
with pytest.raises(TypeError) as e:
v.get_data()
- assert str(e.value).startswith(
- "get_data() missing 1 required argument"
- ), str(e.exception)
+ assert str(e.value).startswith("get_data() missing 1 required argument"), str(
+ e.value
+ )
t = v.get_data(text="STREAM LEAKAGE")
assert len(t) == 30, "length of stream leakage data != 30"
- assert (
- t[0].shape[0] == 36
- ), "sfr budget data does not have 36 reach entries"
+ assert t[0].shape[0] == 36, "sfr budget data does not have 36 reach entries"
t = v.get_data(text="STREAM LEAKAGE", full3D=True)
assert t[0].shape == (1, 15, 10), (
@@ -517,11 +509,10 @@ def test_cellbudgetfile_readrecord(example_data_path):
for idx, kk in enumerate(kstpkper):
t0 = v.get_data(kstpkper=kk, text=record.strip())[0]
t1 = v.get_data(idx=indices[idx], text=record)[0]
- assert np.array_equal(
- t0, t1
- ), "binary budget item {0} read using kstpkper != binary budget item {0} read using idx".format(
- record
- )
+ assert np.array_equal(t0, t1), (
+ "binary budget item {0} read using kstpkper != "
+ "binary budget item {0} read using idx"
+ ).format(record)
# idx can be either an int or a list of ints
s9 = v.get_data(idx=9)
@@ -579,11 +570,10 @@ def test_cellbudgetfile_readrecord_waux(example_data_path):
for idx, kk in enumerate(kstpkper):
t0 = v.get_data(kstpkper=kk, text=record.strip())[0]
t1 = v.get_data(idx=indices[idx], text=record)[0]
- assert np.array_equal(
- t0, t1
- ), "binary budget item {0} read using kstpkper != binary budget item {0} read using idx".format(
- record
- )
+ assert np.array_equal(t0, t1), (
+ "binary budget item {0} read using kstpkper != "
+ "binary budget item {0} read using idx"
+ ).format(record)
v.close()
@@ -597,9 +587,7 @@ def test_cellbudgetfile_reverse_mf2005(example_data_path, function_tmpdir):
sim_name = "test1tr"
# load simulation and extract tdis
- sim = MFSimulation.load(
- sim_name=sim_name, sim_ws=example_data_path / "mf2005_test"
- )
+ sim = MFSimulation.load(sim_name=sim_name, sim_ws=example_data_path / "mf2005_test")
tdis = sim.get_package("tdis")
mf2005_model_path = example_data_path / sim_name
diff --git a/autotest/test_compare.py b/autotest/test_compare.py
index f96865cc3b..3a961243b2 100644
--- a/autotest/test_compare.py
+++ b/autotest/test_compare.py
@@ -26,9 +26,7 @@ def test_diffmax():
a1 = np.array([1, 2, 3])
a2 = np.array([4, 5, 7])
d, indices = _diffmax(a1, a2)
- indices = indices[
- 0
- ] # return value is a tuple of arrays (1 for each dimension)
+ indices = indices[0] # return value is a tuple of arrays (1 for each dimension)
assert d == 4
assert list(indices) == [2]
@@ -37,9 +35,7 @@ def test_difftol():
a1 = np.array([1, 2, 3])
a2 = np.array([3, 5, 7])
d, indices = _difftol(a1, a2, 2.5)
- indices = indices[
- 0
- ] # return value is a tuple of arrays (1 for each dimension)
+ indices = indices[0] # return value is a tuple of arrays (1 for each dimension)
assert d == 4
print(d, indices)
assert list(indices) == [1, 2]
@@ -123,9 +119,7 @@ def comparison_model_1(function_tmpdir):
m.remove_package("WEL")
# recreate well package with binary output
- wel = ModflowWel(
- m, stress_period_data=wel_data, binary=True, dtype=wd.dtype
- )
+ wel = ModflowWel(m, stress_period_data=wel_data, binary=True, dtype=wd.dtype)
m.write_input()
diff --git a/autotest/test_copy.py b/autotest/test_copy.py
index 52072ad15b..af4c4e1e27 100644
--- a/autotest/test_copy.py
+++ b/autotest/test_copy.py
@@ -54,10 +54,6 @@ def model_is_copy(m1, m2):
if k in [
"_packagelist",
"_package_paths",
- "package_key_dict",
- "package_type_dict",
- "package_name_dict",
- "package_filename_dict",
"_ftype_num_dict",
]:
continue
@@ -83,31 +79,20 @@ def package_is_copy(pk1, pk2):
"""
for k, v in pk1.__dict__.items():
v2 = pk2.__dict__[k]
- if v2 is v and type(v) not in [
- bool,
- str,
- type(None),
- float,
- int,
- tuple,
- ]:
+ if v2 is v and type(v) not in [bool, str, type(None), float, int, tuple]:
# Deep copy doesn't work for ModflowUtltas
if not inspect.isclass(v):
return False
if k in [
"_child_package_groups",
"_data_list",
- "_packagelist",
- "_simulation_data",
+ "simulation_data",
"blocks",
"dimensions",
- "package_key_dict",
- "package_name_dict",
- "package_filename_dict",
- "package_type_dict",
"post_block_comments",
"simulation_data",
"structure",
+ "_package_container",
]:
continue
elif isinstance(v, MFPackage):
@@ -177,8 +162,8 @@ def list_is_copy(mflist1, mflist2):
if mflist2 is mflist1:
return False
if isinstance(mflist1, MFTransientList):
- data1 = {per: ra for per, ra in enumerate(mflist1.array)}
- data2 = {per: ra for per, ra in enumerate(mflist2.array)}
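+ # build {stress period index: recarray} mappings for comparison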
+ data1 = dict(enumerate(mflist1.array))
+ data2 = dict(enumerate(mflist2.array))
elif isinstance(mflist1, MFList):
data1 = {0: mflist1.array}
data2 = {0: mflist2.array}
@@ -189,14 +174,7 @@ def list_is_copy(mflist1, mflist2):
if k not in data2:
return False
v2 = data2[k]
- if v2 is v and type(v) not in [
- bool,
- str,
- type(None),
- float,
- int,
- tuple,
- ]:
+ if v2 is v and type(v) not in [bool, str, type(None), float, int, tuple]:
return False
if v is None and v2 is None:
continue
diff --git a/autotest/test_datautil.py b/autotest/test_datautil.py
index d4a735a289..a40f850306 100644
--- a/autotest/test_datautil.py
+++ b/autotest/test_datautil.py
@@ -10,4 +10,4 @@ def test_split_data_line():
assert len(spl) == len(exp)
# whitespace is not removed, todo: can it be?
# or is it needed to support Modflow input file format?
- assert all(any([e in s for s in spl]) for e in exp)
+ assert all(any(e in s for s in spl) for e in exp)
diff --git a/autotest/test_dis_cases.py b/autotest/test_dis_cases.py
index ae6ba62e96..41d527924d 100644
--- a/autotest/test_dis_cases.py
+++ b/autotest/test_dis_cases.py
@@ -62,9 +62,7 @@ def get_vlist(i, j, nrow, ncol):
[icpl, cellxy[icpl, 0], cellxy[icpl, 1], 4] + iverts[icpl]
for icpl in range(ncpl)
]
- vertices = [
- [ivert, verts[ivert, 0], verts[ivert, 1]] for ivert in range(nvert)
- ]
+ vertices = [[ivert, verts[ivert, 0], verts[ivert, 1]] for ivert in range(nvert)]
xorigin = 3000
yorigin = 1000
angrot = 10
diff --git a/autotest/test_export.py b/autotest/test_export.py
index ec0d57a748..bd7c091b9b 100644
--- a/autotest/test_export.py
+++ b/autotest/test_export.py
@@ -2,17 +2,12 @@
import os
import shutil
from pathlib import Path
-from typing import List
import matplotlib.pyplot as plt
import numpy as np
import pytest
from flaky import flaky
-from modflow_devtools.markers import (
- excludes_platform,
- requires_exe,
- requires_pkg,
-)
+from modflow_devtools.markers import excludes_platform, requires_exe, requires_pkg
from modflow_devtools.misc import has_pkg
import flopy
@@ -56,7 +51,7 @@
import pyproj
-def namfiles() -> List[Path]:
+def namfiles() -> list[Path]:
mf2005_path = get_example_data_path() / "mf2005_test"
return list(mf2005_path.rglob("*.nam"))
@@ -98,25 +93,8 @@ def disu_sim(name, tmpdir, missing_arrays=False):
xmax = 12 * delr
ymin = 8 * delc
ymax = 13 * delc
- rfpoly = [
- [
- [
- (xmin, ymin),
- (xmax, ymin),
- (xmax, ymax),
- (xmin, ymax),
- (xmin, ymin),
- ]
- ]
- ]
- g.add_refinement_features(
- rfpoly,
- "polygon",
- 2,
- [
- 0,
- ],
- )
+ rfpoly = [[[(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax), (xmin, ymin)]]]
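+ # refine the polygon feature to level 2 in layer 0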
+ g.add_refinement_features(rfpoly, "polygon", 2, [0])
g.build(verbose=False)
gridprops = g.get_gridprops_disu6()
@@ -129,12 +107,8 @@ def disu_sim(name, tmpdir, missing_arrays=False):
gwf = ModflowGwf(sim, modelname=name, save_flows=True)
dis = ModflowGwfdisu(gwf, **gridprops)
- ic = ModflowGwfic(
- gwf, strt=np.random.random_sample(gwf.modelgrid.nnodes) * 350
- )
- npf = ModflowGwfnpf(
- gwf, k=np.random.random_sample(gwf.modelgrid.nnodes) * 10
- )
+ ic = ModflowGwfic(gwf, strt=np.random.random_sample(gwf.modelgrid.nnodes) * 350)
+ npf = ModflowGwfnpf(gwf, k=np.random.random_sample(gwf.modelgrid.nnodes) * 10)
return sim
@@ -179,9 +153,7 @@ def unstructured_grid(example_data_path):
@requires_pkg("pyshp", name_map={"pyshp": "shapefile"})
@pytest.mark.parametrize("pathlike", (True, False))
-def test_output_helper_shapefile_export(
- pathlike, function_tmpdir, example_data_path
-):
+def test_output_helper_shapefile_export(pathlike, function_tmpdir, example_data_path):
ml = Modflow.load(
"freyberg.nam",
model_ws=str(example_data_path / "freyberg_multilayer_transient"),
@@ -194,11 +166,7 @@ def test_output_helper_shapefile_export(
else:
outpath = os.path.join(function_tmpdir, "test.shp")
flopy.export.utils.output_helper(
- outpath,
- ml,
- {"HDS": head, "cbc": cbc},
- mflay=1,
- kper=10,
+ outpath, ml, {"HDS": head, "cbc": cbc}, mflay=1, kper=10
)
@@ -209,9 +177,7 @@ def test_freyberg_export(function_tmpdir, example_data_path):
name = "freyberg"
namfile = f"{name}.nam"
ws = example_data_path / name
- m = flopy.modflow.Modflow.load(
- namfile, model_ws=ws, check=False, verbose=False
- )
+ m = flopy.modflow.Modflow.load(namfile, model_ws=ws, check=False, verbose=False)
# test export at model, package and object levels
shpfile_path = function_tmpdir / "model.shp"
@@ -250,9 +216,7 @@ def test_freyberg_export(function_tmpdir, example_data_path):
part.unlink()
assert not shape.with_suffix(".prj").exists()
- m.modelgrid = StructuredGrid(
- delc=m.dis.delc.array, delr=m.dis.delr.array, crs=3070
- )
+ m.modelgrid = StructuredGrid(delc=m.dis.delc.array, delr=m.dis.delr.array, crs=3070)
# test export with a modelgrid, regardless of whether or not wkt was found
m.drn.stress_period_data.export(shape, sparse=True)
for suffix in [".dbf", ".prj", ".shp", ".shx"]:
@@ -260,9 +224,7 @@ def test_freyberg_export(function_tmpdir, example_data_path):
assert part.exists()
part.unlink()
- m.modelgrid = StructuredGrid(
- delc=m.dis.delc.array, delr=m.dis.delr.array, crs=3070
- )
+ m.modelgrid = StructuredGrid(delc=m.dis.delc.array, delr=m.dis.delr.array, crs=3070)
# verify that attributes have same modelgrid as parent
assert m.drn.stress_period_data.mg.crs == m.modelgrid.crs
assert m.drn.stress_period_data.mg.xoffset == m.modelgrid.xoffset
@@ -321,17 +283,13 @@ def test_disu_export(function_tmpdir, missing_arrays):
@pytest.mark.parametrize("crs", (None, 26916))
@requires_pkg("netCDF4", "pyproj")
def test_export_output(crs, function_tmpdir, example_data_path):
- ml = Modflow.load(
- "freyberg.nam", model_ws=str(example_data_path / "freyberg")
- )
+ ml = Modflow.load("freyberg.nam", model_ws=str(example_data_path / "freyberg"))
ml.modelgrid.crs = crs
hds_pth = os.path.join(ml.model_ws, "freyberg.githds")
hds = flopy.utils.HeadFile(hds_pth)
out_pth = function_tmpdir / f"freyberg_{crs}.out.nc"
- nc = flopy.export.utils.output_helper(
- out_pth, ml, {"freyberg.githds": hds}
- )
+ nc = flopy.export.utils.output_helper(out_pth, ml, {"freyberg.githds": hds})
var = nc.nc.variables.get("head")
arr = var[:]
ibound_mask = ml.bas6.ibound.array == 0
@@ -394,9 +352,7 @@ def test_export_shapefile_polygon_closed(function_tmpdir):
m = flopy.modflow.Modflow("test.nam", crs="EPSG:32614", xll=xll, yll=yll)
- flopy.modflow.ModflowDis(
- m, delr=spacing, delc=spacing, nrow=nrow, ncol=ncol
- )
+ flopy.modflow.ModflowDis(m, delr=spacing, delc=spacing, nrow=nrow, ncol=ncol)
shp_file = os.path.join(function_tmpdir, "test_polygon.shp")
m.dis.export(shp_file)
@@ -449,8 +405,7 @@ def test_export_array(function_tmpdir, example_data_path):
if "cellsize" in line.lower():
val = float(line.strip().split()[-1])
rot_cellsize = (
- np.cos(np.radians(m.modelgrid.angrot))
- * m.modelgrid.delr[0]
+ np.cos(np.radians(m.modelgrid.angrot)) * m.modelgrid.delr[0]
)
break
@@ -618,9 +573,7 @@ def test_export_array2(function_tmpdir):
crs = 4431
# no epsg code
- modelgrid = StructuredGrid(
- delr=np.ones(ncol) * 1.1, delc=np.ones(nrow) * 1.1
- )
+ modelgrid = StructuredGrid(delr=np.ones(ncol) * 1.1, delc=np.ones(nrow) * 1.1)
filename = os.path.join(function_tmpdir, "myarray1.shp")
a = np.arange(nrow * ncol).reshape((nrow, ncol))
export_array(modelgrid, filename, a)
@@ -636,9 +589,7 @@ def test_export_array2(function_tmpdir):
assert os.path.isfile(filename), "did not create array shapefile"
# with passing in epsg code
- modelgrid = StructuredGrid(
- delr=np.ones(ncol) * 1.1, delc=np.ones(nrow) * 1.1
- )
+ modelgrid = StructuredGrid(delr=np.ones(ncol) * 1.1, delc=np.ones(nrow) * 1.1)
filename = os.path.join(function_tmpdir, "myarray3.shp")
a = np.arange(nrow * ncol).reshape((nrow, ncol))
export_array(modelgrid, filename, a, crs=crs)
@@ -656,10 +607,7 @@ def test_array3d_export_structured(function_tmpdir):
ncol = int((xur - xll) / spacing)
nrow = int((yur - yll) / spacing)
sim = flopy.mf6.MFSimulation("sim", sim_ws=function_tmpdir)
- gwf = flopy.mf6.ModflowGwf(
- sim,
- modelname="array3d_export_unstructured",
- )
+ gwf = flopy.mf6.ModflowGwf(sim, modelname="array3d_export_unstructured")
flopy.mf6.ModflowGwfdis(
gwf,
nlay=3,
@@ -711,9 +659,7 @@ def test_export_array_contours_structured(function_tmpdir):
crs = 4431
# no epsg code
- modelgrid = StructuredGrid(
- delr=np.ones(ncol) * 1.1, delc=np.ones(nrow) * 1.1
- )
+ modelgrid = StructuredGrid(delr=np.ones(ncol) * 1.1, delc=np.ones(nrow) * 1.1)
filename = function_tmpdir / "myarraycontours1.shp"
a = np.arange(nrow * ncol).reshape((nrow, ncol))
export_array_contours(modelgrid, filename, a)
@@ -731,9 +677,7 @@ def test_export_array_contours_structured(function_tmpdir):
assert os.path.isfile(filename), "did not create contour shapefile"
# with passing in coordinate reference
- modelgrid = StructuredGrid(
- delr=np.ones(ncol) * 1.1, delc=np.ones(nrow) * 1.1
- )
+ modelgrid = StructuredGrid(delr=np.ones(ncol) * 1.1, delc=np.ones(nrow) * 1.1)
filename = function_tmpdir / "myarraycontours3.shp"
a = np.arange(nrow * ncol).reshape((nrow, ncol))
export_array_contours(modelgrid, filename, a, crs=crs)
@@ -741,9 +685,7 @@ def test_export_array_contours_structured(function_tmpdir):
@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"})
-def test_export_array_contours_unstructured(
- function_tmpdir, unstructured_grid
-):
+def test_export_array_contours_unstructured(function_tmpdir, unstructured_grid):
from shapefile import Reader
grid = unstructured_grid
@@ -825,9 +767,7 @@ def test_export_contourf(function_tmpdir, example_data_path):
with Reader(filename) as r:
shapes = r.shapes()
# expect 65 with standard mpl contours (structured grids), 86 with tricontours
- assert (
- len(shapes) >= 65
- ), "multipolygons were skipped in contourf routine"
+ assert len(shapes) >= 65, "multipolygons were skipped in contourf routine"
# debugging
# for s in shapes:
@@ -851,9 +791,7 @@ def test_export_contours(function_tmpdir, example_data_path):
levels = np.arange(10, 30, 0.5)
mapview = flopy.plot.PlotMapView(model=ml)
- contour_set = mapview.contour_array(
- head, masked_values=[999.0], levels=levels
- )
+ contour_set = mapview.contour_array(head, masked_values=[999.0], levels=levels)
export_contours(filename, contour_set)
plt.close()
@@ -874,8 +812,10 @@ def test_export_contours(function_tmpdir, example_data_path):
@pytest.mark.mf6
-@requires_pkg("shapely")
-def test_mf6_grid_shp_export(function_tmpdir):
+@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"})
+def test_export_mf6_shp(function_tmpdir):
+ from shapefile import Reader
+
nlay = 2
nrow = 10
ncol = 10
@@ -939,27 +879,17 @@ def test_mf6_grid_shp_export(function_tmpdir):
tdis = flopy.mf6.modflow.mftdis.ModflowTdis(
sim, pname="tdis", time_units="DAYS", nper=nper, perioddata=perioddata
)
- gwf = flopy.mf6.ModflowGwf(
- sim, modelname=mf6name, model_nam_file=f"{mf6name}.nam"
- )
+ gwf = flopy.mf6.ModflowGwf(sim, modelname=mf6name, model_nam_file=f"{mf6name}.nam")
dis6 = flopy.mf6.ModflowGwfdis(
gwf, pname="dis", nlay=nlay, nrow=nrow, ncol=ncol, top=top, botm=botm
)
- def cellid(k, i, j, nrow, ncol):
- return k * nrow * ncol + i * ncol + j
-
# Riv6
- spd6 = flopy.mf6.ModflowGwfriv.stress_period_data.empty(
- gwf, maxbound=len(spd)
- )
+ spd6 = flopy.mf6.ModflowGwfriv.stress_period_data.empty(gwf, maxbound=len(spd))
spd6[0]["cellid"] = list(zip(spd.k, spd.i, spd.j))
for c in spd.dtype.names:
if c in spd6[0].dtype.names:
spd6[0][c] = spd[c]
- # MFTransient list apparently requires entries for additional stress periods,
- # even if they are the same
- spd6[1] = spd6[0]
riv6 = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=spd6)
rch6 = flopy.mf6.ModflowGwfrcha(gwf, recharge=rech)
@@ -971,13 +901,10 @@ def cellid(k, i, j, nrow, ncol):
), f"variable {k} is not equal"
pass
- if not has_pkg("shapefile"):
- return
-
m.export(function_tmpdir / "mfnwt.shp")
gwf.export(function_tmpdir / "mf6.shp")
- # check that the two shapefiles are the same
+ # check that the shapefiles are the same
ra = shp2recarray(function_tmpdir / "mfnwt.shp")
ra6 = shp2recarray(function_tmpdir / "mf6.shp")
@@ -1002,6 +929,29 @@ def cellid(k, i, j, nrow, ncol):
else:
assert np.abs(it - it6) < 1e-6
+ # Compare exported riv shapefiles
+ riv.export(function_tmpdir / "riv.shp")
+ riv6.export(function_tmpdir / "riv6.shp")
+ with (
+ Reader(function_tmpdir / "riv.shp") as riv_shp,
+ Reader(function_tmpdir / "riv6.shp") as riv6_shp,
+ ):
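+ # the final shape record of each export should be identical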
+ assert list(riv_shp.shapeRecord(-1).record) == list(
+ riv6_shp.shapeRecord(-1).record
+ )
+
+ # Check wel export with timeseries
+ wel_spd_0 = flopy.mf6.ModflowGwfwel.stress_period_data.empty(
+ gwf, maxbound=1, timeseries=True
+ )
+ wel_spd_0[0][0] = ((0, 0, 0), -99.0)
+ wel = flopy.mf6.ModflowGwfwel(
+ gwf,
+ maxbound=1,
+ stress_period_data={0: wel_spd_0[0]},
+ )
+ wel.export(function_tmpdir / "wel_test.shp")
+
@requires_pkg("pyshp", name_map={"pyshp": "shapefile"})
@pytest.mark.slow
@@ -1016,9 +966,7 @@ def test_export_huge_shapefile(function_tmpdir):
tsmult = 1
botm = np.zeros((nlay, nrow, ncol))
- m = flopy.modflow.Modflow(
- "junk", version="mfnwt", model_ws=function_tmpdir
- )
+ m = flopy.modflow.Modflow("junk", version="mfnwt", model_ws=function_tmpdir)
flopy.modflow.ModflowDis(
m,
nlay=nlay,
@@ -1170,6 +1118,30 @@ def count_lines_in_file(filepath):
return n
+def get_vtk_xml_info(filepath):
+ """Read VTK XML file, return dict of basic information."""
+ import xml.etree.ElementTree as ET
+
+ tree = ET.parse(filepath)
+ root = tree.getroot()
+ assert root.tag == "VTKFile"
+ grid_type = root.get("type")
+ grid = root.find(grid_type)
+ assert len(grid) == 1
+ piece = grid[0]
+ assert piece.tag == "Piece"
+ info = {
+ "type": grid_type,
+ "version": root.get("version"),
+ "number_of_cells": int(piece.get("NumberOfCells")),
+ "number_of_points": int(piece.get("NumberOfPoints")),
+ }
+ for elem in piece:
+ names = [subelem.get("Name") for subelem in elem]
+ info[elem.tag.lower() + "_names"] = names
+ return info
+
+
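Note: for context on the helper above, flopy's XML exports follow the standard VTK XML layout, roughly as sketched below (the counts shown match the freyberg grid these tests use):

    #   <VTKFile type="UnstructuredGrid" version="0.1" ...>
    #     <UnstructuredGrid>
    #       <Piece NumberOfPoints="19200" NumberOfCells="2400">
    #         <Points>...</Points>
    #         <Cells>...</Cells>
    #         <CellData> <DataArray Name="top"/> <DataArray Name="botm"/> </CellData>
    #         <PointData/>
    #       </Piece>
    #     </UnstructuredGrid>
    #   </VTKFile>
    #
    # so for a hypothetical export "DIS.vtk" written with xml=True into ws:
    info = get_vtk_xml_info(ws / "DIS.vtk")
    assert info["number_of_cells"] == 2400
    assert info["celldata_names"] == ["top", "botm"]

Checking these structural facts is robust across VTK writer versions, unlike the raw line counts the old assertions relied on.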
def is_binary_file(filepath):
is_binary = False
with open(filepath) as f:
@@ -1186,9 +1158,7 @@ def test_vtk_export_array2d(function_tmpdir, example_data_path):
# test mf 2005 freyberg
mpath = example_data_path / "freyberg_multilayer_transient"
namfile = "freyberg.nam"
- m = Modflow.load(
- namfile, model_ws=mpath, verbose=False, load_only=["dis", "bas6"]
- )
+ m = Modflow.load(namfile, model_ws=mpath, verbose=False, load_only=["dis", "bas6"])
# export and check
m.dis.top.export(function_tmpdir, name="top", fmt="vtk", binary=False)
@@ -1258,14 +1228,25 @@ def test_vtk_transient_array_2d(function_tmpdir, example_data_path):
# export and check
m.rch.rech.export(ws, fmt="vtk", kpers=kpers, binary=False, xml=True)
- assert count_lines_in_file(function_tmpdir / "rech_000001.vtk") == 26837
- assert count_lines_in_file(function_tmpdir / "rech_001096.vtk") == 26837
+ found_fnames = [pth.name for pth in function_tmpdir.iterdir()]
+ expected_fnames = [f"rech_{kper:06d}.vtk" for kper in kpers]
+ assert set(found_fnames) == set(expected_fnames)
+ for fname in expected_fnames:
+ filetocheck = function_tmpdir / fname
+ info = get_vtk_xml_info(filetocheck)
+ assert info["number_of_cells"] == 2400
+ assert info["number_of_points"] == 19200
+ assert info["celldata_names"] == ["rech_"]
+ assert info["pointdata_names"] == []
+ filetocheck.unlink()
# with binary
-
m.rch.rech.export(ws, fmt="vtk", binary=True, kpers=kpers)
- assert is_binary_file(function_tmpdir / "rech_000001.vtk")
- assert is_binary_file(function_tmpdir / "rech_001096.vtk")
+ found_fnames = [pth.name for pth in function_tmpdir.iterdir()]
+ expected_fnames = [f"rech_{kper:06d}.vtk" for kper in kpers]
+ assert set(found_fnames) == set(expected_fnames)
+ for fname in expected_fnames:
+ assert is_binary_file(function_tmpdir / fname)
@requires_pkg("vtk")
@@ -1285,12 +1266,19 @@ def test_vtk_add_packages(function_tmpdir, example_data_path):
# dis export and check
# todo: pakbase.export() for vtk!!!!
m.dis.export(ws, fmt="vtk", xml=True, binary=False)
- filetocheck = function_tmpdir / "DIS.vtk"
- assert count_lines_in_file(filetocheck) == 27239
+ info = get_vtk_xml_info(function_tmpdir / "DIS.vtk")
+ assert info["number_of_cells"] == 2400
+ assert info["number_of_points"] == 19200
+ assert info["celldata_names"] == ["top", "botm"]
+ assert info["pointdata_names"] == []
# upw with point scalar output
m.upw.export(ws, fmt="vtk", xml=True, binary=False, point_scalars=True)
- assert count_lines_in_file(function_tmpdir / "UPW.vtk") == 42445
+ info = get_vtk_xml_info(function_tmpdir / "UPW.vtk")
+ assert info["number_of_cells"] == 2400
+ assert info["number_of_points"] == 19200
+ assert info["celldata_names"] == []
+ assert info["pointdata_names"] == ["hk", "hani", "vka", "ss", "sy"]
# bas with smoothing on
m.bas6.export(ws, fmt="vtk", binary=False, smooth=True)
@@ -1298,9 +1286,14 @@ def test_vtk_add_packages(function_tmpdir, example_data_path):
# transient package drain
kpers = [0, 1, 1096]
+ expected_fnames = [f"DRN_{kper:06d}.vtu" for kper in kpers]
m.drn.export(ws, fmt="vtk", binary=False, xml=True, kpers=kpers, pvd=True)
- assert count_lines_in_file(function_tmpdir / "DRN_000001.vtu") == 27239
- assert count_lines_in_file(function_tmpdir / "DRN_001096.vtu") == 27239
+ for fname in expected_fnames:
+ info = get_vtk_xml_info(function_tmpdir / fname)
+ assert info["number_of_cells"] == 2400
+ assert info["number_of_points"] == 19200
+ assert info["celldata_names"] == ["DRN_elev", "DRN_cond"]
+ assert info["pointdata_names"] == []
# dis with binary
m.dis.export(ws, fmt="vtk", binary=True)
@@ -1347,42 +1340,46 @@ def test_vtk_binary_head_export(function_tmpdir, example_data_path):
namfile = "freyberg.nam"
hdsfile = mpth / "freyberg.hds"
heads = HeadFile(hdsfile)
- m = Modflow.load(
- namfile, model_ws=mpth, verbose=False, load_only=["dis", "bas6"]
- )
+ m = Modflow.load(namfile, model_ws=mpth, verbose=False, load_only=["dis", "bas6"])
filetocheck = function_tmpdir / "freyberg_head_000003.vtu"
# export and check
vtkobj = Vtk(m, pvd=True, xml=True)
- vtkobj.add_heads(
- heads, kstpkper=[(0, 0), (0, 199), (0, 354), (0, 454), (0, 1089)]
- )
+ vtkobj.add_heads(heads, kstpkper=[(0, 0), (0, 199), (0, 354), (0, 454), (0, 1089)])
vtkobj.write(function_tmpdir / "freyberg_head")
- assert count_lines_in_file(filetocheck) == 34
+ info = get_vtk_xml_info(filetocheck)
+ assert info["number_of_cells"] == 2400
+ assert info["number_of_points"] == 19200
+ assert info["celldata_names"] == ["head"]
+ assert info["pointdata_names"] == []
filetocheck.unlink()
# with point scalars
vtkobj = Vtk(m, pvd=True, xml=True, point_scalars=True)
- vtkobj.add_heads(
- heads, kstpkper=[(0, 0), (0, 199), (0, 354), (0, 454), (0, 1089)]
- )
+ vtkobj.add_heads(heads, kstpkper=[(0, 0), (0, 199), (0, 354), (0, 454), (0, 1089)])
vtkobj.write(function_tmpdir / "freyberg_head")
- assert count_lines_in_file(filetocheck) == 34
+ info = get_vtk_xml_info(filetocheck)
+ assert info["number_of_cells"] == 2400
+ assert info["number_of_points"] == 19200
+ assert info["celldata_names"] == []
+ assert info["pointdata_names"] == ["head"]
filetocheck.unlink()
# with smoothing
vtkobj = Vtk(m, pvd=True, xml=True, smooth=True)
- vtkobj.add_heads(
- heads, kstpkper=[(0, 0), (0, 199), (0, 354), (0, 454), (0, 1089)]
- )
+ vtkobj.add_heads(heads, kstpkper=[(0, 0), (0, 199), (0, 354), (0, 454), (0, 1089)])
vtkobj.write(function_tmpdir / "freyberg_head")
- assert count_lines_in_file(filetocheck) == 34
+ info = get_vtk_xml_info(filetocheck)
+ assert info["number_of_cells"] == 2400
+ assert info["number_of_points"] == 19200
+ assert info["celldata_names"] == ["head"]
+ assert info["pointdata_names"] == []
@requires_pkg("vtk")
@@ -1394,27 +1391,35 @@ def test_vtk_cbc(function_tmpdir, example_data_path):
namfile = "freyberg.nam"
cbcfile = os.path.join(mpth, "freyberg.cbc")
cbc = CellBudgetFile(cbcfile)
- m = Modflow.load(
- namfile, model_ws=mpth, verbose=False, load_only=["dis", "bas6"]
- )
+ m = Modflow.load(namfile, model_ws=mpth, verbose=False, load_only=["dis", "bas6"])
# export and check with point scalar
vtkobj = Vtk(m, binary=False, xml=True, pvd=True, point_scalars=True)
vtkobj.add_cell_budget(cbc, kstpkper=[(0, 0), (0, 1), (0, 2)])
vtkobj.write(function_tmpdir / "freyberg_CBC")
- assert (
- count_lines_in_file(function_tmpdir / "freyberg_CBC_000000.vtu")
- == 39243
- )
+ info = get_vtk_xml_info(function_tmpdir / "freyberg_CBC_000000.vtu")
+ assert info["number_of_cells"] == 2400
+ assert info["number_of_points"] == 19200
+ assert info["celldata_names"] == []
+ expected_pointdata_names = [
+ " CONSTANT HEAD",
+ "FLOW RIGHT FACE ",
+ "FLOW FRONT FACE ",
+ "FLOW LOWER FACE ",
+ ]
+ assert info["pointdata_names"] == expected_pointdata_names
# with point scalars and binary
vtkobj = Vtk(m, xml=True, pvd=True, point_scalars=True)
vtkobj.add_cell_budget(cbc, kstpkper=[(0, 0), (0, 1), (0, 2)])
vtkobj.write(function_tmpdir / "freyberg_CBC")
- assert (
- count_lines_in_file(function_tmpdir / "freyberg_CBC_000000.vtu") == 28
- )
+
+ info = get_vtk_xml_info(function_tmpdir / "freyberg_CBC_000000.vtu")
+ assert info["number_of_cells"] == 2400
+ assert info["number_of_points"] == 19200
+ assert info["celldata_names"] == []
+ assert info["pointdata_names"] == expected_pointdata_names
@requires_pkg("vtk")
@@ -1442,7 +1447,11 @@ def test_vtk_vector(function_tmpdir, example_data_path):
vtkobj.add_vector(q, "discharge")
vtkobj.write(filenametocheck)
- assert count_lines_in_file(filenametocheck) == 36045
+ info = get_vtk_xml_info(filenametocheck)
+ assert info["number_of_cells"] == 2400
+ assert info["number_of_points"] == 19200
+ assert info["celldata_names"] == []
+ assert info["pointdata_names"] == ["discharge"]
# with point scalars and binary
vtkobj = Vtk(m, point_scalars=True)
@@ -1459,14 +1468,22 @@ def test_vtk_vector(function_tmpdir, example_data_path):
vtkobj.add_vector(q, "discharge")
vtkobj.write(filenametocheck)
- assert count_lines_in_file(filenametocheck) == 27645
+ info = get_vtk_xml_info(filenametocheck)
+ assert info["number_of_cells"] == 2400
+ assert info["number_of_points"] == 19200
+ assert info["celldata_names"] == ["discharge"]
+ assert info["pointdata_names"] == []
# with values directly given at vertices and binary
vtkobj = Vtk(m, xml=True, binary=True)
vtkobj.add_vector(q, "discharge")
vtkobj.write(filenametocheck)
- assert count_lines_in_file(filenametocheck) == 25
+ info = get_vtk_xml_info(filenametocheck)
+ assert info["number_of_cells"] == 2400
+ assert info["number_of_points"] == 19200
+ assert info["celldata_names"] == ["discharge"]
+ assert info["pointdata_names"] == []
@requires_pkg("vtk")
@@ -1477,9 +1494,7 @@ def test_vtk_unstructured(function_tmpdir, unstructured_grid):
grid = unstructured_grid
outfile = function_tmpdir / "disu_grid.vtu"
- vtkobj = Vtk(
- modelgrid=grid, vertical_exageration=2, binary=True, smooth=False
- )
+ vtkobj = Vtk(modelgrid=grid, vertical_exageration=2, binary=True, smooth=False)
vtkobj.add_array(grid.top, "top")
vtkobj.add_array(grid.botm, "botm")
vtkobj.write(outfile)
@@ -1495,9 +1510,7 @@ def test_vtk_unstructured(function_tmpdir, unstructured_grid):
top2 = vtk_to_numpy(data.GetCellData().GetArray("top"))
- assert np.allclose(
- np.ravel(grid.top), top2
- ), "Field data not properly written"
+ assert np.allclose(np.ravel(grid.top), top2), "Field data not properly written"
@requires_pkg("vtk", "pyvista")
@@ -1601,9 +1614,7 @@ def test_vtk_pathline(function_tmpdir, example_data_path):
prsity=0.2,
prsityCB=0.2,
)
- sim = mpp.create_mpsim(
- trackdir="backward", simtype="pathline", packages="WEL"
- )
+ sim = mpp.create_mpsim(trackdir="backward", simtype="pathline", packages="WEL")
mpp.write_input()
mpp.run_model()
@@ -1632,9 +1643,7 @@ def test_vtk_pathline(function_tmpdir, example_data_path):
from vtkmodules.util import numpy_support
totim = numpy_support.vtk_to_numpy(data.GetPointData().GetArray("time"))
- pid = numpy_support.vtk_to_numpy(
- data.GetPointData().GetArray("particleid")
- )
+ pid = numpy_support.vtk_to_numpy(data.GetPointData().GetArray("particleid"))
maxtime = 0
for p in plines:
@@ -1649,14 +1658,13 @@ def test_vtk_pathline(function_tmpdir, example_data_path):
def grid2disvgrid(nrow, ncol):
- """Simple function to create disv verts and iverts for a regular grid of size nrow, ncol"""
+ """Simple function to create disv verts and iverts for a regular grid of
+ size nrow, ncol"""
def lower_left_point(i, j, ncol):
return i * (ncol + 1) + j
- mg = np.meshgrid(
- np.linspace(0, ncol, ncol + 1), np.linspace(0, nrow, nrow + 1)
- )
+ mg = np.meshgrid(np.linspace(0, ncol, ncol + 1), np.linspace(0, nrow, nrow + 1))
verts = np.vstack((mg[0].flatten(), mg[1].flatten())).transpose()
# in the creation of iverts here, we intentionally do not close the cell polygon
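Note: a tiny worked example of the numbering this helper uses, for nrow = ncol = 2 (a 3 x 3 lattice of vertices, numbered row by row from the origin):

    #   6 --- 7 --- 8
    #   |     |     |
    #   3 --- 4 --- 5
    #   |     |     |
    #   0 --- 1 --- 2
    assert lower_left_point(0, 0, ncol=2) == 0
    # cell (0, 0) is bounded by vertices 0, 1, 4 and 3; per the comment
    # above, vertex 0 is not repeated to close the polygon.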
@@ -1673,9 +1681,7 @@ def lower_left_point(i, j, ncol):
def load_verts(fname):
- verts = np.genfromtxt(
- fname, dtype=[int, float, float], names=["iv", "x", "y"]
- )
+ verts = np.genfromtxt(fname, dtype=[int, float, float], names=["iv", "x", "y"])
verts["iv"] -= 1 # zero based
return verts
@@ -1712,9 +1718,7 @@ def test_vtk_add_model_without_packages_names(function_tmpdir):
dis = ModflowGwfdis(gwf, nrow=3, ncol=3)
ic = ModflowGwfic(gwf)
npf = ModflowGwfnpf(gwf, save_specific_discharge=True)
- chd = ModflowGwfchd(
- gwf, stress_period_data=[[(0, 0, 0), 1.0], [(0, 2, 2), 0.0]]
- )
+ chd = ModflowGwfchd(gwf, stress_period_data=[[(0, 0, 0), 1.0], [(0, 2, 2), 0.0]])
# Export model without specifying packages_names parameter
@@ -1932,12 +1936,7 @@ def test_vtk_export_disu1_grid(function_tmpdir, example_data_path):
)
outfile = function_tmpdir / "disu_grid.vtu"
- vtkobj = Vtk(
- modelgrid=modelgrid,
- vertical_exageration=2,
- binary=True,
- smooth=False,
- )
+ vtkobj = Vtk(modelgrid=modelgrid, vertical_exageration=2, binary=True, smooth=False)
vtkobj.add_array(modelgrid.top, "top")
vtkobj.add_array(modelgrid.botm, "botm")
vtkobj.write(outfile)
@@ -2013,12 +2012,7 @@ def test_vtk_export_disu2_grid(function_tmpdir, example_data_path):
)
outfile = function_tmpdir / "disu_grid.vtu"
- vtkobj = Vtk(
- modelgrid=modelgrid,
- vertical_exageration=2,
- binary=True,
- smooth=False,
- )
+ vtkobj = Vtk(modelgrid=modelgrid, vertical_exageration=2, binary=True, smooth=False)
vtkobj.add_array(modelgrid.top, "top")
vtkobj.add_array(modelgrid.botm, "botm")
vtkobj.write(outfile)
diff --git a/autotest/test_flopy_io.py b/autotest/test_flopy_io.py
index bb09cd2207..9ee570d044 100644
--- a/autotest/test_flopy_io.py
+++ b/autotest/test_flopy_io.py
@@ -29,32 +29,22 @@ def test_relpath_safe(function_tmpdir, scrub, use_paths):
and splitdrive(function_tmpdir)[0] != splitdrive(getcwd())[0]
):
if use_paths:
- assert (
- Path(relpath_safe(function_tmpdir))
- == function_tmpdir.absolute()
- )
+ assert Path(relpath_safe(function_tmpdir)) == function_tmpdir.absolute()
assert relpath_safe(Path(which("mf6"))) == str(
Path(which("mf6")).absolute()
)
else:
assert (
- Path(relpath_safe(str(function_tmpdir)))
- == function_tmpdir.absolute()
- )
- assert relpath_safe(which("mf6")) == str(
- Path(which("mf6")).absolute()
+ Path(relpath_safe(str(function_tmpdir))) == function_tmpdir.absolute()
)
+ assert relpath_safe(which("mf6")) == str(Path(which("mf6")).absolute())
else:
if use_paths:
- assert Path(
- relpath_safe(function_tmpdir, function_tmpdir.parent)
- ) == Path(function_tmpdir.name)
+ assert Path(relpath_safe(function_tmpdir, function_tmpdir.parent)) == Path(
+ function_tmpdir.name
+ )
assert (
- Path(
- relpath_safe(
- function_tmpdir, function_tmpdir.parent.parent
- )
- )
+ Path(relpath_safe(function_tmpdir, function_tmpdir.parent.parent))
== Path(function_tmpdir.parent.name) / function_tmpdir.name
)
assert relpath_safe(Path(which("mf6"))) == relpath(
@@ -73,9 +63,7 @@ def test_relpath_safe(function_tmpdir, scrub, use_paths):
)
== Path(function_tmpdir.parent.name) / function_tmpdir.name
)
- assert relpath_safe(which("mf6")) == relpath(
- which("mf6"), getcwd()
- )
+ assert relpath_safe(which("mf6")) == relpath(which("mf6"), getcwd())
# test user login obfuscation
with set_dir("/"):
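Note: to summarize the behavior these branches pin down, relpath_safe returns a path relative to the given start (the current directory by default), falls back to the absolute path when no relative form exists (e.g. across Windows drive letters), and with scrub=True masks the login name in the result. A usage sketch, with the import location assumed:

    from flopy.utils.flopy_io import relpath_safe  # assumed module path

    print(relpath_safe("/tmp/work/sub", "/tmp/work"))  # -> "sub"
    # across drives on Windows there is no relative form, so the
    # absolute path comes back instead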
diff --git a/autotest/test_flopy_module.py b/autotest/test_flopy_module.py
index 1a4bd40ea3..1114b96009 100644
--- a/autotest/test_flopy_module.py
+++ b/autotest/test_flopy_module.py
@@ -58,9 +58,7 @@ def test_modflow_unstructured(function_tmpdir):
wel = flopy.mfusg.MfUsgWel(mf, stress_period_data={0: [[0, -100]]})
assert isinstance(wel, flopy.mfusg.MfUsgWel)
- ghb = flopy.modflow.ModflowGhb(
- mf, stress_period_data={0: [[1, 5.9, 1000.0]]}
- )
+ ghb = flopy.modflow.ModflowGhb(mf, stress_period_data={0: [[1, 5.9, 1000.0]]})
assert isinstance(ghb, flopy.modflow.ModflowGhb)
oc = flopy.modflow.ModflowOc(mf)
@@ -141,9 +139,7 @@ def test_mflist_reference(function_tmpdir):
# assert shp.numRecords == nrow * ncol
-def test_pyinstaller_flopy_runs_without_dfn_folder(
- flopy_data_path, example_data_path
-):
+def test_pyinstaller_flopy_runs_without_dfn_folder(flopy_data_path, example_data_path):
"""
Test to ensure that flopy can load a modflow 6 simulation without dfn
files being present.
diff --git a/autotest/test_gage.py b/autotest/test_gage.py
index 04d3879b06..93d1c0a168 100644
--- a/autotest/test_gage.py
+++ b/autotest/test_gage.py
@@ -121,9 +121,7 @@ def test_gage_files(function_tmpdir):
break
assert found, f"{f} not in name file entries"
iu = abs(gages[idx][1])
- assert (
- iu == iun
- ), f"{f} unit not equal to {iu} - name file unit = {iun}"
+ assert iu == iun, f"{f} unit not equal to {iu} - name file unit = {iun}"
def test_gage_filenames0(function_tmpdir):
@@ -207,6 +205,4 @@ def test_gage_filenames(function_tmpdir):
break
assert found, f"{f} not in name file entries"
iu = abs(gages[idx][1])
- assert (
- iu == iun
- ), f"{f} unit not equal to {iu} - name file unit = {iun}"
+ assert iu == iun, f"{f} unit not equal to {iu} - name file unit = {iun}"
diff --git a/autotest/test_generate_classes.py b/autotest/test_generate_classes.py
index db812aa40b..c1de3bc706 100644
--- a/autotest/test_generate_classes.py
+++ b/autotest/test_generate_classes.py
@@ -1,8 +1,9 @@
import sys
+from collections.abc import Iterable
from os import environ
from pathlib import Path
+from platform import system
from pprint import pprint
-from typing import Iterable
from warnings import warn
import pytest
@@ -30,7 +31,6 @@ def pytest_generate_tests(metafunc):
against all of the versions of mf6io flopy guarantees
support for- maybe develop and latest release? Though
some backwards compatibility seems ideal if possible.
- This would need changes in GH Actions CI test matrix.
"""
owner = "MODFLOW-USGS"
@@ -86,8 +86,10 @@ def test_generate_classes_from_github_refs(
# create virtual environment
venv = function_tmpdir / "venv"
- python = venv / "bin" / "python"
- pip = venv / "bin" / "pip"
+ win = system() == "Windows"
+ bin_dir = "Scripts" if win else "bin"  # avoid shadowing the builtin bin()
+ python = venv / bin_dir / ("python" + (".exe" if win else ""))
+ pip = venv / bin_dir / ("pip" + (".exe" if win else ""))
cli_run([str(venv)])
print(f"Using temp venv at {venv} to test class generation from {ref}")
@@ -99,11 +101,15 @@ def test_generate_classes_from_github_refs(
# get creation time of files
flopy_path = (
- venv
- / "lib"
- / f"python{sys.version_info.major}.{sys.version_info.minor}"
- / "site-packages"
- / "flopy"
+ (venv / "Lib" / "site-packages" / "flopy")
+ if win
+ else (
+ venv
+ / "lib"
+ / f"python{sys.version_info.major}.{sys.version_info.minor}"
+ / "site-packages"
+ / "flopy"
+ )
)
assert flopy_path.is_dir()
mod_files = list((flopy_path / "mf6" / "modflow").rglob("*")) + list(
@@ -144,10 +150,7 @@ def get_mtime(f):
modified_files = [
mod_files[i]
for i, (before, after) in enumerate(
- zip(
- mod_file_times,
- [get_mtime(f) for f in mod_files],
- )
+ zip(mod_file_times, [get_mtime(f) for f in mod_files])
)
if after > 0 and after > before
]
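Note: the two platform branches above (Scripts vs bin, Lib vs lib/pythonX.Y/site-packages) could also be derived from the standard library. A sketch, under the assumption that sysconfig's default install scheme matches the venv layout (not true on every distro-patched Python):

    import sys
    import sysconfig
    from pathlib import Path

    def venv_paths(venv: Path):
        # substitute the venv root into the install scheme's path templates
        paths = sysconfig.get_paths(vars={"base": str(venv), "platbase": str(venv)})
        scripts = Path(paths["scripts"])  # "Scripts" on Windows, "bin" elsewhere
        exe = ".exe" if sys.platform == "win32" else ""
        return scripts / f"python{exe}", scripts / f"pip{exe}", Path(paths["purelib"])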
diff --git a/autotest/test_geospatial_util.py b/autotest/test_geospatial_util.py
index 9132e1d1be..decbcf3706 100644
--- a/autotest/test_geospatial_util.py
+++ b/autotest/test_geospatial_util.py
@@ -147,10 +147,7 @@ def multilinestring():
def test_import_geospatial_utils():
- from flopy.utils.geospatial_utils import (
- GeoSpatialCollection,
- GeoSpatialUtil,
- )
+ from flopy.utils.geospatial_utils import GeoSpatialCollection, GeoSpatialUtil
@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"})
@@ -422,9 +419,7 @@ def test_point_collection(point, multipoint):
is_equal = gi == gi1[ix]
if not is_equal:
- raise AssertionError(
- "GeoSpatialCollection Point conversion error"
- )
+ raise AssertionError("GeoSpatialCollection Point conversion error")
@requires_pkg("shapely", "geojson", "geopandas")
@@ -452,9 +447,7 @@ def test_linestring_collection(linestring, multilinestring):
is_equal = gi == gi1[ix]
if not is_equal:
- raise AssertionError(
- "GeoSpatialCollection Linestring conversion error"
- )
+ raise AssertionError("GeoSpatialCollection Linestring conversion error")
@requires_pkg("shapely", "geojson", "geopandas")
diff --git a/autotest/test_get_modflow.py b/autotest/test_get_modflow.py
index d5dc4d6791..2ec79d5e49 100644
--- a/autotest/test_get_modflow.py
+++ b/autotest/test_get_modflow.py
@@ -20,16 +20,15 @@
flopy_dir = get_project_root_path()
get_modflow_script = flopy_dir / "flopy" / "utils" / "get_modflow.py"
bindir_options = {
- "flopy": Path(expandvars(r"%LOCALAPPDATA%\flopy")) / "bin"
- if system() == "Windows"
- else Path.home() / ".local" / "share" / "flopy" / "bin",
- "python": Path(sys.prefix)
- / ("Scripts" if system() == "Windows" else "bin"),
+ "flopy": (
+ Path(expandvars(r"%LOCALAPPDATA%\flopy")) / "bin"
+ if system() == "Windows"
+ else Path.home() / ".local" / "share" / "flopy" / "bin"
+ ),
+ "python": Path(sys.prefix) / ("Scripts" if system() == "Windows" else "bin"),
"home": Path.home() / ".local" / "bin",
}
-owner_options = [
- "MODFLOW-USGS",
-]
+owner_options = ["MODFLOW-USGS"]
repo_options = {
"executables": [
"crt",
@@ -128,9 +127,7 @@ def test_get_release(repo):
}
else:
for ostag in expected_ostags:
- assert any(
- ostag in a for a in actual_assets
- ), f"dist not found for {ostag}"
+ assert any(ostag in a for a in actual_assets), f"dist not found for {ostag}"
@pytest.mark.parametrize("bindir", bindir_options.keys())
@@ -251,13 +248,7 @@ def test_script_valid_options(function_tmpdir, downloads_dir):
def test_script(function_tmpdir, owner, repo, downloads_dir):
bindir = str(function_tmpdir)
stdout, stderr, returncode = run_get_modflow_script(
- bindir,
- "--owner",
- owner,
- "--repo",
- repo,
- "--downloads-dir",
- downloads_dir,
+ bindir, "--owner", owner, "--repo", repo, "--downloads-dir", downloads_dir
)
if rate_limit_msg in stderr:
pytest.skip(f"GitHub {rate_limit_msg}")
@@ -276,9 +267,7 @@ def test_script(function_tmpdir, owner, repo, downloads_dir):
def test_python_api(function_tmpdir, owner, repo, downloads_dir):
bindir = str(function_tmpdir)
try:
- get_modflow(
- bindir, owner=owner, repo=repo, downloads_dir=downloads_dir
- )
+ get_modflow(bindir, owner=owner, repo=repo, downloads_dir=downloads_dir)
except HTTPError as err:
if err.code == 403:
pytest.skip(f"GitHub {rate_limit_msg}")
diff --git a/autotest/test_grid.py b/autotest/test_grid.py
index f5eba2f233..c57ca2125d 100644
--- a/autotest/test_grid.py
+++ b/autotest/test_grid.py
@@ -33,6 +33,12 @@
import pyproj
+epsg_3070_proj4_str = (
+ "+proj=tmerc +lat_0=0 +lon_0=-90 +k=0.9996 +x_0=520000 "
+ "+y_0=-4480000 +datum=NAD83 +units=m +no_defs "
+)
+
+
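Note: on why this constant maps to "EPSG:3070" in the parametrizations below — pyproj can usually match a PROJ string back to a registered CRS, though PROJ strings drop authority metadata, so the lookup is best-effort. A sketch:

    from pyproj import CRS

    crs = CRS.from_user_input(epsg_3070_proj4_str)
    # fall back to a looser search if the default confidence finds no match
    epsg = crs.to_epsg() or crs.to_epsg(min_confidence=25)
    assert epsg == 3070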
@pytest.fixture
def minimal_unstructured_grid_info():
d = {
@@ -87,9 +93,7 @@ def test_rotation():
mg2 = StructuredGrid(delc=m.dis.delc.array, delr=m.dis.delr.array)
mg2._angrot = -45.0
- mg2.set_coord_info(
- mg2._xul_to_xll(xul), mg2._yul_to_yll(yul), angrot=-45.0
- )
+ mg2.set_coord_info(mg2._xul_to_xll(xul), mg2._yul_to_yll(yul), angrot=-45.0)
xll2, yll2 = mg2.xoffset, mg2.yoffset
assert np.abs(mg2.xvertices[0, 0] - xul) < 1e-4
@@ -178,9 +182,7 @@ def test_get_lrc_get_node():
nlay, nrow, ncol = 3, 4, 5
nnodes = nlay * nrow * ncol
ml = Modflow()
- dis = ModflowDis(
- ml, nlay=nlay, nrow=nrow, ncol=ncol, top=50, botm=[0, -1, -2]
- )
+ dis = ModflowDis(ml, nlay=nlay, nrow=nrow, ncol=ncol, top=50, botm=[0, -1, -2])
nodes = list(range(nnodes))
indices = np.indices((nlay, nrow, ncol))
layers = indices[0].flatten()
@@ -236,9 +238,7 @@ def test_get_rc_from_node_coordinates():
def load_verts(fname):
- verts = np.genfromtxt(
- fname, dtype=[int, float, float], names=["iv", "x", "y"]
- )
+ verts = np.genfromtxt(fname, dtype=[int, float, float], names=["iv", "x", "y"])
verts["iv"] -= 1 # zero based
return verts
@@ -303,16 +303,12 @@ def test_intersection(dis_model, disv_model):
else:
print("In real_world coordinates:")
try:
- row, col = dis_model.modelgrid.intersect(
- x, y, local=local, forgive=forgive
- )
+ row, col = dis_model.modelgrid.intersect(x, y, local=local, forgive=forgive)
cell2d_disv = disv_model.modelgrid.intersect(
x, y, local=local, forgive=forgive
)
except Exception as e:
- if not forgive and any(
- ["outside of the model area" in k for k in e.args]
- ):
+ if not forgive and any("outside of the model area" in k for k in e.args):
pass
else: # should be forgiving x,y out of grid
raise e
@@ -352,10 +348,8 @@ def test_structured_xyz_intersect(example_data_path):
def test_vertex_xyz_intersect(example_data_path):
- sim = MFSimulation.load(
- sim_ws=example_data_path / "mf6" / "test003_gwfs_disv"
- )
- ml = sim.get_model(list(sim.model_names)[0])
+ sim = MFSimulation.load(sim_ws=example_data_path / "mf6" / "test003_gwfs_disv")
+ ml = sim.get_model(next(iter(sim.model_names)))
mg = ml.modelgrid
assert mg.size == np.prod((mg.nlay, mg.ncpl))
@@ -385,12 +379,8 @@ def test_unstructured_xyz_intersect(example_data_path):
ncpl = np.array(3 * [len(iverts)])
nnodes = np.sum(ncpl)
- top = np.ones(
- (nnodes),
- )
- botm = np.ones(
- (nnodes),
- )
+ top = np.ones(nnodes)
+ botm = np.ones(nnodes)
# set top and botm elevations
i0 = 0
@@ -449,29 +439,24 @@ def test_structured_from_gridspec(example_data_path, spc_file):
0, # xmin
8000 * np.sin(theta) + 8000 * np.cos(theta), # xmax
8000 * np.sin(theta) * np.tan(theta / 2), # ymin
- 8000 + 8000 * np.sin(theta),
- ) # ymax
+ 8000 + 8000 * np.sin(theta), # ymax
+ )
errmsg = f"extents {extents} of {fn} does not equal {rotated_extents}"
assert all(
- [np.isclose(x, x0) for x, x0 in zip(modelgrid.extent, rotated_extents)]
+ np.isclose(x, x0) for x, x0 in zip(modelgrid.extent, rotated_extents)
), errmsg
ncpl = modelgrid.ncol * modelgrid.nrow
- assert (
- modelgrid.ncpl == ncpl
- ), f"ncpl ({modelgrid.ncpl}) does not equal {ncpl}"
+ assert modelgrid.ncpl == ncpl, f"ncpl ({modelgrid.ncpl}) does not equal {ncpl}"
nvert = modelgrid.nvert
iverts = modelgrid.iverts
maxvertex = max([max(sublist[1:]) for sublist in iverts])
- assert (
- maxvertex + 1 == nvert
- ), f"nvert ({maxvertex + 1}) does not equal {nvert}"
+ assert maxvertex + 1 == nvert, f"nvert ({maxvertex + 1}) does not equal {nvert}"
verts = modelgrid.verts
- assert nvert == verts.shape[0], (
- f"number of vertex (x, y) pairs ({verts.shape[0]}) "
- f"does not equal {nvert}"
- )
+ assert (
+ nvert == verts.shape[0]
+ ), f"number of vertex (x, y) pairs ({verts.shape[0]}) does not equal {nvert}"
@requires_pkg("shapely")
@@ -485,17 +470,13 @@ def test_unstructured_from_argus_mesh(example_data_path):
print(f" Number of nodes: {g.nnodes}")
-def test_unstructured_from_verts_and_iverts(
- function_tmpdir, example_data_path
-):
+def test_unstructured_from_verts_and_iverts(function_tmpdir, example_data_path):
datapth = example_data_path / "unstructured"
# simple functions to load vertices and incidence lists
def load_verts(fname):
print(f"Loading vertices from: {fname}")
- verts = np.genfromtxt(
- fname, dtype=[int, float, float], names=["iv", "x", "y"]
- )
+ verts = np.genfromtxt(fname, dtype=[int, float, float], names=["iv", "x", "y"])
verts["iv"] -= 1 # zero based
return verts
@@ -553,8 +534,7 @@ def unstructured_from_gridspec_driver(example_data_path, gsf_file):
# check vertices
expected_verts = [
- (float(s[0]), float(s[1]), float(s[2]))
- for s in split[3 : (3 + nverts)]
+ (float(s[0]), float(s[1]), float(s[2])) for s in split[3 : (3 + nverts)]
]
for i, ev in enumerate(expected_verts[:10]):
assert grid.verts[i][0] == ev[0]
@@ -566,14 +546,7 @@ def unstructured_from_gridspec_driver(example_data_path, gsf_file):
# check nodes
expected_nodes = [
- (
- int(s[0]),
- float(s[1]),
- float(s[2]),
- float(s[3]),
- int(s[4]),
- int(s[5]),
- )
+ (int(s[0]), float(s[1]), float(s[2]), float(s[3]), int(s[4]), int(s[5]))
for s in split[(3 + nverts) : -1]
]
for i, en in enumerate(expected_nodes):
@@ -601,16 +574,11 @@ def test_unstructured_from_gridspec_comments(example_data_path):
(None, None),
(26916, "EPSG:26916"),
("epsg:5070", "EPSG:5070"),
- (
- "+proj=tmerc +lat_0=0 +lon_0=-90 +k=0.9996 +x_0=520000 +y_0=-4480000 +datum=NAD83 +units=m +no_defs ",
- "EPSG:3070",
- ),
+ (epsg_3070_proj4_str, "EPSG:3070"),
pytest.param(4269, None, marks=pytest.mark.xfail),
),
)
-def test_grid_crs(
- minimal_unstructured_grid_info, crs, expected_srs, function_tmpdir
-):
+def test_grid_crs(minimal_unstructured_grid_info, crs, expected_srs, function_tmpdir):
expected_epsg = None
if match := re.findall(r"epsg:([\d]+)", expected_srs or "", re.IGNORECASE):
expected_epsg = int(match[0])
@@ -636,9 +604,7 @@ def do_checks(g):
do_checks(VertexGrid(vertices=d["vertices"], crs=crs))
# only check deprecations if pyproj is available
- pyproj_avail_context = (
- pytest.deprecated_call() if HAS_PYPROJ else nullcontext()
- )
+ pyproj_avail_context = pytest.deprecated_call() if HAS_PYPROJ else nullcontext()
# test deprecated 'epsg' parameter
if isinstance(crs, int):
@@ -675,10 +641,7 @@ def do_checks(g):
(None, None),
(26916, "EPSG:26916"),
("epsg:5070", "EPSG:5070"),
- (
- "+proj=tmerc +lat_0=0 +lon_0=-90 +k=0.9996 +x_0=520000 +y_0=-4480000 +datum=NAD83 +units=m +no_defs ",
- "EPSG:3070",
- ),
+ (epsg_3070_proj4_str, "EPSG:3070"),
("ESRI:102733", "ESRI:102733"),
pytest.param(4269, None, marks=pytest.mark.xfail),
),
@@ -722,9 +685,7 @@ def do_checks(g, *, exp_srs=expected_srs, exp_epsg=expected_epsg):
do_checks(sg, exp_srs="EPSG:26915", exp_epsg=26915)
# only check deprecations if pyproj is available
- pyproj_avail_context = (
- pytest.deprecated_call() if HAS_PYPROJ else nullcontext()
- )
+ pyproj_avail_context = pytest.deprecated_call() if HAS_PYPROJ else nullcontext()
# test deprecated 'epsg' parameter
if isinstance(crs, int):
@@ -923,9 +884,7 @@ def test_tocvfd3():
bt = -100.0 * np.ones((nlay, nrow, ncol))
idomain = np.ones((nlay, nrow, ncol))
idomain[:, 2:5, 2:5] = 0
- sg1 = StructuredGrid(
- delr=delr, delc=delc, top=tp, botm=bt, idomain=idomain
- )
+ sg1 = StructuredGrid(delr=delr, delc=delc, top=tp, botm=bt, idomain=idomain)
# inner grid
nlay = 1
nrow = ncol = 9
@@ -979,9 +938,7 @@ def test_area_centroid_polygon():
xc, yc = centroid_of_polygon(pts)
result = np.array([xc, yc])
answer = np.array((685055.1035824707, 6295543.12059913))
- assert np.allclose(
- result, answer
- ), "cvfdutil centroid of polygon incorrect"
+ assert np.allclose(result, answer), "cvfdutil centroid of polygon incorrect"
x, y = list(zip(*pts))
result = area_of_polygon(x, y)
answer = 11.228131838368032
@@ -1035,9 +992,7 @@ def test_unstructured_minimal_grid_ctor(minimal_unstructured_grid_info):
[(2.0, 1), (2.0, 0.0)],
[(2.0, 0), (1.0, 0.0)],
]
- assert (
- g.grid_lines == grid_lines
- ), f"\n{g.grid_lines} \n /= \n{grid_lines}"
+ assert g.grid_lines == grid_lines, f"\n{g.grid_lines} \n /= \n{grid_lines}"
assert g.extent == (0, 2, 0, 1)
xv, yv, zv = g.xyzvertices
assert xv == [[0, 1, 1, 0], [1, 2, 2, 1]]
@@ -1082,9 +1037,7 @@ def test_unstructured_complete_grid_ctor(minimal_unstructured_grid_info):
],
}
assert isinstance(g.grid_lines, dict)
- assert (
- g.grid_lines == grid_lines
- ), f"\n{g.grid_lines} \n /= \n{grid_lines}"
+ assert g.grid_lines == grid_lines, f"\n{g.grid_lines} \n /= \n{grid_lines}"
assert g.extent == (0, 2, 0, 1)
xv, yv, zv = g.xyzvertices
assert xv == [[0, 1, 1, 0], [1, 2, 2, 1]]
@@ -1172,11 +1125,7 @@ def test_voronoi_vertex_grid(function_tmpdir):
),
)
def test_voronoi_grid(request, function_tmpdir, grid_info):
- name = (
- request.node.name.replace("/", "_")
- .replace("\\", "_")
- .replace(":", "_")
- )
+ name = request.node.name.replace("/", "_").replace("\\", "_").replace(":", "_")
ncpl, vor, gridprops, grid = grid_info
# TODO: debug off-by-3 issue
@@ -1196,11 +1145,7 @@ def test_voronoi_grid(request, function_tmpdir, grid_info):
ax = fig.add_subplot()
ax.set_aspect("equal")
grid.plot(ax=ax)
- ax.plot(
- grid.xcellcenters[invalid_cells],
- grid.ycellcenters[invalid_cells],
- "ro",
- )
+ ax.plot(grid.xcellcenters[invalid_cells], grid.ycellcenters[invalid_cells], "ro")
plt.savefig(function_tmpdir / f"{name}.png")
assert ncpl == gridprops["ncpl"] or almost_right
@@ -1228,9 +1173,7 @@ def test_structured_thickness(structured_grid):
thickness = structured_grid.cell_thickness
assert np.allclose(thickness, 5.0), "thicknesses != 5."
- sat_thick = structured_grid.saturated_thickness(
- structured_grid.botm + 10.0
- )
+ sat_thick = structured_grid.saturated_thickness(structured_grid.botm + 10.0)
assert np.allclose(sat_thick, thickness), "saturated thicknesses != 5."
sat_thick = structured_grid.saturated_thickness(structured_grid.botm + 5.0)
@@ -1242,9 +1185,7 @@ def test_structured_thickness(structured_grid):
sat_thick = structured_grid.saturated_thickness(structured_grid.botm)
assert np.allclose(sat_thick, 0.0), "saturated thicknesses != 0."
- sat_thick = structured_grid.saturated_thickness(
- structured_grid.botm - 100.0
- )
+ sat_thick = structured_grid.saturated_thickness(structured_grid.botm - 100.0)
assert np.allclose(sat_thick, 0.0), "saturated thicknesses != 0."
@@ -1272,27 +1213,19 @@ def test_unstructured_thickness(unstructured_grid):
thickness = unstructured_grid.cell_thickness
assert np.allclose(thickness, 5.0), "thicknesses != 5."
- sat_thick = unstructured_grid.saturated_thickness(
- unstructured_grid.botm + 10.0
- )
+ sat_thick = unstructured_grid.saturated_thickness(unstructured_grid.botm + 10.0)
assert np.allclose(sat_thick, thickness), "saturated thicknesses != 5."
- sat_thick = unstructured_grid.saturated_thickness(
- unstructured_grid.botm + 5.0
- )
+ sat_thick = unstructured_grid.saturated_thickness(unstructured_grid.botm + 5.0)
assert np.allclose(sat_thick, thickness), "saturated thicknesses != 5."
- sat_thick = unstructured_grid.saturated_thickness(
- unstructured_grid.botm + 2.5
- )
+ sat_thick = unstructured_grid.saturated_thickness(unstructured_grid.botm + 2.5)
assert np.allclose(sat_thick, 2.5), "saturated thicknesses != 2.5"
sat_thick = unstructured_grid.saturated_thickness(unstructured_grid.botm)
assert np.allclose(sat_thick, 0.0), "saturated thicknesses != 0."
- sat_thick = unstructured_grid.saturated_thickness(
- unstructured_grid.botm - 100.0
- )
+ sat_thick = unstructured_grid.saturated_thickness(unstructured_grid.botm - 100.0)
assert np.allclose(sat_thick, 0.0), "saturated thicknesses != 0."
@@ -1316,9 +1249,7 @@ def test_unstructured_neighbors(unstructured_grid):
rook_neighbors = unstructured_grid.neighbors(5)
assert np.allclose(rook_neighbors, [0, 10, 1, 6, 11, 2, 7, 12])
- queen_neighbors = unstructured_grid.neighbors(
- 5, method="queen", reset=True
- )
+ queen_neighbors = unstructured_grid.neighbors(5, method="queen", reset=True)
assert np.allclose(queen_neighbors, [0, 10, 1, 6, 11, 2, 3, 7, 8, 12, 13])
@@ -1331,9 +1262,7 @@ def test_structured_ncb_thickness():
), "grid cell_thickness attribute returns incorrect shape"
thickness = grid.remove_confining_beds(grid.cell_thickness)
- assert (
- thickness.shape == grid.shape
- ), "quasi3d confining beds not properly removed"
+ assert thickness.shape == grid.shape, "quasi3d confining beds not properly removed"
sat_thick = grid.saturated_thickness(grid.cell_thickness)
assert (
@@ -1447,9 +1376,7 @@ def test_geo_dataframe(structured_grid, vertex_grid, unstructured_grid):
cv = grid.get_cell_vertices(node)
for coord in coords:
if coord not in cv:
- raise AssertionError(
- f"Cell vertices incorrect for node={node}"
- )
+ raise AssertionError(f"Cell vertices incorrect for node={node}")
def test_unstructured_iverts_cleanup():
@@ -1465,12 +1392,7 @@ def test_unstructured_iverts_cleanup():
iac, ja = [], []
for cell, neigh in neighbors.items():
iac.append(len(neigh) + 1)
- ja.extend(
- [
- cell,
- ]
- + neigh
- )
+ ja.extend([cell] + neigh)
# build iverts and verts without using shared vertices
verts, iverts = [], []
@@ -1508,6 +1430,4 @@ def test_unstructured_iverts_cleanup():
clean_ugrid = ugrid.clean_iverts()
if clean_ugrid.nvert != cleaned_vert_num:
- raise AssertionError(
- "Improper number of vertices for cleaned 'shared' iverts"
- )
+ raise AssertionError("Improper number of vertices for cleaned 'shared' iverts")
diff --git a/autotest/test_grid_cases.py b/autotest/test_grid_cases.py
index 5f3e748e21..7e62981534 100644
--- a/autotest/test_grid_cases.py
+++ b/autotest/test_grid_cases.py
@@ -38,18 +38,8 @@ def structured_cbd_small():
laycbd = np.array([1, 2, 0], dtype=int)
ncb = np.count_nonzero(laycbd)
dx = dy = 150
- delc = np.array(
- [
- dy,
- ]
- * nrow
- )
- delr = np.array(
- [
- dx,
- ]
- * ncol
- )
+ delc = np.array([dy] * nrow)
+ delr = np.array([dx] * ncol)
top = np.ones((15, 15))
botm = np.ones((nlay + ncb, nrow, ncol))
elevations = np.array([-10, -20, -40, -50, -70])[:, np.newaxis]
@@ -139,20 +129,8 @@ def unstructured_small():
[4, 5, 8, 7],
[6, 7, 10, 9],
]
- xcenters = [
- 0.5,
- 1.5,
- 0.5,
- 1.5,
- 0.5,
- ]
- ycenters = [
- 2.5,
- 2.5,
- 1.5,
- 1.5,
- 0.5,
- ]
+ xcenters = [0.5, 1.5, 0.5, 1.5, 0.5]
+ ycenters = [2.5, 2.5, 1.5, 1.5, 0.5]
idomain = np.ones((nlay, 5), dtype=int)
top = np.ones((nlay, 5), dtype=float)
top[0, :] = 10.0
@@ -264,9 +242,7 @@ def voronoi_rectangle():
xmax = 2.0
ymin = 0.0
ymax = 1.0
- poly = np.array(
- ((xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax))
- )
+ poly = np.array(((xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)))
max_area = 0.001
angle = 30
@@ -289,7 +265,7 @@ def voronoi_circle():
radius = 100.0
x = radius * np.cos(theta)
y = radius * np.sin(theta)
- poly = [(x, y) for x, y in zip(x, y)]
+ poly = list(zip(x, y))
max_area = 50
angle = 30
@@ -313,13 +289,13 @@ def voronoi_nested_circles():
radius = 100.0
x = radius * np.cos(theta)
y = radius * np.sin(theta)
- circle_poly = [(x, y) for x, y in zip(x, y)]
+ circle_poly = list(zip(x, y))
theta = np.arange(0.0, 2 * np.pi, 0.2)
radius = 30.0
x = radius * np.cos(theta) + 25.0
y = radius * np.sin(theta) + 25.0
- inner_circle_poly = [(x, y) for x, y in zip(x, y)]
+ inner_circle_poly = list(zip(x, y))
polys = [circle_poly, inner_circle_poly]
max_area = 100
@@ -351,9 +327,7 @@ def voronoi_polygons():
tri.add_polygon(active_domain)
tri.add_polygon(area1)
tri.add_polygon(area2)
- tri.add_region(
- (1, 1), 0, maximum_area=100
- ) # point inside active domain
+ tri.add_region((1, 1), 0, maximum_area=100) # point inside active domain
tri.add_region((11, 11), 1, maximum_area=10) # point inside area1
tri.add_region((61, 61), 2, maximum_area=3) # point inside area2
tri.build(verbose=False)
@@ -381,7 +355,7 @@ def voronoi_many_polygons():
radius = 10.0
x = radius * np.cos(theta) + 50.0
y = radius * np.sin(theta) + 70.0
- circle_poly0 = [(x, y) for x, y in zip(x, y)]
+ circle_poly0 = list(zip(x, y))
tri.add_polygon(circle_poly0)
tri.add_hole((50, 70))
@@ -390,7 +364,7 @@ def voronoi_many_polygons():
radius = 10.0
x = radius * np.cos(theta) + 70.0
y = radius * np.sin(theta) + 20.0
- circle_poly1 = [(x, y) for x, y in zip(x, y)]
+ circle_poly1 = list(zip(x, y))
tri.add_polygon(circle_poly1)
# add line through domain to force conforming cells
@@ -400,9 +374,7 @@ def voronoi_many_polygons():
# then regions and other polygons should follow
tri.add_polygon(area1)
tri.add_polygon(area2)
- tri.add_region(
- (1, 1), 0, maximum_area=100
- ) # point inside active domain
+ tri.add_region((1, 1), 0, maximum_area=100) # point inside active domain
tri.add_region((11, 11), 1, maximum_area=10) # point inside area1
tri.add_region((70, 70), 2, maximum_area=1) # point inside area2
diff --git a/autotest/test_gridgen.py b/autotest/test_gridgen.py
index 66a8a3a98c..eb3fc4bdd4 100644
--- a/autotest/test_gridgen.py
+++ b/autotest/test_gridgen.py
@@ -74,16 +74,10 @@ def test_add_active_domain(function_tmpdir, grid_type):
"ad0",
]:
print(
- "Testing add_active_domain() for",
- grid_type,
- "grid with features",
- feature,
+ "Testing add_active_domain() for", grid_type, "grid with features", feature
)
gridgen = Gridgen(bgrid, model_ws=function_tmpdir)
- gridgen.add_active_domain(
- feature,
- range(bgrid.nlay),
- )
+ gridgen.add_active_domain(feature, range(bgrid.nlay))
gridgen.build()
grid = (
VertexGrid(**gridgen.get_gridprops_vertexgrid())
@@ -122,12 +116,7 @@ def test_add_refinement_feature(function_tmpdir, grid_type):
features,
)
gridgen = Gridgen(bgrid, model_ws=function_tmpdir)
- gridgen.add_refinement_features(
- features,
- "polygon",
- 1,
- range(bgrid.nlay),
- )
+ gridgen.add_refinement_features(features, "polygon", 1, range(bgrid.nlay))
gridgen.build()
grid = (
VertexGrid(**gridgen.get_gridprops_vertexgrid())
@@ -160,9 +149,7 @@ def test_mf6disv(function_tmpdir):
botm = [top - k * dz for k in range(1, nlay + 1)]
# Create a dummy model and regular grid to use as the base grid for gridgen
- sim = flopy.mf6.MFSimulation(
- sim_name=name, sim_ws=function_tmpdir, exe_name="mf6"
- )
+ sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=function_tmpdir, exe_name="mf6")
gwf = flopy.mf6.ModflowGwf(sim, modelname=name)
dis = flopy.mf6.ModflowGwfdis(
@@ -193,17 +180,13 @@ def test_mf6disv(function_tmpdir):
# build run and post-process the MODFLOW 6 model
name = "mymodel"
- sim = flopy.mf6.MFSimulation(
- sim_name=name, sim_ws=function_tmpdir, exe_name="mf6"
- )
+ sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=function_tmpdir, exe_name="mf6")
tdis = flopy.mf6.ModflowTdis(sim)
ims = flopy.mf6.ModflowIms(sim, linear_acceleration="bicgstab")
gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
disv = flopy.mf6.ModflowGwfdisv(gwf, **disv_gridprops)
ic = flopy.mf6.ModflowGwfic(gwf)
- npf = flopy.mf6.ModflowGwfnpf(
- gwf, xt3doptions=True, save_specific_discharge=True
- )
+ npf = flopy.mf6.ModflowGwfnpf(gwf, xt3doptions=True, save_specific_discharge=True)
chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chdspd)
budget_file = f"{name}.bud"
head_file = f"{name}.hds"
@@ -297,9 +280,7 @@ def sim_disu_diff_layers(function_tmpdir):
botm = [top - k * dz for k in range(1, nlay + 1)]
# Create a dummy model and regular grid to use as the base grid for gridgen
- sim = flopy.mf6.MFSimulation(
- sim_name=name, sim_ws=function_tmpdir, exe_name="mf6"
- )
+ sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=function_tmpdir, exe_name="mf6")
gwf = flopy.mf6.ModflowGwf(sim, modelname=name)
dis = flopy.mf6.ModflowGwfdis(
@@ -328,17 +309,13 @@ def sim_disu_diff_layers(function_tmpdir):
# build run and post-process the MODFLOW 6 model
name = "mymodel"
- sim = flopy.mf6.MFSimulation(
- sim_name=name, sim_ws=function_tmpdir, exe_name="mf6"
- )
+ sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=function_tmpdir, exe_name="mf6")
tdis = flopy.mf6.ModflowTdis(sim)
ims = flopy.mf6.ModflowIms(sim, linear_acceleration="bicgstab")
gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
disu = flopy.mf6.ModflowGwfdisu(gwf, **disu_gridprops)
ic = flopy.mf6.ModflowGwfic(gwf)
- npf = flopy.mf6.ModflowGwfnpf(
- gwf, xt3doptions=True, save_specific_discharge=True
- )
+ npf = flopy.mf6.ModflowGwfnpf(gwf, xt3doptions=True, save_specific_discharge=True)
chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chdspd)
budget_file = f"{name}.bud"
head_file = f"{name}.hds"
@@ -374,6 +351,8 @@ def test_mf6disu(sim_disu_diff_layers):
gwf.modelgrid.write_shapefile(fname)
fname = ws / "model.shp"
gwf.export(fname)
+ fname = ws / "chd.shp"
+ gwf.chd.export(fname)
sim.run_simulation(silent=True)
head = gwf.output.head().get_data()
@@ -390,11 +369,7 @@ def test_mf6disu(sim_disu_diff_layers):
pmv.plot_array(head.flatten(), cmap="jet", vmin=vmin, vmax=vmax)
pmv.plot_grid(colors="k", alpha=0.1)
pmv.contour_array(
- head,
- levels=[0.2, 0.4, 0.6, 0.8],
- linewidths=3.0,
- vmin=vmin,
- vmax=vmax,
+ head, levels=[0.2, 0.4, 0.6, 0.8], linewidths=3.0, vmin=vmin, vmax=vmax
)
ax.set_title(f"Layer {ilay + 1}")
pmv.plot_vector(spdis["qx"], spdis["qy"], color="white")
@@ -421,9 +396,7 @@ def test_mf6disu(sim_disu_diff_layers):
raise AssertionError("Boundary condition was not drawn")
for col in ax.collections:
- if not isinstance(
- col, (QuadMesh, PathCollection, LineCollection)
- ):
+ if not isinstance(col, (QuadMesh, PathCollection, LineCollection)):
raise AssertionError("Unexpected collection type")
plt.close()
@@ -544,9 +517,7 @@ def test_mfusg(function_tmpdir):
ax.set_aspect("equal")
pmv.plot_array(head[ilay], cmap="jet", vmin=vmin, vmax=vmax)
pmv.plot_grid(colors="k", alpha=0.1)
- pmv.contour_array(
- head[ilay], levels=[0.2, 0.4, 0.6, 0.8], linewidths=3.0
- )
+ pmv.contour_array(head[ilay], levels=[0.2, 0.4, 0.6, 0.8], linewidths=3.0)
ax.set_title(f"Layer {ilay + 1}")
# pmv.plot_specific_discharge(spdis, color='white')
fname = "results.png"
@@ -572,9 +543,7 @@ def test_mfusg(function_tmpdir):
raise AssertionError("Boundary condition was not drawn")
for col in ax.collections:
- if not isinstance(
- col, (QuadMesh, PathCollection, LineCollection)
- ):
+ if not isinstance(col, (QuadMesh, PathCollection, LineCollection)):
raise AssertionError("Unexpected collection type")
plt.close()
@@ -585,13 +554,10 @@ def test_mfusg(function_tmpdir):
m.run_model()
# also test load of unstructured LPF with keywords
- lpf2 = flopy.mfusg.MfUsgLpf.load(
- function_tmpdir / f"{name}.lpf", m, check=False
- )
+ lpf2 = flopy.mfusg.MfUsgLpf.load(function_tmpdir / f"{name}.lpf", m, check=False)
msg = "NOCVCORRECTION and NOVFC should be in lpf options but at least one is not."
assert (
- "NOVFC" in lpf2.options.upper()
- and "NOCVCORRECTION" in lpf2.options.upper()
+ "NOVFC" in lpf2.options.upper() and "NOCVCORRECTION" in lpf2.options.upper()
), msg
# test disu, bas6, lpf shapefile export for mfusg unstructured models
@@ -678,17 +644,7 @@ def test_gridgen(function_tmpdir):
xmax = 12 * delr
ymin = 8 * delc
ymax = 13 * delc
- rfpoly = [
- [
- [
- (xmin, ymin),
- (xmax, ymin),
- (xmax, ymax),
- (xmin, ymax),
- (xmin, ymin),
- ]
- ]
- ]
+ rfpoly = [[[(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax), (xmin, ymin)]]]
g.add_refinement_features(rfpoly, "polygon", 1, range(nlay))
g6.add_refinement_features(rfpoly, "polygon", 1, range(nlay))
gu.add_refinement_features(rfpoly, "polygon", 1, range(nlay))
@@ -698,17 +654,7 @@ def test_gridgen(function_tmpdir):
xmax = 11 * delr
ymin = 9 * delc
ymax = 12 * delc
- rfpoly = [
- [
- [
- (xmin, ymin),
- (xmax, ymin),
- (xmax, ymax),
- (xmin, ymax),
- (xmin, ymin),
- ]
- ]
- ]
+ rfpoly = [[[(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax), (xmin, ymin)]]]
g.add_refinement_features(rfpoly, "polygon", 2, range(nlay))
g6.add_refinement_features(rfpoly, "polygon", 2, range(nlay))
gu.add_refinement_features(rfpoly, "polygon", 2, range(nlay))
@@ -718,37 +664,17 @@ def test_gridgen(function_tmpdir):
xmax = 10 * delr
ymin = 10 * delc
ymax = 11 * delc
- rfpoly = [
- [
- [
- (xmin, ymin),
- (xmax, ymin),
- (xmax, ymax),
- (xmin, ymax),
- (xmin, ymin),
- ]
- ]
- ]
+ rfpoly = [[[(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax), (xmin, ymin)]]]
g.add_refinement_features(rfpoly, "polygon", 3, range(nlay))
g6.add_refinement_features(rfpoly, "polygon", 3, range(nlay))
gu.add_refinement_features(rfpoly, "polygon", 3, range(nlay))
- # inactivate parts of mfusg layer 2 to test vertical-pass-through option
+ # deactivate parts of mfusg layer 2 to test vertical-pass-through option
xmin = 0 * delr
xmax = 18 * delr
ymin = 0 * delc
ymax = 18 * delc
- adpoly2 = [
- [
- [
- (xmin, ymin),
- (xmax, ymin),
- (xmax, ymax),
- (xmin, ymax),
- (xmin, ymin),
- ]
- ]
- ]
+ adpoly2 = [[[(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax), (xmin, ymin)]]]
gu.add_active_domain(adpoly2, layers=[1])
adpoly1_3 = [[[(0.0, 0.0), (Lx, 0.0), (Lx, Ly), (0.0, Ly), (0.0, 0.0)]]]
gu.add_active_domain(adpoly1_3, layers=[0, 2])
@@ -766,15 +692,13 @@ def test_gridgen(function_tmpdir):
points = [(4750.0, 5250.0)]
cells = g.intersect(points, "point", 0)
n = cells["nodenumber"][0]
- msg = (
- f"gridgen point intersect did not identify the correct cell {n} <> 308"
- )
+ msg = f"gridgen point intersect did not identify the correct cell {n} <> 308"
assert n == 308, msg
# test the gridgen line intersection
line = [[(Lx, Ly), (Lx, 0.0)]]
cells = g.intersect(line, "line", 0)
- nlist = [n for n in cells["nodenumber"]]
+ nlist = list(cells["nodenumber"])
nlist2 = [
19,
650,
@@ -798,9 +722,8 @@ def test_gridgen(function_tmpdir):
455,
384,
]
- msg = (
- "gridgen line intersect did not identify the correct "
- "cells {} <> {}".format(nlist, nlist2)
+ msg = "gridgen line intersect did not identify the correct cells {} <> {}".format(
+ nlist, nlist2
)
assert nlist == nlist2, msg
@@ -824,10 +747,7 @@ def test_gridgen(function_tmpdir):
"be (with vertical pass through activated)."
)
assert (
- len(
- ja0[(ja0 > disu_vp.nodelay[0]) & (ja0 <= sum(disu_vp.nodelay[:2]))]
- )
- == 0
+ len(ja0[(ja0 > disu_vp.nodelay[0]) & (ja0 <= sum(disu_vp.nodelay[:2]))]) == 0
), msg
# test mfusg without vertical pass-through
@@ -862,9 +782,7 @@ def test_flopy_issue_1492(function_tmpdir):
botm = [top - k * dz for k in range(1, nlay + 1)]
# Create a dummy model and regular grid to use as the base grid for gridgen
- sim = flopy.mf6.MFSimulation(
- sim_name=name, sim_ws=function_tmpdir, exe_name="mf6"
- )
+ sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=function_tmpdir, exe_name="mf6")
gwf = flopy.mf6.ModflowGwf(sim, modelname=name)
dis = flopy.mf6.ModflowGwfdis(
gwf,
@@ -906,9 +824,7 @@ def test_flopy_issue_1492(function_tmpdir):
gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
disv = flopy.mf6.ModflowGwfdisv(gwf, **disv_gridprops)
ic = flopy.mf6.ModflowGwfic(gwf)
- npf = flopy.mf6.ModflowGwfnpf(
- gwf, xt3doptions=True, save_specific_discharge=True
- )
+ npf = flopy.mf6.ModflowGwfnpf(gwf, xt3doptions=True, save_specific_discharge=True)
chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chdspd)
budget_file = name + ".bud"
head_file = name + ".hds"
diff --git a/autotest/test_gridintersect.py b/autotest/test_gridintersect.py
index 268de09f90..15ec8b494b 100644
--- a/autotest/test_gridintersect.py
+++ b/autotest/test_gridintersect.py
@@ -1,19 +1,15 @@
-import os
-
import matplotlib.pyplot as plt
import numpy as np
import pytest
from modflow_devtools.markers import requires_pkg
from modflow_devtools.misc import has_pkg
-import flopy
import flopy.discretization as fgrid
-import flopy.plot as fplot
-from flopy.modflow import Modflow
-from flopy.utils import Raster
from flopy.utils.gridintersect import GridIntersect
from flopy.utils.triangle import Triangle
+# TODO: remove all structured tests in v3.10.0, see TODOs in the tests
+
if has_pkg("shapely", strict=True):
from shapely.geometry import (
LineString,
@@ -126,35 +122,41 @@ def get_rect_vertex_grid(angrot=0.0, xyoffset=0.0):
@requires_pkg("shapely")
def test_rect_grid_3d_point_outside():
- botm = np.concatenate([np.ones(4), np.zeros(4)]).reshape(2, 2, 2)
- gr = get_rect_grid(top=np.ones(4), botm=botm)
- ix = GridIntersect(gr, method="structured")
+ botm = np.concatenate([np.ones(4), np.zeros(4)]).reshape((2, 2, 2))
+ gr = get_rect_grid(top=np.ones(4).reshape((2, 2)), botm=botm)
+ ix = GridIntersect(gr, method="vertex")
result = ix.intersect(Point(25.0, 25.0, 0.5))
assert len(result) == 0
-@requires_pkg("shapely")
-def test_rect_grid_3d_point_inside():
- botm = np.concatenate([np.ones(4), 0.5 * np.ones(4), np.zeros(4)]).reshape(
- 3, 2, 2
- )
- gr = get_rect_grid(top=np.ones(4), botm=botm)
- ix = GridIntersect(gr, method="structured")
- result = ix.intersect(Point(2.0, 2.0, 0.2))
- assert result.cellids[0] == (1, 1, 0)
+# TODO: fix 3D point tests to work when above or below grid
+# @requires_pkg("shapely")
+# def test_rect_grid_3d_point_inside():
+# botm = np.concatenate(
+# [
+# np.ones(4),
+# 0.5 * np.ones(4),
+# np.zeros(4),
+# ]
+# ).reshape((3, 2, 2))
+# gr = get_rect_grid(top=np.ones(4).reshape((2, 2)), botm=botm)
+# ix = GridIntersect(gr, method="vertex")
+# result = ix.intersect(Point(2.0, 2.0, 0.2))
+# assert result.cellids[0] == (1, 0)
-@requires_pkg("shapely")
-def test_rect_grid_3d_point_above():
- botm = np.concatenate([np.ones(4), np.zeros(4)]).reshape(2, 2, 2)
- gr = get_rect_grid(top=np.ones(4), botm=botm)
- ix = GridIntersect(gr, method="structured")
- result = ix.intersect(Point(2.0, 2.0, 2))
- assert len(result) == 0
+# @requires_pkg("shapely")
+# def test_rect_grid_3d_point_above():
+# botm = np.concatenate([np.ones(4), np.zeros(4)]).reshape((2, 2, 2))
+# gr = get_rect_grid(top=np.ones(4).reshape((2, 2)), botm=botm)
+# ix = GridIntersect(gr, method="vertex")
+# result = ix.intersect(Point(2.0, 2.0, 2.0))
+# assert len(result) == 0
@requires_pkg("shapely")
def test_rect_grid_point_outside():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
# use GeoSpatialUtil to convert to shapely geometry
@@ -164,6 +166,7 @@ def test_rect_grid_point_outside():
@requires_pkg("shapely")
def test_rect_grid_point_on_outer_boundary():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
result = ix.intersect(Point(20.0, 10.0))
@@ -173,6 +176,7 @@ def test_rect_grid_point_on_outer_boundary():
@requires_pkg("shapely")
def test_rect_grid_point_on_inner_boundary():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
result = ix.intersect(Point(10.0, 10.0))
@@ -182,6 +186,7 @@ def test_rect_grid_point_on_inner_boundary():
@requires_pkg("shapely")
def test_rect_grid_multipoint_in_one_cell():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
result = ix.intersect(MultiPoint([Point(1.0, 1.0), Point(2.0, 2.0)]))
@@ -191,6 +196,7 @@ def test_rect_grid_multipoint_in_one_cell():
@requires_pkg("shapely")
def test_rect_grid_multipoint_in_multiple_cells():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
result = ix.intersect(MultiPoint([Point(1.0, 1.0), Point(12.0, 12.0)]))
@@ -207,7 +213,8 @@ def test_rect_grid_multipoint_in_multiple_cells():
def test_rect_grid_point_outside_shapely(rtree):
gr = get_rect_grid()
ix = GridIntersect(gr, method="vertex", rtree=rtree)
- result = ix.intersect(Point(25.0, 25.0))
+ # use GeoSpatialUtil to convert to shapely geometry
+ result = ix.intersect((25.0, 25.0), shapetype="point")
assert len(result) == 0
@@ -334,6 +341,7 @@ def test_tri_grid_multipoint_in_multiple_cells(rtree):
@requires_pkg("shapely")
@rtree_toggle
def test_rect_grid_point_on_all_vertices_return_all_ix(rtree):
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured", rtree=rtree)
n_intersections = [1, 2, 1, 2, 4, 2, 1, 2, 1]
@@ -369,6 +377,7 @@ def test_tri_grid_points_on_all_vertices_return_all_ix_shapely(rtree):
@requires_pkg("shapely")
def test_rect_grid_linestring_outside():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
result = ix.intersect(LineString([(25.0, 25.0), (21.0, 5.0)]))
@@ -377,6 +386,7 @@ def test_rect_grid_linestring_outside():
@requires_pkg("shapely")
def test_rect_grid_linestring_in_2cells():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
result = ix.intersect(LineString([(5.0, 5.0), (15.0, 5.0)]))
@@ -388,6 +398,7 @@ def test_rect_grid_linestring_in_2cells():
@requires_pkg("shapely")
def test_rect_grid_linestring_on_outer_boundary():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
result = ix.intersect(LineString([(15.0, 20.0), (5.0, 20.0)]))
@@ -399,6 +410,7 @@ def test_rect_grid_linestring_on_outer_boundary():
@requires_pkg("shapely")
def test_rect_grid_linestring_on_inner_boundary():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
result = ix.intersect(LineString([(5.0, 10.0), (15.0, 10.0)]))
@@ -410,14 +422,12 @@ def test_rect_grid_linestring_on_inner_boundary():
@requires_pkg("shapely")
def test_rect_grid_multilinestring_in_one_cell():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
result = ix.intersect(
MultiLineString(
- [
- LineString([(1.0, 1), (9.0, 1.0)]),
- LineString([(1.0, 9.0), (9.0, 9.0)]),
- ]
+ [LineString([(1.0, 1), (9.0, 1.0)]), LineString([(1.0, 9.0), (9.0, 9.0)])]
)
)
assert len(result) == 1
@@ -427,6 +437,7 @@ def test_rect_grid_multilinestring_in_one_cell():
@requires_pkg("shapely")
def test_rect_grid_multilinestring_in_multiple_cells():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
result = ix.intersect(
@@ -443,6 +454,7 @@ def test_rect_grid_multilinestring_in_multiple_cells():
@requires_pkg("shapely")
def test_rect_grid_linestring_in_and_out_of_cell():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
result = ix.intersect(LineString([(5.0, 9), (15.0, 5.0), (5.0, 1.0)]))
@@ -454,16 +466,16 @@ def test_rect_grid_linestring_in_and_out_of_cell():
@requires_pkg("shapely")
def test_rect_grid_linestring_in_and_out_of_cell2():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
- result = ix.intersect(
- LineString([(5, 15), (5.0, 9), (15.0, 5.0), (5.0, 1.0)])
- )
+ result = ix.intersect(LineString([(5, 15), (5.0, 9), (15.0, 5.0), (5.0, 1.0)]))
assert len(result) == 3
@requires_pkg("shapely")
def test_rect_grid_linestrings_on_boundaries_return_all_ix():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
x, y = ix._rect_grid_to_geoms_cellids()[0][0].exterior.xy
@@ -476,6 +488,7 @@ def test_rect_grid_linestrings_on_boundaries_return_all_ix():
@requires_pkg("shapely")
def test_rect_grid_linestring_starting_on_vertex():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
result = ix.intersect(LineString([(10.0, 10.0), (15.0, 5.0)]))
@@ -539,10 +552,7 @@ def test_rect_grid_multilinestring_in_one_cell_shapely(rtree):
ix = GridIntersect(gr, method="vertex", rtree=rtree)
result = ix.intersect(
MultiLineString(
- [
- LineString([(1.0, 1), (9.0, 1.0)]),
- LineString([(1.0, 9.0), (9.0, 9.0)]),
- ]
+ [LineString([(1.0, 1), (9.0, 1.0)]), LineString([(1.0, 9.0), (9.0, 9.0)])]
)
)
assert len(result) == 1
@@ -579,6 +589,24 @@ def test_rect_grid_linestring_in_and_out_of_cell_shapely(rtree):
assert np.allclose(result.lengths.sum(), 21.540659228538015)
+@requires_pkg("shapely")
+def test_rect_grid_linestring_in_and_out_of_cell2_shapely():
+ gr = get_rect_grid()
+ ix = GridIntersect(gr, method="vertex")
+ result = ix.intersect(LineString([(5, 15), (5.0, 9), (15.0, 5.0), (5.0, 1.0)]))
+ assert len(result) == 3
+
+
+@requires_pkg("shapely")
+def test_rect_grid_linestring_starting_on_vertex_shapely():
+ gr = get_rect_grid()
+ ix = GridIntersect(gr, method="vertex")
+ result = ix.intersect(LineString([(10.0, 10.0), (15.0, 5.0)]))
+ assert len(result) == 1
+ assert np.allclose(result.lengths.sum(), np.sqrt(50))
+ assert result.cellids[0] == (1, 1)
+
+
@requires_pkg("shapely")
@rtree_toggle
def test_rect_grid_linestrings_on_boundaries_return_all_ix_shapely(rtree):
@@ -674,10 +702,7 @@ def test_tri_grid_multilinestring_in_one_cell(rtree):
ix = GridIntersect(gr, rtree=rtree)
result = ix.intersect(
MultiLineString(
- [
- LineString([(1.0, 1), (9.0, 1.0)]),
- LineString([(2.0, 2.0), (9.0, 2.0)]),
- ]
+ [LineString([(1.0, 1), (9.0, 1.0)]), LineString([(2.0, 2.0), (9.0, 2.0)])]
)
)
assert len(result) == 1
@@ -737,11 +762,32 @@ def test_tri_grid_linestring_cell_boundary_return_all_ix_shapely(rtree):
assert len(r) == 3
+@requires_pkg("shapely")
+def test_rect_vertex_grid_linestring_geomcollection():
+ gr = get_rect_vertex_grid()
+ ix = GridIntersect(gr, method="vertex")
+ ls = LineString(
+ [
+ (20.0, 0.0),
+ (5.0, 5.0),
+ (15.0, 7.5),
+ (10.0, 10.0),
+ (5.0, 15.0),
+ (10.0, 19.0),
+ (10.0, 20.0),
+ ]
+ )
+ result = ix.intersect(ls)
+ assert len(result) == 3
+ assert np.allclose(result.lengths.sum(), ls.length)
+
+
# %% test polygon structured
@requires_pkg("shapely")
def test_rect_grid_polygon_outside():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
result = ix.intersect(Polygon([(21.0, 11.0), (23.0, 17.0), (25.0, 11.0)]))
@@ -750,17 +796,17 @@ def test_rect_grid_polygon_outside():
@requires_pkg("shapely")
def test_rect_grid_polygon_in_2cells():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
- result = ix.intersect(
- Polygon([(2.5, 5.0), (7.5, 5.0), (7.5, 15.0), (2.5, 15.0)])
- )
+ result = ix.intersect(Polygon([(2.5, 5.0), (7.5, 5.0), (7.5, 15.0), (2.5, 15.0)]))
assert len(result) == 2
assert result.areas.sum() == 50.0
@requires_pkg("shapely")
def test_rect_grid_polygon_on_outer_boundary():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
result = ix.intersect(
@@ -771,29 +817,22 @@ def test_rect_grid_polygon_on_outer_boundary():
@requires_pkg("shapely")
def test_rect_grid_polygon_running_along_boundary():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
result = ix.intersect(
Polygon(
- [
- (5.0, 5.0),
- (5.0, 10.0),
- (9.0, 10.0),
- (9.0, 15.0),
- (1.0, 15.0),
- (1.0, 5.0),
- ]
+ [(5.0, 5.0), (5.0, 10.0), (9.0, 10.0), (9.0, 15.0), (1.0, 15.0), (1.0, 5.0)]
)
)
@requires_pkg("shapely")
def test_rect_grid_polygon_on_inner_boundary():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
- result = ix.intersect(
- Polygon([(5.0, 10.0), (15.0, 10.0), (15.0, 5.0), (5.0, 5.0)])
- )
+ result = ix.intersect(Polygon([(5.0, 10.0), (15.0, 10.0), (15.0, 5.0), (5.0, 5.0)]))
assert len(result) == 2
assert result.areas.sum() == 50.0
@@ -805,6 +844,7 @@ def test_rect_grid_polygon_on_inner_boundary():
@requires_pkg("shapely")
def test_rect_grid_polygon_multiple_polygons():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
p = Polygon(
[
@@ -832,6 +872,7 @@ def test_rect_grid_polygon_multiple_polygons():
@requires_pkg("shapely")
def test_rect_grid_multiple_disjoint_polygons_on_inner_boundaries():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
p1 = Polygon([(5.0, 10.0), (15.0, 10.0), (15.0, 5.0), (5.0, 5.0)])
@@ -850,6 +891,7 @@ def test_rect_grid_multiple_disjoint_polygons_on_inner_boundaries():
@requires_pkg("shapely")
@pytest.mark.parametrize("transform", [True, False])
def test_rect_grid_polygon_reintersects_cell(transform):
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
if transform:
gr.set_coord_info(xoff=1, yoff=1, angrot=10.5)
@@ -884,6 +926,7 @@ def test_rect_grid_polygon_reintersects_cell(transform):
@requires_pkg("shapely")
def test_rect_grid_multipolygon_in_one_cell():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
p1 = Polygon([(1.0, 1.0), (8.0, 1.0), (8.0, 3.0), (1.0, 3.0)])
@@ -896,6 +939,7 @@ def test_rect_grid_multipolygon_in_one_cell():
@requires_pkg("shapely")
def test_rect_grid_multipolygon_in_multiple_cells():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
p1 = Polygon([(1.0, 1.0), (19.0, 1.0), (19.0, 3.0), (1.0, 3.0)])
@@ -908,6 +952,7 @@ def test_rect_grid_multipolygon_in_multiple_cells():
@requires_pkg("shapely")
def test_rect_grid_polygon_with_hole():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, method="structured")
p = Polygon(
@@ -922,6 +967,7 @@ def test_rect_grid_polygon_with_hole():
@requires_pkg("shapely")
@rtree_toggle
def test_rect_grid_polygon_contains_centroid(rtree):
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, rtree=rtree)
p = Polygon(
@@ -935,6 +981,7 @@ def test_rect_grid_polygon_contains_centroid(rtree):
@requires_pkg("shapely")
@rtree_toggle
def test_rect_grid_polygon_min_area(rtree):
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr, rtree=rtree)
p = Polygon(
@@ -947,6 +994,7 @@ def test_rect_grid_polygon_min_area(rtree):
@requires_pkg("shapely")
def test_rect_grid_polygon_centroid_and_min_area():
+ # TODO: remove in 3.10.0
gr = get_rect_grid()
ix = GridIntersect(gr)
p = Polygon(
@@ -974,9 +1022,7 @@ def test_rect_grid_polygon_outside_shapely(rtree):
def test_rect_grid_polygon_in_2cells_shapely(rtree):
gr = get_rect_grid()
ix = GridIntersect(gr, method="vertex", rtree=rtree)
- result = ix.intersect(
- Polygon([(2.5, 5.0), (7.5, 5.0), (7.5, 15.0), (2.5, 15.0)])
- )
+ result = ix.intersect(Polygon([(2.5, 5.0), (7.5, 5.0), (7.5, 15.0), (2.5, 15.0)]))
assert len(result) == 2
assert result.areas.sum() == 50.0
@@ -992,14 +1038,23 @@ def test_rect_grid_polygon_on_outer_boundary_shapely(rtree):
assert len(result) == 0
+@requires_pkg("shapely")
+def test_rect_grid_polygon_running_along_boundary_shapely():
+ gr = get_rect_grid()
+ ix = GridIntersect(gr, method="vertex")
+ result = ix.intersect(
+ Polygon(
+ [(5.0, 5.0), (5.0, 10.0), (9.0, 10.0), (9.0, 15.0), (1.0, 15.0), (1.0, 5.0)]
+ )
+ )
+
+
@requires_pkg("shapely")
@rtree_toggle
def test_rect_grid_polygon_on_inner_boundary_shapely(rtree):
gr = get_rect_grid()
ix = GridIntersect(gr, method="vertex", rtree=rtree)
- result = ix.intersect(
- Polygon([(5.0, 10.0), (15.0, 10.0), (15.0, 5.0), (5.0, 5.0)])
- )
+ result = ix.intersect(Polygon([(5.0, 10.0), (15.0, 10.0), (15.0, 5.0), (5.0, 5.0)]))
assert len(result) == 2
assert result.areas.sum() == 50.0
@@ -1050,14 +1105,7 @@ def test_rect_grid_polygon_in_edge_in_cell(rtree):
gr = get_rect_grid()
ix = GridIntersect(gr, method="vertex", rtree=rtree)
p = Polygon(
- [
- (0.0, 5.0),
- (3.0, 0.0),
- (7.0, 0.0),
- (10.0, 5.0),
- (10.0, -1.0),
- (0.0, -1.0),
- ]
+ [(0.0, 5.0), (3.0, 0.0), (7.0, 0.0), (10.0, 5.0), (10.0, -1.0), (0.0, -1.0)]
)
result = ix.intersect(p)
assert len(result) == 1
@@ -1082,9 +1130,7 @@ def test_tri_grid_polygon_in_2cells(rtree):
if gr == -1:
return
ix = GridIntersect(gr, rtree=rtree)
- result = ix.intersect(
- Polygon([(2.5, 5.0), (5.0, 5.0), (5.0, 15.0), (2.5, 15.0)])
- )
+ result = ix.intersect(Polygon([(2.5, 5.0), (5.0, 5.0), (5.0, 15.0), (2.5, 15.0)]))
assert len(result) == 2
assert result.areas.sum() == 25.0
@@ -1109,9 +1155,7 @@ def test_tri_grid_polygon_on_inner_boundary(rtree):
if gr == -1:
return
ix = GridIntersect(gr, rtree=rtree)
- result = ix.intersect(
- Polygon([(5.0, 10.0), (15.0, 10.0), (15.0, 5.0), (5.0, 5.0)])
- )
+ result = ix.intersect(Polygon([(5.0, 10.0), (15.0, 10.0), (15.0, 5.0), (5.0, 5.0)]))
assert len(result) == 4
assert result.areas.sum() == 50.0
@@ -1197,6 +1241,7 @@ def test_tri_grid_polygon_contains_centroid(rtree):
@requires_pkg("shapely")
def test_point_offset_rot_structured_grid():
+ # TODO: remove in 3.10.0
sgr = get_rect_grid(angrot=45.0, xyoffset=10.0)
p = Point(10.0, 10 + np.sqrt(200.0))
ix = GridIntersect(sgr, method="structured")
@@ -1210,6 +1255,7 @@ def test_point_offset_rot_structured_grid():
@requires_pkg("shapely")
def test_linestring_offset_rot_structured_grid():
+ # TODO: remove in 3.10.0
sgr = get_rect_grid(angrot=45.0, xyoffset=10.0)
ls = LineString([(5, 25), (15, 25)])
ix = GridIntersect(sgr, method="structured")
@@ -1223,6 +1269,7 @@ def test_linestring_offset_rot_structured_grid():
@requires_pkg("shapely")
def test_polygon_offset_rot_structured_grid():
+ # TODO: remove in 3.10.0
sgr = get_rect_grid(angrot=45.0, xyoffset=10.0)
p = Polygon(
[
@@ -1337,232 +1384,3 @@ def test_polygon_offset_rot_vertex_grid_shapely(rtree):
ix = GridIntersect(sgr, method="vertex", rtree=rtree, local=True)
result = ix.intersect(p)
assert len(result) == 0
-
-
-# %% test rasters
-
-
-@requires_pkg("rasterstats", "scipy", "shapely")
-def test_rasters(example_data_path):
- ws = example_data_path / "options"
- raster_name = "dem.img"
-
- rio = Raster.load(ws / "dem" / raster_name)
-
- ml = Modflow.load(
- "sagehen.nam", version="mfnwt", model_ws=os.path.join(ws, "sagehen")
- )
- xoff = 214110
- yoff = 4366620
- ml.modelgrid.set_coord_info(xoff, yoff)
-
- # test sampling points and polygons
- val = rio.sample_point(xoff + 2000, yoff + 2000, band=1)
- print(val - 2336.3965)
- if abs(val - 2336.3965) > 1e-4:
- raise AssertionError
-
- x0, x1, y0, y1 = rio.bounds
-
- x0 += 1000
- y0 += 1000
- x1 -= 1000
- y1 -= 1000
- shape = np.array([(x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)])
-
- data = rio.sample_polygon(shape, band=rio.bands[0])
- if data.size != 267050:
- raise AssertionError
- if abs(np.min(data) - 1942.1735) > 1e-4:
- raise AssertionError
- if (np.max(data) - 2608.557) > 1e-4:
- raise AssertionError
-
- rio.crop(shape)
- data = rio.get_array(band=rio.bands[0], masked=True)
- if data.size != 267050:
- raise AssertionError
- if abs(np.min(data) - 1942.1735) > 1e-4:
- raise AssertionError
- if (np.max(data) - 2608.557) > 1e-4:
- raise AssertionError
-
- data = rio.resample_to_grid(
- ml.modelgrid, band=rio.bands[0], method="nearest"
- )
- if data.size != 5913:
- raise AssertionError
- if abs(np.min(data) - 1942.1735) > 1e-4:
- raise AssertionError
- if abs(np.max(data) - 2605.6204) > 1e-4:
- raise AssertionError
-
- del rio
-
-
-# %% test raster sampling methods
-
-
-@pytest.mark.slow
-@requires_pkg("rasterstats")
-def test_raster_sampling_methods(example_data_path):
- ws = example_data_path / "options"
- raster_name = "dem.img"
-
- rio = Raster.load(ws / "dem" / raster_name)
-
- ml = Modflow.load("sagehen.nam", version="mfnwt", model_ws=ws / "sagehen")
- xoff = 214110
- yoff = 4366620
- ml.modelgrid.set_coord_info(xoff, yoff)
-
- x0, x1, y0, y1 = rio.bounds
-
- x0 += 3000
- y0 += 3000
- x1 -= 3000
- y1 -= 3000
- shape = np.array([(x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)])
-
- rio.crop(shape)
-
- methods = {
- "min": 2088.52343,
- "max": 2103.54882,
- "mean": 2097.05054,
- "median": 2097.36254,
- "mode": 2088.52343,
- "nearest": 2097.81079,
- "linear": 2097.81079,
- "cubic": 2097.81079,
- }
-
- for method, value in methods.items():
- data = rio.resample_to_grid(
- ml.modelgrid, band=rio.bands[0], method=method
- )
-
- print(data[30, 37])
- if np.abs(data[30, 37] - value) > 1e-05:
- raise AssertionError(
- f"{method} resampling returning incorrect values"
- )
-
-
-@requires_pkg("rasterio")
-def test_raster_reprojection(example_data_path):
- ws = example_data_path / "options" / "dem"
- raster_name = "dem.img"
-
- wgs_epsg = 4326
- wgs_xmin = -120.32116799649168
- wgs_ymax = 39.46620605907534
-
- raster = Raster.load(ws / raster_name)
-
- print(raster.crs.to_epsg())
- wgs_raster = raster.to_crs(crs=f"EPSG:{wgs_epsg}")
-
- if not wgs_raster.crs.to_epsg() == wgs_epsg:
- raise AssertionError(f"Raster not converted to EPSG {wgs_epsg}")
-
- transform = wgs_raster._meta["transform"]
- if not np.isclose(transform.c, wgs_xmin) and not np.isclose(
- transform.f, wgs_ymax
- ):
- raise AssertionError(f"Raster not reprojected to EPSG {wgs_epsg}")
-
- raster.to_crs(epsg=wgs_epsg, inplace=True)
- transform2 = raster._meta["transform"]
- for ix, val in enumerate(transform):
- if not np.isclose(val, transform2[ix]):
- raise AssertionError("In place reprojection not working")
-
-
-@requires_pkg("rasterio")
-def test_create_raster_from_array_modelgrid(example_data_path):
- ws = example_data_path / "options" / "dem"
- raster_name = "dem.img"
-
- raster = Raster.load(ws / raster_name)
-
- xsize = 200
- ysize = 100
- xmin, xmax, ymin, ymax = raster.bounds
-
- nbands = 5
- nlay = 1
- nrow = int(np.floor((ymax - ymin) / ysize))
- ncol = int(np.floor((xmax - xmin) / xsize))
-
- delc = np.full((nrow,), ysize)
- delr = np.full((ncol,), xsize)
-
- grid = flopy.discretization.StructuredGrid(
- delc=delc,
- delr=delr,
- top=np.ones((nrow, ncol)),
- botm=np.zeros((nlay, nrow, ncol)),
- idomain=np.ones((nlay, nrow, ncol), dtype=int),
- xoff=xmin,
- yoff=ymin,
- crs=raster.crs,
- )
-
- array = np.random.random((grid.ncpl * nbands,)) * 100
- robj = Raster.raster_from_array(array, grid)
-
- if nbands != len(robj.bands):
- raise AssertionError("Number of raster bands is incorrect")
-
- array = array.reshape((nbands, nrow, ncol))
- for band in robj.bands:
- ra = robj.get_array(band)
- np.testing.assert_allclose(
- array[band - 1],
- ra,
- err_msg="Array not properly reshaped or converted to raster",
- )
-
-
-@requires_pkg("rasterio", "affine")
-def test_create_raster_from_array_transform(example_data_path):
- import affine
-
- ws = example_data_path / "options" / "dem"
- raster_name = "dem.img"
-
- raster = Raster.load(ws / raster_name)
-
- transform = raster._meta["transform"]
- array = raster.get_array(band=raster.bands[0])
-
- array = np.expand_dims(array, axis=0)
- # same location but shrink raster by factor 2
- new_transform = affine.Affine(
- transform.a / 2, 0, transform.c, 0, transform.e / 2, transform.f
- )
-
- robj = Raster.raster_from_array(
- array, crs=raster.crs, transform=new_transform
- )
-
- rxmin, rxmax, rymin, rymax = robj.bounds
- xmin, xmax, ymin, ymax = raster.bounds
-
- if (
- not ((xmax - xmin) / (rxmax - rxmin)) == 2
- or not ((ymax - ymin) / (rymax - rymin)) == 2
- ):
- raise AssertionError("Transform based raster not working properly")
-
-
-if __name__ == "__main__":
- sgr = get_rect_grid(angrot=45.0, xyoffset=10.0)
- ls = LineString([(5, 10.0 + np.sqrt(200.0)), (15, 10.0 + np.sqrt(200.0))])
- ix = GridIntersect(sgr, method="structured")
- result = ix.intersect(ls)
- assert len(result) == 2
- ix = GridIntersect(sgr, method="structured", local=True)
- result = ix.intersect(ls)
- assert len(result) == 0
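
Aside for reviewers: the point-intersection hunk near the top of this file now passes a bare coordinate tuple with a shapetype keyword instead of a shapely Point, leaning on the GeoSpatialUtil conversion noted in the added comment. A minimal sketch of that call, assuming a small 2x2 structured grid built here only for illustration:

import numpy as np

import flopy
from flopy.utils import GridIntersect

# 2x2 cells of 10x10 units, so the grid spans (0, 0) to (20, 20)
grid = flopy.discretization.StructuredGrid(
    delr=np.full(2, 10.0), delc=np.full(2, 10.0)
)
ix = GridIntersect(grid, method="vertex")
# (25, 25) lies outside the grid, so the result recarray is empty
result = ix.intersect((25.0, 25.0), shapetype="point")
assert len(result) == 0
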
diff --git a/autotest/test_headufile.py b/autotest/test_headufile.py
index e00f5106c8..e8dd089421 100644
--- a/autotest/test_headufile.py
+++ b/autotest/test_headufile.py
@@ -5,13 +5,7 @@
from flopy.discretization import UnstructuredGrid
from flopy.mfusg import MfUsg, MfUsgDisU, MfUsgLpf, MfUsgSms
-from flopy.modflow import (
- Modflow,
- ModflowBas,
- ModflowChd,
- ModflowDis,
- ModflowOc,
-)
+from flopy.modflow import Modflow, ModflowBas, ModflowChd, ModflowDis, ModflowOc
from flopy.utils import HeadUFile
from flopy.utils.gridgen import Gridgen
from flopy.utils.gridutil import get_lni
@@ -96,9 +90,7 @@ def test_get_ts_single_node(mfusg_model):
# test if single node idx works
one_hds = head_file.get_ts(idx=300)
- assert (
- one_hds[0, 1] == head[0][300]
- ), "head from 'get_ts' != head from 'get_data'"
+ assert one_hds[0, 1] == head[0][300], "head from 'get_ts' != head from 'get_data'"
@requires_exe("mfusg", "gridgen")
@@ -145,9 +137,9 @@ def test_get_lni(mfusg_model):
head = head_file.get_data()
def get_expected():
- exp = dict()
+ exp = {}
for l, ncpl in enumerate(list(grid.ncpl)):
- exp[l] = dict()
+ exp[l] = {}
for nn in range(ncpl):
exp[l][nn] = head[l][nn]
return exp
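
The HeadUFile hunks above mostly collapse multi-line asserts; the behavior they check is that a single-node time series agrees with the full head array. A hedged sketch, with a placeholder file name (it needs heads from an actual MODFLOW-USG run to execute):

from flopy.utils import HeadUFile

head_file = HeadUFile("model.hds")   # placeholder path
head = head_file.get_data()          # one head array per layer
one_hds = head_file.get_ts(idx=300)  # column 0 is time, column 1 is head
assert one_hds[0, 1] == head[0][300], "head from 'get_ts' != head from 'get_data'"
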
diff --git a/autotest/test_hydmodfile.py b/autotest/test_hydmodfile.py
index 3bf29a2689..e3592ac8f2 100644
--- a/autotest/test_hydmodfile.py
+++ b/autotest/test_hydmodfile.py
@@ -55,13 +55,9 @@ def test_hydmodfile_create(function_tmpdir):
def test_hydmodfile_load(function_tmpdir, hydmod_model_path):
model = "test1tr.nam"
- m = Modflow.load(
- model, version="mf2005", model_ws=hydmod_model_path, verbose=True
- )
+ m = Modflow.load(model, version="mf2005", model_ws=hydmod_model_path, verbose=True)
hydref = m.hyd
- assert isinstance(
- hydref, ModflowHyd
- ), "Did not load hydmod package...test1tr.hyd"
+ assert isinstance(hydref, ModflowHyd), "Did not load hydmod package...test1tr.hyd"
m.change_model_ws(function_tmpdir)
m.hyd.write_file()
@@ -101,9 +97,7 @@ def test_hydmodfile_read(hydmod_model_path):
for label in labels:
data = h.get_data(obsname=label)
- assert data.shape == (
- len(times),
- ), f"data shape is not ({len(times)},)"
+ assert data.shape == (len(times),), f"data shape is not ({len(times)},)"
data = h.get_data()
assert data.shape == (len(times),), f"data shape is not ({len(times)},)"
@@ -137,9 +131,7 @@ def test_mf6obsfile_read(mf6_obs_model_path):
assert isinstance(h, Mf6Obs)
ntimes = h.get_ntimes()
- assert (
- ntimes == 3
- ), f"Not enough times in {txt} file...{os.path.basename(pth)}"
+ assert ntimes == 3, f"Not enough times in {txt} file...{os.path.basename(pth)}"
times = h.get_times()
assert len(times) == 3, "Not enough times in {} file...{}".format(
@@ -167,14 +159,10 @@ def test_mf6obsfile_read(mf6_obs_model_path):
for label in labels:
data = h.get_data(obsname=label)
- assert data.shape == (
- len(times),
- ), f"data shape is not ({len(times)},)"
+ assert data.shape == (len(times),), f"data shape is not ({len(times)},)"
data = h.get_data()
- assert data.shape == (
- len(times),
- ), f"data shape is not ({len(times)},)"
+ assert data.shape == (len(times),), f"data shape is not ({len(times)},)"
assert (
len(data.dtype.names) == nitems + 1
), f"data column length is not {len(nitems + 1)}"
diff --git a/autotest/test_lake_connections.py b/autotest/test_lake_connections.py
index 71bab3d002..f74de60af3 100644
--- a/autotest/test_lake_connections.py
+++ b/autotest/test_lake_connections.py
@@ -40,9 +40,7 @@ def export_ascii_grid(modelgrid, file_path, v, nodata=0.0):
np.savetxt(f, v, fmt="%.4f")
-def get_lake_connection_data(
- nrow, ncol, delr, delc, lakibd, idomain, lakebed_leakance
-):
+def get_lake_connection_data(nrow, ncol, delr, delc, lakibd, idomain, lakebed_leakance):
# derived from original modflow6-examples function in ex-gwt-prudic2004t2
lakeconnectiondata = []
nlakecon = [0, 0]
@@ -160,24 +158,12 @@ def test_base_run(function_tmpdir, example_data_path):
# export bottom, water levels, and k11 as ascii raster files
# for interpolation in test_lake()
bot = gwf.dis.botm.array.squeeze()
- export_ascii_grid(
- gwf.modelgrid,
- function_tmpdir / "bot.asc",
- bot,
- )
+ export_ascii_grid(gwf.modelgrid, function_tmpdir / "bot.asc", bot)
top = gwf.output.head().get_data().squeeze() + 2.0
top = np.where(gwf.dis.idomain.array.squeeze() < 1.0, 0.0, top)
- export_ascii_grid(
- gwf.modelgrid,
- function_tmpdir / "top.asc",
- top,
- )
+ export_ascii_grid(gwf.modelgrid, function_tmpdir / "top.asc", top)
k11 = gwf.npf.k.array.squeeze()
- export_ascii_grid(
- gwf.modelgrid,
- function_tmpdir / "k11.asc",
- k11,
- )
+ export_ascii_grid(gwf.modelgrid, function_tmpdir / "k11.asc", k11)
@requires_exe("mf6")
@@ -262,9 +248,10 @@ def test_lake(function_tmpdir, example_data_path):
pakdata_dict[0] == 54
), f"number of lake connections ({pakdata_dict[0]}) not equal to 54."
- assert len(connectiondata) == 54, (
- "number of lake connectiondata entries ({}) not equal "
- "to 54.".format(len(connectiondata))
+ assert (
+ len(connectiondata) == 54
+ ), "number of lake connectiondata entries ({}) not equal to 54.".format(
+ len(connectiondata)
)
lak_pak_data = []
@@ -323,13 +310,7 @@ def test_embedded_lak_ex01(function_tmpdir, example_data_path):
)
delc = delr
top = 500.0
- botm = (
- 107.0,
- 97.0,
- 87.0,
- 77.0,
- 67.0,
- )
+ botm = (107.0, 97.0, 87.0, 77.0, 67.0)
lake_map = np.ones(shape3d, dtype=np.int32) * -1
lake_map[0, 6:11, 6:11] = 0
lake_map[1, 7:10, 7:10] = 0
@@ -338,13 +319,7 @@ def test_embedded_lak_ex01(function_tmpdir, example_data_path):
strt = 115.0
k11 = 30
- k33 = (
- 1179.0,
- 30.0,
- 30.0,
- 30.0,
- 30.0,
- )
+ k33 = (1179.0, 30.0, 30.0, 30.0, 30.0)
mpath = example_data_path / "mf2005_test"
ml = Modflow.load(
@@ -462,9 +437,10 @@ def test_embedded_lak_ex01(function_tmpdir, example_data_path):
pakdata_dict[0] == 57
), f"number of lake connections ({pakdata_dict[0]}) not equal to 57."
- assert len(connectiondata) == 57, (
- "number of lake connectiondata entries ({}) not equal "
- "to 57.".format(len(connectiondata))
+ assert (
+ len(connectiondata) == 57
+ ), "number of lake connectiondata entries ({}) not equal to 57.".format(
+ len(connectiondata)
)
lak_pak_data = []
@@ -503,8 +479,8 @@ def test_embedded_lak_prudic(example_data_path):
nlay = 8 # Number of layers
nrow = 36 # Number of rows
ncol = 23 # Number of columns
- delr = float(405.665) # Column width ($ft$)
- delc = float(403.717) # Row width ($ft$)
+ delr = 405.665 # Column width ($ft$)
+ delc = 403.717 # Row width ($ft$)
delv = 15.0 # Layer thickness ($ft$)
top = 100.0 # Top of the model ($ft$)
@@ -517,10 +493,7 @@ def test_embedded_lak_prudic(example_data_path):
bot0 = np.loadtxt(fname)
botm = np.array(
[bot0]
- + [
- np.ones(shape2d, dtype=float) * (bot0 - (delv * k))
- for k in range(1, nlay)
- ]
+ + [np.ones(shape2d, dtype=float) * (bot0 - (delv * k)) for k in range(1, nlay)]
)
fname = data_ws / "prudic2004t2_idomain1.dat"
idomain0 = np.loadtxt(fname, dtype=np.int32)
@@ -559,34 +532,20 @@ def test_embedded_lak_prudic(example_data_path):
for idx, nconn in enumerate(lakconn):
assert pakdata_dict[idx] == nconn, (
"number of connections calculated by get_lak_connections ({}) "
- "not equal to {} for lake {}.".format(
- pakdata_dict[idx], nconn, idx + 1
- )
+ "not equal to {} for lake {}.".format(pakdata_dict[idx], nconn, idx + 1)
)
# compare connectiondata
for idx, (cd, cdbase) in enumerate(zip(connectiondata, cdata)):
- for jdx in (
- 0,
- 1,
- 2,
- 3,
- 7,
- 8,
- ):
+ for jdx in (0, 1, 2, 3, 7, 8):
match = True
- if jdx not in (
- 7,
- 8,
- ):
+ if jdx not in (7, 8):
if cd[jdx] != cdbase[jdx]:
match = False
else:
match = np.allclose(cd[jdx], cdbase[jdx])
if not match:
- print(
- f"connection data do match for connection {idx} for lake {cd[0]}"
- )
+ print(f"connection data do match for connection {idx} for lake {cd[0]}")
break
assert match, f"connection data do not match for connection {jdx}"
@@ -606,8 +565,8 @@ def test_embedded_lak_prudic_mixed(example_data_path):
nlay = 8 # Number of layers
nrow = 36 # Number of rows
ncol = 23 # Number of columns
- delr = float(405.665) # Column width ($ft$)
- delc = float(403.717) # Row width ($ft$)
+ delr = 405.665 # Column width ($ft$)
+ delc = 403.717 # Row width ($ft$)
delv = 15.0 # Layer thickness ($ft$)
top = 100.0 # Top of the model ($ft$)
@@ -620,10 +579,7 @@ def test_embedded_lak_prudic_mixed(example_data_path):
bot0 = np.loadtxt(fname)
botm = np.array(
[bot0]
- + [
- np.ones(shape2d, dtype=float) * (bot0 - (delv * k))
- for k in range(1, nlay)
- ]
+ + [np.ones(shape2d, dtype=float) * (bot0 - (delv * k)) for k in range(1, nlay)]
)
fname = data_ws / "prudic2004t2_idomain1.dat"
idomain0 = np.loadtxt(fname, dtype=np.int32)
@@ -664,8 +620,6 @@ def test_embedded_lak_prudic_mixed(example_data_path):
for data in connectiondata:
lakeno, bedleak = data[0], data[4]
if lakeno == 0:
- assert (
- bedleak == "none"
- ), f"bedleak for lake 0 is not 'none' ({bedleak})"
+ assert bedleak == "none", f"bedleak for lake 0 is not 'none' ({bedleak})"
else:
assert bedleak == 1.0, f"bedleak for lake 1 is not 1.0 ({bedleak})"
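
The bedleak asserts just above walk connectiondata rows produced by the lake-connection utility. A self-contained sketch of that validation loop follows; the two stand-in rows are assumptions about the row layout (index 0 is the lake number, index 4 the bedleak), not real output from get_lak_connections:

connectiondata = [
    (0, 0, (0, 6, 6), "vertical", "none", 0.0, 0.0, 0.0, 0.0),  # lake 0, no leakance
    (1, 0, (0, 7, 7), "vertical", 1.0, 0.0, 0.0, 0.0, 0.0),     # lake 1, bedleak 1.0
]
for data in connectiondata:
    lakeno, bedleak = data[0], data[4]
    if lakeno == 0:
        assert bedleak == "none", f"bedleak for lake 0 is not 'none' ({bedleak})"
    else:
        assert bedleak == 1.0, f"bedleak for lake 1 is not 1.0 ({bedleak})"
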
diff --git a/autotest/test_lgr.py b/autotest/test_lgr.py
index 2a01b8ae8c..fc20a1ae43 100644
--- a/autotest/test_lgr.py
+++ b/autotest/test_lgr.py
@@ -84,7 +84,9 @@ def singleModel(
# Variables for the BAS package
ibound = np.ones((nlay, nrow, ncol), dtype=np.int32)
if iChild > 0:
- iBndBnd = 59 # code for child cell to be linked to parent; value assigned to ibflg in the LGR-data
+ # code for child cell to be linked to parent;
+ # value assigned to ibflg in the LGR-data
+ iBndBnd = 59
else:
iBndBnd = -1
ibound[:, 0, :] = iBndBnd
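
The added comment explains the magic number in this hunk; the pattern in brief (values copied from the diff, grid shape chosen arbitrarily):

import numpy as np

nlay, nrow, ncol = 2, 5, 5
iChild = 1
ibound = np.ones((nlay, nrow, ncol), dtype=np.int32)
# 59 flags child cells linked to the parent (the ibflg value in the LGR data);
# a standalone parent model uses -1 (constant head) on that edge instead
iBndBnd = 59 if iChild > 0 else -1
ibound[:, 0, :] = iBndBnd
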
diff --git a/autotest/test_lgrutil.py b/autotest/test_lgrutil.py
index 67d7a53259..e064fc4d87 100644
--- a/autotest/test_lgrutil.py
+++ b/autotest/test_lgrutil.py
@@ -74,16 +74,7 @@ def test_lgrutil():
errmsg = f"{ans1} /= {exchange_data[0]}"
assert exchange_data[0] == ans1, errmsg
- ans2 = [
- (2, 3, 3),
- (1, 8, 8),
- 0,
- 50.0,
- 50,
- 1111.1111111111113,
- 180.0,
- 100.0,
- ]
+ ans2 = [(2, 3, 3), (1, 8, 8), 0, 50.0, 50, 1111.1111111111113, 180.0, 100.0]
errmsg = f"{ans2} /= {exchange_data[-1]}"
assert exchange_data[-1] == ans2, errmsg
@@ -91,10 +82,7 @@ def test_lgrutil():
assert len(exchange_data) == 72 + 81, errmsg
# list of parent cells connected to a child cell
- assert lgr.get_parent_connections(0, 0, 0) == [
- ((0, 1, 0), -1),
- ((0, 0, 1), 2),
- ]
+ assert lgr.get_parent_connections(0, 0, 0) == [((0, 1, 0), -1), ((0, 0, 1), 2)]
assert lgr.get_parent_connections(1, 8, 8) == [
((1, 3, 4), 1),
((1, 4, 3), -2),
diff --git a/autotest/test_listbudget.py b/autotest/test_listbudget.py
index a9d7ce7929..aa6a586f69 100644
--- a/autotest/test_listbudget.py
+++ b/autotest/test_listbudget.py
@@ -7,12 +7,7 @@
from modflow_devtools.markers import requires_pkg
from modflow_devtools.misc import has_pkg
-from flopy.utils import (
- Mf6ListBudget,
- MfListBudget,
- MfusgListBudget,
- MtListBudget,
-)
+from flopy.utils import Mf6ListBudget, MfListBudget, MfusgListBudget, MtListBudget
def test_mflistfile(example_data_path):
@@ -68,9 +63,7 @@ def test_mflist_reducedpumping(example_data_path):
"""
test reading reduced pumping data from list file
"""
- pth = (
- example_data_path / "mfusg_test" / "03B_conduit_unconfined" / "output"
- )
+ pth = example_data_path / "mfusg_test" / "03B_conduit_unconfined" / "output"
list_file = pth / "ex3B.lst"
mflist = MfusgListBudget(list_file)
assert isinstance(mflist.get_reduced_pumping(), np.recarray)
@@ -82,14 +75,7 @@ def test_mf6listfile(example_data_path):
assert os.path.exists(list_file)
mflist = Mf6ListBudget(list_file)
names = mflist.get_record_names()
- for item in [
- "RCH_IN",
- "RCH2_IN",
- "RCH3_IN",
- "RCH_OUT",
- "RCH2_OUT",
- "RCH3_OUT",
- ]:
+ for item in ["RCH_IN", "RCH2_IN", "RCH3_IN", "RCH_OUT", "RCH2_OUT", "RCH3_OUT"]:
assert item in names, f"{item} not found in names"
assert len(names) == 26
inc = mflist.get_incremental()
@@ -99,9 +85,7 @@ def test_mflist_reducedpumping_fail(example_data_path):
"""
test failure for reading reduced pumping data from list file
"""
- pth = (
- example_data_path / "mfusg_test" / "03A_conduit_unconfined" / "output"
- )
+ pth = example_data_path / "mfusg_test" / "03A_conduit_unconfined" / "output"
list_file = pth / "ex3A.lst"
# Catch before flopy to avoid masking file not found assert
if not os.path.isfile(list_file):
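
For context on the list-budget hunks: the tests open a MODFLOW list file and query budget records by name. A sketch of the API as used above, with a placeholder file name:

from flopy.utils import Mf6ListBudget

mflist = Mf6ListBudget("mf6model.lst")  # placeholder path to a MF6 list file
names = mflist.get_record_names()       # e.g. "RCH_IN", "RCH_OUT", ...
inc = mflist.get_incremental()          # per-timestep budget as a recarray
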
diff --git a/autotest/test_mbase.py b/autotest/test_mbase.py
index 732b188776..59e796b057 100644
--- a/autotest/test_mbase.py
+++ b/autotest/test_mbase.py
@@ -65,16 +65,12 @@ def test_resolve_exe_by_rel_path(function_tmpdir, use_ext, forgive):
assert actual.lower() == expected
assert which(actual)
- # check behavior if exe DNE
- with pytest.warns(UserWarning) if forgive else pytest.raises(
- FileNotFoundError
- ):
+ # check behavior if exe does not exist
+ with pytest.warns(UserWarning) if forgive else pytest.raises(FileNotFoundError):
assert not resolve_exe("../bin/mf2005", forgive)
-def test_run_model_when_namefile_not_in_model_ws(
- mf6_model_path, function_tmpdir
-):
+def test_run_model_when_namefile_not_in_model_ws(mf6_model_path, function_tmpdir):
# copy input files to temp workspace
ws = function_tmpdir / "ws"
copytree(mf6_model_path, ws)
@@ -171,9 +167,7 @@ def test_run_model_exe_rel_path(mf6_model_path, function_tmpdir, use_ext):
relpath_safe(Path(which("mf6") or "")),
],
)
-def test_run_model_custom_print(
- mf6_model_path, function_tmpdir, use_paths, exe
-):
+def test_run_model_custom_print(mf6_model_path, function_tmpdir, use_paths, exe):
ws = function_tmpdir / "ws"
copytree(mf6_model_path, ws)
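
The resolve_exe hunk above reads better after the rewrap; the forgive switch it tests either warns or raises for a missing executable. A sketch, assuming resolve_exe is imported from flopy.mbase as in this test module:

import pytest

from flopy.mbase import resolve_exe

forgive = True  # False would expect FileNotFoundError instead of a warning
ctx = pytest.warns(UserWarning) if forgive else pytest.raises(FileNotFoundError)
with ctx:
    # a relative path that does not exist resolves to None when forgiven
    assert not resolve_exe("../bin/mf2005", forgive)
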
diff --git a/autotest/test_mf6.py b/autotest/test_mf6.py
index 6401fa8fb3..9a1a43b6d0 100644
--- a/autotest/test_mf6.py
+++ b/autotest/test_mf6.py
@@ -72,13 +72,7 @@
mfims,
mftdis,
)
-from flopy.utils import (
- CellBudgetFile,
- HeadFile,
- Mf6ListBudget,
- Mf6Obs,
- ZoneBudget6,
-)
+from flopy.utils import CellBudgetFile, HeadFile, Mf6ListBudget, Mf6Obs, ZoneBudget6
from flopy.utils.observationfile import CsvFile
from flopy.utils.triangle import Triangle
from flopy.utils.voronoi import VoronoiGrid
@@ -233,9 +227,7 @@ def get_gwt_model(sim, gwtname, gwtpath, modelshape, sourcerecarray=None):
gwt,
budget_filerecord=f"{gwtname}.cbc",
concentration_filerecord=f"{gwtname}.ucn",
- concentrationprintrecord=[
- ("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")
- ],
+ concentrationprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
saverecord=[("CONCENTRATION", "LAST"), ("BUDGET", "LAST")],
printrecord=[("CONCENTRATION", "LAST"), ("BUDGET", "LAST")],
)
@@ -287,9 +279,7 @@ def test_load_and_run_sim_when_namefile_uses_abs_paths(
for l in lines:
pattern = f"{model_name}."
if pattern in l:
- l = l.replace(
- pattern, str(workspace.absolute()) + os.sep + pattern
- )
+ l = l.replace(pattern, str(workspace.absolute()) + os.sep + pattern)
f.write(l)
# load, check and run simulation
@@ -301,9 +291,7 @@ def test_load_and_run_sim_when_namefile_uses_abs_paths(
@requires_exe("mf6")
@pytest.mark.parametrize("sep", ["win", "posix"])
-def test_load_sim_when_namefile_uses_rel_paths(
- function_tmpdir, example_data_path, sep
-):
+def test_load_sim_when_namefile_uses_rel_paths(function_tmpdir, example_data_path, sep):
# copy model input files to temp workspace
model_name = "freyberg"
workspace = function_tmpdir / "ws"
@@ -320,23 +308,13 @@ def test_load_sim_when_namefile_uses_rel_paths(
if sep == "win":
l = to_win_sep(
l.replace(
- pattern,
- "../"
- + workspace.name
- + "/"
- + model_name
- + ".",
+ pattern, "../" + workspace.name + "/" + model_name + "."
)
)
else:
l = to_posix_sep(
l.replace(
- pattern,
- "../"
- + workspace.name
- + "/"
- + model_name
- + ".",
+ pattern, "../" + workspace.name + "/" + model_name + "."
)
)
f.write(l)
@@ -375,23 +353,13 @@ def test_write_simulation_always_writes_posix_path_separators(
if sep == "win":
l = to_win_sep(
l.replace(
- pattern,
- "../"
- + workspace.name
- + "/"
- + model_name
- + ".",
+ pattern, "../" + workspace.name + "/" + model_name + "."
)
)
else:
l = to_posix_sep(
l.replace(
- pattern,
- "../"
- + workspace.name
- + "/"
- + model_name
- + ".",
+ pattern, "../" + workspace.name + "/" + model_name + "."
)
)
f.write(l)
@@ -516,9 +484,7 @@ def test_subdir(function_tmpdir):
), "Something wrong with model external paths"
sim_r.set_all_data_internal()
- sim_r.set_all_data_external(
- external_data_folder=os.path.join("dat", "dat_l2")
- )
+ sim_r.set_all_data_external(external_data_folder=os.path.join("dat", "dat_l2"))
sim_r.write_simulation()
sim_r2 = MFSimulation.load(
@@ -823,9 +789,7 @@ def test_binary_read(function_tmpdir):
nrow = 10
ncol = 10
- modelgrid = flopy.discretization.StructuredGrid(
- nlay=nlay, nrow=nrow, ncol=ncol
- )
+ modelgrid = flopy.discretization.StructuredGrid(nlay=nlay, nrow=nrow, ncol=ncol)
arr = np.arange(nlay * nrow * ncol).astype(np.float64)
data_shape = (nlay, nrow, ncol)
@@ -864,9 +828,10 @@ def test_binary_read(function_tmpdir):
bf, data_shape, data_size, np.float64, modelgrid
)[0]
- assert np.allclose(
- arr, arr2
- ), f"Binary read for layered structured failed with {'Path' if isinstance(binfile, Path) else 'str'}"
+ assert np.allclose(arr, arr2), (
+ "Binary read for layered structured failed with "
+ + ("Path" if isinstance(binfile, Path) else "str")
+ )
binfile = function_tmpdir / "structured_flat.hds"
with open(binfile, "wb") as foo:
@@ -934,27 +899,17 @@ def test_props_and_write(function_tmpdir):
# workspace as str
sim = MFSimulation(sim_ws=str(function_tmpdir))
assert isinstance(sim, MFSimulation)
- assert (
- sim.simulation_data.mfpath.get_sim_path()
- == function_tmpdir
- == sim.sim_path
- )
+ assert sim.simulation_data.mfpath.get_sim_path() == function_tmpdir == sim.sim_path
# workspace as Path
sim = MFSimulation(sim_ws=function_tmpdir)
assert isinstance(sim, MFSimulation)
- assert (
- sim.simulation_data.mfpath.get_sim_path()
- == function_tmpdir
- == sim.sim_path
- )
+ assert sim.simulation_data.mfpath.get_sim_path() == function_tmpdir == sim.sim_path
tdis = ModflowTdis(sim)
assert isinstance(tdis, ModflowTdis)
- gwfgwf = ModflowGwfgwf(
- sim, exgtype="gwf6-gwf6", exgmnamea="gwf1", exgmnameb="gwf2"
- )
+ gwfgwf = ModflowGwfgwf(sim, exgtype="gwf6-gwf6", exgmnamea="gwf1", exgmnameb="gwf2")
assert isinstance(gwfgwf, ModflowGwfgwf)
gwf = ModflowGwf(sim)
@@ -1088,9 +1043,7 @@ def test_set_sim_path(function_tmpdir, use_paths):
sim.set_sim_path(new_ws if use_paths else str(new_ws))
tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)]
- tdis = mftdis.ModflowTdis(
- sim, time_units="DAYS", nper=2, perioddata=tdis_rc
- )
+ tdis = mftdis.ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc)
# create model instance
model = mfgwf.ModflowGwf(
@@ -1128,9 +1081,7 @@ def test_create_and_run_model(function_tmpdir, use_paths):
sim_ws=str(function_tmpdir),
)
tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)]
- tdis = mftdis.ModflowTdis(
- sim, time_units="DAYS", nper=2, perioddata=tdis_rc
- )
+ tdis = mftdis.ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc)
# create model instance
model = mfgwf.ModflowGwf(
@@ -1169,23 +1120,10 @@ def test_create_and_run_model(function_tmpdir, use_paths):
)
ic_package = mfgwfic.ModflowGwfic(
model,
- strt=[
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- 100.0,
- ],
+ strt=[100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0],
filename=f"{model_name}.ic",
)
- npf_package = mfgwfnpf.ModflowGwfnpf(
- model, save_flows=True, icelltype=1, k=100.0
- )
+ npf_package = mfgwfnpf.ModflowGwfnpf(model, save_flows=True, icelltype=1, k=100.0)
sto_package = mfgwfsto.ModflowGwfsto(
model, save_flows=True, iconvert=1, ss=0.000001, sy=0.15
@@ -1255,9 +1193,7 @@ def test_get_set_data_record(function_tmpdir):
sim_ws=str(function_tmpdir),
)
tdis_rc = [(10.0, 4, 1.0), (6.0, 3, 1.0)]
- tdis = mftdis.ModflowTdis(
- sim, time_units="DAYS", nper=2, perioddata=tdis_rc
- )
+ tdis = mftdis.ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc)
# create model instance
model = mfgwf.ModflowGwf(
@@ -1367,9 +1303,7 @@ def test_get_set_data_record(function_tmpdir):
wel = model.get_package("wel")
spd_record = wel.stress_period_data.get_record()
well_sp_1 = spd_record[0]
- assert (
- well_sp_1["filename"] == "testrecordmodel.wel_stress_period_data_1.txt"
- )
+ assert well_sp_1["filename"] == "testrecordmodel.wel_stress_period_data_1.txt"
assert well_sp_1["binary"] is False
assert well_sp_1["data"][0][0] == (0, 9, 2)
assert well_sp_1["data"][0][1] == -50.0
@@ -1491,10 +1425,7 @@ def test_get_set_data_record(function_tmpdir):
assert 0 in spd_record
assert isinstance(spd_record[0], dict)
assert "filename" in spd_record[0]
- assert (
- spd_record[0]["filename"]
- == "testrecordmodel.rch_stress_period_data_1.txt"
- )
+ assert spd_record[0]["filename"] == "testrecordmodel.rch_stress_period_data_1.txt"
assert "binary" in spd_record[0]
assert spd_record[0]["binary"] is False
assert "data" in spd_record[0]
@@ -1510,10 +1441,7 @@ def test_get_set_data_record(function_tmpdir):
spd_record = rch_package.stress_period_data.get_record()
assert isinstance(spd_record[0], dict)
assert "filename" in spd_record[0]
- assert (
- spd_record[0]["filename"]
- == "testrecordmodel.rch_stress_period_data_1.txt"
- )
+ assert spd_record[0]["filename"] == "testrecordmodel.rch_stress_period_data_1.txt"
assert "binary" in spd_record[0]
assert spd_record[0]["binary"] is False
assert "data" in spd_record[0]
@@ -1688,9 +1616,7 @@ def test_sfr_connections(function_tmpdir, example_data_path):
sim2.set_all_data_external()
sim2.write_simulation()
success, buff = sim2.run_simulation()
- assert (
- success
- ), f"simulation {sim2.name} did not run after being reloaded"
+ assert success, f"simulation {sim2.name} did not run after being reloaded"
# test sfr recarray data
model2 = sim2.get_model()
@@ -1735,9 +1661,7 @@ def test_array(function_tmpdir):
model_name = "test_array"
out_dir = function_tmpdir
tdis_name = f"{sim_name}.tdis"
- sim = MFSimulation(
- sim_name=sim_name, version="mf6", exe_name="mf6", sim_ws=out_dir
- )
+ sim = MFSimulation(sim_name=sim_name, version="mf6", exe_name="mf6", sim_ws=out_dir)
tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0)]
tdis = ModflowTdis(sim, time_units="DAYS", nper=4, perioddata=tdis_rc)
ims_package = ModflowIms(
@@ -1756,9 +1680,7 @@ def test_array(function_tmpdir):
preconditioner_drop_tolerance=0.01,
number_orthogonalizations=2,
)
- model = ModflowGwf(
- sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
- )
+ model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
dis = ModflowGwfdis(
model,
@@ -2109,9 +2031,7 @@ def test_multi_model(function_tmpdir):
# gwf-gwf
gwfgwf_data = []
for col in range(0, ncol):
- gwfgwf_data.append(
- [(0, 0, col), (0, 0, col), 1, 0.5, 0.5, 1.0, 0.0, 1.0]
- )
+ gwfgwf_data.append([(0, 0, col), (0, 0, col), 1, 0.5, 0.5, 1.0, 0.0, 1.0])
gwfgwf = ModflowGwfgwf(
sim,
exgtype="GWF6-GWF6",
@@ -2128,9 +2048,7 @@ def test_multi_model(function_tmpdir):
wel_name_1 = wel_1.name[0]
lak_name_2 = lak_2.name[0]
package_data = [(gwf1.name, wel_name_1), (gwf2.name, lak_name_2)]
- period_data = [
- (gwf1.name, wel_name_1, 0, gwf2.name, lak_name_2, 0, "FACTOR", 1.0)
- ]
+ period_data = [(gwf1.name, wel_name_1, 0, gwf2.name, lak_name_2, 0, "FACTOR", 1.0)]
fname = "gwfgwf.input.mvr"
gwfgwf.mvr.initialize(
filename=fname,
@@ -2263,18 +2181,10 @@ def test_multi_model(function_tmpdir):
assert fi_out[1][2] is None
assert fi_out[2][2] == "MIXED"
- spca1 = ModflowUtlspca(
- gwt2, filename="gwt_model_1.rch1.spc", print_input=True
- )
- spca2 = ModflowUtlspca(
- gwt2, filename="gwt_model_1.rch2.spc", print_input=False
- )
- spca3 = ModflowUtlspca(
- gwt2, filename="gwt_model_1.rch3.spc", print_input=True
- )
- spca4 = ModflowUtlspca(
- gwt2, filename="gwt_model_1.rch4.spc", print_input=True
- )
+ spca1 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch1.spc", print_input=True)
+ spca2 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch2.spc", print_input=False)
+ spca3 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch3.spc", print_input=True)
+ spca4 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch4.spc", print_input=True)
# test writing and loading spca packages
sim2.write_simulation()
@@ -2311,8 +2221,7 @@ def test_multi_model(function_tmpdir):
with pytest.raises(
flopy.mf6.mfbase.FlopyException,
- match='Extraneous kwargs "param_does_not_exist" '
- "provided to MFPackage.",
+ match='Extraneous kwargs "param_does_not_exist" provided to MFPackage.',
):
# test kwargs error checking
wel = ModflowGwfwel(
@@ -2406,7 +2315,8 @@ def test_remove_model(function_tmpdir, example_data_path):
files = list(function_tmpdir.glob("*"))
assert not any("model2" in f.name for f in files)
- # there should be no model or solver entry for the child model in the simulation namefile
+ # there should be no model or solver entry for the child model
+ # in the simulation namefile
lines = open(function_tmpdir / "mfsim.nam").readlines()
lines = [l.lower().strip() for l in lines]
assert not any("model2" in l for l in lines)
diff --git a/autotest/test_mfnwt.py b/autotest/test_mfnwt.py
index 792e54b025..17d3c33270 100644
--- a/autotest/test_mfnwt.py
+++ b/autotest/test_mfnwt.py
@@ -30,9 +30,7 @@ def analytical_water_table_solution(h1, h2, z, R, K, L, x):
def fnwt_model_files(pattern):
path = get_example_data_path() / "nwt_test"
- return [
- os.path.join(path, f) for f in os.listdir(path) if f.endswith(pattern)
- ]
+ return [os.path.join(path, f) for f in os.listdir(path) if f.endswith(pattern)]
@pytest.mark.parametrize("nwtfile", fnwt_model_files(".nwt"))
@@ -59,9 +57,7 @@ def test_nwt_pack_load(function_tmpdir, nwtfile):
ml2 = Modflow(model_ws=function_tmpdir, version="mfnwt")
nwt2 = ModflowNwt.load(fn, ml2)
lst = [
- a
- for a in dir(nwt)
- if not a.startswith("__") and not callable(getattr(nwt, a))
+ a for a in dir(nwt) if not a.startswith("__") and not callable(getattr(nwt, a))
]
for l in lst:
msg = (
@@ -91,9 +87,7 @@ def test_nwt_model_load(function_tmpdir, namfile):
p = ml.get_package(pn)
p2 = ml2.get_package(pn)
lst = [
- a
- for a in dir(p)
- if not a.startswith("__") and not callable(getattr(p, a))
+ a for a in dir(p) if not a.startswith("__") and not callable(getattr(p, a))
]
for l in lst:
msg = (
@@ -229,9 +223,7 @@ def test_mfnwt_run(function_tmpdir):
ax.set_ylabel("Error, in m")
ax = fig.add_subplot(1, 3, 3)
- ax.plot(
- x, 100.0 * (head[0, 0, :] - hac) / hac, linewidth=1, color="blue"
- )
+ ax.plot(x, 100.0 * (head[0, 0, :] - hac) / hac, linewidth=1, color="blue")
ax.set_xlabel("Horizontal distance, in m")
ax.set_ylabel("Percent Error")
diff --git a/autotest/test_mfreadnam.py b/autotest/test_mfreadnam.py
index 8f9e7cb77c..286cffce57 100644
--- a/autotest/test_mfreadnam.py
+++ b/autotest/test_mfreadnam.py
@@ -72,9 +72,7 @@ def test_get_entries_from_namefile_mf2005(path):
id="freyberg",
),
pytest.param(
- _example_data_path
- / "freyberg_multilayer_transient"
- / "freyberg.nam",
+ _example_data_path / "freyberg_multilayer_transient" / "freyberg.nam",
{
"crs": "+proj=utm +zone=14 +ellps=WGS84 +datum=WGS84 +units=m +no_defs",
"rotation": 15.0,
diff --git a/autotest/test_mfsimlist.py b/autotest/test_mfsimlist.py
index 88f9a397b4..6cad2cac79 100644
--- a/autotest/test_mfsimlist.py
+++ b/autotest/test_mfsimlist.py
@@ -84,14 +84,12 @@ def test_mfsimlist_iterations(function_tmpdir):
it_outer = mfsimlst.get_outer_iterations()
assert it_outer == it_outer_answer, (
- f"outer iterations is not equal to {it_outer_answer} "
- + f"({it_outer})"
+ f"outer iterations is not equal to {it_outer_answer} " + f"({it_outer})"
)
it_total = mfsimlst.get_total_iterations()
assert it_total == it_total_answer, (
- f"total iterations is not equal to {it_total_answer} "
- + f"({it_total})"
+ f"total iterations is not equal to {it_total_answer} " + f"({it_total})"
)
@@ -117,8 +115,7 @@ def test_mfsimlist_memory(function_tmpdir):
virtual_memory = mfsimlst.get_memory_usage(virtual=True)
if not np.isnan(virtual_memory):
assert virtual_memory == virtual_answer, (
- f"virtual memory is not equal to {virtual_answer} "
- + f"({virtual_memory})"
+ f"virtual memory is not equal to {virtual_answer} " + f"({virtual_memory})"
)
non_virtual_memory = mfsimlst.get_non_virtual_memory_usage()
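
The mfsimlist assertions above exercise iteration and memory getters on a simulation list object. A hedged sketch; the class name and import path are inferred from the mfsimlst variable and are assumptions, and the path is a placeholder:

from flopy.mf6.utils import MfSimulationList

mfsimlst = MfSimulationList("mfsim.lst")  # placeholder path
it_outer = mfsimlst.get_outer_iterations()
it_total = mfsimlst.get_total_iterations()
virtual_memory = mfsimlst.get_memory_usage(virtual=True)
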
diff --git a/autotest/test_mnw.py b/autotest/test_mnw.py
index 20ec79188c..bb7c3adb6e 100644
--- a/autotest/test_mnw.py
+++ b/autotest/test_mnw.py
@@ -53,8 +53,7 @@ def test_load(function_tmpdir, mnw2_examples_path):
).max() < 0.01
assert (
np.abs(
- mnw2_2.stress_period_data[0].qdes
- - mnw2_3.stress_period_data[0].qdes
+ mnw2_2.stress_period_data[0].qdes - mnw2_3.stress_period_data[0].qdes
).min()
< 0.01
)
@@ -104,57 +103,9 @@ def test_make_package(function_tmpdir, dataframe):
# make the package from the tables (ztop, zbotm format)
node_data = np.array(
[
- (
- 0,
- 1,
- 1,
- 9.5,
- 7.1,
- "well1",
- "skin",
- -1,
- 0,
- 0,
- 0,
- 1.0,
- 2.0,
- 5.0,
- 6.2,
- ),
- (
- 1,
- 1,
- 1,
- 7.1,
- 5.1,
- "well1",
- "skin",
- -1,
- 0,
- 0,
- 0,
- 0.5,
- 2.0,
- 5.0,
- 6.2,
- ),
- (
- 2,
- 3,
- 3,
- 9.1,
- 3.7,
- "well2",
- "skin",
- -1,
- 0,
- 0,
- 0,
- 1.0,
- 2.0,
- 5.0,
- 4.1,
- ),
+ (0, 1, 1, 9.5, 7.1, "well1", "skin", -1, 0, 0, 0, 1.0, 2.0, 5.0, 6.2),
+ (1, 1, 1, 7.1, 5.1, "well1", "skin", -1, 0, 0, 0, 0.5, 2.0, 5.0, 6.2),
+ (2, 3, 3, 9.1, 3.7, "well2", "skin", -1, 0, 0, 0, 1.0, 2.0, 5.0, 4.1),
],
dtype=[
("index", " 0)
spd = m4.mnw2.stress_period_data[0]
inds = spd.k, spd.i, spd.j
- assert np.array_equal(
- np.array(inds).transpose(), np.array([(2, 1, 1), (1, 3, 3)])
- )
+ assert np.array_equal(np.array(inds).transpose(), np.array([(2, 1, 1), (1, 3, 3)]))
m4.write_input()
# make the package from the objects
# reuse second per pumping for last stress period
- mnw2fromobj = ModflowMnw2(
- model=m4, mnwmax=2, mnw=mnw2_4.mnw, itmp=[2, 2, -1]
- )
+ mnw2fromobj = ModflowMnw2(model=m4, mnwmax=2, mnw=mnw2_4.mnw, itmp=[2, 2, -1])
# verify that the two input methods produce the same results
assert np.array_equal(
mnw2_4.stress_period_data[1], mnw2fromobj.stress_period_data[1]
@@ -296,9 +223,7 @@ def test_make_package(function_tmpdir, dataframe):
m5 = Modflow("mnw2example", model_ws=ws)
dis = ModflowDis(nrow=5, ncol=5, nlay=3, nper=3, top=10, botm=0, model=m5)
mnw2_5 = ModflowMnw2.load(mnw2_4.fn_path, m5)
- assert np.array_equal(
- mnw2_4.stress_period_data[1], mnw2_5.stress_period_data[1]
- )
+ assert np.array_equal(mnw2_4.stress_period_data[1], mnw2_5.stress_period_data[1])
@pytest.mark.parametrize("dataframe", [True, False])
@@ -350,9 +275,7 @@ def test_mnw2_create_file(function_tmpdir, dataframe):
nnodes=nlayers[i],
nper=len(stress_period_data.index),
node_data=(
- node_data.to_records(index=False)
- if not dataframe
- else node_data
+ node_data.to_records(index=False) if not dataframe else node_data
),
stress_period_data=(
stress_period_data.to_records(index=False)
@@ -367,9 +290,7 @@ def test_mnw2_create_file(function_tmpdir, dataframe):
model=mf,
mnwmax=len(wells),
mnw=wells,
- itmp=list(
- (np.ones(len(stress_period_data.index)) * len(wellids)).astype(int)
- ),
+ itmp=list((np.ones(len(stress_period_data.index)) * len(wellids)).astype(int)),
)
if len(mnw2.node_data) != 6:
@@ -483,7 +404,7 @@ def test_blank_lines(function_tmpdir):
wellids = ["eb-33", "eb-35", "eb-36"]
rates = [np.float32(-11229.2), np.float32(-534.72), np.float32(-534.72)]
- wellids2 = sorted(list(mnw2.mnw.keys()))
+ wellids2 = sorted(mnw2.mnw.keys())
emsg = "incorrect keys returned from load mnw2"
assert wellids2 == wellids, emsg
@@ -504,9 +425,7 @@ def test_blank_lines(function_tmpdir):
def test_make_well():
w1 = Mnw(wellid="Case-1")
- assert (
- w1.wellid == "case-1"
- ), "did not correctly convert well id to lower case"
+ assert w1.wellid == "case-1", "did not correctly convert well id to lower case"
def test_checks(mnw2_examples_path):
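
One behavior worth calling out from the MNW2 hunks: well ids are normalized to lower case on construction, which the collapsed assert above checks. In brief:

from flopy.modflow import Mnw

w1 = Mnw(wellid="Case-1")
assert w1.wellid == "case-1", "did not correctly convert well id to lower case"
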
diff --git a/autotest/test_model_dot_plot.py b/autotest/test_model_dot_plot.py
index da8cac18d4..f4cfb377aa 100644
--- a/autotest/test_model_dot_plot.py
+++ b/autotest/test_model_dot_plot.py
@@ -14,9 +14,7 @@ def test_vertex_model_dot_plot(example_data_path):
rcParams["figure.max_open_warning"] = 36
# load up the vertex example problem
- sim = MFSimulation.load(
- sim_ws=example_data_path / "mf6" / "test003_gwftri_disv"
- )
+ sim = MFSimulation.load(sim_ws=example_data_path / "mf6" / "test003_gwftri_disv")
disv_ml = sim.get_model("gwf_1")
ax = disv_ml.plot()
assert isinstance(ax, list)
@@ -44,9 +42,7 @@ def test_dataset_dot_plot(function_tmpdir, example_data_path):
assert len(ax) == 2, f"number of hy axes ({len(ax)}) is not equal to 2"
-def test_dataset_dot_plot_nlay_ne_plottable(
- function_tmpdir, example_data_path
-):
+def test_dataset_dot_plot_nlay_ne_plottable(function_tmpdir, example_data_path):
import matplotlib.pyplot as plt
loadpth = example_data_path / "mf2005_test"
@@ -66,9 +62,7 @@ def test_model_dot_plot_export(function_tmpdir, example_data_path):
ml.plot(mflay=0, filename_base=fh, file_extension="png")
files = [f for f in listdir(function_tmpdir) if f.endswith(".png")]
if len(files) < 10:
- raise AssertionError(
- "ml.plot did not properly export all supported data types"
- )
+ raise AssertionError("ml.plot did not properly export all supported data types")
for f in files:
t = f.split("_")
diff --git a/autotest/test_model_splitter.py b/autotest/test_model_splitter.py
index c7b3c4b53e..c2ddeab87c 100644
--- a/autotest/test_model_splitter.py
+++ b/autotest/test_model_splitter.py
@@ -1,5 +1,6 @@
import numpy as np
import pytest
+import yaml
from modflow_devtools.markers import requires_exe, requires_pkg
from modflow_devtools.misc import set_dir
@@ -235,9 +236,7 @@ def test_save_load_node_mapping(function_tmpdir):
for k, v1 in original_node_map.items():
v2 = saved_node_map[k]
if not v1 == v2:
- raise AssertionError(
- "Node map read/write not returning proper values"
- )
+ raise AssertionError("Node map read/write not returning proper values")
array_dict = {}
for model in range(nparts):
@@ -307,9 +306,7 @@ def test_control_records(function_tmpdir):
],
)
- wel_rec = [
- ((0, 4, 5), -10),
- ]
+ wel_rec = [((0, 4, 5), -10)]
spd = {
0: wel_rec,
@@ -344,23 +341,17 @@ def test_control_records(function_tmpdir):
raise AssertionError("Constants not being preserved for MFArray")
if kls[1].data_storage_type.value != 3 or kls[1].binary:
- raise AssertionError(
- "External ascii files not being preserved for MFArray"
- )
+ raise AssertionError("External ascii files not being preserved for MFArray")
k33ls = ml1.npf.k33._data_storage.layer_storage.multi_dim_list
if k33ls[1].data_storage_type.value != 3 or not k33ls[1].binary:
- raise AssertionError(
- "Binary file input not being preserved for MFArray"
- )
+ raise AssertionError("Binary file input not being preserved for MFArray")
spd_ls1 = ml1.wel.stress_period_data.get_record(1)
spd_ls2 = ml1.wel.stress_period_data.get_record(2)
if spd_ls1["filename"] is None or spd_ls1["binary"]:
- raise AssertionError(
- "External ascii files not being preserved for MFList"
- )
+ raise AssertionError("External ascii files not being preserved for MFList")
if spd_ls2["filename"] is None or not spd_ls2["binary"]:
raise AssertionError(
@@ -389,22 +380,8 @@ def test_empty_packages(function_tmpdir):
k33=20.0,
)
ic = flopy.mf6.ModflowGwfic(gwf, strt=0.0)
- chd = flopy.mf6.ModflowGwfchd(
- gwf,
- stress_period_data={
- 0: [
- ((0, 0, 13), 0.0),
- ]
- },
- )
- wel = flopy.mf6.ModflowGwfwel(
- gwf,
- stress_period_data={
- 0: [
- ((0, 0, 0), 1.0),
- ]
- },
- )
+ chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data={0: [((0, 0, 13), 0.0)]})
+ wel = flopy.mf6.ModflowGwfwel(gwf, stress_period_data={0: [((0, 0, 0), 1.0)]})
# Build SFR records
packagedata = [
@@ -452,11 +429,7 @@ def test_empty_packages(function_tmpdir):
nreaches=14,
packagedata=packagedata,
connectiondata=connectiondata,
- perioddata={
- 0: [
- (0, "INFLOW", 1.0),
- ]
- },
+ perioddata={0: [(0, "INFLOW", 1.0)]},
)
array = np.zeros((nrow, ncol), dtype=int)
@@ -467,19 +440,19 @@ def test_empty_packages(function_tmpdir):
m0 = new_sim.get_model(f"{base_name}_0")
m1 = new_sim.get_model(f"{base_name}_1")
- if "chd_0" in m0.package_dict:
- raise AssertionError(f"Empty CHD file written to {base_name}_0 model")
-
- if "wel_0" in m1.package_dict:
- raise AssertionError(f"Empty WEL file written to {base_name}_1 model")
+ assert not m0.get_package(
+ name="chd_0"
+ ), f"Empty CHD file written to {base_name}_0 model"
+ assert not m1.get_package(
+ name="wel_0"
+ ), f"Empty WEL file written to {base_name}_1 model"
mvr_status0 = m0.sfr.mover.array
mvr_status1 = m0.sfr.mover.array
- if not mvr_status0 or not mvr_status1:
- raise AssertionError(
- "Mover status being overwritten in options splitting"
- )
+ assert (
+ mvr_status0 and mvr_status1
+ ), "Mover status being overwritten in options splitting"
@requires_exe("mf6")
@@ -566,18 +539,14 @@ def test_transient_array(function_tmpdir):
for name in new_sim.model_names:
g = new_sim.get_model(name)
d = {}
- for key in (
- 0,
- 2,
- ):
- d[key] = g.sto.steady_state.get_data(key).get_data()
+ for key in (0, 2):
+ d[key] = g.sto.steady_state.get_data(key)
assert d == steady, (
- "storage steady_state dictionary "
- + f"does not match for model '{name}'"
+ "storage steady_state dictionary " + f"does not match for model '{name}'"
)
d = {}
for key in (1,):
- d[key] = g.sto.transient.get_data(key).get_data()
+ d[key] = g.sto.transient.get_data(key)
assert d == transient, (
"storage package transient dictionary "
+ f"does not match for model '{name}'"
@@ -681,11 +650,7 @@ def test_idomain_none(function_tmpdir):
head_dict = {}
for idx, modelname in enumerate(new_sim.model_names):
mnum = int(modelname.split("_")[-1])
- h = (
- new_sim.get_model(modelname)
- .output.head()
- .get_data(kstpkper=kstpkper)
- )
+ h = new_sim.get_model(modelname).output.head().get_data(kstpkper=kstpkper)
head_dict[mnum] = h
new_head = ms.reconstruct_array(head_dict)
@@ -727,39 +692,11 @@ def test_unstructured_complex_disu(function_tmpdir):
iac, ja, ihc, cl12, hwva, angldegx = [], [], [], [], [], []
for cell, neigh in neighbors.items():
iac.append(len(neigh) + 1)
- ihc.extend(
- [
- 1,
- ]
- * (len(neigh) + 1)
- )
- ja.extend(
- [
- cell,
- ]
- + neigh
- )
- cl12.extend(
- [
- 0,
- ]
- + [
- 1,
- ]
- * len(neigh)
- )
- hwva.extend(
- [
- 0,
- ]
- + [
- 1,
- ]
- * len(neigh)
- )
- adx = [
- 0,
- ]
+ ihc.extend([1] * (len(neigh) + 1))
+ ja.extend([cell] + neigh)
+ cl12.extend([0] + [1] * len(neigh))
+ hwva.extend([0] + [1] * len(neigh))
+ adx = [0]
for n in neigh:
ev = cell - n
if ev == -1 * ncol:
@@ -829,9 +766,7 @@ def test_unstructured_complex_disu(function_tmpdir):
chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=spd)
spd = {0: [("HEAD", "LAST")]}
- oc = flopy.mf6.ModflowGwfoc(
- gwf, head_filerecord=f"{mname}.hds", saverecord=spd
- )
+ oc = flopy.mf6.ModflowGwfoc(gwf, head_filerecord=f"{mname}.hds", saverecord=spd)
sim.write_simulation()
sim.run_simulation()
@@ -859,3 +794,446 @@ def test_unstructured_complex_disu(function_tmpdir):
diff = np.abs(heads - new_heads)
if np.max(diff) > 1e-07:
raise AssertionError("Reconstructed head results outside of tolerance")
+
+
+@requires_exe("mf6")
+@requires_pkg("pymetis")
+@requires_pkg("scipy")
+def test_multi_model(function_tmpdir):
+ from scipy.spatial import KDTree
+
+ def string2geom(geostring, conversion=None):
+ if conversion is None:
+ multiplier = 1.0
+ else:
+ multiplier = float(conversion)
+ res = []
+ for line in geostring.split("\n"):
+ if not any(line):
+ continue
+ line = line.strip()
+ line = line.split(" ")
+ x = float(line[0]) * multiplier
+ y = float(line[1]) * multiplier
+ res.append((x, y))
+ return res
+
+ sim_path = function_tmpdir
+ split_sim_path = sim_path / "model_split"
+ data_path = get_example_data_path()
+
+ ascii_file = data_path / "geospatial/fine_topo.asc"
+ fine_topo = flopy.utils.Raster.load(ascii_file)
+
+ with open(data_path / "groundwater2023/geometries.yml") as foo:
+ geometry = yaml.safe_load(foo)
+
+ Lx = 180000
+ Ly = 100000
+ dx = 2500.0
+ dy = 2500.0
+ nrow = int(Ly / dy) + 1
+ ncol = int(Lx / dx) + 1
+ boundary = string2geom(geometry["boundary"])
+ bp = np.array(boundary)
+
+ stream_segs = (
+ geometry["streamseg1"],
+ geometry["streamseg2"],
+ geometry["streamseg3"],
+ geometry["streamseg4"],
+ )
+ sgs = [string2geom(sg) for sg in stream_segs]
+
+ modelgrid = flopy.discretization.StructuredGrid(
+ nlay=1,
+ delr=np.full(ncol, dx),
+ delc=np.full(nrow, dy),
+ xoff=0.0,
+ yoff=0.0,
+ top=np.full((nrow, ncol), 1000.0),
+ botm=np.full((1, nrow, ncol), -100.0),
+ )
+
+ ixs = flopy.utils.GridIntersect(modelgrid, method="vertex", rtree=True)
+ result = ixs.intersect([boundary], shapetype="Polygon")
+ r, c = list(zip(*list(result.cellids)))
+ idomain = np.zeros(modelgrid.shape, dtype=int)
+ idomain[:, r, c] = 1
+ modelgrid._idomain = idomain
+
+ top = fine_topo.resample_to_grid(
+ modelgrid,
+ band=fine_topo.bands[0],
+ method="linear",
+ extrapolate_edges=True,
+ )
+ modelgrid._top = top
+
+ # intersect stream segments
+ cellids = []
+ lengths = []
+ for sg in stream_segs:
+ sg = string2geom(sg)
+ v = ixs.intersect(sg, shapetype="LineString", sort_by_cellid=True)
+ cellids += v["cellids"].tolist()
+ lengths += v["lengths"].tolist()
+
+ r, c = list(zip(*cellids))
+ idomain[:, r, c] = 2
+ modelgrid._idomain = idomain
+
+ nlay = 5
+ dv0 = 5.0
+ hyd_cond = 10.0
+ hk = np.full((nlay, nrow, ncol), hyd_cond)
+ hk[1, :, 25:] = hyd_cond * 0.001
+ hk[3, :, 10:] = hyd_cond * 0.00005
+
+ # drain leakage
+ leakance = hyd_cond / (0.5 * dv0)
+
+ drn_data = []
+ for cellid, length in zip(cellids, lengths):
+ x = modelgrid.xcellcenters[cellid]
+ width = 5.0 + (14.0 / Lx) * (Lx - x)
+ conductance = leakance * length * width
+ if not isinstance(cellid, tuple):
+ cellid = (cellid,)
+ drn_data.append((0, *cellid, top[cellid], conductance))
+
+ discharge_data = []
+ area = dx * dy
+ for r in range(nrow):
+ for c in range(ncol):
+ if idomain[0, r, c] == 1:
+ conductance = leakance * area
+ discharge_data.append((0, r, c, top[r, c] - 0.5, conductance, 1.0))
+
+ topc = np.zeros((nlay, nrow, ncol), dtype=float)
+ botm = np.zeros((nlay, nrow, ncol), dtype=float)
+ topc[0] = modelgrid.top.copy()
+ botm[0] = topc[0] - dv0
+ for idx in range(1, nlay):
+ dv0 *= 1.5
+ topc[idx] = botm[idx - 1]
+ botm[idx] = topc[idx] - dv0
+
+ strt = np.tile([modelgrid.top], (nlay, 1, 1))
+    idomain = np.tile([modelgrid.idomain[0]], (nlay, 1, 1))
+
+ # setup recharge
+ dist_from_riv = 10000.0
+
+ grid_xx = modelgrid.xcellcenters
+ grid_yy = modelgrid.ycellcenters
+ riv_idxs = np.array(cellids)
+ riv_xx = grid_xx[riv_idxs[:, 0], riv_idxs[:, 1]]
+ riv_yy = grid_yy[riv_idxs[:, 0], riv_idxs[:, 1]]
+
+ river_xy = np.column_stack((riv_xx.ravel(), riv_yy.ravel()))
+ grid_xy = np.column_stack((grid_xx.ravel(), grid_yy.ravel()))
+ tree = KDTree(river_xy)
+ distance, index = tree.query(grid_xy)
+
+ index2d = index.reshape(nrow, ncol)
+ distance2d = distance.reshape(nrow, ncol)
+
+ mountain_array = np.asarray(distance2d > dist_from_riv).nonzero()
+ mountain_idxs = np.array(list(zip(mountain_array[0], mountain_array[1])))
+
+ valley_array = np.asarray(distance2d <= dist_from_riv).nonzero()
+ valley_idxs = np.array(list(zip(valley_array[0], valley_array[1])))
+
+ max_recharge = 0.0001
+
+ rch_orig = max_recharge * np.ones((nrow, ncol))
+
+ rch_mnt = np.zeros((nrow, ncol))
+ for idx in mountain_idxs:
+ rch_mnt[idx[0], idx[1]] = max_recharge
+
+ rch_val = np.zeros((nrow, ncol))
+ for idx in valley_idxs:
+ rch_val[idx[0], idx[1]] = max_recharge
+
+ sim = flopy.mf6.MFSimulation(
+ sim_ws=sim_path,
+ exe_name="mf6",
+ memory_print_option="summary",
+ )
+
+ nper = 10
+ nsteps = 1
+ year = 365.25
+ dt = 1000 * year
+ tdis = flopy.mf6.ModflowTdis(
+ sim, nper=nper, perioddata=nper * [(nsteps * dt, nsteps, 1.0)]
+ )
+
+ gwfname = "gwf"
+
+ imsgwf = flopy.mf6.ModflowIms(
+ sim,
+ complexity="simple",
+ print_option="SUMMARY",
+ linear_acceleration="bicgstab",
+ outer_maximum=1000,
+ inner_maximum=100,
+ outer_dvclose=1e-4,
+ inner_dvclose=1e-5,
+ preconditioner_levels=2,
+ relaxation_factor=0.0,
+ filename=f"{gwfname}.ims",
+ )
+
+ gwf = flopy.mf6.ModflowGwf(
+ sim,
+ modelname=gwfname,
+ print_input=False,
+ save_flows=True,
+ newtonoptions="NEWTON UNDER_RELAXATION",
+ )
+
+ dis = flopy.mf6.ModflowGwfdis(
+ gwf,
+ nlay=nlay,
+ nrow=nrow,
+ ncol=ncol,
+ delr=dx,
+ delc=dy,
+ idomain=idomain,
+ top=modelgrid.top,
+ botm=botm,
+ xorigin=0.0,
+ yorigin=0.0,
+ )
+
+ ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)
+
+ npf = flopy.mf6.ModflowGwfnpf(
+ gwf,
+ save_specific_discharge=True,
+ icelltype=1,
+ k=hk,
+ )
+
+ sto = flopy.mf6.ModflowGwfsto(
+ gwf,
+ save_flows=True,
+ iconvert=1,
+ ss=0.00001,
+ sy=0.35,
+ steady_state={0: True, 1: False},
+ transient={0: False, 1: True},
+ )
+
+ rch0 = flopy.mf6.ModflowGwfrcha(
+ gwf,
+ pname="rch_original",
+ recharge={0: rch_orig, 1: 0.0},
+ filename="gwf_original.rch",
+ )
+
+ rch1 = flopy.mf6.ModflowGwfrcha(
+ gwf,
+ pname="rch_mountain",
+ recharge={1: rch_mnt},
+ auxiliary="CONCENTRATION",
+ aux={1: 1.0},
+ filename="gwf_mountain.rch",
+ )
+
+ rch2 = flopy.mf6.ModflowGwfrcha(
+ gwf,
+ pname="rch_valley",
+ recharge={1: rch_val},
+ auxiliary="CONCENTRATION",
+ aux={1: 1.0},
+ filename="gwf_valley.rch",
+ )
+
+ drn = flopy.mf6.ModflowGwfdrn(
+ gwf,
+ stress_period_data=drn_data,
+ pname="river",
+ filename=f"{gwfname}_riv.drn",
+ )
+
+ drn_gwd = flopy.mf6.ModflowGwfdrn(
+ gwf,
+ auxiliary=["depth"],
+ auxdepthname="depth",
+ stress_period_data=discharge_data,
+ pname="gwd",
+ filename=f"{gwfname}_gwd.drn",
+ )
+
+ wel_spd = {0: [[4, 20, 30, 0.0], [2, 20, 60, 0.0], [2, 30, 50, 0.0]]}
+
+ wel = flopy.mf6.ModflowGwfwel(
+ gwf,
+ print_input=False,
+ print_flows=False,
+ stress_period_data=wel_spd,
+ )
+
+ oc = flopy.mf6.ModflowGwfoc(
+ gwf,
+ head_filerecord=f"{gwf.name}.hds",
+ budget_filerecord=f"{gwf.name}.cbc",
+ saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
+ printrecord=[("BUDGET", "ALL")],
+ )
+
+ sim.register_ims_package(imsgwf, [gwf.name])
+
+ def build_gwt_model(sim, gwtname, rch_package):
+ conc_start = 0.0
+ diffc = 0.0
+ alphal = 0.1
+ porosity = 0.35
+ gwf = sim.get_model("gwf")
+ modelgrid = gwf.modelgrid
+
+ gwt = flopy.mf6.ModflowGwt(
+ sim,
+ modelname=gwtname,
+ print_input=False,
+ save_flows=True,
+ )
+
+ nlay, nrow, ncol = modelgrid.shape
+
+ dis = flopy.mf6.ModflowGwtdis(
+ gwt,
+ nlay=nlay,
+ nrow=nrow,
+ ncol=ncol,
+ delr=dx,
+ delc=dy,
+ idomain=modelgrid.idomain,
+ top=modelgrid.top,
+ botm=botm,
+ xorigin=0.0,
+ yorigin=0.0,
+ )
+
+ # initial conditions
+ ic = flopy.mf6.ModflowGwtic(gwt, strt=conc_start, filename=f"{gwtname}.ic")
+
+ # advection
+ adv = flopy.mf6.ModflowGwtadv(gwt, scheme="tvd", filename=f"{gwtname}.adv")
+
+ # dispersion
+ dsp = flopy.mf6.ModflowGwtdsp(
+ gwt,
+ diffc=diffc,
+ alh=alphal,
+ alv=alphal,
+ ath1=0.0,
+ atv=0.0,
+ filename=f"{gwtname}.dsp",
+ )
+
+ # mass storage and transfer
+ mst = flopy.mf6.ModflowGwtmst(gwt, porosity=porosity, filename=f"{gwtname}.mst")
+
+ # sources
+ sourcerecarray = [
+ (rch_package, "AUX", "CONCENTRATION"),
+ ]
+ ssm = flopy.mf6.ModflowGwtssm(
+ gwt, sources=sourcerecarray, filename=f"{gwtname}.ssm"
+ )
+
+ # output control
+ oc = flopy.mf6.ModflowGwtoc(
+ gwt,
+ budget_filerecord=f"{gwtname}.cbc",
+ concentration_filerecord=f"{gwtname}.ucn",
+ saverecord=[("CONCENTRATION", "ALL"), ("BUDGET", "ALL")],
+ )
+
+ return gwt
+
+ imsgwt = flopy.mf6.ModflowIms(
+ sim,
+ complexity="complex",
+ print_option="SUMMARY",
+ linear_acceleration="bicgstab",
+ outer_maximum=1000,
+ inner_maximum=100,
+ outer_dvclose=1e-4,
+ inner_dvclose=1e-5,
+ filename="gwt.ims",
+ )
+
+ gwt_mnt = build_gwt_model(sim, "gwt_mnt", "rch_mountain")
+ sim.register_ims_package(imsgwt, [gwt_mnt.name])
+
+ gwt_val = build_gwt_model(sim, "gwt_val", "rch_valley")
+ sim.register_ims_package(imsgwt, [gwt_val.name])
+
+ gwfgwt = flopy.mf6.ModflowGwfgwt(
+ sim,
+ exgtype="GWF6-GWT6",
+ exgmnamea=gwfname,
+ exgmnameb=gwt_mnt.name,
+ filename="gwfgwt_mnt.exg",
+ )
+
+ gwfgwt = flopy.mf6.ModflowGwfgwt(
+ sim,
+ exgtype="GWF6-GWT6",
+ exgmnamea=gwfname,
+ exgmnameb=gwt_val.name,
+ filename="gwfgwt_val.exg",
+ )
+
+ sim.write_simulation()
+ sim.run_simulation()
+
+ nparts = 2
+ mfs = Mf6Splitter(sim)
+ array = mfs.optimize_splitting_mask(nparts)
+ new_sim = mfs.split_multi_model(array)
+ new_sim.set_sim_path(split_sim_path)
+ new_sim.write_simulation()
+ new_sim.run_simulation()
+
+ # compare results for each of the models
+ splits = list(range(nparts))
+ for name in sim.model_names:
+ gwm = sim.get_model(name)
+ if "concentration()" in gwm.output.methods():
+ X = gwm.output.concentration().get_alldata()[-1]
+ else:
+ X = gwm.output.head().get_alldata()[-1]
+
+ array_dict = {}
+ for split in splits:
+ mname = f"{name}_{split}"
+ sp_gwm = new_sim.get_model(mname)
+ if "concentration()" in sp_gwm.output.methods():
+ X0 = sp_gwm.output.concentration().get_alldata()[-1]
+ else:
+ X0 = sp_gwm.output.head().get_alldata()[-1]
+
+ array_dict[split] = X0
+
+ X_split = mfs.reconstruct_array(array_dict)
+
+ err_msg = f"Outputs from {name} and split model are not within tolerance"
+ X_split[idomain == 0] = np.nan
+ X[idomain == 0] = np.nan
+ if name == "gwf":
+ np.testing.assert_allclose(X, X_split, equal_nan=True, err_msg=err_msg)
+ else:
+ diff = np.abs(X_split - X)
+ diff = np.nansum(diff)
+ if diff > 10.25:
+ raise AssertionError(
+ "Difference between output arrays: "
+ f"{diff :.2f} greater than tolerance"
+ )
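
For reference, the splitting workflow this new test exercises reduces to a few calls.
A minimal sketch, assuming an existing MFSimulation called sim and a workspace path
(both placeholders); the Mf6Splitter calls mirror the ones used above:

    from flopy.mf6.utils import Mf6Splitter

    mfs = Mf6Splitter(sim)
    mask = mfs.optimize_splitting_mask(2)  # pymetis-balanced part number per cell
    new_sim = mfs.split_multi_model(mask)  # one model per part, exchanges included
    new_sim.set_sim_path(workspace / "model_split")
    new_sim.write_simulation()
    new_sim.run_simulation()

    # stitch split-model output back onto the original grid
    head_dict = {}
    for name in new_sim.model_names:
        mnum = int(name.split("_")[-1])
        head_dict[mnum] = new_sim.get_model(name).output.head().get_alldata()[-1]
    full_head = mfs.reconstruct_array(head_dict)
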
diff --git a/autotest/test_modflow.py b/autotest/test_modflow.py
index 5239ce4c66..304b4f6f9a 100644
--- a/autotest/test_modflow.py
+++ b/autotest/test_modflow.py
@@ -3,7 +3,6 @@
import os
import shutil
from pathlib import Path
-from typing import Dict
import numpy as np
import pandas as pd
@@ -48,10 +47,7 @@ def parameters_model_path(example_data_path):
@pytest.mark.parametrize(
"namfile",
[Path("freyberg") / "freyberg.nam"]
- + [
- Path("parameters") / f"{nf}.nam"
- for nf in ["Oahu_01", "twrip", "twrip_upw"]
- ],
+ + [Path("parameters") / f"{nf}.nam" for nf in ["Oahu_01", "twrip", "twrip_upw"]],
)
def test_modflow_load(namfile, example_data_path):
mpath = Path(example_data_path / namfile).parent
@@ -86,21 +82,13 @@ def test_modflow_load(namfile, example_data_path):
[
pytest.param(
_example_data_path / "freyberg" / "freyberg.nam",
- {
- "crs": None,
- "epsg": None,
- "angrot": 0.0,
- "xoffset": 0.0,
- "yoffset": 0.0,
- },
+ {"crs": None, "epsg": None, "angrot": 0.0, "xoffset": 0.0, "yoffset": 0.0},
id="freyberg",
),
pytest.param(
- _example_data_path
- / "freyberg_multilayer_transient"
- / "freyberg.nam",
+ _example_data_path / "freyberg_multilayer_transient" / "freyberg.nam",
{
- "proj4": "+proj=utm +zone=14 +ellps=WGS84 +datum=WGS84 +units=m +no_defs",
+ "proj4": "+proj=utm +zone=14 +ellps=WGS84 +datum=WGS84 +units=m +no_defs", # noqa
"angrot": 15.0,
"xoffset": 622241.1904510253,
"yoffset": 3343617.741737109,
@@ -113,12 +101,7 @@ def test_modflow_load(namfile, example_data_path):
/ "mfnwt_mt3dusgs"
/ "sft_crnkNic"
/ "CrnkNic.nam",
- {
- "epsg": 26916,
- "angrot": 0.0,
- "xoffset": 0.0,
- "yoffset": 0.0,
- },
+ {"epsg": 26916, "angrot": 0.0, "xoffset": 0.0, "yoffset": 0.0},
id="CrnkNic",
),
],
@@ -145,9 +128,7 @@ def test_modflow_load_when_nam_dne():
def test_mbase_modelgrid(function_tmpdir):
- ml = Modflow(
- modelname="test", xll=500.0, rotation=12.5, start_datetime="1/1/2016"
- )
+ ml = Modflow(modelname="test", xll=500.0, rotation=12.5, start_datetime="1/1/2016")
try:
print(ml.modelgrid.xcentergrid)
except:
@@ -217,12 +198,8 @@ def test_mt_modelgrid(function_tmpdir):
verbose=True,
)
- assert (
- swt.modelgrid.xoffset == mt.modelgrid.xoffset == ml.modelgrid.xoffset
- )
- assert (
- swt.modelgrid.yoffset == mt.modelgrid.yoffset == ml.modelgrid.yoffset
- )
+ assert swt.modelgrid.xoffset == mt.modelgrid.xoffset == ml.modelgrid.xoffset
+ assert swt.modelgrid.yoffset == mt.modelgrid.yoffset == ml.modelgrid.yoffset
assert mt.modelgrid.crs == ml.modelgrid.crs == swt.modelgrid.crs
assert mt.modelgrid.angrot == ml.modelgrid.angrot == swt.modelgrid.angrot
assert np.array_equal(mt.modelgrid.idomain, ml.modelgrid.idomain)
@@ -251,12 +228,8 @@ def test_mt_modelgrid(function_tmpdir):
verbose=True,
)
- assert (
- ml.modelgrid.xoffset == mt.modelgrid.xoffset == swt.modelgrid.xoffset
- )
- assert (
- mt.modelgrid.yoffset == ml.modelgrid.yoffset == swt.modelgrid.yoffset
- )
+ assert ml.modelgrid.xoffset == mt.modelgrid.xoffset == swt.modelgrid.xoffset
+ assert mt.modelgrid.yoffset == ml.modelgrid.yoffset == swt.modelgrid.yoffset
assert mt.modelgrid.crs == ml.modelgrid.crs == swt.modelgrid.crs
assert mt.modelgrid.angrot == ml.modelgrid.angrot == swt.modelgrid.angrot
assert np.array_equal(mt.modelgrid.idomain, ml.modelgrid.idomain)
@@ -273,14 +246,11 @@ def test_exe_selection(example_data_path, function_tmpdir):
assert Path(Modflow().exe_name).stem == exe_name
assert Path(Modflow(exe_name=None).exe_name).stem == exe_name
assert (
- Path(Modflow.load(namfile_path, model_ws=model_path).exe_name).stem
- == exe_name
+ Path(Modflow.load(namfile_path, model_ws=model_path).exe_name).stem == exe_name
)
assert (
Path(
- Modflow.load(
- namfile_path, exe_name=None, model_ws=model_path
- ).exe_name
+ Modflow.load(namfile_path, exe_name=None, model_ws=model_path).exe_name
).stem
== exe_name
)
@@ -291,21 +261,19 @@ def test_exe_selection(example_data_path, function_tmpdir):
assert Path(Modflow(exe_name=exe_name).exe_name).stem == exe_name
assert (
Path(
- Modflow.load(
- namfile_path, exe_name=exe_name, model_ws=model_path
- ).exe_name
+ Modflow.load(namfile_path, exe_name=exe_name, model_ws=model_path).exe_name
).stem
== exe_name
)
- # init/load should warn if exe DNE
+ # init/load should warn if exe does not exist
exe_name = "not_an_exe"
with pytest.warns(UserWarning):
ml = Modflow(exe_name=exe_name)
with pytest.warns(UserWarning):
ml = Modflow.load(namfile_path, exe_name=exe_name, model_ws=model_path)
- # run should error if exe DNE
+ # run should error if exe does not exist
ml = Modflow.load(namfile_path, exe_name=exe_name, model_ws=model_path)
ml.change_model_ws(function_tmpdir)
ml.write_input()
@@ -421,13 +389,9 @@ def test_load_twri_grid(example_data_path):
name = "twri.nam"
ml = Modflow.load(name, model_ws=mpath, check=False)
mg = ml.modelgrid
- assert isinstance(
- mg, StructuredGrid
- ), "modelgrid is not an StructuredGrid instance"
+    assert isinstance(mg, StructuredGrid), "modelgrid is not a StructuredGrid instance"
shape = (3, 15, 15)
- assert (
- mg.shape == shape
- ), f"modelgrid shape {mg.shape} not equal to {shape}"
+ assert mg.shape == shape, f"modelgrid shape {mg.shape} not equal to {shape}"
thickness = mg.cell_thickness
shape = (5, 15, 15)
assert (
@@ -485,9 +449,7 @@ def test_mg(function_tmpdir):
# test that transform for arbitrary coordinates
# is working in same as transform for model grid
- mg2 = StructuredGrid(
- delc=ms.dis.delc.array, delr=ms.dis.delr.array, lenuni=2
- )
+ mg2 = StructuredGrid(delc=ms.dis.delc.array, delr=ms.dis.delr.array, lenuni=2)
x = mg2.xcellcenters[0]
y = mg2.ycellcenters[0]
mg2.set_coord_info(xoff=xll, yoff=yll, angrot=angrot)
@@ -523,9 +485,7 @@ def test_dynamic_xll_yll():
xll, yll = 286.80, 29.03
# test scaling of length units
ms2 = Modflow()
- dis = ModflowDis(
- ms2, nlay=nlay, nrow=nrow, ncol=ncol, delr=delr, delc=delc
- )
+ dis = ModflowDis(ms2, nlay=nlay, nrow=nrow, ncol=ncol, delr=delr, delc=delc)
ms2.modelgrid.set_coord_info(xoff=xll, yoff=yll, angrot=30.0)
xll1, yll1 = ms2.modelgrid.xoffset, ms2.modelgrid.yoffset
@@ -977,10 +937,7 @@ def test_properties_check(function_tmpdir):
)
ind3_errors = chk.summary_array[ind3]["desc"]
- assert (
- "zero or negative horizontal hydraulic conductivity values"
- in ind1_errors
- )
+ assert "zero or negative horizontal hydraulic conductivity values" in ind1_errors
assert (
"horizontal hydraulic conductivity values below checker threshold of 1e-11"
in ind1_errors
@@ -994,10 +951,7 @@ def test_properties_check(function_tmpdir):
"horizontal hydraulic conductivity values above checker threshold of 100000.0"
in ind2_errors
)
- assert (
- "zero or negative vertical hydraulic conductivity values"
- in ind2_errors
- )
+ assert "zero or negative vertical hydraulic conductivity values" in ind2_errors
assert (
"vertical hydraulic conductivity values above checker threshold of 100000.0"
in ind3_errors
@@ -1043,9 +997,7 @@ def test_rchload(function_tmpdir):
m1 = Modflow("rchload1", model_ws=ws)
dis1 = ModflowDis(m1, nlay=nlay, nrow=nrow, ncol=ncol, nper=nper)
a = np.random.random((nrow, ncol))
- rech1 = Util2d(
- m1, (nrow, ncol), np.float32, a, "rech", cnstnt=1.0, how="openclose"
- )
+ rech1 = Util2d(m1, (nrow, ncol), np.float32, a, "rech", cnstnt=1.0, how="openclose")
rch1 = ModflowRch(m1, rech={0: rech1})
m1.write_input()
@@ -1060,9 +1012,7 @@ def test_rchload(function_tmpdir):
m2 = Modflow("rchload2", model_ws=ws)
dis2 = ModflowDis(m2, nlay=nlay, nrow=nrow, ncol=ncol, nper=nper)
a = np.random.random((nrow, ncol))
- rech2 = Util2d(
- m2, (nrow, ncol), np.float32, a, "rech", cnstnt=2.0, how="openclose"
- )
+ rech2 = Util2d(m2, (nrow, ncol), np.float32, a, "rech", cnstnt=2.0, how="openclose")
rch2 = ModflowRch(m2, rech={0: rech2})
m2.write_input()
@@ -1222,14 +1172,7 @@ def test_load_with_list_reader(function_tmpdir):
# create the wells, but use an all float dtype to write a binary file
# use one-based values
- weldt = np.dtype(
- [
-            ("k", "
        assert ipos >= 0, f"'{wrn_msg}' warning message not issued"
- assert (
- w[ipos].category == UserWarning
- ), f"Warning category: {w[0].category}"
+ assert w[ipos].category == UserWarning, f"Warning category: {w[0].category}"
    rct = Mt3dRct(mt)
    gcg = Mt3dGcg(mt)
@@ -501,27 +497,13 @@ def test_mt3d_create_woutmfmodel(function_tmpdir):
mt.write_input()
# confirm that MT3D files exist
- assert os.path.isfile(
- os.path.join(model_ws, f"{mt.name}.{btn.extension[0]}")
- )
- assert os.path.isfile(
- os.path.join(model_ws, f"{mt.name}.{adv.extension[0]}")
- )
- assert os.path.isfile(
- os.path.join(model_ws, f"{mt.name}.{dsp.extension[0]}")
- )
- assert os.path.isfile(
- os.path.join(model_ws, f"{mt.name}.{ssm.extension[0]}")
- )
- assert os.path.isfile(
- os.path.join(model_ws, f"{mt.name}.{gcg.extension[0]}")
- )
- assert os.path.isfile(
- os.path.join(model_ws, f"{mt.name}.{rct.extension[0]}")
- )
- assert os.path.isfile(
- os.path.join(model_ws, f"{mt.name}.{tob.extension[0]}")
- )
+ assert os.path.isfile(os.path.join(model_ws, f"{mt.name}.{btn.extension[0]}"))
+ assert os.path.isfile(os.path.join(model_ws, f"{mt.name}.{adv.extension[0]}"))
+ assert os.path.isfile(os.path.join(model_ws, f"{mt.name}.{dsp.extension[0]}"))
+ assert os.path.isfile(os.path.join(model_ws, f"{mt.name}.{ssm.extension[0]}"))
+ assert os.path.isfile(os.path.join(model_ws, f"{mt.name}.{gcg.extension[0]}"))
+ assert os.path.isfile(os.path.join(model_ws, f"{mt.name}.{rct.extension[0]}"))
+ assert os.path.isfile(os.path.join(model_ws, f"{mt.name}.{tob.extension[0]}"))
def test_mt3d_pht3d(function_tmpdir):
@@ -536,9 +518,7 @@ def test_mt3d_pht3d(function_tmpdir):
mt.write_input()
# confirm that MT3D files exist
- assert os.path.isfile(
- os.path.join(model_ws, f"{mt.name}.{phc.extension[0]}")
- )
+ assert os.path.isfile(os.path.join(model_ws, f"{mt.name}.{phc.extension[0]}"))
def test_mt3d_multispecies(function_tmpdir):
@@ -548,10 +528,7 @@ def test_mt3d_multispecies(function_tmpdir):
nrow = 20
ncol = 20
nper = 10
- mf = Modflow(
- modelname=modelname,
- model_ws=function_tmpdir,
- )
+ mf = Modflow(modelname=modelname, model_ws=function_tmpdir)
dis = ModflowDis(mf, nlay=nlay, nrow=nrow, ncol=ncol, nper=nper)
lpf = ModflowLpf(mf)
rch = ModflowRch(mf)
@@ -567,9 +544,7 @@ def test_mt3d_multispecies(function_tmpdir):
verbose=True,
)
sconc3 = np.random.random((nrow, ncol))
- btn = Mt3dBtn(
- mt, ncomp=ncomp, sconc=1.0, sconc2=2.0, sconc3=sconc3, sconc5=5.0
- )
+ btn = Mt3dBtn(mt, ncomp=ncomp, sconc=1.0, sconc2=2.0, sconc3=sconc3, sconc5=5.0)
# check obs I/O
mt.btn.obs = np.array([[0, 2, 300], [0, 1, 250]])
crch32 = np.random.random((nrow, ncol))
@@ -593,10 +568,7 @@ def test_mt3d_multispecies(function_tmpdir):
# Create a second MODFLOW model
modelname2 = "multispecies2"
- mf2 = Modflow(
- modelname=modelname2,
- model_ws=function_tmpdir,
- )
+ mf2 = Modflow(modelname=modelname2, model_ws=function_tmpdir)
dis2 = ModflowDis(mf2, nlay=nlay, nrow=nrow, ncol=ncol, nper=nper)
# Load the MT3D model into mt2 and then write it out
@@ -17799,9 +17771,7 @@ def test_lkt_with_multispecies(function_tmpdir):
mxpart = 5000
nadvfd = 1 # (1 = Upstream weighting)
- adv = Mt3dAdv(
- mt, mixelm=mixelm, percel=percel, mxpart=mxpart, nadvfd=nadvfd
- )
+ adv = Mt3dAdv(mt, mixelm=mixelm, percel=percel, mxpart=mxpart, nadvfd=nadvfd)
## Instantiate generalized conjugate gradient solver (GCG)
# package for MT3D-USGS
@@ -17970,9 +17940,7 @@ def test_mt3d_ssm_with_nodata_in_1st_sp(function_tmpdir):
assert success, "MT3D did not run"
ws = function_tmpdir / "ws2"
- mf2 = Modflow.load(
- "model_mf.nam", model_ws=function_tmpdir, exe_name="mf2005"
- )
+ mf2 = Modflow.load("model_mf.nam", model_ws=function_tmpdir, exe_name="mf2005")
mf2.change_model_ws(ws)
mt2 = Mt3dms.load(
"model_mt.nam",
@@ -18007,9 +17975,7 @@ def test_none_spdtype(function_tmpdir):
wel = ModflowWel(mf, stress_period_data=spd)
pcg = ModflowPcg(mf)
mf.write_input()
- mf2 = Modflow.load(
- "modflowtest.nam", model_ws=function_tmpdir, verbose=True
- )
+ mf2 = Modflow.load("modflowtest.nam", model_ws=function_tmpdir, verbose=True)
success, buff = mf.run_model(report=True)
assert success
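
The MT3D hunks above keep reformatting one recurring pattern: load the flow model,
then load the transport model against it. A hedged sketch, with ws and the file
names standing in for real inputs:

    from flopy.modflow import Modflow
    from flopy.mt3d import Mt3dms

    mf = Modflow.load("model_mf.nam", model_ws=ws, exe_name="mf2005")
    mt = Mt3dms.load(
        "model_mt.nam",
        model_ws=ws,
        modflowmodel=mf,  # attach the transport model to its flow model
        exe_name="mt3dms",
    )
    mt.change_model_ws(ws / "ws2")
    mt.write_input()
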
diff --git a/autotest/test_nwt_ag.py b/autotest/test_nwt_ag.py
index 5dc285ca93..8a76aa08e6 100644
--- a/autotest/test_nwt_ag.py
+++ b/autotest/test_nwt_ag.py
@@ -27,11 +27,7 @@ def test_load_write_agwater(function_tmpdir, example_data_path):
ml.change_model_ws(function_tmpdir)
ag1.write_file()
- ml2 = Modflow(
- "Agwater1",
- version="mfnwt",
- model_ws=function_tmpdir,
- )
+ ml2 = Modflow("Agwater1", version="mfnwt", model_ws=function_tmpdir)
ag2 = ModflowAg.load(function_tmpdir / agfile, ml2, nper=49)
assert repr(ag1) == repr(ag2), "comparison failed"
@@ -55,11 +51,7 @@ def test_load_write_agwater_uzf(function_tmpdir, example_data_path):
ml.change_model_ws(function_tmpdir)
uzf1.write_file()
- ml2 = Modflow(
- "Agwater1",
- version="mfnwt",
- model_ws=function_tmpdir,
- )
+ ml2 = Modflow("Agwater1", version="mfnwt", model_ws=function_tmpdir)
dis2 = ModflowDis(ml2, nlay=1, nrow=15, ncol=10, nper=49)
uzf2 = ModflowUzf1.load(function_tmpdir / uzffile, ml2)
diff --git a/autotest/test_obs.py b/autotest/test_obs.py
index 215b1893a4..6f8664c628 100644
--- a/autotest/test_obs.py
+++ b/autotest/test_obs.py
@@ -32,12 +32,7 @@ def test_hob_simple(function_tmpdir):
shape2d = (nrow, ncol)
ib = np.ones(shape3d, dtype=int)
ib[0, 0, 0] = -1
- m = Modflow(
- modelname=modelname,
- model_ws=ws,
- verbose=False,
- exe_name="mf2005",
- )
+ m = Modflow(modelname=modelname, model_ws=ws, verbose=False, exe_name="mf2005")
dis = ModflowDis(m, nlay=1, nrow=11, ncol=11, nper=2, perlen=[1, 1])
bas = ModflowBas(m, ibound=ib, strt=10.0)
@@ -124,9 +119,7 @@ def test_obs_load_and_write(function_tmpdir, example_data_path):
shutil.copyfile(src, dst)
# load the modflow model
- mf = Modflow.load(
- "tc1-true.nam", verbose=True, model_ws=ws, exe_name="mf2005"
- )
+ mf = Modflow.load("tc1-true.nam", verbose=True, model_ws=ws, exe_name="mf2005")
# run the modflow-2005 model
success, buff = mf.run_model(silent=False)
@@ -164,9 +157,7 @@ def test_obs_load_and_write(function_tmpdir, example_data_path):
raise ValueError("could not load new HOB output file")
# load the modflow model
- m = Modflow.load(
- "tc1-true.nam", verbose=True, model_ws=ws, exe_name="mf2005"
- )
+ m = Modflow.load("tc1-true.nam", verbose=True, model_ws=ws, exe_name="mf2005")
model_ws2 = os.path.join(ws, "flwob")
m.change_model_ws(new_pth=model_ws2, reset_external=True)
@@ -237,9 +228,7 @@ def test_obs_load_and_write(function_tmpdir, example_data_path):
s = f"nqtfb loaded from {m.drob.fn_path} read incorrectly"
assert drob.nqtfb == m.drob.nqtfb, s
s = f"obsnam loaded from {m.drob.fn_path} read incorrectly"
- assert list([n for n in drob.obsnam]) == list(
- [n for n in m.drob.obsnam]
- ), s
+ assert list(drob.obsnam) == list(m.drob.obsnam), s
s = f"flwobs loaded from {m.drob.fn_path} read incorrectly"
assert np.array_equal(drob.flwobs, m.drob.flwobs), s
s = f"layer loaded from {m.drob.fn_path} read incorrectly"
@@ -419,12 +408,10 @@ def test_multilayerhob_pr_multiline():
problem_hob = [
"2 4 7",
"1 1",
- "A19E1_1 -2 140 91 1 1 -0.28321 -0.05389"
- " 69 1 1 1 # A19E1 8/13/1975",
+ "A19E1_1 -2 140 91 1 1 -0.28321 -0.05389 69 1 1 1 # A19E1 8/13/1975",
"3 0.954",
"4 0.046",
- "A19E1_2 -2 140 91 1 1 -0.28321 -0.05389"
- " 72 1 1 1 # A19E1 10/9/1975",
+ "A19E1_2 -2 140 91 1 1 -0.28321 -0.05389 72 1 1 1 # A19E1 10/9/1975",
"3 0.954",
"4 0.046",
]
diff --git a/autotest/test_particledata.py b/autotest/test_particledata.py
index f5363f957d..e183bc962b 100644
--- a/autotest/test_particledata.py
+++ b/autotest/test_particledata.py
@@ -39,11 +39,7 @@ def get_nn(grid: StructuredGrid, k, i, j):
def flatten(a):
return [
- [
- *chain.from_iterable(
- xx if isinstance(xx, tuple) else [xx] for xx in x
- )
- ]
+ [*chain.from_iterable(xx if isinstance(xx, tuple) else [xx] for xx in x)]
for x in a
]
@@ -57,9 +53,7 @@ def test_get_extent_structured_multilayer():
for k in range(grid.nlay):
extent = get_extent(grid, k=k, i=i, j=j)
assert extent.minz == grid.botm[k, i, j]
- assert extent.maxz == (
- grid.top[i, j] if k == 0 else grid.botm[k - 1, i, j]
- )
+ assert extent.maxz == (grid.top[i, j] if k == 0 else grid.botm[k - 1, i, j])
# test initializers
@@ -222,9 +216,8 @@ def test_particledata_to_prp_dis_1():
# check conversion
assert len(rpts_prt) == len(cells)
- assert all(
- len(c) == 7 for c in rpts_prt
- ) # each coord should be a tuple (irpt, k, i, j, x, y, z)
+ # each coord should be a tuple (irpt, k, i, j, x, y, z)
+ assert all(len(c) == 7 for c in rpts_prt)
# expected
exp = np.rec.fromrecords(
@@ -296,9 +289,7 @@ def test_particledata_to_prp_dis_9():
@pytest.mark.parametrize("lx", [None, 0.5, 0.25]) # local x coord
@pytest.mark.parametrize("ly", [None, 0.5, 0.25]) # local y coord
-@pytest.mark.parametrize(
- "localz", [False, True]
-) # whether to return local z coords
+@pytest.mark.parametrize("localz", [False, True]) # whether to return local z coords
def test_particledata_to_prp_disv_1(lx, ly, localz):
"""
1 particle in bottom left cell, testing with default
@@ -328,9 +319,8 @@ def test_particledata_to_prp_disv_1(lx, ly, localz):
# check conversion succeeded
assert len(rpts_prt) == len(locs)
- assert all(
- len(c) == 6 for c in rpts_prt
- ) # each coord should be a tuple (irpt, k, j, x, y, z)
+ # each coord should be a tuple (irpt, k, j, x, y, z)
+ assert all(len(c) == 6 for c in rpts_prt)
for rpt in rpts_prt:
assert np.isclose(rpt[3], lx[0] if lx else 0.5) # check x
assert np.isclose(rpt[4], ly[0] if ly else 0.5) # check y
@@ -356,9 +346,7 @@ def test_particledata_to_prp_disv_1(lx, ly, localz):
# plt.show()
-@pytest.mark.parametrize(
- "localz", [False, True]
-) # whether to return local z coords
+@pytest.mark.parametrize("localz", [False, True]) # whether to return local z coords
def test_particledata_to_prp_disv_9(localz):
# minimal vertex grid
grid = GridCases().vertex_small()
@@ -403,23 +391,17 @@ def test_particledata_to_prp_disv_9(localz):
assert np.allclose(rpts_prt, rpts_exp, atol=1e-3)
-@pytest.mark.parametrize(
- "localz", [False, True]
-) # whether to return local z coords
+@pytest.mark.parametrize("localz", [False, True]) # whether to return local z coords
def test_lrcparticledata_to_prp_divisions_defaults(localz, array_snapshot):
sd_data = CellDataType()
regions = [[0, 0, 1, 0, 1, 1]]
- part_data = LRCParticleData(
- subdivisiondata=[sd_data], lrcregions=[regions]
- )
+ part_data = LRCParticleData(subdivisiondata=[sd_data], lrcregions=[regions])
grid = GridCases().structured_small()
rpts_prt = flatten(list(part_data.to_prp(grid, localz=localz)))
num_cells = reduce(
sum,
[
- (lrc[3] - lrc[0] + 1)
- * (lrc[4] - lrc[1] + 1)
- * (lrc[5] - lrc[2] + 1)
+ (lrc[3] - lrc[0] + 1) * (lrc[4] - lrc[1] + 1) * (lrc[5] - lrc[2] + 1)
for lrc in regions
],
)
@@ -523,9 +505,8 @@ def test_lrcparticledata_to_prp_top_bottom():
for lrc in lrcregions
]
)
- assert (
- len(rpts_prt) == num_cells * rd * cd * 2
- ) # 1 particle each on top and bottom faces
+ # 1 particle each on top and bottom faces
+ assert len(rpts_prt) == num_cells * rd * cd * 2
# particle should be centered on each face
verts = grid.get_cell_vertices(1, 1)
@@ -568,9 +549,7 @@ def test_lrcparticledata_to_prp_1_per_face(array_snapshot):
assert rpts_prt == array_snapshot
-@pytest.mark.parametrize(
- "localz", [False, True]
-) # whether to return local z coords
+@pytest.mark.parametrize("localz", [False, True]) # whether to return local z coords
def test_nodeparticledata_to_prp_disv_defaults(
function_tmpdir, example_data_path, localz
):
@@ -585,9 +564,7 @@ def test_nodeparticledata_to_prp_disv_defaults(
pdat = NodeParticleData()
# load gwf simulation, switch workspace, write input files, and run
- sim = MFSimulation.load(
- sim_ws=example_data_path / "mf6" / "test003_gwfs_disv"
- )
+ sim = MFSimulation.load(sim_ws=example_data_path / "mf6" / "test003_gwfs_disv")
gwf = sim.get_model("gwf_1")
grid = gwf.modelgrid
gwf_name = "gwf"
@@ -635,8 +612,7 @@ def test_nodeparticledata_to_prp_disv_defaults(
mp7_pls = pd.concat([pd.DataFrame(ra) for ra in pldata])
mp7_pls = mp7_pls.sort_values(by=["time", "particleid"]).head(27)
mp7_rpts = [
- [0, r.k, r.x, r.y, r.zloc if localz else r.z]
- for r in mp7_pls.itertuples()
+ [0, r.k, r.x, r.y, r.zloc if localz else r.z] for r in mp7_pls.itertuples()
] # omit rpt index
mp7_rpts.sort()
@@ -750,9 +726,7 @@ def test_nodeparticledata_prp_disv_big(function_tmpdir):
rowdivisions6=4,
columndivisions6=4,
)
- pgdata = flopy.modpath.NodeParticleData(
- subdivisiondata=facedata, nodes=nodew
- )
+ pgdata = flopy.modpath.NodeParticleData(subdivisiondata=facedata, nodes=nodew)
# convert to PRP package data
rpts_prt = flatten(list(pgdata.to_prp(grid)))
@@ -804,9 +778,7 @@ def test_lrcparticledata_write(function_tmpdir):
@pytest.fixture
def mf6_sim(function_tmpdir):
name = "tiny-gwf"
- sim = flopy.mf6.MFSimulation(
- sim_name=name, sim_ws=function_tmpdir, exe_name="mf6"
- )
+ sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=function_tmpdir, exe_name="mf6")
tdis = flopy.mf6.ModflowTdis(sim)
ims = flopy.mf6.ModflowIms(sim)
gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
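
Most of the assertions reflowed above revolve around to_prp(), which converts
MODPATH-style particle configurations into MODFLOW 6 PRT release-point records.
A small sketch under the same conventions; the two-by-two grid is a stand-in:

    import numpy as np

    from flopy.discretization import StructuredGrid
    from flopy.modpath import CellDataType, LRCParticleData

    grid = StructuredGrid(
        delc=np.ones(2),
        delr=np.ones(2),
        top=np.ones((2, 2)),
        botm=np.zeros((1, 2, 2)),
    )
    # one (minlay, minrow, mincol, maxlay, maxrow, maxcol) region,
    # default cell subdivisions
    pdata = LRCParticleData(
        subdivisiondata=[CellDataType()],
        lrcregions=[[[0, 0, 0, 0, 1, 1]]],
    )
    for rpt in pdata.to_prp(grid):
        print(rpt)  # release point: particle index, cell id, then x, y, z
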
diff --git a/autotest/test_particlegroup.py b/autotest/test_particlegroup.py
index a664d74e7f..e98b9ab379 100644
--- a/autotest/test_particlegroup.py
+++ b/autotest/test_particlegroup.py
@@ -44,7 +44,8 @@ def test_pgroup_release_data():
)
assert type(pgrd2.releaseinterval) == type(ripg2), (
f"mp7: pgroup with releaseoption 2 returned "
- f"type(releaseinterval)={type(pgrd2.releaseinterval)}. Should remain as {type(ripg2)}"
+ f"type(releaseinterval)={type(pgrd2.releaseinterval)}. "
+ f"Should remain as {type(ripg2)}"
)
assert len(pgrd3.releasetimes) == nripg3, (
f"mp7: pgroup with releaseoption 3 returned "
diff --git a/autotest/test_plot_cross_section.py b/autotest/test_plot_cross_section.py
index 4340d3471a..a0f058e665 100644
--- a/autotest/test_plot_cross_section.py
+++ b/autotest/test_plot_cross_section.py
@@ -9,9 +9,7 @@
@pytest.mark.mf6
-@pytest.mark.xfail(
- reason="sometimes get LineCollections instead of PatchCollections"
-)
+@pytest.mark.xfail(reason="sometimes get LineCollections instead of PatchCollections")
def test_cross_section_bc_gwfs_disv(example_data_path):
mpath = example_data_path / "mf6" / "test003_gwfs_disv"
sim = MFSimulation.load(sim_ws=mpath)
@@ -29,9 +27,7 @@ def test_cross_section_bc_gwfs_disv(example_data_path):
@pytest.mark.mf6
-@pytest.mark.xfail(
- reason="sometimes get LineCollections instead of PatchCollections"
-)
+@pytest.mark.xfail(reason="sometimes get LineCollections instead of PatchCollections")
def test_cross_section_bc_lake2tr(example_data_path):
mpath = example_data_path / "mf6" / "test045_lake2tr"
sim = MFSimulation.load(sim_ws=mpath)
@@ -50,9 +46,7 @@ def test_cross_section_bc_lake2tr(example_data_path):
@pytest.mark.mf6
-@pytest.mark.xfail(
- reason="sometimes get LineCollections instead of PatchCollections"
-)
+@pytest.mark.xfail(reason="sometimes get LineCollections instead of PatchCollections")
def test_cross_section_bc_2models_mvr(example_data_path):
mpath = example_data_path / "mf6" / "test006_2models_mvr"
sim = MFSimulation.load(sim_ws=mpath)
@@ -70,9 +64,7 @@ def test_cross_section_bc_2models_mvr(example_data_path):
@pytest.mark.mf6
-@pytest.mark.xfail(
- reason="sometimes get LineCollections instead of PatchCollections"
-)
+@pytest.mark.xfail(reason="sometimes get LineCollections instead of PatchCollections")
def test_cross_section_bc_UZF_3lay(example_data_path):
mpath = example_data_path / "mf6" / "test001e_UZF_3lay"
sim = MFSimulation.load(sim_ws=mpath)
@@ -92,7 +84,8 @@ def test_cross_section_bc_UZF_3lay(example_data_path):
def structured_square_grid(side: int = 10, thick: int = 10):
"""
- Creates a basic 1-layer structured grid with the given thickness and number of cells per side
+ Creates a basic 1-layer structured grid with the given thickness and number of
+ cells per side
Parameters
----------
side : The number of cells per side
@@ -157,9 +150,7 @@ def test_cross_section_valid_line_representations(line):
# make sure parsed points are identical for all line representations
assert np.allclose(lxc.pts, fxc.pts) and np.allclose(lxc.pts, sxc.pts)
- assert (
- set(lxc.xypts.keys()) == set(fxc.xypts.keys()) == set(sxc.xypts.keys())
- )
+ assert set(lxc.xypts.keys()) == set(fxc.xypts.keys()) == set(sxc.xypts.keys())
for k in lxc.xypts.keys():
assert np.allclose(lxc.xypts[k], fxc.xypts[k]) and np.allclose(
lxc.xypts[k], sxc.xypts[k]
@@ -206,9 +197,7 @@ def test_plot_limits():
user_extent = 0, 500, 0, 25
ax.axis(user_extent)
- pxc = flopy.plot.PlotCrossSection(
- modelgrid=grid, ax=ax, line={"column": 4}
- )
+ pxc = flopy.plot.PlotCrossSection(modelgrid=grid, ax=ax, line={"column": 4})
pxc.plot_grid()
lims = ax.axes.viewLim
@@ -218,9 +207,7 @@ def test_plot_limits():
plt.close(fig)
fig, ax = plt.subplots(figsize=(8, 8))
- pxc = flopy.plot.PlotCrossSection(
- modelgrid=grid, ax=ax, line={"column": 4}
- )
+ pxc = flopy.plot.PlotCrossSection(modelgrid=grid, ax=ax, line={"column": 4})
pxc.plot_grid()
lims = ax.axes.viewLim
@@ -228,3 +215,46 @@ def test_plot_limits():
raise AssertionError("PlotMapView auto extent setting not working")
plt.close(fig)
+
+
+def test_plot_centers():
+ from matplotlib.collections import PathCollection
+
+ nlay = 1
+ nrow = 10
+ ncol = 10
+
+ delc = np.ones((nrow,))
+ delr = np.ones((ncol,))
+ top = np.ones((nrow, ncol))
+ botm = np.zeros((nlay, nrow, ncol))
+ idomain = np.ones(botm.shape, dtype=int)
+
+ idomain[0, :, 0:3] = 0
+
+ grid = flopy.discretization.StructuredGrid(
+ delc=delc, delr=delr, top=top, botm=botm, idomain=idomain
+ )
+
+ line = {"line": [(0, 0), (10, 10)]}
+ active_xc_cells = 7
+
+ pxc = flopy.plot.PlotCrossSection(modelgrid=grid, line=line)
+ pc = pxc.plot_centers()
+
+ if not isinstance(pc, PathCollection):
+ raise AssertionError("plot_centers() not returning PathCollection object")
+
+ verts = pc._offsets
+ if not verts.shape[0] == active_xc_cells:
+ raise AssertionError("plot_centers() not properly masking inactive cells")
+
+ center_dict = pxc.projctr
+ edge_dict = pxc.projpts
+
+ for node, center in center_dict.items():
+ verts = np.array(edge_dict[node]).T
+ xmin = np.min(verts[0])
+ xmax = np.max(verts[0])
+        if not (xmin <= center <= xmax):
+ raise AssertionError("Cell center not properly drawn on cross-section")
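
The new plot_centers() support checked here drops into the usual cross-section
plotting flow. A minimal sketch, assuming a grid like the one built in the test:

    import matplotlib.pyplot as plt

    import flopy

    pxc = flopy.plot.PlotCrossSection(modelgrid=grid, line={"line": [(0, 0), (10, 10)]})
    pxc.plot_grid()
    pc = pxc.plot_centers()  # PathCollection of centers; inactive cells are masked
    plt.show()
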
diff --git a/autotest/test_plot_map_view.py b/autotest/test_plot_map_view.py
index e6fc424b67..386242d1d9 100644
--- a/autotest/test_plot_map_view.py
+++ b/autotest/test_plot_map_view.py
@@ -215,9 +215,7 @@ def test_map_view_contour_array_structured(function_tmpdir, ndim, rng):
elif ndim == 2:
# 1 layer as 2D
pmv = PlotMapView(modelgrid=grid, layer=l)
- contours = pmv.contour_array(
- a=arr.reshape(nlay, nrow, ncol)[l, :, :]
- )
+ contours = pmv.contour_array(a=arr.reshape(nlay, nrow, ncol)[l, :, :])
plt.savefig(function_tmpdir / f"map_view_contour_{ndim}d_l{l}.png")
plt.clf()
elif ndim == 3:
@@ -276,3 +274,40 @@ def test_plot_limits():
raise AssertionError("PlotMapView auto extent setting not working")
plt.close(fig)
+
+
+def test_plot_centers():
+ nlay = 1
+ nrow = 10
+ ncol = 10
+
+ delc = np.ones((nrow,))
+ delr = np.ones((ncol,))
+ top = np.ones((nrow, ncol))
+ botm = np.zeros((nlay, nrow, ncol))
+ idomain = np.ones(botm.shape, dtype=int)
+
+ idomain[0, :, 0:3] = 0
+ active_cells = np.count_nonzero(idomain)
+
+ grid = flopy.discretization.StructuredGrid(
+ delc=delc, delr=delr, top=top, botm=botm, idomain=idomain
+ )
+
+ xcenters = grid.xcellcenters.ravel()
+ ycenters = grid.ycellcenters.ravel()
+ xycenters = list(zip(xcenters, ycenters))
+
+ pmv = flopy.plot.PlotMapView(modelgrid=grid)
+ pc = pmv.plot_centers()
+ if not isinstance(pc, PathCollection):
+ raise AssertionError("plot_centers() not returning PathCollection object")
+
+ verts = pc._offsets
+ if not verts.shape[0] == active_cells:
+ raise AssertionError("plot_centers() not properly masking inactive cells")
+
+ for vert in verts:
+ vert = tuple(vert)
+ if vert not in xycenters:
+ raise AssertionError("center location not properly plotted")
diff --git a/autotest/test_plot_particle_tracks.py b/autotest/test_plot_particle_tracks.py
index 8ba4342bf2..57cbf618d8 100644
--- a/autotest/test_plot_particle_tracks.py
+++ b/autotest/test_plot_particle_tracks.py
@@ -6,6 +6,7 @@
from matplotlib.collections import LineCollection, PathCollection
from modflow_devtools.markers import requires_exe
+import flopy
from flopy.modflow import Modflow
from flopy.modpath import Modpath6, Modpath6Bas
from flopy.plot import PlotCrossSection, PlotMapView
@@ -29,9 +30,7 @@ def modpath_model(function_tmpdir, example_data_path):
model_ws=function_tmpdir,
)
- mpb = Modpath6Bas(
- mp, hdry=ml.lpf.hdry, laytyp=ml.lpf.laytyp, ibound=1, prsity=0.1
- )
+ mpb = Modpath6Bas(mp, hdry=ml.lpf.hdry, laytyp=ml.lpf.laytyp, ibound=1, prsity=0.1)
sim = mp.create_mpsim(
trackdir="forward",
@@ -49,9 +48,7 @@ def test_plot_map_view_mp6_plot_pathline(modpath_model):
mp.run_model(silent=False)
pthobj = PathlineFile(join(mp.model_ws, "ex6.mppth"))
- well_pathlines = pthobj.get_destination_pathline_data(
- dest_cells=[(4, 12, 12)]
- )
+ well_pathlines = pthobj.get_destination_pathline_data(dest_cells=[(4, 12, 12)])
def test_plot(pl):
mx = PlotMapView(model=ml)
@@ -82,9 +79,7 @@ def test_plot_cross_section_mp6_plot_pathline(modpath_model):
mp.run_model(silent=False)
pthobj = PathlineFile(join(mp.model_ws, "ex6.mppth"))
- well_pathlines = pthobj.get_destination_pathline_data(
- dest_cells=[(4, 12, 12)]
- )
+ well_pathlines = pthobj.get_destination_pathline_data(dest_cells=[(4, 12, 12)])
def test_plot(pl):
mx = PlotCrossSection(model=ml, line={"row": 4})
@@ -153,9 +148,7 @@ def test_plot_map_view_mp6_endpoint(modpath_model):
# colorbar: color by time to termination
mv = PlotMapView(model=ml)
mv.plot_bc("WEL", kper=2, color="blue")
- ep = mv.plot_endpoint(
- endpts, direction="ending", shrink=0.5, colorbar=True
- )
+ ep = mv.plot_endpoint(endpts, direction="ending", shrink=0.5, colorbar=True)
# plt.show()
assert isinstance(ep, PathCollection)
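
For orientation, the plotting flow fed by this fixture is short. A sketch assuming
the fixture's ml and mp models have been run:

    from os.path import join

    from flopy.plot import PlotMapView
    from flopy.utils import PathlineFile

    pthobj = PathlineFile(join(mp.model_ws, "ex6.mppth"))
    pls = pthobj.get_destination_pathline_data(dest_cells=[(4, 12, 12)])

    mv = PlotMapView(model=ml)
    mv.plot_grid()
    mv.plot_pathline(pls, layer="all", colors="blue")
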
diff --git a/autotest/test_plot_quasi3d.py b/autotest/test_plot_quasi3d.py
index f26eb03dac..3ffdda12c3 100644
--- a/autotest/test_plot_quasi3d.py
+++ b/autotest/test_plot_quasi3d.py
@@ -101,9 +101,7 @@ def quasi3d_model(function_tmpdir):
@requires_exe("mf2005")
def test_map_plot_with_quasi3d_layers(quasi3d_model):
# read output
- hf = HeadFile(
- os.path.join(quasi3d_model.model_ws, f"{quasi3d_model.name}.hds")
- )
+ hf = HeadFile(os.path.join(quasi3d_model.model_ws, f"{quasi3d_model.name}.hds"))
head = hf.get_data(totim=1.0)
cbb = CellBudgetFile(
os.path.join(quasi3d_model.model_ws, f"{quasi3d_model.name}.cbc")
@@ -127,9 +125,7 @@ def test_map_plot_with_quasi3d_layers(quasi3d_model):
@requires_exe("mf2005")
def test_cross_section_with_quasi3d_layers(quasi3d_model):
# read output
- hf = HeadFile(
- os.path.join(quasi3d_model.model_ws, f"{quasi3d_model.name}.hds")
- )
+ hf = HeadFile(os.path.join(quasi3d_model.model_ws, f"{quasi3d_model.name}.hds"))
head = hf.get_data(totim=1.0)
cbb = CellBudgetFile(
os.path.join(quasi3d_model.model_ws, f"{quasi3d_model.name}.cbc")
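
Both quasi-3D tests start from the same output-reading boilerplate. A hedged
sketch with placeholder file names and workspace:

    import os

    from flopy.utils import CellBudgetFile, HeadFile

    hf = HeadFile(os.path.join(model_ws, "model.hds"))
    head = hf.get_data(totim=1.0)  # (nlay, nrow, ncol) heads at that time
    cbb = CellBudgetFile(os.path.join(model_ws, "model.cbc"))
    frf = cbb.get_data(text="FLOW RIGHT FACE", totim=1.0)[0]
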
diff --git a/autotest/test_plotutil.py b/autotest/test_plotutil.py
index 8aa228f8aa..ae88d1133d 100644
--- a/autotest/test_plotutil.py
+++ b/autotest/test_plotutil.py
@@ -329,15 +329,11 @@
@pytest.mark.parametrize("dataframe", [True, False])
def test_to_mp7_pathlines(dataframe):
prt_pls = (
- PRT_TEST_PATHLINES
- if dataframe
- else PRT_TEST_PATHLINES.to_records(index=False)
+ PRT_TEST_PATHLINES if dataframe else PRT_TEST_PATHLINES.to_records(index=False)
)
mp7_pls = to_mp7_pathlines(prt_pls)
assert (
- type(prt_pls)
- == type(mp7_pls)
- == (pd.DataFrame if dataframe else np.recarray)
+ type(prt_pls) == type(mp7_pls) == (pd.DataFrame if dataframe else np.recarray)
)
assert len(mp7_pls) == 10
assert set(
@@ -361,15 +357,11 @@ def test_to_mp7_pathlines_empty(dataframe):
@pytest.mark.parametrize("dataframe", [True, False])
def test_to_mp7_pathlines_noop(dataframe):
prt_pls = (
- MP7_TEST_PATHLINES
- if dataframe
- else MP7_TEST_PATHLINES.to_records(index=False)
+ MP7_TEST_PATHLINES if dataframe else MP7_TEST_PATHLINES.to_records(index=False)
)
mp7_pls = to_mp7_pathlines(prt_pls)
assert (
- type(prt_pls)
- == type(mp7_pls)
- == (pd.DataFrame if dataframe else np.recarray)
+ type(prt_pls) == type(mp7_pls) == (pd.DataFrame if dataframe else np.recarray)
)
assert len(mp7_pls) == 2
assert set(
@@ -383,9 +375,7 @@ def test_to_mp7_pathlines_noop(dataframe):
@pytest.mark.parametrize("dataframe", [True, False])
def test_to_mp7_endpoints(dataframe):
mp7_eps = to_mp7_endpoints(
- PRT_TEST_PATHLINES
- if dataframe
- else PRT_TEST_PATHLINES.to_records(index=False)
+ PRT_TEST_PATHLINES if dataframe else PRT_TEST_PATHLINES.to_records(index=False)
)
assert len(mp7_eps) == 1
assert np.isclose(mp7_eps.time[0], PRT_TEST_PATHLINES.t.max())
@@ -411,9 +401,7 @@ def test_to_mp7_endpoints_empty(dataframe):
def test_to_mp7_endpoints_noop(dataframe):
"""Test a recarray or dataframe which already contains MP7 endpoint data"""
mp7_eps = to_mp7_endpoints(
- MP7_TEST_ENDPOINTS
- if dataframe
- else MP7_TEST_ENDPOINTS.to_records(index=False)
+ MP7_TEST_ENDPOINTS if dataframe else MP7_TEST_ENDPOINTS.to_records(index=False)
)
assert np.array_equal(
mp7_eps if dataframe else pd.DataFrame(mp7_eps), MP7_TEST_ENDPOINTS
@@ -423,9 +411,7 @@ def test_to_mp7_endpoints_noop(dataframe):
@pytest.mark.parametrize("dataframe", [True, False])
def test_to_prt_pathlines_roundtrip(dataframe):
mp7_pls = to_mp7_pathlines(
- PRT_TEST_PATHLINES
- if dataframe
- else PRT_TEST_PATHLINES.to_records(index=False)
+ PRT_TEST_PATHLINES if dataframe else PRT_TEST_PATHLINES.to_records(index=False)
)
prt_pls = to_prt_pathlines(mp7_pls)
if not dataframe:
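
These parametrized tests all drive the same three converters. A sketch, with
prt_pls standing in for a PRT pathline DataFrame or recarray such as
PRT_TEST_PATHLINES:

    from flopy.plot.plotutil import (
        to_mp7_endpoints,
        to_mp7_pathlines,
        to_prt_pathlines,
    )

    mp7_pls = to_mp7_pathlines(prt_pls)  # PRT pathlines -> MP7-style records
    mp7_eps = to_mp7_endpoints(prt_pls)  # terminal locations only
    prt_again = to_prt_pathlines(mp7_pls)  # round trip back to PRT format
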
diff --git a/autotest/test_postprocessing.py b/autotest/test_postprocessing.py
index d45ab84f3c..fc9bef3596 100644
--- a/autotest/test_postprocessing.py
+++ b/autotest/test_postprocessing.py
@@ -101,9 +101,7 @@ def test_get_structured_faceflows(function_tmpdir, nlay, nrow, ncol):
iface = 6 # top
for i in range(0, max_dim):
# ((layer,row,col),head,iface)
- cell_id = (
- (0, 0, i) if ncol > 1 else (0, i, 0) if nrow > 1 else (i, 0, 0)
- )
+ cell_id = (0, 0, i) if ncol > 1 else (0, i, 0) if nrow > 1 else (i, 0, 0)
chd_rec.append((cell_id, h[i], iface))
chd = flopy.mf6.ModflowGwfchd(
gwf,
@@ -234,9 +232,7 @@ def test_get_structured_faceflows_freyberg(
@pytest.mark.mf6
@requires_exe("mf6")
-def test_get_structured_faceflows_idomain(
- function_tmpdir,
-):
+def test_get_structured_faceflows_idomain(function_tmpdir):
name = "gsffi"
Lx = 1000
@@ -362,12 +358,8 @@ def test_get_structured_faceflows_idomain(
Qv_aqui = cbf0[~idx].sum() # Flow across aquitard
print(f"Total flow across bottom of upper aquifer {Qv_sum:0.2f} m^3/d")
- print(
- f"Flow across bottom of upper aquifer to aquitard {Qv_aqui:0.2f} m^3/d"
- )
- print(
- f"Flow across bottom of upper aquifer to lower aquifer {Qv_wind:0.2f} m^3/d"
- )
+ print(f"Flow across bottom of upper aquifer to aquitard {Qv_aqui:0.2f} m^3/d")
+ print(f"Flow across bottom of upper aquifer to lower aquifer {Qv_wind:0.2f} m^3/d")
print(np.isclose(-Qwell, Qv_sum, atol=1e-3))
assert np.isclose(-Qwell, Qv_sum, atol=1e-3)
@@ -430,14 +422,10 @@ def test_structured_faceflows_3d_shape(function_tmpdir):
tdis = ModflowTdis(sim)
ims = ModflowIms(sim)
gwf = ModflowGwf(sim, modelname=name, save_flows=True)
- dis = ModflowGwfdis(
- gwf, nlay=3, nrow=10, ncol=10, top=0, botm=[-1, -2, -3]
- )
+ dis = ModflowGwfdis(gwf, nlay=3, nrow=10, ncol=10, top=0, botm=[-1, -2, -3])
ic = ModflowGwfic(gwf)
npf = ModflowGwfnpf(gwf, save_specific_discharge=True)
- chd = ModflowGwfchd(
- gwf, stress_period_data=[[(0, 0, 0), 1.0], [(0, 9, 9), 0.0]]
- )
+ chd = ModflowGwfchd(gwf, stress_period_data=[[(0, 0, 0), 1.0], [(0, 9, 9), 0.0]])
budget_file = name + ".bud"
head_file = name + ".hds"
oc = ModflowGwfoc(
@@ -456,15 +444,9 @@ def test_structured_faceflows_3d_shape(function_tmpdir):
flowja,
grb_file=function_tmpdir / "mymodel.dis.grb",
)
- assert (
- frf.shape == head.shape
- ), f"frf.shape {frf.shape} != head.shape {head.shape}"
- assert (
- fff.shape == head.shape
- ), f"frf.shape {frf.shape} != head.shape {head.shape}"
- assert (
- flf.shape == head.shape
- ), f"frf.shape {frf.shape} != head.shape {head.shape}"
+    assert frf.shape == head.shape, f"frf.shape {frf.shape} != head.shape {head.shape}"
+    assert fff.shape == head.shape, f"fff.shape {fff.shape} != head.shape {head.shape}"
+    assert flf.shape == head.shape, f"flf.shape {flf.shape} != head.shape {head.shape}"
def test_get_transmissivities(function_tmpdir):
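
A condensed sketch of the post-processing these tests target, assuming a finished
MF6 run whose budget (.bud) and binary grid (.dis.grb) files sit under ws with
basename name:

    import flopy
    from flopy.utils.postprocessing import get_structured_faceflows

    cbb = flopy.utils.CellBudgetFile(ws / f"{name}.bud")
    flowja = cbb.get_data(text="FLOW-JA-FACE")[0]
    frf, fff, flf = get_structured_faceflows(
        flowja,
        grb_file=ws / f"{name}.dis.grb",
    )
    # each face-flow array comes back in the model's (nlay, nrow, ncol) shape
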
diff --git a/autotest/test_rasters.py b/autotest/test_rasters.py
new file mode 100644
index 0000000000..3a9e5a87b5
--- /dev/null
+++ b/autotest/test_rasters.py
@@ -0,0 +1,216 @@
+import os
+
+import numpy as np
+import pytest
+from modflow_devtools.markers import requires_pkg
+
+import flopy
+from flopy.modflow import Modflow
+from flopy.utils import Raster
+
+# %% test rasters
+
+
+@requires_pkg("rasterstats", "scipy", "shapely")
+def test_rasters(example_data_path):
+ ws = example_data_path / "options"
+ raster_name = "dem.img"
+
+ rio = Raster.load(ws / "dem" / raster_name)
+
+ ml = Modflow.load(
+ "sagehen.nam", version="mfnwt", model_ws=os.path.join(ws, "sagehen")
+ )
+ xoff = 214110
+ yoff = 4366620
+ ml.modelgrid.set_coord_info(xoff, yoff)
+
+ # test sampling points and polygons
+ val = rio.sample_point(xoff + 2000, yoff + 2000, band=1)
+ print(val - 2336.3965)
+ if abs(val - 2336.3965) > 1e-4:
+ raise AssertionError
+
+ x0, x1, y0, y1 = rio.bounds
+
+ x0 += 1000
+ y0 += 1000
+ x1 -= 1000
+ y1 -= 1000
+ shape = np.array([(x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)])
+
+ data = rio.sample_polygon(shape, band=rio.bands[0])
+ if data.size != 267050:
+ raise AssertionError
+ if abs(np.min(data) - 1942.1735) > 1e-4:
+ raise AssertionError
+    if abs(np.max(data) - 2608.557) > 1e-4:
+ raise AssertionError
+
+ rio.crop(shape)
+ data = rio.get_array(band=rio.bands[0], masked=True)
+ if data.size != 267050:
+ raise AssertionError
+ if abs(np.min(data) - 1942.1735) > 1e-4:
+ raise AssertionError
+    if abs(np.max(data) - 2608.557) > 1e-4:
+ raise AssertionError
+
+ data = rio.resample_to_grid(ml.modelgrid, band=rio.bands[0], method="nearest")
+ if data.size != 5913:
+ raise AssertionError
+ if abs(np.min(data) - 1942.1735) > 1e-4:
+ raise AssertionError
+ if abs(np.max(data) - 2605.6204) > 1e-4:
+ raise AssertionError
+
+ del rio
+
+
+# %% test raster sampling methods
+
+
+@pytest.mark.slow
+@requires_pkg("rasterstats")
+def test_raster_sampling_methods(example_data_path):
+ ws = example_data_path / "options"
+ raster_name = "dem.img"
+
+ rio = Raster.load(ws / "dem" / raster_name)
+
+ ml = Modflow.load("sagehen.nam", version="mfnwt", model_ws=ws / "sagehen")
+ xoff = 214110
+ yoff = 4366620
+ ml.modelgrid.set_coord_info(xoff, yoff)
+
+ x0, x1, y0, y1 = rio.bounds
+
+ x0 += 3000
+ y0 += 3000
+ x1 -= 3000
+ y1 -= 3000
+ shape = np.array([(x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)])
+
+ rio.crop(shape)
+
+ methods = {
+ "min": 2088.52343,
+ "max": 2103.54882,
+ "mean": 2097.05054,
+ "median": 2097.36254,
+ "mode": 2088.52343,
+ "nearest": 2097.81079,
+ "linear": 2097.81079,
+ "cubic": 2097.81079,
+ }
+
+ for method, value in methods.items():
+ data = rio.resample_to_grid(ml.modelgrid, band=rio.bands[0], method=method)
+
+ print(data[30, 37])
+ if np.abs(data[30, 37] - value) > 1e-05:
+ raise AssertionError(f"{method} resampling returning incorrect values")
+
+
+@requires_pkg("rasterio")
+def test_raster_reprojection(example_data_path):
+ ws = example_data_path / "options" / "dem"
+ raster_name = "dem.img"
+
+ wgs_epsg = 4326
+ wgs_xmin = -120.32116799649168
+ wgs_ymax = 39.46620605907534
+
+ raster = Raster.load(ws / raster_name)
+
+ print(raster.crs.to_epsg())
+ wgs_raster = raster.to_crs(crs=f"EPSG:{wgs_epsg}")
+
+ if not wgs_raster.crs.to_epsg() == wgs_epsg:
+ raise AssertionError(f"Raster not converted to EPSG {wgs_epsg}")
+
+ transform = wgs_raster._meta["transform"]
+    if not (np.isclose(transform.c, wgs_xmin) and np.isclose(transform.f, wgs_ymax)):
+ raise AssertionError(f"Raster not reprojected to EPSG {wgs_epsg}")
+
+ raster.to_crs(epsg=wgs_epsg, inplace=True)
+ transform2 = raster._meta["transform"]
+ for ix, val in enumerate(transform):
+ if not np.isclose(val, transform2[ix]):
+ raise AssertionError("In place reprojection not working")
+
+
+@requires_pkg("rasterio")
+def test_create_raster_from_array_modelgrid(example_data_path):
+ ws = example_data_path / "options" / "dem"
+ raster_name = "dem.img"
+
+ raster = Raster.load(ws / raster_name)
+
+ xsize = 200
+ ysize = 100
+ xmin, xmax, ymin, ymax = raster.bounds
+
+ nbands = 5
+ nlay = 1
+ nrow = int(np.floor((ymax - ymin) / ysize))
+ ncol = int(np.floor((xmax - xmin) / xsize))
+
+ delc = np.full((nrow,), ysize)
+ delr = np.full((ncol,), xsize)
+
+ grid = flopy.discretization.StructuredGrid(
+ delc=delc,
+ delr=delr,
+ top=np.ones((nrow, ncol)),
+ botm=np.zeros((nlay, nrow, ncol)),
+ idomain=np.ones((nlay, nrow, ncol), dtype=int),
+ xoff=xmin,
+ yoff=ymin,
+ crs=raster.crs,
+ )
+
+ array = np.random.random((grid.ncpl * nbands,)) * 100
+ robj = Raster.raster_from_array(array, grid)
+
+ if nbands != len(robj.bands):
+ raise AssertionError("Number of raster bands is incorrect")
+
+ array = array.reshape((nbands, nrow, ncol))
+ for band in robj.bands:
+ ra = robj.get_array(band)
+ np.testing.assert_allclose(
+ array[band - 1],
+ ra,
+ err_msg="Array not properly reshaped or converted to raster",
+ )
+
+
+@requires_pkg("rasterio", "affine")
+def test_create_raster_from_array_transform(example_data_path):
+ import affine
+
+ ws = example_data_path / "options" / "dem"
+ raster_name = "dem.img"
+
+ raster = Raster.load(ws / raster_name)
+
+ transform = raster._meta["transform"]
+ array = raster.get_array(band=raster.bands[0])
+
+ array = np.expand_dims(array, axis=0)
+ # same location but shrink raster by factor 2
+ new_transform = affine.Affine(
+ transform.a / 2, 0, transform.c, 0, transform.e / 2, transform.f
+ )
+
+ robj = Raster.raster_from_array(array, crs=raster.crs, transform=new_transform)
+
+ rxmin, rxmax, rymin, rymax = robj.bounds
+ xmin, xmax, ymin, ymax = raster.bounds
+
+ if (
+ not ((xmax - xmin) / (rxmax - rxmin)) == 2
+ or not ((ymax - ymin) / (rymax - rymin)) == 2
+ ):
+ raise AssertionError("Transform based raster not working properly")
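
Beyond the assertions, the new module exercises a small, composable Raster API.
A usage sketch with dem_path, grid, and the sample coordinates as placeholders:

    from flopy.utils import Raster

    rio = Raster.load(dem_path)
    elev = rio.sample_point(216110, 4368620, band=1)  # value at one coordinate
    top = rio.resample_to_grid(grid, band=rio.bands[0], method="linear")
    wgs = rio.to_crs(crs="EPSG:4326")  # reproject; inplace=True mutates instead
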
diff --git a/autotest/test_seawat.py b/autotest/test_seawat.py
index aba1c125c6..fa64d873e9 100644
--- a/autotest/test_seawat.py
+++ b/autotest/test_seawat.py
@@ -195,9 +195,7 @@ def test_seawat2_henry(function_tmpdir):
def swt4_namfiles():
- return [
- str(p) for p in (get_example_data_path() / "swtv4_test").rglob("*.nam")
- ]
+ return [str(p) for p in (get_example_data_path() / "swtv4_test").rglob("*.nam")]
@requires_exe("swtv4")
@@ -245,7 +243,7 @@ def test_seawat_load_only(function_tmpdir):
m = Seawat.load(
model_name, model_ws=function_tmpdir, load_only=load_only, verbose=True
)
- assert set([pkg.upper() for pkg in load_only]) == set(m.get_package_list())
+ assert {pkg.upper() for pkg in load_only} == set(m.get_package_list())
def test_vdf_vsc(function_tmpdir):
diff --git a/autotest/test_sfr.py b/autotest/test_sfr.py
index c50dcffe50..9fb2ece8be 100644
--- a/autotest/test_sfr.py
+++ b/autotest/test_sfr.py
@@ -116,9 +116,7 @@ def sfr_data():
r["iseg"] = sorted(list(range(1, 10)) * 3)
r["ireach"] = [1, 2, 3] * 9
- d = create_empty_recarray(
- 9, dtype=np.dtype([("nseg", int), ("outseg", int)])
- )
+ d = create_empty_recarray(9, dtype=np.dtype([("nseg", int), ("outseg", int)]))
d["nseg"] = range(1, 10)
d["outseg"] = [4, 0, 6, 8, 3, 8, 1, 2, 8]
return r, d
@@ -193,9 +191,7 @@ def sfr_process(mfnam, sfrfile, model_ws, outfolder):
"UZFtest2.nam", "UZFtest2.sfr", mf2005_model_path, function_tmpdir
)
- assert isinstance(
- sfr.plot()[0], matplotlib.axes.Axes
- ) # test the plot() method
+ assert isinstance(sfr.plot()[0], matplotlib.axes.Axes) # test the plot() method
matplotlib.pyplot.close()
def interpolate_to_reaches(sfr):
@@ -216,12 +212,7 @@ def interpolate_to_reaches(sfr):
]
xp = [dist[0], dist[-1]]
assert (
- np.sum(
- np.abs(
- reaches[reachvar]
- - np.interp(dist, xp, fp).tolist()
- )
- )
+ np.sum(np.abs(reaches[reachvar] - np.interp(dist, xp, fp).tolist()))
< 0.01
)
return reach_data
@@ -239,15 +230,11 @@ def interpolate_to_reaches(sfr):
out_inds = np.asarray(sfr.reach_data.reachID == outreach).nonzero()
assert (
sfr.reach_data.slope[reach_inds]
- == (
- sfr.reach_data.strtop[reach_inds] - sfr.reach_data.strtop[out_inds]
- )
+ == (sfr.reach_data.strtop[reach_inds] - sfr.reach_data.strtop[out_inds])
/ sfr.reach_data.rchlen[reach_inds]
)
chk = sfr.check()
- assert (
- sfr.reach_data.slope.min() < 0.0001 and "minimum slope" in chk.warnings
- )
+ assert sfr.reach_data.slope.min() < 0.0001 and "minimum slope" in chk.warnings
# negative segments for lakes shouldn't be included in segment numbering order check
assert "segment numbering order" not in chk.warnings
sfr.reach_data.slope[0] = 1.1
@@ -456,9 +443,7 @@ def test_example(mf2005_model_path):
nparseg = 0
const = 1.486 # constant for manning's equation, units of cfs
dleak = 0.0001 # closure tolerance for stream stage computation
- ipakcb = (
- 53 # flag for writing SFR output to cell-by-cell budget (on unit 53)
- )
+ ipakcb = 53 # flag for writing SFR output to cell-by-cell budget (on unit 53)
istcb2 = 81 # flag for writing SFR output to text file
dataset_5 = {0: [nss, 0, 0]} # dataset 5 (see online guide)
@@ -682,11 +667,7 @@ def test_SfrFile(function_tmpdir, sfr_examples_path, mf2005_model_path):
]
sfrout = SfrFile(sfr_examples_path / "sfroutput2.txt")
assert sfrout.ncol == 18, sfrout.ncol
- assert sfrout.names == common_names + [
- "Qwt",
- "delUzstor",
- "gw_head",
- ], sfrout.names
+ assert sfrout.names == common_names + ["Qwt", "delUzstor", "gw_head"], sfrout.names
assert sfrout.times == [(0, 0), (49, 1)], sfrout.times
df = sfrout.get_dataframe()
@@ -735,9 +716,7 @@ def test_SfrFile(function_tmpdir, sfr_examples_path, mf2005_model_path):
assert df.gradient.values[-1] == 5.502e-02
assert df.shape == (1080, 20)
- ml = Modflow.load(
- "test1tr.nam", model_ws=mf2005_model_path, exe_name="mf2005"
- )
+ ml = Modflow.load("test1tr.nam", model_ws=mf2005_model_path, exe_name="mf2005")
ml.change_model_ws(function_tmpdir)
ml.write_input()
ml.run_model()
@@ -815,9 +794,8 @@ def test_sfrcheck(function_tmpdir, mf2005_model_path):
chk.routing()
assert "circular routing" in chk.passed
chk.overlapping_conductance()
- assert (
- "overlapping conductance" in chk.warnings
- ) # this example model has overlapping conductance
+ # this example model has overlapping conductance
+ assert "overlapping conductance" in chk.warnings
chk.elevations()
for test in [
"segment elevations",
@@ -848,9 +826,7 @@ def test_sfrcheck(function_tmpdir, mf2005_model_path):
# throw warning if isfropt=1 and strtop at default
assert "maximum streambed top" in chk.warnings
assert "minimum streambed top" in chk.warnings
- m.sfr.reach_data["strtop"] = m.sfr._interpolate_to_reaches(
- "elevup", "elevdn"
- )
+ m.sfr.reach_data["strtop"] = m.sfr._interpolate_to_reaches("elevup", "elevdn")
m.sfr.get_slopes()
m.sfr.reach_data["strhc1"] = 1.0
m.sfr.reach_data["strthick"] = 1.0
@@ -899,8 +875,7 @@ def test_isfropt_icalc(function_tmpdir, example_data_path, isfropt, icalc):
success = ml.run_model()[0]
if not success:
raise AssertionError(
- f"sfrtest{isfropt}{icalc}.nam "
- "is broken, please fix SFR 6a, 6bc logic!"
+ f"sfrtest{isfropt}{icalc}.nam is broken, please fix SFR 6a, 6bc logic!"
)
@@ -958,8 +933,6 @@ def test_mf2005(function_tmpdir, namfile):
)
for name in str2.dtype2.names:
assert (
- np.array_equal(
- str2.segment_data[0][name], m.str.segment_data[0][name]
- )
+ np.array_equal(str2.segment_data[0][name], m.str.segment_data[0][name])
is True
)
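
A brief sketch of the SfrFile reader exercised above; the output file name is a
placeholder:

    from flopy.utils import SfrFile

    sfrout = SfrFile("sfroutput2.txt")
    df = sfrout.get_dataframe()  # one row per reach per printed time
    print(sfrout.ncol, sfrout.names, sfrout.times)
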
diff --git a/autotest/test_shapefile_utils.py b/autotest/test_shapefile_utils.py
index 5d1292c7df..a81eec6f2e 100644
--- a/autotest/test_shapefile_utils.py
+++ b/autotest/test_shapefile_utils.py
@@ -7,10 +7,7 @@
import flopy
from flopy.discretization import StructuredGrid, UnstructuredGrid, VertexGrid
-from flopy.export.shapefile_utils import (
- model_attributes_to_shapefile,
- shp2recarray,
-)
+from flopy.export.shapefile_utils import model_attributes_to_shapefile, shp2recarray
from flopy.utils.crs import get_shapefile_crs
from .test_export import disu_sim
@@ -23,9 +20,7 @@ def test_model_attributes_to_shapefile(example_data_path, function_tmpdir):
name = "freyberg"
namfile = f"{name}.nam"
ws = example_data_path / name
- m = flopy.modflow.Modflow.load(
- namfile, model_ws=ws, check=False, verbose=False
- )
+ m = flopy.modflow.Modflow.load(namfile, model_ws=ws, check=False, verbose=False)
shpfile_path = function_tmpdir / f"{name}.shp"
pakg_names = ["DIS", "BAS6", "LPF", "WEL", "RIV", "RCH", "OC", "PCG"]
model_attributes_to_shapefile(shpfile_path, m, pakg_names)
@@ -33,9 +28,7 @@ def test_model_attributes_to_shapefile(example_data_path, function_tmpdir):
# freyberg mf6 model
name = "mf6-freyberg"
- sim = flopy.mf6.MFSimulation.load(
- sim_name=name, sim_ws=example_data_path / name
- )
+ sim = flopy.mf6.MFSimulation.load(sim_name=name, sim_ws=example_data_path / name)
m = sim.get_model()
shpfile_path = function_tmpdir / f"{name}.shp"
pakg_names = ["dis", "bas6", "npf", "wel", "riv", "rch", "oc", "pcg"]
diff --git a/autotest/test_specific_discharge.py b/autotest/test_specific_discharge.py
index cd218ab301..0b7045680d 100644
--- a/autotest/test_specific_discharge.py
+++ b/autotest/test_specific_discharge.py
@@ -47,10 +47,7 @@
ModflowWel,
)
from flopy.plot import PlotCrossSection, PlotMapView
-from flopy.utils.postprocessing import (
- get_extended_budget,
- get_specific_discharge,
-)
+from flopy.utils.postprocessing import get_extended_budget, get_specific_discharge
# model domain, grid definition and properties
Lx = 100.0
@@ -197,9 +194,7 @@ def mf6_model(function_tmpdir):
# create tdis package
tdis_rc = [(1.0, 1, 1.0)]
- tdis = ModflowTdis(
- sim, pname="tdis", time_units="DAYS", perioddata=tdis_rc
- )
+ tdis = ModflowTdis(sim, pname="tdis", time_units="DAYS", perioddata=tdis_rc)
# create gwf model
gwf = ModflowGwf(
@@ -267,8 +262,7 @@ def mf6_model(function_tmpdir):
# create ghb package
ghbspd = [
- [(ghb_i[0], ghb_i[1], ghb_i[2]), ghb_i[3], ghb_i[4]]
- for ghb_i in ghb_list
+ [(ghb_i[0], ghb_i[1], ghb_i[2]), ghb_i[3], ghb_i[4]] for ghb_i in ghb_list
]
ghb = ModflowGwfghb(gwf, print_input=True, stress_period_data=ghbspd)
@@ -281,8 +275,7 @@ def mf6_model(function_tmpdir):
# create drn package
drnspd = [
- [(drn_i[0], drn_i[1], drn_i[2]), drn_i[3], drn_i[4]]
- for drn_i in drn_list
+ [(drn_i[0], drn_i[1], drn_i[2]), drn_i[3], drn_i[4]] for drn_i in drn_list
]
drn = ModflowGwfdrn(gwf, print_input=True, stress_period_data=drnspd)
@@ -359,9 +352,7 @@ def test_extended_budget_default(mf2005_model):
mf.run_model()
# load and postprocess
- Qx_ext, Qy_ext, Qz_ext = get_extended_budget(
- function_tmpdir / "mf2005.cbc"
- )
+ Qx_ext, Qy_ext, Qz_ext = get_extended_budget(function_tmpdir / "mf2005.cbc")
# basic check
basic_check(Qx_ext, Qy_ext, Qz_ext)
@@ -389,9 +380,7 @@ def extended_budget_comprehensive(function_tmpdir):
basic_check(Qx_ext, Qy_ext, Qz_ext)
# local balance check
- local_balance_check(
- Qx_ext, Qy_ext, Qz_ext, function_tmpdir / "mf2005.hds", mf
- )
+ local_balance_check(Qx_ext, Qy_ext, Qz_ext, function_tmpdir / "mf2005.hds", mf)
# overall check
overall = np.sum(Qx_ext) + np.sum(Qy_ext) + np.sum(Qz_ext)
@@ -493,7 +482,7 @@ def specific_discharge_comprehensive(function_tmpdir):
@pytest.mark.mf6
@pytest.mark.xfail(
- reason="occasional Unexpected collection type: "
+ reason="occasional Unexpected collection type: " # noqa
)
def test_specific_discharge_mf6(mf6_model):
# build and run MODFLOW 6 model
@@ -501,9 +490,7 @@ def test_specific_discharge_mf6(mf6_model):
sim.run_simulation()
# load and postprocess
- sim = MFSimulation.load(
- sim_name="mf6", sim_ws=function_tmpdir, verbosity_level=0
- )
+ sim = MFSimulation.load(sim_name="mf6", sim_ws=function_tmpdir, verbosity_level=0)
gwf = sim.get_model("mf6")
hds = bf.HeadFile(function_tmpdir / "mf6.hds")
head = hds.get_data()
@@ -528,9 +515,7 @@ def test_specific_discharge_mf6(mf6_model):
ax = modelmap.ax
assert len(ax.collections) != 0, "Discharge vector was not drawn"
for col in ax.collections:
- assert isinstance(
- col, Quiver
- ), f"Unexpected collection type: {type(col)}"
+ assert isinstance(col, Quiver), f"Unexpected collection type: {type(col)}"
assert np.sum(quiver.Umask) == 1
pos = np.sum(quiver.X) + np.sum(quiver.Y)
assert np.allclose(pos, 1600.0)
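A minimal sketch of the postprocessing call reformatted above, assuming a cell-by-cell budget file is at hand (the path is hypothetical):

    import numpy as np
    from flopy.utils.postprocessing import get_extended_budget

    # extended budget arrays on the cell faces of a structured grid
    Qx_ext, Qy_ext, Qz_ext = get_extended_budget("mf2005.cbc")
    overall = np.sum(Qx_ext) + np.sum(Qy_ext) + np.sum(Qz_ext)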
diff --git a/autotest/test_subwt.py b/autotest/test_subwt.py
index 484a0f3bd1..45c0e1f5aa 100644
--- a/autotest/test_subwt.py
+++ b/autotest/test_subwt.py
@@ -68,9 +68,7 @@ def test_subwt(function_tmpdir, ibound_path):
sp2_wells.append([1, 8, 9, -72000.0])
sp2_wells.append([3, 11, 6, -72000.0])
- ModflowWel(
- ml, stress_period_data={0: sp1_wells, 1: sp2_wells, 2: sp1_wells}
- )
+ ModflowWel(ml, stress_period_data={0: sp1_wells, 1: sp2_wells, 2: sp1_wells})
ModflowSwt(
ml,
diff --git a/autotest/test_swr_binaryread.py b/autotest/test_swr_binaryread.py
index 02bf20a96b..2022193248 100644
--- a/autotest/test_swr_binaryread.py
+++ b/autotest/test_swr_binaryread.py
@@ -42,12 +42,8 @@ def test_swr_binary_stage(swr_test_path, ipos):
for idx in range(ntimes):
r = sobj.get_data(idx=idx)
- assert (
- r is not None
- ), "SwrStage could not read data with get_data(idx=)"
- assert r.shape == (
- 18,
- ), "SwrStage stage data shape does not equal (18,)"
+ assert r is not None, "SwrStage could not read data with get_data(idx=)"
+ assert r.shape == (18,), "SwrStage stage data shape does not equal (18,)"
assert (
len(r.dtype.names) == 2
), "SwrStage stage data dtype does not have 2 entries"
@@ -63,9 +59,7 @@ def test_swr_binary_stage(swr_test_path, ipos):
assert (
r is not None
), "SwrStage could not read data with get_data(kswrkstpkper=)"
- assert r.shape == (
- 18,
- ), "SwrStage stage data shape does not equal (18,)"
+ assert r.shape == (18,), "SwrStage stage data shape does not equal (18,)"
assert (
len(r.dtype.names) == 2
), "SwrStage stage data dtype does not have 2 entries"
@@ -75,20 +69,14 @@ def test_swr_binary_stage(swr_test_path, ipos):
for time in times:
r = sobj.get_data(totim=time)
- assert (
- r is not None
- ), "SwrStage could not read data with get_data(tottim=)"
- assert r.shape == (
- 18,
- ), "SwrStage stage data shape does not equal (18,)"
+ assert r is not None, "SwrStage could not read data with get_data(tottim=)"
+ assert r.shape == (18,), "SwrStage stage data shape does not equal (18,)"
assert (
len(r.dtype.names) == 2
), "SwrStage stage data dtype does not have 2 entries"
ts = sobj.get_ts(irec=17)
- assert ts.shape == (
- 336,
- ), "SwrStage stage timeseries shape does not equal (336,)"
+ assert ts.shape == (336,), "SwrStage stage timeseries shape does not equal (336,)"
assert (
len(ts.dtype.names) == 2
), "SwrStage stage time series stage data dtype does not have 2 entries"
@@ -111,15 +99,9 @@ def test_swr_binary_budget(swr_test_path, ipos):
for idx in range(ntimes):
r = sobj.get_data(idx=idx)
- assert (
- r is not None
- ), "SwrBudget could not read data with get_data(idx=)"
- assert r.shape == (
- 18,
- ), "SwrBudget budget data shape does not equal (18,)"
- assert (
- len(r.dtype.names) == 15
- ), "SwrBudget data dtype does not have 15 entries"
+ assert r is not None, "SwrBudget could not read data with get_data(idx=)"
+ assert r.shape == (18,), "SwrBudget budget data shape does not equal (18,)"
+ assert len(r.dtype.names) == 15, "SwrBudget data dtype does not have 15 entries"
# plt.bar(range(18), r['inf-out'])
# plt.show()
@@ -135,9 +117,7 @@ def test_swr_binary_budget(swr_test_path, ipos):
assert (
r is not None
), "SwrBudget could not read data with get_data(kswrkstpkper=)"
- assert r.shape == (
- 18,
- ), "SwrBudget budget data shape does not equal (18,)"
+ assert r.shape == (18,), "SwrBudget budget data shape does not equal (18,)"
assert (
len(r.dtype.names) == 15
), "SwrBudget budget data dtype does not have 15 entries"
@@ -147,20 +127,14 @@ def test_swr_binary_budget(swr_test_path, ipos):
for time in times:
r = sobj.get_data(totim=time)
- assert (
- r is not None
- ), "SwrBudget could not read data with get_data(tottim=)"
- assert r.shape == (
- 18,
- ), "SwrBudget budget data shape does not equal (18,)"
+ assert r is not None, "SwrBudget could not read data with get_data(tottim=)"
+ assert r.shape == (18,), "SwrBudget budget data shape does not equal (18,)"
assert (
len(r.dtype.names) == 15
), "SwrBudget budget data dtype does not have 15 entries"
ts = sobj.get_ts(irec=17)
- assert ts.shape == (
- 336,
- ), "SwrBudget budget timeseries shape does not equal (336,)"
+ assert ts.shape == (336,), "SwrBudget budget timeseries shape does not equal (336,)"
assert (
len(ts.dtype.names) == 15
), "SwrBudget time series budget data dtype does not have 15 entries"
@@ -179,10 +153,7 @@ def test_swr_binary_qm(swr_test_path, ipos):
assert nrecords == (40, 18), "SwrFlow records does not equal (40, 18)"
connect = sobj.get_connectivity()
- assert connect.shape == (
- 40,
- 3,
- ), "SwrFlow connectivity shape does not equal (40, 3)"
+ assert connect.shape == (40, 3), "SwrFlow connectivity shape does not equal (40, 3)"
ntimes = sobj.get_ntimes()
assert ntimes == 336, "SwrFlow ntimes does not equal 336"
@@ -191,9 +162,7 @@ def test_swr_binary_qm(swr_test_path, ipos):
r = sobj.get_data(idx=idx)
assert r is not None, "SwrFlow could not read data with get_data(idx=)"
assert r.shape == (40,), "SwrFlow qm data shape does not equal (40,)"
- assert (
- len(r.dtype.names) == 3
- ), "SwrFlow qm data dtype does not have 3 entries"
+ assert len(r.dtype.names) == 3, "SwrFlow qm data dtype does not have 3 entries"
# plt.bar(range(40), r['flow'])
# plt.show()
@@ -206,39 +175,27 @@ def test_swr_binary_qm(swr_test_path, ipos):
for kkk in kswrkstpkper:
r = sobj.get_data(kswrkstpkper=kkk)
- assert (
- r is not None
- ), "SwrFlow could not read data with get_data(kswrkstpkper=)"
+ assert r is not None, "SwrFlow could not read data with get_data(kswrkstpkper=)"
assert r.shape == (40,), "SwrFlow qm data shape does not equal (40,)"
- assert (
- len(r.dtype.names) == 3
- ), "SwrFlow qm data dtype does not have 3 entries"
+ assert len(r.dtype.names) == 3, "SwrFlow qm data dtype does not have 3 entries"
times = sobj.get_times()
assert len(times) == 336, "SwrFlow times length does not equal 336"
for time in times:
r = sobj.get_data(totim=time)
- assert (
- r is not None
- ), "SwrFlow could not read data with get_data(tottim=)"
+ assert r is not None, "SwrFlow could not read data with get_data(tottim=)"
assert r.shape == (40,), "SwrFlow qm data shape does not equal (40,)"
- assert (
- len(r.dtype.names) == 3
- ), "SwrFlow qm data dtype does not have 3 entries"
+ assert len(r.dtype.names) == 3, "SwrFlow qm data dtype does not have 3 entries"
ts = sobj.get_ts(irec=17, iconn=16)
- assert ts.shape == (
- 336,
- ), "SwrFlow qm timeseries shape does not equal (336,)"
+ assert ts.shape == (336,), "SwrFlow qm timeseries shape does not equal (336,)"
assert (
len(ts.dtype.names) == 3
), "SwrFlow time series qm data dtype does not have 3 entries"
ts2 = sobj.get_ts(irec=16, iconn=17)
- assert ts2.shape == (
- 336,
- ), "SwrFlow qm timeseries shape does not equal (336,)"
+ assert ts2.shape == (336,), "SwrFlow qm timeseries shape does not equal (336,)"
assert (
len(ts2.dtype.names) == 3
), "SwrFlow time series qm data dtype does not have 3 entries"
@@ -262,12 +219,8 @@ def test_swr_binary_qaq(swr_test_path, ipos):
for idx in range(ntimes):
r = sobj.get_data(idx=idx)
- assert (
- r is not None
- ), "SwrExchange could not read data with get_data(idx=)"
- assert r.shape == (
- 21,
- ), "SwrExchange qaq data shape does not equal (21,)"
+ assert r is not None, "SwrExchange could not read data with get_data(idx=)"
+ assert r.shape == (21,), "SwrExchange qaq data shape does not equal (21,)"
assert (
len(r.dtype.names) == 11
), "SwrExchange qaq data dtype does not have 11 entries"
@@ -286,9 +239,7 @@ def test_swr_binary_qaq(swr_test_path, ipos):
assert (
r is not None
), "SwrExchange could not read data with get_data(kswrkstpkper=)"
- assert r.shape == (
- 21,
- ), "SwrExchange qaq data shape does not equal (21,)"
+ assert r.shape == (21,), "SwrExchange qaq data shape does not equal (21,)"
assert (
len(r.dtype.names) == 11
), "SwrExchange qaq data dtype does not have 11 entries"
@@ -298,20 +249,14 @@ def test_swr_binary_qaq(swr_test_path, ipos):
for time in times:
r = sobj.get_data(totim=time)
- assert (
- r is not None
- ), "SwrExchange could not read data with get_data(tottim=)"
- assert r.shape == (
- 21,
- ), "SwrExchange qaq data shape does not equal (21,)"
+ assert r is not None, "SwrExchange could not read data with get_data(tottim=)"
+ assert r.shape == (21,), "SwrExchange qaq data shape does not equal (21,)"
assert (
len(r.dtype.names) == 11
), "SwrExchange qaq data dtype does not have 11 entries"
ts = sobj.get_ts(irec=17, klay=0)
- assert ts.shape == (
- 350,
- ), "SwrExchange timeseries shape does not equal (350,)"
+ assert ts.shape == (350,), "SwrExchange timeseries shape does not equal (350,)"
assert (
len(ts.dtype.names) == 11
), "SwrExchange time series qaq data dtype does not have 11 entries"
@@ -334,12 +279,8 @@ def test_swr_binary_structure(swr_test_path, ipos):
for idx in range(ntimes):
r = sobj.get_data(idx=idx)
- assert (
- r is not None
- ), "SwrStructure could not read data with get_data(idx=)"
- assert r.shape == (
- 2,
- ), "SwrStructure structure data shape does not equal (2,)"
+ assert r is not None, "SwrStructure could not read data with get_data(idx=)"
+ assert r.shape == (2,), "SwrStructure structure data shape does not equal (2,)"
assert (
len(r.dtype.names) == 8
), "SwrStructure structure data dtype does not have 8 entries"
@@ -355,9 +296,7 @@ def test_swr_binary_structure(swr_test_path, ipos):
assert (
r is not None
), "SwrStructure could not read data with get_data(kswrkstpkper=)"
- assert r.shape == (
- 2,
- ), "SwrStructure structure data shape does not equal (2,)"
+ assert r.shape == (2,), "SwrStructure structure data shape does not equal (2,)"
assert (
len(r.dtype.names) == 8
), "SwrStructure structure data dtype does not have 8 entries"
@@ -367,20 +306,14 @@ def test_swr_binary_structure(swr_test_path, ipos):
for time in times:
r = sobj.get_data(totim=time)
- assert (
- r is not None
- ), "SwrStructure could not read data with get_data(tottim=)"
- assert r.shape == (
- 2,
- ), "SwrStructure structure data shape does not equal (2,)"
+ assert r is not None, "SwrStructure could not read data with get_data(tottim=)"
+ assert r.shape == (2,), "SwrStructure structure data shape does not equal (2,)"
assert (
len(r.dtype.names) == 8
), "SwrStructure structure data dtype does not have 8 entries"
ts = sobj.get_ts(irec=17, istr=0)
- assert ts.shape == (
- 336,
- ), "SwrStructure timeseries shape does not equal (336,)"
+ assert ts.shape == (336,), "SwrStructure timeseries shape does not equal (336,)"
assert (
len(ts.dtype.names) == 8
), "SwrStructure time series structure data dtype does not have 8 entries"
@@ -410,41 +343,25 @@ def test_swr_binary_obs(swr_test_path, ipos):
assert len(times) == 336, "SwrFile times length does not equal 336"
ts = sobj.get_data()
- assert ts.shape == (
- 336,
- ), "SwrObs length of data array does not equal (336,)"
- assert (
- len(ts.dtype.names) == 10
- ), "SwrObs data does not have totim + 9 observations"
+ assert ts.shape == (336,), "SwrObs length of data array does not equal (336,)"
+ assert len(ts.dtype.names) == 10, "SwrObs data does not have totim + 9 observations"
ts = sobj.get_data(obsname="OBS5")
- assert ts.shape == (
- 336,
- ), "SwrObs length of data array does not equal (336,)"
- assert (
- len(ts.dtype.names) == 2
- ), "SwrObs data does not have totim + 1 observation"
+ assert ts.shape == (336,), "SwrObs length of data array does not equal (336,)"
+ assert len(ts.dtype.names) == 2, "SwrObs data does not have totim + 1 observation"
# plt.plot(ts['totim'], ts['OBS5'])
# plt.show()
for idx in range(ntimes):
d = sobj.get_data(idx=idx)
- assert d.shape == (
- 1,
- ), "SwrObs length of data array does not equal (1,)"
- assert (
- len(d.dtype.names) == nobs + 1
- ), "SwrObs data does not have nobs + 1"
+ assert d.shape == (1,), "SwrObs length of data array does not equal (1,)"
+ assert len(d.dtype.names) == nobs + 1, "SwrObs data does not have nobs + 1"
for time in times:
d = sobj.get_data(totim=time)
- assert d.shape == (
- 1,
- ), "SwrObs length of data array does not equal (1,)"
- assert (
- len(d.dtype.names) == nobs + 1
- ), "SwrObs data does not have nobs + 1"
+ assert d.shape == (1,), "SwrObs length of data array does not equal (1,)"
+ assert len(d.dtype.names) == nobs + 1, "SwrObs data does not have nobs + 1"
# test get_dataframes()
for idx in range(ntimes):
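The SwrStage access patterns exercised throughout this file, as a minimal sketch (the stage file name is hypothetical):

    from flopy.utils import SwrStage

    sobj = SwrStage("swr_stage.stg")
    ntimes = sobj.get_ntimes()
    r = sobj.get_data(idx=0)  # by time index
    r = sobj.get_data(totim=sobj.get_times()[-1])  # by total time
    ts = sobj.get_ts(irec=17)  # time series for a single reach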
diff --git a/autotest/test_template_writer.py b/autotest/test_template_writer.py
index f6e7ae39c9..cfd7c7a95a 100644
--- a/autotest/test_template_writer.py
+++ b/autotest/test_template_writer.py
@@ -56,7 +56,8 @@ def test_tpl_layered(function_tmpdir):
partype = "hk"
parname = "HK_LAYER_1-3"
- # Span indicates that the hk parameter applies as a multiplier to layers 0 and 2 (MODFLOW layers 1 and 3)
+ # Span indicates that the hk parameter applies as a multiplier to layers 0 and 2
+ # (MODFLOW layers 1 and 3)
span = {"layers": [0, 2]}
    # These parameters have no effect yet, but may in the future
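A minimal sketch of the layered-parameter setup this test exercises, assuming a simple three-layer model (names and bounds are illustrative):

    import flopy

    m = flopy.modflow.Modflow("tpl_demo")
    dis = flopy.modflow.ModflowDis(m, nlay=3, nrow=10, ncol=10)
    lpf = flopy.modflow.ModflowLpf(m, hk=10.0)

    # hk multiplier applied to layers 0 and 2 (MODFLOW layers 1 and 3)
    span = {"layers": [0, 2]}
    p = flopy.pest.Params("lpf", "hk", "HK_LAYER_1-3", 10.0, 0.1, 1000.0, span)
    flopy.pest.TemplateWriter(m, [p]).write_template()  # writes a PEST .tpl file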
diff --git a/autotest/test_usg.py b/autotest/test_usg.py
index 969ca6041d..f9e03c6db4 100644
--- a/autotest/test_usg.py
+++ b/autotest/test_usg.py
@@ -8,13 +8,7 @@
from autotest.conftest import get_example_data_path
from flopy.mfusg import MfUsg, MfUsgDisU, MfUsgLpf, MfUsgSms, MfUsgWel
-from flopy.modflow import (
- ModflowBas,
- ModflowDis,
- ModflowDrn,
- ModflowGhb,
- ModflowOc,
-)
+from flopy.modflow import ModflowBas, ModflowDis, ModflowDrn, ModflowGhb, ModflowOc
from flopy.utils import TemporalReference, Util2d, Util3d
@@ -60,9 +54,7 @@ def test_usg_disu_load(function_tmpdir, mfusg_01A_nestedgrid_nognc_model_path):
):
if isinstance(value1, (Util2d, Util3d)):
assert np.array_equal(value1.array, value2.array)
- elif isinstance(
- value1, list
- ): # this is for the jagged _get_neighbours list
+ elif isinstance(value1, list): # this is for the jagged _get_neighbours list
assert np.all([np.all(v1 == v2) for v1, v2 in zip(value1, value2)])
elif not isinstance(value1, TemporalReference):
assert value1 == value2
@@ -138,10 +130,7 @@ def test_usg_model(function_tmpdir):
@requires_exe("mfusg")
def test_usg_load_01B(function_tmpdir, mfusg_01A_nestedgrid_nognc_model_path):
- print(
- "testing 1-layer unstructured mfusg model "
- "loading: 01A_nestedgrid_nognc.nam"
- )
+ print("testing 1-layer unstructured mfusg model loading: 01A_nestedgrid_nognc.nam")
fname = mfusg_01A_nestedgrid_nognc_model_path / "flow.nam"
assert os.path.isfile(fname), f"nam file not found {fname}"
diff --git a/autotest/test_util_2d_and_3d.py b/autotest/test_util_2d_and_3d.py
index 2080458033..09056f1a51 100644
--- a/autotest/test_util_2d_and_3d.py
+++ b/autotest/test_util_2d_and_3d.py
@@ -51,9 +51,7 @@ def test_transient3d():
# Make a transient 3d array with changing entries and then verify that
# they can be reproduced through indexing
- a = np.arange((nlay * nrow * ncol), dtype=np.float32).reshape(
- (nlay, nrow, ncol)
- )
+ a = np.arange((nlay * nrow * ncol), dtype=np.float32).reshape((nlay, nrow, ncol))
t3d = {0: a, 2: 1025, 3: a, 4: 1000.0}
t3d = Transient3d(ml, (nlay, nrow, ncol), np.float32, t3d, "fake")
assert np.array_equal(t3d[0].array, a)
@@ -178,9 +176,7 @@ def stress_util2d(model_ws, ml, nlay, nrow, ncol):
files = os.listdir(ml.model_ws)
print("\n\nexternal files: " + ",".join(files) + "\n\n")
- ml1 = Modflow.load(
- ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False
- )
+ ml1 = Modflow.load(ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False)
print("testing load")
assert not ml1.load_fail
# check that both binary and cnstnt are being respected through
@@ -198,9 +194,7 @@ def stress_util2d(model_ws, ml, nlay, nrow, ncol):
else:
files = os.listdir(ml.model_ws)
print("\n\nexternal files: " + ",".join(files) + "\n\n")
- ml1 = Modflow.load(
- ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False
- )
+ ml1 = Modflow.load(ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False)
print("testing load")
assert not ml1.load_fail
assert np.array_equal(ml1.lpf.vka.array, vk * 2.0)
@@ -209,9 +203,7 @@ def stress_util2d(model_ws, ml, nlay, nrow, ncol):
# more binary testing
ml.lpf.vka[0]._array[0, 0] *= 3.0
ml.write_input()
- ml1 = Modflow.load(
- ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False
- )
+ ml1 = Modflow.load(ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False)
assert np.array_equal(ml.lpf.vka.array, ml1.lpf.vka.array)
assert np.array_equal(ml.lpf.hk.array, ml1.lpf.hk.array)
@@ -236,9 +228,7 @@ def stress_util2d_for_joe_the_file_king(ml, nlay, nrow, ncol):
assert np.array_equal(ml.lpf.hk.array, hk)
assert np.array_equal(ml.lpf.vka.array, vk * 2.0)
- ml1 = Modflow.load(
- ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False
- )
+ ml1 = Modflow.load(ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False)
print("testing load")
assert not ml1.load_fail
assert np.array_equal(ml1.lpf.vka.array, vk * 2.0)
@@ -249,9 +239,7 @@ def stress_util2d_for_joe_the_file_king(ml, nlay, nrow, ncol):
# more binary testing
ml.lpf.vka[0]._array[0, 0] *= 3.0
ml.write_input()
- ml1 = Modflow.load(
- ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False
- )
+ ml1 = Modflow.load(ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False)
assert np.array_equal(ml.lpf.vka.array, ml1.lpf.vka.array)
assert np.array_equal(ml.lpf.hk.array, ml1.lpf.hk.array)
@@ -433,9 +421,7 @@ def test_append_mflist(function_tmpdir):
wel2 = ModflowWel(ml, stress_period_data=sp_data2)
wel3 = ModflowWel(
ml,
- stress_period_data=wel2.stress_period_data.append(
- wel1.stress_period_data
- ),
+ stress_period_data=wel2.stress_period_data.append(wel1.stress_period_data),
)
ml.write_input()
@@ -457,18 +443,8 @@ def test_mflist(function_tmpdir, example_data_path):
for per, data in spd.data.items():
dfdata = (
df.xs(per, level="per")
- .dropna(
- subset=[
- "flux",
- ],
- axis=0,
- )
- .loc[
- :,
- [
- "flux",
- ],
- ]
+ .dropna(subset=["flux"], axis=0)
+ .loc[:, ["flux"]]
.to_records(index=True)
.astype(data.dtype)
)
@@ -617,12 +593,7 @@ def test_mflist_fromfile(function_tmpdir):
[(0, 1, 2, -50.0), (0, 5, 5, -50.0)], columns=["k", "i", "j", "flux"]
)
wpth = os.path.join(function_tmpdir, "wel_000.dat")
- wel_data.to_csv(
- wpth,
- index=False,
- sep=" ",
- header=False,
- )
+ wel_data.to_csv(wpth, index=False, sep=" ", header=False)
nwt_model = Modflow(
"nwt_testmodel",
diff --git a/autotest/test_util_geometry.py b/autotest/test_util_geometry.py
index 053ef2f631..6105688859 100644
--- a/autotest/test_util_geometry.py
+++ b/autotest/test_util_geometry.py
@@ -78,12 +78,8 @@ def test_point_in_polygon_faces():
xpts_v, ypts_v = list(zip(*cell))
xpts_v = np.array([xpts_v])
ypts_v = np.array([ypts_v])
- xpts = np.array(
- [[xpts_v[0, 0], xpts_v[0, 2], np.mean(xpts_v), np.mean(xpts_v)]]
- )
- ypts = np.array(
- [[np.mean(ypts_v), np.mean(ypts_v), ypts_v[0, 0], ypts_v[0, 2]]]
- )
+ xpts = np.array([[xpts_v[0, 0], xpts_v[0, 2], np.mean(xpts_v), np.mean(xpts_v)]])
+ ypts = np.array([[np.mean(ypts_v), np.mean(ypts_v), ypts_v[0, 0], ypts_v[0, 2]]])
mask = point_in_polygon(xpts, ypts, cell)
assert mask.sum() == 2 # only inner faces
debug_plot(grid, cell, xpts, ypts, mask)
diff --git a/autotest/test_uzf.py b/autotest/test_uzf.py
index 9726163f02..48be6c6ea2 100644
--- a/autotest/test_uzf.py
+++ b/autotest/test_uzf.py
@@ -52,18 +52,10 @@ def test_create_uzf(function_tmpdir, mf2005_test_path, uzf_test_path):
verbose=True,
)
rm = [True if ".uz" in f else False for f in m.external_fnames]
- m.external_fnames = [
- f for i, f in enumerate(m.external_fnames) if not rm[i]
- ]
- m.external_binflag = [
- f for i, f in enumerate(m.external_binflag) if not rm[i]
- ]
- m.external_output = [
- f for i, f in enumerate(m.external_output) if not rm[i]
- ]
- m.external_units = [
- f for i, f in enumerate(m.external_output) if not rm[i]
- ]
+ m.external_fnames = [f for i, f in enumerate(m.external_fnames) if not rm[i]]
+ m.external_binflag = [f for i, f in enumerate(m.external_binflag) if not rm[i]]
+ m.external_output = [f for i, f in enumerate(m.external_output) if not rm[i]]
+    m.external_units = [f for i, f in enumerate(m.external_units) if not rm[i]]
datpth = uzf_test_path
irnbndpth = os.path.join(datpth, "irunbnd.dat")
@@ -198,16 +190,11 @@ def test_create_uzf(function_tmpdir, mf2005_test_path, uzf_test_path):
assert np.abs(np.sum(uzf.vks.array) / uzf.vks.cnstnt - 116.0) < 1e-5
assert uzf.eps._Util2d__value == 3.5
assert np.abs(uzf.thts._Util2d__value - 0.30) < 1e-5
- assert (
- np.abs(np.sum(uzf.extwc[0].array) / uzf.extwc[0].cnstnt - 176.0) < 1e4
- )
+ assert np.abs(np.sum(uzf.extwc[0].array) / uzf.extwc[0].cnstnt - 176.0) < 1e4
for per in [0, 1]:
assert np.abs(uzf.pet[per]._Util2d__value - 5e-8) < 1e-10
for per in range(m.nper):
- assert (
- np.abs(np.sum(uzf.finf[per].array) / uzf.finf[per].cnstnt - 339.0)
- < 1e4
- )
+ assert np.abs(np.sum(uzf.finf[per].array) / uzf.finf[per].cnstnt - 339.0) < 1e4
assert True
uzf.write_file()
m2 = Modflow("UZFtest2_2", model_ws=ws)
@@ -226,20 +213,16 @@ def test_create_uzf(function_tmpdir, mf2005_test_path, uzf_test_path):
for i, a in enumerate(a1):
assert a == l2[i]
- # load uzf test problem for nwt model with 'nwt_11_fmt'-style options and 'open/close' array types
+ # load uzf test problem for nwt model with 'nwt_11_fmt'-style options
+ # and 'open/close' array types
tpth = uzf_test_path / "load_uzf_for_nwt"
- [
- shutil.copy(os.path.join(tpth, f), os.path.join(ws, f))
- for f in os.listdir(tpth)
- ]
+ [shutil.copy(os.path.join(tpth, f), os.path.join(ws, f)) for f in os.listdir(tpth)]
m3 = Modflow("UZFtest3", version="mfnwt", verbose=True)
m3.model_ws = ws
dis = ModflowDis.load(os.path.join(tpth, "UZFtest3.dis"), m3)
uzf = ModflowUzf1.load(os.path.join(tpth, "UZFtest3.uzf"), m3)
assert np.sum(uzf.iuzfbnd.array) == 28800
- assert np.isclose(
- np.sum(uzf.finf.array) / uzf.finf[per].cnstnt, 13.7061, atol=1e-4
- )
+ assert np.isclose(np.sum(uzf.finf.array) / uzf.finf[per].cnstnt, 13.7061, atol=1e-4)
@requires_exe("mfnwt")
@@ -301,18 +284,10 @@ def test_read_write_nwt_options(function_tmpdir):
uzfopt.write_options(os.path.join(ws, "uzfopt.txt"))
sfropt.write_options(os.path.join(ws, "sfropt.txt"))
- welopt = OptionBlock.load_options(
- os.path.join(ws, "welopt.txt"), ModflowWel
- )
- welopt2 = OptionBlock.load_options(
- os.path.join(ws, "welopt2.txt"), ModflowWel
- )
- uzfopt = OptionBlock.load_options(
- os.path.join(ws, "uzfopt.txt"), ModflowUzf1
- )
- sfropt = OptionBlock.load_options(
- os.path.join(ws, "sfropt.txt"), ModflowSfr2
- )
+ welopt = OptionBlock.load_options(os.path.join(ws, "welopt.txt"), ModflowWel)
+ welopt2 = OptionBlock.load_options(os.path.join(ws, "welopt2.txt"), ModflowWel)
+ uzfopt = OptionBlock.load_options(os.path.join(ws, "uzfopt.txt"), ModflowUzf1)
+ sfropt = OptionBlock.load_options(os.path.join(ws, "sfropt.txt"), ModflowSfr2)
assert repr(welopt) == welstr
assert repr(welopt2) == welstr2
@@ -473,9 +448,7 @@ def test_load_write_uzf_option_block(function_tmpdir, options_path):
uzf2.write_file(os.path.join(function_tmpdir, uzf_name2))
ml.remove_package("UZF")
- uzf3 = ModflowUzf1.load(
- os.path.join(function_tmpdir, uzf_name2), ml, check=False
- )
+ uzf3 = ModflowUzf1.load(os.path.join(function_tmpdir, uzf_name2), ml, check=False)
assert uzf3.options.smoothfact == 0.4
assert uzf3.smoothfact == 0.4
@@ -507,9 +480,7 @@ def test_load_write_uzf_option_line(function_tmpdir, options_path):
uzf.write_file(os.path.join(function_tmpdir, uzf_name2))
ml.remove_package("UZF")
- uzf2 = ModflowUzf1.load(
- os.path.join(function_tmpdir, uzf_name2), ml, check=False
- )
+ uzf2 = ModflowUzf1.load(os.path.join(function_tmpdir, uzf_name2), ml, check=False)
assert uzf2.nosurfleak
assert uzf2.etsquare
@@ -618,10 +589,7 @@ def test_uzf_negative_iuzfopt(function_tmpdir):
steady=[False, False],
)
bas = ModflowBas(ml, strt=9, ibound=1)
- upw = ModflowUpw(
- ml,
- vka=0.1,
- )
+ upw = ModflowUpw(ml, vka=0.1)
oc = ModflowOc(ml)
nwt = ModflowNwt(ml, options="SIMPLE")
@@ -643,9 +611,7 @@ def test_uzf_negative_iuzfopt(function_tmpdir):
success, buff = ml.run_model()
assert success, "UZF model with -1 iuzfopt failed to run"
- ml2 = Modflow.load(
- "uzf_neg.nam", version="mfnwt", model_ws=function_tmpdir
- )
+ ml2 = Modflow.load("uzf_neg.nam", version="mfnwt", model_ws=function_tmpdir)
np.testing.assert_array_equal(
ml2.uzf.pet.array, np.full((2, 1, 10, 10), 0.1, np.float32)
@@ -656,15 +622,15 @@ def test_uzf_negative_iuzfopt(function_tmpdir):
def test_optionsblock_auxillary_typo():
- # Incorrect: auxillary
+ # Incorrect: auxillary # codespell:ignore
# Correct: auxiliary
options = OptionBlock("", ModflowWel, block=True)
assert options.auxiliary == []
with pytest.deprecated_call():
- assert options.auxillary == []
+ assert options.auxillary == [] # codespell:ignore
with pytest.deprecated_call():
- options.auxillary = ["aux", "iface"]
+ options.auxillary = ["aux", "iface"] # codespell:ignore
assert options.auxiliary == ["aux", "iface"]
options.auxiliary = []
with pytest.deprecated_call():
- assert options.auxillary == []
+ assert options.auxillary == [] # codespell:ignore
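For context, a minimal sketch of the OptionBlock round trip covered by these tests (the options file name is hypothetical):

    from flopy.modflow import ModflowWel
    from flopy.utils.optionblock import OptionBlock

    # load a previously written NWT-style options block for the WEL package
    welopt = OptionBlock.load_options("welopt.txt", ModflowWel)
    print(repr(welopt))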
diff --git a/autotest/test_zonbud_utility.py b/autotest/test_zonbud_utility.py
index 86991e6b26..cf492c543d 100644
--- a/autotest/test_zonbud_utility.py
+++ b/autotest/test_zonbud_utility.py
@@ -39,11 +39,7 @@ def read_zonebudget_file(fname):
# Read time step information for this block
if "Time Step" in line:
- kstp, kper, totim = (
- int(items[1]) - 1,
- int(items[3]) - 1,
- float(items[5]),
- )
+ kstp, kper, totim = (int(items[1]) - 1, int(items[3]) - 1, float(items[5]))
continue
# Get names of zones
@@ -63,11 +59,7 @@ def read_zonebudget_file(fname):
continue
# Get mass-balance information for this block
- elif (
- "Total" in items[0]
- or "IN-OUT" in items[0]
- or "Percent Error" in items[0]
- ):
+ elif "Total" in items[0] or "IN-OUT" in items[0] or "Percent Error" in items[0]:
continue
# End of block
@@ -78,12 +70,7 @@ def read_zonebudget_file(fname):
if record.startswith(("FROM_", "TO_")):
record = "_".join(record.split("_")[1:])
vals = [float(i) for i in items[1:-1]]
- row = (
- totim,
- kstp,
- kper,
- record,
- ) + tuple(v for v in vals)
+ row = (totim, kstp, kper, record) + tuple(v for v in vals)
rows.append(row)
dtype_list = [
("totim", float),
@@ -119,8 +106,8 @@ def test_compare2zonebudget(cbc_f, zon_f, zbud_f, rtol):
continue
if r1[0].shape[0] != r2[0].shape[0]:
continue
- a1 = np.array([v for v in zb_arr[zonenames][r1[0]][0]])
- a2 = np.array([v for v in fp_arr[zonenames][r2[0]][0]])
+ a1 = np.array(list(zb_arr[zonenames][r1[0]][0]))
+ a2 = np.array(list(fp_arr[zonenames][r2[0]][0]))
allclose = np.allclose(a1, a2, rtol)
mxdiff = np.abs(a1 - a2).max()
@@ -147,9 +134,7 @@ def test_zonbud_aliases(cbc_f, zon_f):
"""
zon = ZoneBudget.read_zone_file(zon_f)
aliases = {1: "Trey", 2: "Mike", 4: "Wilson", 0: "Carini"}
- zb = ZoneBudget(
- cbc_f, zon, kstpkper=(0, 1096), aliases=aliases, verbose=True
- )
+ zb = ZoneBudget(cbc_f, zon, kstpkper=(0, 1096), aliases=aliases, verbose=True)
bud = zb.get_budget()
assert bud[bud["name"] == "FROM_Mike"].shape[0] > 0, "No records returned."
@@ -195,9 +180,7 @@ def test_zonbud_readwrite_zbarray(function_tmpdir):
"""
x = np.random.randint(100, 200, size=(5, 150, 200))
ZoneBudget.write_zone_file(function_tmpdir / "randint", x)
- ZoneBudget.write_zone_file(
- function_tmpdir / "randint", x, fmtin=35, iprn=2
- )
+ ZoneBudget.write_zone_file(function_tmpdir / "randint", x, fmtin=35, iprn=2)
z = ZoneBudget.read_zone_file(function_tmpdir / "randint")
assert np.array_equal(x, z), "Input and output arrays do not match."
@@ -328,7 +311,7 @@ def test_zonebudget_6(function_tmpdir, example_data_path):
df = zb.get_dataframes()
- assert list(df)[0] == "test_alias", "Alias testing failed"
+ assert next(iter(df)) == "test_alias", "Alias testing failed"
@pytest.mark.mf6
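A minimal sketch of the ZoneBudget calls reformatted above (file names are hypothetical):

    import numpy as np
    from flopy.utils import ZoneBudget

    # round-trip a zone array through a zone file
    zones = np.random.randint(100, 200, size=(5, 150, 200))
    ZoneBudget.write_zone_file("zones.zbr", zones, fmtin=35, iprn=2)
    zon = ZoneBudget.read_zone_file("zones.zbr")

    # report budget records under zone aliases instead of zone numbers
    aliases = {1: "Trey", 2: "Mike", 4: "Wilson", 0: "Carini"}
    zb = ZoneBudget("model.cbc", zon, kstpkper=(0, 1096), aliases=aliases)
    bud = zb.get_budget()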
diff --git a/docs/PyPI_release.md b/docs/PyPI_release.md
index a7e4751714..5a63b8b591 100644
--- a/docs/PyPI_release.md
+++ b/docs/PyPI_release.md
@@ -30,4 +30,4 @@ How to Cite
*Software/Code citation for FloPy:*
-[Bakker, Mark, Post, Vincent, Hughes, J. D., Langevin, C. D., White, J. T., Leaf, A. T., Paulinski, S. R., Bellino, J. C., Morway, E. D., Toews, M. W., Larsen, J. D., Fienen, M. N., Starn, J. J., Brakenhoff, D. A., and Bonelli, W. P., 2024, FloPy v3.8.2: U.S. Geological Survey Software Release, 03 October 2024, https://doi.org/10.5066/F7BK19FH](https://doi.org/10.5066/F7BK19FH)
+[Bakker, Mark, Post, Vincent, Hughes, J. D., Langevin, C. D., White, J. T., Leaf, A. T., Paulinski, S. R., Bellino, J. C., Morway, E. D., Toews, M. W., Larsen, J. D., Fienen, M. N., Starn, J. J., Brakenhoff, D. A., and Bonelli, W. P., 2024, FloPy v3.9.0: U.S. Geological Survey Software Release, 20 December 2024, https://doi.org/10.5066/F7BK19FH](https://doi.org/10.5066/F7BK19FH)
diff --git a/etc/environment.yml b/etc/environment.yml
index 504f80c86c..1128785219 100644
--- a/etc/environment.yml
+++ b/etc/environment.yml
@@ -5,7 +5,7 @@ dependencies:
- pip
# required
- - python>=3.8
+ - python>=3.9
- numpy>=1.20.3
- matplotlib>=1.4.0
- pandas>=2.0.0
@@ -15,7 +15,7 @@ dependencies:
- ruff
# test
- - coverage
+ - coverage!=7.6.5
- flaky
- filelock
- jupyter
@@ -40,7 +40,7 @@ dependencies:
- fiona
- descartes
- pyproj
- - shapely>=1.8
+ - shapely>=2.0
- geos
- geojson
- vtk
diff --git a/flopy/__init__.py b/flopy/__init__.py
index 6eb8fc5411..56cc0da7ee 100644
--- a/flopy/__init__.py
+++ b/flopy/__init__.py
@@ -43,18 +43,18 @@
__all__ = [
"__author__",
"__version__",
+ "discretization",
+ "export",
+ "mf6",
"modflow",
- "mt3d",
- "seawat",
- "modpath",
"modflowlgr",
"modflowusg",
- "utils",
- "plot",
- "export",
+ "modpath",
+ "mt3d",
"pest",
- "mf6",
- "discretization",
+ "plot",
"run_model",
+ "seawat",
+ "utils",
"which",
]
diff --git a/flopy/discretization/grid.py b/flopy/discretization/grid.py
index e2346e574f..544c3fc1bb 100644
--- a/flopy/discretization/grid.py
+++ b/flopy/discretization/grid.py
@@ -425,9 +425,7 @@ def cell_thickness(self):
def thick(self):
"""Raises AttributeError, use :meth:`cell_thickness`."""
# DEPRECATED since version 3.4.0
- raise AttributeError(
- "'thick' has been removed; use 'cell_thickness()'"
- )
+ raise AttributeError("'thick' has been removed; use 'cell_thickness()'")
def saturated_thickness(self, array, mask=None):
"""
@@ -563,8 +561,7 @@ def zcellcenters(self):
@property
def xyzcellcenters(self):
raise NotImplementedError(
- "must define get_cellcenters in child "
- "class to use this base class"
+ "must define get_cellcenters in child class to use this base class"
)
@property
@@ -626,9 +623,7 @@ def convert_grid(self, factor):
-------
Grid object
"""
- raise NotImplementedError(
- "convert_grid must be defined in the child class"
- )
+ raise NotImplementedError("convert_grid must be defined in the child class")
def _set_neighbors(self, reset=False, method="rook"):
"""
@@ -647,8 +642,8 @@ def _set_neighbors(self, reset=False, method="rook"):
"""
if self._neighbors is None or reset:
node_num = 0
- neighbors = {i: list() for i in range(len(self.iverts))}
- edge_set = {i: list() for i in range(len(self.iverts))}
+ neighbors = {i: [] for i in range(len(self.iverts))}
+ edge_set = {i: [] for i in range(len(self.iverts))}
geoms = []
node_nums = []
if method == "rook":
@@ -686,9 +681,7 @@ def _set_neighbors(self, reset=False, method="rook"):
pass
        # use a dict to create a set that preserves insertion order
- self._neighbors = {
- i: list(dict.fromkeys(v)) for i, v in neighbors.items()
- }
+ self._neighbors = {i: list(dict.fromkeys(v)) for i, v in neighbors.items()}
self._edge_set = edge_set
def neighbors(self, node=None, **kwargs):
@@ -878,7 +871,7 @@ def map_polygons(self):
def get_lni(self, nodes):
"""
- Get the layer index and within-layer node index (both 0-based) for the given nodes
+ Get the 0-based layer index and within-layer node index for the given nodes
Parameters
----------
@@ -939,9 +932,7 @@ def get_coords(self, x, y):
x += self._xoff
y += self._yoff
- return geometry.rotate(
- x, y, self._xoff, self._yoff, self.angrot_radians
- )
+ return geometry.rotate(x, y, self._xoff, self._yoff, self.angrot_radians)
def get_local_coords(self, x, y):
"""
@@ -1218,9 +1209,7 @@ def _zcoords(self):
if self.top is not None and self.botm is not None:
zcenters = []
top_3d = np.expand_dims(self.top, 0)
- zbdryelevs = np.concatenate(
- (top_3d, np.atleast_2d(self.botm)), axis=0
- )
+ zbdryelevs = np.concatenate((top_3d, np.atleast_2d(self.botm)), axis=0)
for ix in range(1, len(zbdryelevs)):
zcenters.append((zbdryelevs[ix - 1] + zbdryelevs[ix]) / 2.0)
@@ -1230,9 +1219,7 @@ def _zcoords(self):
return zbdryelevs, zcenters
# Exporting
- def write_shapefile(
- self, filename="grid.shp", crs=None, prjfile=None, **kwargs
- ):
+ def write_shapefile(self, filename="grid.shp", crs=None, prjfile=None, **kwargs):
"""
Write a shapefile of the grid with just the row and column attributes.
@@ -1262,6 +1249,4 @@ def write_shapefile(
# initialize grid from a grb file
@classmethod
def from_binary_grid_file(cls, file_path, verbose=False):
- raise NotImplementedError(
- "must define from_binary_grid_file in child class"
- )
+ raise NotImplementedError("must define from_binary_grid_file in child class")
diff --git a/flopy/discretization/structuredgrid.py b/flopy/discretization/structuredgrid.py
index 59834b3826..64b0f0d0b1 100644
--- a/flopy/discretization/structuredgrid.py
+++ b/flopy/discretization/structuredgrid.py
@@ -222,11 +222,7 @@ def is_valid(self):
@property
def is_complete(self):
- if (
- self.__delc is not None
- and self.__delr is not None
- and super().is_complete
- ):
+ if self.__delc is not None and self.__delr is not None and super().is_complete:
return True
return False
@@ -353,9 +349,7 @@ def xyzvertices(self):
pass
xgrid, ygrid = self.get_coords(xgrid, ygrid)
if zgrid is not None:
- self._cache_dict[cache_index] = CachedData(
- [xgrid, ygrid, zgrid]
- )
+ self._cache_dict[cache_index] = CachedData([xgrid, ygrid, zgrid])
else:
self._cache_dict[cache_index] = CachedData([xgrid, ygrid])
@@ -397,9 +391,7 @@ def zedges(self):
cache_index not in self._cache_dict
or self._cache_dict[cache_index].out_of_date
):
- zedges = np.concatenate(
- (np.array([self.top[0, 0]]), self.botm[:, 0, 0])
- )
+ zedges = np.concatenate((np.array([self.top[0, 0]]), self.botm[:, 0, 0]))
self._cache_dict[cache_index] = CachedData(zedges)
if self._copy_cache:
return self._cache_dict[cache_index].data
@@ -480,9 +472,7 @@ def xyzcellcenters(self):
if np.any(quasi3d):
ibs[1:] = ibs[1:] + np.cumsum(quasi3d)[: self.__nlay - 1]
for l, ib in enumerate(ibs[1:], 1):
- z[l, :, :] = (
- self._botm[ib - 1, :, :] + self._botm[ib, :, :]
- ) / 2.0
+ z[l, :, :] = (self._botm[ib - 1, :, :] + self._botm[ib, :, :]) / 2.0
else:
z = None
if self._has_ref_coordinates:
@@ -531,9 +521,7 @@ def grid_lines(self):
if self._has_ref_coordinates:
lines_trans = []
for ln in lines:
- lines_trans.append(
- [self.get_coords(*ln[0]), self.get_coords(*ln[1])]
- )
+ lines_trans.append([self.get_coords(*ln[0]), self.get_coords(*ln[1])])
return lines_trans
return lines
@@ -597,15 +585,15 @@ def is_regular_z(self):
rel_tol = 1.0e-5
# regularity test in z direction
- rel_diff_thick0 = (
- self.delz[0, :, :] - self.delz[0, 0, 0]
- ) / self.delz[0, 0, 0]
+ rel_diff_thick0 = (self.delz[0, :, :] - self.delz[0, 0, 0]) / self.delz[
+ 0, 0, 0
+ ]
failed = np.abs(rel_diff_thick0) > rel_tol
is_regular_z = np.count_nonzero(failed) == 0
for k in range(1, self.nlay):
- rel_diff_zk = (
- self.delz[k, :, :] - self.delz[0, :, :]
- ) / self.delz[0, :, :]
+ rel_diff_zk = (self.delz[k, :, :] - self.delz[0, :, :]) / self.delz[
+ 0, :, :
+ ]
failed = np.abs(rel_diff_zk) > rel_tol
is_regular_z = is_regular_z and np.count_nonzero(failed) == 0
@@ -633,9 +621,7 @@ def is_regular_xy(self):
first_equal = np.abs(rel_diff_0) <= rel_tol
# combine with regularity tests in x and z directions
- is_regular_xy = (
- first_equal and self.is_regular_x and self.is_regular_y
- )
+ is_regular_xy = first_equal and self.is_regular_x and self.is_regular_y
self._cache_dict[cache_index] = CachedData(is_regular_xy)
if self._copy_cache:
@@ -661,9 +647,7 @@ def is_regular_xz(self):
first_equal = np.abs(rel_diff_0) <= rel_tol
# combine with regularity tests in x and z directions
- is_regular_xz = (
- first_equal and self.is_regular_x and self.is_regular_z
- )
+ is_regular_xz = first_equal and self.is_regular_x and self.is_regular_z
self._cache_dict[cache_index] = CachedData(is_regular_xz)
if self._copy_cache:
@@ -689,9 +673,7 @@ def is_regular_yz(self):
first_equal = np.abs(rel_diff_0) <= rel_tol
# combine with regularity tests in x and y directions
- is_regular_yz = (
- first_equal and self.is_regular_y and self.is_regular_z
- )
+ is_regular_yz = first_equal and self.is_regular_y and self.is_regular_z
self._cache_dict[cache_index] = CachedData(is_regular_yz)
if self._copy_cache:
@@ -717,9 +699,7 @@ def is_regular(self):
first_equal = np.abs(rel_diff_0) <= rel_tol
# combine with regularity tests in x, y and z directions
- is_regular = (
- first_equal and self.is_regular_z and self.is_regular_xy
- )
+ is_regular = first_equal and self.is_regular_z and self.is_regular_xy
self._cache_dict[cache_index] = CachedData(is_regular)
if self._copy_cache:
@@ -744,9 +724,9 @@ def is_rectilinear(self):
# rectilinearity test in z direction
is_rect_z = True
for k in range(self.nlay):
- rel_diff_zk = (
- self.delz[k, :, :] - self.delz[k, 0, 0]
- ) / self.delz[k, 0, 0]
+ rel_diff_zk = (self.delz[k, :, :] - self.delz[k, 0, 0]) / self.delz[
+ k, 0, 0
+ ]
failed = np.abs(rel_diff_zk) > rel_tol
is_rect_z = is_rect_z and np.count_nonzero(failed) == 0
@@ -815,9 +795,7 @@ def convert_grid(self, factor):
angrot=self.angrot,
)
else:
- raise AssertionError(
- "Grid is not complete and cannot be converted"
- )
+ raise AssertionError("Grid is not complete and cannot be converted")
###############
### Methods ###
@@ -930,9 +908,7 @@ def intersect(self, x, y, z=None, local=False, forgive=False):
if forgive:
col = np.nan
else:
- raise Exception(
- "x, y point given is outside of the model area"
- )
+ raise Exception("x, y point given is outside of the model area")
else:
col = np.asarray(xcomp).nonzero()[0][-1]
@@ -941,9 +917,7 @@ def intersect(self, x, y, z=None, local=False, forgive=False):
if forgive:
row = np.nan
else:
- raise Exception(
- "x, y point given is outside of the model area"
- )
+ raise Exception("x, y point given is outside of the model area")
else:
row = np.asarray(ycomp).nonzero()[0][-1]
if np.any(np.isnan([row, col])):
@@ -1022,9 +996,7 @@ def get_cell_vertices(self, *args, **kwargs):
"""
if kwargs:
if args:
- raise TypeError(
- "mixed positional and keyword arguments not supported"
- )
+ raise TypeError("mixed positional and keyword arguments not supported")
elif "node" in kwargs:
_, i, j = self.get_lrc(kwargs.pop("node"))[0]
elif "i" in kwargs and "j" in kwargs:
@@ -1229,10 +1201,7 @@ def array_at_verts(self, a):
zcenters = self.zcellcenters
if self._idomain is not None:
zcenters = np.where(inactive, np.nan, zcenters)
- if (
- not self.is_rectilinear
- or np.count_nonzero(np.isnan(zcenters)) != 0
- ):
+ if not self.is_rectilinear or np.count_nonzero(np.isnan(zcenters)) != 0:
zedges = np.nanmean(self.top_botm_withnan, axis=(1, 2))
else:
zedges = self.top_botm_withnan[:, 0, 0]
@@ -1289,9 +1258,7 @@ def array_at_verts(self, a):
xyoutput[:, 0] = youtput[0, :, :].ravel()
xyoutput[:, 1] = xoutput[0, :, :].ravel()
averts2d = interp_func(xyoutput)
- averts2d = averts2d.reshape(
- (1, self.nrow + 1, self.ncol + 1)
- )
+ averts2d = averts2d.reshape((1, self.nrow + 1, self.ncol + 1))
averts = averts2d * np.ones(shape_verts)
elif self.nrow == 1:
# in this case we need a 2d interpolation in the x, z plane
@@ -1307,9 +1274,7 @@ def array_at_verts(self, a):
xzoutput[:, 0] = zoutput[:, 0, :].ravel()
xzoutput[:, 1] = xoutput[:, 0, :].ravel()
averts2d = interp_func(xzoutput)
- averts2d = averts2d.reshape(
- (self.nlay + 1, 1, self.ncol + 1)
- )
+ averts2d = averts2d.reshape((self.nlay + 1, 1, self.ncol + 1))
averts = averts2d * np.ones(shape_verts)
elif self.ncol == 1:
# in this case we need a 2d interpolation in the y, z plane
@@ -1325,9 +1290,7 @@ def array_at_verts(self, a):
yzoutput[:, 0] = zoutput[:, :, 0].ravel()
yzoutput[:, 1] = youtput[:, :, 0].ravel()
averts2d = interp_func(yzoutput)
- averts2d = averts2d.reshape(
- (self.nlay + 1, self.nrow + 1, 1)
- )
+ averts2d = averts2d.reshape((self.nlay + 1, self.nrow + 1, 1))
averts = averts2d * np.ones(shape_verts)
else:
# 3d interpolation
@@ -1797,11 +1760,7 @@ def from_binary_grid_file(cls, file_path, verbose=False):
yorigin = grb_obj.yorigin
angrot = grb_obj.angrot
- nlay, nrow, ncol = (
- grb_obj.nlay,
- grb_obj.nrow,
- grb_obj.ncol,
- )
+ nlay, nrow, ncol = (grb_obj.nlay, grb_obj.nrow, grb_obj.ncol)
delr, delc = grb_obj.delr, grb_obj.delc
top, botm = grb_obj.top, grb_obj.bot
top.shape = (nrow, ncol)
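A minimal sketch of the classmethod touched above (the .grb path is hypothetical):

    from flopy.discretization import StructuredGrid

    # build grid geometry directly from a MODFLOW 6 binary grid file
    grid = StructuredGrid.from_binary_grid_file("model.dis.grb")
    print(grid.nlay, grid.nrow, grid.ncol)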
diff --git a/flopy/discretization/unstructuredgrid.py b/flopy/discretization/unstructuredgrid.py
index ed0a201930..dd24107567 100644
--- a/flopy/discretization/unstructuredgrid.py
+++ b/flopy/discretization/unstructuredgrid.py
@@ -273,18 +273,14 @@ def cell2d(self):
@property
def iverts(self):
if self._iverts is not None:
- return [
- [ivt for ivt in t if ivt is not None] for t in self._iverts
- ]
+ return [[ivt for ivt in t if ivt is not None] for t in self._iverts]
@property
def verts(self):
if self._vertices is None:
return self._vertices
else:
- verts = np.array(
- [list(t)[1:] for t in self._vertices], dtype=float
- ).T
+ verts = np.array([list(t)[1:] for t in self._vertices], dtype=float).T
x, y = transform(
verts[0],
verts[1],
@@ -578,8 +574,7 @@ def map_polygons(self):
self._polygons[ilay].append(p)
else:
self._polygons = [
- Path(self.get_cell_vertices(nn))
- for nn in range(self.ncpl[0])
+ Path(self.get_cell_vertices(nn)) for nn in range(self.ncpl[0])
]
return copy.copy(self._polygons)
@@ -623,12 +618,12 @@ def neighbors(self, node=None, **kwargs):
reset = kwargs.pop("reset", False)
if method == "iac":
if self._neighbors is None or reset:
- neighors = {}
+ neighbors = {}
idx0 = 0
for node, ia in enumerate(self._iac):
idx1 = idx0 + ia
- neighors[node] = list(self._ja[idx0 + 1 : idx1])
- self._neighbors = neighors
+ neighbors[node] = list(self._ja[idx0 + 1 : idx1])
+ self._neighbors = neighbors
if node is not None:
return self._neighbors[node]
else:
@@ -650,10 +645,7 @@ def convert_grid(self, factor):
"""
if self.is_complete:
return UnstructuredGrid(
- vertices=[
- [i[0], i[1] * factor, i[2] * factor]
- for i in self._vertices
- ],
+ vertices=[[i[0], i[1] * factor, i[2] * factor] for i in self._vertices],
iverts=self._iverts,
xcenters=self._xc * factor,
ycenters=self._yc * factor,
@@ -665,9 +657,7 @@ def convert_grid(self, factor):
angrot=self.angrot,
)
else:
- raise AssertionError(
- "Grid is not complete and cannot be converted"
- )
+ raise AssertionError("Grid is not complete and cannot be converted")
def clean_iverts(self, inplace=False):
"""
@@ -691,9 +681,7 @@ def clean_iverts(self, inplace=False):
if vert in vset:
vset[vert].add(rec[0])
else:
- vset[vert] = {
- rec[0],
- }
+ vset[vert] = {rec[0]}
cnt = 0
ivert_remap = {}
@@ -877,9 +865,7 @@ def _build_grid_geometry_info(self):
xvertices = xvertxform
yvertices = yvertxform
- self._cache_dict[cache_index_cc] = CachedData(
- [xcenters, ycenters, zcenters]
- )
+ self._cache_dict[cache_index_cc] = CachedData([xcenters, ycenters, zcenters])
self._cache_dict[cache_index_vert] = CachedData(
[xvertices, yvertices, zvertices]
)
@@ -1149,9 +1135,7 @@ def from_gridspec(cls, file_path: Union[str, os.PathLike]):
with open(file_path) as file:
def split_line():
- return [
- head.upper() for head in file.readline().strip().split()
- ]
+ return [head.upper() for head in file.readline().strip().split()]
header = split_line()
while header[0][0] == "#":
@@ -1191,16 +1175,12 @@ def split_line():
verts_provided = len(line) - 6
if verts_declared != verts_provided:
raise ValueError(
- f"Cell {nn} declares {verts_declared} vertices but provides {verts_provided}"
+ f"Cell {nn} declares {verts_declared} vertices "
+ f"but provides {verts_provided}"
)
- verts = [
- int(vert) - 1 for vert in line[6 : 6 + verts_declared]
- ]
- elevs = [
- zverts[int(line[i]) - 1]
- for i in range(6, 6 + verts_declared)
- ]
+ verts = [int(vert) - 1 for vert in line[6 : 6 + verts_declared]]
+ elevs = [zverts[int(line[i]) - 1] for i in range(6, 6 + verts_declared)]
xcenters.append(xc)
ycenters.append(yc)
diff --git a/flopy/discretization/vertexgrid.py b/flopy/discretization/vertexgrid.py
index c45c4e245e..7e811f506c 100644
--- a/flopy/discretization/vertexgrid.py
+++ b/flopy/discretization/vertexgrid.py
@@ -167,16 +167,12 @@ def iverts(self):
@property
def cell1d(self):
if self._cell1d is not None:
- return [
- [ivt for ivt in t if ivt is not None] for t in self._cell1d
- ]
+ return [[ivt for ivt in t if ivt is not None] for t in self._cell1d]
@property
def cell2d(self):
if self._cell2d is not None:
- return [
- [ivt for ivt in t if ivt is not None] for t in self._cell2d
- ]
+ return [[ivt for ivt in t if ivt is not None] for t in self._cell2d]
@property
def verts(self):
@@ -226,7 +222,7 @@ def grid_lines(self):
if self.cell1d is not None:
close_cell = False
- # go through each cell and create a line segement for each face
+ # go through each cell and create a line segment for each face
lines = []
ncpl = len(xgrid)
for icpl in range(ncpl):
@@ -241,9 +237,7 @@ def grid_lines(self):
]
)
if close_cell:
- lines.append(
- [(xcoords[-1], ycoords[-1]), (xcoords[0], ycoords[0])]
- )
+ lines.append([(xcoords[-1], ycoords[-1]), (xcoords[0], ycoords[0])])
self._copy_cache = True
return lines
@@ -336,13 +330,9 @@ def convert_grid(self, factor):
"""
if self.is_complete:
return VertexGrid(
- vertices=[
- [i[0], i[1] * factor, i[2] * factor]
- for i in self._vertices
- ],
+ vertices=[[i[0], i[1] * factor, i[2] * factor] for i in self._vertices],
cell2d=[
- [i[0], i[1] * factor, i[2] * factor] + i[3:]
- for i in self._cell2d
+ [i[0], i[1] * factor, i[2] * factor] + i[3:] for i in self._cell2d
],
top=self.top * factor,
botm=self.botm * factor,
@@ -352,9 +342,7 @@ def convert_grid(self, factor):
angrot=self.angrot,
)
else:
- raise AssertionError(
- "Grid is not complete and cannot be converted"
- )
+ raise AssertionError("Grid is not complete and cannot be converted")
def intersect(self, x, y, z=None, local=False, forgive=False):
"""
diff --git a/flopy/export/metadata.py b/flopy/export/metadata.py
index edb47af451..3677f0ae6f 100644
--- a/flopy/export/metadata.py
+++ b/flopy/export/metadata.py
@@ -64,33 +64,27 @@ def __init__(self, sciencebase_id, model):
self.naming_authority = "ScienceBase" # org. that provides the id
# Well-behaved generic netCDF applications should append a line containing:
# date, time of day, user name, program name and command arguments.
- self.source = (
- model.model_ws
- ) # The method of production of the original data.
+ self.source = model.model_ws # The method of production of the original data.
# If it was model-generated, source should name the model and its version.
# This attribute is defined in the CF Conventions.
self.acknowledgement = self._get_xml_attribute("datacred")
- self.date_created = self.sb["provenance"]["linkProcess"].get(
- "dateCreated"
- )
+ self.date_created = self.sb["provenance"]["linkProcess"].get("dateCreated")
self.creator_name = self.creator.get("name")
self.creator_email = self.creator.get("email")
- self.creator_institution = self.creator["organization"].get(
- "displayText"
- )
- self.institution = (
- self.creator_institution
- ) # also in CF convention for global attributes
+ self.creator_institution = self.creator["organization"].get("displayText")
+ # also in CF convention for global attributes
+ self.institution = self.creator_institution
self.project = self.sb["title"]
- self.publisher_name = [
+ self.publisher_name = next(
d.get("name")
for d in self.sb["contacts"]
if "publisher" in d.get("type").lower()
- ][0]
- self.publisher_email = self.sb["provenance"]["linkProcess"].get(
- "processedBy"
)
- self.publisher_url = "https://www2.usgs.gov/water/" # self.sb['provenance']['linkProcess'].get('linkReference')
+ self.publisher_email = self.sb["provenance"]["linkProcess"].get("processedBy")
+ # TODO: should publisher_url be obtained from linkReference?
+ # publisher_url = self.sb['provenance']['linkProcess'].get('linkReference')
+ publisher_url = "https://www2.usgs.gov/water/"
+ self.publisher_url = publisher_url
self.geospatial_bounds_crs = "EPSG:4326"
self.geospatial_lat_min = self.bounds.get("minY")
self.geospatial_lat_max = self.bounds.get("maxY")
@@ -111,7 +105,7 @@ def __init__(self, sciencebase_id, model):
def _get_xml_attribute(self, attr):
try:
- return list(self.xmlroot.iter(attr))[0].text
+ return next(iter(self.xmlroot.iter(attr))).text
except:
return None
@@ -121,11 +115,9 @@ def bounds(self):
@property
def creator(self):
- return [
- d
- for d in self.sb["contacts"]
- if "point of contact" in d["type"].lower()
- ][0]
+ return next(
+ d for d in self.sb["contacts"] if "point of contact" in d["type"].lower()
+ )
@property
def creator_url(self):
@@ -172,9 +164,7 @@ def references(self):
"""
r = [self.citation]
links = [
- d.get("uri")
- for d in self.sb["webLinks"]
- if "link" in d.get("type").lower()
+ d.get("uri") for d in self.sb["webLinks"] if "link" in d.get("type").lower()
]
return r + links
@@ -190,9 +180,7 @@ def time_coverage(self):
l = self.sb["dates"]
tc = {}
for t in ["start", "end"]:
- tc[t] = [d.get("dateString") for d in l if t in d["type"].lower()][
- 0
- ]
+ tc[t] = next(d.get("dateString") for d in l if t in d["type"].lower())
if not np.all(self.model_time.steady_state) and pd is not None:
# replace with times from model reference
tc["start"] = self.model_time.start_datetime
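The list-indexing to next() pattern applied throughout this file, shown in isolation (the sample data is illustrative):

    contacts = [{"type": "Publisher", "name": "USGS"}]
    # builds the whole list, then takes element 0 (IndexError when empty)
    name = [d["name"] for d in contacts if "publisher" in d["type"].lower()][0]
    # stops at the first match instead (StopIteration when empty)
    name = next(d["name"] for d in contacts if "publisher" in d["type"].lower())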
diff --git a/flopy/export/netcdf.py b/flopy/export/netcdf.py
index eda44e1172..e130923293 100644
--- a/flopy/export/netcdf.py
+++ b/flopy/export/netcdf.py
@@ -188,9 +188,7 @@ def __init__(
self.dimension_names = ("layer", "y", "x")
STANDARD_VARS.extend(["delc", "delr"])
else:
- raise Exception(
- f"Grid type {self.model_grid.grid_type} not supported."
- )
+ raise Exception(f"Grid type {self.model_grid.grid_type} not supported.")
self.shape = self.model_grid.shape
parser = import_optional_dependency("dateutil.parser")
@@ -201,9 +199,7 @@ def __init__(
crs = get_authority_crs(self.model_grid.crs)
if crs is None:
- self.logger.warn(
- "model has no coordinate reference system specified. "
- )
+ self.logger.warn("model has no coordinate reference system specified. ")
self.model_crs = crs
self.transformer = None
self.grid_units = self.model_grid.units
@@ -219,8 +215,8 @@ def __init__(
self.log("initializing attributes")
self.nc_crs_str = "epsg:4326"
self.nc_crs_longname = "https://www.opengis.net/def/crs/EPSG/0/4326"
- self.nc_semi_major = float(6378137.0)
- self.nc_inverse_flat = float(298.257223563)
+ self.nc_semi_major = 6378137.0
+ self.nc_inverse_flat = 298.257223563
self.global_attributes = {}
self.global_attributes["namefile"] = self.model.namefile
@@ -247,9 +243,7 @@ def __init__(
}
for n, v in spatial_attribs.items():
self.global_attributes["flopy_sr_" + n] = v
- self.global_attributes["start_datetime"] = (
- self.model_time.start_datetime
- )
+ self.global_attributes["start_datetime"] = self.model_time.start_datetime
self.fillvalue = FILLVALUE
@@ -279,18 +273,14 @@ def __add__(self, other):
new_net = NetCdf.zeros_like(self)
if np.isscalar(other) or isinstance(other, np.ndarray):
for vname in self.var_attr_dict.keys():
- new_net.nc.variables[vname][:] = (
- self.nc.variables[vname][:] + other
- )
+ new_net.nc.variables[vname][:] = self.nc.variables[vname][:] + other
elif isinstance(other, NetCdf):
for vname in self.var_attr_dict.keys():
new_net.nc.variables[vname][:] = (
self.nc.variables[vname][:] + other.nc.variables[vname][:]
)
else:
- raise Exception(
- f"NetCdf.__add__(): unrecognized other:{type(other)}"
- )
+ raise Exception(f"NetCdf.__add__(): unrecognized other:{type(other)}")
new_net.nc.sync()
return new_net
@@ -298,18 +288,14 @@ def __sub__(self, other):
new_net = NetCdf.zeros_like(self)
if np.isscalar(other) or isinstance(other, np.ndarray):
for vname in self.var_attr_dict.keys():
- new_net.nc.variables[vname][:] = (
- self.nc.variables[vname][:] - other
- )
+ new_net.nc.variables[vname][:] = self.nc.variables[vname][:] - other
elif isinstance(other, NetCdf):
for vname in self.var_attr_dict.keys():
new_net.nc.variables[vname][:] = (
self.nc.variables[vname][:] - other.nc.variables[vname][:]
)
else:
- raise Exception(
- f"NetCdf.__sub__(): unrecognized other:{type(other)}"
- )
+ raise Exception(f"NetCdf.__sub__(): unrecognized other:{type(other)}")
new_net.nc.sync()
return new_net
@@ -317,18 +303,14 @@ def __mul__(self, other):
new_net = NetCdf.zeros_like(self)
if np.isscalar(other) or isinstance(other, np.ndarray):
for vname in self.var_attr_dict.keys():
- new_net.nc.variables[vname][:] = (
- self.nc.variables[vname][:] * other
- )
+ new_net.nc.variables[vname][:] = self.nc.variables[vname][:] * other
elif isinstance(other, NetCdf):
for vname in self.var_attr_dict.keys():
new_net.nc.variables[vname][:] = (
self.nc.variables[vname][:] * other.nc.variables[vname][:]
)
else:
- raise Exception(
- f"NetCdf.__mul__(): unrecognized other:{type(other)}"
- )
+ raise Exception(f"NetCdf.__mul__(): unrecognized other:{type(other)}")
new_net.nc.sync()
return new_net
@@ -340,19 +322,14 @@ def __truediv__(self, other):
with np.errstate(invalid="ignore"):
if np.isscalar(other) or isinstance(other, np.ndarray):
for vname in self.var_attr_dict.keys():
- new_net.nc.variables[vname][:] = (
- self.nc.variables[vname][:] / other
- )
+ new_net.nc.variables[vname][:] = self.nc.variables[vname][:] / other
elif isinstance(other, NetCdf):
for vname in self.var_attr_dict.keys():
new_net.nc.variables[vname][:] = (
- self.nc.variables[vname][:]
- / other.nc.variables[vname][:]
+ self.nc.variables[vname][:] / other.nc.variables[vname][:]
)
else:
- raise Exception(
- f"NetCdf.__sub__(): unrecognized other:{type(other)}"
- )
+            raise Exception(f"NetCdf.__truediv__(): unrecognized other:{type(other)}")
new_net.nc.sync()
return new_net
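Note that the error message above also corrects a copy-paste slip: the old code raised from `__truediv__` with a `__sub__` label. For context on the `np.errstate(invalid="ignore")` block wrapping this method: it silences the RuntimeWarning that element-wise division emits for 0/0 cells. A standalone sketch on a plain ndarray standing in for the netCDF variable data (`divide="ignore"` is added here to also cover x/0, an assumption beyond what the method itself sets):

```python
import numpy as np

a = np.array([1.0, 0.0, 4.0])
b = np.array([2.0, 0.0, 0.0])

with np.errstate(invalid="ignore", divide="ignore"):
    q = a / b  # 0/0 -> nan, 4/0 -> inf, no RuntimeWarning printed

print(q)  # [0.5 nan inf]
```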
@@ -420,21 +397,14 @@ def nc_crs(self):
return get_authority_crs(self.nc_crs_str)
@classmethod
- def zeros_like(
- cls, other, output_filename=None, verbose=None, logger=None
- ):
+ def zeros_like(cls, other, output_filename=None, verbose=None, logger=None):
new_net = NetCdf.empty_like(
- other,
- output_filename=output_filename,
- verbose=verbose,
- logger=logger,
+ other, output_filename=output_filename, verbose=verbose, logger=logger
)
# add the vars to the instance
for vname in other.var_attr_dict.keys():
if new_net.nc.variables.get(vname) is not None:
- new_net.logger.warn(
- f"variable {vname} already defined, skipping"
- )
+ new_net.logger.warn(f"variable {vname} already defined, skipping")
continue
new_net.log(f"adding variable {vname}")
var = other.nc.variables[vname]
@@ -447,10 +417,7 @@ def zeros_like(
new_data = np.zeros_like(data)
new_data[mask] = FILLVALUE
new_var = new_net.create_variable(
- vname,
- other.var_attr_dict[vname],
- var.dtype,
- dimensions=var.dimensions,
+ vname, other.var_attr_dict[vname], var.dtype, dimensions=var.dimensions
)
new_var[:] = new_data
new_net.log(f"adding variable {vname}")
@@ -463,19 +430,13 @@ def zeros_like(
return new_net
@classmethod
- def empty_like(
- cls, other, output_filename=None, verbose=None, logger=None
- ):
+ def empty_like(cls, other, output_filename=None, verbose=None, logger=None):
if output_filename is None:
- output_filename = (
- str(time.mktime(datetime.now().timetuple())) + ".nc"
- )
+ output_filename = str(time.mktime(datetime.now().timetuple())) + ".nc"
while os.path.exists(output_filename):
print(f"{output_filename}...already exists")
- output_filename = (
- str(time.mktime(datetime.now().timetuple())) + ".nc"
- )
+ output_filename = str(time.mktime(datetime.now().timetuple())) + ".nc"
print("creating temporary netcdf file..." + output_filename)
new_net = cls(
@@ -487,9 +448,7 @@ def empty_like(
)
return new_net
- def difference(
- self, other, minuend="self", mask_zero_diff=True, onlydiff=True
- ):
+ def difference(self, other, minuend="self", mask_zero_diff=True, onlydiff=True):
"""
make a new NetCDF instance that is the difference with another
netcdf file
@@ -540,8 +499,7 @@ def difference(
diff = self_vars.symmetric_difference(other_vars)
if len(diff) > 0:
self.logger.warn(
- "variables are not the same between the two nc files: "
- + ",".join(diff)
+ "variables are not the same between the two nc files: " + ",".join(diff)
)
return
@@ -607,9 +565,7 @@ def difference(
# check for non-zero diffs
if onlydiff and d_data.sum() == 0.0:
- self.logger.warn(
- f"var {vname} has zero differences, skipping..."
- )
+ self.logger.warn(f"var {vname} has zero differences, skipping...")
continue
self.logger.warn(
@@ -645,9 +601,7 @@ def difference(
def write(self):
"""write the nc object to disk"""
self.log("writing nc file")
- assert (
- self.nc is not None
- ), "netcdf.write() error: nc file not initialized"
+ assert self.nc is not None, "netcdf.write() error: nc file not initialized"
# write any new attributes that have been set since
# initializing the file
@@ -671,9 +625,7 @@ def initialize_geometry(self):
# Check if using newer pyproj version conventions
if version.parse(pyproj.__version__) < version.parse("2.2"):
- raise ValueError(
- "The FloPy NetCDF module requires pyproj >= 2.2.0."
- )
+ raise ValueError("The FloPy NetCDF module requires pyproj >= 2.2.0.")
print("initialize_geometry::")
@@ -705,9 +657,7 @@ def initialize_geometry(self):
self.xs, self.ys = self.transformer.transform(xs, ys)
# get transformed bounds and record to check against ScienceBase later
- bbox = np.array(
- [[xmin, ymin], [xmin, ymax], [xmax, ymax], [xmax, ymin]]
- )
+ bbox = np.array([[xmin, ymin], [xmin, ymax], [xmax, ymax], [xmax, ymin]])
x, y = self.transformer.transform(*bbox.transpose())
self.bounds = x.min(), y.min(), x.max(), y.max()
else:
@@ -747,10 +697,7 @@ def initialize_file(self, time_values=None):
# write some attributes
self.log("setting standard attributes")
- self.nc.setncattr(
- "Conventions",
- f"CF-1.6, ACDD-1.3, flopy {version}",
- )
+ self.nc.setncattr("Conventions", f"CF-1.6, ACDD-1.3, flopy {version}")
self.nc.setncattr(
"date_created", datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
)
@@ -812,10 +759,7 @@ def initialize_file(self, time_values=None):
"positive": self.z_positive,
}
elev = self.create_variable(
- "elevation",
- attribs,
- precision_str="f8",
- dimensions=self.dimension_names,
+ "elevation", attribs, precision_str="f8", dimensions=self.dimension_names
)
elev[:] = self.zs
@@ -846,10 +790,7 @@ def initialize_file(self, time_values=None):
"_CoordinateAxisType": "Lat",
}
lat = self.create_variable(
- "latitude",
- attribs,
- precision_str="f8",
- dimensions=self.dimension_names[1:],
+ "latitude", attribs, precision_str="f8", dimensions=self.dimension_names[1:]
)
lat[:] = self.ys
@@ -862,10 +803,7 @@ def initialize_file(self, time_values=None):
"axis": "X",
}
x = self.create_variable(
- "x_proj",
- attribs,
- precision_str="f8",
- dimensions=self.dimension_names[1:],
+ "x_proj", attribs, precision_str="f8", dimensions=self.dimension_names[1:]
)
x[:] = self.model_grid.xyzcellcenters[0]
@@ -878,10 +816,7 @@ def initialize_file(self, time_values=None):
"axis": "Y",
}
y = self.create_variable(
- "y_proj",
- attribs,
- precision_str="f8",
- dimensions=self.dimension_names[1:],
+ "y_proj", attribs, precision_str="f8", dimensions=self.dimension_names[1:]
)
y[:] = self.model_grid.xyzcellcenters[1]
@@ -921,7 +856,8 @@ def initialize_file(self, time_values=None):
delc.comments = (
"This is the row spacing that applied to the UNROTATED grid. "
"This grid HAS been rotated before being saved to NetCDF. "
- "To compute the unrotated grid, use the origin point and this array."
+ "To compute the unrotated grid, use the origin point and "
+ "this array."
)
# delr
@@ -937,7 +873,8 @@ def initialize_file(self, time_values=None):
delr.comments = (
"This is the col spacing that applied to the UNROTATED grid. "
"This grid HAS been rotated before being saved to NetCDF. "
- "To compute the unrotated grid, use the origin point and this array."
+ "To compute the unrotated grid, use the origin point and "
+ "this array."
)
# Workaround for CF/CDM.
@@ -1010,9 +947,7 @@ def initialize_group(
f"{dim} information must be supplied to dimension data"
)
else:
- self.nc.groups[group].createDimension(
- dim, len(dimension_data[dim])
- )
+ self.nc.groups[group].createDimension(dim, len(dimension_data[dim]))
self.log(f"created {group} group dimensions")
@@ -1020,9 +955,7 @@ def initialize_group(
for dim in dimensions:
if dim.lower() == "time":
if "time" not in attributes:
- unit_value = (
- f"{self.time_units} since {self.start_datetime}"
- )
+ unit_value = f"{self.time_units} since {self.start_datetime}"
attribs = {
"units": unit_value,
"standard_name": "time",
@@ -1035,11 +968,7 @@ def initialize_group(
attribs = attributes["time"]
time = self.create_group_variable(
- group,
- "time",
- attribs,
- precision_str="f8",
- dimensions=("time",),
+ group, "time", attribs, precision_str="f8", dimensions=("time",)
)
time[:] = np.asarray(time_values)
@@ -1058,22 +987,14 @@ def initialize_group(
attribs = attributes["zone"]
zone = self.create_group_variable(
- group,
- "zone",
- attribs,
- precision_str="i4",
- dimensions=("zone",),
+ group, "zone", attribs, precision_str="i4", dimensions=("zone",)
)
zone[:] = np.asarray(dimension_data["zone"])
else:
attribs = attributes[dim]
var = self.create_group_variable(
- group,
- dim,
- attribs,
- precision_str="f8",
- dimensions=dim_names,
+ group, dim, attribs, precision_str="f8", dimensions=dim_names
)
var[:] = np.asarray(dimension_data[dim])
self.nc.sync()
@@ -1116,22 +1037,15 @@ def create_group_variable(
"""
name = self.normalize_name(name)
- if (
- name in STANDARD_VARS
- and name in self.nc.groups[group].variables.keys()
- ):
+ if name in STANDARD_VARS and name in self.nc.groups[group].variables.keys():
return
if name in self.nc.groups[group].variables.keys():
if self.forgive:
- self.logger.warn(
- f"skipping duplicate {group} group variable: {name}"
- )
+ self.logger.warn(f"skipping duplicate {group} group variable: {name}")
return
else:
- raise Exception(
- f"duplicate {group} group variable name: {name}"
- )
+ raise Exception(f"duplicate {group} group variable name: {name}")
self.log(f"creating group {group} variable: {name}")
@@ -1150,11 +1064,7 @@ def create_group_variable(
self.var_attr_dict[f"{group}/{name}"] = attributes
var = self.nc.groups[group].createVariable(
- name,
- precision_str,
- dimensions,
- fill_value=self.fillvalue,
- zlib=True,
+ name, precision_str, dimensions, fill_value=self.fillvalue, zlib=True
)
for k, v in attributes.items():
@@ -1213,10 +1123,7 @@ def create_variable(
# long_name = attributes.pop("long_name",name)
if name in STANDARD_VARS and name in self.nc.variables.keys():
return
- if (
- name not in self.var_attr_dict.keys()
- and name in self.nc.variables.keys()
- ):
+ if name not in self.var_attr_dict.keys() and name in self.nc.variables.keys():
if self.forgive:
self.logger.warn(f"skipping duplicate variable: {name}")
return
@@ -1238,11 +1145,7 @@ def create_variable(
self.var_attr_dict[name] = attributes
var = self.nc.createVariable(
- name,
- precision_str,
- dimensions,
- fill_value=self.fillvalue,
- zlib=True,
+ name, precision_str, dimensions, fill_value=self.fillvalue, zlib=True
)
for k, v in attributes.items():
try:
@@ -1309,7 +1212,7 @@ def add_sciencebase_metadata(self, id, check=True):
"get_sciencebase_xml_metadata",
"get_sciencebase_metadata",
}
- towrite = sorted(list(attr.difference(skip)))
+ towrite = sorted(attr.difference(skip))
for k in towrite:
v = md.__getattribute__(k)
if v is not None:
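The `sorted(list(...))` cleanup above is safe because `sorted()` accepts any iterable, a set included, and always returns a new list:

```python
attrs = {"title", "abstract", "authors"}
skip = {"abstract"}

print(sorted(attrs.difference(skip)))  # ['authors', 'title']
```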
@@ -1325,7 +1228,8 @@ def add_sciencebase_metadata(self, id, check=True):
return md
def _check_vs_sciencebase(self, md):
- """Check that model bounds read from flopy are consistent with those in ScienceBase."""
+ """Check that model bounds read from flopy are consistent with
+ those in ScienceBase."""
xmin, ymin, xmax, ymax = self.bounds
tol = 1e-5
assert md.geospatial_lon_min - xmin < tol
diff --git a/flopy/export/shapefile_utils.py b/flopy/export/shapefile_utils.py
index 7c2303874a..d0075a903c 100644
--- a/flopy/export/shapefile_utils.py
+++ b/flopy/export/shapefile_utils.py
@@ -10,7 +10,7 @@
import sys
import warnings
from pathlib import Path
-from typing import List, Optional, Union
+from typing import Optional, Union
from warnings import warn
import numpy as np
@@ -114,9 +114,7 @@ def write_grid_shapefile(
)
elif mg.grid_type == "structured":
verts = [
- mg.get_cell_vertices(i, j)
- for i in range(mg.nrow)
- for j in range(mg.ncol)
+ mg.get_cell_vertices(i, j) for i in range(mg.nrow) for j in range(mg.ncol)
]
elif mg.grid_type == "vertex":
verts = [mg.get_cell_vertices(cellid) for cellid in range(mg.ncpl)]
@@ -171,10 +169,7 @@ def write_grid_shapefile(
).transpose()
else:
names = ["node", "layer"] + list(array_dict.keys())
- dtypes = [
- ("node", np.dtype("int")),
- ("layer", np.dtype("int")),
- ] + [
+ dtypes = [("node", np.dtype("int")), ("layer", np.dtype("int"))] + [
(enforce_10ch_limit([name])[0], array_dict[name].dtype)
for name in names[2:]
]
@@ -184,9 +179,7 @@ def write_grid_shapefile(
istart, istop = mg.get_layer_node_range(ilay)
layer[istart:istop] = ilay + 1
at = np.vstack(
- [node]
- + [layer]
- + [array_dict[name].ravel() for name in names[2:]]
+ [node] + [layer] + [array_dict[name].ravel() for name in names[2:]]
).transpose()
names = enforce_10ch_limit(names)
@@ -197,9 +190,7 @@ def write_grid_shapefile(
at = np.array([tuple(i) for i in at], dtype=dtypes)
# write field information
- fieldinfo = {
- name: get_pyshp_field_info(dtype.name) for name, dtype in dtypes
- }
+ fieldinfo = {name: get_pyshp_field_info(dtype.name) for name, dtype in dtypes}
for n in names:
w.field(n, *fieldinfo[n])
@@ -308,11 +299,7 @@ def model_attributes_to_shapefile(
attrs.remove("start_datetime")
for attr in attrs:
a = pak.__getattribute__(attr)
- if (
- a is None
- or not hasattr(a, "data_type")
- or a.name == "thickness"
- ):
+ if a is None or not hasattr(a, "data_type") or a.name == "thickness":
continue
if a.data_type == DataType.array2d:
if a.array is None or a.array.shape != horz_shape:
@@ -336,7 +323,8 @@ def model_attributes_to_shapefile(
if a.array.shape == horz_shape:
if hasattr(a, "shape"):
if a.shape[1] is None: # usg unstructured Util3d
- # return a flattened array, with a.name[0] (a per-layer list)
+ # return a flattened array,
+ # with a.name[0] (a per-layer list)
array_dict[a.name[0]] = a.array
else:
array_dict[a.name] = a.array
@@ -377,14 +365,36 @@ def model_attributes_to_shapefile(
assert arr.shape == horz_shape
array_dict[name] = arr
elif a.data_type == DataType.transientlist:
- try:
- list(a.masked_4D_arrays_itr())
- except:
+ # Skip empty transientlist
+ if not a.data:
continue
+
+            # Use the first recarray stress period (kper) to inspect field dtypes
+ for kper in a.data.keys():
+ if isinstance(a.data[kper], np.recarray):
+ break
+ # Skip transientlist if all elements are of object type
+ if all(
+ dtype == np.object_
+ for dtype, _ in a.data[kper].dtype.fields.values()
+ ):
+ continue
+
for name, array in a.masked_4D_arrays_itr():
+ n = shape_attr_name(name, length=4)
for kper in range(array.shape[0]):
+ # guard clause for disu case
+ # array is (kper, node) only
+ if len(array.shape) == 2:
+ aname = f"{n}{kper + 1}"
+ arr = array[kper]
+ assert arr.shape == horz_shape
+ if np.all(np.isnan(arr)):
+ continue
+ array_dict[aname] = arr
+ continue
+ # non-disu case
for k in range(array.shape[1]):
- n = shape_attr_name(name, length=4)
aname = f"{n}{k + 1}{kper + 1}"
arr = array[kper][k]
assert arr.shape == horz_shape
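A gloss on the dtype guard introduced above: MF6 stress period data may hold time-series names or other Python objects, and a recarray whose fields are all object dtype has nothing numeric to rasterize into masked 4D arrays. A toy reproduction of the check (field names invented):

```python
import numpy as np

rec = np.array(
    [("wel_ts", "aux_obj")], dtype=[("q", "O"), ("aux", "O")]
).view(np.recarray)

all_object = all(
    dtype == np.object_ for dtype, _ in rec.dtype.fields.values()
)
print(all_object)  # True -> this transientlist is skipped
```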
@@ -399,9 +409,7 @@ def model_attributes_to_shapefile(
):
for ilay in range(a.model.modelgrid.nlay):
u2d = a[ilay]
- name = (
- f"{shape_attr_name(u2d.name)}_{ilay + 1}"
- )
+ name = f"{shape_attr_name(u2d.name)}_{ilay + 1}"
arr = u2d.array
assert arr.shape == horz_shape
array_dict[name] = arr
@@ -467,7 +475,7 @@ def shape_attr_name(name, length=6, keep_layer=False):
return n
-def enforce_10ch_limit(names: List[str], warnings: bool = True) -> List[str]:
+def enforce_10ch_limit(names: list[str], warnings: bool = True) -> list[str]:
"""Enforce 10 character limit for fieldnames.
Add suffix for duplicate names starting at 0.
@@ -545,14 +553,10 @@ def shp2recarray(shpname: Union[str, os.PathLike]):
sf = import_optional_dependency("shapefile")
sfobj = sf.Reader(str(shpname))
- dtype = [
- (str(f[0]), get_pyshp_field_dtypes(f[1])) for f in sfobj.fields[1:]
- ]
+ dtype = [(str(f[0]), get_pyshp_field_dtypes(f[1])) for f in sfobj.fields[1:]]
geoms = GeoSpatialCollection(sfobj).flopy_geometry
- records = [
- tuple(r) + (geoms[i],) for i, r in enumerate(sfobj.iterRecords())
- ]
+ records = [tuple(r) + (geoms[i],) for i, r in enumerate(sfobj.iterRecords())]
dtype += [("geometry", object)]
recarray = np.array(records, dtype=dtype).view(np.recarray)
@@ -614,9 +618,7 @@ def recarray2shp(
from ..utils.geospatial_utils import GeoSpatialCollection
if len(recarray) != len(geoms):
- raise IndexError(
- "Number of geometries must equal the number of records!"
- )
+ raise IndexError("Number of geometries must equal the number of records!")
if len(recarray) == 0:
raise Exception("Recarray is empty")
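The `List[str]` to `list[str]` change in this file (and in `mbase.py` below) leans on PEP 585: on Python 3.9+ the builtin collections are subscriptable in annotations, so the `typing` imports can go. An illustrative stand-in, not flopy's actual `enforce_10ch_limit` (which also de-duplicates truncated names):

```python
def truncate_names(names: list[str], limit: int = 10) -> list[str]:
    """Truncate each field name to at most `limit` characters."""
    return [n[:limit] for n in names]

print(truncate_names(["verylongfieldname", "ok"]))  # ['verylongfi', 'ok']
```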
diff --git a/flopy/export/utils.py b/flopy/export/utils.py
index ef446b7654..d6902d41ec 100644
--- a/flopy/export/utils.py
+++ b/flopy/export/utils.py
@@ -47,9 +47,7 @@ def ensemble_helper(
"""
f_in, f_out = None, None
for m in models[1:]:
- assert (
- m.get_nrow_ncol_nlay_nper() == models[0].get_nrow_ncol_nlay_nper()
- )
+ assert m.get_nrow_ncol_nlay_nper() == models[0].get_nrow_ncol_nlay_nper()
if inputs_filename is not None:
f_in = models[0].export(inputs_filename, **kwargs)
vdict = {}
@@ -129,9 +127,7 @@ def ensemble_helper(
if i >= 2:
if not add_reals:
f_out.write()
- f_out = NetCdf.empty_like(
- mean, output_filename=outputs_filename
- )
+ f_out = NetCdf.empty_like(mean, output_filename=outputs_filename)
f_out.append(mean, suffix="**mean**")
f_out.append(stdev, suffix="**stdev**")
@@ -156,9 +152,7 @@ def _add_output_nc_variable(
if logger:
logger.log(f"creating array for {var_name}")
- array = np.zeros(
- (len(times), shape3d[0], shape3d[1], shape3d[2]), dtype=np.float32
- )
+ array = np.zeros((len(times), shape3d[0], shape3d[1], shape3d[2]), dtype=np.float32)
array[:] = np.nan
if isinstance(out_obj, ZBNetOutput):
@@ -229,10 +223,7 @@ def _add_output_nc_variable(
try:
dim_tuple = ("time",) + nc.dimension_names
var = nc.create_variable(
- var_name,
- attribs,
- precision_str=precision_str,
- dimensions=dim_tuple,
+ var_name, attribs, precision_str=precision_str, dimensions=dim_tuple
)
except Exception as e:
estr = f"error creating variable {var_name}:\n{e!s}"
@@ -307,7 +298,8 @@ def output_helper(
Parameters
----------
f : str or PathLike or NetCdf or dict
- filepath to write output to (must have .shp or .nc extension), NetCDF object, or dictionary
+ filepath to write output to (must have .shp or .nc extension),
+ NetCDF object, or dictionary
ml : flopy.mbase.ModelInterface derived type
oudic : dict
output_filename,flopy datafile/cellbudgetfile instance
@@ -404,13 +396,9 @@ def output_helper(
logger.warn(msg)
elif verbose:
print(msg)
- times = [t for t in common_times[::stride]]
- if (isinstance(f, str) or isinstance(f, Path)) and Path(
- f
- ).suffix.lower() == ".nc":
- f = NetCdf(
- f, ml, time_values=times, logger=logger, forgive=forgive, **kwargs
- )
+ times = list(common_times[::stride])
+ if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".nc":
+ f = NetCdf(f, ml, time_values=times, logger=logger, forgive=forgive, **kwargs)
elif isinstance(f, NetCdf):
otimes = list(f.nc.variables["time"][:])
assert otimes == times
@@ -500,9 +488,7 @@ def output_helper(
pass
for text, array in zonebud.arrays.items():
- _add_output_nc_zonebudget_variable(
- f, array, text, zonebud.flux, logger
- )
+ _add_output_nc_zonebudget_variable(f, array, text, zonebud.flux, logger)
# write the zone array to standard output
_add_output_nc_variable(
@@ -530,9 +516,7 @@ def output_helper(
attrib_name = "conc"
else:
attrib_name = "head"
- plotarray = np.atleast_3d(
- out_obj.get_alldata().transpose()
- ).transpose()
+ plotarray = np.atleast_3d(out_obj.get_alldata().transpose()).transpose()
for per in range(plotarray.shape[0]):
for k in range(plotarray.shape[1]):
@@ -581,16 +565,15 @@ def output_helper(
return f
-def model_export(
- f: Union[str, os.PathLike, NetCdf, dict], ml, fmt=None, **kwargs
-):
+def model_export(f: Union[str, os.PathLike, NetCdf, dict], ml, fmt=None, **kwargs):
"""
Method to export a model to a shapefile or netcdf file
Parameters
----------
f : str or PathLike or NetCdf or dict
- file path (".nc" for netcdf or ".shp" for shapefile) or NetCDF object or dictionary
+ file path (".nc" for netcdf or ".shp" for shapefile),
+ NetCDF object, or dictionary
ml : flopy.modflow.mbase.ModelInterface object
flopy model object
fmt : str
@@ -616,14 +599,10 @@ def model_export(
if package_names is None:
package_names = [pak.name[0] for pak in ml.packagelist]
- if (isinstance(f, str) or isinstance(f, Path)) and Path(
- f
- ).suffix.lower() == ".nc":
+ if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".nc":
f = NetCdf(f, ml, **kwargs)
- if (isinstance(f, str) or isinstance(f, Path)) and Path(
- f
- ).suffix.lower() == ".shp":
+ if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".shp":
shapefile_utils.model_attributes_to_shapefile(
f, ml, package_names=package_names, **kwargs
)
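The same extension-dispatch expression now fits on one line at every call site in this file. As a reading aid, here it is factored into a hypothetical helper (flopy keeps it inline; `isinstance(f, (str, Path))` would be an equivalent tightening of the double `isinstance`):

```python
import os
from pathlib import Path
from typing import Union

def has_suffix(f: Union[str, os.PathLike, dict], ext: str) -> bool:
    # only str/Path inputs carry a suffix; NetCdf objects and dicts
    # fall through to the other export branches
    return isinstance(f, (str, Path)) and Path(f).suffix.lower() == ext

print(has_suffix("model.NC", ".nc"))      # True (case-insensitive)
print(has_suffix({"head": None}, ".nc"))  # False
```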
@@ -661,9 +640,7 @@ def model_export(
smooth=smooth,
point_scalars=point_scalars,
)
- vtkobj.add_model(
- ml, masked_values=masked_values, selpaklist=package_names
- )
+ vtkobj.add_model(ml, masked_values=masked_values, selpaklist=package_names)
vtkobj.write(os.path.join(f, name), kpers)
else:
@@ -683,7 +660,8 @@ def package_export(
Parameters
----------
f : str or PathLike or NetCdf or dict
- output file path (extension .shp for shapefile or .nc for netcdf) or NetCDF object or dictionary
+ output file path (extension .shp for shapefile or .nc for netcdf),
+ NetCDF object, or dictionary
pak : flopy.pakbase.Package object
package to export
fmt : str
@@ -710,14 +688,10 @@ def package_export(
"""
assert isinstance(pak, PackageInterface)
- if (isinstance(f, str) or isinstance(f, Path)) and Path(
- f
- ).suffix.lower() == ".nc":
+ if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".nc":
f = NetCdf(f, pak.parent, **kwargs)
- if (isinstance(f, str) or isinstance(f, Path)) and Path(
- f
- ).suffix.lower() == ".shp":
+ if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".shp":
shapefile_utils.model_attributes_to_shapefile(
f, pak.parent, package_names=pak.name, verbose=verbose, **kwargs
)
@@ -808,9 +782,7 @@ def generic_array_export(
flopy model object
"""
- if (isinstance(f, str) or isinstance(f, Path)) and Path(
- f
- ).suffix.lower() == ".nc":
+ if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".nc":
assert "model" in kwargs.keys(), (
"creating a new netCDF using generic_array_helper requires a "
"'model' kwarg"
@@ -833,8 +805,7 @@ def generic_array_export(
long_name = kwargs.pop("long_name", var_name)
if len(kwargs) > 0:
f.logger.warn(
- "generic_array_helper(): unrecognized kwargs:"
- + ",".join(kwargs.keys())
+ "generic_array_helper(): unrecognized kwargs:" + ",".join(kwargs.keys())
)
attribs = {"long_name": long_name}
attribs["coordinates"] = coords
@@ -845,10 +816,7 @@ def generic_array_export(
raise Exception(f"error processing {var_name}: all NaNs")
try:
var = f.create_variable(
- var_name,
- attribs,
- precision_str=precision_str,
- dimensions=dimensions,
+ var_name, attribs, precision_str=precision_str, dimensions=dimensions
)
except Exception as e:
estr = f"error creating variable {var_name}:\n{e!s}"
@@ -887,24 +855,17 @@ def mflist_export(f: Union[str, os.PathLike, NetCdf], mfl, **kwargs):
"""
if not isinstance(mfl, (DataListInterface, DataInterface)):
- err = (
- "mflist_helper only helps instances that support "
- "DataListInterface"
- )
+ err = "mflist_helper only helps instances that support DataListInterface"
raise AssertionError(err)
modelgrid = mfl.model.modelgrid
if "modelgrid" in kwargs:
modelgrid = kwargs.pop("modelgrid")
- if (isinstance(f, str) or isinstance(f, Path)) and Path(
- f
- ).suffix.lower() == ".nc":
+ if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".nc":
f = NetCdf(f, mfl.model, **kwargs)
- if (isinstance(f, str) or isinstance(f, Path)) and Path(
- f
- ).suffix.lower() == ".shp":
+ if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".shp":
sparse = kwargs.get("sparse", False)
kper = kwargs.get("kper", 0)
squeeze = kwargs.get("squeeze", True)
@@ -952,16 +913,20 @@ def mflist_export(f: Union[str, os.PathLike, NetCdf], mfl, **kwargs):
prjfile = kwargs.get("prjfile", None)
polys = np.array([Polygon(v) for v in verts])
recarray2shp(
- ra,
- geoms=polys,
- shpname=f,
- mg=modelgrid,
- crs=crs,
- prjfile=prjfile,
+ ra, geoms=polys, shpname=f, mg=modelgrid, crs=crs, prjfile=prjfile
)
elif isinstance(f, NetCdf) or isinstance(f, dict):
base_name = mfl.package.name[0].lower()
+ # Use first recarray kper to check mflist
+ for kper in mfl.data.keys():
+ if isinstance(mfl.data[kper], np.recarray):
+ break
+ # Skip mflist if all elements are of object type
+ if all(
+ dtype == np.object_ for dtype, _ in mfl.data[kper].dtype.fields.values()
+ ):
+ return f
for name, array in mfl.masked_4D_arrays_itr():
var_name = f"{base_name}_{name}"
@@ -972,9 +937,7 @@ def mflist_export(f: Union[str, os.PathLike, NetCdf], mfl, **kwargs):
units = None
if var_name in NC_UNITS_FORMAT:
- units = NC_UNITS_FORMAT[var_name].format(
- f.grid_units, f.time_units
- )
+ units = NC_UNITS_FORMAT[var_name].format(f.grid_units, f.time_units)
precision_str = NC_PRECISION_TYPE[mfl.dtype[name].type]
if var_name in NC_LONG_NAMES:
attribs = {"long_name": NC_LONG_NAMES[var_name]}
@@ -991,10 +954,7 @@ def mflist_export(f: Union[str, os.PathLike, NetCdf], mfl, **kwargs):
try:
dim_tuple = ("time",) + f.dimension_names
var = f.create_variable(
- var_name,
- attribs,
- precision_str=precision_str,
- dimensions=dim_tuple,
+ var_name, attribs, precision_str=precision_str, dimensions=dim_tuple
)
except Exception as e:
estr = f"error creating variable {var_name}:\n{e!s}"
@@ -1036,10 +996,7 @@ def transient2d_export(f: Union[str, os.PathLike], t2d, fmt=None, **kwargs):
"""
if not isinstance(t2d, DataInterface):
- err = (
- "transient2d_helper only helps instances that support "
- "DataInterface"
- )
+ err = "transient2d_helper only helps instances that support DataInterface"
raise AssertionError(err)
min_valid = kwargs.get("min_valid", -1.0e9)
@@ -1049,14 +1006,10 @@ def transient2d_export(f: Union[str, os.PathLike], t2d, fmt=None, **kwargs):
if "modelgrid" in kwargs:
modelgrid = kwargs.pop("modelgrid")
- if (isinstance(f, str) or isinstance(f, Path)) and Path(
- f
- ).suffix.lower() == ".nc":
+ if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".nc":
f = NetCdf(f, t2d.model, **kwargs)
- if (isinstance(f, str) or isinstance(f, Path)) and Path(
- f
- ).suffix.lower() == ".shp":
+ if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".shp":
array_dict = {}
for kper in range(t2d.model.modeltime.nper):
u2d = t2d[kper]
@@ -1095,9 +1048,7 @@ def transient2d_export(f: Union[str, os.PathLike], t2d, fmt=None, **kwargs):
units = "unitless"
if var_name in NC_UNITS_FORMAT:
- units = NC_UNITS_FORMAT[var_name].format(
- f.grid_units, f.time_units
- )
+ units = NC_UNITS_FORMAT[var_name].format(f.grid_units, f.time_units)
try:
precision_str = NC_PRECISION_TYPE[t2d.dtype]
except:
@@ -1115,10 +1066,7 @@ def transient2d_export(f: Union[str, os.PathLike], t2d, fmt=None, **kwargs):
try:
dim_tuple = ("time",) + f.dimension_names
var = f.create_variable(
- var_name,
- attribs,
- precision_str=precision_str,
- dimensions=dim_tuple,
+ var_name, attribs, precision_str=precision_str, dimensions=dim_tuple
)
except Exception as e:
estr = f"error creating variable {var_name}:\n{e!s}"
@@ -1147,7 +1095,7 @@ def transient2d_export(f: Union[str, os.PathLike], t2d, fmt=None, **kwargs):
if hasattr(t2d, "transient_2ds"):
d = t2d.transient_2ds
else:
- d = {ix: i for ix, i in enumerate(t2d.array)}
+ d = dict(enumerate(t2d.array))
else:
raise AssertionError("No data available to export")
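`dict(enumerate(...))` above (mirrored in `vtk.py` below) is the constructor form of the comprehension it replaces; the keys are stress-period indices:

```python
import numpy as np

arr = np.arange(6).reshape(3, 2)  # 3 stress periods of a 2-cell array
d = dict(enumerate(arr))
print(d[0], d[2])  # [0 1] [4 5]
```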
@@ -1198,14 +1146,10 @@ def array3d_export(f: Union[str, os.PathLike], u3d, fmt=None, **kwargs):
if "modelgrid" in kwargs:
modelgrid = kwargs.pop("modelgrid")
- if (isinstance(f, str) or isinstance(f, Path)) and Path(
- f
- ).suffix.lower() == ".nc":
+ if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".nc":
f = NetCdf(f, u3d.model, **kwargs)
- if (isinstance(f, str) or isinstance(f, Path)) and Path(
- f
- ).suffix.lower() == ".shp":
+ if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".shp":
array_dict = {}
array_shape = u3d.array.shape
@@ -1267,9 +1211,7 @@ def array3d_export(f: Union[str, os.PathLike], u3d, fmt=None, **kwargs):
array[np.isnan(array)] = f.fillvalue
units = "unitless"
if var_name in NC_UNITS_FORMAT:
- units = NC_UNITS_FORMAT[var_name].format(
- f.grid_units, f.time_units
- )
+ units = NC_UNITS_FORMAT[var_name].format(f.grid_units, f.time_units)
precision_str = NC_PRECISION_TYPE[u3d.dtype]
if var_name in NC_LONG_NAMES:
attribs = {"long_name": NC_LONG_NAMES[var_name]}
@@ -1328,9 +1270,7 @@ def array3d_export(f: Union[str, os.PathLike], u3d, fmt=None, **kwargs):
raise NotImplementedError(f"unrecognized export argument:{f}")
-def array2d_export(
- f: Union[str, os.PathLike], u2d, fmt=None, verbose=False, **kwargs
-):
+def array2d_export(f: Union[str, os.PathLike], u2d, fmt=None, verbose=False, **kwargs):
"""
export helper for Util2d instances
@@ -1363,14 +1303,10 @@ def array2d_export(
if "modelgrid" in kwargs:
modelgrid = kwargs.pop("modelgrid")
- if (isinstance(f, str) or isinstance(f, Path)) and Path(
- f
- ).suffix.lower() == ".nc":
+ if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".nc":
f = NetCdf(f, u2d.model, **kwargs)
- if (isinstance(f, str) or isinstance(f, Path)) and Path(
- f
- ).suffix.lower() == ".shp":
+ if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".shp":
name = shapefile_utils.shape_attr_name(u2d.name, keep_layer=True)
shapefile_utils.write_grid_shapefile(
f, modelgrid, {name: u2d.array}, verbose=verbose
@@ -1418,9 +1354,7 @@ def array2d_export(
units = "unitless"
if var_name in NC_UNITS_FORMAT:
- units = NC_UNITS_FORMAT[var_name].format(
- f.grid_units, f.time_units
- )
+ units = NC_UNITS_FORMAT[var_name].format(f.grid_units, f.time_units)
precision_str = NC_PRECISION_TYPE[u2d.dtype]
if var_name in NC_LONG_NAMES:
attribs = {"long_name": NC_LONG_NAMES[var_name]}
@@ -1533,9 +1467,7 @@ def export_array(
filename = str(filename)
if filename.lower().endswith(".asc"):
if (
- len(np.unique(modelgrid.delr))
- != len(np.unique(modelgrid.delc))
- != 1
+ len(np.unique(modelgrid.delr)) != len(np.unique(modelgrid.delc)) != 1
or modelgrid.delr[0] != modelgrid.delc[0]
):
raise ValueError("Arc ascii arrays require a uniform grid.")
@@ -1559,9 +1491,7 @@ def export_array(
cellsize = np.max((dx, dy))
xoffset, yoffset = xmin, ymin
- filename = (
- ".".join(filename.split(".")[:-1]) + ".asc"
- ) # enforce .asc ending
+ filename = ".".join(filename.split(".")[:-1]) + ".asc" # enforce .asc ending
nrow, ncol = a.shape
a[np.isnan(a)] = nodata
txt = f"ncols {ncol}\n"
@@ -1580,9 +1510,7 @@ def export_array(
elif filename.lower().endswith(".tif"):
if (
- len(np.unique(modelgrid.delr))
- != len(np.unique(modelgrid.delc))
- != 1
+ len(np.unique(modelgrid.delr)) != len(np.unique(modelgrid.delc)) != 1
or modelgrid.delr[0] != modelgrid.delc[0]
):
raise ValueError("GeoTIFF export require a uniform grid.")
@@ -1643,11 +1571,7 @@ def export_array(
except ImportError:
crs = None
write_grid_shapefile(
- filename,
- modelgrid,
- array_dict={fieldname: a},
- nan_val=nodata,
- crs=crs,
+ filename, modelgrid, array_dict={fieldname: a}, nan_val=nodata, crs=crs
)
@@ -1750,9 +1674,7 @@ def export_contours(
recarray2shp(ra, geoms, filename, **kwargs)
-def export_contourf(
- filename, contours, fieldname="level", verbose=False, **kwargs
-):
+def export_contourf(filename, contours, fieldname="level", verbose=False, **kwargs):
"""
Write matplotlib filled contours to shapefile.
diff --git a/flopy/export/vtk.py b/flopy/export/vtk.py
index e25c94695e..7f15c25168 100644
--- a/flopy/export/vtk.py
+++ b/flopy/export/vtk.py
@@ -141,9 +141,7 @@ def __init__(
vtk = import_optional_dependency("vtk")
if model is None and modelgrid is None:
- raise AssertionError(
- "A model or modelgrid must be provided to use Vtk"
- )
+ raise AssertionError("A model or modelgrid must be provided to use Vtk")
elif model is not None:
self.modelgrid = model.modelgrid
@@ -421,28 +419,21 @@ def _build_grid_geometry(self):
adji = (adjk * self.ncpl) + i
zv = self.top[adji] * self.vertical_exageration
else:
- zv = (
- self.botm[adjk - 1][i]
- * self.vertical_exageration
- )
+ zv = self.botm[adjk - 1][i] * self.vertical_exageration
points.append([xv, yv, zv])
v1 += 1
cell_faces = [
- [v for v in range(v0, v1)],
+ list(range(v0, v1)),
[v + self.nvpl for v in range(v0, v1)],
]
for v in range(v0, v1):
if v != v1 - 1:
- cell_faces.append(
- [v + 1, v, v + self.nvpl, v + self.nvpl + 1]
- )
+ cell_faces.append([v + 1, v, v + self.nvpl, v + self.nvpl + 1])
else:
- cell_faces.append(
- [v0, v, v + self.nvpl, v0 + self.nvpl]
- )
+ cell_faces.append([v0, v, v + self.nvpl, v0 + self.nvpl])
v0 = v1
faces.append(cell_faces)
@@ -574,9 +565,7 @@ def _build_hfbs(self, pkg):
pts = []
for v in v1:
- ix = np.asarray(
- (v2.T[0] == v[0]) & (v2.T[1] == v[1])
- ).nonzero()
+ ix = np.asarray((v2.T[0] == v[0]) & (v2.T[1] == v[1])).nonzero()
if len(ix[0]) > 0 and len(pts) < 2:
pts.append(v2[ix[0][0]])
@@ -614,9 +603,7 @@ def _build_hfbs(self, pkg):
polygon.GetPointIds().SetNumberOfIds(4)
for ix, iv in enumerate(face):
polygon.GetPointIds().SetId(ix, iv)
- polydata.InsertNextCell(
- polygon.GetCellType(), polygon.GetPointIds()
- )
+ polydata.InsertNextCell(polygon.GetCellType(), polygon.GetPointIds())
# and then set the hydchr data
vtk_arr = numpy_support.numpy_to_vtk(
@@ -800,8 +787,8 @@ def add_transient_array(self, d, name=None, masked_values=None):
if not self._vtk_geometry_set:
self._set_vtk_grid_geometry()
- k = list(d.keys())[0]
- transient = dict()
+ k = next(iter(d.keys()))
+ transient = {}
if isinstance(d[k], DataInterface):
if d[k].data_type in (DataType.array2d, DataType.array3d):
if name is None:
@@ -820,9 +807,7 @@ def add_transient_array(self, d, name=None, masked_values=None):
transient[kper] = array
else:
if name is None:
- raise ValueError(
- "name must be specified when providing numpy arrays"
- )
+ raise ValueError("name must be specified when providing numpy arrays")
for kper, trarray in d.items():
if trarray.size != self.nnodes:
array = np.zeros(self.nnodes) * np.nan
@@ -857,7 +842,7 @@ def add_transient_list(self, mflist, masked_values=None):
mfl = mflist.array
if isinstance(mfl, dict):
for arr_name, arr4d in mflist.array.items():
- d = {kper: array for kper, array in enumerate(arr4d)}
+ d = dict(enumerate(arr4d))
name = f"{pkg_name}_{arr_name}"
self.add_transient_array(d, name)
else:
@@ -911,9 +896,7 @@ def add_vector(self, vector, name, masked_values=None):
tv[ix, : self.ncpl] = q
vector = tv
else:
- raise AssertionError(
- "Size of vector must be 3 * nnodes or 3 * ncpl"
- )
+ raise AssertionError("Size of vector must be 3 * nnodes or 3 * ncpl")
else:
vector = np.reshape(vector, (3, self.nnodes))
@@ -954,7 +937,7 @@ def add_transient_vector(self, d, name, masked_values=None):
self._set_vtk_grid_geometry()
if self.__transient_data:
- k = list(self.__transient_data.keys())[0]
+ k = next(iter(self.__transient_data.keys()))
if len(d) != len(self.__transient_data[k]):
print(
"Transient vector not same size as transient arrays time "
@@ -967,10 +950,7 @@ def add_transient_vector(self, d, name, masked_values=None):
if not isinstance(value, np.ndarray):
value = np.array(value)
- if (
- value.size != 3 * self.ncpl
- or value.size != 3 * self.nnodes
- ):
+            if value.size != 3 * self.ncpl and value.size != 3 * self.nnodes:
raise AssertionError(
"Size of vector must be 3 * nnodes or 3 * ncpl"
)
@@ -1041,7 +1021,7 @@ def add_package(self, pkg, masked_values=None):
value.transient_2ds, item, masked_values
)
else:
- d = {ix: i for ix, i in enumerate(value.array)}
+ d = dict(enumerate(value.array))
self.add_transient_array(d, item, masked_values)
elif value.data_type == DataType.transient3d:
@@ -1106,11 +1086,7 @@ def add_pathline_points(self, pathlines, timeseries=False):
if len(pathlines) == 0:
return
pathlines = [
- (
- pl.to_records(index=False)
- if isinstance(pl, pd.DataFrame)
- else pl
- )
+ (pl.to_records(index=False) if isinstance(pl, pd.DataFrame) else pl)
for pl in pathlines
]
fields = pathlines[0].dtype.names
@@ -1135,9 +1111,7 @@ def add_pathline_points(self, pathlines, timeseries=False):
}
if all(k in pathlines.dtype.names for k in mpx_fields):
pids = np.unique(pathlines.particleid)
- pathlines = [
- pathlines[pathlines.particleid == pid] for pid in pids
- ]
+ pathlines = [pathlines[pathlines.particleid == pid] for pid in pids]
elif all(k in pathlines.dtype.names for k in prt_fields):
pls = []
for imdl in np.unique(pathlines.imdl):
@@ -1148,15 +1122,14 @@ def add_pathline_points(self, pathlines, timeseries=False):
& (pathlines.iprp == iprp)
& (pathlines.irpt == irpt)
]
- pls.extend(
- [pl[pl.trelease == t] for t in np.unique(pl.t)]
- )
+ pls.extend([pl[pl.trelease == t] for t in np.unique(pl.t)])
pathlines = pls
else:
raise ValueError("Unrecognized pathline dtype")
else:
raise ValueError(
- "Unsupported pathline format, expected array, recarray, dataframe, or list"
+ "Unsupported pathline format, expected array, recarray, "
+ "dataframe, or list"
)
if not timeseries:
@@ -1228,11 +1201,11 @@ def add_heads(self, hds, kstpkper=None, masked_values=None):
# reset totim based on values read from head file
times = hds.get_times()
kstpkpers = hds.get_kstpkper()
- self._totim = {ki: time for (ki, time) in zip(kstpkpers, times)}
+ self._totim = dict(zip(kstpkpers, times))
text = hds.text.decode()
- d = dict()
+ d = {}
for ki in kstpkper:
d[ki] = hds.get_data(ki)
@@ -1240,9 +1213,7 @@ def add_heads(self, hds, kstpkper=None, masked_values=None):
self.add_transient_array(d, name=text, masked_values=masked_values)
self.__transient_output_data = True
- def add_cell_budget(
- self, cbc, text=None, kstpkper=None, masked_values=None
- ):
+ def add_cell_budget(self, cbc, text=None, kstpkper=None, masked_values=None):
"""
Method to add cell budget data to vtk
@@ -1268,9 +1239,7 @@ def add_cell_budget(
)
records = cbc.get_unique_record_names(decode=True)
- imeth_dict = {
- record: imeth for (record, imeth) in zip(records, cbc.imethlist)
- }
+ imeth_dict = dict(zip(records, cbc.imethlist))
if text is None:
keylist = records
else:
@@ -1290,7 +1259,7 @@ def add_cell_budget(
# reset totim based on values read from budget file
times = cbc.get_times()
kstpkpers = cbc.get_kstpkper()
- self._totim = {ki: time for (ki, time) in zip(kstpkpers, times)}
+ self._totim = dict(zip(kstpkpers, times))
for name in keylist:
d = {}
@@ -1304,8 +1273,7 @@ def add_cell_budget(
if array.size < self.nnodes:
if array.size < self.ncpl:
raise AssertionError(
- "Array size must be equal to "
- "either ncpl or nnodes"
+ "Array size must be equal to either ncpl or nnodes"
)
array = np.zeros(self.nnodes) * np.nan
@@ -1366,9 +1334,7 @@ def _set_particle_track_data(self, points, lines=None, arrays=None):
for ii in range(0, npts):
poly.GetPointIds().SetId(ii, i)
i += 1
- self.vtk_pathlines.InsertNextCell(
- poly.GetCellType(), poly.GetPointIds()
- )
+ self.vtk_pathlines.InsertNextCell(poly.GetCellType(), poly.GetPointIds())
# create a vtkVertex for each point
# necessary if arrays (time & particle ID) live on points?
@@ -1471,9 +1437,7 @@ def write(self, f: Union[str, os.PathLike], kper=None):
else:
w.SetInputData(grid)
- if (
- self.__transient_data or self.__transient_vector
- ) and ix == 0:
+ if (self.__transient_data or self.__transient_vector) and ix == 0:
if self.__transient_data:
cnt = 0
for per, d in self.__transient_data.items():
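The `_totim` and `imeth_dict` changes in this file share one idiom: `dict(zip(keys, values))` builds the mapping directly, equivalent to the comprehension it replaces. Note also that the size check in `add_transient_vector` now uses `and`, since the old `or` made the guard fire for any multi-layer grid where `ncpl != nnodes`.

```python
kstpkpers = [(0, 0), (0, 1), (0, 2)]  # (time step, stress period) pairs
times = [1.0, 2.0, 3.0]

totim = dict(zip(kstpkpers, times))
print(totim[(0, 1)])  # 2.0
```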
diff --git a/flopy/mbase.py b/flopy/mbase.py
index 0cf0f727e2..c662854d0a 100644
--- a/flopy/mbase.py
+++ b/flopy/mbase.py
@@ -17,7 +17,7 @@
from pathlib import Path
from shutil import which
from subprocess import PIPE, STDOUT, Popen
-from typing import List, Optional, Tuple, Union
+from typing import Optional, Union
from warnings import warn
import numpy as np
@@ -44,12 +44,11 @@
iprn = -1
-def resolve_exe(
- exe_name: Union[str, os.PathLike], forgive: bool = False
-) -> str:
+def resolve_exe(exe_name: Union[str, os.PathLike], forgive: bool = False) -> str:
"""
- Resolves the absolute path of the executable, raising FileNotFoundError if the executable
- cannot be found (set forgive to True to return None and warn instead of raising an error).
+ Resolves the absolute path of the executable, raising FileNotFoundError
+ if the executable cannot be found (set forgive to True to return None
+ and warn instead of raising an error).
Parameters
----------
@@ -140,9 +139,7 @@ def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop.append(idx)
self.file_data.append(
- FileDataEntry(
- fname, unit, binflag=binflag, output=output, package=package
- )
+ FileDataEntry(fname, unit, binflag=binflag, output=output, package=package)
)
return
@@ -314,10 +311,7 @@ def _check(self, chk, level=1):
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
- f=None,
- verbose=False,
- level=level - 1,
- checktype=chk.__class__,
+ f=None, verbose=False, level=level - 1, checktype=chk.__class__
)
# model level checks
@@ -342,12 +336,11 @@ def _check(self, chk, level=1):
# add package check results to model level check summary
for r in results.values():
- if (
- r is not None and r.summary_array is not None
- ): # currently SFR doesn't have one
- chk.summary_array = np.append(
- chk.summary_array, r.summary_array
- ).view(np.recarray)
+ if r is not None and r.summary_array is not None:
+ # currently SFR doesn't have one
+ chk.summary_array = np.append(chk.summary_array, r.summary_array).view(
+ np.recarray
+ )
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
@@ -403,9 +396,7 @@ def __init__(
self._packagelist = []
self.heading = ""
self.exe_name = (
- "mf2005"
- if exe_name is None
- else resolve_exe(exe_name, forgive=True)
+ "mf2005" if exe_name is None else resolve_exe(exe_name, forgive=True)
)
self._verbose = verbose
self.external_path = None
@@ -439,10 +430,7 @@ def __init__(
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
if kwargs:
- warn(
- f"unhandled keywords: {kwargs}",
- category=UserWarning,
- )
+ warn(f"unhandled keywords: {kwargs}", category=UserWarning)
# build model discretization objects
self._modelgrid = Grid(
@@ -669,9 +657,7 @@ def remove_package(self, pname):
if iu in self.package_units:
self.package_units.remove(iu)
return
- raise StopIteration(
- "Package name " + pname + " not found in Package list"
- )
+ raise StopIteration("Package name " + pname + " not found in Package list")
def __getattr__(self, item):
"""
@@ -729,11 +715,7 @@ def __getattr__(self, item):
return None
# to avoid infinite recursion
- if (
- item == "_packagelist"
- or item == "packagelist"
- or item == "mfnam_packages"
- ):
+ if item == "_packagelist" or item == "packagelist" or item == "mfnam_packages":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
@@ -890,9 +872,7 @@ def add_output(
if self.verbose:
self._output_msg(-1, add=True)
- def remove_output(
- self, fname: Optional[Union[str, os.PathLike]] = None, unit=None
- ):
+ def remove_output(self, fname: Optional[Union[str, os.PathLike]] = None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
@@ -927,9 +907,7 @@ def remove_output(
msg = "either fname or unit must be passed to remove_output()"
raise TypeError(msg)
- def get_output(
- self, fname: Optional[Union[str, os.PathLike]] = None, unit=None
- ):
+ def get_output(self, fname: Optional[Union[str, os.PathLike]] = None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
@@ -989,8 +967,7 @@ def set_output_attribute(
break
else:
raise TypeError(
- "either fname or unit must be passed "
- "to set_output_attribute()"
+ "either fname or unit must be passed to set_output_attribute()"
)
if attr is not None:
if idx is not None:
@@ -1033,8 +1010,7 @@ def get_output_attribute(
break
else:
raise TypeError(
- "either fname or unit must be passed "
- "to set_output_attribute()"
+ "either fname or unit must be passed to set_output_attribute()"
)
v = None
if attr is not None:
@@ -1077,7 +1053,9 @@ def add_external(
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
- msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
+ msg = (
+ f"BaseModel.add_external() warning: replacing existing unit {unit}"
+ )
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
@@ -1290,7 +1268,8 @@ def change_model_ws(
if not os.path.exists(new_pth):
try:
print(
- f"\ncreating model workspace...\n {flopy_io.relpath_safe(new_pth)}"
+ "\ncreating model workspace...\n "
+ + flopy_io.relpath_safe(new_pth)
)
os.makedirs(new_pth)
except:
@@ -1300,9 +1279,7 @@ def change_model_ws(
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
- print(
- f"\nchanging model workspace...\n {flopy_io.relpath_safe(new_pth)}"
- )
+ print(f"\nchanging model workspace...\n {flopy_io.relpath_safe(new_pth)}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
@@ -1311,9 +1288,7 @@ def change_model_ws(
if (
hasattr(self, "external_path")
and self.external_path is not None
- and not os.path.exists(
- os.path.join(self._model_ws, self.external_path)
- )
+ and not os.path.exists(os.path.join(self._model_ws, self.external_path))
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
@@ -1325,9 +1300,7 @@ def change_model_ws(
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
- for ext_file, output in zip(
- self.external_fnames, self.external_output
- ):
+ for ext_file, output in zip(self.external_fnames, self.external_output):
# this is a wicked mess
if output:
new_ext_file = ext_file
@@ -1369,23 +1342,17 @@ def __setattr__(self, key, value):
elif key == "model_ws":
self.change_model_ws(value)
elif key == "tr":
- assert isinstance(
- value, discretization.reference.TemporalReference
- )
+ assert isinstance(value, discretization.reference.TemporalReference)
if self.dis is not None:
self.dis.tr = value
else:
- raise Exception(
- "cannot set TemporalReference - ModflowDis not found"
- )
+ raise Exception("cannot set TemporalReference - ModflowDis not found")
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
- raise Exception(
- "cannot set start_datetime - ModflowDis not found"
- )
+ raise Exception("cannot set start_datetime - ModflowDis not found")
else:
super().__setattr__(key, value)
@@ -1395,7 +1362,7 @@ def run_model(
pause=False,
report=False,
normal_msg="normal termination",
- ) -> Tuple[bool, List[str]]:
+ ) -> tuple[bool, list[str]]:
"""
This method will run the model using subprocess.Popen.
@@ -1580,11 +1547,9 @@ def check(
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
- otherpackage = [
- k
- for k, v in package_units.items()
- if v == p.unit_number[i]
- ][0]
+ otherpackage = next(
+ k for k, v in package_units.items() if v == p.unit_number[i]
+ )
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
@@ -1644,9 +1609,7 @@ def plot(self, SelPackList=None, **kwargs):
"""
from .plot import PlotUtilities
- axes = PlotUtilities._plot_model_helper(
- self, SelPackList=SelPackList, **kwargs
- )
+ axes = PlotUtilities._plot_model_helper(self, SelPackList=SelPackList, **kwargs)
return axes
def to_shapefile(self, *args, **kwargs):
@@ -1667,7 +1630,7 @@ def run_model(
use_async=False,
cargs=None,
custom_print=None,
-) -> Tuple[bool, List[str]]:
+) -> tuple[bool, list[str]]:
"""
Run the model using subprocess.Popen, optionally collecting stdout and printing
timestamped progress. Model workspace, namefile, executable to use, and several
@@ -1736,13 +1699,12 @@ def run_model(
exe_path = resolve_exe(exe_name)
if not silent:
print(
- f"FloPy is using the following executable to run the model: {flopy_io.relpath_safe(exe_path, model_ws)}"
+ "FloPy is using the following executable to run the model: "
+ + flopy_io.relpath_safe(exe_path, model_ws)
)
# make sure namefile exists
- if namefile is not None and not os.path.isfile(
- os.path.join(model_ws, namefile)
- ):
+ if namefile is not None and not os.path.isfile(os.path.join(model_ws, namefile)):
raise FileNotFoundError(
f"The namefile for this model does not exist: {namefile}"
)
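The duplicate-unit lookup in `check()` above is another comprehension-plus-`[0]` turned into `next()`: it returns the first other package registered on the same unit number without materializing the list. Sketch with invented package names:

```python
package_units = {"WEL": 20, "RIV": 21, "GHB": 20}
unit = 20

other = next(k for k, v in package_units.items() if v == unit)
print(other)  # WEL
```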
diff --git a/flopy/mf6/data/mfdatalist.py b/flopy/mf6/data/mfdatalist.py
index ab5a2aa663..529177c41c 100644
--- a/flopy/mf6/data/mfdatalist.py
+++ b/flopy/mf6/data/mfdatalist.py
@@ -1546,51 +1546,7 @@ def data(self):
@property
def masked_4D_arrays(self):
- """Returns list data as a masked 4D array."""
- model_grid = self.data_dimensions.get_model_grid()
- nper = self.data_dimensions.package_dim.model_dim[
- 0
- ].simulation_time.get_num_stress_periods()
- # get the first kper
- arrays = self.to_array(kper=0, mask=True)
-
- if arrays is not None:
- # initialize these big arrays
- if model_grid.grid_type() == DiscretizationType.DIS:
- m4ds = {}
- for name, array in arrays.items():
- m4d = np.zeros(
- (
- nper,
- model_grid.num_layers,
- model_grid.num_rows,
- model_grid.num_columns,
- )
- )
- m4d[0, :, :, :] = array
- m4ds[name] = m4d
- for kper in range(1, nper):
- arrays = self.to_array(kper=kper, mask=True)
- for name, array in arrays.items():
- m4ds[name][kper, :, :, :] = array
- return m4ds
- else:
- m3ds = {}
- for name, array in arrays.items():
- m3d = np.zeros(
- (
- nper,
- model_grid.num_layers,
- model_grid.num_cells_per_layer(),
- )
- )
- m3d[0, :, :] = array
- m3ds[name] = m3d
- for kper in range(1, nper):
- arrays = self.to_array(kper=kper, mask=True)
- for name, array in arrays.items():
- m3ds[name][kper, :, :] = array
- return m3ds
+ return dict(self.masked_4D_arrays_itr())
def masked_4D_arrays_itr(self):
"""Returns list data as an iterator of a masked 4D array."""
diff --git a/flopy/mf6/data/mfdataplist.py b/flopy/mf6/data/mfdataplist.py
index 48f1aa02ef..331177ef08 100644
--- a/flopy/mf6/data/mfdataplist.py
+++ b/flopy/mf6/data/mfdataplist.py
@@ -2278,47 +2278,22 @@ def set_data(self, data, key=None, autofill=False):
def masked_4D_arrays_itr(self):
"""Returns list data as an iterator of a masked 4D array."""
- model_grid = self.data_dimensions.get_model_grid()
nper = self.data_dimensions.package_dim.model_dim[
0
].simulation_time.get_num_stress_periods()
- # get the first kper
- arrays = self.to_array(kper=0, mask=True)
-
- if arrays is not None:
- # initialize these big arrays
- for name, array in arrays.items():
- if model_grid.grid_type() == DiscretizationType.DIS:
- m4d = np.zeros(
- (
- nper,
- model_grid.num_layers(),
- model_grid.num_rows(),
- model_grid.num_columns(),
- )
- )
- m4d[0, :, :, :] = array
- for kper in range(1, nper):
- arrays = self.to_array(kper=kper, mask=True)
- for tname, array in arrays.items():
- if tname == name:
- m4d[kper, :, :, :] = array
- yield name, m4d
- else:
- m3d = np.zeros(
- (
- nper,
- model_grid.num_layers(),
- model_grid.num_cells_per_layer(),
- )
- )
- m3d[0, :, :] = array
- for kper in range(1, nper):
- arrays = self.to_array(kper=kper, mask=True)
- for tname, array in arrays.items():
- if tname == name:
- m3d[kper, :, :] = array
- yield name, m3d
+
+ # get the first kper array to extract array shape and names
+ arrays_kper_0 = self.to_array(kper=0, mask=True)
+ shape_per_spd = next(iter(arrays_kper_0.values())).shape
+
+ for name in arrays_kper_0.keys():
+ ma = np.zeros((nper, *shape_per_spd))
+ for kper in range(nper):
+                # Reuse the previous period's arrays when to_array() returns nothing new
+ if new_arrays := self.to_array(kper=kper, mask=True):
+ arrays = new_arrays
+ ma[kper] = arrays[name]
+ yield name, ma
def _set_data_record(
self,
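Two things happen in the list-data hunks above: `masked_4D_arrays` collapses to `dict(self.masked_4D_arrays_itr())`, so the property and the iterator can no longer drift apart, and the rewritten iterator uses the walrus operator to call `to_array()` once per period, reusing the previous period's arrays whenever it returns a falsy result (assumed here to mean the period defines no new data). The control flow in isolation:

```python
def fill_forward(per_period):
    arrays = None
    out = []
    for value in per_period:
        if new := value:  # falsy -> keep the previous arrays
            arrays = new
        out.append(arrays)
    return out

print(fill_forward([{"a": 1}, None, {"a": 2}, None]))
# [{'a': 1}, {'a': 1}, {'a': 2}, {'a': 2}]
```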
diff --git a/flopy/mf6/data/mfdatautil.py b/flopy/mf6/data/mfdatautil.py
index ec244c3fea..45d67913ae 100644
--- a/flopy/mf6/data/mfdatautil.py
+++ b/flopy/mf6/data/mfdatautil.py
@@ -15,7 +15,7 @@ def iterable(obj, any_iterator=False):
if any_iterator:
try:
my_iter = iter(obj)
- except TypeError as te:
+ except TypeError:
return False
return True
else:
diff --git a/flopy/mf6/mfbase.py b/flopy/mf6/mfbase.py
index d8ff7c1ff4..e2ad3416d9 100644
--- a/flopy/mf6/mfbase.py
+++ b/flopy/mf6/mfbase.py
@@ -11,7 +11,6 @@
from pathlib import Path
from shutil import copyfile
from typing import Union
-from warnings import warn
# internal handled exceptions
@@ -454,24 +453,13 @@ class PackageContainer:
modflow_models = []
models_by_type = {}
- def __init__(self, simulation_data, name):
- self.type = "PackageContainer"
- self.simulation_data = simulation_data
- self.name = name
- self._packagelist = []
+ def __init__(self, simulation_data):
+ self._simulation_data = simulation_data
+ self.packagelist = []
self.package_type_dict = {}
self.package_name_dict = {}
self.package_filename_dict = {}
- @property
- def package_key_dict(self):
- warnings.warn(
- "package_key_dict has been deprecated, use "
- "package_type_dict instead",
- category=DeprecationWarning,
- )
- return self.package_type_dict
-
@staticmethod
def package_list():
"""Static method that returns the list of available packages.
@@ -554,9 +542,9 @@ def package_names(self):
"""Returns a list of package names."""
return list(self.package_name_dict.keys())
- def _add_package(self, package, path):
+ def add_package(self, package):
# put in packages list and update lookup dictionaries
- self._packagelist.append(package)
+ self.packagelist.append(package)
if package.package_name is not None:
self.package_name_dict[package.package_name.lower()] = package
if package.filename is not None:
@@ -565,9 +553,9 @@ def _add_package(self, package, path):
self.package_type_dict[package.package_type.lower()] = []
self.package_type_dict[package.package_type.lower()].append(package)
- def _remove_package(self, package):
- if package in self._packagelist:
- self._packagelist.remove(package)
+ def remove_package(self, package):
+ if package in self.packagelist:
+ self.packagelist.remove(package)
if (
package.package_name is not None
and package.package_name.lower() in self.package_name_dict
@@ -587,7 +575,7 @@ def _remove_package(self, package):
# collect keys of items to be removed from main dictionary
items_to_remove = []
- for key in self.simulation_data.mfdata:
+ for key in self._simulation_data.mfdata:
is_subkey = True
for pitem, ditem in zip(package.path, key):
if pitem != ditem:
@@ -598,7 +586,7 @@ def _remove_package(self, package):
# remove items from main dictionary
for key in items_to_remove:
- del self.simulation_data.mfdata[key]
+ del self._simulation_data.mfdata[key]
def _rename_package(self, package, new_name):
# fix package_name_dict key
@@ -609,7 +597,7 @@ def _rename_package(self, package, new_name):
del self.package_name_dict[package.package_name.lower()]
self.package_name_dict[new_name.lower()] = package
# get keys to fix in main dictionary
- main_dict = self.simulation_data.mfdata
+ main_dict = self._simulation_data.mfdata
items_to_fix = []
for key in main_dict:
is_subkey = True
@@ -648,7 +636,7 @@ def get_package(self, name=None, type_only=False, name_only=False):
"""
if name is None:
- return self._packagelist[:]
+ return self.packagelist[:]
# search for full package name
if name.lower() in self.package_name_dict and not type_only:
@@ -669,7 +657,7 @@ def get_package(self, name=None, type_only=False, name_only=False):
# search for partial and case-insensitive package name
if not type_only:
- for pp in self._packagelist:
+ for pp in self.packagelist:
if pp.package_name is not None:
# get first package of the type requested
package_name = pp.package_name.lower()
@@ -680,11 +668,6 @@ def get_package(self, name=None, type_only=False, name_only=False):
return None
- def register_package(self, package):
- """Base method for registering a package. Should be overridden."""
- path = (package.package_name,)
- return (path, None)
-
@staticmethod
def _load_only_dict(load_only):
if load_only is None:
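The data-dictionary cleanup in `remove_package` hinges on a tuple-prefix test: a simulation-data key belongs to the package when the package path is a leading prefix of the key. A slightly stricter standalone variant (it adds a length check that the inline `zip` loop, which truncates to the shorter tuple, does not):

```python
def is_subkey(package_path: tuple, key: tuple) -> bool:
    return len(key) >= len(package_path) and all(
        p == k for p, k in zip(package_path, key)
    )

print(is_subkey(("gwf6", "wel"), ("gwf6", "wel", "period")))  # True
print(is_subkey(("gwf6", "wel"), ("gwf6", "riv", "period")))  # False
```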
diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py
index 3a54e4f525..76429bdec0 100644
--- a/flopy/mf6/mfmodel.py
+++ b/flopy/mf6/mfmodel.py
@@ -1,7 +1,8 @@
import inspect
import os
import sys
-from typing import Union
+import warnings
+from typing import Optional, Union
import numpy as np
@@ -31,7 +32,7 @@
from .utils.output_util import MF6Output
-class MFModel(PackageContainer, ModelInterface):
+class MFModel(ModelInterface):
"""
MODFLOW-6 model base class. Represents a single model in a simulation.
@@ -83,7 +84,7 @@ def __init__(
verbose=False,
**kwargs,
):
- super().__init__(simulation.simulation_data, modelname)
+ self._package_container = PackageContainer(simulation.simulation_data)
self.simulation = simulation
self.simulation_data = simulation.simulation_data
self.name = modelname
@@ -137,7 +138,7 @@ def __init__(
# build model name file
# create name file based on model type - support different model types
- package_obj = self.package_factory("nam", model_type[0:3])
+ package_obj = PackageContainer.package_factory("nam", model_type[0:3])
if not package_obj:
excpt_str = (
f"Name file could not be found for model{model_type[0:3]}."
@@ -238,6 +239,80 @@ def _get_data_str(self, formal):
)
return data_str
+ @property
+ def package_key_dict(self):
+ """
+ .. deprecated:: 3.9
+ This property is for internal use only and will be removed in a future release.
+ """
+ warnings.warn(
+ "This property is for internal use only and will be removed in a future release.",
+ category=DeprecationWarning,
+ )
+ return self._package_container.package_type_dict
+
+ @property
+ def package_dict(self):
+ """Returns a copy of the package name dictionary.
+
+ .. deprecated:: 3.9
+ This property is for internal use only and will be removed in a future release.
+ """
+ warnings.warn(
+ "This property is for internal use only and will be removed in a future release.",
+ category=DeprecationWarning,
+ )
+ return self._package_container.package_dict
+
+ @property
+ def package_names(self):
+ """Returns a list of package names.
+
+ .. deprecated:: 3.9
+ This property is for internal use only and will be removed in a future release.
+ """
+ warnings.warn(
+ "This property is for internal use only and will be removed in a future release.",
+ category=DeprecationWarning,
+ )
+ return self._package_container.package_names
+
+ @property
+ def package_type_dict(self):
+ """
+ .. deprecated:: 3.9
+ This property is for internal use only and will be removed in a future release.
+ """
+ warnings.warn(
+ "This property is for internal use only and will be removed in a future release.",
+ category=DeprecationWarning,
+ )
+ return self._package_container.package_type_dict
+
+ @property
+ def package_name_dict(self):
+ """
+ .. deprecated:: 3.9
+ This property is for internal use only and will be removed in a future release.
+ """
+ warnings.warn(
+ "This property is for internal use only and will be removed in a future release.",
+ category=DeprecationWarning,
+ )
+ return self._package_container.package_name_dict
+
+ @property
+ def package_filename_dict(self):
+ """
+ .. deprecated:: 3.9
+ This property is for internal use only and will be removed in a future release.
+ """
+ warnings.warn(
+ "This property is for internal use only and will be removed in a future release.",
+ category=DeprecationWarning,
+ )
+ return self._package_container.package_filename_dict
+
@property
def nper(self):
"""Number of stress periods.
@@ -638,7 +713,7 @@ def modelgrid(self):
@property
def packagelist(self):
"""List of model packages."""
- return self._packagelist
+ return self._package_container.packagelist
@property
def namefile(self):
@@ -750,6 +825,12 @@ def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
+ Warning
+ -------
+ The MF6 check mechanism is deprecated pending reimplementation
+ in a future release. While the checks API will remain in place
+ through 3.x, it may be unstable, and will likely change in 4.x.
+
Parameters
----------
f : str or file handle
@@ -775,6 +856,7 @@ def check(self, f=None, verbose=True, level=1):
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
+
# check instance for model-level check
chk = mf6check(self, f=f, verbose=verbose, level=level)
@@ -844,7 +926,7 @@ def load_base(
)
# build case consistent load_only dictionary for quick lookups
- load_only = instance._load_only_dict(load_only)
+ load_only = PackageContainer._load_only_dict(load_only)
# load name file
instance.name_file.load(strict)
@@ -882,10 +964,12 @@ def load_base(
):
if (
load_only is not None
- and not instance._in_pkg_list(
+ and not PackageContainer._in_pkg_list(
priority_packages, ftype_orig, pname
)
- and not instance._in_pkg_list(load_only, ftype_orig, pname)
+ and not PackageContainer._in_pkg_list(
+ load_only, ftype_orig, pname
+ )
):
if (
simulation.simulation_data.verbosity_level.value
@@ -1373,7 +1457,8 @@ def is_valid(self):
for package_struct in self.structure.package_struct_objs.values():
if (
not package_struct.optional
- and package_struct.file_type not in self.package_type_dict
+ and package_struct.file_type
+ not in self._package_container.package_type_dict
):
return False
@@ -1480,7 +1565,29 @@ def _remove_package_from_dictionaries(self, package):
# remove package from local dictionaries and lists
if package.path in self._package_paths:
del self._package_paths[package.path]
- self._remove_package(package)
+ self._package_container.remove_package(package)
+
+ def get_package(self, name=None, type_only=False, name_only=False):
+ """
+ Finds a package by package name, package key, package type, or partial
+ package name. Returns either a single package, a list of packages,
+ or None.
+
+ Parameters
+ ----------
+ name : str
+ Name or type of the package, 'my-riv-1', 'RIV', 'LPF', etc.
+ type_only : bool
+ Search for package by type only
+ name_only : bool
+ Search for package by name only
+
+ Returns
+ -------
+ pp : Package object
+
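+ Examples
+ --------
+ A minimal sketch (assuming a model object ``gwf`` with a RIV package):
+
+ >>> riv = gwf.get_package("riv")
+ >>> all_packages = gwf.get_package()
+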
+ """
+ return self._package_container.get_package(name, type_only, name_only)
def remove_package(self, package_name):
"""
@@ -1552,7 +1659,7 @@ def remove_package(self, package_name):
value_,
traceback_,
None,
- self._simulation_data.debug,
+ self.simulation_data.debug,
)
try:
self.name_file.packages.set_data(new_rec_array)
@@ -1637,7 +1744,7 @@ def update_package_filename(self, package, new_name):
value_,
traceback_,
None,
- self._simulation_data.debug,
+ self.simulation_data.debug,
)
try:
self.name_file.packages.set_data(new_rec_array)
@@ -1704,6 +1811,12 @@ def set_all_data_external(
):
"""Sets the model's list and array data to be stored externally.
+ Warning
+ -------
+ The MF6 check mechanism is deprecated pending reimplementation
+ in a future release. While the checks API will remain in place
+ through 3.x, it may be unstable, and will likely change in 4.x.
+
Parameters
----------
check_data : bool
@@ -1794,12 +1907,15 @@ def register_package(
)
elif (
not set_package_name
- and package.package_name in self.package_name_dict
+ and package.package_name
+ in self._package_container.package_name_dict
):
# package of this type with this name already
# exists, replace it
self.remove_package(
- self.package_name_dict[package.package_name]
+ self._package_container.package_name_dict[
+ package.package_name
+ ]
)
if (
self.simulation_data.verbosity_level.value
@@ -1842,7 +1958,10 @@ def register_package(
# check for other registered packages of this type
name_iter = datautil.NameIter(package.package_type, False)
for package_name in name_iter:
- if package_name not in self.package_name_dict:
+ if (
+ package_name
+ not in self._package_container.package_name_dict
+ ):
package.package_name = package_name
suffix = package_name.split("_")
if (
@@ -1861,15 +1980,19 @@ def register_package(
if set_package_filename:
# filename uses model base name
package._filename = f"{self.name}.{package.package_type}"
- if package._filename in self.package_filename_dict:
+ if (
+ package._filename
+ in self._package_container.package_filename_dict
+ ):
# auto generate a unique file name and register it
file_name = MFFileMgmt.unique_file_name(
- package._filename, self.package_filename_dict
+ package._filename,
+ self._package_container.package_filename_dict,
)
package._filename = file_name
if add_to_package_list:
- self._add_package(package, path)
+ self._package_container.add_package(package)
# add obs file to name file if it does not have a parent
if package.package_type in self.structure.package_struct_objs or (
@@ -1918,7 +2041,7 @@ def load_package(
strict,
ref_path,
dict_package_name=None,
- parent_package=None,
+ parent_package: Optional[MFPackage] = None,
):
"""
Loads a package from a file. This method is used internally by FloPy
@@ -1989,7 +2112,7 @@ def load_package(
model_type = model_type[0:-1]
# create package
- package_obj = self.package_factory(ftype, model_type)
+ package_obj = PackageContainer.package_factory(ftype, model_type)
package = package_obj(
self,
filename=fname,
@@ -2002,7 +2125,9 @@ def load_package(
package.load(strict)
except ReadAsArraysException:
# create ReadAsArrays package and load it instead
- package_obj = self.package_factory(f"{ftype}a", model_type)
+ package_obj = PackageContainer.package_factory(
+ f"{ftype}a", model_type
+ )
package = package_obj(
self,
filename=fname,
@@ -2014,10 +2139,10 @@ def load_package(
package.load(strict)
# register child package with the model
- self._add_package(package, package.path)
+ self._package_container.add_package(package)
if parent_package is not None:
# register child package with the parent package
- parent_package._add_package(package, package.path)
+ parent_package.add_package(package)
return package
diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py
index 3c809d667b..c9eb52a881 100644
--- a/flopy/mf6/mfpackage.py
+++ b/flopy/mf6/mfpackage.py
@@ -4,6 +4,7 @@
import inspect
import os
import sys
+import warnings
import numpy as np
@@ -258,7 +259,7 @@ def write_header(self, fd):
if len(self.data_items) > 1:
for data_item in self.data_items[1:]:
entry = data_item.get_file_entry(values_only=True)
- fd.write("%s" % (entry.rstrip()))
+ fd.write(str(entry).rstrip())
if self.get_comment().text:
fd.write(" ")
self.get_comment().write(fd)
@@ -1331,6 +1332,12 @@ def set_all_data_external(
base_name is external file name's prefix, check_data determines
if data error checking is enabled during this process.
+ Warning
+ -------
+ The MF6 check mechanism is deprecated pending reimplementation
+ in a future release. While the checks API will remain in place
+ through 3.x, it may be unstable, and will likely change in 4.x.
+
Parameters
----------
base_name : str
@@ -1343,6 +1350,7 @@ def set_all_data_external(
Whether file will be stored as binary
"""
+
for key, dataset in self.datasets.items():
lst_data = isinstance(dataset, mfdatalist.MFList) or isinstance(
dataset, mfdataplist.MFPandasList
@@ -1396,12 +1404,19 @@ def set_all_data_internal(self, check_data=True):
check_data determines if data error checking is enabled during this
process.
+ Warning
+ -------
+ The MF6 check mechanism is deprecated pending reimplementation
+ in a future release. While the checks API will remain in place
+ through 3.x, it may be unstable, and will likely change in 4.x.
+
Parameters
----------
check_data : bool
Whether to do data error checking.
"""
+
for key, dataset in self.datasets.items():
if (
isinstance(dataset, mfdataarray.MFArray)
@@ -1643,7 +1658,9 @@ def is_allowed(self):
return True
def is_valid(self):
- """Returns true of the block is valid."""
+ """
+ Returns true if the block is valid.
+ """
# check data sets
for dataset in self.datasets.values():
# Non-optional datasets must be enabled
@@ -1663,7 +1680,7 @@ def is_valid(self):
return False
-class MFPackage(PackageContainer, PackageInterface):
+class MFPackage(PackageInterface):
"""
Provides an interface for the user to specify data to build a package.
@@ -1755,9 +1772,10 @@ def __init__(
self.model_or_sim.simulation_data.debug,
)
- super().__init__(self.model_or_sim.simulation_data, self.model_name)
-
- self._simulation_data = self.model_or_sim.simulation_data
+ self._package_container = PackageContainer(
+ self.model_or_sim.simulation_data
+ )
+ self.simulation_data = self.model_or_sim.simulation_data
self.blocks = {}
self.container_type = []
@@ -1829,7 +1847,7 @@ def __init__(
if self.path is None:
if (
- self._simulation_data.verbosity_level.value
+ self.simulation_data.verbosity_level.value
>= VerbosityLevel.normal.value
):
print(
@@ -1990,14 +2008,111 @@ def data_list(self):
# return [data_object, data_object, ...]
return self._data_list
- def _add_package(self, package, path):
+ @property
+ def package_key_dict(self):
+ """
+ .. deprecated:: 3.9
+ This property is for internal use only and will be removed in a future release.
+ """
+ warnings.warn(
+ "This property is for internal use only and will be removed in a future release.",
+ category=DeprecationWarning,
+ )
+ return self._package_container.package_type_dict
+
+ @property
+ def package_names(self):
+ """Returns a list of package names.
+
+ .. deprecated:: 3.9
+ This property is for internal use only and will be removed in a future release.
+ """
+ warnings.warn(
+ "This property is for internal use only and will be removed in a future release.",
+ category=DeprecationWarning,
+ )
+ return self._package_container.package_names
+
+ @property
+ def package_dict(self):
+ """
+ .. deprecated:: 3.9
+ This property is for internal use only and will be removed in a future release.
+ """
+ warnings.warn(
+ "This property is for internal use only and will be removed in a future release.",
+ category=DeprecationWarning,
+ )
+ return self._package_container.package_dict
+
+ @property
+ def package_type_dict(self):
+ """
+ .. deprecated:: 3.9
+ This property is for internal use only and will be removed in a future release.
+ """
+ warnings.warn(
+ "This property is for internal use only and will be removed in a future release.",
+ category=DeprecationWarning,
+ )
+ return self._package_container.package_type_dict
+
+ @property
+ def package_name_dict(self):
+ """
+ .. deprecated:: 3.9
+ This property is for internal use only and will be removed in a future release.
+ """
+ warnings.warn(
+ "This property is for internal use only and will be removed in a future release.",
+ category=DeprecationWarning,
+ )
+ return self._package_container.package_name_dict
+
+ @property
+ def package_filename_dict(self):
+ """
+ .. deprecated:: 3.9
+ This property is for internal use only and will be removed in a future release.
+ """
+ warnings.warn(
+ "This property is for internal use only and will be removed in a future release.",
+ category=DeprecationWarning,
+ )
+ return self._package_container.package_filename_dict
+
+ def get_package(self, name=None, type_only=False, name_only=False):
+ """
+ Finds a package by package name, package key, package type, or partial
+ package name. Returns either a single package, a list of packages,
+ or None.
+
+ Parameters
+ ----------
+ name : str
+ Name or type of the package, 'my-riv-1', 'RIV', 'LPF', etc.
+ type_only : bool
+ Search for package by type only
+ name_only : bool
+ Search for package by name only
+
+ Returns
+ -------
+ pp : Package object
+
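+ Examples
+ --------
+ A minimal sketch (assuming a WEL package ``wel`` with an OBS child package):
+
+ >>> obs = wel.get_package("obs")
+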
+ """
+ return self._package_container.get_package(name, type_only, name_only)
+
+ def add_package(self, package):
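+ """Adds a child package to this package; duplicates are ignored."""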
pkg_type = package.package_type.lower()
- if pkg_type in self.package_type_dict:
- for existing_pkg in self.package_type_dict[pkg_type]:
+ if pkg_type in self._package_container.package_type_dict:
+ for existing_pkg in self._package_container.package_type_dict[
+ pkg_type
+ ]:
if existing_pkg is package:
# do not add the same package twice
return
- super()._add_package(package, path)
+ self._package_container.add_package(package)
def _get_aux_data(self, aux_names):
if hasattr(self, "stress_period_data"):
@@ -2031,7 +2146,16 @@ def _boundnames_active(self):
return False
def check(self, f=None, verbose=True, level=1, checktype=None):
- """Data check, returns True on success."""
+ """
+ Data check, returns True on success.
+
+ Warning
+ -------
+ The MF6 check mechanism is deprecated pending reimplementation
+ in a future release. While the checks API will remain in place
+ through 3.x, it may be unstable, and will likely change in 4.x.
+ """
+
if checktype is None:
checktype = mf6check
# do general checks
@@ -2196,7 +2320,7 @@ def _get_block_header_info(self, line, path):
header_variable_strs = []
arr_clean_line = line.strip().split()
header_comment = MFComment(
- "", path + (arr_clean_line[1],), self._simulation_data, 0
+ "", path + (arr_clean_line[1],), self.simulation_data, 0
)
# break header into components
if len(arr_clean_line) < 2:
@@ -2216,14 +2340,14 @@ def _get_block_header_info(self, line, path):
value_,
traceback_,
message,
- self._simulation_data.debug,
+ self.simulation_data.debug,
)
elif len(arr_clean_line) == 2:
return MFBlockHeader(
arr_clean_line[1],
header_variable_strs,
header_comment,
- self._simulation_data,
+ self.simulation_data,
path,
)
else:
@@ -2243,7 +2367,7 @@ def _get_block_header_info(self, line, path):
arr_clean_line[1],
header_variable_strs,
header_comment,
- self._simulation_data,
+ self.simulation_data,
path,
)
@@ -2304,7 +2428,7 @@ def _update_size_defs(self):
# informational message to the user
if (
- self._simulation_data.verbosity_level.value
+ self.simulation_data.verbosity_level.value
>= VerbosityLevel.normal.value
):
print(
@@ -2537,21 +2661,25 @@ def build_child_packages_container(self, pkg_type, filerecord):
"""Builds a container object for any child packages. This method is
only intended for FloPy internal use."""
# get package class
- package_obj = self.package_factory(
+ package_obj = PackageContainer.package_factory(
pkg_type, self.model_or_sim.model_type
)
# create child package object
child_pkgs_name = f"utl{pkg_type}packages"
- child_pkgs_obj = self.package_factory(child_pkgs_name, "")
+ child_pkgs_obj = PackageContainer.package_factory(child_pkgs_name, "")
if child_pkgs_obj is None and self.model_or_sim.model_type is None:
# simulation level object, try just the package type in the name
child_pkgs_name = f"{pkg_type}packages"
- child_pkgs_obj = self.package_factory(child_pkgs_name, "")
+ child_pkgs_obj = PackageContainer.package_factory(
+ child_pkgs_name, ""
+ )
if child_pkgs_obj is None:
# see if the package is part of one of the supported model types
for model_type in MFStructure().sim_struct.model_types:
child_pkgs_name = f"{model_type}{pkg_type}packages"
- child_pkgs_obj = self.package_factory(child_pkgs_name, "")
+ child_pkgs_obj = PackageContainer.package_factory(
+ child_pkgs_name, ""
+ )
if child_pkgs_obj is not None:
break
child_pkgs = child_pkgs_obj(
@@ -2581,7 +2709,7 @@ def build_child_package(self, pkg_type, data, parameter_name, filerecord):
# build child package file name
child_path = package_group.next_default_file_path()
# create new empty child package
- package_obj = self.package_factory(
+ package_obj = PackageContainer.package_factory(
pkg_type, self.model_or_sim.model_type
)
package = package_obj(
@@ -2656,7 +2784,7 @@ def build_mfdata(self, var_name, data=None):
if var_name in block.data_structures:
if block.name not in self.blocks:
self.blocks[block.name] = MFBlock(
- self._simulation_data,
+ self.simulation_data,
self.dimensions,
block,
self.path + (key,),
@@ -2686,7 +2814,7 @@ def build_mfdata(self, var_name, data=None):
value_,
traceback_,
message,
- self._simulation_data.debug,
+ self.simulation_data.debug,
)
def set_model_relative_path(self, model_ws):
@@ -2702,7 +2830,7 @@ def set_model_relative_path(self, model_ws):
for key, block in self.blocks.items():
block.set_model_relative_path(model_ws)
# update sub-packages
- for package in self._packagelist:
+ for package in self._package_container.packagelist:
package.set_model_relative_path(model_ws)
def set_all_data_external(
@@ -2737,7 +2865,7 @@ def set_all_data_external(
binary,
)
# set sub-packages
- for package in self._packagelist:
+ for package in self._package_container.packagelist:
package.set_all_data_external(
check_data,
external_data_folder,
@@ -2758,7 +2886,7 @@ def set_all_data_internal(self, check_data=True):
for key, block in self.blocks.items():
block.set_all_data_internal(check_data)
# set sub-packages
- for package in self._packagelist:
+ for package in self._package_container.packagelist:
package.set_all_data_internal(check_data)
def load(self, strict=True):
@@ -2796,7 +2924,7 @@ def load(self, strict=True):
value_,
traceback_,
message,
- self._simulation_data.debug,
+ self.simulation_data.debug,
)
try:
@@ -2842,11 +2970,11 @@ def is_valid(self):
def _load_blocks(self, fd_input_file, strict=True, max_blocks=sys.maxsize):
# init
- self._simulation_data.mfdata[self.path + ("pkg_hdr_comments",)] = (
- MFComment("", self.path, self._simulation_data)
+ self.simulation_data.mfdata[self.path + ("pkg_hdr_comments",)] = (
+ MFComment("", self.path, self.simulation_data)
)
self.post_block_comments = MFComment(
- "", self.path, self._simulation_data
+ "", self.path, self.simulation_data
)
blocks_read = 0
@@ -2881,7 +3009,7 @@ def _load_blocks(self, fd_input_file, strict=True, max_blocks=sys.maxsize):
value_,
traceback_,
message,
- self._simulation_data.debug,
+ self.simulation_data.debug,
mfde,
)
@@ -2936,7 +3064,7 @@ def _load_blocks(self, fd_input_file, strict=True, max_blocks=sys.maxsize):
].repeating():
# warn and skip block
if (
- self._simulation_data.verbosity_level.value
+ self.simulation_data.verbosity_level.value
>= VerbosityLevel.normal.value
):
warning_str = (
@@ -2955,7 +3083,7 @@ def _load_blocks(self, fd_input_file, strict=True, max_blocks=sys.maxsize):
and len(bhval) > 0
and bhs[0].name == "iper"
):
- nper = self._simulation_data.mfdata[
+ nper = self.simulation_data.mfdata[
("tdis", "dimensions", "nper")
].get_data()
bhval_int = datautil.DatumUtil.is_int(bhval[0])
@@ -2974,7 +3102,7 @@ def _load_blocks(self, fd_input_file, strict=True, max_blocks=sys.maxsize):
)
# reset comments
self.post_block_comments = MFComment(
- "", self.path, self._simulation_data
+ "", self.path, self.simulation_data
)
cur_block.load(
@@ -2982,7 +3110,7 @@ def _load_blocks(self, fd_input_file, strict=True, max_blocks=sys.maxsize):
)
# write post block comment
- self._simulation_data.mfdata[
+ self.simulation_data.mfdata[
cur_block.block_headers[-1].blk_post_comment_path
] = self.post_block_comments
@@ -3006,7 +3134,7 @@ def _load_blocks(self, fd_input_file, strict=True, max_blocks=sys.maxsize):
self.post_block_comments.add_text(
str(line), True
)
- self._simulation_data.mfdata[
+ self.simulation_data.mfdata[
cur_block.block_headers[-1].blk_post_comment_path
] = self.post_block_comments
@@ -3070,7 +3198,7 @@ def create_package_dimensions(self):
if self.container_type[0] == PackageContainerType.model:
model_dims = [
modeldimensions.ModelDimensions(
- self.path[0], self._simulation_data
+ self.path[0], self.simulation_data
)
]
else:
@@ -3078,7 +3206,7 @@ def create_package_dimensions(self):
# model. figure out which model to use and return a dimensions
# object for that model
if self.dfn_file_name[0:3] == "exg":
- exchange_rec_array = self._simulation_data.mfdata[
+ exchange_rec_array = self.simulation_data.mfdata[
("nam", "exchanges", "exchanges")
].get_data()
if exchange_rec_array is None:
@@ -3087,10 +3215,10 @@ def create_package_dimensions(self):
if exchange[1].lower() == self._filename.lower():
model_dims = [
modeldimensions.ModelDimensions(
- exchange[2], self._simulation_data
+ exchange[2], self.simulation_data
),
modeldimensions.ModelDimensions(
- exchange[3], self._simulation_data
+ exchange[3], self.simulation_data
),
]
break
@@ -3136,10 +3264,10 @@ def create_package_dimensions(self):
# assign models to gnc package
model_dims = [
modeldimensions.ModelDimensions(
- model_1, self._simulation_data
+ model_1, self.simulation_data
),
modeldimensions.ModelDimensions(
- model_2, self._simulation_data
+ model_2, self.simulation_data
),
]
elif self.parent_file is not None:
@@ -3148,14 +3276,12 @@ def create_package_dimensions(self):
model_name = md.model_name
model_dims.append(
modeldimensions.ModelDimensions(
- model_name, self._simulation_data
+ model_name, self.simulation_data
)
)
else:
model_dims = [
- modeldimensions.ModelDimensions(
- None, self._simulation_data
- )
+ modeldimensions.ModelDimensions(None, self.simulation_data)
]
return modeldimensions.PackageDimensions(
model_dims, self.structure, self.path
@@ -3166,7 +3292,7 @@ def _store_comment(self, line, found_first_block):
if found_first_block:
self.post_block_comments.text += line
else:
- self._simulation_data.mfdata[
+ self.simulation_data.mfdata[
self.path + ("pkg_hdr_comments",)
].text += line
@@ -3190,13 +3316,13 @@ def _write_blocks(self, fd, ext_file_action):
value_,
traceback_,
message,
- self._simulation_data.debug,
+ self.simulation_data.debug,
)
# write initial comments
pkg_hdr_comments_path = self.path + ("pkg_hdr_comments",)
- if pkg_hdr_comments_path in self._simulation_data.mfdata:
- self._simulation_data.mfdata[
+ if pkg_hdr_comments_path in self.simulation_data.mfdata:
+ self.simulation_data.mfdata[
self.path + ("pkg_hdr_comments",)
].write(fd, False)
@@ -3219,14 +3345,14 @@ def get_file_path(self):
-------
file path : str
"""
- if self.path[0] in self._simulation_data.mfpath.model_relative_path:
+ if self.path[0] in self.simulation_data.mfpath.model_relative_path:
return os.path.join(
- self._simulation_data.mfpath.get_model_path(self.path[0]),
+ self.simulation_data.mfpath.get_model_path(self.path[0]),
self._filename,
)
else:
return os.path.join(
- self._simulation_data.mfpath.get_sim_path(), self._filename
+ self.simulation_data.mfpath.get_sim_path(), self._filename
)
def export(self, f, **kwargs):
diff --git a/flopy/mf6/mfsimbase.py b/flopy/mf6/mfsimbase.py
index 793bee6b9c..74dc8d036a 100644
--- a/flopy/mf6/mfsimbase.py
+++ b/flopy/mf6/mfsimbase.py
@@ -4,7 +4,7 @@
import sys
import warnings
from pathlib import Path
-from typing import List, Optional, Union
+from typing import Optional, Union, cast
import numpy as np
@@ -21,7 +21,8 @@
PackageContainerType,
VerbosityLevel,
)
-from flopy.mf6.mfpackage import MFPackage
+from flopy.mf6.mfmodel import MFModel
+from flopy.mf6.mfpackage import MFChildPackages, MFPackage
from flopy.mf6.modflow import mfnam, mftdis
from flopy.mf6.utils import binaryfile_utils, mfobservation
@@ -392,7 +393,7 @@ def _update_str_format(self):
)
-class MFSimulationBase(PackageContainer):
+class MFSimulationBase:
"""
Entry point into any MODFLOW simulation.
@@ -471,7 +472,8 @@ def __init__(
lazy_io=False,
use_pandas=True,
):
- super().__init__(MFSimulationData(sim_ws, self), sim_name)
+ self.name = sim_name
+ self.simulation_data = MFSimulationData(sim_ws, self)
self.simulation_data.verbosity_level = self._resolve_verbosity_level(
verbosity_level
)
@@ -479,6 +481,7 @@ def __init__(
self.simulation_data.use_pandas = use_pandas
if lazy_io:
self.simulation_data.lazy_io = True
+ self._package_container = PackageContainer(self.simulation_data)
# verify metadata
fpdata = mfstructure.MFStructure()
@@ -630,7 +633,7 @@ def _get_data_str(self, formal):
)
)
- for package in self._packagelist:
+ for package in self._package_container.packagelist:
pk_str = package._get_data_str(formal, False)
if formal:
if len(pk_str.strip()) > 0:
@@ -689,9 +692,83 @@ def exchange_files(self):
"""
return self._exchange_files.values()
+ @property
+ def package_key_dict(self):
+ """
+ .. deprecated:: 3.9
+ This property is for internal use only and will be removed in a future release.
+ """
+ warnings.warn(
+ "This property is for internal use only and will be removed in a future release.",
+ category=DeprecationWarning,
+ )
+ return self._package_container.package_type_dict
+
+ @property
+ def package_dict(self):
+ """Returns a copy of the package name dictionary.
+
+ .. deprecated:: 3.9
+ This property is for internal use only and will be removed in a future release.
+ """
+ warnings.warn(
+ "This property is for internal use only and will be removed in a future release.",
+ category=DeprecationWarning,
+ )
+ return self._package_container.package_dict
+
+ @property
+ def package_names(self):
+ """Returns a list of package names.
+
+ .. deprecated:: 3.9
+ This property is for internal use only and will be removed in a future release.
+ """
+ warnings.warn(
+ "This property is for internal use only and will be removed in a future release.",
+ category=DeprecationWarning,
+ )
+ return self._package_container.package_names
+
+ @property
+ def package_type_dict(self):
+ """
+ .. deprecated:: 3.9
+ This property is for internal use only and will be removed in a future release.
+ """
+ warnings.warn(
+ "This property is for internal use only and will be removed in a future release.",
+ category=DeprecationWarning,
+ )
+ return self._package_container.package_type_dict
+
+ @property
+ def package_name_dict(self):
+ """
+ .. deprecated:: 3.9
+ This property is for internal use only and will be removed in a future release.
+ """
+ warnings.warn(
+ "This property is for internal use only and will be removed in a future release.",
+ category=DeprecationWarning,
+ )
+ return self._package_container.package_name_dict
+
+ @property
+ def package_filename_dict(self):
+ """
+ .. deprecated:: 3.9
+ This property is for internal use only and will be removed in a future release.
+ """
+ warnings.warn(
+ "This property is for internal use only and will be removed in a future release.",
+ category=DeprecationWarning,
+ )
+ return self._package_container.package_filename_dict
+
@staticmethod
def load(
- cls_child,
+ cls_child: type["MFSimulationBase"],
sim_name="modflowsim",
version="mf6",
exe_name: Union[str, os.PathLike] = "mf6",
@@ -778,7 +855,7 @@ def load(
print("loading simulation...")
# build case consistent load_only dictionary for quick lookups
- load_only = instance._load_only_dict(load_only)
+ load_only = PackageContainer._load_only_dict(load_only)
# load simulation name file
if verbosity_level.value >= VerbosityLevel.normal.value:
@@ -868,7 +945,7 @@ def load(
message=message,
)
for exgfile in exch_data:
- if load_only is not None and not instance._in_pkg_list(
+ if load_only is not None and not PackageContainer._in_pkg_list(
load_only, exgfile[0], exgfile[2]
):
if (
@@ -891,7 +968,7 @@ def load(
exchange_name = f"{exchange_type}_EXG_{exchange_file_num}"
# find package class the corresponds to this exchange type
- package_obj = instance.package_factory(
+ package_obj = PackageContainer.package_factory(
exchange_type.replace("-", "").lower(), ""
)
if not package_obj:
@@ -912,7 +989,7 @@ def load(
value_,
traceback_,
message,
- instance._simulation_data.debug,
+ instance.simulation_data.debug,
)
# build and load exchange package object
@@ -930,7 +1007,7 @@ def load(
f" loading exchange package {exchange_file._get_pname()}..."
)
exchange_file.load(strict)
- instance._add_package(exchange_file, exchange_file.path)
+ instance._package_container.add_package(exchange_file)
instance._exchange_files[exgfile[1]] = exchange_file
# load simulation packages
@@ -953,7 +1030,7 @@ def load(
)
for solution_group in solution_group_dict.values():
for solution_info in solution_group:
- if load_only is not None and not instance._in_pkg_list(
+ if load_only is not None and not PackageContainer._in_pkg_list(
load_only, solution_info[0], solution_info[2]
):
if (
@@ -965,7 +1042,7 @@ def load(
)
continue
# create solution package
- sln_package_obj = instance.package_factory(
+ sln_package_obj = PackageContainer.package_factory(
solution_info[0][:-1].lower(), ""
)
sln_package = sln_package_obj(
@@ -986,6 +1063,28 @@ def load(
instance.check()
return instance
+ def get_package(self, name=None, type_only=False, name_only=False):
+ """
+ Finds a package by package name, package key, package type, or partial
+ package name. Returns either a single package, a list of packages,
+ or None.
+
+ Parameters
+ ----------
+ name : str
+ Name or type of the package, 'my-riv-1', 'RIV', 'LPF', etc.
+ type_only : bool
+ Search for package by type only
+ name_only : bool
+ Search for package by name only
+
+ Returns
+ -------
+ pp : Package object
+
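+ Examples
+ --------
+ A minimal sketch (assuming a simulation object ``sim`` with a TDIS package):
+
+ >>> tdis = sim.get_package("tdis")
+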
+ """
+ return self._package_container.get_package(name, type_only, name_only)
+
def check(
self,
f: Optional[Union[str, os.PathLike]] = None,
@@ -995,6 +1094,12 @@ def check(
"""
Check model data for common errors.
+ Warning
+ -------
+ The MF6 check mechanism is deprecated pending reimplementation
+ in a future release. While the checks API will remain in place
+ through 3.x, it may be unstable, and will likely change in 4.x.
+
Parameters
----------
f : str or PathLike, optional
@@ -1021,6 +1126,7 @@ def check(
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
+
# check instance for simulation-level check
chk_list = []
@@ -1070,7 +1176,7 @@ def load_package(
strict,
ref_path: Union[str, os.PathLike],
dict_package_name=None,
- parent_package=None,
+ parent_package: Optional[MFPackage] = None,
):
"""
Load a package from a file.
@@ -1130,7 +1236,7 @@ def load_package(
dict_package_name = ftype
# get package object from file type
- package_obj = self.package_factory(ftype, "")
+ package_obj = PackageContainer.package_factory(ftype, "")
# create package
package = package_obj(
self,
@@ -1142,19 +1248,19 @@ def load_package(
package.load(strict)
self._other_files[package.filename] = package
# register child package with the simulation
- self._add_package(package, package.path)
+ self._package_container.add_package(package)
if parent_package is not None:
# register child package with the parent package
- parent_package._add_package(package, package.path)
+ parent_package.add_package(package)
return package
def register_ims_package(
- self, solution_file: MFPackage, model_list: Union[str, List[str]]
+ self, solution_file: MFPackage, model_list: Union[str, list[str]]
):
self.register_solution_package(solution_file, model_list)
def register_solution_package(
- self, solution_file: MFPackage, model_list: Union[str, List[str]]
+ self, solution_file: MFPackage, model_list: Union[str, list[str]]
):
"""
Register a solution package with the simulation.
@@ -1245,14 +1351,14 @@ def register_solution_package(
"New solution package will replace old package"
".".format(file.package_name)
)
- self._remove_package(self._solution_files[file.filename])
+ self._package_container.remove_package(
+ self._solution_files[file.filename]
+ )
del self._solution_files[file.filename]
break
# register solution package
if not in_simulation:
- self._add_package(
- solution_file, self._get_package_path(solution_file)
- )
+ self._package_container.add_package(solution_file)
# do not allow a solution package to be registered twice with the
# same simulation
if not in_simulation:
@@ -1331,7 +1437,7 @@ def _create_package(self, package_type, package_data):
)
raise MFDataException(package=package_type, message=message)
# find package - only supporting utl packages for now
- package_obj = self.package_factory(package_type, "utl")
+ package_obj = PackageContainer.package_factory(package_type, "utl")
if package_obj is not None:
# determine file name
if "filename" not in package_data:
@@ -1487,6 +1593,12 @@ def set_all_data_external(
):
"""Sets the simulation's list and array data to be stored externally.
+ Warning
+ -------
+ The MF6 check mechanism is deprecated pending reimplementation
+ in a future release. While the checks API will remain in place
+ through 3.x, it may be unstable, and will likely change in 4.x.
+
Parameters
----------
check_data: bool
@@ -1501,6 +1613,7 @@ def set_all_data_external(
binary: bool
Whether file will be stored as binary
"""
+
# copy any files whose paths have changed
self.simulation_data.mfpath.copy_files()
# set data external for all packages in all models
@@ -1537,6 +1650,7 @@ def set_all_data_external(
def set_all_data_internal(self, check_data=True):
# set data external for all packages in all models
+
for model in self._models.values():
model.set_all_data_internal(check_data)
# set data external for solution packages
@@ -1767,7 +1881,7 @@ def remove_package(self, package_name):
if package.filename in self._other_files:
del self._other_files[package.filename]
- self._remove_package(package)
+ self._package_container.remove_package(package)
# if this is a package referenced from a filerecord, remove filerecord
# from name file
@@ -1777,7 +1891,10 @@ def remove_package(self, package_name):
if isinstance(file_record, mfdata.MFData):
file_record.set_data(None)
if hasattr(self.name_file, package.package_type):
- child_pkgs = getattr(self.name_file, package.package_type)
+ child_pkgs = cast(
+ MFChildPackages,
+ getattr(self.name_file, package.package_type),
+ )
child_pkgs._remove_packages(package.filename, True)
@property
@@ -1793,7 +1910,7 @@ def model_dict(self):
"""
return self._models.copy()
- def get_model(self, model_name=None):
+ def get_model(self, model_name=None) -> Optional[MFModel]:
"""
Returns the models in the simulation with a given model name, name
file name, or model type.
@@ -2006,7 +2123,7 @@ def _remove_package_by_type(self, package):
if (
package.package_type.lower() == "tdis"
and self._tdis_file is not None
- and self._tdis_file in self._packagelist
+ and self._tdis_file in self._package_container.packagelist
):
# tdis package already exists. there can be only one tdis
# package. remove existing tdis package
@@ -2018,11 +2135,11 @@ def _remove_package_by_type(self, package):
"WARNING: tdis package already exists. Replacing "
"existing tdis package."
)
- self._remove_package(self._tdis_file)
+ self._package_container.remove_package(self._tdis_file)
elif (
package.package_type.lower()
in mfstructure.MFStructure().flopy_dict["solution_packages"]
- and pname in self.package_name_dict
+ and pname in self._package_container.package_name_dict
):
if (
self.simulation_data.verbosity_level.value
@@ -2033,11 +2150,14 @@ def _remove_package_by_type(self, package):
f"{package.package_name.lower()} already exists. "
"Replacing existing package."
)
- self._remove_package(self.package_name_dict[pname])
+ self._package_container.remove_package(
+ self._package_container.package_name_dict[pname]
+ )
else:
if (
package.filename in self._other_files
- and self._other_files[package.filename] in self._packagelist
+ and self._other_files[package.filename]
+ in self._package_container.packagelist
):
# other package with same file name already exists. remove old
# package
@@ -2049,7 +2169,9 @@ def _remove_package_by_type(self, package):
f"WARNING: package with name {pname} already exists. "
"Replacing existing package."
)
- self._remove_package(self._other_files[package.filename])
+ self._package_container.remove_package(
+ self._other_files[package.filename]
+ )
del self._other_files[package.filename]
def register_package(
@@ -2097,7 +2219,7 @@ def register_package(
# all but solution packages get added here. solution packages
# are added during solution package registration
self._remove_package_by_type(package)
- self._add_package(package, path)
+ self._package_container.add_package(package)
sln_dict = mfstructure.MFStructure().flopy_dict["solution_packages"]
if package.package_type.lower() == "nam":
if not package.internal_package:
@@ -2520,7 +2642,7 @@ def _is_in_solution_group(self, item, index, any_idx_after=False):
def plot(
self,
- model_list: Optional[Union[str, List[str]]] = None,
+ model_list: Optional[Union[str, list[str]]] = None,
SelPackList=None,
**kwargs,
):
diff --git a/flopy/mf6/utils/lakpak_utils.py b/flopy/mf6/utils/lakpak_utils.py
index 8b1e1b25b1..94b52a5206 100644
--- a/flopy/mf6/utils/lakpak_utils.py
+++ b/flopy/mf6/utils/lakpak_utils.py
@@ -40,7 +40,7 @@ def get_lak_connections(modelgrid, lake_map, idomain=None, bedleak=None):
Returns
-------
idomain : ndarray
- idomain adjusted to inactivate cells with lakes
+ idomain adjusted to deactivate cells with lakes
connection_dict : dict
dictionary with the zero-based lake number keys and number of
connections in a lake values
diff --git a/flopy/mf6/utils/model_splitter.py b/flopy/mf6/utils/model_splitter.py
index a73daf27af..a4db88e75f 100644
--- a/flopy/mf6/utils/model_splitter.py
+++ b/flopy/mf6/utils/model_splitter.py
@@ -117,7 +117,7 @@
}
-class Mf6Splitter(object):
+class Mf6Splitter:
"""
A class for splitting a single model into a multi-model simulation
@@ -173,6 +173,9 @@ def __init__(self, sim, modelname=None):
self._fdigits = 1
+ # multi-model splitting attribute: maps new model number to GWF model name
+ self._multimodel_exchange_gwf_names = {}
+
@property
def new_simulation(self):
"""
@@ -372,7 +375,7 @@ def optimize_splitting_mask(self, nparts):
lak_array = np.zeros((ncpl,), dtype=int)
laks = []
hfbs = []
- for _, package in self._model.package_dict.items():
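+ # get_package() without arguments returns all of the model's packages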
+ for package in self._model.get_package():
if isinstance(
package,
(
@@ -941,6 +944,8 @@ def _create_sln_tdis(self):
new_sim : MFSimulation object
"""
for pak in self._sim.sim_package_list:
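+ # skip exchange packages; exchanges are rebuilt after the split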
+ if pak.package_abbr in ("gwfgwt", "gwfgwf", "gwfgwe"):
+ continue
pak_cls = PackageContainer.package_factory(pak.package_abbr, "")
signature = inspect.signature(pak_cls)
d = {"simulation": self._new_sim, "loading_package": False}
@@ -1019,6 +1024,7 @@ def _remap_filerecords(self, item, value, mapped_data):
"budgetcsv_filerecord",
"stage_filerecord",
"obs_filerecord",
+ "concentration_filerecord",
):
value = value.array
if value is None:
@@ -2487,7 +2493,7 @@ def _remap_obs(self, package, mapped_data, remapper, pkg_type=None):
for mkey, model in self._model_dict.items():
idx = np.asarray(new_model2 == mkey).nonzero()
tmp_node = new_node2[idx]
- cidx = np.asarray((tmp_node != None)).nonzero() # noqa: E711
+ cidx = np.asarray(tmp_node != None).nonzero() # noqa: E711
tmp_cellid = model.modelgrid.get_lrc(
tmp_node[cidx].to_list()
)
@@ -3009,7 +3015,13 @@ def _remap_package(self, package, ismvr=False):
elif isinstance(value, mfdatascalar.MFScalarTransient):
for mkey in self._model_dict.keys():
- mapped_data[mkey][item] = value._data_storage
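+ # copy each stress period's data instead of sharing storage objects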
+ val_dict = {}
+ for (
+ perkey,
+ data_storage,
+ ) in value._data_storage.items():
+ val_dict[perkey] = data_storage.get_data()
+ mapped_data[mkey][item] = val_dict
elif isinstance(value, mfdatascalar.MFScalar):
for mkey in self._model_dict.keys():
mapped_data[mkey][item] = value.data
@@ -3072,28 +3084,29 @@ def _create_exchanges(self):
dict
"""
d = {}
+ exchange_kwargs = {}
built = []
nmodels = list(self._model_dict.keys())
- if self._model.name_file.newtonoptions is not None:
- newton = self._model.name_file.newtonoptions.array
- if isinstance(newton, list):
- newton = True
- else:
- newton = None
-
- if self._model.npf.xt3doptions is not None:
- xt3d = self._model.npf.xt3doptions.array
- if isinstance(xt3d, list):
- xt3d = True
- else:
- xt3d = None
+ if hasattr(self._model.name_file, "newtonoptions"):
+ if self._model.name_file.newtonoptions is not None:
+ newton = self._model.name_file.newtonoptions.array
+ if isinstance(newton, list):
+ exchange_kwargs["newton"] = True
+
+ if hasattr(self._model, "npf"):
+ if self._model.npf.xt3doptions is not None:
+ xt3d = self._model.npf.xt3doptions.array
+ if isinstance(xt3d, list):
+ exchange_kwargs["xt3d"] = True
if self._model_type.lower() == "gwf":
extension = "gwfgwf"
exchgcls = modflow.ModflowGwfgwf
+ check_multi_model = False
elif self._model_type.lower() == "gwt":
extension = "gwtgwt"
exchgcls = modflow.ModflowGwtgwt
+ check_multi_model = True
else:
raise NotImplementedError()
@@ -3111,6 +3124,14 @@ def _create_exchanges(self):
if m1 == m0:
continue
exchange_data = []
+ if check_multi_model:
+ if self._multimodel_exchange_gwf_names:
+ exchange_kwargs["gwfmodelname1"] = (
+ self._multimodel_exchange_gwf_names[m0]
+ )
+ exchange_kwargs["gwfmodelname2"] = (
+ self._multimodel_exchange_gwf_names[m1]
+ )
for node0, exg_list in exg_nodes.items():
if grid0.idomain[node0] < 1:
continue
@@ -3145,9 +3166,9 @@ def _create_exchanges(self):
exgmnameb=mname1,
nexg=len(exchange_data),
exchangedata=exchange_data,
- filename=f"sim_{m0 :0{self._fdigits}d}_{m1 :0{self._fdigits}d}.{extension}",
- newton=newton,
- xt3d=xt3d,
+ filename=f"sim_{mname0}_{mname1}.{extension}",
+ pname=f"{mname0}_{mname1}",
+ **exchange_kwargs,
)
d[f"{mname0}_{mname1}"] = exchg
@@ -3155,7 +3176,8 @@ def _create_exchanges(self):
for _, model in self._model_dict.items():
# turn off save_specific_discharge if it's on
- model.npf.save_specific_discharge = None
+ if hasattr(model, "npf"):
+ model.npf.save_specific_discharge = None
else:
xc = self._modelgrid.xcellcenters.ravel()
@@ -3164,17 +3186,27 @@ def _create_exchanges(self):
for m0, model in self._model_dict.items():
exg_nodes = self._new_connections[m0]["external"]
for m1 in nmodels:
+ exchange_data = []
if m1 in built:
continue
if m1 == m0:
continue
+
+ if check_multi_model:
+ if self._multimodel_exchange_gwf_names:
+ exchange_kwargs["gwfmodelname1"] = (
+ self._multimodel_exchange_gwf_names[m0]
+ )
+ exchange_kwargs["gwfmodelname2"] = (
+ self._multimodel_exchange_gwf_names[m1]
+ )
+
modelgrid0 = model.modelgrid
modelgrid1 = self._model_dict[m1].modelgrid
ncpl0 = modelgrid0.ncpl
ncpl1 = modelgrid1.ncpl
idomain0 = modelgrid0.idomain
idomain1 = modelgrid1.idomain
- exchange_data = []
for node0, exg_list in exg_nodes.items():
for exg in exg_list:
if exg[0] != m1:
@@ -3283,9 +3315,9 @@ def _create_exchanges(self):
auxiliary=["ANGLDEGX", "CDIST"],
nexg=len(exchange_data),
exchangedata=exchange_data,
- filename=f"sim_{m0 :0{self._fdigits}d}_{m1 :0{self._fdigits}d}.{extension}",
- newton=newton,
- xt3d=xt3d,
+ filename=f"sim_{mname0}_{mname1}.{extension}",
+ pname=f"{mname0}_{mname1}",
+ **exchange_kwargs,
)
d[f"{mname0}_{mname1}"] = exchg
@@ -3300,12 +3332,55 @@ def _create_exchanges(self):
filename=f"{mname0}_{mname1}.mvr",
)
- d[f"{mname0}_{mname1}_mvr"] = exchg
+ d[f"{mname0}_{mname1}_mvr"] = mvr
built.append(m0)
return d
+ def create_multi_model_exchanges(self, mname0, mname1):
+ """
+ Method to create multi-model exchange packages, e.g., GWF-GWT
+
+ Parameters
+ ----------
+ mname0 : str
+ name of the GWF model in the exchange
+ mname1 : str
+ name of the GWT or GWE model in the exchange
+
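+ Examples
+ --------
+ A minimal sketch (assuming split models named ``gwf_0`` and ``gwt_0``):
+
+ >>> mfsplit.create_multi_model_exchanges("gwf_0", "gwt_0")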
+ """
+ exchange_classes = {
+ "gwfgwt": modflow.ModflowGwfgwt,
+ "gwfgwe": modflow.ModflowGwfgwe,
+ }
+ ml0 = self._new_sim.get_model(mname0)
+ ml1 = self._new_sim.get_model(mname1)
+
+ mtype0 = ml0.model_type[:3]
+ mtype1 = ml1.model_type[:3]
+
+ if mtype0.lower() != "gwf":
+ raise AssertionError(
+ f"GWF must be the first specified type for multimodel "
+ f"exchanges: type supplied {mtype1.upper()} "
+ )
+
+ if mtype1.lower() not in ("gwt", "gwe"):
+ raise NotImplementedError(
+ f"Unsupported exchange type GWF-{mtype1.upper()}"
+ )
+
+ exchangecls = exchange_classes[f"{mtype0}{mtype1}"]
+ filename = f"{mname0}_{mname1}.exg"
+ exchangecls(
+ self._new_sim,
+ exgmnamea=mname0,
+ exgmnameb=mname1,
+ filename=filename,
+ )
+
def split_model(self, array):
"""
User method to split a model based on an array
@@ -3366,3 +3441,119 @@ def split_model(self, array):
epaks = self._create_exchanges()
return self._new_sim
+
+ def split_multi_model(self, array):
+ """
+ Method to split integrated models such as GWF-GWT or GWF-GWE models.
+ Note: this method does not support splitting multiple connected GWF models.
+
+ Parameters
+ ----------
+ array : np.ndarray
+ integer array of new model numbers. Array must either be of
+ dimension (NROW, NCOL), (NCPL), or (NNODES for unstructured grid
+ models).
+
+ Returns
+ -------
+ MFSimulation object
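+
+ Examples
+ --------
+ A minimal sketch (assuming a coupled GWF-GWT simulation ``sim``):
+
+ >>> mfsplit = Mf6Splitter(sim)
+ >>> split_array = mfsplit.optimize_splitting_mask(nparts=2)
+ >>> new_sim = mfsplit.split_multi_model(split_array)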
+ """
+ if not self._allow_splitting:
+ raise AssertionError(
+ "Mf6Splitter cannot split a model that "
+ "is part of a split simulation"
+ )
+
+ # set number formatting string for file paths
+ array = np.array(array).astype(int)
+ s = str(np.max(array))
+ self._fdigits = len(s)
+
+ # get model names and types, assert that first model is a GWF model
+ model_names = self._sim.model_names
+ models = [self._sim.get_model(mn) for mn in model_names]
+ model_types = [type(ml) for ml in models]
+
+ ix = model_types.index(modflow.ModflowGwf)
+ if ix != 0:
+ idxs = [ix] + [idx for idx in range(len(model_names)) if idx != ix]
+ model_names = [model_names[idx] for idx in idxs]
+ models = [models[idx] for idx in idxs]
+ model_types = [model_types[idx] for idx in idxs]
+ self.switch_models(modelname=model_names[0], remap_nodes=True)
+
+ # assert consistent idomain and modelgrid shapes!
+ shapes = [ml.modelgrid.shape for ml in models]
+ idomains = [ml.modelgrid.idomain for ml in models]
+ gwf_shape = shapes.pop(0)
+ gwf_idomain = idomains.pop(0)
+
+ for ix, shape in enumerate(shapes):
+ idomain = idomains[ix]
+ mname = model_names[ix + 1]
+ if shape != gwf_shape:
+ raise AssertionError(
+ f"Model {mname} shape {shape} is not consistent with GWF "
+ f"model shape {gwf_shape}"
+ )
+
+ gwf_inactive = np.where(gwf_idomain == 0)
+ inactive = np.where(idomain == 0)
+ if not np.allclose(inactive, gwf_inactive):
+ raise AssertionError(
+ f"Model {mname} idomain is not consistent with GWF "
+ f"model idomain"
+ )
+
+ gwf_base = model_names[0]
+ model_labels = [
+ f"{i :{self._fdigits}d}" for i in sorted(np.unique(array))
+ ]
+
+ self._multimodel_exchange_gwf_names = {
+ int(i): f"{gwf_base}_{i}" for i in model_labels
+ }
+
+ new_sim = self.split_model(array)
+ for mname in model_names[1:]:
+ self.switch_models(modelname=mname, remap_nodes=False)
+ new_sim = self.split_model(array)
+
+ for mbase in model_names[1:]:
+ for label in model_labels:
+ mname0 = f"{gwf_base}_{label}"
+ mname1 = f"{mbase}_{label}"
+ self.create_multi_model_exchanges(mname0, mname1)
+
+ # register models to correct IMS package
+ solution_recarray = self._sim.name_file.solutiongroup.data[0]
+ sln_mname_cols = [
+ i for i in solution_recarray.dtype.names if "slnmnames" in i
+ ]
+ if len(solution_recarray) > 1:
+ # need to associate solutions with solution groups
+ imspkgs = []
+ imspkg_names = []
+ for pkg in new_sim.sim_package_list:
+ if isinstance(pkg, modflow.ModflowIms):
+ imspkgs.append(pkg)
+ imspkg_names.append(pkg.filename)
+
+ for record in solution_recarray:
+ fname = record.slnfname
+ ims_ix = imspkg_names.index(fname)
+ ims_obj = imspkgs[ims_ix]
+ mnames = []
+ for col in sln_mname_cols:
+ mbase = record[col]
+ if mbase is None:
+ continue
+
+ for label in model_labels:
+ mnames.append(f"{mbase}_{label}")
+
+ new_sim.register_ims_package(ims_obj, mnames)
+
+ return self._new_sim
diff --git a/flopy/mf6/utils/output_util.py b/flopy/mf6/utils/output_util.py
index c4f6dfcf9b..6d1e720976 100644
--- a/flopy/mf6/utils/output_util.py
+++ b/flopy/mf6/utils/output_util.py
@@ -35,7 +35,7 @@ def __init__(self, obj, budgetkey=None):
"csv": self.__csv,
"package_convergence": self.__csv,
}
- delist = ("ts", "wc")
+ delist = ("ts", "wc", "ncf")
self._obj = obj
self._methods = []
self._sim_ws = obj.simulation_data.mfpath.get_sim_path()
@@ -112,7 +112,7 @@ def __init__(self, obj, budgetkey=None):
self._methods.append(f"{rectype}()")
if rectype == "obs":
data = None
- for ky in obj._simulation_data.mfdata:
+ for ky in obj.simulation_data.mfdata:
if obj.path == (ky[0:2]):
if str(ky[-2]).lower() == "fileout":
data = [[ky[-1]]]
@@ -122,14 +122,14 @@ def __init__(self, obj, budgetkey=None):
and str(ky[-1]) == "output"
):
if (
- obj._simulation_data.mfdata[
+ obj.simulation_data.mfdata[
ky
].array[0][0]
== "fileout"
):
data = [
[
- obj._simulation_data.mfdata[
+ obj.simulation_data.mfdata[
ky
].array[0][-2]
]
diff --git a/flopy/mf6/utils/reference.py b/flopy/mf6/utils/reference.py
index 1f472b505b..3517f12df8 100644
--- a/flopy/mf6/utils/reference.py
+++ b/flopy/mf6/utils/reference.py
@@ -1,6 +1,10 @@
"""
Module spatial referencing for flopy model objects
+.. deprecated:: 3.9
+ This module will be removed in FloPy 3.10+. Use
+ the :mod:`flopy.discretization` module instead.
+
"""
import numpy as np
@@ -10,6 +14,11 @@ class StructuredSpatialReference:
"""
a simple class to locate the model grid in x-y space
+ .. deprecated:: 3.9
+ This class will be removed in FloPy 3.10+. Use
+ :class:`~flopy.discretization.structuredgrid.StructuredGrid`
+ instead.
+
Parameters
----------
@@ -544,6 +553,11 @@ class VertexSpatialReference:
"""
a simple class to locate the model grid in x-y space
+ .. deprecated:: 3.9
+ This class will be removed in FloPy 3.10+. Use
+ :class:`~flopy.discretization.vertexgrid.VertexGrid`
+ instead.
+
Parameters
----------
xvdict: dictionary
@@ -852,33 +866,36 @@ class SpatialReference:
"""
A dynamic inheritance class that locates a gridded model in space
- Parameters
- ----------
- delr : numpy ndarray
- the model discretization delr vector
- delc : numpy ndarray
- the model discretization delc vector
- lenuni : int
- the length units flag from the discretization package
- xul : float
- the x coordinate of the upper left corner of the grid
- yul : float
- the y coordinate of the upper left corner of the grid
- rotation : float
- the counter-clockwise rotation (in degrees) of the grid
- proj4_str: str
- a PROJ4 string that identifies the grid in space. warning:
- case sensitive!
- xadj : float
- vertex grid: x vertex adjustment factor
- yadj : float
- vertex grid: y vertex adjustment factor
- xvdict: dict
- dictionary of x-vertices by cellnum ex. {0: (0,1,1,0)}
- yvdict: dict
- dictionary of y-vertices by cellnum ex. {0: (1,1,0,0)}
- distype: str
- model grid discretization type
+ .. deprecated:: 3.9
+ This class will be removed in FloPy 3.10+.
+
+ Parameters
+ ----------
+ delr : numpy ndarray
+ the model discretization delr vector
+ delc : numpy ndarray
+ the model discretization delc vector
+ lenuni : int
+ the length units flag from the discretization package
+ xul : float
+ the x coordinate of the upper left corner of the grid
+ yul : float
+ the y coordinate of the upper left corner of the grid
+ rotation : float
+ the counter-clockwise rotation (in degrees) of the grid
+ proj4_str: str
+ a PROJ4 string that identifies the grid in space. warning:
+ case sensitive!
+ xadj : float
+ vertex grid: x vertex adjustment factor
+ yadj : float
+ vertex grid: y vertex adjustment factor
+ xvdict: dict
+ dictionary of x-vertices by cellnum ex. {0: (0,1,1,0)}
+ yvdict: dict
+ dictionary of y-vertices by cellnum ex. {0: (1,1,0,0)}
+ distype: str
+ model grid discretization type
"""
def __new__(
diff --git a/flopy/mfusg/__init__.py b/flopy/mfusg/__init__.py
index 6790a64896..5bc35e4146 100644
--- a/flopy/mfusg/__init__.py
+++ b/flopy/mfusg/__init__.py
@@ -12,12 +12,12 @@
__all__ = [
"MfUsg",
- "MfUsgDisU",
"MfUsgBcf",
- "MfUsgLpf",
- "MfUsgWel",
"MfUsgCln",
"MfUsgClnDtypes",
- "MfUsgSms",
+ "MfUsgDisU",
"MfUsgGnc",
+ "MfUsgLpf",
+ "MfUsgSms",
+ "MfUsgWel",
]
diff --git a/flopy/mfusg/cln_dtypes.py b/flopy/mfusg/cln_dtypes.py
index aac2e9a6f6..745422238d 100644
--- a/flopy/mfusg/cln_dtypes.py
+++ b/flopy/mfusg/cln_dtypes.py
@@ -58,10 +58,7 @@ def get_gwconn_dtype(structured=True):
("ifcon", int), # index of connectivity equation
("fskin", np.float32), # leakance across a skin
("flengw", np.float32), # length of connection
- (
- "faniso",
- np.float32,
- ), # anisotropy or thickness of sediments
+ ("faniso", np.float32), # anisotropy or thickness of sediments
("icgwadi", int), # flag of vertical flow correction
]
)
@@ -73,10 +70,7 @@ def get_gwconn_dtype(structured=True):
("ifcon", int), # index of connectivity equation
("fskin", np.float32), # leakance across a skin
("flengw", np.float32), # length of connection
- (
- "faniso",
- np.float32,
- ), # anisotropy or thickness of sediments
+ ("faniso", np.float32), # anisotropy or thickness of sediments
("icgwadi", int), # flag of vertical flow correction
]
)
@@ -100,16 +94,10 @@ def get_clncirc_dtype(bhe=False):
[
("iconduityp", int), # index of circular conduit type
("frad", np.float32), # radius
- (
- "conduitk",
- np.float32,
- ), # conductivity or resistance factor
+ ("conduitk", np.float32), # conductivity or resistance factor
("tcond", np.float32), # thermal conductivity of bhe tube
("tthk", np.float32), # thickness
- (
- "tcfluid",
- np.float32,
- ), # thermal conductivity of the fluid
+ ("tcfluid", np.float32), # thermal conductivity of the fluid
("tconv", np.float32), # thermal convective coefficient
]
)
@@ -118,10 +106,7 @@ def get_clncirc_dtype(bhe=False):
[
("iconduityp", int), # index of circular conduit type
("frad", np.float32), # radius
- (
- "conduitk",
- np.float32,
- ), # conductivity or resistance factor
+ ("conduitk", np.float32), # conductivity or resistance factor
]
)
return dtype
@@ -145,10 +130,7 @@ def get_clnrect_dtype(bhe=False):
("irectyp", int), # index of rectangular conduit type
("flength", np.float32), # width
("fheight", np.float32), # height
- (
- "conduitk",
- np.float32,
- ), # conductivity or resistance factor
+ ("conduitk", np.float32), # conductivity or resistance factor
("tcond", np.float32), # thermal conductivity of bhe tube
("tthk", np.float32), # thickness of bhe tube
("tcfluid", np.float32), # thermal conductivity of fluid
@@ -161,10 +143,7 @@ def get_clnrect_dtype(bhe=False):
("irectyp", int), # index of rectangular conduit type
("flength", np.float32), # width
("fheight", np.float32), # height
- (
- "conduitk",
- np.float32,
- ), # conductivity or resistance factor
+ ("conduitk", np.float32), # conductivity or resistance factor
]
)
return dtype
diff --git a/flopy/mfusg/mfusg.py b/flopy/mfusg/mfusg.py
index 6f84d2b40f..d456bcff5f 100644
--- a/flopy/mfusg/mfusg.py
+++ b/flopy/mfusg/mfusg.py
@@ -196,9 +196,7 @@ def load(
# similar to modflow command: if file does not exist , try file.nam
namefile_path = os.path.join(model_ws, f)
- if not os.path.isfile(namefile_path) and os.path.isfile(
- f"{namefile_path}.nam"
- ):
+ if not os.path.isfile(namefile_path) and os.path.isfile(f"{namefile_path}.nam"):
namefile_path += ".nam"
if not os.path.isfile(namefile_path):
raise OSError(f"cannot find name file: {namefile_path}")
@@ -209,9 +207,7 @@ def load(
if verbose:
print(f"\nCreating new model with name: {modelname}\n{50 * '-'}\n")
- attribs = mfreadnam.attribs_from_namfile_header(
- os.path.join(model_ws, f)
- )
+ attribs = mfreadnam.attribs_from_namfile_header(os.path.join(model_ws, f))
model = cls(
modelname,
@@ -270,9 +266,7 @@ def load(
cls._set_output_external(model, ext_unit_dict)
# send messages re: success/failure of loading
- cls._send_load_messages(
- model, files_successfully_loaded, files_not_loaded
- )
+ cls._send_load_messages(model, files_successfully_loaded, files_not_loaded)
if check:
model.check(f=f"{model.name}.chk", verbose=model.verbose, level=0)
@@ -281,9 +275,7 @@ def load(
return model
@classmethod
- def _load_packages(
- cls, model, ext_unit_dict, ext_pkg_d, load_only, forgive
- ):
+ def _load_packages(cls, model, ext_unit_dict, ext_pkg_d, load_only, forgive):
"""
Method to load packages into the MODFLOW-USG Model Class.
For internal class use - should not be called by the user.
@@ -338,17 +330,16 @@ def _load_packages(
# try loading packages in ext_unit_dict
for key, item in ext_unit_dict.items():
if item.package is not None:
- (
- files_successfully_loaded,
- files_not_loaded,
- ) = cls._load_ext_unit_dict_paks(
- model,
- ext_unit_dict,
- load_only,
- item,
- forgive,
- files_successfully_loaded,
- files_not_loaded,
+ (files_successfully_loaded, files_not_loaded) = (
+ cls._load_ext_unit_dict_paks(
+ model,
+ ext_unit_dict,
+ load_only,
+ item,
+ forgive,
+ files_successfully_loaded,
+ files_not_loaded,
+ )
)
elif "data" not in item.filetype.lower():
files_not_loaded.append(item.filename)
@@ -435,9 +426,7 @@ def _prepare_external_files(model, key, item):
if key not in model.external_units:
model.external_fnames.append(item.filename)
model.external_units.append(key)
- model.external_binflag.append(
- "binary" in item.filetype.lower()
- )
+ model.external_binflag.append("binary" in item.filetype.lower())
model.external_output.append(False)
@staticmethod
@@ -504,9 +493,7 @@ def _set_output_external(model, ext_unit_dict):
)
@staticmethod
- def _send_load_messages(
- model, files_successfully_loaded, files_not_loaded
- ):
+ def _send_load_messages(model, files_successfully_loaded, files_not_loaded):
"""Send messages re: success/failure of loading."""
# write message indicating packages that were successfully loaded
if model.verbose:
@@ -552,7 +539,5 @@ def fmt_string(array):
)
raise TypeError(msg)
else:
- raise TypeError(
- "mfusg.fmt_string error: unknown vtype in" f"field: {field}"
- )
+ raise TypeError(f"mfusg.fmt_string error: unknown vtype in field: {field}")
return "".join(fmts)
diff --git a/flopy/mfusg/mfusgbcf.py b/flopy/mfusg/mfusgbcf.py
index 290b8f37b9..a0046d151b 100644
--- a/flopy/mfusg/mfusgbcf.py
+++ b/flopy/mfusg/mfusgbcf.py
@@ -201,12 +201,7 @@ def __init__(
if not structured:
njag = dis.njag
self.anglex = Util2d(
- model,
- (njag,),
- np.float32,
- anglex,
- "anglex",
- locat=self.unit_number[0],
+ model, (njag,), np.float32, anglex, "anglex", locat=self.unit_number[0]
)
# item 1
@@ -220,12 +215,7 @@ def __init__(
)
if not structured:
self.ksat = Util2d(
- model,
- (njag,),
- np.float32,
- ksat,
- "ksat",
- locat=self.unit_number[0],
+ model, (njag,), np.float32, ksat, "ksat", locat=self.unit_number[0]
)
if add_package:
@@ -261,9 +251,7 @@ def write_file(self, f=None):
# LAYCON array
for layer in range(nlay):
if self.intercellt[layer] > 0:
- f_obj.write(
- f"{self.intercellt[layer]:1d} {self.laycon[layer]:1d} "
- )
+ f_obj.write(f"{self.intercellt[layer]:1d} {self.laycon[layer]:1d} ")
else:
f_obj.write(f"0{self.laycon[layer]:1d} ")
f_obj.write("\n")
@@ -344,7 +332,8 @@ def load(cls, f, model, ext_unit_dict=None):
>>> import flopy
>>> m = flopy.mfusg.MfUsg()
>>> disu = flopy.mfusg.MfUsgDisU(
- model=m, nlay=1, nodes=1, iac=[1], njag=1,ja=np.array([0]), fahl=[1.0], cl12=[1.0])
+ ... model=m, nlay=1, nodes=1, iac=[1], njag=1,ja=np.array([0]),
+ ... fahl=[1.0], cl12=[1.0])
>>> bcf = flopy.mfusg.MfUsgBcf.load('test.bcf', m)
"""
msg = (
@@ -384,12 +373,8 @@ def load(cls, f, model, ext_unit_dict=None):
int(text_list[5]),
)
- ikvflag = type_from_iterable(
- text_list, index=6, _type=int, default_val=0
- )
- ikcflag = type_from_iterable(
- text_list, index=7, _type=int, default_val=0
- )
+ ikvflag = type_from_iterable(text_list, index=6, _type=int, default_val=0)
+ ikcflag = type_from_iterable(text_list, index=7, _type=int, default_val=0)
# LAYCON array
laycon, intercellt = cls._load_laycon(f_obj, model)
@@ -397,9 +382,7 @@ def load(cls, f, model, ext_unit_dict=None):
# TRPY array
if model.verbose:
print(" loading TRPY...")
- trpy = Util2d.load(
- f_obj, model, (nlay,), np.float32, "trpy", ext_unit_dict
- )
+ trpy = Util2d.load(f_obj, model, (nlay,), np.float32, "trpy", ext_unit_dict)
# property data for each layer based on options
transient = not dis.steady.all()
@@ -430,9 +413,7 @@ def load(cls, f, model, ext_unit_dict=None):
if (not model.structured) and abs(ikcflag) == 1:
if model.verbose:
print(" loading ksat (njag)...")
- ksat = Util2d.load(
- f_obj, model, (njag,), np.float32, "ksat", ext_unit_dict
- )
+ ksat = Util2d.load(f_obj, model, (njag,), np.float32, "ksat", ext_unit_dict)
f_obj.close()
@@ -595,12 +576,7 @@ def _load_layer_arrays(
if model.verbose:
print(f" loading sf1 layer {layer + 1:3d}...")
sf1[layer] = Util2d.load(
- f_obj,
- model,
- util2d_shape,
- np.float32,
- "sf1",
- ext_unit_dict,
+ f_obj, model, util2d_shape, np.float32, "sf1", ext_unit_dict
)
# hy/tran, and kv/vcont
@@ -625,12 +601,7 @@ def _load_layer_arrays(
if model.verbose:
print(f" loading sf2 layer {layer + 1:3d}...")
sf2[layer] = Util2d.load(
- f_obj,
- model,
- util2d_shape,
- np.float32,
- "sf2",
- ext_unit_dict,
+ f_obj, model, util2d_shape, np.float32, "sf2", ext_unit_dict
)
# wetdry
@@ -638,12 +609,7 @@ def _load_layer_arrays(
if model.verbose:
print(f" loading sf2 layer {layer + 1:3d}...")
wetdry[layer] = Util2d.load(
- f_obj,
- model,
- util2d_shape,
- np.float32,
- "wetdry",
- ext_unit_dict,
+ f_obj, model, util2d_shape, np.float32, "wetdry", ext_unit_dict
)
return sf1, tran, hy, vcont, sf2, wetdry, kv
@@ -683,12 +649,7 @@ def _load_hy_tran_kv_vcont(f_obj, model, laycon_k, ext_unit_dict, ikvflag):
if model.verbose:
print(f" loading tran layer {layer + 1:3d}...")
_tran = Util2d.load(
- f_obj,
- model,
- util2d_shape,
- np.float32,
- "tran",
- ext_unit_dict,
+ f_obj, model, util2d_shape, np.float32, "tran", ext_unit_dict
)
else:
if model.verbose:
@@ -704,12 +665,7 @@ def _load_hy_tran_kv_vcont(f_obj, model, laycon_k, ext_unit_dict, ikvflag):
if model.verbose:
print(f" loading vcont layer {layer + 1:3d}...")
_vcont = Util2d.load(
- f_obj,
- model,
- util2d_shape,
- np.float32,
- "vcont",
- ext_unit_dict,
+ f_obj, model, util2d_shape, np.float32, "vcont", ext_unit_dict
)
elif (ikvflag == 1) and (model.nlay > 1):
if model.verbose:
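`type_from_iterable`, used above to read the optional IKVFLAG/IKCFLAG entries, is a flopy helper. A hedged sketch of its assumed behavior (cast the item at `index` to `_type`, fall back to `default_val` when the entry is missing or unparseable):

```python
# Sketch under stated assumptions; the real helper lives in flopy's
# utility module and may differ in detail.
def type_from_iterable(iterable, index=0, _type=int, default_val=0):
    try:
        return _type(iterable[index])
    except (IndexError, TypeError, ValueError):
        return default_val

# BCF item 1 may omit the trailing optional entries:
text_list = "1 0 0 1.0 1 0".split()
print(type_from_iterable(text_list, index=6))  # 0 -- entry absent, default used
```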
diff --git a/flopy/mfusg/mfusgcln.py b/flopy/mfusg/mfusgcln.py
index 04b3723817..a193039b32 100644
--- a/flopy/mfusg/mfusgcln.py
+++ b/flopy/mfusg/mfusgcln.py
@@ -13,7 +13,7 @@
Process for MODFLOW-USG, GSI Environmental, March, 2021
Panday, Sorab, Langevin, C.D., Niswonger, R.G., Ibaraki, Motomu, and Hughes,
-J.D., 2013, MODFLOW–USG version 1: An unstructured grid version of MODFLOW
+J.D., 2013, MODFLOW-USG version 1: An unstructured grid version of MODFLOW
for simulating groundwater flow and tightly coupled processes using a control
volume finite-difference formulation: U.S. Geological Survey Techniques and
Methods, book 6, chap. A45, 66 p.
@@ -273,9 +273,7 @@ def __init__(
raise Exception("mfcln: CLN-GW connections not provided")
if len(cln_gwc) != nclngwc:
- raise Exception(
- "mfcln: Number of CLN-GW connections not equal to nclngwc"
- )
+ raise Exception("mfcln: Number of CLN-GW connections not equal to nclngwc")
structured = self.parent.structured
@@ -334,15 +332,12 @@ def _define_cln_networks(self, model):
raise Exception("mfcln: CLN network not defined")
if self.ncln < 0:
- raise Exception(
- "mfcln: negative number of CLN segments in CLN package"
- )
+ raise Exception("mfcln: negative number of CLN segments in CLN package")
if self.ncln > 0: # Linear CLN segments
if self.nndcln is None:
raise Exception(
- "mfcln: number of nodes for each CLN segment must be "
- "provided"
+ "mfcln: number of nodes for each CLN segment must be provided"
)
self.nndcln = Util2d(
model,
@@ -360,9 +355,8 @@ def _define_cln_networks(self, model):
# Node number provided for each segment to simulate CLN networks
elif self.iclnnds > 0:
self.nclnnds = self.iclnnds
- self.nodeno = (
- np.asarray(set(self.clncon), dtype=object) + 1
- ) # can be jagged
+ # can be jagged
+ self.nodeno = np.asarray(set(self.clncon), dtype=object) + 1
else:
raise Exception("mfcln: Node number = 0")
@@ -391,9 +385,7 @@ def _define_cln_networks(self, model):
if self.ja_cln is None:
raise Exception("mfcln: ja_cln must be provided")
if abs(self.ja_cln[0]) != 1:
- raise Exception(
- "mfcln: first ja_cln entry (node 1) is not 1 or -1."
- )
+ raise Exception("mfcln: first ja_cln entry (node 1) is not 1 or -1.")
self.ja_cln = Util2d(
model,
(self.nja_cln,),
@@ -407,14 +399,10 @@ def _define_cln_geometries(self):
"""Initialises CLN geometry types."""
# Circular conduit geometry types
if self.nconduityp <= 0 or self.cln_circ is None:
- raise Exception(
- "mfcln: Circular conduit properties must be provided"
- )
+ raise Exception("mfcln: Circular conduit properties must be provided")
if len(self.cln_circ) != self.nconduityp:
- raise Exception(
- "mfcln: Number of circular properties not equal nconduityp"
- )
+ raise Exception("mfcln: Number of circular properties not equal nconduityp")
self.cln_circ = self._make_recarray(
self.cln_circ, dtype=MfUsgClnDtypes.get_clncirc_dtype(self.bhe)
@@ -472,28 +460,18 @@ def write_file(self, f=None, check=False):
f_cln.write(self.iac_cln.get_file_entry())
f_cln.write(self.ja_cln.get_file_entry())
- np.savetxt(
- f_cln, self.node_prop, fmt=fmt_string(self.node_prop), delimiter=""
- )
+ np.savetxt(f_cln, self.node_prop, fmt=fmt_string(self.node_prop), delimiter="")
- np.savetxt(
- f_cln, self.cln_gwc, fmt=fmt_string(self.cln_gwc), delimiter=""
- )
+ np.savetxt(f_cln, self.cln_gwc, fmt=fmt_string(self.cln_gwc), delimiter="")
if self.nconduityp > 0:
np.savetxt(
- f_cln,
- self.cln_circ,
- fmt=fmt_string(self.cln_circ),
- delimiter="",
+ f_cln, self.cln_circ, fmt=fmt_string(self.cln_circ), delimiter=""
)
if self.nrectyp > 0:
np.savetxt(
- f_cln,
- self.cln_rect,
- fmt=fmt_string(self.cln_rect),
- delimiter="",
+ f_cln, self.cln_rect, fmt=fmt_string(self.cln_rect), delimiter=""
)
f_cln.write(self.ibound.get_file_entry())
@@ -596,14 +574,9 @@ def load(cls, f, model, pak_type="cln", ext_unit_dict=None, **kwargs):
) = cls._load_items_0_1(f, model)
# Items 3, or 4/5/6
- (
- nndcln,
- clncon,
- nja_cln,
- iac_cln,
- ja_cln,
- nclnnds,
- ) = cls._load_items_3to6(f, model, ncln, iclnnds, ext_unit_dict)
+ (nndcln, clncon, nja_cln, iac_cln, ja_cln, nclnnds) = cls._load_items_3to6(
+ f, model, ncln, iclnnds, ext_unit_dict
+ )
if model.verbose:
print(" Reading node_prop...")
@@ -624,15 +597,11 @@ def load(cls, f, model, pak_type="cln", ext_unit_dict=None, **kwargs):
if model.verbose:
print(" Reading ibound...")
- ibound = Util2d.load(
- f, model, (nclnnds, 1), np.int32, "ibound", ext_unit_dict
- )
+ ibound = Util2d.load(f, model, (nclnnds, 1), np.int32, "ibound", ext_unit_dict)
if model.verbose:
print(" Reading strt...")
- strt = Util2d.load(
- f, model, (nclnnds, 1), np.float32, "strt", ext_unit_dict
- )
+ strt = Util2d.load(f, model, (nclnnds, 1), np.float32, "strt", ext_unit_dict)
if hasattr(f, "read"):
f.close()
@@ -649,10 +618,9 @@ def load(cls, f, model, pak_type="cln", ext_unit_dict=None, **kwargs):
funcs = [abs] + [int] * 3 + [abs] * 2
for idx, (item, func) in enumerate(zip(file_unit_items, funcs)):
if item > 0:
- (
- unitnumber[idx + 1],
- filenames[idx + 1],
- ) = model.get_ext_dict_attr(ext_unit_dict, unit=func(item))
+ (unitnumber[idx + 1], filenames[idx + 1]) = model.get_ext_dict_attr(
+ ext_unit_dict, unit=func(item)
+ )
model.add_pop_key_list(func(item))
# create dis object instance
@@ -704,16 +672,9 @@ def _load_items_0_1(f_obj, model):
line_text = line.strip().split()
line_text[:8] = [int(item) for item in line_text[:8]]
- (
- ncln,
- iclnnds,
- iclncb,
- iclnhd,
- iclndd,
- iclnib,
- nclngwc,
- nconduityp,
- ) = line_text[:8]
+ (ncln, iclnnds, iclncb, iclnhd, iclndd, iclnib, nclngwc, nconduityp) = (
+ line_text[:8]
+ )
# Options keywords
nrectyp = 0
diff --git a/flopy/mfusg/mfusgdisu.py b/flopy/mfusg/mfusgdisu.py
index 327b55664d..617cfff98d 100644
--- a/flopy/mfusg/mfusgdisu.py
+++ b/flopy/mfusg/mfusgdisu.py
@@ -267,9 +267,7 @@ def __init__(
self.idsymrd = idsymrd
# LAYCBD
- self.laycbd = Util2d(
- model, (self.nlay,), np.int32, laycbd, name="laycbd"
- )
+ self.laycbd = Util2d(model, (self.nlay,), np.int32, laycbd, name="laycbd")
self.laycbd[-1] = 0 # bottom layer must be zero
# NODELAY
@@ -333,12 +331,7 @@ def __init__(
if iac is None:
raise Exception("iac must be provided")
self.iac = Util2d(
- model,
- (self.nodes,),
- np.int32,
- iac,
- name="iac",
- locat=self.unit_number[0],
+ model, (self.nodes,), np.int32, iac, name="iac", locat=self.unit_number[0]
)
assert self.iac.array.sum() == njag, "The sum of iac must equal njag."
if ja is None:
@@ -347,12 +340,7 @@ def __init__(
# convert from zero-based to one-based
ja += 1
self.ja = Util2d(
- model,
- (self.njag,),
- np.int32,
- ja,
- name="ja",
- locat=self.unit_number[0],
+ model, (self.njag,), np.int32, ja, name="ja", locat=self.unit_number[0]
)
self.ivc = None
if self.ivsd == 1:
@@ -375,20 +363,10 @@ def __init__(
if cl2 is None:
raise Exception("idsymrd is 1 but cl2 was not specified.")
self.cl1 = Util2d(
- model,
- (njags,),
- np.float32,
- cl1,
- name="cl1",
- locat=self.unit_number[0],
+ model, (njags,), np.float32, cl1, name="cl1", locat=self.unit_number[0]
)
self.cl2 = Util2d(
- model,
- (njags,),
- np.float32,
- cl2,
- name="cl2",
- locat=self.unit_number[0],
+ model, (njags,), np.float32, cl2, name="cl2", locat=self.unit_number[0]
)
if idsymrd == 0:
@@ -411,22 +389,13 @@ def __init__(
elif idsymrd == 0:
n = self.njag
self.fahl = Util2d(
- model,
- (n,),
- np.float32,
- fahl,
- name="fahl",
- locat=self.unit_number[0],
+ model, (n,), np.float32, fahl, name="fahl", locat=self.unit_number[0]
)
# Stress period information
- self.perlen = Util2d(
- model, (self.nper,), np.float32, perlen, name="perlen"
- )
+ self.perlen = Util2d(model, (self.nper,), np.float32, perlen, name="perlen")
self.nstp = Util2d(model, (self.nper,), np.int32, nstp, name="nstp")
- self.tsmult = Util2d(
- model, (self.nper,), np.float32, tsmult, name="tsmult"
- )
+ self.tsmult = Util2d(model, (self.nper,), np.float32, tsmult, name="tsmult")
self.steady = Util2d(model, (self.nper,), bool, steady, name="steady")
self.itmuni_dict = {
@@ -449,9 +418,7 @@ def __init__(
lenuni=self.lenuni,
)
- self.tr = TemporalReference(
- itmuni=self.itmuni, start_datetime=start_datetime
- )
+ self.tr = TemporalReference(itmuni=self.itmuni, start_datetime=start_datetime)
self.start_datetime = start_datetime
@@ -500,7 +467,7 @@ def zcentroids(self):
@property
def ncpl(self):
- return self.nodes / self.nlay
+ return self.nodes // self.nlay
@classmethod
def load(cls, f, model, ext_unit_dict=None, check=True):
@@ -546,8 +513,8 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
if model.version != "mfusg":
print(
- "Warning: model version was reset from '{}' to 'mfusg' "
- "in order to load a DISU file".format(model.version)
+ f"Warning: model version was reset from '{model.version}' "
+ "to 'mfusg' in order to load a DISU file"
)
model.version = "mfusg"
@@ -613,9 +580,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
# dataset 3 -- nodelay
if model.verbose:
print(" loading NODELAY...")
- nodelay = Util2d.load(
- f, model, (nlay,), np.int32, "nodelay", ext_unit_dict
- )
+ nodelay = Util2d.load(f, model, (nlay,), np.int32, "nodelay", ext_unit_dict)
if model.verbose:
print(f" NODELAY {nodelay}")
@@ -624,9 +589,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
print(" loading TOP...")
top = [0] * nlay
for k in range(nlay):
- tpk = Util2d.load(
- f, model, (nodelay[k],), np.float32, "top", ext_unit_dict
- )
+ tpk = Util2d.load(f, model, (nodelay[k],), np.float32, "top", ext_unit_dict)
top[k] = tpk
if model.verbose:
for k, tpk in enumerate(top):
@@ -637,9 +600,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
print(" loading BOT...")
bot = [0] * nlay
for k in range(nlay):
- btk = Util2d.load(
- f, model, (nodelay[k],), np.float32, "btk", ext_unit_dict
- )
+ btk = Util2d.load(f, model, (nodelay[k],), np.float32, "btk", ext_unit_dict)
bot[k] = btk
if model.verbose:
for k, btk in enumerate(bot):
@@ -682,9 +643,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
if ivsd == 1:
if model.verbose:
print(" loading IVC...")
- ivc = Util2d.load(
- f, model, (njag,), np.int32, "ivc", ext_unit_dict
- )
+ ivc = Util2d.load(f, model, (njag,), np.int32, "ivc", ext_unit_dict)
if model.verbose:
print(f" IVC {ivc}")
@@ -693,9 +652,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
if idsymrd == 1:
if model.verbose:
print(" loading CL1...")
- cl1 = Util2d.load(
- f, model, (njags,), np.float32, "cl1", ext_unit_dict
- )
+ cl1 = Util2d.load(f, model, (njags,), np.float32, "cl1", ext_unit_dict)
if model.verbose:
print(f" CL1 {cl1}")
@@ -704,9 +661,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
if idsymrd == 1:
if model.verbose:
print(" loading CL2...")
- cl2 = Util2d.load(
- f, model, (njags,), np.float32, "cl2", ext_unit_dict
- )
+ cl2 = Util2d.load(f, model, (njags,), np.float32, "cl2", ext_unit_dict)
if model.verbose:
print(f" CL2 {cl2}")
@@ -715,9 +670,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
if idsymrd == 0:
if model.verbose:
print(" loading CL12...")
- cl12 = Util2d.load(
- f, model, (njag,), np.float32, "cl12", ext_unit_dict
- )
+ cl12 = Util2d.load(f, model, (njag,), np.float32, "cl12", ext_unit_dict)
if model.verbose:
print(f" CL12 {cl12}")
@@ -879,9 +832,7 @@ def write_file(self):
# Item 13: NPER, NSTP, TSMULT, Ss/tr
for t in range(self.nper):
- f_dis.write(
- f"{self.perlen[t]:14f}{self.nstp[t]:14d}{self.tsmult[t]:10f} "
- )
+ f_dis.write(f"{self.perlen[t]:14f}{self.nstp[t]:14d}{self.tsmult[t]:10f} ")
if self.steady[t]:
f_dis.write(" SS\n")
else:
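The `ncpl` hunk above is the one behavioral change in this file: `/` returns a float even when the division is exact, while `//` keeps the nodes-per-layer count an integer, as a cell count should be:

```python
# Why ncpl swaps "/" for "//":
nodes, nlay = 1200, 4
print(nodes / nlay)   # 300.0 (float; awkward for shapes and indexing)
print(nodes // nlay)  # 300   (int)
```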
diff --git a/flopy/mfusg/mfusggnc.py b/flopy/mfusg/mfusggnc.py
index 2d12a47e3f..2114e46db7 100644
--- a/flopy/mfusg/mfusggnc.py
+++ b/flopy/mfusg/mfusggnc.py
@@ -128,9 +128,7 @@ def __init__(
if 0 < numalphaj < 6:
self.numalphaj = numalphaj
else:
- raise Exception(
- "mfgnc: incorrect number of adjacent contributing nodes"
- )
+ raise Exception("mfgnc: incorrect number of adjacent contributing nodes")
self.i2kn = i2kn
self.isymgncn = isymgncn
@@ -140,9 +138,7 @@ def __init__(
raise Exception("mfgnc: GNC data must be provided")
if len(gncdata) != self.numgnc:
- raise Exception(
- "mfgnc: Length of GNC data must equal number of GNC nodes"
- )
+ raise Exception("mfgnc: Length of GNC data must equal number of GNC nodes")
self.dtype = MfUsgGnc.get_default_dtype(self.numalphaj, self.iflalphan)
@@ -199,12 +195,7 @@ def write_file(self, f=None, check=False):
@staticmethod
def get_default_dtype(numalphaj, iflalphan):
"""Returns default GNC dtypes."""
- dtype = np.dtype(
- [
- ("NodeN", int),
- ("NodeM", int),
- ]
- ).descr
+ dtype = np.dtype([("NodeN", int), ("NodeM", int)]).descr
for idx in range(numalphaj):
dtype.append((f"Node{idx:d}", ">> import flopy
>>> m = flopy.mfusg.MfUsg()
>>> disu = flopy.mfusg.MfUsgDisU(
- model=m, nlay=1, nodes=1, iac=[1], njag=1,ja=np.array([0]), fahl=[1.0], cl12=[1.0])
+ ... model=m, nlay=1, nodes=1, iac=[1], njag=1,ja=np.array([0]),
+ ... fahl=[1.0], cl12=[1.0])
>>> lpf = flopy.mfusg.MfUsgLpf(m)
"""
@@ -299,23 +300,13 @@ def __init__(
if not structured:
njag = dis.njag
self.anglex = Util2d(
- model,
- (njag,),
- np.float32,
- anglex,
- "anglex",
- locat=self.unit_number[0],
+ model, (njag,), np.float32, anglex, "anglex", locat=self.unit_number[0]
)
if not structured:
njag = dis.njag
self.ksat = Util2d(
- model,
- (njag,),
- np.float32,
- ksat,
- "ksat",
- locat=self.unit_number[0],
+ model, (njag,), np.float32, ksat, "ksat", locat=self.unit_number[0]
)
if add_package:
@@ -337,11 +328,7 @@ def write_file(self, check=True, f=None):
"""
# allows turning off package checks when writing files at model level
if check:
- self.check(
- f=f"{self.name[0]}.chk",
- verbose=self.parent.verbose,
- level=1,
- )
+ self.check(f=f"{self.name[0]}.chk", verbose=self.parent.verbose, level=1)
# get model information
nlay = self.parent.nlay
@@ -379,9 +366,7 @@ def write_file(self, check=True, f=None):
# Item 7: WETFCT, IWETIT, IHDWET
iwetdry = self.laywet.sum()
if iwetdry > 0:
- f_obj.write(
- f"{self.wetfct:10f}{self.iwetit:10d}{self.ihdwet:10d}\n"
- )
+ f_obj.write(f"{self.wetfct:10f}{self.iwetit:10d}{self.ihdwet:10d}\n")
transient = not dis.steady.all()
structured = self.parent.structured
@@ -442,7 +427,8 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
>>> import flopy
>>> m = flopy.mfusg.MfUsg()
>>> disu = flopy.mfusg.MfUsgDisU(
- model=m, nlay=1, nodes=1, iac=[1], njag=1,ja=np.array([0]), fahl=[1.0], cl12=[1.0])
+ ... model=m, nlay=1, nodes=1, iac=[1], njag=1,
+ ... ja=np.array([0]), fahl=[1.0], cl12=[1.0])
>>> lpf = flopy.mfusg.MfUsgLpf.load('test.lpf', m)
"""
msg = (
@@ -480,17 +466,9 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
novfc,
) = cls._load_item1(line, model)
- (
- laytyp,
- layavg,
- chani,
- layvka,
- laywet,
- wetfct,
- iwetit,
- ihdwet,
- iwetdry,
- ) = cls._load_items_2_to_7(f_obj, model)
+ (laytyp, layavg, chani, layvka, laywet, wetfct, iwetit, ihdwet, iwetdry) = (
+ cls._load_items_2_to_7(f_obj, model)
+ )
# ANGLEX for unstructured grid with anisotropy
anis = any(ch != 1 for ch in chani)
@@ -522,9 +500,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
if abs(ikcflag) == 1:
if model.verbose:
print(" loading ksat...")
- ksat = Util2d.load(
- f_obj, model, (njag,), np.float32, "ksat", ext_unit_dict
- )
+ ksat = Util2d.load(f_obj, model, (njag,), np.float32, "ksat", ext_unit_dict)
f_obj.close()
@@ -566,11 +542,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
filenames=filenames,
)
if check:
- lpf.check(
- f=f"{lpf.name[0]}.chk",
- verbose=lpf.parent.verbose,
- level=0,
- )
+ lpf.check(f=f"{lpf.name[0]}.chk", verbose=lpf.parent.verbose, level=0)
return lpf
@staticmethod
@@ -593,9 +565,7 @@ def _load_item1(line, model):
]
constantcv = "CONSTANTCV" in [item.upper() for item in text_list]
thickstrt = "THICKSTRT" in [item.upper() for item in text_list]
- nocvcorrection = "NOCVCORRECTION" in [
- item.upper() for item in text_list
- ]
+ nocvcorrection = "NOCVCORRECTION" in [item.upper() for item in text_list]
novfc = "NOVFC" in [item.upper() for item in text_list]
return (
@@ -659,17 +629,7 @@ def _load_items_2_to_7(f_obj, model):
int(text_list[2]),
)
- return (
- laytyp,
- layavg,
- chani,
- layvka,
- laywet,
- wetfct,
- iwetit,
- ihdwet,
- iwetdry,
- )
+ return (laytyp, layavg, chani, layvka, laywet, wetfct, iwetit, ihdwet, iwetdry)
@staticmethod
def _load_hy_tran_kv_vcont(
@@ -723,12 +683,7 @@ def _load_hy_tran_kv_vcont(
print(f" loading hani layer {layer + 1:3d}...")
if "hani" not in par_types:
hani_k = Util2d.load(
- f_obj,
- model,
- util2d_shape,
- np.float32,
- "hani",
- ext_unit_dict,
+ f_obj, model, util2d_shape, np.float32, "hani", ext_unit_dict
)
else:
f_obj.readline()
@@ -793,18 +748,10 @@ def _load_layer_properties(
util2d_shape = get_util2d_shape_for_layer(model, layer=layer)
if ikcflag == 0:
- (
- hk[layer],
- hani[layer],
- vka[layer],
- ) = self._load_hy_tran_kv_vcont(
+ (hk[layer], hani[layer], vka[layer]) = self._load_hy_tran_kv_vcont(
f_obj,
model,
- {
- "layer": layer,
- "layvka": layvka[layer],
- "chani": chani[layer],
- },
+ {"layer": layer, "layvka": layvka[layer], "chani": chani[layer]},
ext_unit_dict,
(par_types, parm_dict),
)
@@ -825,12 +772,7 @@ def _load_layer_properties(
print(f" loading vkcb layer {layer + 1:3d}...")
if "vkcb" not in par_types:
vkcb[layer] = Util2d.load(
- f_obj,
- model,
- util2d_shape,
- np.float32,
- "vkcb",
- ext_unit_dict,
+ f_obj, model, util2d_shape, np.float32, "vkcb", ext_unit_dict
)
else:
_ = f_obj.readline()
@@ -843,20 +785,13 @@ def _load_layer_properties(
if model.verbose:
print(f" loading wetdry layer {layer + 1:3d}...")
wetdry[layer] = Util2d.load(
- f_obj,
- model,
- util2d_shape,
- np.float32,
- "wetdry",
- ext_unit_dict,
+ f_obj, model, util2d_shape, np.float32, "wetdry", ext_unit_dict
)
return hk, hani, vka, ss, sy, vkcb, wetdry
@staticmethod
- def _load_storage(
- f_obj, model, layer_vars, ext_unit_dict, par_types_parm_dict
- ):
+ def _load_storage(f_obj, model, layer_vars, ext_unit_dict, par_types_parm_dict):
"""
Loads ss, sy file entries.
@@ -902,12 +837,7 @@ def _load_storage(
print(f" loading sy layer {layer + 1:3d}...")
if "sy" not in par_types:
sy_k = Util2d.load(
- f_obj,
- model,
- util2d_shape,
- np.float32,
- "sy",
- ext_unit_dict,
+ f_obj, model, util2d_shape, np.float32, "sy", ext_unit_dict
)
else:
f_obj.readline()
diff --git a/flopy/mfusg/mfusgsms.py b/flopy/mfusg/mfusgsms.py
index 865e9d5f4c..afd00d11c2 100644
--- a/flopy/mfusg/mfusgsms.py
+++ b/flopy/mfusg/mfusgsms.py
@@ -434,8 +434,8 @@ def load(cls, f, model, ext_unit_dict=None):
if model.version != "mfusg":
print(
- "Warning: model version was reset from '{}' to 'mfusg' "
- "in order to load a SMS file".format(model.version)
+ f"Warning: model version was reset from '{model.version}' "
+ "to 'mfusg' in order to load a SMS file"
)
model.version = "mfusg"
@@ -463,10 +463,7 @@ def load(cls, f, model, ext_unit_dict=None):
# Record 1b -- line will have already been read
if model.verbose:
- print(
- " loading HCLOSE HICLOSE MXITER ITER1 "
- "IPRSMS NONLINMETH LINMETH..."
- )
+ print(" loading HCLOSE HICLOSE MXITER ITER1 IPRSMS NONLINMETH LINMETH...")
ll = line_parse(line)
hclose = float(ll.pop(0))
hiclose = float(ll.pop(0))
diff --git a/flopy/mfusg/mfusgwel.py b/flopy/mfusg/mfusgwel.py
index 493c7a19c7..9a859734b4 100644
--- a/flopy/mfusg/mfusgwel.py
+++ b/flopy/mfusg/mfusgwel.py
@@ -229,9 +229,7 @@ def __init__(
if dtype is not None:
self.dtype = dtype
else:
- self.dtype = self.get_default_dtype(
- structured=self.parent.structured
- )
+ self.dtype = self.get_default_dtype(structured=self.parent.structured)
# determine if any aux variables in dtype
options = self._check_for_aux(options)
@@ -239,9 +237,7 @@ def __init__(
self.options = options
# initialize MfList
- self.stress_period_data = MfList(
- self, stress_period_data, binary=binary
- )
+ self.stress_period_data = MfList(self, stress_period_data, binary=binary)
if add_package:
self.parent.add_package(self)
@@ -297,9 +293,7 @@ def write_file(self, f=None):
f_wel.write(f"{self.heading}\n")
- mxact = (
- self.stress_period_data.mxact + self.cln_stress_period_data.mxact
- )
+ mxact = self.stress_period_data.mxact + self.cln_stress_period_data.mxact
line = f" {mxact:9d} {self.ipakcb:9d} "
if self.options is None:
diff --git a/flopy/modflow/mf.py b/flopy/modflow/mf.py
index b1c2a65aed..dbf717ea34 100644
--- a/flopy/modflow/mf.py
+++ b/flopy/modflow/mf.py
@@ -228,9 +228,8 @@ def __init__(
def __repr__(self):
nrow, ncol, nlay, nper = self.get_nrow_ncol_nlay_nper()
# structured case
- s = (
- "MODFLOW {} layer(s) {} row(s) {} column(s) "
- "{} stress period(s)".format(nlay, nrow, ncol, nper)
+ s = "MODFLOW {} layer(s) {} row(s) {} column(s) {} stress period(s)".format(
+ nlay, nrow, ncol, nper
)
return s
@@ -264,11 +263,7 @@ def modelgrid(self):
else:
ibound = None
# take the first non-None entry
- crs = (
- self._modelgrid.crs
- or self._modelgrid.proj4
- or self._modelgrid.epsg
- )
+ crs = self._modelgrid.crs or self._modelgrid.proj4 or self._modelgrid.epsg
common_kwargs = {
"crs": crs,
"xoff": self._modelgrid.xoffset,
@@ -292,10 +287,7 @@ def modelgrid(self):
ja=self.disu.ja.array,
**common_kwargs,
)
- print(
- "WARNING: Model grid functionality limited for unstructured "
- "grid."
- )
+ print("WARNING: Model grid functionality limited for unstructured grid.")
else:
# build structured grid
self._modelgrid = StructuredGrid(
@@ -449,16 +441,12 @@ def write_name_file(self):
if self.glo.unit_number[0] > 0:
f_nam.write(
"{:14s} {:5d} {}\n".format(
- self.glo.name[0],
- self.glo.unit_number[0],
- self.glo.file_name[0],
+ self.glo.name[0], self.glo.unit_number[0], self.glo.file_name[0]
)
)
f_nam.write(
"{:14s} {:5d} {}\n".format(
- self.lst.name[0],
- self.lst.unit_number[0],
- self.lst.file_name[0],
+ self.lst.name[0], self.lst.unit_number[0], self.lst.file_name[0]
)
)
f_nam.write(str(self.get_name_file_entries()))
@@ -483,9 +471,7 @@ def write_name_file(self):
f_nam.write(f"DATA {u:5d} {f}\n")
# write the output files
- for u, f, b in zip(
- self.output_units, self.output_fnames, self.output_binflag
- ):
+ for u, f, b in zip(self.output_units, self.output_fnames, self.output_binflag):
if u == 0:
continue
if b:
@@ -744,7 +730,8 @@ def load(
# DEPRECATED since version 3.3.4
if ml.version == "mfusg":
raise ValueError(
- "flopy.modflow.Modflow no longer supports mfusg; use flopy.mfusg.MfUsg() instead"
+ "flopy.modflow.Modflow no longer supports mfusg; "
+ "use flopy.mfusg.MfUsg() instead"
)
# reset unit number for glo file
@@ -840,21 +827,15 @@ def load(
)
else:
item.package.load(
- item.filehandle,
- ml,
- ext_unit_dict=ext_unit_dict,
+ item.filehandle, ml, ext_unit_dict=ext_unit_dict
)
files_successfully_loaded.append(item.filename)
if ml.verbose:
- print(
- f" {item.filetype:4s} package load...success"
- )
+ print(f" {item.filetype:4s} package load...success")
except Exception as e:
ml.load_fail = True
if ml.verbose:
- print(
- f" {item.filetype:4s} package load...failed"
- )
+ print(f" {item.filetype:4s} package load...failed")
print(f" {e!s}")
files_not_loaded.append(item.filename)
else:
@@ -867,15 +848,11 @@ def load(
)
else:
item.package.load(
- item.filehandle,
- ml,
- ext_unit_dict=ext_unit_dict,
+ item.filehandle, ml, ext_unit_dict=ext_unit_dict
)
files_successfully_loaded.append(item.filename)
if ml.verbose:
- print(
- f" {item.filetype:4s} package load...success"
- )
+ print(f" {item.filetype:4s} package load...success")
else:
if ml.verbose:
print(f" {item.filetype:4s} package load...skipped")
@@ -893,9 +870,7 @@ def load(
if key not in ml.external_units:
ml.external_fnames.append(item.filename)
ml.external_units.append(key)
- ml.external_binflag.append(
- "binary" in item.filetype.lower()
- )
+ ml.external_binflag.append("binary" in item.filetype.lower())
ml.external_output.append(False)
else:
raise KeyError(f"unhandled case: {key}, {item}")
diff --git a/flopy/modflow/mfaddoutsidefile.py b/flopy/modflow/mfaddoutsidefile.py
index b9935cd24f..10cb48de4f 100644
--- a/flopy/modflow/mfaddoutsidefile.py
+++ b/flopy/modflow/mfaddoutsidefile.py
@@ -8,9 +8,7 @@ class mfaddoutsidefile(Package):
def __init__(self, model, name, extension, unitnumber):
# call base package constructor
- super().__init__(
- model, extension, name, unitnumber, allowDuplicates=True
- )
+ super().__init__(model, extension, name, unitnumber, allowDuplicates=True)
self.parent.add_package(self)
def __repr__(self):
diff --git a/flopy/modflow/mfag.py b/flopy/modflow/mfag.py
index 9c0e5125f8..5e7b16e9da 100644
--- a/flopy/modflow/mfag.py
+++ b/flopy/modflow/mfag.py
@@ -60,141 +60,91 @@ class ModflowAg(Package):
"""
- _options = dict(
- [
- ("noprint", OptionBlock.simple_flag),
- (
- "irrigation_diversion",
- {
- OptionBlock.dtype: np.bool_,
- OptionBlock.nested: True,
- OptionBlock.n_nested: 2,
- OptionBlock.vars: dict(
- [
- ("numirrdiversions", OptionBlock.simple_int),
- ("maxcellsdiversion", OptionBlock.simple_int),
- ]
- ),
- },
- ),
- (
- "irrigation_well",
- {
- OptionBlock.dtype: np.bool_,
- OptionBlock.nested: True,
- OptionBlock.n_nested: 2,
- OptionBlock.vars: dict(
- [
- ("numirrwells", OptionBlock.simple_int),
- ("maxcellswell", OptionBlock.simple_int),
- ]
- ),
- },
- ),
- (
- "supplemental_well",
- {
- OptionBlock.dtype: np.bool_,
- OptionBlock.nested: True,
- OptionBlock.n_nested: 2,
- OptionBlock.vars: dict(
- [
- ("numsupwells", OptionBlock.simple_int),
- ("maxdiversions", OptionBlock.simple_int),
- ]
- ),
- },
- ),
- (
- "maxwells",
- {
- OptionBlock.dtype: np.bool_,
- OptionBlock.nested: True,
- OptionBlock.n_nested: 1,
- OptionBlock.vars: dict(
- [("nummaxwell", OptionBlock.simple_int)]
- ),
- },
- ),
- ("tabfiles", OptionBlock.simple_tabfile),
- ("phiramp", OptionBlock.simple_flag),
- (
- "etdemand",
- {
- OptionBlock.dtype: np.bool_,
- OptionBlock.nested: True,
- OptionBlock.n_nested: 1,
- OptionBlock.vars: {
- "accel": {
- OptionBlock.dtype: float,
- OptionBlock.nested: False,
- OptionBlock.optional: True,
- }
- },
- },
- ),
- ("trigger", OptionBlock.simple_flag),
- ("timeseries_diversion", OptionBlock.simple_flag),
- ("timeseries_well", OptionBlock.simple_flag),
- ("timeseries_diversionet", OptionBlock.simple_flag),
- ("timeseries_wellet", OptionBlock.simple_flag),
- (
- "diversionlist",
- {
- OptionBlock.dtype: np.bool_,
- OptionBlock.nested: True,
- OptionBlock.n_nested: 1,
- OptionBlock.vars: dict(
- [("unit_diversionlist", OptionBlock.simple_int)]
- ),
- },
- ),
- (
- "welllist",
- {
- OptionBlock.dtype: np.bool_,
- OptionBlock.nested: True,
- OptionBlock.n_nested: 1,
- OptionBlock.vars: dict(
- [("unit_welllist", OptionBlock.simple_int)]
- ),
- },
- ),
- (
- "wellirrlist",
- {
- OptionBlock.dtype: np.bool_,
- OptionBlock.nested: True,
- OptionBlock.n_nested: 1,
- OptionBlock.vars: dict(
- [("unit_wellirrlist", OptionBlock.simple_int)]
- ),
- },
- ),
- (
- "diversionirrlist",
- {
- OptionBlock.dtype: np.bool_,
- OptionBlock.nested: True,
- OptionBlock.n_nested: 1,
- OptionBlock.vars: dict(
- [("unit_diversionirrlist", OptionBlock.simple_int)]
- ),
- },
- ),
- (
- "wellcbc",
- {
- OptionBlock.dtype: np.bool_,
- OptionBlock.nested: True,
- OptionBlock.n_nested: 1,
- OptionBlock.vars: dict(
- [("unitcbc", OptionBlock.simple_int)]
- ),
- },
- ),
- ]
- )
+ _options = {
+ "noprint": OptionBlock.simple_flag,
+ "irrigation_diversion": {
+ OptionBlock.dtype: np.bool_,
+ OptionBlock.nested: True,
+ OptionBlock.n_nested: 2,
+ OptionBlock.vars: {
+ "numirrdiversions": OptionBlock.simple_int,
+ "maxcellsdiversion": OptionBlock.simple_int,
+ },
+ },
+ "irrigation_well": {
+ OptionBlock.dtype: np.bool_,
+ OptionBlock.nested: True,
+ OptionBlock.n_nested: 2,
+ OptionBlock.vars: {
+ "numirrwells": OptionBlock.simple_int,
+ "maxcellswell": OptionBlock.simple_int,
+ },
+ },
+ "supplemental_well": {
+ OptionBlock.dtype: np.bool_,
+ OptionBlock.nested: True,
+ OptionBlock.n_nested: 2,
+ OptionBlock.vars: {
+ "numsupwells": OptionBlock.simple_int,
+ "maxdiversions": OptionBlock.simple_int,
+ },
+ },
+ "maxwells": {
+ OptionBlock.dtype: np.bool_,
+ OptionBlock.nested: True,
+ OptionBlock.n_nested: 1,
+ OptionBlock.vars: {"nummaxwell": OptionBlock.simple_int},
+ },
+ "tabfiles": OptionBlock.simple_tabfile,
+ "phiramp": OptionBlock.simple_flag,
+ "etdemand": {
+ OptionBlock.dtype: np.bool_,
+ OptionBlock.nested: True,
+ OptionBlock.n_nested: 1,
+ OptionBlock.vars: {
+ "accel": {
+ OptionBlock.dtype: float,
+ OptionBlock.nested: False,
+ OptionBlock.optional: True,
+ }
+ },
+ },
+ "trigger": OptionBlock.simple_flag,
+ "timeseries_diversion": OptionBlock.simple_flag,
+ "timeseries_well": OptionBlock.simple_flag,
+ "timeseries_diversionet": OptionBlock.simple_flag,
+ "timeseries_wellet": OptionBlock.simple_flag,
+ "diversionlist": {
+ OptionBlock.dtype: np.bool_,
+ OptionBlock.nested: True,
+ OptionBlock.n_nested: 1,
+ OptionBlock.vars: {"unit_diversionlist": OptionBlock.simple_int},
+ },
+ "welllist": {
+ OptionBlock.dtype: np.bool_,
+ OptionBlock.nested: True,
+ OptionBlock.n_nested: 1,
+ OptionBlock.vars: {"unit_welllist": OptionBlock.simple_int},
+ },
+ "wellirrlist": {
+ OptionBlock.dtype: np.bool_,
+ OptionBlock.nested: True,
+ OptionBlock.n_nested: 1,
+ OptionBlock.vars: {"unit_wellirrlist": OptionBlock.simple_int},
+ },
+ "diversionirrlist": {
+ OptionBlock.dtype: np.bool_,
+ OptionBlock.nested: True,
+ OptionBlock.n_nested: 1,
+ OptionBlock.vars: {"unit_diversionirrlist": OptionBlock.simple_int},
+ },
+ "wellcbc": {
+ OptionBlock.dtype: np.bool_,
+ OptionBlock.nested: True,
+ OptionBlock.n_nested: 1,
+ OptionBlock.vars: {"unitcbc": OptionBlock.simple_int},
+ },
+ }
def __init__(
self,
@@ -211,9 +161,7 @@ def __init__(
nper=0,
):
if "nwt" not in model.version:
- raise AssertionError(
- "Model version must be mfnwt to use the AG package"
- )
+ raise AssertionError("Model version must be mfnwt to use the AG package")
# setup the package parent class
if unitnumber is None:
@@ -357,9 +305,7 @@ def write_file(self, check=False):
foo.write("TIME SERIES \n")
for record in self.time_series:
if record["keyword"] in ("welletall", "wellall"):
- foo.write(
- f"{record['keyword']} {record['unit']}\n".upper()
- )
+ foo.write(f"{record['keyword']} {record['unit']}\n".upper())
else:
foo.write(fmt.format(*record).upper())
@@ -450,9 +396,7 @@ def write_file(self, check=False):
)
else:
foo.write(
- fmt20.format(
- rec["segid"], rec["numcell"]
- )
+ fmt20.format(rec["segid"], rec["numcell"])
)
for i in range(num):
@@ -503,9 +447,7 @@ def write_file(self, check=False):
)
else:
foo.write(
- fmt24.format(
- rec["wellid"] + 1, rec["numcell"]
- )
+ fmt24.format(rec["wellid"] + 1, rec["numcell"])
)
for i in range(num):
@@ -540,9 +482,7 @@ def write_file(self, check=False):
num = rec["numcell"]
foo.write(
- fmt28.format(
- rec["wellid"] + 1, rec["numcell"]
- )
+ fmt28.format(rec["wellid"] + 1, rec["numcell"])
)
for i in range(num):
@@ -558,8 +498,7 @@ def write_file(self, check=False):
else:
foo.write(
"{:d} {:f}\n".format(
- rec[f"segid{i}"],
- rec[f"fracsup{i}"],
+ rec[f"segid{i}"], rec[f"fracsup{i}"]
)
)
@@ -623,21 +562,10 @@ def get_default_dtype(maxells=0, block="well"):
dtype : (list, tuple)
"""
if block == "well":
- dtype = [
- ("k", int),
- ("i", int),
- ("j", int),
- ("flux", float),
- ]
+ dtype = [("k", int), ("i", int), ("j", int), ("flux", float)]
elif block == "tabfile_well":
- dtype = [
- ("unit", int),
- ("tabval", int),
- ("k", int),
- ("i", int),
- ("j", int),
- ]
+ dtype = [("unit", int), ("tabval", int), ("k", int), ("i", int), ("j", int)]
elif block == "time series":
dtype = [("keyword", object), ("id", int), ("unit", int)]
diff --git a/flopy/modflow/mfbas.py b/flopy/modflow/mfbas.py
index 66433fd3dd..9332af2628 100644
--- a/flopy/modflow/mfbas.py
+++ b/flopy/modflow/mfbas.py
@@ -191,9 +191,7 @@ def check(self, f=None, verbose=True, level=1, checktype=None):
neighbors = chk.get_neighbors(self.ibound.array)
if isinstance(neighbors, np.ndarray):
- neighbors[np.isnan(neighbors)] = (
- 0 # set neighbors at edges to 0 (inactive)
- )
+ neighbors[np.isnan(neighbors)] = 0 # set neighbors at edges to 0 (inactive)
chk.values(
self.ibound.array,
(self.ibound.array > 0) & np.all(neighbors < 1, axis=0),
@@ -225,11 +223,7 @@ def write_file(self, check=True):
"""
# allows turning off package checks when writing files at model level
if check:
- self.check(
- f=f"{self.name[0]}.chk",
- verbose=self.parent.verbose,
- level=1,
- )
+ self.check(f=f"{self.name[0]}.chk", verbose=self.parent.verbose, level=1)
# Open file for writing
f_bas = open(self.fn_path, "w")
# First line: heading
@@ -385,9 +379,5 @@ def load(cls, f, model, ext_unit_dict=None, check=True, **kwargs):
filenames=filenames,
)
if check:
- bas.check(
- f=f"{bas.name[0]}.chk",
- verbose=bas.parent.verbose,
- level=0,
- )
+ bas.check(f=f"{bas.name[0]}.chk", verbose=bas.parent.verbose, level=0)
return bas
diff --git a/flopy/modflow/mfbcf.py b/flopy/modflow/mfbcf.py
index 83d676448c..714bd88e31 100644
--- a/flopy/modflow/mfbcf.py
+++ b/flopy/modflow/mfbcf.py
@@ -145,12 +145,7 @@ def __init__(
locat=self.unit_number[0],
)
self.laycon = Util2d(
- model,
- (nlay,),
- np.int32,
- laycon,
- name="laycon",
- locat=self.unit_number[0],
+ model, (nlay,), np.int32, laycon, name="laycon", locat=self.unit_number[0]
)
self.trpy = Util2d(
model,
@@ -282,9 +277,7 @@ def write_file(self, f=None):
f_bcf.write(self.vcont[k].get_file_entry())
if transient and ((self.laycon[k] == 2) or (self.laycon[k] == 3)):
f_bcf.write(self.sf2[k].get_file_entry())
- if (self.iwdflg != 0) and (
- (self.laycon[k] == 1) or (self.laycon[k] == 3)
- ):
+ if (self.iwdflg != 0) and ((self.laycon[k] == 1) or (self.laycon[k] == 3)):
f_bcf.write(self.wetdry[k].get_file_entry())
f_bcf.close()
@@ -402,9 +395,7 @@ def load(cls, f, model, ext_unit_dict=None):
# TRPY array
if model.verbose:
print(" loading TRPY...")
- trpy = Util2d.load(
- f, model, (nlay,), np.float32, "trpy", ext_unit_dict
- )
+ trpy = Util2d.load(f, model, (nlay,), np.float32, "trpy", ext_unit_dict)
# property data for each layer based on options
transient = not dis.steady.all()
@@ -447,9 +438,7 @@ def load(cls, f, model, ext_unit_dict=None):
else:
if model.verbose:
print(f" loading hy layer {k + 1:3d}...")
- t = Util2d.load(
- f, model, (nrow, ncol), np.float32, "hy", ext_unit_dict
- )
+ t = Util2d.load(f, model, (nrow, ncol), np.float32, "hy", ext_unit_dict)
hy[k] = t
# vcont
@@ -490,9 +479,7 @@ def load(cls, f, model, ext_unit_dict=None):
ext_unit_dict, filetype=ModflowBcf._ftype()
)
if ipakcb > 0:
- iu, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=ipakcb
- )
+ iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
model.add_pop_key_list(ipakcb)
# create instance of bcf object
diff --git a/flopy/modflow/mfbct.py b/flopy/modflow/mfbct.py
index dbd5115213..d85bea4937 100644
--- a/flopy/modflow/mfbct.py
+++ b/flopy/modflow/mfbct.py
@@ -60,13 +60,7 @@ def __init__(
self.diffnc = diffnc
self.izod = izod
self.ifod = ifod
- self.icbund = Util3d(
- model,
- (nlay, nrow, ncol),
- np.float32,
- icbund,
- "icbund",
- )
+ self.icbund = Util3d(model, (nlay, nrow, ncol), np.float32, icbund, "icbund")
self.porosity = Util3d(
model, (nlay, nrow, ncol), np.float32, porosity, "porosity"
)
@@ -74,13 +68,7 @@ def __init__(
self.dlv = Util3d(model, (nlay, nrow, ncol), np.float32, dlv, "dlv")
self.dth = Util3d(model, (nlay, nrow, ncol), np.float32, dth, "dth")
self.dtv = Util3d(model, (nlay, nrow, ncol), np.float32, dth, "dtv")
- self.sconc = Util3d(
- model,
- (nlay, nrow, ncol),
- np.float32,
- sconc,
- "sconc",
- )
+ self.sconc = Util3d(model, (nlay, nrow, ncol), np.float32, sconc, "sconc")
self.parent.add_package(self)
return
diff --git a/flopy/modflow/mfchd.py b/flopy/modflow/mfchd.py
index 530b606da0..3ed45aebbe 100644
--- a/flopy/modflow/mfchd.py
+++ b/flopy/modflow/mfchd.py
@@ -130,9 +130,7 @@ def __init__(
if dtype is not None:
self.dtype = dtype
else:
- self.dtype = self.get_default_dtype(
- structured=self.parent.structured
- )
+ self.dtype = self.get_default_dtype(structured=self.parent.structured)
self.stress_period_data = MfList(self, stress_period_data)
self.np = 0
diff --git a/flopy/modflow/mfdis.py b/flopy/modflow/mfdis.py
index bd6b9903df..b2bae6bc80 100644
--- a/flopy/modflow/mfdis.py
+++ b/flopy/modflow/mfdis.py
@@ -184,9 +184,7 @@ def __init__(
# Set values of all parameters
self._generate_heading()
- self.laycbd = Util2d(
- model, (self.nlay,), np.int32, laycbd, name="laycbd"
- )
+ self.laycbd = Util2d(model, (self.nlay,), np.int32, laycbd, name="laycbd")
self.laycbd[-1] = 0 # bottom layer must be zero
self.delr = Util2d(
model,
@@ -220,13 +218,9 @@ def __init__(
"botm",
locat=self.unit_number[0],
)
- self.perlen = Util2d(
- model, (self.nper,), np.float32, perlen, name="perlen"
- )
+ self.perlen = Util2d(model, (self.nper,), np.float32, perlen, name="perlen")
self.nstp = Util2d(model, (self.nper,), np.int32, nstp, name="nstp")
- self.tsmult = Util2d(
- model, (self.nper,), np.float32, tsmult, name="tsmult"
- )
+ self.tsmult = Util2d(model, (self.nper,), np.float32, tsmult, name="tsmult")
self.steady = Util2d(model, (self.nper,), bool, steady, name="steady")
try:
@@ -279,9 +273,7 @@ def __init__(
if start_datetime is None:
start_datetime = model._start_datetime
- self.tr = TemporalReference(
- itmuni=self.itmuni, start_datetime=start_datetime
- )
+ self.tr = TemporalReference(itmuni=self.itmuni, start_datetime=start_datetime)
self.start_datetime = start_datetime
self._totim = None
@@ -396,9 +388,7 @@ def get_kstp_kper_toffset(self, t=0.0, use_cached_totim=False):
break
return kstp, kper, toffset
- def get_totim_from_kper_toffset(
- self, kper=0, toffset=0.0, use_cached_totim=False
- ):
+ def get_totim_from_kper_toffset(self, kper=0, toffset=0.0, use_cached_totim=False):
"""
Get totim from a passed kper and time offset from the beginning
of a stress period
@@ -425,9 +415,7 @@ def get_totim_from_kper_toffset(
if kper < 0:
kper = 0.0
if kper >= self.nper:
- raise ValueError(
- f"kper ({kper}) must be less than to nper ({self.nper})."
- )
+ raise ValueError(f"kper ({kper}) must be less than to nper ({self.nper}).")
totim = self.get_totim(use_cached_totim)
nstp = self.nstp.array
@@ -613,11 +601,7 @@ def write_file(self, check=True):
"""
if check: # allows turning off package checks when writing files at model level
- self.check(
- f=f"{self.name[0]}.chk",
- verbose=self.parent.verbose,
- level=1,
- )
+ self.check(f=f"{self.name[0]}.chk", verbose=self.parent.verbose, level=1)
# Open file for writing
f_dis = open(self.fn_path, "w")
# Item 0: heading
@@ -625,12 +609,7 @@ def write_file(self, check=True):
# Item 1: NLAY, NROW, NCOL, NPER, ITMUNI, LENUNI
f_dis.write(
"{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}\n".format(
- self.nlay,
- self.nrow,
- self.ncol,
- self.nper,
- self.itmuni,
- self.lenuni,
+ self.nlay, self.nrow, self.ncol, self.nper, self.itmuni, self.lenuni
)
)
# Item 2: LAYCBD
@@ -648,9 +627,7 @@ def write_file(self, check=True):
# Item 6: NPER, NSTP, TSMULT, Ss/tr
for t in range(self.nper):
- f_dis.write(
- f"{self.perlen[t]:14f}{self.nstp[t]:14d}{self.tsmult[t]:10f} "
- )
+ f_dis.write(f"{self.perlen[t]:14f}{self.nstp[t]:14d}{self.tsmult[t]:10f} ")
if self.steady[t]:
f_dis.write(" SS\n")
else:
@@ -699,10 +676,7 @@ def check(self, f=None, verbose=True, level=1, checktype=None):
thickness = np.ma.array(thickness, mask=non_finite)
chk.values(
- thickness,
- active & (thickness <= 0),
- "zero or negative thickness",
- "Error",
+ thickness, active & (thickness <= 0), "zero or negative thickness", "Error"
)
thin_cells = (thickness < chk.thin_cell_threshold) & (thickness > 0)
chk.values(
@@ -808,21 +782,15 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
# dataset 3 -- delr
if model.verbose:
print(" loading delr...")
- delr = Util2d.load(
- f, model, (ncol,), np.float32, "delr", ext_unit_dict
- )
+ delr = Util2d.load(f, model, (ncol,), np.float32, "delr", ext_unit_dict)
# dataset 4 -- delc
if model.verbose:
print(" loading delc...")
- delc = Util2d.load(
- f, model, (nrow,), np.float32, "delc", ext_unit_dict
- )
+ delc = Util2d.load(f, model, (nrow,), np.float32, "delc", ext_unit_dict)
# dataset 5 -- top
if model.verbose:
print(" loading top...")
- top = Util2d.load(
- f, model, (nrow, ncol), np.float32, "top", ext_unit_dict
- )
+ top = Util2d.load(f, model, (nrow, ncol), np.float32, "top", ext_unit_dict)
# dataset 6 -- botm
ncbd = laycbd.sum()
if model.verbose:
@@ -897,11 +865,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
filenames=filenames,
)
if check:
- dis.check(
- f=f"{dis.name[0]}.chk",
- verbose=dis.parent.verbose,
- level=0,
- )
+ dis.check(f=f"{dis.name[0]}.chk", verbose=dis.parent.verbose, level=0)
# return dis object instance
return dis
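The rewrapped `write_file` lines above lean on fixed-width format specs to produce MODFLOW's 10-character integer fields; this is ordinary Python formatting, for example:

```python
# Item 1 of the DIS file: six right-aligned 10-character integer fields
# (example values only).
print("{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}".format(3, 40, 20, 1, 4, 2))
#          3        40        20         1         4         2
```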
diff --git a/flopy/modflow/mfdrn.py b/flopy/modflow/mfdrn.py
index 1639457340..0dbaca6917 100644
--- a/flopy/modflow/mfdrn.py
+++ b/flopy/modflow/mfdrn.py
@@ -92,8 +92,8 @@ class ModflowDrn(Package):
Notes
-----
Parameters are not supported in FloPy.
- If "RETURNFLOW" in passed in options, the drain return package (DRT) activated, which expects
- a different (longer) dtype for stress_period_data
+ If "RETURNFLOW" in passed in options, the drain return package (DRT)
+ activated, which expects a different (longer) dtype for stress_period_data
Examples
--------
@@ -224,11 +224,7 @@ def write_file(self, check=True):
"""
if check: # allows turning off package checks when writing files at model level
- self.check(
- f=f"{self.name[0]}.chk",
- verbose=self.parent.verbose,
- level=1,
- )
+ self.check(f=f"{self.name[0]}.chk", verbose=self.parent.verbose, level=1)
f_drn = open(self.fn_path, "w")
f_drn.write(f"{self.heading}\n")
line = f"{self.stress_period_data.mxact:10d}{self.ipakcb:10d}"
@@ -251,9 +247,7 @@ def add_record(self, kper, index, values):
@staticmethod
def get_empty(ncells=0, aux_names=None, structured=True, is_drt=False):
# get an empty recarray that corresponds to dtype
- dtype = ModflowDrn.get_default_dtype(
- structured=structured, is_drt=is_drt
- )
+ dtype = ModflowDrn.get_default_dtype(structured=structured, is_drt=is_drt)
if aux_names is not None:
dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
return create_empty_recarray(ncells, dtype, default_value=-1.0e10)
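`create_empty_recarray`, called at the end of `get_empty` above, is a flopy utility; a sketch of its assumed behavior (the signature and fill value are taken from this diff, the body is an assumption):

```python
import numpy as np

# Assumed mirror of flopy's create_empty_recarray: a recarray with
# every field preset to a sentinel fill value.
def create_empty_recarray(ncells, dtype, default_value=-1.0e10):
    rec = np.zeros(ncells, dtype=dtype).view(np.recarray)
    for name in rec.dtype.names:
        rec[name] = default_value
    return rec

drn_dtype = np.dtype(
    [("k", int), ("i", int), ("j", int), ("elev", np.float32), ("cond", np.float32)]
)
print(create_empty_recarray(2, drn_dtype).cond)  # [-1.e+10 -1.e+10]
```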
diff --git a/flopy/modflow/mfdrt.py b/flopy/modflow/mfdrt.py
index c4988afefa..0bc3b39d27 100644
--- a/flopy/modflow/mfdrt.py
+++ b/flopy/modflow/mfdrt.py
@@ -98,8 +98,8 @@ class ModflowDrt(Package):
>>> import flopy
>>> ml = flopy.modflow.Modflow()
- >>> lrcec = {0:[2, 3, 4, 10., 100., 1 ,1 ,1, 1.0]} #this drain will be applied to all
- >>> #stress periods
+ >>> # this drain will be applied to all stress periods
+ >>> lrcec = {0:[2, 3, 4, 10., 100., 1 ,1 ,1, 1.0]}
>>> drt = flopy.modflow.ModflowDrt(ml, stress_period_data=lrcec)
"""
@@ -153,9 +153,7 @@ def __init__(
if dtype is not None:
self.dtype = dtype
else:
- self.dtype = self.get_default_dtype(
- structured=self.parent.structured
- )
+ self.dtype = self.get_default_dtype(structured=self.parent.structured)
self.stress_period_data = MfList(self, stress_period_data)
self.parent.add_package(self)
@@ -216,11 +214,7 @@ def write_file(self, check=True):
"""
if check: # allows turning off package checks when writing files at model level
- self.check(
- f=f"{self.name[0]}.chk",
- verbose=self.parent.verbose,
- level=1,
- )
+ self.check(f=f"{self.name[0]}.chk", verbose=self.parent.verbose, level=1)
f_drn = open(self.fn_path, "w")
f_drn.write(f"{self.heading}\n")
line = f"{self.stress_period_data.mxact:10d}{self.ipakcb:10d}{0:10d}{0:10d}"
diff --git a/flopy/modflow/mfevt.py b/flopy/modflow/mfevt.py
index f3e92f3f0a..2398d9686b 100644
--- a/flopy/modflow/mfevt.py
+++ b/flopy/modflow/mfevt.py
@@ -132,18 +132,10 @@ def __init__(
exdp_u2d_shape = get_pak_vals_shape(model, exdp)
ievt_u2d_shape = get_pak_vals_shape(model, ievt)
- self.surf = Transient2d(
- model, surf_u2d_shape, np.float32, surf, name="surf"
- )
- self.evtr = Transient2d(
- model, evtr_u2d_shape, np.float32, evtr, name="evtr"
- )
- self.exdp = Transient2d(
- model, exdp_u2d_shape, np.float32, exdp, name="exdp"
- )
- self.ievt = Transient2d(
- model, ievt_u2d_shape, np.int32, ievt, name="ievt"
- )
+ self.surf = Transient2d(model, surf_u2d_shape, np.float32, surf, name="surf")
+ self.evtr = Transient2d(model, evtr_u2d_shape, np.float32, evtr, name="evtr")
+ self.exdp = Transient2d(model, exdp_u2d_shape, np.float32, exdp, name="exdp")
+ self.ievt = Transient2d(model, ievt_u2d_shape, np.int32, ievt, name="ievt")
self.np = 0
self.parent.add_package(self)
@@ -182,18 +174,11 @@ def write_file(self, f=None):
for kper, u2d in self.ievt.transient_2ds.items():
ievt[kper] = u2d.array + 1
ievt = Transient2d(
- self.parent,
- self.ievt.shape,
- self.ievt.dtype,
- ievt,
- self.ievt.name,
+ self.parent, self.ievt.shape, self.ievt.dtype, ievt, self.ievt.name
)
if not self.parent.structured:
mxndevt = np.max(
- [
- u2d.array.size
- for kper, u2d in self.ievt.transient_2ds.items()
- ]
+ [u2d.array.size for kper, u2d in self.ievt.transient_2ds.items()]
)
f_evt.write(f"{mxndevt:10d}\n")
@@ -274,9 +259,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
npar = int(raw[1])
if npar > 0:
if model.verbose:
- print(
- " Parameters detected. Number of parameters = ", npar
- )
+ print(" Parameters detected. Number of parameters = ", npar)
line = f.readline()
# Dataset 2
t = line.strip().split()
@@ -327,25 +310,16 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
if insurf >= 0:
if model.verbose:
print(f" loading surf stress period {iper + 1:3d}...")
- t = Util2d.load(
- f, model, u2d_shape, np.float32, "surf", ext_unit_dict
- )
+ t = Util2d.load(f, model, u2d_shape, np.float32, "surf", ext_unit_dict)
current_surf = t
surf[iper] = current_surf
if inevtr >= 0:
if npar == 0:
if model.verbose:
- print(
- f" loading evtr stress period {iper + 1:3d}..."
- )
+ print(f" loading evtr stress period {iper + 1:3d}...")
t = Util2d.load(
- f,
- model,
- u2d_shape,
- np.float32,
- "evtr",
- ext_unit_dict,
+ f, model, u2d_shape, np.float32, "evtr", ext_unit_dict
)
else:
parm_dict = {}
@@ -366,26 +340,20 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
except:
iname = "static"
parm_dict[pname] = iname
- t = mfparbc.parameter_bcfill(
- model, u2d_shape, parm_dict, pak_parms
- )
+ t = mfparbc.parameter_bcfill(model, u2d_shape, parm_dict, pak_parms)
current_evtr = t
evtr[iper] = current_evtr
if inexdp >= 0:
if model.verbose:
print(f" loading exdp stress period {iper + 1:3d}...")
- t = Util2d.load(
- f, model, u2d_shape, np.float32, "exdp", ext_unit_dict
- )
+ t = Util2d.load(f, model, u2d_shape, np.float32, "exdp", ext_unit_dict)
current_exdp = t
exdp[iper] = current_exdp
if nevtop == 2:
if inievt >= 0:
if model.verbose:
- print(
- f" loading ievt stress period {iper + 1:3d}..."
- )
+ print(f" loading ievt stress period {iper + 1:3d}...")
t = Util2d.load(
f, model, u2d_shape, np.int32, "ievt", ext_unit_dict
)
@@ -419,9 +387,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
ext_unit_dict, filetype=ModflowEvt._ftype()
)
if ipakcb > 0:
- iu, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=ipakcb
- )
+ iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
model.add_pop_key_list(ipakcb)
# set args for unitnumber and filenames
diff --git a/flopy/modflow/mffhb.py b/flopy/modflow/mffhb.py
index 7e68a08be4..259caa8ba7 100644
--- a/flopy/modflow/mffhb.py
+++ b/flopy/modflow/mffhb.py
@@ -237,7 +237,7 @@ def __init__(
# perform some simple verification
if len(self.bdtime) != self.nbdtim:
raise ValueError(
- "bdtime has {} entries but requires " "{} entries.".format(
+ "bdtime has {} entries but requires {} entries.".format(
len(self.bdtime), self.nbdtim
)
)
@@ -250,7 +250,7 @@ def __init__(
if self.ds5.shape[0] != self.nflw:
raise ValueError(
- "dataset 5 has {} rows but requires " "{} rows.".format(
+ "dataset 5 has {} rows but requires {} rows.".format(
self.ds5.shape[0], self.nflw
)
)
@@ -261,8 +261,9 @@ def __init__(
nc += 2
if len(self.ds5.dtype.names) != nc:
raise ValueError(
- "dataset 5 has {} columns but requires "
- "{} columns.".format(len(self.ds5.dtype.names), nc)
+ "dataset 5 has {} columns but requires {} columns.".format(
+ len(self.ds5.dtype.names), nc
+ )
)
if self.nhed > 0:
@@ -272,7 +273,7 @@ def __init__(
)
if self.ds7.shape[0] != self.nhed:
raise ValueError(
- "dataset 7 has {} rows but requires " "{} rows.".format(
+ "dataset 7 has {} rows but requires {} rows.".format(
self.ds7.shape[0], self.nhed
)
)
@@ -283,8 +284,9 @@ def __init__(
nc += 2
if len(self.ds7.dtype.names) != nc:
raise ValueError(
- "dataset 7 has {} columns but requires "
- "{} columns.".format(len(self.ds7.dtype.names), nc)
+ "dataset 7 has {} columns but requires {} columns.".format(
+ len(self.ds7.dtype.names), nc
+ )
)
self.parent.add_package(self)
@@ -570,10 +572,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
for naux in range(nfhbx1):
if model.verbose:
print(f"loading fhb dataset 6a - aux {naux + 1}")
- print(
- "dataset 6a will not be preserved in "
- "the created fhb object."
- )
+ print("dataset 6a will not be preserved in the created fhb object.")
# Dataset 6a IFHBUN CNSTM IFHBPT
line = f.readline()
raw = line.strip().split()
@@ -589,10 +588,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
if model.verbose:
print(f"loading fhb dataset 6b - aux {naux + 1}")
- print(
- "dataset 6b will not be preserved in "
- "the created fhb object."
- )
+ print("dataset 6b will not be preserved in the created fhb object.")
current = np.recarray(nflw, dtype=dtype)
for n in range(nflw):
ds6b = read1d(f, np.zeros((nbdtim,)))
@@ -647,10 +643,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
for naux in range(nfhbx1):
if model.verbose:
print(f"loading fhb dataset 8a - aux {naux + 1}")
- print(
- "dataset 8a will not be preserved in "
- "the created fhb object."
- )
+ print("dataset 8a will not be preserved in the created fhb object.")
# Dataset 6a IFHBUN CNSTM IFHBPT
line = f.readline()
raw = line.strip().split()
@@ -667,10 +660,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
if model.verbose:
print(f"loading fhb dataset 8b - aux {naux + 1}")
- print(
- "dataset 8b will not be preserved in "
- "the created fhb object."
- )
+ print("dataset 8b will not be preserved in the created fhb object.")
current = np.recarray(nflw, dtype=dtype)
for n in range(nhed):
ds8b = read1d(f, np.zeros((nbdtim,)))
@@ -688,9 +678,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
ext_unit_dict, filetype=ModflowFhb._ftype()
)
if ipakcb > 0:
- iu, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=ipakcb
- )
+ iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
model.add_pop_key_list(ipakcb)
# auxiliary data are not passed to load instantiation
diff --git a/flopy/modflow/mfflwob.py b/flopy/modflow/mfflwob.py
index 1513ed3f00..fc4b2b0bb9 100644
--- a/flopy/modflow/mfflwob.py
+++ b/flopy/modflow/mfflwob.py
@@ -231,18 +231,10 @@ def __init__(
self.factor = factor
# -create empty arrays of the correct size
- self.layer = np.zeros(
- (self.nqfb, max(np.abs(self.nqclfb))), dtype="int32"
- )
- self.row = np.zeros(
- (self.nqfb, max(np.abs(self.nqclfb))), dtype="int32"
- )
- self.column = np.zeros(
- (self.nqfb, max(np.abs(self.nqclfb))), dtype="int32"
- )
- self.factor = np.zeros(
- (self.nqfb, max(np.abs(self.nqclfb))), dtype="float32"
- )
+ self.layer = np.zeros((self.nqfb, max(np.abs(self.nqclfb))), dtype="int32")
+ self.row = np.zeros((self.nqfb, max(np.abs(self.nqclfb))), dtype="int32")
+ self.column = np.zeros((self.nqfb, max(np.abs(self.nqclfb))), dtype="int32")
+ self.factor = np.zeros((self.nqfb, max(np.abs(self.nqclfb))), dtype="float32")
self.nqobfb = np.zeros((self.nqfb), dtype="int32")
self.nqclfb = np.zeros((self.nqfb), dtype="int32")
self.irefsp = np.zeros((self.nqtfb), dtype="int32")
@@ -503,9 +495,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
ext_unit_dict, filetype=ftype.upper()
)
if iufbobsv > 0:
- iu, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=iufbobsv
- )
+ iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=iufbobsv)
model.add_pop_key_list(iufbobsv)
# create ModflowFlwob object instance
diff --git a/flopy/modflow/mfgage.py b/flopy/modflow/mfgage.py
index 1b7c4b76d0..588e6dbd1f 100644
--- a/flopy/modflow/mfgage.py
+++ b/flopy/modflow/mfgage.py
@@ -131,9 +131,7 @@ def __init__(
# convert gage_data to a recarray, if necessary
if isinstance(gage_data, np.ndarray):
if not gage_data.dtype == dtype:
- gage_data = np.rec.fromarrays(
- gage_data.transpose(), dtype=dtype
- )
+ gage_data = np.rec.fromarrays(gage_data.transpose(), dtype=dtype)
elif isinstance(gage_data, pd.DataFrame):
gage_data = gage_data.to_records(index=False)
elif isinstance(gage_data, list):
@@ -159,8 +157,7 @@ def __init__(
gage_data = d
else:
raise Exception(
- "gage_data must be a numpy record array, numpy array "
- "or a list"
+ "gage_data must be a numpy record array, numpy array or a list"
)
# add gage output files to model
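
Per the branches above, gage_data is accepted as a recarray, a plain ndarray, a DataFrame, or a list. A hedged sketch using the list form; the column order (segment, reach, unit, output type) and the auto-generated output file names are assumptions, not taken from the package docs:

    import flopy

    m = flopy.modflow.Modflow("gagedemo")
    flopy.modflow.ModflowDis(m, nlay=1, nrow=3, ncol=3, nper=1)
    # one stream gage on segment 1, reach 1, written to unit 55 (assumed layout)
    gage = flopy.modflow.ModflowGage(m, numgage=1, gage_data=[[1, 1, 55, 0]])
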
@@ -347,9 +344,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
for key, value in ext_unit_dict.items():
if key == abs(iu):
model.add_pop_key_list(abs(iu))
- relpth = os.path.relpath(
- value.filename, model.model_ws
- )
+ relpth = os.path.relpath(value.filename, model.model_ws)
files.append(relpth)
break
diff --git a/flopy/modflow/mfghb.py b/flopy/modflow/mfghb.py
index 33b9e99fd7..75108f977c 100644
--- a/flopy/modflow/mfghb.py
+++ b/flopy/modflow/mfghb.py
@@ -148,9 +148,7 @@ def __init__(
if dtype is not None:
self.dtype = dtype
else:
- self.dtype = self.get_default_dtype(
- structured=self.parent.structured
- )
+ self.dtype = self.get_default_dtype(structured=self.parent.structured)
self.stress_period_data = MfList(self, stress_period_data)
def _ncells(self):
@@ -180,11 +178,7 @@ def write_file(self, check=True):
"""
if check: # allows turning off package checks when writing files at model level
- self.check(
- f=f"{self.name[0]}.chk",
- verbose=self.parent.verbose,
- level=1,
- )
+ self.check(f=f"{self.name[0]}.chk", verbose=self.parent.verbose, level=1)
f_ghb = open(self.fn_path, "w")
f_ghb.write(f"{self.heading}\n")
f_ghb.write(f"{self.stress_period_data.mxact:10d}{self.ipakcb:10d}")
diff --git a/flopy/modflow/mfgmg.py b/flopy/modflow/mfgmg.py
index 9987e42f40..14fe8796ad 100644
--- a/flopy/modflow/mfgmg.py
+++ b/flopy/modflow/mfgmg.py
@@ -272,13 +272,9 @@ def write_file(self):
f_gmg = open(self.fn_path, "w")
f_gmg.write(f"{self.heading}\n")
# dataset 0
- f_gmg.write(
- f"{self.rclose} {self.iiter} {self.hclose} {self.mxiter}\n"
- )
+ f_gmg.write(f"{self.rclose} {self.iiter} {self.hclose} {self.mxiter}\n")
# dataset 1
- f_gmg.write(
- f"{self.damp} {self.iadamp} {self.ioutgmg} {self.iunitmhc}\n"
- )
+ f_gmg.write(f"{self.damp} {self.iadamp} {self.ioutgmg} {self.iunitmhc}\n")
# dataset 2
f_gmg.write(f"{self.ism} {self.isc} ")
if self.iadamp == 2:
@@ -377,9 +373,7 @@ def load(cls, f, model, ext_unit_dict=None):
ext_unit_dict, filetype=ModflowGmg._ftype()
)
if iunitmhc > 0:
- iu, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=iunitmhc
- )
+ iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=iunitmhc)
model.add_pop_key_list(iunitmhc)
return cls(
diff --git a/flopy/modflow/mfhfb.py b/flopy/modflow/mfhfb.py
index 6d9479c132..e018f54d4b 100644
--- a/flopy/modflow/mfhfb.py
+++ b/flopy/modflow/mfhfb.py
@@ -203,9 +203,7 @@ def write_file(self):
)
)
else:
- f_hfb.write(
- "{:10d}{:10d}{:13.6g}\n".format(a[0] + 1, a[1] + 1, a[2])
- )
+ f_hfb.write("{:10d}{:10d}{:13.6g}\n".format(a[0] + 1, a[1] + 1, a[2]))
f_hfb.write(f"{self.nacthfb:10d}")
f_hfb.close()
@@ -385,9 +383,7 @@ def load(cls, f, model, ext_unit_dict=None):
# fill current parameter data (par_current)
for ibnd, t in enumerate(data_dict):
t = tuple(t)
- par_current[ibnd] = tuple(
- t[: len(par_current.dtype.names)]
- )
+ par_current[ibnd] = tuple(t[: len(par_current.dtype.names)])
# convert indices to zero-based
if structured:
diff --git a/flopy/modflow/mfhob.py b/flopy/modflow/mfhob.py
index 261e15a0fa..c2761d72d4 100644
--- a/flopy/modflow/mfhob.py
+++ b/flopy/modflow/mfhob.py
@@ -614,9 +614,7 @@ def __init__(
raise ValueError(
"sum of dataset 4 proportions must equal 1.0 - "
"sum of dataset 4 proportions = {tot} for "
- "observation name {obsname}.".format(
- tot=tot, obsname=self.obsname
- )
+ "observation name {obsname}.".format(tot=tot, obsname=self.obsname)
)
# convert passed time_series_data to a numpy array
@@ -652,8 +650,7 @@ def __init__(
names = [names]
elif not isinstance(names, list):
raise ValueError(
- "HeadObservation names must be a "
- "string or a list of strings"
+ "HeadObservation names must be a string or a list of strings"
)
if len(names) < self.nobs:
raise ValueError(
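
The two constraints enforced above — dataset 4 proportions summing to 1.0 and names given as a string or a list of strings at least nobs long — combine as in this hedged sketch of a multilayer head observation (values illustrative):

    import flopy

    m = flopy.modflow.Modflow("hobdemo")
    flopy.modflow.ModflowDis(m, nlay=2, nrow=3, ncol=3, nper=1, perlen=10.0)
    obs = flopy.modflow.HeadObservation(
        m,
        obsname="well1",
        row=1,
        column=1,
        mlay={0: 0.6, 1: 0.4},  # dataset 4 proportions; must sum to 1.0
        time_series_data=[[1.0, 10.5], [5.0, 10.1]],
        names=["well1.1", "well1.2"],  # one name per observation time
    )
    hob = flopy.modflow.ModflowHob(m, iuhobsv=51, obs_data=[obs])
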
diff --git a/flopy/modflow/mfhyd.py b/flopy/modflow/mfhyd.py
index 29af549886..56e9da4c94 100644
--- a/flopy/modflow/mfhyd.py
+++ b/flopy/modflow/mfhyd.py
@@ -333,9 +333,7 @@ def load(cls, f, model, ext_unit_dict=None):
ext_unit_dict, filetype=ModflowHyd._ftype()
)
if ihydun > 0:
- iu, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=ihydun
- )
+ iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ihydun)
model.add_pop_key_list(ihydun)
# return hyd instance
diff --git a/flopy/modflow/mflak.py b/flopy/modflow/mflak.py
index 1491119e7f..e6bb4d0b70 100644
--- a/flopy/modflow/mflak.py
+++ b/flopy/modflow/mflak.py
@@ -376,7 +376,7 @@ def __init__(
if self.dis.steady[0]:
if stage_range.shape != (nlakes, 2):
raise Exception(
- "stages shape should be ({},2) but is only " "{}.".format(
+ "stages shape should be ({},2) but is only {}.".format(
nlakes, stage_range.shape
)
)
@@ -514,15 +514,9 @@ def write_file(self):
if self.tabdata:
ipos.append(5)
t.append(self.iunit_tab[n])
- f.write(
- write_fixed_var(
- t, ipos=ipos, free=self.parent.free_format_input
- )
- )
+ f.write(write_fixed_var(t, ipos=ipos, free=self.parent.free_format_input))
- ds8_keys = (
- list(self.sill_data.keys()) if self.sill_data is not None else []
- )
+ ds8_keys = list(self.sill_data.keys()) if self.sill_data is not None else []
ds9_keys = list(self.flux_data.keys())
nper = self.dis.steady.shape[0]
for kper in range(nper):
@@ -541,9 +535,7 @@ def write_file(self):
t = [itmp, itmp2, tmplwrt]
comment = f"Stress period {kper + 1}"
f.write(
- write_fixed_var(
- t, free=self.parent.free_format_input, comment=comment
- )
+ write_fixed_var(t, free=self.parent.free_format_input, comment=comment)
)
if itmp > 0:
@@ -730,9 +722,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
lwrt = []
for iper in range(nper):
if model.verbose:
- print(
- f" reading lak dataset 4 - for stress period {iper + 1}"
- )
+ print(f" reading lak dataset 4 - for stress period {iper + 1}")
line = f.readline().rstrip()
if model.array_free_format:
t = line.split()
@@ -743,34 +733,23 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
if itmp > 0:
if model.verbose:
- print(
- f" reading lak dataset 5 - for stress period {iper + 1}"
- )
+ print(f" reading lak dataset 5 - for stress period {iper + 1}")
name = f"LKARR_StressPeriod_{iper}"
lakarr = Util3d.load(
f, model, (nlay, nrow, ncol), np.int32, name, ext_unit_dict
)
if model.verbose:
- print(
- f" reading lak dataset 6 - for stress period {iper + 1}"
- )
+ print(f" reading lak dataset 6 - for stress period {iper + 1}")
name = f"BDLKNC_StressPeriod_{iper}"
bdlknc = Util3d.load(
- f,
- model,
- (nlay, nrow, ncol),
- np.float32,
- name,
- ext_unit_dict,
+ f, model, (nlay, nrow, ncol), np.float32, name, ext_unit_dict
)
lake_loc[iper] = lakarr
lake_lknc[iper] = bdlknc
if model.verbose:
- print(
- f" reading lak dataset 7 - for stress period {iper + 1}"
- )
+ print(f" reading lak dataset 7 - for stress period {iper + 1}")
line = f.readline().rstrip()
t = line.split()
nslms = int(t[0])
@@ -803,9 +782,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
sill_data[iper] = ds8
if itmp1 >= 0:
if model.verbose:
- print(
- f" reading lak dataset 9 - for stress period {iper + 1}"
- )
+ print(f" reading lak dataset 9 - for stress period {iper + 1}")
ds9 = {}
for n in range(nlakes):
line = f.readline().rstrip()
@@ -853,9 +830,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
ext_unit_dict, filetype=ModflowLak._ftype()
)
if ipakcb > 0:
- iu, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=ipakcb
- )
+ iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
model.add_pop_key_list(ipakcb)
ipos = 2
diff --git a/flopy/modflow/mflpf.py b/flopy/modflow/mflpf.py
index 8dd6fd33b5..7473cdcfc4 100644
--- a/flopy/modflow/mflpf.py
+++ b/flopy/modflow/mflpf.py
@@ -237,9 +237,7 @@ def __init__(
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
# item 1
- self.hdry = (
- hdry # Head in cells that are converted to dry during a simulation
- )
+ self.hdry = hdry # Head in cells that are converted to dry during a simulation
self.nplpf = 0 # number of LPF parameters
self.ikcflag = 0 # 1 and -1 are not supported.
self.laytyp = Util2d(model, (nlay,), np.int32, laytyp, name="laytyp")
@@ -351,11 +349,7 @@ def write_file(self, check=True, f=None):
"""
# allows turning off package checks when writing files at model level
if check:
- self.check(
- f=f"{self.name[0]}.chk",
- verbose=self.parent.verbose,
- level=1,
- )
+ self.check(f=f"{self.name[0]}.chk", verbose=self.parent.verbose, level=1)
# get model information
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
@@ -374,11 +368,7 @@ def write_file(self, check=True, f=None):
if self.parent.version == "mfusg" and not self.parent.structured:
f.write(
"{:10d}{:10.6G}{:10d}{:10d} {:s}\n".format(
- self.ipakcb,
- self.hdry,
- self.nplpf,
- self.ikcflag,
- self.options,
+ self.ipakcb, self.hdry, self.nplpf, self.ikcflag, self.options
)
)
else:
@@ -568,9 +558,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
if model.verbose:
print(f" loading hk layer {k + 1:3d}...")
if "hk" not in par_types:
- t = Util2d.load(
- f, model, (nrow, ncol), np.float32, "hk", ext_unit_dict
- )
+ t = Util2d.load(f, model, (nrow, ncol), np.float32, "hk", ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(
@@ -584,12 +572,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
print(f" loading hani layer {k + 1:3d}...")
if "hani" not in par_types:
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- "hani",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, "hani", ext_unit_dict
)
else:
line = f.readline()
@@ -605,9 +588,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
if layvka[k] != 0:
key = "vani"
if "vk" not in par_types and "vani" not in par_types:
- t = Util2d.load(
- f, model, (nrow, ncol), np.float32, key, ext_unit_dict
- )
+ t = Util2d.load(f, model, (nrow, ncol), np.float32, key, ext_unit_dict)
else:
line = f.readline()
key = "vk"
@@ -640,12 +621,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
print(f" loading sy layer {k + 1:3d}...")
if "sy" not in par_types:
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- "sy",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, "sy", ext_unit_dict
)
else:
line = f.readline()
@@ -660,12 +636,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
print(f" loading vkcb layer {k + 1:3d}...")
if "vkcb" not in par_types:
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- "vkcb",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, "vkcb", ext_unit_dict
)
else:
line = f.readline()
@@ -694,9 +665,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
ext_unit_dict, filetype=ModflowLpf._ftype()
)
if ipakcb > 0:
- iu, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=ipakcb
- )
+ iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
model.add_pop_key_list(ipakcb)
# create instance of lpf class
@@ -729,11 +698,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
filenames=filenames,
)
if check:
- lpf.check(
- f=f"{lpf.name[0]}.chk",
- verbose=lpf.parent.verbose,
- level=0,
- )
+ lpf.check(f=f"{lpf.name[0]}.chk", verbose=lpf.parent.verbose, level=0)
return lpf
@staticmethod
diff --git a/flopy/modflow/mfmlt.py b/flopy/modflow/mfmlt.py
index 69413dffc1..b2fce78afc 100644
--- a/flopy/modflow/mfmlt.py
+++ b/flopy/modflow/mfmlt.py
@@ -204,10 +204,7 @@ def load(cls, f, model, nrow=None, ncol=None, ext_unit_dict=None):
)
return cls(
- model,
- mult_dict=mult_dict,
- unitnumber=unitnumber,
- filenames=filenames,
+ model, mult_dict=mult_dict, unitnumber=unitnumber, filenames=filenames
)
@staticmethod
diff --git a/flopy/modflow/mfmnw1.py b/flopy/modflow/mfmnw1.py
index d18c883b85..f16e901916 100644
--- a/flopy/modflow/mfmnw1.py
+++ b/flopy/modflow/mfmnw1.py
@@ -107,42 +107,38 @@ def __init__(
# call base package constructor
super().__init__(
- model,
- extension,
- self._ftype(),
- unitnumber,
- filenames=filenames[0],
+ model, extension, self._ftype(), unitnumber, filenames=filenames[0]
)
self.url = "mnw.html"
self.nper = self.parent.nrow_ncol_nlay_nper[-1]
self._generate_heading()
- self.mxmnw = (
- mxmnw # -maximum number of multi-node wells to be simulated
- )
+ self.mxmnw = mxmnw # -maximum number of multi-node wells to be simulated
self.iwelpt = iwelpt # -verbosity flag
- self.nomoiter = nomoiter # -integer indicating the number of iterations for which flow in MNW wells is calculated
- self.kspref = kspref # -alphanumeric key indicating which set of water levels are to be used as reference values for calculating drawdown
- self.losstype = (
- losstype # -string indicating head loss type for each well
- )
- self.wel1_bynode_qsum = wel1_bynode_qsum # -nested list containing file names, unit numbers, and ALLTIME flag for auxiliary output, e.g. [['test.ByNode',92,'ALLTIME']]
+ # integer indicating the number of iterations for which flow in MNW wells
+ # is calculated
+ self.nomoiter = nomoiter
+ # alphanumeric key indicating which set of water levels are to be used as
+ # reference values for calculating drawdown
+ self.kspref = kspref
+ self.losstype = losstype # -string indicating head loss type for each well
+ # nested list containing file names, unit numbers, and ALLTIME flag for
+ # auxiliary output, e.g. [['test.ByNode',92,'ALLTIME']]
+ self.wel1_bynode_qsum = wel1_bynode_qsum
if dtype is not None:
self.dtype = dtype
else:
- self.dtype = self.get_default_dtype(
- structured=self.parent.structured
- )
+ self.dtype = self.get_default_dtype(structured=self.parent.structured)
self.stress_period_data = MfList(self, stress_period_data)
- self.mnwname = mnwname # -string prefix name of file for outputting time series data from MNW1
+ # string prefix name of file for outputting time series data from MNW1
+ self.mnwname = mnwname
# -input format checks:
lossTypes = ["skin", "linear", "nonlinear"]
- assert self.losstype.lower() in lossTypes, (
- "LOSSTYPE (%s) must be one of the following: skin, linear, nonlinear"
- % (self.losstype)
- )
+ assert (
+ self.losstype.lower() in lossTypes
+ ), f"LOSSTYPE ({self.losstype}) must be one of the following: {lossTypes}"
self.parent.add_package(self)
@staticmethod
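
A minimal sketch of the constructor contract asserted above: losstype drawn from skin/linear/nonlinear, and wel1_bynode_qsum as the nested [filename, unit(, 'ALLTIME')] list described in the comment. Stress period data is omitted here for brevity:

    import flopy

    m = flopy.modflow.Modflow("mnw1demo")
    flopy.modflow.ModflowDis(m, nlay=1, nrow=5, ncol=5, nper=1)
    mnw1 = flopy.modflow.ModflowMnw1(
        m,
        mxmnw=1,
        losstype="skin",  # must be one of "skin", "linear", "nonlinear"
        wel1_bynode_qsum=[["mnw1demo.ByNode", 92, "ALLTIME"]],
    )
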
@@ -187,9 +183,8 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):
structured = model.structured
if nper is None:
nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
- nper = (
- 1 if nper == 0 else nper
- ) # otherwise iterations from 0, nper won't run
+ nper = 1 if nper == 0 else nper
+            # if nper == 0, the stress-period loop would never run
openfile = not hasattr(f, "read")
if openfile:
@@ -272,22 +267,16 @@ def write_file(self):
f = open(self.fn_path, "w")
# -write header
- f.write("%s\n" % self.heading)
+ f.write(f"{self.heading}\n")
# -Section 1 - MXMNW ipakcb IWELPT NOMOITER REF:kspref
f.write(
"%10i%10i%10i%10i REF = %s\n"
- % (
- self.mxmnw,
- self.ipakcb,
- self.iwelpt,
- self.nomoiter,
- self.kspref,
- )
+ % (self.mxmnw, self.ipakcb, self.iwelpt, self.nomoiter, self.kspref)
)
# -Section 2 - LOSSTYPE {PLossMNW}
- f.write("%s\n" % (self.losstype))
+ f.write(f"{self.losstype}\n")
if self.wel1_bynode_qsum is not None:
# -Section 3a - {FILE:filename WEL1:iunw1}
@@ -299,9 +288,7 @@ def write_file(self):
for each in self.wel1_bynode_qsum:
if each[0].split(".")[1].lower() == "bynode":
if len(each) == 2:
- f.write(
- "FILE:%s BYNODE:%-10i\n" % (each[0], int(each[1]))
- )
+ f.write("FILE:%s BYNODE:%-10i\n" % (each[0], int(each[1])))
elif len(each) == 3:
f.write(
"FILE:%s BYNODE:%-10i %s\n"
@@ -312,22 +299,20 @@ def write_file(self):
for each in self.wel1_bynode_qsum:
if each[0].split(".")[1].lower() == "qsum":
if len(each) == 2:
- f.write(
- "FILE:%s QSUM:%-10i\n" % (each[0], int(each[1]))
- )
+ f.write("FILE:%s QSUM:%-10i\n" % (each[0], int(each[1])))
elif len(each) == 3:
f.write(
- "FILE:%s QSUM:%-10i %s\n"
- % (each[0], int(each[1]), each[2])
+ "FILE:%s QSUM:%-10i %s\n" % (each[0], int(each[1]), each[2])
)
spd = self.stress_period_data.drop("mnw_no")
- # force write_transient to keep the list arrays internal because MNW1 doesn't allow open/close
+ # force write_transient to keep the list arrays internal because MNW1
+ # doesn't allow open/close
spd.write_transient(f, forceInternal=True)
# -Un-numbered section PREFIX:MNWNAME
if self.mnwname:
- f.write("PREFIX:%s\n" % (self.mnwname))
+ f.write(f"PREFIX:{self.mnwname}\n")
f.close()
@@ -385,9 +370,7 @@ def getitem(line, txt):
return items
-def _parse_5(
- f, itmp, qfrcmn_default=None, qfrcmx_default=None, qcut_default=""
-):
+def _parse_5(f, itmp, qfrcmn_default=None, qfrcmx_default=None, qcut_default=""):
data = []
mnw_no = 0
mn = False
@@ -453,11 +436,11 @@ def _parse_5(
qfrcmn = 0.0
qfrcmx = 0.0
if "qcut" in linetxt:
- txt = [t for t in line if "qcut" in t][0]
+ txt = next(t for t in line if "qcut" in t)
qcut = txt
line.remove(txt)
elif "%cut" in linetxt:
- txt = [t for t in line if "%cut" in t][0]
+ txt = next(t for t in line if "%cut" in t)
qcut = txt
line.remove(txt)
if "qcut" in linetxt or "%cut" in linetxt:
diff --git a/flopy/modflow/mfmnw2.py b/flopy/modflow/mfmnw2.py
index 1ba0214968..c65bc4d0c8 100644
--- a/flopy/modflow/mfmnw2.py
+++ b/flopy/modflow/mfmnw2.py
@@ -168,8 +168,8 @@ class Mnw:
When writing non-dataset 2d variables to MNW2 input, the first value
for the well will be used.
- Other variables (e.g. hlim) can be entered here as
- constant for all stress periods, or by stress period below in stress_period_data.
+ Other variables (e.g. hlim) can be entered here as constant for all
+ stress periods, or by stress period below in stress_period_data.
See MNW2 input instructions for more details.
Columns are:
@@ -492,9 +492,7 @@ def make_node_data(self):
"""
nnodes = self.nnodes
- node_data = ModflowMnw2.get_empty_node_data(
- np.abs(nnodes), aux_names=self.aux
- )
+ node_data = ModflowMnw2.get_empty_node_data(np.abs(nnodes), aux_names=self.aux)
names = Mnw.get_item2_names(self)
for n in names:
@@ -563,8 +561,7 @@ def get_default_spd_dtype(structured=True):
)
else:
raise NotImplementedError(
- "Mnw2: get_default_spd_dtype not implemented for "
- "unstructured grids"
+ "Mnw2: get_default_spd_dtype not implemented for unstructured grids"
)
@staticmethod
@@ -609,14 +606,7 @@ def get_item2_names(mnw2obj=None, node_data=None):
names += ["k"]
if nnodes < 0:
names += ["ztop", "zbotm"]
- names += [
- "wellid",
- "losstype",
- "pumploc",
- "qlimit",
- "ppflag",
- "pumpcap",
- ]
+ names += ["wellid", "losstype", "pumploc", "qlimit", "ppflag", "pumpcap"]
if losstype.lower() == "thiem":
names += ["rw"]
elif losstype.lower() == "skin":
@@ -768,11 +758,7 @@ def _write_2(self, f_mnw, float_format=" {:15.7E}", indent=12):
fmt = indent + "{} {:.0f} {:.0f} {:.0f} {:.0f}\n"
f_mnw.write(
fmt.format(
- self.losstype,
- self.pumploc,
- self.qlimit,
- self.ppflag,
- self.pumpcap,
+ self.losstype, self.pumploc, self.qlimit, self.ppflag, self.pumpcap
)
)
@@ -821,9 +807,7 @@ def _getloc(n):
def _getloc(n):
"""Output for dataset 2d2."""
- fmt = (
- indent + "{0} {0} ".format(float_format) + "{:.0f} {:.0f}"
- )
+ fmt = indent + "{0} {0} ".format(float_format) + "{:.0f} {:.0f}"
return fmt.format(
self.node_data.ztop[n],
self.node_data.zbotm[n],
@@ -863,9 +847,7 @@ def _getloc(n):
# dataset 2g
if self.pumpcap > 0:
fmt = indent + "{0} {0} {0} {0}\n".format(float_format)
- f_mnw.write(
- fmt.format(self.hlift, self.liftq0, self.liftqmax, self.hwtol)
- )
+ f_mnw.write(fmt.format(self.hlift, self.liftq0, self.liftqmax, self.hwtol))
# dataset 2h
if self.pumpcap > 0:
fmt = indent + "{0} {0}\n".format(float_format)
@@ -1021,9 +1003,8 @@ def __init__(
self.url = "mnw2.html"
self.nper = self.parent.nrow_ncol_nlay_nper[-1]
- self.nper = (
- 1 if self.nper == 0 else self.nper
- ) # otherwise iterations from 0, nper won't run
+ self.nper = 1 if self.nper == 0 else self.nper
+        # if nper == 0, the stress-period loop would never run
self.structured = self.parent.structured
# Dataset 0
@@ -1042,18 +1023,12 @@ def __init__(
if node_data is not None:
if isinstance(node_data, pd.DataFrame):
node_data = node_data.to_records(index=False)
- self.node_data = self.get_empty_node_data(
- len(node_data), aux_names=aux
- )
+ self.node_data = self.get_empty_node_data(len(node_data), aux_names=aux)
names = [
- n
- for n in node_data.dtype.names
- if n in self.node_data.dtype.names
+ n for n in node_data.dtype.names if n in self.node_data.dtype.names
]
for n in names:
- self.node_data[n] = node_data[
- n
- ] # recarray of Mnw properties by node
+ self.node_data[n] = node_data[n] # recarray of Mnw properties by node
self.nodtot = len(self.node_data)
self._sort_node_data()
@@ -1070,15 +1045,11 @@ def __init__(
)
if stress_period_data is not None:
stress_period_data = {
- per: sp.to_records(index=False)
- if isinstance(sp, pd.DataFrame)
- else sp
+ per: sp.to_records(index=False) if isinstance(sp, pd.DataFrame) else sp
for per, sp in stress_period_data.items()
}
for per, data in stress_period_data.items():
- spd = ModflowMnw2.get_empty_stress_period_data(
- len(data), aux_names=aux
- )
+ spd = ModflowMnw2.get_empty_stress_period_data(len(data), aux_names=aux)
names = [n for n in data.dtype.names if n in spd.dtype.names]
for n in names:
spd[n] = data[n]
@@ -1102,7 +1073,7 @@ def __init__(
if (
"k"
not in stress_period_data[
- list(stress_period_data.keys())[0]
+ next(iter(stress_period_data.keys()))
].dtype.names
):
self._add_kij_to_stress_period_data()
@@ -1155,9 +1126,7 @@ def get_empty_node_data(
dtype = ModflowMnw2.get_default_node_dtype(structured=structured)
if aux_names is not None:
dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
- return create_empty_recarray(
- maxnodes, dtype, default_value=default_value
- )
+ return create_empty_recarray(maxnodes, dtype, default_value=default_value)
@staticmethod
def get_default_node_dtype(structured=True):
@@ -1319,9 +1288,8 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):
structured = model.structured
if nper is None:
nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
- nper = (
- 1 if nper == 0 else nper
- ) # otherwise iterations from 0, nper won't run
+ nper = 1 if nper == 0 else nper
+            # if nper == 0, the stress-period loop would never run
openfile = not hasattr(f, "read")
if openfile:
@@ -1348,11 +1316,10 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):
)
mnw[mnwobj.wellid] = mnwobj
# master table with all node data
- node_data = np.append(node_data, mnwobj.node_data).view(
- np.recarray
- )
+ node_data = np.append(node_data, mnwobj.node_data).view(np.recarray)
- stress_period_data = {} # stress period data table for package (flopy convention)
+ # stress period data table for package (flopy convention)
+ stress_period_data = {}
itmp = []
for per in range(0, nper):
# dataset 3
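
The load path above rebuilds two structures: a master node_data recarray and, per the comment, a stress_period_data dict keyed by zero-based stress period (the flopy convention). A rough sketch of assembling the same structures by hand with the empty-recarray helpers shown in this file; the exact columns required for a given losstype are an assumption here and follow the MNW2 input instructions:

    import flopy

    m = flopy.modflow.Modflow("mnw2demo")
    flopy.modflow.ModflowDis(m, nlay=3, nrow=5, ncol=5, nper=2)

    node_data = flopy.modflow.ModflowMnw2.get_empty_node_data(2)
    node_data["wellid"] = "well1"
    node_data["k"] = [0, 1]  # two nodes in layers 1-2
    node_data["i"] = 2
    node_data["j"] = 2
    node_data["losstype"] = "thiem"
    node_data["rw"] = 0.1

    spd = flopy.modflow.ModflowMnw2.get_empty_stress_period_data(1)
    spd["wellid"] = "well1"
    spd["qdes"] = -1000.0
    mnw2 = flopy.modflow.ModflowMnw2(
        m,
        mnwmax=1,
        node_data=node_data,
        stress_period_data={0: spd, 1: spd},  # dict keyed by stress period
        itmp=[1, 1],
    )
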
@@ -1369,24 +1336,13 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):
)
hlim, qcut, qfrcmn, qfrcmx = 0, 0, 0, 0
if mnw[wellid].qlimit < 0:
- hlim, qcut, qfrcmn, qfrcmx = _parse_4b(
- get_next_line(f)
- )
+ hlim, qcut, qfrcmn, qfrcmx = _parse_4b(get_next_line(f))
# update package stress period data table
ndw = node_data[node_data.wellid == wellid]
kij = [ndw.k[0], ndw.i[0], ndw.j[0]]
current_4[i] = tuple(
kij
- + [
- wellid,
- qdes,
- capmult,
- cprime,
- hlim,
- qcut,
- qfrcmn,
- qfrcmx,
- ]
+ + [wellid, qdes, capmult, cprime, hlim, qcut, qfrcmn, qfrcmx]
+ xyz
)
# update well stress period data table
@@ -1401,9 +1357,9 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):
pass
else:
# copy pumping rates from previous stress period
- mnw[wellid].stress_period_data[per] = mnw[
- wellid
- ].stress_period_data[per - 1]
+ mnw[wellid].stress_period_data[per] = mnw[wellid].stress_period_data[
+ per - 1
+ ]
itmp.append(itmp_per)
if openfile:
@@ -1480,9 +1436,7 @@ def check(self, f=None, verbose=True, level=1, checktype=None):
if np.any(invalid_itmp):
for v in np.array(self.itmp)[invalid_itmp]:
chk._add_to_summary(
- type="Error",
- value=v,
- desc="Itmp value greater than MNWMAX",
+ type="Error", value=v, desc="Itmp value greater than MNWMAX"
)
chk.summarize()
@@ -1547,9 +1501,7 @@ def make_mnw_objects(self):
for wellid in mnws:
nd = node_data[node_data.wellid == wellid]
nnodes = Mnw.get_nnodes(nd)
- mnwspd = Mnw.get_empty_stress_period_data(
- self.nper, aux_names=self.aux
- )
+ mnwspd = Mnw.get_empty_stress_period_data(self.nper, aux_names=self.aux)
for per, itmp in enumerate(self.itmp):
inds = stress_period_data[per].wellid == wellid
if itmp > 0 and np.any(inds):
@@ -1625,10 +1577,8 @@ def make_stress_period_data(self, mnwobjs):
stress_period_data = {}
for per, itmp in enumerate(self.itmp):
if itmp > 0:
- stress_period_data[per] = (
- ModflowMnw2.get_empty_stress_period_data(
- itmp, aux_names=self.aux
- )
+ stress_period_data[per] = ModflowMnw2.get_empty_stress_period_data(
+ itmp, aux_names=self.aux
)
i = 0
for mnw in mnwobjs:
@@ -1643,9 +1593,9 @@ def make_stress_period_data(self, mnwobjs):
]
stress_period_data[per]["wellid"][i - 1] = mnw.wellid
for n in names:
- stress_period_data[per][n][i - 1] = (
- mnw.stress_period_data[n][per]
- )
+ stress_period_data[per][n][i - 1] = mnw.stress_period_data[
+ n
+ ][per]
stress_period_data[per].sort(order="wellid")
if i < itmp:
raise ItmpError(itmp, i)
@@ -1688,14 +1638,16 @@ def export(self, f, **kwargs):
for per in self.stress_period_data.data.keys():
for col in todrop:
inds = self.stress_period_data[per].wellid == wellid
- self.stress_period_data[per][col][inds] = (
- self.node_data[wellnd][col]
- )
+ self.stress_period_data[per][col][inds] = self.node_data[
+ wellnd
+ ][col]
self.node_data_MfList = self.node_data_MfList.drop(todrop)
"""
todrop = {'qfrcmx', 'qfrcmn'}
names = list(set(self.stress_period_data.dtype.names).difference(todrop))
- dtype = np.dtype([(k, d) for k, d in self.stress_period_data.dtype.descr if k not in todrop])
+ dtype = np.dtype(
+ [(k, d) for k, d in self.stress_period_data.dtype.descr if k not in todrop]
+ )
spd = {}
for k, v in self.stress_period_data.data.items():
newarr = np.array(np.zeros_like(self.stress_period_data[k][names]),
@@ -1731,9 +1683,7 @@ def _write_1(self, f_mnw):
f_mnw.write(f" aux {abc}")
f_mnw.write("\n")
- def write_file(
- self, filename=None, float_format=" {:15.7E}", use_tables=True
- ):
+ def write_file(self, filename=None, float_format=" {:15.7E}", use_tables=True):
"""
Write the package file.
@@ -1768,9 +1718,7 @@ def write_file(
# need a method that assigns attributes from table to objects!
# call make_mnw_objects?? (table is definitive then)
if use_tables:
- mnws = np.unique(
- self.node_data.wellid
- ).tolist() # preserve any order
+ mnws = np.unique(self.node_data.wellid).tolist() # preserve any order
else:
mnws = self.mnw.values()
for k in mnws:
@@ -1789,36 +1737,28 @@ def write_file(
if self.mnw[wellid].pumpcap > 0:
fmt = " " + float_format
f_mnw.write(
- fmt.format(
- *self.stress_period_data[per].capmult[n]
- )
+ fmt.format(*self.stress_period_data[per].capmult[n])
)
if qdes > 0 and self.gwt:
- f_mnw.write(
- fmt.format(*self.stress_period_data[per].cprime[n])
- )
+ f_mnw.write(fmt.format(*self.stress_period_data[per].cprime[n]))
if len(self.aux) > 0:
for var in self.aux:
fmt = " " + float_format
f_mnw.write(
- fmt.format(
- *self.stress_period_data[per][var][n]
- )
+ fmt.format(*self.stress_period_data[per][var][n])
)
f_mnw.write("\n")
if self.mnw[wellid].qlimit < 0:
- hlim, qcut = self.stress_period_data[per][
- ["hlim", "qcut"]
- ][n]
+ hlim, qcut = self.stress_period_data[per][["hlim", "qcut"]][n]
fmt = float_format + " {:.0f}"
f_mnw.write(fmt.format(hlim, qcut))
if qcut != 0:
fmt = " {} {}".format(float_format)
f_mnw.write(
fmt.format(
- *self.stress_period_data[per][
- ["qfrcmn", "qfrcmx"]
- ][n]
+ *self.stress_period_data[per][["qfrcmn", "qfrcmx"]][
+ n
+ ]
)
)
f_mnw.write("\n")
@@ -1854,9 +1794,7 @@ def _parse_1(line):
option = [] # aux names
if len(line) > 0:
option += [
- line[i]
- for i in np.arange(1, len(line))
- if "aux" in line[i - 1].lower()
+ line[i] for i in np.arange(1, len(line)) if "aux" in line[i - 1].lower()
]
return mnwmax, nodtot, ipakcb, mnwprint, option
@@ -1910,9 +1848,7 @@ def _parse_2(f):
d2dw = dict(zip(["rw", "rskin", "kskin", "B", "C", "P", "cwc"], [0] * 7))
if losstype.lower() != "none":
# update d2dw items
- d2dw.update(
- _parse_2c(get_next_line(f), losstype)
- ) # dict of values for well
+ d2dw.update(_parse_2c(get_next_line(f), losstype)) # dict of values for well
for k, v in d2dw.items():
if v > 0:
d2d[k].append(v)
@@ -2038,9 +1974,7 @@ def _parse_2(f):
)
-def _parse_2c(
- line, losstype, rw=-1, rskin=-1, kskin=-1, B=-1, C=-1, P=-1, cwc=-1
-):
+def _parse_2c(line, losstype, rw=-1, rskin=-1, kskin=-1, B=-1, C=-1, P=-1, cwc=-1):
"""
Parameters
diff --git a/flopy/modflow/mfmnwi.py b/flopy/modflow/mfmnwi.py
index a0fbdb89d0..1eb1ca8dba 100644
--- a/flopy/modflow/mfmnwi.py
+++ b/flopy/modflow/mfmnwi.py
@@ -151,20 +151,12 @@ def __init__(
self.mnwobs = mnwobs
# list of lists containing wells and related information to be
# output (length = [MNWOBS][4or5])
- self.wellid_unit_qndflag_qhbflag_concflag = (
- wellid_unit_qndflag_qhbflag_concflag
- )
+ self.wellid_unit_qndflag_qhbflag_concflag = wellid_unit_qndflag_qhbflag_concflag
# -input format checks:
- assert (
- self.wel1flag >= 0
- ), "WEL1flag must be greater than or equal to zero."
- assert (
- self.qsumflag >= 0
- ), "QSUMflag must be greater than or equal to zero."
- assert (
- self.byndflag >= 0
- ), "BYNDflag must be greater than or equal to zero."
+ assert self.wel1flag >= 0, "WEL1flag must be greater than or equal to zero."
+ assert self.qsumflag >= 0, "QSUMflag must be greater than or equal to zero."
+ assert self.byndflag >= 0, "BYNDflag must be greater than or equal to zero."
if len(self.wellid_unit_qndflag_qhbflag_concflag) != self.mnwobs:
print(
@@ -234,22 +226,14 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):
ext_unit_dict, filetype=ModflowMnwi._ftype()
)
if wel1flag > 0:
- iu, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=wel1flag
- )
+ iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=wel1flag)
if qsumflag > 0:
- iu, filenames[2] = model.get_ext_dict_attr(
- ext_unit_dict, unit=qsumflag
- )
+ iu, filenames[2] = model.get_ext_dict_attr(ext_unit_dict, unit=qsumflag)
if byndflag > 0:
- iu, filenames[3] = model.get_ext_dict_attr(
- ext_unit_dict, unit=byndflag
- )
+ iu, filenames[3] = model.get_ext_dict_attr(ext_unit_dict, unit=byndflag)
idx = 4
for unit in unique_units:
- iu, filenames[idx] = model.get_ext_dict_attr(
- ext_unit_dict, unit=unit
- )
+ iu, filenames[idx] = model.get_ext_dict_attr(ext_unit_dict, unit=unit)
idx += 1
return cls(
@@ -331,12 +315,8 @@ def write_file(self):
unit = t[1]
qndflag = t[2]
qhbflag = t[3]
- assert (
- qndflag >= 0
- ), "QNDflag must be greater than or equal to zero."
- assert (
- qhbflag >= 0
- ), "QHBflag must be greater than or equal to zero."
+ assert qndflag >= 0, "QNDflag must be greater than or equal to zero."
+ assert qhbflag >= 0, "QHBflag must be greater than or equal to zero."
line = f"{wellid:20s} "
line += f"{unit:5d} "
line += f"{qndflag:5d} "
diff --git a/flopy/modflow/mfnwt.py b/flopy/modflow/mfnwt.py
index 52b3e4ddb3..80285b8d9d 100644
--- a/flopy/modflow/mfnwt.py
+++ b/flopy/modflow/mfnwt.py
@@ -136,8 +136,8 @@ class ModflowNwt(Package):
(GMRES) is the number of iterations between restarts of the GMRES
Solver. (default is 15).
iacl : int
- (XMD) is a flag for the acceleration method: 0 is conjugate gradient, 1 is ORTHOMIN,
- 2 is Bi-CGSTAB. (default is 2).
+ (XMD) is a flag for the acceleration method: 0 is conjugate gradient,
+ 1 is ORTHOMIN, 2 is Bi-CGSTAB. (default is 2).
norder : int
(XMD) is a flag for the scheme of ordering the unknowns: 0 is original
ordering, 1 is RCM ordering, 2 is Minimum Degree ordering.
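
For context, iacl and norder only apply when the XMD linear solver is selected; a minimal sketch with the documented defaults, assuming a MODFLOW-NWT model:

    import flopy

    m = flopy.modflow.Modflow("nwtdemo", version="mfnwt")
    # linmeth=2 selects XMD, so iacl (acceleration) and norder (ordering) apply
    nwt = flopy.modflow.ModflowNwt(m, linmeth=2, iacl=2, norder=0)
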
@@ -403,9 +403,7 @@ def load(cls, f, model, ext_unit_dict=None):
# dataset 0 -- header
flines = [
- line.strip()
- for line in f.readlines()
- if not line.strip().startswith("#")
+ line.strip() for line in f.readlines() if not line.strip().startswith("#")
]
if openfile:
diff --git a/flopy/modflow/mfoc.py b/flopy/modflow/mfoc.py
index b98f38ac71..e17b4be302 100644
--- a/flopy/modflow/mfoc.py
+++ b/flopy/modflow/mfoc.py
@@ -338,9 +338,7 @@ def check(self, f=None, verbose=True, level=1, checktype=None):
if dis is None:
dis = self.parent.get_package("DISU")
if dis is None:
- chk._add_to_summary(
- "Error", package="OC", desc="DIS package not available"
- )
+ chk._add_to_summary("Error", package="OC", desc="DIS package not available")
else:
# generate possible actions expected
expected_actions = []
@@ -569,9 +567,7 @@ def reset_budgetunit(self, budgetunit=None, fname=None):
for pp in self.parent.packagelist:
if hasattr(pp, "ipakcb"):
pp.ipakcb = self.iubud
- self.parent.add_output_file(
- pp.ipakcb, fname=fname, package=pp.name
- )
+ self.parent.add_output_file(pp.ipakcb, fname=fname, package=pp.name)
return
@@ -689,9 +685,7 @@ def get_ocoutput_units(f, ext_unit_dict=None):
return ihedun, fhead, iddnun, fddn
@classmethod
- def load(
- cls, f, model, nper=None, nstp=None, nlay=None, ext_unit_dict=None
- ):
+ def load(cls, f, model, nper=None, nstp=None, nlay=None, ext_unit_dict=None):
"""
Load an existing package.
@@ -888,7 +882,8 @@ def load(
if line[0] == "#":
continue
- # added by JJS 12/12/14 to avoid error when there is a blank line in the OC file
+ # added by JJS 12/12/14 to avoid error when there is a
+ # blank line in the OC file
if lnlst == []:
continue
# end add
diff --git a/flopy/modflow/mfpar.py b/flopy/modflow/mfpar.py
index de4cea6c4a..e17bbc973f 100644
--- a/flopy/modflow/mfpar.py
+++ b/flopy/modflow/mfpar.py
@@ -284,8 +284,8 @@ def parameter_fill(model, shape, findkey, parm_dict, findlayer=None):
for lpf and upw:
- >>> data = flopy.modflow.mfpar.ModflowPar.parameter_fill(m, (nrow, ncol), 'vkcb',
- >>> .....................................................parm_dict, findlayer=1)
+ >>> data = flopy.modflow.mfpar.ModflowPar.parameter_fill(
+ ... m, (nrow, ncol), 'vkcb', parm_dict, findlayer=1)
"""
@@ -315,16 +315,12 @@ def parameter_fill(model, shape, findkey, parm_dict, findlayer=None):
if mltarr.lower() == "none":
mult = np.ones(shape, dtype=dtype)
else:
- mult = model.mfpar.mult.mult_dict[mltarr.lower()][
- :, :
- ]
+ mult = model.mfpar.mult.mult_dict[mltarr.lower()][:, :]
if zonarr.lower() == "all":
cluster_data = pv * mult
else:
mult_save = np.copy(mult)
- za = model.mfpar.zone.zone_dict[zonarr.lower()][
- :, :
- ]
+ za = model.mfpar.zone.zone_dict[zonarr.lower()][:, :]
# build a multiplier for all of the izones
mult = np.zeros(shape, dtype=dtype)
for iz in izones:
diff --git a/flopy/modflow/mfpbc.py b/flopy/modflow/mfpbc.py
index 9239d13858..bcbef1dd19 100644
--- a/flopy/modflow/mfpbc.py
+++ b/flopy/modflow/mfpbc.py
@@ -40,10 +40,7 @@ def __init__(
"or layer_row_column_data."
)
- (
- self.mxactp,
- self.layer_row_column_data,
- ) = self.assign_layer_row_column_data(
+ (self.mxactp, self.layer_row_column_data) = self.assign_layer_row_column_data(
layer_row_column_data, 5, zerobase=zerobase
)
# misuse of this function - zerobase needs to be False
@@ -75,7 +72,7 @@ def write_file(self):
"""
f_pbc = open(self.fn_path, "w")
- f_pbc.write("%s\n" % self.heading)
+ f_pbc.write(f"{self.heading}\n")
f_pbc.write("%10i%10i\n" % (self.mxactp, self.mxcos))
for n in range(self.parent.get_package("DIS").nper):
if n < len(self.layer_row_column_data):
@@ -91,9 +88,7 @@ def write_file(self):
f_pbc.write(f"{itmp:10d}{ctmp:10d}{self.np:10d}\n")
if n < len(self.layer_row_column_data):
for b in a:
- f_pbc.write(
- f"{b[0]:10d}{b[1]:10d}{b[2]:10d}{b[3]:10d}{b[4]:10d}\n"
- )
+ f_pbc.write(f"{b[0]:10d}{b[1]:10d}{b[2]:10d}{b[3]:10d}{b[4]:10d}\n")
if n < len(self.cosines):
for d in c:
f_pbc.write(f"{d[0]:10g}{d[1]:10g}{d[2]:10g}\n")
diff --git a/flopy/modflow/mfpcgn.py b/flopy/modflow/mfpcgn.py
index 21c4e2eff8..4775b0c226 100644
--- a/flopy/modflow/mfpcgn.py
+++ b/flopy/modflow/mfpcgn.py
@@ -336,16 +336,12 @@ def write_file(self):
else:
# dataset 1
sfmt = " {0:9d} {1:9d} {2:9.3g} {3:9.3g}\n"
- line = sfmt.format(
- self.iter_mo, self.iter_mi, self.close_r, self.close_h
- )
+ line = sfmt.format(self.iter_mo, self.iter_mi, self.close_r, self.close_h)
f.write(line)
# dataset 2
sfmt = " {0:9.3g} {1:9d} {2:9d} {3:9d}\n"
- line = sfmt.format(
- self.relax, self.ifill, self.unit_pc, self.unit_ts
- )
+ line = sfmt.format(self.relax, self.ifill, self.unit_pc, self.unit_ts)
f.write(line)
# dataset 3
@@ -516,17 +512,11 @@ def load(cls, f, model, ext_unit_dict=None):
ext_unit_dict, filetype=ModflowPcgn._ftype()
)
if unit_pc > 0:
- iu, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=unit_pc
- )
+ iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=unit_pc)
if unit_ts > 0:
- iu, filenames[2] = model.get_ext_dict_attr(
- ext_unit_dict, unit=unit_ts
- )
+ iu, filenames[2] = model.get_ext_dict_attr(ext_unit_dict, unit=unit_ts)
if ipunit > 0:
- iu, filenames[3] = model.get_ext_dict_attr(
- ext_unit_dict, unit=ipunit
- )
+ iu, filenames[3] = model.get_ext_dict_attr(ext_unit_dict, unit=ipunit)
return cls(
model,
diff --git a/flopy/modflow/mfpks.py b/flopy/modflow/mfpks.py
index 566898ff30..688098f91c 100644
--- a/flopy/modflow/mfpks.py
+++ b/flopy/modflow/mfpks.py
@@ -245,10 +245,7 @@ def load(cls, f, model, ext_unit_dict=None):
# dataset 0 -- header
- print(
- " Warning: "
- "load method not completed. default pks object created."
- )
+ print(" Warning: load method not completed. default pks object created.")
if openfile:
f.close()
diff --git a/flopy/modflow/mfpval.py b/flopy/modflow/mfpval.py
index ff65054de5..019ad6b5a4 100644
--- a/flopy/modflow/mfpval.py
+++ b/flopy/modflow/mfpval.py
@@ -169,7 +169,7 @@ def load(cls, f, model, ext_unit_dict=None):
print(f' reading parameter values from "{filename}"')
# read PVAL data
- pval_dict = dict()
+ pval_dict = {}
for n in range(npval):
line = f.readline()
t = line.strip().split()
@@ -192,10 +192,7 @@ def load(cls, f, model, ext_unit_dict=None):
)
return cls(
- model,
- pval_dict=pval_dict,
- unitnumber=unitnumber,
- filenames=filenames,
+ model, pval_dict=pval_dict, unitnumber=unitnumber, filenames=filenames
)
@staticmethod
diff --git a/flopy/modflow/mfrch.py b/flopy/modflow/mfrch.py
index a9a22e394f..4cb12054ef 100644
--- a/flopy/modflow/mfrch.py
+++ b/flopy/modflow/mfrch.py
@@ -129,13 +129,9 @@ def __init__(
rech_u2d_shape = get_pak_vals_shape(model, rech)
irch_u2d_shape = get_pak_vals_shape(model, irch)
- self.rech = Transient2d(
- model, rech_u2d_shape, np.float32, rech, name="rech_"
- )
+ self.rech = Transient2d(model, rech_u2d_shape, np.float32, rech, name="rech_")
if self.nrchop == 2:
- self.irch = Transient2d(
- model, irch_u2d_shape, np.int32, irch, name="irch_"
- )
+ self.irch = Transient2d(model, irch_u2d_shape, np.int32, irch, name="irch_")
else:
self.irch = None
self.np = 0
@@ -195,28 +191,20 @@ def check(
active = np.ones(self.rech.array[0][0].shape, dtype=bool)
# check for unusually high or low values of mean R/T
- hk_package = {"UPW", "LPF"}.intersection(
- set(self.parent.get_package_list())
- )
+ hk_package = {"UPW", "LPF"}.intersection(set(self.parent.get_package_list()))
if len(hk_package) > 0 and self.parent.structured:
- pkg = list(hk_package)[0]
+ pkg = next(iter(hk_package))
# handle quasi-3D layers
# (ugly, would be nice to put this else where in a general function)
if self.parent.dis.laycbd.sum() != 0:
thickness = np.empty(
- (
- self.parent.dis.nlay,
- self.parent.dis.nrow,
- self.parent.dis.ncol,
- ),
+ (self.parent.dis.nlay, self.parent.dis.nrow, self.parent.dis.ncol),
dtype=float,
)
l = 0
for i, cbd in enumerate(self.parent.dis.laycbd):
- thickness[i, :, :] = self.parent.modelgrid.cell_thickness[
- l, :, :
- ]
+ thickness[i, :, :] = self.parent.modelgrid.cell_thickness[l, :, :]
if cbd > 0:
l += 1
l += 1
@@ -241,31 +229,21 @@ def check(
if len(lessthan) > 0:
txt = (
"\r Mean R/T ratio < checker warning threshold of "
- "{} for {} stress periods".format(RTmin, len(lessthan))
- )
- chk._add_to_summary(
- type="Warning", value=R_T.min(), desc=txt
- )
- chk.remove_passed(
- f"Mean R/T is between {RTmin} and {RTmax}"
+ f"{RTmin} for {len(lessthan)} stress periods"
)
+ chk._add_to_summary(type="Warning", value=R_T.min(), desc=txt)
+ chk.remove_passed(f"Mean R/T is between {RTmin} and {RTmax}")
if len(greaterthan) > 0:
txt = (
"\r Mean R/T ratio > checker warning "
- "threshold of {} for "
- "{} stress periods".format(RTmax, len(greaterthan))
- )
- chk._add_to_summary(
- type="Warning", value=R_T.max(), desc=txt
- )
- chk.remove_passed(
- f"Mean R/T is between {RTmin} and {RTmax}"
+ f"threshold of {RTmax} for "
+ f"{len(greaterthan)} stress periods"
)
+ chk._add_to_summary(type="Warning", value=R_T.max(), desc=txt)
+ chk.remove_passed(f"Mean R/T is between {RTmin} and {RTmax}")
elif len(lessthan) == 0 and len(greaterthan) == 0:
- chk.append_passed(
- f"Mean R/T is between {RTmin} and {RTmax}"
- )
+ chk.append_passed(f"Mean R/T is between {RTmin} and {RTmax}")
# check for NRCHOP values != 3
if self.nrchop != 3:
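
The screen above reduces to: mean recharge over mean transmissivity for each stress period, warned when outside [RTmin, RTmax]. A standalone numpy sketch of the ratio; the thresholds here are illustrative, not the checker's constants:

    import numpy as np

    rech = np.full((10, 10), 1e-4)     # recharge for one stress period
    hk = np.full((3, 10, 10), 10.0)    # horizontal K by layer
    thickness = np.full((3, 10, 10), 5.0)
    T = (hk * thickness).sum(axis=0)   # transmissivity of each column
    R_T = rech.mean() / T.mean()
    RTmin, RTmax = 2e-8, 2e-4          # illustrative warning thresholds
    if not (RTmin <= R_T <= RTmax):
        print(f"Mean R/T ratio {R_T:.2e} outside [{RTmin:.0e}, {RTmax:.0e}]")
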
@@ -306,11 +284,7 @@ def write_file(self, check=True, f=None):
"""
# allows turning off package checks when writing files at model level
if check:
- self.check(
- f=f"{self.name[0]}.chk",
- verbose=self.parent.verbose,
- level=1,
- )
+ self.check(f=f"{self.name[0]}.chk", verbose=self.parent.verbose, level=1)
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
# Open file for writing
if f is not None:
@@ -325,18 +299,11 @@ def write_file(self, check=True, f=None):
for kper, u2d in self.irch.transient_2ds.items():
irch[kper] = u2d.array + 1
irch = Transient2d(
- self.parent,
- self.irch.shape,
- self.irch.dtype,
- irch,
- self.irch.name,
+ self.parent, self.irch.shape, self.irch.dtype, irch, self.irch.name
)
if not self.parent.structured:
mxndrch = np.max(
- [
- u2d.array.size
- for kper, u2d in self.irch.transient_2ds.items()
- ]
+ [u2d.array.size for kper, u2d in self.irch.transient_2ds.items()]
)
f_rch.write(f"{mxndrch:10d}\n")
@@ -348,9 +315,7 @@ def write_file(self, check=True, f=None):
inirch = self.rech[kper].array.size
else:
inirch = -1
- f_rch.write(
- f"{inrech:10d}{inirch:10d} # Stress period {kper + 1}\n"
- )
+ f_rch.write(f"{inrech:10d}{inirch:10d} # Stress period {kper + 1}\n")
if inrech >= 0:
f_rch.write(file_entry_rech)
if self.nrchop == 2:
@@ -414,9 +379,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None, check=True):
npar = int(raw[1])
if npar > 0:
if model.verbose:
- print(
- f" Parameters detected. Number of parameters = {npar}"
- )
+ print(f" Parameters detected. Number of parameters = {npar}")
line = f.readline()
# dataset 2
t = line_parse(line)
@@ -463,16 +426,9 @@ def load(cls, f, model, nper=None, ext_unit_dict=None, check=True):
if inrech >= 0:
if npar == 0:
if model.verbose:
- print(
- f" loading rech stress period {iper + 1:3d}..."
- )
+ print(f" loading rech stress period {iper + 1:3d}...")
t = Util2d.load(
- f,
- model,
- u2d_shape,
- np.float32,
- "rech",
- ext_unit_dict,
+ f, model, u2d_shape, np.float32, "rech", ext_unit_dict
)
else:
parm_dict = {}
@@ -490,9 +446,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None, check=True):
except:
iname = "static"
parm_dict[pname] = iname
- t = mfparbc.parameter_bcfill(
- model, u2d_shape, parm_dict, pak_parms
- )
+ t = mfparbc.parameter_bcfill(model, u2d_shape, parm_dict, pak_parms)
current_rech = t
rech[iper] = current_rech
@@ -500,9 +454,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None, check=True):
if nrchop == 2:
if inirch >= 0:
if model.verbose:
- print(
- f" loading irch stress period {iper + 1:3d}..."
- )
+ print(f" loading irch stress period {iper + 1:3d}...")
t = Util2d.load(
f, model, u2d_shape, np.int32, "irch", ext_unit_dict
)
@@ -522,9 +474,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None, check=True):
ext_unit_dict, filetype=ModflowRch._ftype()
)
if ipakcb > 0:
- iu, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=ipakcb
- )
+ iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
model.add_pop_key_list(ipakcb)
# create recharge package instance
@@ -538,11 +488,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None, check=True):
filenames=filenames,
)
if check:
- rch.check(
- f=f"{rch.name[0]}.chk",
- verbose=rch.parent.verbose,
- level=0,
- )
+ rch.check(f=f"{rch.name[0]}.chk", verbose=rch.parent.verbose, level=0)
return rch
@staticmethod
diff --git a/flopy/modflow/mfriv.py b/flopy/modflow/mfriv.py
index 63e0ee155d..b9e9a78738 100644
--- a/flopy/modflow/mfriv.py
+++ b/flopy/modflow/mfriv.py
@@ -155,9 +155,7 @@ def __init__(
if dtype is not None:
self.dtype = dtype
else:
- self.dtype = self.get_default_dtype(
- structured=self.parent.structured
- )
+ self.dtype = self.get_default_dtype(structured=self.parent.structured)
self.stress_period_data = MfList(self, stress_period_data)
self.parent.add_package(self)
@@ -200,11 +198,7 @@ def check(self, f=None, verbose=True, level=1, checktype=None):
if isinstance(data, pd.DataFrame):
data = data.to_records(index=False).astype(self.dtype)
spd = data
- inds = (
- (spd.k, spd.i, spd.j)
- if self.parent.structured
- else (spd.node)
- )
+ inds = (spd.k, spd.i, spd.j) if self.parent.structured else (spd.node)
# check that river stage and bottom are above model cell
# bottoms also checks for nan values
@@ -300,11 +294,7 @@ def write_file(self, check=True):
"""
# allows turning off package checks when writing files at model level
if check:
- self.check(
- f=f"{self.name[0]}.chk",
- verbose=self.parent.verbose,
- level=1,
- )
+ self.check(f=f"{self.name[0]}.chk", verbose=self.parent.verbose, level=1)
f_riv = open(self.fn_path, "w")
f_riv.write(f"{self.heading}\n")
line = f"{self.stress_period_data.mxact:10d}{self.ipakcb:10d}"
diff --git a/flopy/modflow/mfsfr2.py b/flopy/modflow/mfsfr2.py
index 62112fb51a..c3f6e546d4 100644
--- a/flopy/modflow/mfsfr2.py
+++ b/flopy/modflow/mfsfr2.py
@@ -269,40 +269,29 @@ class ModflowSfr2(Package):
"""
- _options = dict(
- [
- ("reachinput", OptionBlock.simple_flag),
- ("transroute", OptionBlock.simple_flag),
- ("tabfiles", OptionBlock.simple_tabfile),
- (
- "lossfactor",
- {
- OptionBlock.dtype: np.bool_,
- OptionBlock.nested: True,
- OptionBlock.n_nested: 1,
- OptionBlock.vars: {"factor": OptionBlock.simple_float},
- },
- ),
- (
- "strhc1kh",
- {
- OptionBlock.dtype: np.bool_,
- OptionBlock.nested: True,
- OptionBlock.n_nested: 1,
- OptionBlock.vars: {"factorkh": OptionBlock.simple_float},
- },
- ),
- (
- "strhc1kv",
- {
- OptionBlock.dtype: np.bool_,
- OptionBlock.nested: True,
- OptionBlock.n_nested: 1,
- OptionBlock.vars: {"factorkv": OptionBlock.simple_float},
- },
- ),
- ]
- )
+ _options = {
+ "reachinput": OptionBlock.simple_flag,
+ "transroute": OptionBlock.simple_flag,
+ "tabfiles": OptionBlock.simple_tabfile,
+ "lossfactor": {
+ OptionBlock.dtype: np.bool_,
+ OptionBlock.nested: True,
+ OptionBlock.n_nested: 1,
+ OptionBlock.vars: {"factor": OptionBlock.simple_float},
+ },
+ "strhc1kh": {
+ OptionBlock.dtype: np.bool_,
+ OptionBlock.nested: True,
+ OptionBlock.n_nested: 1,
+ OptionBlock.vars: {"factorkh": OptionBlock.simple_float},
+ },
+ "strhc1kv": {
+ OptionBlock.dtype: np.bool_,
+ OptionBlock.nested: True,
+ OptionBlock.n_nested: 1,
+ OptionBlock.vars: {"factorkv": OptionBlock.simple_float},
+ },
+ }
nsfrpar = 0
default_value = 0.0
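
The simple flags in _options (reachinput, transroute, tabfiles) map onto constructor keywords, and the reach and segment tables can start from the empty-recarray helpers defined later in this file. A hedged sketch of a three-reach, one-segment setup; columns beyond k/i/j, iseg/ireach, and rchlen are left at their defaults:

    import flopy

    m = flopy.modflow.Modflow("sfrdemo")
    flopy.modflow.ModflowDis(m, nlay=1, nrow=3, ncol=3, nper=1)

    rd = flopy.modflow.ModflowSfr2.get_empty_reach_data(3)
    rd["k"] = 0
    rd["i"] = [0, 1, 2]
    rd["j"] = 1
    rd["iseg"] = 1
    rd["ireach"] = [1, 2, 3]
    rd["rchlen"] = 100.0

    sd = flopy.modflow.ModflowSfr2.get_empty_segment_data(1)
    sd["nseg"] = 1
    sd["icalc"] = 1
    sd["outseg"] = 0

    sfr = flopy.modflow.ModflowSfr2(
        m, nstrm=3, nss=1, reachinput=True,
        reach_data=rd, segment_data={0: sd},
    )
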
@@ -366,10 +355,7 @@ def __init__(
if fname is None:
fname = f"{model.name}.sfr.{ext}"
model.add_output_file(
- abs(istcb2),
- fname=fname,
- binflag=binflag,
- package=self._ftype(),
+ abs(istcb2), fname=fname, binflag=binflag, package=self._ftype()
)
else:
istcb2 = 0
@@ -413,9 +399,7 @@ def __init__(
# number of reaches, negative value is flag for unsat.
# flow beneath streams and/or transient routing
self._nstrm = (
- np.sign(nstrm) * len(reach_data)
- if reach_data is not None
- else nstrm
+ np.sign(nstrm) * len(reach_data) if reach_data is not None else nstrm
)
if segment_data is not None:
# segment_data is a zero-d array
@@ -430,20 +414,21 @@ def __init__(
else:
pass
# use atleast_1d for length since segment_data might be a 0D array
- # this seems to be OK, because self.segment_data is produced by the constructor (never 0D)
+ # this seems to be OK, because self.segment_data is produced by the
+ # constructor (never 0D)
self.nsfrpar = nsfrpar
self.nparseg = nparseg
- # conversion factor used in calculating stream depth for stream reach (icalc = 1 or 2)
+ # conversion factor used in calculating stream depth for stream reach
+ # (icalc = 1 or 2)
self._const = const if const is not None else None
- self.dleak = (
- dleak # tolerance level of stream depth used in computing leakage
- )
+ self.dleak = dleak # tolerance level of stream depth used in computing leakage
# flag; unit number for writing table of SFR output to text file
self.istcb2 = istcb2
# if nstrm < 0
- # defines the format of the input data and whether or not unsaturated flow is simulated
+ # defines the format of the input data and whether or not unsaturated
+ # flow is simulated
self.isfropt = isfropt
# if isfropt > 1
@@ -474,9 +459,7 @@ def __init__(
self.reach_data[n] = reach_data[n]
# assign node numbers if there are none (structured grid)
- if np.diff(
- self.reach_data.node
- ).max() == 0 and self.parent.has_package("DIS"):
+ if np.diff(self.reach_data.node).max() == 0 and self.parent.has_package("DIS"):
# first make kij list
lrc = np.array(self.reach_data)[["k", "i", "j"]].tolist()
self.reach_data["node"] = self.parent.dis.get_node(lrc)
@@ -521,12 +504,7 @@ def __init__(
self.reach_data["iseg"] = 1
consistent_seg_numbers = (
- len(
- set(self.reach_data.iseg).difference(
- set(self.graph.keys())
- )
- )
- == 0
+ len(set(self.reach_data.iseg).difference(set(self.graph.keys()))) == 0
)
if not consistent_seg_numbers:
warnings.warn(
@@ -535,9 +513,7 @@ def __init__(
# first convert any not_a_segment_values to 0
for v in self.not_a_segment_values:
- self.segment_data[0].outseg[
- self.segment_data[0].outseg == v
- ] = 0
+ self.segment_data[0].outseg[self.segment_data[0].outseg == v] = 0
self.set_outreaches()
self.channel_geometry_data = channel_geometry_data
self.channel_flow_data = channel_flow_data
@@ -597,9 +573,7 @@ def nstrm(self):
@property
def nper(self):
nper = self.parent.nrow_ncol_nlay_nper[-1]
- nper = (
- 1 if nper == 0 else nper
- ) # otherwise iterations from 0, nper won't run
+        nper = 1 if nper == 0 else nper  # with nper == 0 the stress-period loop would never run
return nper
@property
@@ -637,7 +611,7 @@ def paths(self):
nseg = np.array(sorted(self._paths.keys()), dtype=int)
nseg = nseg[nseg > 0].copy()
outseg = np.array([self._paths[k][1] for k in nseg])
- existing_nseg = sorted(list(self.graph.keys()))
+ existing_nseg = sorted(self.graph.keys())
existing_outseg = [self.graph[k] for k in existing_nseg]
if not np.array_equal(nseg, existing_nseg) or not np.array_equal(
outseg, existing_outseg
@@ -655,9 +629,7 @@ def _make_graph(self):
for recarray in self.segment_data.values():
graph.update(dict(zip(recarray["nseg"], recarray["outseg"])))
- outlets = set(graph.values()).difference(
- set(graph.keys())
- ) # including lakes
+ outlets = set(graph.values()).difference(set(graph.keys())) # including lakes
graph.update({o: 0 for o in outlets if o != 0})
return graph
@@ -693,9 +665,7 @@ def get_empty_segment_data(nsegments=0, aux_names=None, default_value=0.0):
dtype = ModflowSfr2.get_default_segment_dtype()
if aux_names is not None:
dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
- d = create_empty_recarray(
- nsegments, dtype, default_value=default_value
- )
+ d = create_empty_recarray(nsegments, dtype, default_value=default_value)
return d
@staticmethod
@@ -796,9 +766,8 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):
structured = model.structured
if nper is None:
nper = model.nper
- nper = (
- 1 if nper == 0 else nper
- ) # otherwise iterations from 0, nper won't run
+ nper = 1 if nper == 0 else nper
+            # if nper == 0, the stress-period loop would never run
openfile = not hasattr(f, "read")
if openfile:
@@ -872,9 +841,7 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):
# set column names, dtypes
names = _get_item2_names(nstrm, reachinput, isfropt, structured)
dtypes = [
- d
- for d in ModflowSfr2.get_default_reach_dtype().descr
- if d[0] in names
+ d for d in ModflowSfr2.get_default_reach_dtype().descr if d[0] in names
]
lines = []
@@ -888,9 +855,8 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):
# initialize full reach_data array with all possible columns
reach_data = ModflowSfr2.get_empty_reach_data(len(lines))
for n in names:
- reach_data[n] = tmp[
- n
- ] # not sure if there's a way to assign multiple columns
+ # not sure if there's a way to assign multiple columns
+ reach_data[n] = tmp[n]
# zero-based convention
inds = ["k", "i", "j"] if structured else ["node"]
@@ -914,7 +880,8 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):
)
# container to hold any auxiliary variables
current_aux = {}
- # these could also be implemented as structured arrays with a column for segment number
+ # these could also be implemented as structured arrays with
+ # a column for segment number
current_6d = {}
current_6e = {}
for j in range(itmp):
@@ -930,9 +897,9 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):
# of this logic
# https://water.usgs.gov/ogw/modflow-nwt/MODFLOW-NWT-Guide/sfr.htm
dataset_6b, dataset_6c = (0,) * 9, (0,) * 9
- if not (
- isfropt in [2, 3] and icalc == 1 and i > 1
- ) and not (isfropt in [1, 2, 3] and icalc >= 2):
+ if not (isfropt in [2, 3] and icalc == 1 and i > 1) and not (
+ isfropt in [1, 2, 3] and icalc >= 2
+ ):
dataset_6b = _parse_6bc(
f.readline(),
icalc,
@@ -953,19 +920,13 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):
if icalc == 2:
# ATL: not sure exactly how isfropt logic functions for this
- # dataset 6d description suggests that this line isn't read for isfropt > 1
- # but description of icalc suggest that icalc=2 (8-point channel) can be used with any isfropt
- if (
- i == 0
- or nstrm > 0
- and not reachinput
- or isfropt <= 1
- ):
+ # dataset 6d description suggests that this line isn't read for
+                    # isfropt > 1 but description of icalc suggests that icalc=2
+ # (8-point channel) can be used with any isfropt
+ if i == 0 or (nstrm > 0 and not reachinput) or isfropt <= 1:
dataset_6d = []
for _ in range(2):
- dataset_6d.append(
- _get_dataset(f.readline(), [0.0] * 8)
- )
+ dataset_6d.append(_get_dataset(f.readline(), [0.0] * 8))
current_6d[temp_nseg] = dataset_6d
if icalc == 4:
nstrpts = dataset_6a[5]
@@ -985,9 +946,7 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):
if tabfiles and i == 0:
for j in range(numtab):
- segnum, numval, iunit = map(
- int, f.readline().strip().split()[:3]
- )
+ segnum, numval, iunit = map(int, f.readline().strip().split()[:3])
tabfiles_dict[segnum] = {"numval": numval, "inuit": iunit}
else:
@@ -1143,9 +1102,7 @@ def assign_layers(self, adjust_botms=False, pad=1.0):
print("Fixing elevation conflicts...")
botm = self.parent.dis.botm.array.copy()
for ib, jb in zip(below_i, below_j):
- inds = (self.reach_data.i == ib) & (
- self.reach_data.j == jb
- )
+ inds = (self.reach_data.i == ib) & (self.reach_data.j == jb)
botm[-1, ib, jb] = streambotms[inds].min() - pad
l.append(botm[-1, below_i, below_j])
header += ",new_model_botm"
@@ -1201,9 +1158,7 @@ def get_outlets(self, level=0, verbose=True):
# use graph instead of above loop
nrow = len(self.segment_data[per].nseg)
- ncol = np.max(
- [len(v) if v is not None else 0 for v in self.paths.values()]
- )
+ ncol = np.max([len(v) if v is not None else 0 for v in self.paths.values()])
all_outsegs = np.zeros((nrow, ncol), dtype=int)
for i, (k, v) in enumerate(self.paths.items()):
if k > 0:
@@ -1239,18 +1194,14 @@ def set_outreaches(self):
self.repair_outsegs()
rd = self.reach_data
outseg = self.graph
- reach1IDs = dict(
- zip(rd[rd.ireach == 1].iseg, rd[rd.ireach == 1].reachID)
- )
+ reach1IDs = dict(zip(rd[rd.ireach == 1].iseg, rd[rd.ireach == 1].reachID))
outreach = []
for i in range(len(rd)):
# if at the end of reach data or current segment
if i + 1 == len(rd) or rd.ireach[i + 1] == 1:
nextseg = outseg[rd.iseg[i]] # get next segment
if nextseg > 0: # current reach is not an outlet
- nextrchid = reach1IDs[
- nextseg
- ] # get reach 1 of next segment
+ nextrchid = reach1IDs[nextseg] # get reach 1 of next segment
else:
nextrchid = 0
else: # otherwise, it's the next reachID
@@ -1258,9 +1209,7 @@ def set_outreaches(self):
outreach.append(nextrchid)
self.reach_data["outreach"] = outreach
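For orientation, a hypothetical two-segment network (segment 1 with reaches 1-2
routing to segment 2 with reach 3) walks out as:

    # reach1IDs = {1: 1, 2: 3}             # first reachID per segment
    # iseg = [1, 1, 2]; ireach = [1, 2, 1]
    # outreach -> [2, 3, 0]                # next reach, next segment's reach 1, outlet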
- def get_slopes(
- self, default_slope=0.001, minimum_slope=0.0001, maximum_slope=1.0
- ):
+ def get_slopes(self, default_slope=0.001, minimum_slope=0.0001, maximum_slope=1.0):
"""
Compute slopes by reach using values in strtop (streambed top)
and rchlen (reach length) columns of reach_data. The slope for a
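A minimal sketch of that per-reach slope rule (hypothetical values; outlets fall
back to default_slope, and results are clipped to [minimum_slope, maximum_slope]):

    import numpy as np

    strtop = np.array([10.0, 9.5, 9.4])      # streambed tops
    rchlen = np.array([100.0, 100.0, 50.0])  # reach lengths
    outreach = np.array([2, 3, 0])           # 1-based downstream reachID; 0 = outlet
    slope = np.full(3, 0.001)                # default_slope for outlets
    ds = outreach > 0
    slope[ds] = (strtop[ds] - strtop[outreach[ds] - 1]) / rchlen[ds]
    slope = np.clip(slope, 0.0001, 1.0)      # minimum_slope, maximum_slope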
@@ -1338,9 +1287,8 @@ def get_upsegs(self):
for o in np.unique(segment_data.outseg)
}
- outsegs = [
- k for k in list(upsegs.keys()) if k > 0
- ] # exclude 0, which is the outlet designator
+ # exclude 0, which is the outlet designator
+ outsegs = [k for k in list(upsegs.keys()) if k > 0]
# for each outseg key, for each upseg, check for more upsegs,
# append until headwaters has been reached
@@ -1374,20 +1322,14 @@ def get_variable_by_stress_period(self, varname):
isvar = all_data.sum(axis=1) != 0
ra = np.rec.fromarrays(all_data[isvar].transpose().copy(), dtype=dtype)
segs = self.segment_data[0].nseg[isvar]
- isseg = np.array(
- [True if s in segs else False for s in self.reach_data.iseg]
- )
+ isseg = np.array([True if s in segs else False for s in self.reach_data.iseg])
isinlet = isseg & (self.reach_data.ireach == 1)
- rd = np.array(self.reach_data[isinlet])[
- ["k", "i", "j", "iseg", "ireach"]
- ]
+ rd = np.array(self.reach_data[isinlet])[["k", "i", "j", "iseg", "ireach"]]
ra = recfunctions.merge_arrays([rd, ra], flatten=True, usemask=False)
return ra.view(np.recarray)
def repair_outsegs(self):
- isasegment = np.isin(
- self.segment_data[0].outseg, self.segment_data[0].nseg
- )
+ isasegment = np.isin(self.segment_data[0].outseg, self.segment_data[0].nseg)
isasegment = isasegment | (self.segment_data[0].outseg < 0)
self.segment_data[0]["outseg"][~isasegment] = 0.0
self._graph = None
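A small worked case of the repair above (hypothetical values):

    # nseg = [1, 2, 3]; outseg = [2, 9, -1]
    # np.isin(outseg, nseg) -> [True, False, False]; OR-ing (outseg < 0)
    # keeps the lake (-1), so only the dangling outseg 9 is reset to 0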
@@ -1403,7 +1345,7 @@ def renumber_segments(self):
r : dictionary mapping old segment numbers to new
"""
- nseg = sorted(list(self.graph.keys()))
+ nseg = sorted(self.graph.keys())
outseg = [self.graph[k] for k in nseg]
# explicitly fix any gaps in the numbering
@@ -1476,9 +1418,7 @@ def renumber_channel_data(d):
d2 = None
return d2
- self.channel_geometry_data = renumber_channel_data(
- self.channel_geometry_data
- )
+ self.channel_geometry_data = renumber_channel_data(self.channel_geometry_data)
self.channel_flow_data = renumber_channel_data(self.channel_flow_data)
return r
@@ -1579,9 +1519,7 @@ def _get_headwaters(self, per=0):
One dimensional array listing all headwater segments.
"""
upsegs = [
- self.segment_data[per]
- .nseg[self.segment_data[per].outseg == s]
- .tolist()
+ self.segment_data[per].nseg[self.segment_data[per].outseg == s].tolist()
for s in self.segment_data[0].nseg
]
return self.segment_data[per].nseg[
@@ -1667,26 +1605,19 @@ def _write_1c(self, f_sfr):
)
)
if self.reachinput:
- self.nstrm = abs(
- self.nstrm
- ) # see explanation for dataset 1c in online guide
+ # see explanation for dataset 1c in online guide
+ self.nstrm = abs(self.nstrm)
f_sfr.write(f"{self.isfropt:.0f} ")
if self.isfropt > 1:
- f_sfr.write(
- f"{self.nstrail:.0f} {self.isuzn:.0f} {self.nsfrsets:.0f} "
- )
+ f_sfr.write(f"{self.nstrail:.0f} {self.isuzn:.0f} {self.nsfrsets:.0f} ")
if self.nstrm < 0:
f_sfr.write(f"{self.isfropt:.0f} ")
if self.isfropt > 1:
- f_sfr.write(
- f"{self.nstrail:.0f} {self.isuzn:.0f} {self.nsfrsets:.0f} "
- )
+ f_sfr.write(f"{self.nstrail:.0f} {self.isuzn:.0f} {self.nsfrsets:.0f} ")
if self.nstrm < 0 or self.transroute:
f_sfr.write(f"{self.irtflg:.0f} ")
if self.irtflg > 0:
- f_sfr.write(
- f"{self.numtim:.0f} {self.weight:.8f} {self.flwtol:.8f} "
- )
+ f_sfr.write(f"{self.numtim:.0f} {self.weight:.8f} {self.flwtol:.8f} ")
f_sfr.write("\n")
def _write_reach_data(self, f_sfr):
@@ -1697,10 +1628,7 @@ def _write_reach_data(self, f_sfr):
# decide which columns to write
columns = _get_item2_names(
- self.nstrm,
- self.reachinput,
- self.isfropt,
- structured=self.parent.structured,
+ self.nstrm, self.reachinput, self.isfropt, structured=self.parent.structured
)
# --make copy of data for multiple calls
@@ -1754,18 +1682,14 @@ def _write_segment_data(self, i, j, f_sfr):
bwdth,
) = (0 if v == self.default_value else v for v in seg_dat)
- f_sfr.write(
- " ".join(fmts[0:4]).format(nseg, icalc, outseg, iupseg) + " "
- )
+ f_sfr.write(" ".join(fmts[0:4]).format(nseg, icalc, outseg, iupseg) + " ")
if iupseg > 0:
f_sfr.write(fmts[4].format(iprior) + " ")
if icalc == 4:
f_sfr.write(fmts[5].format(nstrpts) + " ")
- f_sfr.write(
- " ".join(fmts[6:10]).format(flow, runoff, etsw, pptsw) + " "
- )
+ f_sfr.write(" ".join(fmts[6:10]).format(flow, runoff, etsw, pptsw) + " ")
if icalc in [1, 2]:
f_sfr.write(fmts[10].format(roughch) + " ")
@@ -1773,9 +1697,7 @@ def _write_segment_data(self, i, j, f_sfr):
f_sfr.write(fmts[11].format(roughbk) + " ")
if icalc == 3:
- f_sfr.write(
- " ".join(fmts[12:16]).format(cdpth, fdpth, awdth, bwdth) + " "
- )
+ f_sfr.write(" ".join(fmts[12:16]).format(cdpth, fdpth, awdth, bwdth) + " ")
f_sfr.write("\n")
self._write_6bc(
@@ -1822,31 +1744,22 @@ def _write_6bc(self, i, j, f_sfr, cols=()):
if self.isfropt in [0, 4, 5] and icalc <= 0:
f_sfr.write(
- " ".join(fmts[0:5]).format(
- hcond, thickm, elevupdn, width, depth
- )
- + " "
+ " ".join(fmts[0:5]).format(hcond, thickm, elevupdn, width, depth) + " "
)
elif self.isfropt in [0, 4, 5] and icalc == 1:
f_sfr.write(fmts[0].format(hcond) + " ")
if i == 0:
- f_sfr.write(
- " ".join(fmts[1:4]).format(thickm, elevupdn, width) + " "
- )
+ f_sfr.write(" ".join(fmts[1:4]).format(thickm, elevupdn, width) + " ")
if self.isfropt in [4, 5]:
- f_sfr.write(
- " ".join(fmts[5:8]).format(thts, thti, eps) + " "
- )
+ f_sfr.write(" ".join(fmts[5:8]).format(thts, thti, eps) + " ")
if self.isfropt == 5:
f_sfr.write(fmts[8].format(uhc) + " ")
elif i > 0 and self.isfropt == 0:
- f_sfr.write(
- " ".join(fmts[1:4]).format(thickm, elevupdn, width) + " "
- )
+ f_sfr.write(" ".join(fmts[1:4]).format(thickm, elevupdn, width) + " ")
elif self.isfropt in [0, 4, 5] and icalc >= 2:
f_sfr.write(fmts[0].format(hcond) + " ")
@@ -1857,9 +1770,7 @@ def _write_6bc(self, i, j, f_sfr, cols=()):
f_sfr.write(" ".join(fmts[1:3]).format(thickm, elevupdn) + " ")
if self.isfropt in [4, 5] and icalc == 2 and i == 0:
- f_sfr.write(
- " ".join(fmts[3:6]).format(thts, thti, eps) + " "
- )
+ f_sfr.write(" ".join(fmts[3:6]).format(thts, thti, eps) + " ")
if self.isfropt == 5:
f_sfr.write(fmts[8].format(uhc) + " ")
@@ -1904,10 +1815,7 @@ def write_file(self, filename=None):
f_sfr.write(f"{self.heading}\n")
# Item 1
- if (
- isinstance(self.options, OptionBlock)
- and self.parent.version == "mfnwt"
- ):
+ if isinstance(self.options, OptionBlock) and self.parent.version == "mfnwt":
self.options.update_from_package(self)
self.options.write_options(f_sfr)
elif isinstance(self.options, OptionBlock):
@@ -1940,14 +1848,11 @@ def write_file(self, filename=None):
# or isfropt <= 1:
if (
i == 0
- or self.nstrm > 0
- and not self.reachinput
+ or (self.nstrm > 0 and not self.reachinput)
or self.isfropt <= 1
):
for k in range(2):
- for d in self.channel_geometry_data[i][nseg][
- k
- ]:
+ for d in self.channel_geometry_data[i][nseg][k]:
f_sfr.write(f"{d:.2f} ")
f_sfr.write("\n")
@@ -2022,11 +1927,7 @@ def export_linkages(self, f, **kwargs):
# append connection lengths for filtering in GIS
rd = recfunctions.append_fields(
- rd,
- names=["length"],
- data=[lengths],
- usemask=False,
- asrecarray=True,
+ rd, names=["length"], data=[lengths], usemask=False, asrecarray=True
)
recarray2shp(rd, geoms, f, **kwargs)
@@ -2115,12 +2016,14 @@ class check:
Daniel Feinstein's top 10 SFR problems (7/16/2014):
1) cell gaps btw adjacent reaches in a single segment
- 2) cell gaps btw routed segments. possibly because of re-entry problems at domain edge
+ 2) cell gaps btw routed segments. possibly because of re-entry problems at
+ domain edge
3) adjacent reaches with STOP sloping the wrong way
4) routed segments with end/start sloping the wrong way
    5) STOP>TOP1 violations, i.e., floaters
    6) STOP<<TOP1 violations, i.e., exaggerated incisions

             if len(circular_segs) > 0:
- txt += "{} instances where an outlet was not found after {} consecutive segments!\n".format(
- len(circular_segs), self.sfr.nss
+ txt += (
+ f"{len(circular_segs)} instances where an outlet was "
+ f"not found after {self.sfr.nss} consecutive segments!\n"
)
if self.level == 1:
txt += " ".join(map(str, circular_segs)) + "\n"
else:
- f = os.path.join(
- self.sfr.parent._model_ws, "circular_routing.chk.csv"
- )
- np.savetxt(
- f, circular_segs, fmt="%d", delimiter=",", header=txt
- )
+ f = os.path.join(self.sfr.parent._model_ws, "circular_routing.chk.csv")
+ np.savetxt(f, circular_segs, fmt="%d", delimiter=",", header=txt)
txt += f"See {f} for details."
if self.verbose:
print(txt)
@@ -2424,8 +2319,7 @@ def routing(self):
txt += " ".join(map(str, segments_with_breaks)) + "\n"
else:
fpath = os.path.join(
- self.sfr.parent._model_ws,
- "reach_connection_gaps.chk.csv",
+ self.sfr.parent._model_ws, "reach_connection_gaps.chk.csv"
)
with open(fpath, "w") as fp:
fp.write(",".join(rd.dtype.names) + "\n")
@@ -2433,14 +2327,9 @@ def routing(self):
txt += f"See {fpath} for details."
if self.verbose:
print(txt)
- self._txt_footer(
- headertxt, txt, "reach connections", warning=False
- )
+ self._txt_footer(headertxt, txt, "reach connections", warning=False)
else:
- txt += (
- "No DIS package or modelgrid object; cannot "
- "check reach proximities."
- )
+ txt += "No DIS package or modelgrid object; cannot check reach proximities."
self._txt_footer(headertxt, txt, "")
def overlapping_conductance(self, tol=1e-6):
@@ -2450,8 +2339,7 @@ def overlapping_conductance(self, tol=1e-6):
"""
headertxt = (
- "Checking for model cells with multiple non-zero "
- "SFR conductances...\n"
+ "Checking for model cells with multiple non-zero SFR conductances...\n"
)
txt = ""
if self.verbose:
@@ -2467,9 +2355,7 @@ def overlapping_conductance(self, tol=1e-6):
for i, (r, c) in enumerate(reach_data[["i", "j"]]):
if (r, c) not in uniquerc:
uniquerc[(r, c)] = i + 1
- reach_data["node"] = [
- uniquerc[(r, c)] for r, c in reach_data[["i", "j"]]
- ]
+ reach_data["node"] = [uniquerc[(r, c)] for r, c in reach_data[["i", "j"]]]
K = reach_data["strhc1"]
if K.max() == 0:
@@ -2495,9 +2381,7 @@ def overlapping_conductance(self, tol=1e-6):
conductances.sort()
# list nodes with multiple non-zero SFR reach conductances
- if conductances[-1] != 0.0 and (
- conductances[0] / conductances[-1] > tol
- ):
+ if conductances[-1] != 0.0 and (conductances[0] / conductances[-1] > tol):
nodes_with_multiple_conductance.update({node})
if len(nodes_with_multiple_conductance) > 0:
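The ratio test above flags a node only when its smallest conductance is
non-negligible relative to its largest, e.g. with tol = 1e-6:

    # conductances = [0.0, 5.0] -> 0.0 / 5.0 = 0.0  (not flagged)
    # conductances = [2.0, 5.0] -> 2.0 / 5.0 = 0.4  (flagged: two real conductances)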
@@ -2555,9 +2439,7 @@ def elevations(self, min_strtop=-10, max_strtop=15000):
with model grid
"""
- headertxt = (
- f"Checking for streambed tops of less than {min_strtop}...\n"
- )
+ headertxt = f"Checking for streambed tops of less than {min_strtop}...\n"
txt = ""
if self.verbose:
print(headertxt.strip())
@@ -2570,8 +2452,10 @@ def elevations(self, min_strtop=-10, max_strtop=15000):
is_less = self.reach_data.strtop < min_strtop
if np.any(is_less):
below_minimum = self.reach_data[is_less]
- txt += "{} instances of streambed top below minimum found.\n".format(
- len(below_minimum)
+ txt += (
+ "{} instances of streambed top below minimum found.\n".format(
+ len(below_minimum)
+ )
)
if self.level == 1:
txt += "Reaches with low strtop:\n"
@@ -2583,9 +2467,7 @@ def elevations(self, min_strtop=-10, max_strtop=15000):
passed = True
self._txt_footer(headertxt, txt, "minimum streambed top", passed)
- headertxt = (
- f"Checking for streambed tops of greater than {max_strtop}...\n"
- )
+ headertxt = f"Checking for streambed tops of greater than {max_strtop}...\n"
txt = ""
if self.verbose:
print(headertxt.strip())
@@ -2593,10 +2475,7 @@ def elevations(self, min_strtop=-10, max_strtop=15000):
passed = False
if self.sfr.isfropt in [1, 2, 3]:
if np.diff(self.reach_data.strtop).max() == 0:
- txt += (
- "isfropt setting of 1,2 or 3 "
- "requires strtop information!\n"
- )
+ txt += "isfropt setting of 1,2 or 3 requires strtop information!\n"
else:
is_greater = self.reach_data.strtop > max_strtop
if np.any(is_greater):
@@ -2616,8 +2495,7 @@ def elevations(self, min_strtop=-10, max_strtop=15000):
self._txt_footer(headertxt, txt, "maximum streambed top", passed)
headertxt = (
- "Checking segment_data for "
- "downstream rises in streambed elevation...\n"
+ "Checking segment_data for downstream rises in streambed elevation...\n"
)
txt = ""
if self.verbose:
@@ -2636,16 +2514,10 @@ def elevations(self, min_strtop=-10, max_strtop=15000):
# enforce consecutive increasing segment numbers (for indexing)
segment_data.sort(order="nseg")
t = _check_numbers(
- len(segment_data),
- segment_data.nseg,
- level=1,
- datatype="Segment",
+ len(segment_data), segment_data.nseg, level=1, datatype="Segment"
)
if len(t) > 0:
- txt += (
- "Elevation check requires "
- "consecutive segment numbering."
- )
+ txt += "Elevation check requires consecutive segment numbering."
self._txt_footer(headertxt, txt, "")
return
@@ -2667,15 +2539,9 @@ def elevations(self, min_strtop=-10, max_strtop=15000):
# next check for rises between segments
non_outlets = segment_data.outseg > 0
- non_outlets_seg_data = segment_data[
- non_outlets
- ] # lake outsegs are < 0
+ non_outlets_seg_data = segment_data[non_outlets] # lake outsegs are < 0
outseg_elevup = np.array(
- [
- segment_data.elevup[o - 1]
- for o in segment_data.outseg
- if o > 0
- ]
+ [segment_data.elevup[o - 1] for o in segment_data.outseg if o > 0]
)
d_elev2 = outseg_elevup - segment_data.elevdn[non_outlets]
non_outlets_seg_data = recfunctions.append_fields(
@@ -2688,13 +2554,7 @@ def elevations(self, min_strtop=-10, max_strtop=15000):
txt += self._boolean_compare(
non_outlets_seg_data[
- [
- "nseg",
- "outseg",
- "elevdn",
- "outseg_elevup",
- "d_elev2",
- ]
+ ["nseg", "outseg", "elevdn", "outseg_elevup", "d_elev2"]
],
col1="d_elev2",
col2=np.zeros(len(non_outlets_seg_data)),
@@ -2715,18 +2575,16 @@ def elevations(self, min_strtop=-10, max_strtop=15000):
self._txt_footer(headertxt, txt, "segment elevations", passed)
headertxt = (
- "Checking reach_data for "
- "downstream rises in streambed elevation...\n"
+ "Checking reach_data for downstream rises in streambed elevation...\n"
)
txt = ""
if self.verbose:
print(headertxt.strip())
passed = False
- if (
- self.sfr.nstrm < 0
- or self.sfr.reachinput
- and self.sfr.isfropt in [1, 2, 3]
- ): # see SFR input instructions
+ if self.sfr.nstrm < 0 or (
+ self.sfr.reachinput and self.sfr.isfropt in [1, 2, 3]
+ ):
+ # see SFR input instructions
# compute outreaches if they aren't there already
if np.diff(self.sfr.reach_data.outreach).max() == 0:
self.sfr.set_outreaches()
@@ -2746,12 +2604,12 @@ def elevations(self, min_strtop=-10, max_strtop=15000):
]
)
- reach_data = (
- self.sfr.reach_data
- ) # inconsistent with other checks that work with
- # reach_data attribute of check class. Want to have get_outreaches as a method of sfr class
- # (for other uses). Not sure if other check methods should also copy reach_data directly from
+ # inconsistent with other checks that work with
+ # reach_data attribute of check class. Want to have get_outreaches
+ # as a method of sfr class (for other uses). Not sure if other
+ # check methods should also copy reach_data directly from
# SFR package instance for consistency.
+ reach_data = self.sfr.reach_data
rd = recfunctions.append_fields(
rd,
@@ -2777,33 +2635,35 @@ def elevations(self, min_strtop=-10, max_strtop=15000):
],
col1="d_strtop",
col2=np.zeros(len(rd)),
- level0txt="{} reaches encountered with strtop < strtop of downstream reach.",
+ level0txt="{} reaches encountered with strtop < strtop of downstream reach.", # noqa
level1txt="Elevation rises:",
)
if len(txt) == 0:
passed = True
else:
- txt += "Reach strtop not specified for nstrm={}, reachinput={} and isfropt={}\n".format(
- self.sfr.nstrm, self.sfr.reachinput, self.sfr.isfropt
+ txt += (
+ f"Reach strtop not specified for nstrm={self.sfr.nstrm}, "
+ f"reachinput={self.sfr.reachinput} and isfropt={self.sfr.isfropt}\n"
)
passed = True
self._txt_footer(headertxt, txt, "reach elevations", passed)
- headertxt = "Checking reach_data for inconsistencies between streambed elevations and the model grid...\n"
+ headertxt = "Checking reach_data for inconsistencies between streambed elevations and the model grid...\n" # noqa
if self.verbose:
print(headertxt.strip())
txt = ""
if self.sfr.parent.dis is None:
- txt += "No DIS file supplied; cannot check SFR elevations against model grid."
+ txt += (
+ "No DIS file supplied; cannot check SFR elevations against model grid."
+ )
self._txt_footer(headertxt, txt, "")
return
passed = False
warning = True
- if (
- self.sfr.nstrm < 0
- or self.sfr.reachinput
- and self.sfr.isfropt in [1, 2, 3]
- ): # see SFR input instructions
+ if self.sfr.nstrm < 0 or (
+ self.sfr.reachinput and self.sfr.isfropt in [1, 2, 3]
+ ):
+ # see SFR input instructions
reach_data = np.array(self.reach_data)
i, j, k = reach_data["i"], reach_data["j"], reach_data["k"]
@@ -2835,21 +2695,15 @@ def elevations(self, min_strtop=-10, max_strtop=15000):
],
col1="layerbot",
col2="strbot",
- level0txt="{} reaches encountered with streambed bottom below layer bottom.",
+ level0txt="{} reaches encountered with streambed bottom below layer bottom.", # noqa
level1txt="Layer bottom violations:",
)
if len(txt) > 0:
- warning = (
- False # this constitutes an error (MODFLOW won't run)
- )
+ warning = False # this constitutes an error (MODFLOW won't run)
# check streambed elevations in relation to model top
tops = self.sfr.parent.dis.top.array[i, j]
reach_data = recfunctions.append_fields(
- reach_data,
- names="modeltop",
- data=tops,
- usemask=False,
- asrecarray=False,
+ reach_data, names="modeltop", data=tops, usemask=False, asrecarray=False
)
txt += self._boolean_compare(
@@ -2875,8 +2729,9 @@ def elevations(self, min_strtop=-10, max_strtop=15000):
if len(txt) == 0:
passed = True
else:
- txt += "Reach strtop, strthick not specified for nstrm={}, reachinput={} and isfropt={}\n".format(
- self.sfr.nstrm, self.sfr.reachinput, self.sfr.isfropt
+ txt += (
+ f"Reach strtop, strthick not specified for nstrm={self.sfr.nstrm}, "
+ f"reachinput={self.sfr.reachinput} and isfropt={self.sfr.isfropt}\n"
)
passed = True
self._txt_footer(
@@ -2909,10 +2764,7 @@ def elevations(self, min_strtop=-10, max_strtop=15000):
# enforce consecutive increasing segment numbers (for indexing)
segment_data.sort(order="nseg")
t = _check_numbers(
- len(segment_data),
- segment_data.nseg,
- level=1,
- datatype="Segment",
+ len(segment_data), segment_data.nseg, level=1, datatype="Segment"
)
if len(t) > 0:
raise Exception(
@@ -2942,16 +2794,7 @@ def elevations(self, min_strtop=-10, max_strtop=15000):
txt += self._boolean_compare(
segment_ends[
- [
- "k",
- "i",
- "j",
- "iseg",
- "strtop",
- "modeltop",
- "diff",
- "reachID",
- ]
+ ["k", "i", "j", "iseg", "strtop", "modeltop", "diff", "reachID"]
].copy(),
col1=np.zeros(len(segment_ends)),
col2="diff",
@@ -2962,22 +2805,19 @@ def elevations(self, min_strtop=-10, max_strtop=15000):
if len(txt) == 0:
passed = True
else:
- txt += "Segment elevup and elevdn not specified for nstrm={} and isfropt={}\n".format(
- self.sfr.nstrm, self.sfr.isfropt
+ txt += (
+ f"Segment elevup and elevdn not specified for nstrm={self.sfr.nstrm} "
+ f"and isfropt={self.sfr.isfropt}\n"
)
passed = True
- self._txt_footer(
- headertxt, txt, "segment elevations vs. model grid", passed
- )
+ self._txt_footer(headertxt, txt, "segment elevations vs. model grid", passed)
def slope(self, minimum_slope=1e-4, maximum_slope=1.0):
- """Checks that streambed slopes are greater than or equal to a specified minimum value.
- Low slope values can cause "backup" or unrealistic stream stages with icalc options
- where stage is computed.
+ """Checks that streambed slopes are greater than or equal to a
+ specified minimum value. Low slope values can cause "backup" or
+ unrealistic stream stages with icalc options where stage is computed.
"""
- headertxt = (
- f"Checking for streambed slopes of less than {minimum_slope}...\n"
- )
+ headertxt = f"Checking for streambed slopes of less than {minimum_slope}...\n"
txt = ""
if self.verbose:
print(headertxt.strip())
@@ -2985,15 +2825,14 @@ def slope(self, minimum_slope=1e-4, maximum_slope=1.0):
passed = False
if self.sfr.isfropt in [1, 2, 3]:
if np.diff(self.reach_data.slope).max() == 0:
- txt += (
- "isfropt setting of 1,2 or 3 requires slope information!\n"
- )
+ txt += "isfropt setting of 1,2 or 3 requires slope information!\n"
else:
is_less = self.reach_data.slope < minimum_slope
if np.any(is_less):
below_minimum = self.reach_data[is_less]
- txt += "{} instances of streambed slopes below minimum found.\n".format(
- len(below_minimum)
+ txt += (
+ f"{len(below_minimum)} instances of streambed slopes "
+ "below minimum found.\n"
)
if self.level == 1:
txt += "Reaches with low slopes:\n"
@@ -3005,7 +2844,9 @@ def slope(self, minimum_slope=1e-4, maximum_slope=1.0):
passed = True
self._txt_footer(headertxt, txt, "minimum slope", passed)
- headertxt = f"Checking for streambed slopes of greater than {maximum_slope}...\n"
+ headertxt = (
+ f"Checking for streambed slopes of greater than {maximum_slope}...\n"
+ )
txt = ""
if self.verbose:
print(headertxt.strip())
@@ -3013,16 +2854,15 @@ def slope(self, minimum_slope=1e-4, maximum_slope=1.0):
passed = False
if self.sfr.isfropt in [1, 2, 3]:
if np.diff(self.reach_data.slope).max() == 0:
- txt += (
- "isfropt setting of 1,2 or 3 requires slope information!\n"
- )
+ txt += "isfropt setting of 1,2 or 3 requires slope information!\n"
else:
is_greater = self.reach_data.slope > maximum_slope
if np.any(is_greater):
above_max = self.reach_data[is_greater]
- txt += "{} instances of streambed slopes above maximum found.\n".format(
- len(above_max)
+ txt += (
+ f"{len(above_max)} instances of streambed slopes "
+ "above maximum found.\n"
)
if self.level == 1:
txt += "Reaches with high slopes:\n"
@@ -3115,9 +2955,8 @@ def _get_duplicates(a):
method https://stackoverflow.com/q/11528078/
"""
s = np.sort(a, axis=None)
- equal_to_previous_item = np.append(
- s[1:] == s[:-1], False
- ) # maintain same dimension for boolean array
+ # maintain same dimension for boolean array
+ equal_to_previous_item = np.append(s[1:] == s[:-1], False)
return np.unique(s[equal_to_previous_item])
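Worked example of the sort-and-shift duplicate detection above:

    # _get_duplicates(np.array([3, 1, 2, 3, 2, 2]))
    # s = [1, 2, 2, 2, 3, 3]; mask = [F, T, T, F, T, F]
    # -> np.unique([2, 2, 3]) = array([2, 3])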
@@ -3168,9 +3007,9 @@ def _fmt_string_list(array, float_format="{!s}"):
float_format = "{!s}"
elif vtype == "s":
raise ValueError(
- "'str' type found in dtype for {!r}. "
+ f"'str' type found in dtype for {name!r}. "
"This gives unpredictable results when "
- "recarray to file - change to 'object' type".format(name)
+ "recarray to file - change to 'object' type"
)
else:
raise ValueError(f"unknown dtype for {name!r}: {vtype!r}")
@@ -3266,11 +3105,7 @@ def _parse_1c(line, reachinput, transroute):
flwtol = float(line.pop(0))
# auxiliary variables (MODFLOW-LGR)
- option = [
- line[i]
- for i in np.arange(1, len(line))
- if "aux" in line[i - 1].lower()
- ]
+ option = [line[i] for i in np.arange(1, len(line)) if "aux" in line[i - 1].lower()]
return (
nstrm,
@@ -3412,8 +3247,8 @@ def _parse_6bc(line, icalc, nstrm, isfropt, reachinput, per=0):
thickm = line.pop(0)
elevupdn = line.pop(0)
if isfropt in [4, 5] and per == 0:
- # table in online guide suggests that the following items should be present in this case
- # but in the example
+ # table in online guide suggests that the following items
+ # should be present in this case but in the example
thts = _pop_item(line)
thti = _pop_item(line)
eps = _pop_item(line)
@@ -3470,7 +3305,7 @@ def _find_path(graph, start, end=0, path=None):
dictionary (graph) so that the recursion works.
"""
if path is None:
- path = list()
+ path = []
path = path + [start]
if start == end:
return path
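On a toy routing graph the recursion above unwinds as (assuming the usual
nseg -> outseg mapping built by _make_graph):

    # graph = {1: 2, 2: 3, 3: 0}
    # _find_path(graph, 1) -> [1, 2, 3, 0]
    # each call appends `start`, then follows graph[start] until end=0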
diff --git a/flopy/modflow/mfsip.py b/flopy/modflow/mfsip.py
index 096a0c7a55..ba2c30f4b8 100644
--- a/flopy/modflow/mfsip.py
+++ b/flopy/modflow/mfsip.py
@@ -148,11 +148,7 @@ def write_file(self):
f.write(f"{self.mxiter:10d}{self.nparm:10d}\n")
f.write(
"{:10.3f}{:10.3g}{:10d}{:10.3f}{:10d}\n".format(
- self.accl,
- self.hclose,
- self.ipcalc,
- self.wseed,
- self.iprsip,
+ self.accl, self.hclose, self.ipcalc, self.wseed, self.iprsip
)
)
f.close()
diff --git a/flopy/modflow/mfsor.py b/flopy/modflow/mfsor.py
index 720bfc36d5..af0c5b5cfd 100644
--- a/flopy/modflow/mfsor.py
+++ b/flopy/modflow/mfsor.py
@@ -165,10 +165,7 @@ def load(cls, f, model, ext_unit_dict=None):
# dataset 0 -- header
- print(
- " Warning: load method not completed. "
- "Default sor object created."
- )
+ print(" Warning: load method not completed. Default sor object created.")
if openfile:
f.close()
diff --git a/flopy/modflow/mfstr.py b/flopy/modflow/mfstr.py
index 572efa7e11..214028ca53 100644
--- a/flopy/modflow/mfstr.py
+++ b/flopy/modflow/mfstr.py
@@ -219,7 +219,7 @@ class ModflowStr(Package):
>>> #applied to all stress periods
>>> str = flopy.modflow.ModflowStr(m, stress_period_data=strd)
- """
+ """ # noqa
def __init__(
self,
@@ -254,9 +254,7 @@ def __init__(
self.set_cbc_output_file(ipakcb, model, filenames[1])
if istcb2 is not None:
- model.add_output_file(
- istcb2, fname=filenames[2], package=self._ftype()
- )
+ model.add_output_file(istcb2, fname=filenames[2], package=self._ftype())
else:
ipakcb = 0
@@ -283,7 +281,7 @@ def __init__(
if ntrib > 10:
raise Exception(
"ModflowStr error: ntrib must be less that 10: "
- "specified value = {}".format(ntrib)
+ f"specified value = {ntrib}"
)
if options is None:
@@ -366,8 +364,8 @@ def __init__(
d = d.to_records(index=False)
if isinstance(d, np.recarray):
e = (
- "ModflowStr error: recarray dtype: {} does not match "
- "self dtype: {}".format(d.dtype, self.dtype)
+ f"ModflowStr error: recarray dtype: {d.dtype} "
+ f"does not match self dtype: {self.dtype}"
)
assert d.dtype == self.dtype, e
elif isinstance(d, np.ndarray):
@@ -375,16 +373,13 @@ def __init__(
elif isinstance(d, int):
if model.verbose:
if d < 0:
- print(
- " reusing str data from previous "
- "stress period"
- )
+ print(" reusing str data from previous stress period")
elif d == 0:
print(f" no str data for stress period {key}")
else:
raise Exception(
"ModflowStr error: unsupported data type: "
- "{} at kper {}".format(type(d), key)
+ f"{type(d)} at kper {key}"
)
# add stress_period_data to package
@@ -397,8 +392,8 @@ def __init__(
d = np.array(d)
if isinstance(d, np.recarray):
e = (
- "ModflowStr error: recarray dtype: {} does not match "
- "self dtype: {}".format(d.dtype, self.dtype2)
+ f"ModflowStr error: recarray dtype: {d.dtype} "
+ f"does not match self dtype: {self.dtype2}"
)
assert d.dtype == self.dtype2, e
elif isinstance(d, np.ndarray):
@@ -411,13 +406,11 @@ def __init__(
"from previous stress period"
)
elif d == 0:
- print(
- f" no str segment data for stress period {key}"
- )
+ print(f" no str segment data for stress period {key}")
else:
raise Exception(
"ModflowStr error: unsupported data type: "
- "{} at kper {}".format(type(d), key)
+ f"{type(d)} at kper {key}"
)
# add segment_data to package
@@ -612,16 +605,12 @@ def write_file(self):
ds9 = []
for idx in range(self.ntrib):
ds9.append(line[idx])
- f_str.write(
- write_fixed_var(ds9, length=fmt9, free=free)
- )
+ f_str.write(write_fixed_var(ds9, length=fmt9, free=free))
# dataset 10
if self.ndiv > 0:
for line in sdata:
- f_str.write(
- write_fixed_var([line[-1]], length=10, free=free)
- )
+ f_str.write(write_fixed_var([line[-1]], length=10, free=free))
# close the str file
f_str.close()
@@ -758,9 +747,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
dt = ModflowStr.get_empty(
1, aux_names=aux_names, structured=model.structured
).dtype
- pak_parms = mfparbc.load(
- f, npstr, dt, model, ext_unit_dict, model.verbose
- )
+ pak_parms = mfparbc.load(f, npstr, dt, model, ext_unit_dict, model.verbose)
if nper is None:
nper = model.nper
@@ -834,26 +821,19 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
parval = float(par_dict["parval"])
else:
try:
- parval = float(
- model.mfpar.pval.pval_dict[pname]
- )
+ parval = float(model.mfpar.pval.pval_dict[pname])
except:
parval = float(par_dict["parval"])
# fill current parameter data (par_current)
for ibnd, t in enumerate(data_dict):
- current[ibnd] = tuple(
- t[: len(current.dtype.names)]
- )
+ current[ibnd] = tuple(t[: len(current.dtype.names)])
else:
if model.verbose:
print(" reading str dataset 6")
current, current_seg = ModflowStr.get_empty(
- itmp,
- nss,
- aux_names=aux_names,
- structured=model.structured,
+ itmp, nss, aux_names=aux_names, structured=model.structured
)
for ibnd in range(itmp):
line = f.readline()
@@ -942,9 +922,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
ext_unit_dict, filetype=ModflowStr._ftype()
)
if ipakcb > 0:
- iu, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=ipakcb
- )
+ iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
if abs(istcb2) > 0:
iu, filenames[2] = model.get_ext_dict_attr(
ext_unit_dict, unit=abs(istcb2)
diff --git a/flopy/modflow/mfsub.py b/flopy/modflow/mfsub.py
index 3a76639cf5..158d4cee37 100644
--- a/flopy/modflow/mfsub.py
+++ b/flopy/modflow/mfsub.py
@@ -259,10 +259,7 @@ def __init__(
if idsave is not None:
model.add_output_file(
- idsave,
- fname=filenames[2],
- extension="rst",
- package=self._ftype(),
+ idsave, fname=filenames[2], extension="rst", package=self._ftype()
)
else:
idsave = 0
@@ -473,9 +470,7 @@ def write_file(self, check=False, f=None):
f"{self.ipakcb} {self.isuboc} {self.nndb} {self.ndb} {self.nmz} {self.nn} "
)
- f.write(
- f"{self.ac1} {self.ac2} {self.itmin} {self.idsave} {self.idrest}"
- )
+ f.write(f"{self.ac1} {self.ac2} {self.itmin} {self.idsave} {self.idrest}")
line = ""
if self.idbit is not None:
line += f" {self.idbit}"
@@ -654,48 +649,28 @@ def load(cls, f, model, ext_unit_dict=None):
if model.verbose:
print(f" loading sub dataset 5 for layer {kk}")
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- f"hc layer {kk}",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, f"hc layer {kk}", ext_unit_dict
)
hc[k] = t
# sfe
if model.verbose:
print(f" loading sub dataset 6 for layer {kk}")
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- f"sfe layer {kk}",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, f"sfe layer {kk}", ext_unit_dict
)
sfe[k] = t
# sfv
if model.verbose:
print(f" loading sub dataset 7 for layer {kk}")
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- f"sfv layer {kk}",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, f"sfv layer {kk}", ext_unit_dict
)
sfv[k] = t
# com
if model.verbose:
print(f" loading sub dataset 8 for layer {kk}")
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- f"com layer {kk}",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, f"com layer {kk}", ext_unit_dict
)
com[k] = t
@@ -739,12 +714,7 @@ def load(cls, f, model, ext_unit_dict=None):
if model.verbose:
print(f" loading sub dataset 11 for layer {kk}")
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- f"dhc layer {kk}",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, f"dhc layer {kk}", ext_unit_dict
)
dhc[k] = t
# dcom
@@ -763,24 +733,14 @@ def load(cls, f, model, ext_unit_dict=None):
if model.verbose:
print(f" loading sub dataset 13 for layer {kk}")
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- f"dz layer {kk}",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, f"dz layer {kk}", ext_unit_dict
)
dz[k] = t
# nz
if model.verbose:
print(f" loading sub dataset 14 for layer {kk}")
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.int32,
- f"nz layer {kk}",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.int32, f"nz layer {kk}", ext_unit_dict
)
nz[k] = t
@@ -815,14 +775,10 @@ def load(cls, f, model, ext_unit_dict=None):
ext_unit_dict, filetype=ModflowSub._ftype()
)
if ipakcb > 0:
- iu, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=ipakcb
- )
+ iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
if idsave > 0:
- iu, filenames[2] = model.get_ext_dict_attr(
- ext_unit_dict, unit=idsave
- )
+ iu, filenames[2] = model.get_ext_dict_attr(ext_unit_dict, unit=idsave)
if isuboc > 0:
ipos = 3
diff --git a/flopy/modflow/mfswi2.py b/flopy/modflow/mfswi2.py
index bc783208da..92a9a66ec3 100644
--- a/flopy/modflow/mfswi2.py
+++ b/flopy/modflow/mfswi2.py
@@ -234,10 +234,7 @@ def __init__(
# update external file information with zeta output, if necessary
if iswizt is not None:
model.add_output_file(
- iswizt,
- fname=filenames[1],
- extension="zta",
- package=self._ftype(),
+ iswizt, fname=filenames[1], extension="zta", package=self._ftype()
)
else:
iswizt = 0
@@ -271,7 +268,7 @@ def __init__(
if len(obsnam) != nobs:
raise Exception(
"ModflowSwi2: obsnam must be a list with a "
- "length of {} not {}.".format(nobs, len(obsnam))
+ f"length of {nobs} not {len(obsnam)}."
)
if nobs > 0:
@@ -352,27 +349,17 @@ def __init__(
# Create arrays so that they have the correct size
if self.istrat == 1:
- self.nu = Util2d(
- model, (self.nsrf + 1,), np.float32, nu, name="nu"
- )
+ self.nu = Util2d(model, (self.nsrf + 1,), np.float32, nu, name="nu")
else:
- self.nu = Util2d(
- model, (self.nsrf + 2,), np.float32, nu, name="nu"
- )
+ self.nu = Util2d(model, (self.nsrf + 2,), np.float32, nu, name="nu")
self.zeta = []
for i in range(self.nsrf):
self.zeta.append(
Util3d(
- model,
- (nlay, nrow, ncol),
- np.float32,
- zeta[i],
- name=f"zeta_{i + 1}",
+ model, (nlay, nrow, ncol), np.float32, zeta[i], name=f"zeta_{i + 1}"
)
)
- self.ssz = Util3d(
- model, (nlay, nrow, ncol), np.float32, ssz, name="ssz"
- )
+ self.ssz = Util3d(model, (nlay, nrow, ncol), np.float32, ssz, name="ssz")
self.isource = Util3d(
model, (nlay, nrow, ncol), np.int32, isource, name="isource"
)
@@ -451,9 +438,7 @@ def write_file(self, check=True, f=None):
# write dataset 3b
if self.adaptive is True:
f.write("# Dataset 3b\n")
- f.write(
- f"{self.nadptmx:10d}{self.nadptmn:10d}{self.adptfct:14.6g}\n"
- )
+ f.write(f"{self.nadptmx:10d}{self.nadptmn:10d}{self.adptfct:14.6g}\n")
# write dataset 4
f.write("# Dataset 4\n")
f.write(self.nu.get_file_entry())
@@ -653,12 +638,7 @@ def load(cls, f, model, ext_unit_dict=None):
ctxt = f"zeta_surf{n + 1:02d}"
zeta.append(
Util3d.load(
- f,
- model,
- (nlay, nrow, ncol),
- np.float32,
- ctxt,
- ext_unit_dict,
+ f, model, (nlay, nrow, ncol), np.float32, ctxt, ext_unit_dict
)
)
@@ -723,13 +703,9 @@ def load(cls, f, model, ext_unit_dict=None):
ext_unit_dict, filetype=ModflowSwi2._ftype()
)
if iswizt > 0:
- iu, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=iswizt
- )
+ iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=iswizt)
if ipakcb > 0:
- iu, filenames[2] = model.get_ext_dict_attr(
- ext_unit_dict, unit=ipakcb
- )
+ iu, filenames[2] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
if abs(iswiobs) > 0:
iu, filenames[3] = model.get_ext_dict_attr(
ext_unit_dict, unit=abs(iswiobs)
diff --git a/flopy/modflow/mfswr1.py b/flopy/modflow/mfswr1.py
index ae628a9c52..e6100e3006 100644
--- a/flopy/modflow/mfswr1.py
+++ b/flopy/modflow/mfswr1.py
@@ -55,9 +55,7 @@ class ModflowSwr1(Package):
"""
- def __init__(
- self, model, extension="swr", unitnumber=None, filenames=None
- ):
+ def __init__(self, model, extension="swr", unitnumber=None, filenames=None):
# set default unit number of one is not specified
if unitnumber is None:
unitnumber = ModflowSwr1._defaultunit()
@@ -141,9 +139,7 @@ def load(cls, f, model, ext_unit_dict=None):
filename = f
f = open(filename, "r")
- print(
- "Warning: load method not completed. default swr1 object created."
- )
+ print("Warning: load method not completed. default swr1 object created.")
if openfile:
f.close()
diff --git a/flopy/modflow/mfswt.py b/flopy/modflow/mfswt.py
index 3a43cbfe9c..06bb287d9a 100644
--- a/flopy/modflow/mfswt.py
+++ b/flopy/modflow/mfswt.py
@@ -631,18 +631,7 @@ def load(cls, f, model, ext_unit_dict=None):
print(" loading swt dataset 3")
line = f.readline()
t = line.strip().split()
- (
- iizcfl,
- izcfm,
- iglfl,
- iglfm,
- iestfl,
- iestfm,
- ipcsfl,
- ipcsfm,
- istfl,
- istfm,
- ) = (
+ (iizcfl, izcfm, iglfl, iglfm, iestfl, iestfm, ipcsfl, ipcsfm, istfl, istfm) = (
int(t[0]),
int(t[1]),
int(t[2]),
@@ -658,23 +647,17 @@ def load(cls, f, model, ext_unit_dict=None):
# read dataset 4
if model.verbose:
print(" loading swt dataset 4")
- gl0 = Util2d.load(
- f, model, (nrow, ncol), np.float32, "gl0", ext_unit_dict
- )
+ gl0 = Util2d.load(f, model, (nrow, ncol), np.float32, "gl0", ext_unit_dict)
# read dataset 5
if model.verbose:
print(" loading swt dataset 5")
- sgm = Util2d.load(
- f, model, (nrow, ncol), np.float32, "sgm", ext_unit_dict
- )
+ sgm = Util2d.load(f, model, (nrow, ncol), np.float32, "sgm", ext_unit_dict)
# read dataset 6
if model.verbose:
print(" loading swt dataset 6")
- sgs = Util2d.load(
- f, model, (nrow, ncol), np.float32, "sgs", ext_unit_dict
- )
+ sgs = Util2d.load(f, model, (nrow, ncol), np.float32, "sgs", ext_unit_dict)
# read datasets 7 to 13
thick = [0] * nsystm
@@ -697,12 +680,7 @@ def load(cls, f, model, ext_unit_dict=None):
if model.verbose:
print(f" loading swt dataset 7 for layer {kk}")
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- f"thick layer {kk}",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, f"thick layer {kk}", ext_unit_dict
)
thick[k] = t
if icrcc != 0:
@@ -710,24 +688,14 @@ def load(cls, f, model, ext_unit_dict=None):
if model.verbose:
print(f" loading swt dataset 8 for layer {kk}")
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- f"sse layer {kk}",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, f"sse layer {kk}", ext_unit_dict
)
sse[k] = t
# ssv
if model.verbose:
print(f" loading swt dataset 9 for layer {kk}")
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- f"sse layer {kk}",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, f"sse layer {kk}", ext_unit_dict
)
ssv[k] = t
else:
@@ -735,48 +703,28 @@ def load(cls, f, model, ext_unit_dict=None):
if model.verbose:
print(f" loading swt dataset 10 for layer {kk}")
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- f"cr layer {kk}",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, f"cr layer {kk}", ext_unit_dict
)
cr[k] = t
# cc
if model.verbose:
print(f" loading swt dataset 11 for layer {kk}")
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- f"cc layer {kk}",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, f"cc layer {kk}", ext_unit_dict
)
cc[k] = t
# void
if model.verbose:
print(f" loading swt dataset 12 for layer {kk}")
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- f"void layer {kk}",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, f"void layer {kk}", ext_unit_dict
)
void[k] = t
# sub
if model.verbose:
print(f" loading swt dataset 13 for layer {kk}")
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- f"sub layer {kk}",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, f"sub layer {kk}", ext_unit_dict
)
sub[k] = t
@@ -842,9 +790,7 @@ def load(cls, f, model, ext_unit_dict=None):
ext_unit_dict, filetype=ModflowSwt._ftype()
)
if ipakcb > 0:
- iu, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=ipakcb
- )
+ iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
if iswtoc > 0:
ipos = 2
diff --git a/flopy/modflow/mfupw.py b/flopy/modflow/mfupw.py
index 35da223f3b..3722b1f9ad 100644
--- a/flopy/modflow/mfupw.py
+++ b/flopy/modflow/mfupw.py
@@ -161,8 +161,7 @@ def __init__(
):
if model.version != "mfnwt":
raise Exception(
- "Error: model version must be mfnwt to use "
- f"{self._ftype()} package"
+ f"Error: model version must be mfnwt to use {self._ftype()} package"
)
# set default unit number of one is not specified
@@ -275,11 +274,7 @@ def write_file(self, check=True, f=None):
"""
# allows turning off package checks when writing files at model level
if check:
- self.check(
- f=f"{self.name[0]}.chk",
- verbose=self.parent.verbose,
- level=1,
- )
+ self.check(f=f"{self.name[0]}.chk", verbose=self.parent.verbose, level=1)
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
if f is not None:
f_upw = f
@@ -363,8 +358,8 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
if model.version != "mfnwt":
print(
- "Warning: model version was reset from '{}' to 'mfnwt' "
- "in order to load a UPW file".format(model.version)
+ f"Warning: model version was reset from '{model.version}' "
+ "to 'mfnwt' in order to load a UPW file"
)
model.version = "mfnwt"
@@ -384,12 +379,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
if model.verbose:
print(" loading ipakcb, HDRY, NPUPW, IPHDRY...")
t = line_parse(line)
- ipakcb, hdry, npupw, iphdry = (
- int(t[0]),
- float(t[1]),
- int(t[2]),
- int(t[3]),
- )
+ ipakcb, hdry, npupw, iphdry = (int(t[0]), float(t[1]), int(t[2]), int(t[3]))
# options
noparcheck = False
@@ -452,9 +442,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
if model.verbose:
print(f" loading hk layer {k + 1:3d}...")
if "hk" not in par_types:
- t = Util2d.load(
- f, model, (nrow, ncol), np.float32, "hk", ext_unit_dict
- )
+ t = Util2d.load(f, model, (nrow, ncol), np.float32, "hk", ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(
@@ -468,12 +456,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
print(f" loading hani layer {k + 1:3d}...")
if "hani" not in par_types:
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- "hani",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, "hani", ext_unit_dict
)
else:
line = f.readline()
@@ -489,9 +472,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
if layvka[k] != 0:
key = "vani"
if "vk" not in par_types and "vani" not in par_types:
- t = Util2d.load(
- f, model, (nrow, ncol), np.float32, key, ext_unit_dict
- )
+ t = Util2d.load(f, model, (nrow, ncol), np.float32, key, ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(
@@ -521,12 +502,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
print(f" loading sy layer {k + 1:3d}...")
if "sy" not in par_types:
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- "sy",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, "sy", ext_unit_dict
)
else:
line = f.readline()
@@ -541,12 +517,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
print(f" loading vkcb layer {k + 1:3d}...")
if "vkcb" not in par_types:
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- "vkcb",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, "vkcb", ext_unit_dict
)
else:
line = f.readline()
@@ -566,9 +537,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
ext_unit_dict, filetype=ModflowUpw._ftype()
)
if ipakcb > 0:
- iu, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=ipakcb
- )
+ iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
model.add_pop_key_list(ipakcb)
# create upw object
@@ -593,11 +562,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
filenames=filenames,
)
if check:
- upw.check(
- f=f"{upw.name[0]}.chk",
- verbose=upw.parent.verbose,
- level=0,
- )
+ upw.check(f=f"{upw.name[0]}.chk", verbose=upw.parent.verbose, level=0)
# return upw object
return upw
diff --git a/flopy/modflow/mfuzf1.py b/flopy/modflow/mfuzf1.py
index 5acc699eb3..bb03bd462e 100644
--- a/flopy/modflow/mfuzf1.py
+++ b/flopy/modflow/mfuzf1.py
@@ -302,41 +302,31 @@ class ModflowUzf1(Package):
"""
- _options = dict(
- [
- ("specifythtr", OptionBlock.simple_flag),
- ("specifythti", OptionBlock.simple_flag),
- ("nosurfleak", OptionBlock.simple_flag),
- ("specifysurfk", OptionBlock.simple_flag),
- ("rejectsurfk", OptionBlock.simple_flag),
- ("seepsurfk", OptionBlock.simple_flag),
- ("capillaryuzet", OptionBlock.simple_flag),
- (
- "etsquare",
- {
- OptionBlock.dtype: np.bool_,
- OptionBlock.nested: True,
- OptionBlock.n_nested: 1,
- OptionBlock.vars: {"smoothfact": OptionBlock.simple_float},
- },
- ),
- (
- "netflux",
- {
- OptionBlock.dtype: np.bool_,
- OptionBlock.nested: True,
- OptionBlock.n_nested: 2,
- OptionBlock.vars: dict(
- [
- ("unitrech", OptionBlock.simple_int),
- ("unitdis", OptionBlock.simple_int),
- ]
- ),
- },
- ),
- ("savefinf", OptionBlock.simple_flag),
- ]
- )
+ _options = {
+ "specifythtr": OptionBlock.simple_flag,
+ "specifythti": OptionBlock.simple_flag,
+ "nosurfleak": OptionBlock.simple_flag,
+ "specifysurfk": OptionBlock.simple_flag,
+ "rejectsurfk": OptionBlock.simple_flag,
+ "seepsurfk": OptionBlock.simple_flag,
+ "capillaryuzet": OptionBlock.simple_flag,
+ "etsquare": {
+ OptionBlock.dtype: np.bool_,
+ OptionBlock.nested: True,
+ OptionBlock.n_nested: 1,
+ OptionBlock.vars: {"smoothfact": OptionBlock.simple_float},
+ },
+ "netflux": {
+ OptionBlock.dtype: np.bool_,
+ OptionBlock.nested: True,
+ OptionBlock.n_nested: 2,
+ OptionBlock.vars: {
+ "unitrech": OptionBlock.simple_int,
+ "unitdis": OptionBlock.simple_int,
+ },
+ },
+ "savefinf": OptionBlock.simple_flag,
+ }
def __init__(
self,
@@ -521,9 +511,7 @@ def __init__(
# Data Set 2
# IUZFBND (NCOL, NROW) -- U2DINT
- self.iuzfbnd = Util2d(
- model, (nrow, ncol), np.int32, iuzfbnd, name="iuzfbnd"
- )
+ self.iuzfbnd = Util2d(model, (nrow, ncol), np.int32, iuzfbnd, name="iuzfbnd")
# If IRUNFLG > 0: Read item 3
# Data Set 3
@@ -540,9 +528,7 @@ def __init__(
self.vks = Util2d(model, (nrow, ncol), np.float32, vks, name="vks")
if seepsurfk or specifysurfk:
- self.surfk = Util2d(
- model, (nrow, ncol), np.float32, surfk, name="surfk"
- )
+ self.surfk = Util2d(model, (nrow, ncol), np.float32, surfk, name="surfk")
if iuzfopt > 0:
# Data Set 5
@@ -550,20 +536,14 @@ def __init__(
self.eps = Util2d(model, (nrow, ncol), np.float32, eps, name="eps")
# Data Set 6a
# THTS (NCOL, NROW) -- U2DREL
- self.thts = Util2d(
- model, (nrow, ncol), np.float32, thts, name="thts"
- )
+ self.thts = Util2d(model, (nrow, ncol), np.float32, thts, name="thts")
# Data Set 6b
# THTS (NCOL, NROW) -- U2DREL
if self.specifythtr > 0:
- self.thtr = Util2d(
- model, (nrow, ncol), np.float32, thtr, name="thtr"
- )
+ self.thtr = Util2d(model, (nrow, ncol), np.float32, thtr, name="thtr")
# Data Set 7
# [THTI (NCOL, NROW)] -- U2DREL
- self.thti = Util2d(
- model, (nrow, ncol), np.float32, thti, name="thti"
- )
+ self.thti = Util2d(model, (nrow, ncol), np.float32, thti, name="thti")
# Data Set 8
# {IFTUNIT: [IUZROW, IUZCOL, IUZOPT]}
@@ -572,15 +552,11 @@ def __init__(
# Dataset 9, 11, 13 and 15 will be written automatically in the
# write_file function
# Data Set 10
- # [FINF (NCOL, NROW)] – U2DREL
+ # [FINF (NCOL, NROW)] - U2DREL
- self.finf = Transient2d(
- model, (nrow, ncol), np.float32, finf, name="finf"
- )
+ self.finf = Transient2d(model, (nrow, ncol), np.float32, finf, name="finf")
if ietflg > 0:
- self.pet = Transient2d(
- model, (nrow, ncol), np.float32, pet, name="pet"
- )
+ self.pet = Transient2d(model, (nrow, ncol), np.float32, pet, name="pet")
self.extdp = Transient2d(
model, (nrow, ncol), np.float32, extdp, name="extdp"
)
@@ -696,10 +672,7 @@ def write_file(self, f=None):
f_uzf.write(f"{self.heading}\n")
# Dataset 1a
- if (
- isinstance(self.options, OptionBlock)
- and self.parent.version == "mfnwt"
- ):
+ if isinstance(self.options, OptionBlock) and self.parent.version == "mfnwt":
self.options.update_from_package(self)
self.options.write_options(f_uzf)
@@ -708,7 +681,9 @@ def write_file(self, f=None):
# Dataset 1b
if self.iuzfopt > 0:
- comment = " #NUZTOP IUZFOPT IRUNFLG IETFLG ipakcb IUZFCB2 NTRAIL NSETS NUZGAGES"
+ comment = (
+ " #NUZTOP IUZFOPT IRUNFLG IETFLG ipakcb IUZFCB2 NTRAIL NSETS NUZGAGES"
+ )
f_uzf.write(
"{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}{:15.6E}{:100s}\n".format(
self.nuztop,
@@ -765,10 +740,7 @@ def write_file(self, f=None):
f_uzf.write(self.thtr.get_file_entry())
# Data Set 7
# [THTI (NCOL, NROW)] -- U2DREL
- if (
- not self.parent.get_package("DIS").steady[0]
- or self.specifythti > 0.0
- ):
+ if not self.parent.get_package("DIS").steady[0] or self.specifythti > 0.0:
f_uzf.write(self.thti.get_file_entry())
# If NUZGAG>0: Item 8 is repeated NUZGAG times
# Data Set 8
@@ -803,11 +775,7 @@ def write_transient(name):
write_transient("extdp")
if self.iuzfopt > 0:
write_transient("extwc")
- if (
- self.capillaryuzet
- and "nwt" in self.parent.version
- and self.iuzfopt > 0
- ):
+ if self.capillaryuzet and "nwt" in self.parent.version and self.iuzfopt > 0:
write_transient("air_entry")
write_transient("hroot")
write_transient("rootact")
diff --git a/flopy/modflow/mfwel.py b/flopy/modflow/mfwel.py
index e211c949a8..86c815c373 100644
--- a/flopy/modflow/mfwel.py
+++ b/flopy/modflow/mfwel.py
@@ -110,34 +110,22 @@ class ModflowWel(Package):
"""
- _options = dict(
- [
- (
- "specify",
- {
- OptionBlock.dtype: np.bool_,
- OptionBlock.nested: True,
- OptionBlock.n_nested: 2,
- OptionBlock.vars: dict(
- [
- ("phiramp", OptionBlock.simple_float),
- (
- "iunitramp",
- dict(
- [
- (OptionBlock.dtype, int),
- (OptionBlock.nested, False),
- (OptionBlock.optional, True),
- ]
- ),
- ),
- ]
- ),
+ _options = {
+ "specify": {
+ OptionBlock.dtype: np.bool_,
+ OptionBlock.nested: True,
+ OptionBlock.n_nested: 2,
+ OptionBlock.vars: {
+ "phiramp": OptionBlock.simple_float,
+ "iunitramp": {
+ OptionBlock.dtype: int,
+ OptionBlock.nested: False,
+ OptionBlock.optional: True,
},
- ),
- ("tabfiles", OptionBlock.simple_tabfile),
- ]
- )
+ },
+ },
+ "tabfiles": OptionBlock.simple_tabfile,
+ }
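As with UZF above, this specification corresponds to a WEL OPTIONS block of the
form (values hypothetical):

    # OPTIONS
    # SPECIFY 0.05 2
    # TABFILES 2 30
    # END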
def __init__(
self,
@@ -205,9 +193,7 @@ def __init__(
if dtype is not None:
self.dtype = dtype
else:
- self.dtype = self.get_default_dtype(
- structured=self.parent.structured
- )
+ self.dtype = self.get_default_dtype(structured=self.parent.structured)
# determine if any aux variables in dtype
dt = self.get_default_dtype(structured=self.parent.structured)
@@ -228,9 +214,7 @@ def __init__(
self.options = options
# initialize MfList
- self.stress_period_data = MfList(
- self, stress_period_data, binary=binary
- )
+ self.stress_period_data = MfList(self, stress_period_data, binary=binary)
if add_package:
self.parent.add_package(self)
@@ -271,10 +255,7 @@ def write_file(self, f=None):
f_wel.write(f"{self.heading}\n")
- if (
- isinstance(self.options, OptionBlock)
- and self.parent.version == "mfnwt"
- ):
+ if isinstance(self.options, OptionBlock) and self.parent.version == "mfnwt":
self.options.update_from_package(self)
if self.options.block:
self.options.write_options(f_wel)
@@ -285,9 +266,7 @@ def write_file(self, f=None):
if self.options.noprint:
line += "NOPRINT "
if self.options.auxiliary:
- line += " ".join(
- [str(aux).upper() for aux in self.options.auxiliary]
- )
+ line += " ".join([str(aux).upper() for aux in self.options.auxiliary])
else:
for opt in self.options:
@@ -296,10 +275,7 @@ def write_file(self, f=None):
line += "\n"
f_wel.write(line)
- if (
- isinstance(self.options, OptionBlock)
- and self.parent.version == "mfnwt"
- ):
+ if isinstance(self.options, OptionBlock) and self.parent.version == "mfnwt":
if not self.options.block:
if isinstance(self.options.specify, np.ndarray):
self.options.tabfiles = False
@@ -307,9 +283,7 @@ def write_file(self, f=None):
else:
if self.specify and self.parent.version == "mfnwt":
- f_wel.write(
- f"SPECIFY {self.phiramp:10.5g} {self.iunitramp:10d}\n"
- )
+ f_wel.write(f"SPECIFY {self.phiramp:10.5g} {self.iunitramp:10d}\n")
self.stress_period_data.write_transient(f_wel)
f_wel.close()
diff --git a/flopy/modflow/mfzon.py b/flopy/modflow/mfzon.py
index ca63a8efdd..b55b397d1a 100644
--- a/flopy/modflow/mfzon.py
+++ b/flopy/modflow/mfzon.py
@@ -177,9 +177,7 @@ def load(cls, f, model, nrow=None, ncol=None, ext_unit_dict=None):
if model.verbose:
print(f' reading data for "{zonnam:<10s}" zone')
# load data
- t = Util2d.load(
- f, model, (nrow, ncol), np.int32, zonnam, ext_unit_dict
- )
+ t = Util2d.load(f, model, (nrow, ncol), np.int32, zonnam, ext_unit_dict)
# add unit number to list of external files in ext_unit_dict
# to remove.
if t.locat is not None:
@@ -198,10 +196,7 @@ def load(cls, f, model, nrow=None, ncol=None, ext_unit_dict=None):
)
return cls(
- model,
- zone_dict=zone_dict,
- unitnumber=unitnumber,
- filenames=filenames,
+ model, zone_dict=zone_dict, unitnumber=unitnumber, filenames=filenames
)
@staticmethod
diff --git a/flopy/modflowlgr/mflgr.py b/flopy/modflowlgr/mflgr.py
index df7c3c0ef9..5f3dff655e 100644
--- a/flopy/modflowlgr/mflgr.py
+++ b/flopy/modflowlgr/mflgr.py
@@ -249,10 +249,7 @@ def _get_path(self, bpth, pth, fpth=""):
rpth = fpth
else:
rpth = os.path.join(rpth, fpth)
- msg = (
- "namefiles must be in the same directory as "
- "the lgr control file\n"
- )
+ msg = "namefiles must be in the same directory as the lgr control file\n"
msg += f"Control file path: {lpth}\n"
msg += f"Namefile path: {mpth}\n"
msg += f"Relative path: {rpth}\n"
@@ -321,9 +318,7 @@ def write_name_file(self):
zip(self.children_models, self.children_data)
):
# dataset 6
- pth = self._get_path(
- self._model_ws, child._model_ws, fpth=child.namefile
- )
+ pth = self._get_path(self._model_ws, child._model_ws, fpth=child.namefile)
comment = f"data set 6 - child {idx + 1} namefile"
line = self._padline(pth, comment=comment)
f.write(line)
@@ -340,9 +335,7 @@ def write_name_file(self):
child_data.iucbhsv,
child_data.iucbfsv,
)
- comment = (
- f"data set 8 - child {idx + 1} ishflg, ibflg, iucbhsv, iucbfsv"
- )
+ comment = f"data set 8 - child {idx + 1} ishflg, ibflg, iucbhsv, iucbfsv"
line = self._padline(line, comment=comment)
f.write(line)
@@ -366,9 +359,7 @@ def write_name_file(self):
# dataset 12
line = "{} {} {}".format(
- child_data.nplbeg + 1,
- child_data.nprbeg + 1,
- child_data.npcbeg + 1,
+ child_data.nplbeg + 1, child_data.nprbeg + 1, child_data.npcbeg + 1
)
comment = f"data set 12 - child {idx + 1} nplbeg, nprbeg, npcbeg"
line = self._padline(line, comment=comment)
@@ -376,9 +367,7 @@ def write_name_file(self):
# dataset 13
line = "{} {} {}".format(
- child_data.nplend + 1,
- child_data.nprend + 1,
- child_data.npcend + 1,
+ child_data.nplend + 1, child_data.nprend + 1, child_data.npcend + 1
)
comment = f"data set 13 - child {idx + 1} nplend, nprend, npcend"
line = self._padline(line, comment=comment)
@@ -429,8 +418,9 @@ def change_model_ws(self, new_pth=None, reset_external=False):
not_valid = new_pth
new_pth = os.getcwd()
print(
- "\n{} not valid, workspace-folder was changed to {}"
- "\n".format(not_valid, new_pth)
+ "\n{} not valid, workspace-folder was changed to {}\n".format(
+ not_valid, new_pth
+ )
)
# --reset the model workspace
old_pth = self._model_ws
@@ -446,9 +436,7 @@ def change_model_ws(self, new_pth=None, reset_external=False):
npth = new_pth
else:
npth = os.path.join(new_pth, rpth)
- self.parent.change_model_ws(
- new_pth=npth, reset_external=reset_external
- )
+ self.parent.change_model_ws(new_pth=npth, reset_external=reset_external)
# reset model_ws for the children
for child in self.children_models:
lpth = os.path.abspath(old_pth)
@@ -619,20 +607,12 @@ def load(
# dataset 12
line = f.readline()
t = line.split()
- nplbeg, nprbeg, npcbeg = (
- int(t[0]) - 1,
- int(t[1]) - 1,
- int(t[2]) - 1,
- )
+ nplbeg, nprbeg, npcbeg = (int(t[0]) - 1, int(t[1]) - 1, int(t[2]) - 1)
# dataset 13
line = f.readline()
t = line.split()
- nplend, nprend, npcend = (
- int(t[0]) - 1,
- int(t[1]) - 1,
- int(t[2]) - 1,
- )
+ nplend, nprend, npcend = (int(t[0]) - 1, int(t[1]) - 1, int(t[2]) - 1)
# dataset 14
line = f.readline()
diff --git a/flopy/modpath/mp6.py b/flopy/modpath/mp6.py
index 7f8b04d663..33f70d1939 100644
--- a/flopy/modpath/mp6.py
+++ b/flopy/modpath/mp6.py
@@ -102,9 +102,7 @@ def __init__(
# ensure that user-specified files are used
iu = self.__mf.oc.iuhead
head_file = (
- self.__mf.get_output(unit=iu)
- if head_file is None
- else head_file
+ self.__mf.get_output(unit=iu) if head_file is None else head_file
)
p = self.__mf.get_package("LPF")
if p is None:
@@ -118,13 +116,9 @@ def __init__(
)
iu = p.ipakcb
budget_file = (
- self.__mf.get_output(unit=iu)
- if budget_file is None
- else budget_file
- )
- dis_file = (
- self.__mf.dis.file_name[0] if dis_file is None else dis_file
+ self.__mf.get_output(unit=iu) if budget_file is None else budget_file
)
+ dis_file = self.__mf.dis.file_name[0] if dis_file is None else dis_file
dis_unit = self.__mf.dis.unit_number[0]
nper = self.__mf.dis.nper
@@ -158,7 +152,8 @@ def __init__(
)
if self.__mf is None:
- # read from nper, lay, nrow, ncol from dis file, Item 1: NLAY, NROW, NCOL, NPER, ITMUNI, LENUNI
+            # read nlay, nrow, ncol, nper from the dis file,
+ # Item 1: NLAY, NROW, NCOL, NPER, ITMUNI, LENUNI
read_dis = dis_file
if not os.path.exists(read_dis):
# path doesn't exist, probably relative to model_ws
@@ -168,12 +163,7 @@ def __init__(
while line[0] == "#":
line = f.readline()
nlay, nrow, ncol, nper, itmuni, lennuni = line.split()
- self.nrow_ncol_nlay_nper = (
- int(nrow),
- int(ncol),
- int(nlay),
- int(nper),
- )
+ self.nrow_ncol_nlay_nper = (int(nrow), int(ncol), int(nlay), int(nper))
# set the rest of the attributes
self.__sim = None
@@ -187,9 +177,7 @@ def __init__(
self.load = load
self.__next_ext_unit = 500
if external_path is not None:
- assert os.path.exists(
- external_path
- ), "external_path does not exist"
+ assert os.path.exists(external_path), "external_path does not exist"
self.external = True
def __repr__(self):
@@ -271,14 +259,17 @@ def create_mpsim(
(default is 'WEL').
start_time : float or tuple
Sets the value of MODPATH reference time relative to MODFLOW time.
- float : value of MODFLOW simulation time at which to start the particle tracking simulation.
+ float : value of MODFLOW simulation time at which to start the
+ particle tracking simulation.
Sets the value of MODPATH ReferenceTimeOption to 1.
- tuple : (period, step, time fraction) MODFLOW stress period, time step and fraction
+ tuple : (period, step, time fraction) MODFLOW stress period,
+ time step and fraction
between 0 and 1 at which to start the particle tracking simulation.
Sets the value of MODPATH ReferenceTimeOption to 2.
default_ifaces : list
- List of cell faces (1-6; see MODPATH6 manual, fig. 7) on which to start particles.
- (default is None, meaning ifaces will vary depending on packages argument above)
+ List of cell faces (1-6; see MODPATH6 manual, fig. 7) on which to
+ start particles. (default is None, meaning ifaces will vary
+ depending on packages argument above)
ParticleRowCount : int
Rows of particles to start on each cell index face (iface).
ParticleColumnCount : int
@@ -305,7 +296,8 @@ def create_mpsim(
ref_time = 0
ref_time_per_stp = (0, 0, 1.0)
if isinstance(start_time, tuple):
- ReferenceTimeOption = 2 # 1: specify value for ref. time, 2: specify kper, kstp, rel. time pos
+ # 1: specify value for ref. time, 2: specify kper, kstp, rel. time pos
+ ReferenceTimeOption = 2
ref_time_per_stp = start_time
else:
ref_time = start_time
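
For reference, a minimal sketch of the two start_time forms documented above
(model and workspace names here are hypothetical):

    import flopy

    mf = flopy.modflow.Modflow.load("model.nam", model_ws="model_ws")
    mp = flopy.modpath.Modpath6("mp6run", modflowmodel=mf, model_ws="model_ws")
    # ReferenceTimeOption 1: absolute MODFLOW simulation time
    sim = mp.create_mpsim(trackdir="forward", simtype="pathline",
                          packages="WEL", start_time=100.0)
    # ReferenceTimeOption 2: (stress period, time step, fraction of step)
    sim = mp.create_mpsim(trackdir="forward", simtype="pathline",
                          packages="WEL", start_time=(2, 0, 0.5))
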
@@ -342,9 +334,7 @@ def create_mpsim(
if package.upper() == "WEL":
ParticleGenerationOption = 1
if "WEL" not in pak_list:
- raise Exception(
- "Error: no well package in the passed model"
- )
+ raise Exception("Error: no well package in the passed model")
for kper in range(nper):
mflist = self.__mf.wel.stress_period_data[kper]
idx = (mflist["k"], mflist["i"], mflist["j"])
@@ -369,9 +359,7 @@ def create_mpsim(
)
group_region.append([k, i, j, k, i, j])
if default_ifaces is None:
- ifaces.append(
- side_faces + [top_face, botm_face]
- )
+ ifaces.append(side_faces + [top_face, botm_face])
face_ct.append(6)
else:
ifaces.append(default_ifaces)
@@ -381,9 +369,7 @@ def create_mpsim(
elif "MNW" in package.upper():
ParticleGenerationOption = 1
if "MNW2" not in pak_list:
- raise Exception(
- "Error: no MNW2 package in the passed model"
- )
+ raise Exception("Error: no MNW2 package in the passed model")
node_data = self.__mf.mnw2.get_allnode_data()
node_data.sort(order=["wellid", "k"])
wellids = np.unique(node_data.wellid)
@@ -414,27 +400,15 @@ def append_node(ifaces_well, wellid, node_number, k, i, j):
k, i, j = nd.k[0], nd.i[0], nd.j[0]
if len(nd) == 1:
append_node(
- side_faces + [top_face, botm_face],
- wellid,
- 0,
- k,
- i,
- j,
+ side_faces + [top_face, botm_face], wellid, 0, k, i, j
)
else:
- append_node(
- side_faces + [top_face], wellid, 0, k, i, j
- )
+ append_node(side_faces + [top_face], wellid, 0, k, i, j)
for n in range(len(nd))[1:]:
k, i, j = nd.k[n], nd.i[n], nd.j[n]
if n == len(nd) - 1:
append_node(
- side_faces + [botm_face],
- wellid,
- n,
- k,
- i,
- j,
+ side_faces + [botm_face], wellid, n, k, i, j
)
else:
append_node(side_faces, wellid, n, k, i, j)
@@ -464,9 +438,7 @@ def append_node(ifaces_well, wellid, node_number, k, i, j):
if self.__mf is not None:
model_ws = self.__mf.model_ws
if os.path.exists(os.path.join(model_ws, package)):
- print(
- "detected a particle starting locations file in packages"
- )
+ print("detected a particle starting locations file in packages")
assert len(packages) == 1, (
"if a particle starting locations file is passed, "
"other packages cannot be specified"
diff --git a/flopy/modpath/mp6bas.py b/flopy/modpath/mp6bas.py
index 141e8397e1..9a50922d84 100644
--- a/flopy/modpath/mp6bas.py
+++ b/flopy/modpath/mp6bas.py
@@ -28,13 +28,14 @@ class Modpath6Bas(Package):
hdry : float
Head value assigned to dry cells (default is -8888.).
def_face_ct : int
- Number fo default iface codes to read (default is 0).
+ Number of default iface codes to read (default is 0).
bud_label : str or list of strs
MODFLOW budget item to which a default iface is assigned.
def_iface : int or list of ints
Cell face (iface) on which to assign flows from MODFLOW budget file.
laytyp : None, int or list of ints
- MODFLOW layer type (0 is convertible, 1 is confined). If None, read from modflow model
+        MODFLOW layer type (0 is confined, 1 is convertible).
+ If None, read from modflow model
ibound : None or array of ints, optional
The ibound array (the default is 1). If None, pull from parent modflow model
prsity : array of ints, optional
@@ -181,9 +182,7 @@ def _create_ltype(self, laytyp):
else: # no user passed layertype
have_layertype = False
if self.parent.getmf() is None:
- raise ValueError(
- "if modflowmodel is None then laytype must be passed"
- )
+ raise ValueError("if modflowmodel is None then laytype must be passed")
# run though flow packages
flow_package = self.parent.getmf().get_package("BCF6")
diff --git a/flopy/modpath/mp6sim.py b/flopy/modpath/mp6sim.py
index 1cf465958a..8ef2895b45 100644
--- a/flopy/modpath/mp6sim.py
+++ b/flopy/modpath/mp6sim.py
@@ -204,7 +204,8 @@ def write_file(self):
None
"""
- # item numbers and CamelCase variable names correspond to Modpath 6 documentation
+ # item numbers and CamelCase variable names correspond to
+ # Modpath 6 documentation
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
f_sim = open(self.fn_path, "w")
@@ -273,23 +274,13 @@ def write_file(self):
)
# item 14
if ReleaseOption == 2:
- (
- ReleasePeriodLength,
- ReleaseEventCount,
- ) = self.release_times[i]
- f_sim.write(
- f"{ReleasePeriodLength:f} {ReleaseEventCount}\n"
- )
+ (ReleasePeriodLength, ReleaseEventCount) = self.release_times[i]
+ f_sim.write(f"{ReleasePeriodLength:f} {ReleaseEventCount}\n")
# item 15
if GridCellRegionOption == 1:
- (
- MinLayer,
- MinRow,
- MinColumn,
- MaxLayer,
- MaxRow,
- MaxColumn,
- ) = self.group_region[i]
+ (MinLayer, MinRow, MinColumn, MaxLayer, MaxRow, MaxColumn) = (
+ self.group_region[i]
+ )
f_sim.write(
"{:d} {:d} {:d} {:d} {:d} {:d}\n".format(
MinLayer + 1,
@@ -313,26 +304,18 @@ def write_file(self):
f_sim.write(f"{self.face_ct[i]}\n")
# item 20
for j in range(self.face_ct[i]):
- (
- IFace,
- ParticleRowCount,
- ParticleColumnCount,
- ) = self.ifaces[i][j]
+ IFace, ParticleRowCount, ParticleColumnCount = self.ifaces[i][j]
f_sim.write(
f"{IFace} {ParticleRowCount} {ParticleColumnCount}\n"
)
# item 21
elif PlacementOption == 2:
- (
- ParticleLayerCount,
- ParticleRowCount,
- ParticleColumnCount,
- ) = self.particle_cell_cnt[i]
+ (ParticleLayerCount, ParticleRowCount, ParticleColumnCount) = (
+ self.particle_cell_cnt[i]
+ )
f_sim.write(
"{:d} {:d} {:d} \n".format(
- ParticleLayerCount,
- ParticleRowCount,
- ParticleColumnCount,
+ ParticleLayerCount, ParticleRowCount, ParticleColumnCount
)
)
@@ -365,9 +348,7 @@ def write_file(self):
# item 27
for k in range(self.cell_bd_ct):
Grid, Layer, Row, Column = self.bud_loc[k]
- f_sim.write(
- f"{Grid} {Layer + 1} {Row + 1} {Column + 1} \n"
- )
+ f_sim.write(f"{Grid} {Layer + 1} {Row + 1} {Column + 1} \n")
if self.options_dict["BudgetOutputOption"] == 4:
# item 28
f_sim.write(f"{self.trace_file}\n")
@@ -399,7 +380,8 @@ class StartingLocationsFile(Package):
The model object (of type :class:`flopy.modpath.mp.Modpath`) to which
this package will be added.
inputstyle : 1
- Input style described in MODPATH6 manual (currently only input style 1 is supported)
+ Input style described in MODPATH6 manual
+ (currently only input style 1 is supported)
extension : string
Filename extension (default is 'loc')
use_pandas: bool, default True
@@ -418,9 +400,7 @@ def __init__(
self.model = model
self.use_pandas = use_pandas
- self.heading = (
- "# Starting locations file for Modpath, generated by Flopy."
- )
+ self.heading = "# Starting locations file for Modpath, generated by Flopy."
self.input_style = inputstyle
if inputstyle != 1:
raise NotImplementedError
@@ -462,7 +442,8 @@ def get_empty_starting_locations_data(
Parameters
----------
npt : int
- Number of particles. Particles in array will be numbered consecutively from 1 to npt.
+ Number of particles.
+ Particles in array will be numbered consecutively from 1 to npt.
"""
dtype = StartingLocationsFile.get_dtypes()
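
A short sketch of filling the empty recarray this helper returns, mirroring
the flopy example notebooks (`mp` is assumed to be an existing Modpath6 model):

    import flopy

    stl = flopy.modpath.mp6sim.StartingLocationsFile(model=mp)
    stldata = stl.get_empty_starting_locations_data(npt=2)
    stldata["label"] = ["p1", "p2"]
    stldata[1]["i0"] = 5  # second particle starts in row 5 (zero-based)
    stl.data = stldata
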
@@ -500,11 +481,9 @@ def _write_particle_data_with_pandas(self, data, float_format):
'initialtime', 'label']
:param save_group_mapper bool, if true, save a groupnumber to group name mapper as well.
:return:
- """
+ """ # noqa
# convert float format string to pandas float format
- float_format = (
- float_format.replace("{", "").replace("}", "").replace(":", "%")
- )
+ float_format = float_format.replace("{", "").replace("}", "").replace(":", "%")
data = pd.DataFrame(data)
if len(data) == 0:
return
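
The replace chain above converts a str.format spec into the printf-style spec
pandas expects; a quick worked example:

    float_format = "{:.8f}"
    pandas_fmt = float_format.replace("{", "").replace("}", "").replace(":", "%")
    assert pandas_fmt == "%.8f"
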
@@ -515,12 +494,11 @@ def _write_particle_data_with_pandas(self, data, float_format):
data.loc[:, "groupname"] = data.groupname.str.decode("UTF-8")
# write loc file with pandas to save time
- # simple speed test writing particles with flopy and running model took 30 min, writing with pandas took __min
+ # simple speed test writing particles with flopy and running model took 30 min,
+ # writing with pandas took __min
loc_path = self.fn_path
# write groups
- group_dict = dict(
- data[["particlegroup", "groupname"]].itertuples(False, None)
- )
+ group_dict = dict(data[["particlegroup", "groupname"]].itertuples(False, None))
# writing group loc data
groups = (
@@ -530,13 +508,9 @@ def _write_particle_data_with_pandas(self, data, float_format):
.reset_index()
.rename(columns={"groupname": "count"})
)
- groups.loc[:, "groupname"] = groups.loc[:, "particlegroup"].replace(
- group_dict
- )
+ groups.loc[:, "groupname"] = groups.loc[:, "particlegroup"].replace(group_dict)
group_count = len(groups.index)
- groups = pd.Series(
- groups[["groupname", "count"]].astype(str).values.flatten()
- )
+ groups = pd.Series(groups[["groupname", "count"]].astype(str).values.flatten())
with open(loc_path, "w") as f:
f.write(f"{self.heading}\n")
f.write(f"{self.input_style:d}\n")
diff --git a/flopy/modpath/mp7.py b/flopy/modpath/mp7.py
index 75d4a0ce81..67fda5a1a3 100644
--- a/flopy/modpath/mp7.py
+++ b/flopy/modpath/mp7.py
@@ -110,26 +110,21 @@ def __init__(
raise TypeError(
"Modpath7: flow model is not an instance of "
"flopy.modflow.Modflow or flopy.mf6.MFModel. "
- "Passed object of type {}".format(type(flowmodel))
+ f"Passed object of type {type(flowmodel)}"
)
# if a MFModel instance ensure flowmodel is a MODFLOW 6 GWF model
if isinstance(flowmodel, MFModel):
- if (
- flowmodel.model_type != "gwf"
- and flowmodel.model_type != "gwf6"
- ):
+ if flowmodel.model_type != "gwf" and flowmodel.model_type != "gwf6":
raise TypeError(
"Modpath7: flow model type must be gwf. "
- "Passed model_type is {}.".format(flowmodel.model_type)
+ f"Passed model_type is {flowmodel.model_type}."
)
# set flowmodel and flow_version attributes
self.flowmodel = flowmodel
self.flow_version = self.flowmodel.version
- self._flowmodel_ws = os.path.relpath(
- flowmodel.model_ws, self._model_ws
- )
+ self._flowmodel_ws = os.path.relpath(flowmodel.model_ws, self._model_ws)
if self.flow_version == "mf6":
# get discretization package
@@ -142,20 +137,14 @@ def __init__(
)
else:
if dis.package_name.lower() == "dis":
- nlay, nrow, ncol = (
- dis.nlay.array,
- dis.nrow.array,
- dis.ncol.array,
- )
+ nlay, nrow, ncol = (dis.nlay.array, dis.nrow.array, dis.ncol.array)
shape = (nlay, nrow, ncol)
elif dis.package_name.lower() == "disv":
nlay, ncpl = dis.nlay.array, dis.ncpl.array
shape = (nlay, ncpl)
elif dis.package_name.lower() == "disu":
nodes = dis.nodes.array
- shape = tuple(
- nodes,
- )
+ shape = tuple(nodes)
else:
raise TypeError(
"DIS, DISV, or DISU packages must be "
@@ -184,8 +173,7 @@ def __init__(
tdis = self.flowmodel.simulation.get_package("TDIS")
if tdis is None:
raise Exception(
- "TDIS package must be "
- "included in the passed MODFLOW 6 model"
+ "TDIS package must be included in the passed MODFLOW 6 model"
)
tdis_file = tdis.filename
@@ -209,9 +197,7 @@ def __init__(
# set budget file name
if budgetfilename is None:
- budgetfilename = oc.budget_filerecord.array["budgetfile"][
- 0
- ]
+ budgetfilename = oc.budget_filerecord.array["budgetfile"][0]
else:
shape = None
# extract data from DIS or DISU files and set shape
@@ -339,10 +325,7 @@ def __repr__(self):
def laytyp(self):
if self.flowmodel.version == "mf6":
icelltype = self.flowmodel.npf.icelltype.array
- laytyp = [
- icelltype[k].max()
- for k in range(self.flowmodel.modelgrid.nlay)
- ]
+ laytyp = [icelltype[k].max() for k in range(self.flowmodel.modelgrid.nlay)]
else:
p = self.flowmodel.get_package("BCF6")
if p is None:
@@ -383,12 +366,10 @@ def write_name_file(self):
f.write(f"DIS {self.dis_file}\n")
if self.grbdis_file is not None:
f.write(
- f"{self.grbtag:10s} {os.path.join(self._flowmodel_ws, self.grbdis_file)}\n"
+ f"{self.grbtag:10s} {os.path.join(self._flowmodel_ws, self.grbdis_file)}\n" # noqa
)
if self.tdis_file is not None:
- f.write(
- f"TDIS {os.path.join(self._flowmodel_ws, self.tdis_file)}\n"
- )
+ f.write(f"TDIS {os.path.join(self._flowmodel_ws, self.tdis_file)}\n")
if self.headfilename is not None:
f.write(
f"HEAD {os.path.join(self._flowmodel_ws, self.headfilename)}\n"
@@ -412,6 +393,7 @@ def create_mp7(
rowcelldivisions=2,
layercelldivisions=2,
nodes=None,
+ porosity=0.30,
):
"""
Create a default MODPATH 7 model using a passed flowmodel with
@@ -447,6 +429,8 @@ def create_mp7(
direction (default is 2).
nodes : int, list of ints, tuple of ints, or np.ndarray
        Nodes (zero-based) with particles (default is node 0).
+ porosity: float or array of floats (nlay, nrow, ncol)
+ The porosity array (the default is 0.30).
Returns
-------
@@ -477,7 +461,7 @@ def create_mp7(
# create MODPATH 7 basic file and add to the MODPATH 7
# model instance (mp)
- Modpath7Bas(mp, defaultiface=defaultiface)
+ Modpath7Bas(mp, porosity=porosity, defaultiface=defaultiface)
# create particles
if nodes is None:
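
A sketch of the new porosity pass-through (simulation and workspace names are
hypothetical):

    import flopy

    sim = flopy.mf6.MFSimulation.load(sim_ws="mf6_ws")
    gwf = sim.get_model()
    mp = flopy.modpath.Modpath7.create_mp7(
        modelname="mp7run", flowmodel=gwf, model_ws="mp7_ws", porosity=0.25
    )
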
diff --git a/flopy/modpath/mp7bas.py b/flopy/modpath/mp7bas.py
index 8126499d36..583462af65 100644
--- a/flopy/modpath/mp7bas.py
+++ b/flopy/modpath/mp7bas.py
@@ -38,9 +38,7 @@ class Modpath7Bas(Package):
"""
- def __init__(
- self, model, porosity=0.30, defaultiface=None, extension="mpbas"
- ):
+ def __init__(self, model, porosity=0.30, defaultiface=None, extension="mpbas"):
unitnumber = model.next_unit()
super().__init__(model, extension, "MPBAS", unitnumber)
@@ -96,8 +94,8 @@ def __init__(
# check iface value
if value < 0 or value > 6:
raise ValueError(
- "defaultiface for package {} must be between 0 and 1 "
- "({} specified)".format(key, value)
+ f"defaultiface for package {key} must be between 0 and 1 "
+ f"({value} specified)"
)
self.defaultifacecount = defaultifacecount
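
For context, defaultiface maps budget labels to an iface code in the 0-6 range
checked above; a minimal sketch (`mp` is assumed to be an existing Modpath7
model):

    import flopy

    defaultiface = {"RECHARGE": 6, "ET": 6}  # apply both flows to the cell top
    mpbas = flopy.modpath.Modpath7Bas(mp, porosity=0.1, defaultiface=defaultiface)
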
diff --git a/flopy/modpath/mp7particledata.py b/flopy/modpath/mp7particledata.py
index 472ff19151..a191315c95 100644
--- a/flopy/modpath/mp7particledata.py
+++ b/flopy/modpath/mp7particledata.py
@@ -6,8 +6,8 @@
"""
from collections import namedtuple
+from collections.abc import Iterator
from itertools import product
-from typing import Iterator, Tuple
import numpy as np
import pandas as pd
@@ -153,8 +153,7 @@ def __init__(
)
else:
allint = all(
- isinstance(el, (int, np.int32, np.int64))
- for el in partlocs
+ isinstance(el, (int, np.int32, np.int64)) for el in partlocs
)
# convert to a list of tuples
if allint:
@@ -162,9 +161,7 @@ def __init__(
for el in partlocs:
t.append((el,))
partlocs = t
- alllsttup = all(
- isinstance(el, (list, tuple)) for el in partlocs
- )
+ alllsttup = all(isinstance(el, (list, tuple)) for el in partlocs)
if alllsttup:
alllen1 = all(len(el) == 1 for el in partlocs)
if not alllen1:
@@ -183,9 +180,7 @@ def __init__(
partlocs = np.array(partlocs)
if len(partlocs.shape) == 1:
partlocs = partlocs.reshape(len(partlocs), 1)
- partlocs = unstructured_to_structured(
- np.array(partlocs), dtype=dtype
- )
+ partlocs = unstructured_to_structured(np.array(partlocs), dtype=dtype)
elif isinstance(partlocs, np.ndarray):
# reshape and convert dtype if needed
if len(partlocs.shape) == 1:
@@ -195,7 +190,8 @@ def __init__(
partlocs = unstructured_to_structured(partlocs, dtype=dtype)
else:
raise ValueError(
- f"{self.name}: partlocs must be a list or tuple with lists or tuples, or an ndarray"
+ f"{self.name}: partlocs must be a list or tuple with lists or "
+ "tuples, or an ndarray"
)
# localx
@@ -253,9 +249,7 @@ def __init__(
timeoffset = 0.0
else:
if isinstance(timeoffset, (float, int)):
- timeoffset = (
- np.ones(partlocs.shape[0], dtype=np.float32) * timeoffset
- )
+ timeoffset = np.ones(partlocs.shape[0], dtype=np.float32) * timeoffset
elif isinstance(timeoffset, (list, tuple)):
timeoffset = np.array(timeoffset, dtype=np.float32)
if isinstance(timeoffset, np.ndarray):
@@ -313,9 +307,7 @@ def __init__(
# create empty particle
ncells = partlocs.shape[0]
self.dtype = self._get_dtype(structured, particleid)
- particledata = create_empty_recarray(
- ncells, self.dtype, default_value=0
- )
+ particledata = create_empty_recarray(ncells, self.dtype, default_value=0)
# fill particle
if structured:
@@ -358,12 +350,7 @@ def write(self, f=None):
d = np.recarray.copy(self.particledata.to_records(index=False))
lnames = [name.lower() for name in d.dtype.names]
# Add one to the kij and node indices
- for idx in (
- "k",
- "i",
- "j",
- "node",
- ):
+ for idx in ("k", "i", "j", "node"):
if idx in lnames:
d[idx] += 1
# Add one to the particle id if required
@@ -410,15 +397,13 @@ def cvt_z(p, k, i, j):
span = mx - mn
return mn + span * p
- def convert(row) -> Tuple[float, float, float]:
+ def convert(row) -> tuple[float, float, float]:
verts = grid.get_cell_vertices(row.i, row.j)
xs, ys = list(zip(*verts))
return [
cvt_xy(row.localx, xs),
cvt_xy(row.localy, ys),
- row.localz
- if localz
- else cvt_z(row.localz, row.k, row.i, row.j),
+ row.localz if localz else cvt_z(row.localz, row.k, row.i, row.j),
]
else:
@@ -436,7 +421,7 @@ def cvt_z(p, nn):
span = mx - mn
return mn + span * p
- def convert(row) -> Tuple[float, float, float]:
+ def convert(row) -> tuple[float, float, float]:
verts = grid.get_cell_vertices(row.node)
xs, ys = list(zip(*verts))
return [
@@ -782,26 +767,14 @@ def write(self, f=None):
# item 5
fmt = " {} {} {}\n"
line = fmt.format(
- self.columncelldivisions,
- self.rowcelldivisions,
- self.layercelldivisions,
+ self.columncelldivisions, self.rowcelldivisions, self.layercelldivisions
)
f.write(line)
Extent = namedtuple(
"Extent",
- [
- "minx",
- "maxx",
- "miny",
- "maxy",
- "minz",
- "maxz",
- "xspan",
- "yspan",
- "zspan",
- ],
+ ["minx", "maxx", "miny", "maxy", "minz", "maxz", "xspan", "yspan", "zspan"],
)
@@ -837,7 +810,8 @@ def get_extent(grid, k=None, i=None, j=None, nn=None, localz=False) -> Extent:
)
else:
raise ValueError(
- "A cell (node) must be specified by indices (for structured grids) or node number (for vertex/unstructured)"
+ "A cell (node) must be specified by indices (for structured grids) "
+ "or node number (for vertex/unstructured)"
)
xs, ys = list(zip(*verts))
minx, maxx = min(xs), max(xs)
@@ -848,9 +822,7 @@ def get_extent(grid, k=None, i=None, j=None, nn=None, localz=False) -> Extent:
return Extent(minx, maxx, miny, maxy, minz, maxz, xspan, yspan, zspan)
-def get_face_release_points(
- subdivisiondata, cellid, extent
-) -> Iterator[tuple]:
+def get_face_release_points(subdivisiondata, cellid, extent) -> Iterator[tuple]:
"""
Get release points for MODPATH 7 input style 2, template
subdivision style 1, i.e. face (2D) subdivision, for the
@@ -934,10 +906,7 @@ def get_face_release_points(
yield cellid + [p[0], extent.maxy, p[1]]
# z1 (bottom)
- if (
- subdivisiondata.rowdivisions5 > 0
- and subdivisiondata.columndivisions5 > 0
- ):
+ if subdivisiondata.rowdivisions5 > 0 and subdivisiondata.columndivisions5 > 0:
xincr = extent.xspan / subdivisiondata.columndivisions5
xlocs = [
(extent.minx + (xincr * 0.5) + (xincr * rd))
@@ -952,10 +921,7 @@ def get_face_release_points(
yield cellid + [p[0], p[1], extent.minz]
# z2 (top)
- if (
- subdivisiondata.rowdivisions6 > 0
- and subdivisiondata.columndivisions6 > 0
- ):
+ if subdivisiondata.rowdivisions6 > 0 and subdivisiondata.columndivisions6 > 0:
xincr = extent.xspan / subdivisiondata.columndivisions6
xlocs = [
(extent.minx + (xincr * 0.5) + (xincr * rd))
@@ -970,9 +936,7 @@ def get_face_release_points(
yield cellid + [p[0], p[1], extent.maxz]
-def get_cell_release_points(
- subdivisiondata, cellid, extent
-) -> Iterator[tuple]:
+def get_cell_release_points(subdivisiondata, cellid, extent) -> Iterator[tuple]:
"""
Get release points for MODPATH 7 input style 2, template
subdivision type 2, i.e. cell (3D) subdivision, for the
@@ -1011,7 +975,8 @@ def get_release_points(
if nn is None and (k is None or i is None or j is None):
raise ValueError(
- "A cell (node) must be specified by indices (for structured grids) or node number (for vertex/unstructured)"
+ "A cell (node) must be specified by indices (for structured grids) "
+ "or node number (for vertex/unstructured)"
)
cellid = [k, i, j] if nn is None else [nn]
@@ -1022,9 +987,7 @@ def get_release_points(
elif isinstance(subdivisiondata, CellDataType):
return get_cell_release_points(subdivisiondata, cellid, extent)
else:
- raise ValueError(
- f"Unsupported subdivision data type: {type(subdivisiondata)}"
- )
+ raise ValueError(f"Unsupported subdivision data type: {type(subdivisiondata)}")
class LRCParticleData:
@@ -1150,9 +1113,7 @@ def write(self, f=None):
for sd, region in zip(self.subdivisiondata, self.lrcregions):
# item 3
- f.write(
- f"{sd.templatesubdivisiontype} {region.shape[0]} {sd.drape}\n"
- )
+ f.write(f"{sd.templatesubdivisiontype} {region.shape[0]} {sd.drape}\n")
# item 4 or 5
sd.write(f)
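
A sketch of building the inputs this writer consumes, assuming the documented
(minlayer, minrow, mincol, maxlayer, maxrow, maxcol) region layout:

    import flopy

    sd = flopy.modpath.CellDataType()  # default 3x3x3 cell subdivision
    pd = flopy.modpath.LRCParticleData(
        subdivisiondata=[sd],
        lrcregions=[[[0, 2, 4, 0, 3, 5]]],  # layer 0, rows 2-3, columns 4-5
    )
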
@@ -1215,9 +1176,7 @@ def to_prp(self, grid, localz=False) -> Iterator[tuple]:
"""
if grid.grid_type != "structured":
- raise ValueError(
- "Particle representation is structured but grid is not"
- )
+ raise ValueError("Particle representation is structured but grid is not")
irpt_offset = 0
for region in self.lrcregions:
@@ -1228,9 +1187,7 @@ def to_prp(self, grid, localz=False) -> Iterator[tuple]:
for j in range(minj, maxj + 1):
for sd in self.subdivisiondata:
for irpt, rpt in enumerate(
- get_release_points(
- sd, grid, k, i, j, localz=localz
- )
+ get_release_points(sd, grid, k, i, j, localz=localz)
):
assert rpt[0] == k
assert rpt[1] == i
@@ -1310,8 +1267,7 @@ def __init__(self, subdivisiondata=None, nodes=None):
nodes = nodes.reshape(1, nodes.shape[0])
# convert to a list of numpy arrays
nodes = [
- np.array(nodes[i, :], dtype=np.int32)
- for i in range(nodes.shape[0])
+ np.array(nodes[i, :], dtype=np.int32) for i in range(nodes.shape[0])
]
elif isinstance(nodes, (list, tuple)):
# convert a single list/tuple to a list of tuples if only one
@@ -1320,9 +1276,7 @@ def __init__(self, subdivisiondata=None, nodes=None):
if len(nodes) > 1:
nodes = [tuple(nodes)]
# determine if the list or tuple contains lists or tuples
- alllsttup = all(
- isinstance(el, (list, tuple, np.ndarray)) for el in nodes
- )
+ alllsttup = all(isinstance(el, (list, tuple, np.ndarray)) for el in nodes)
if not alllsttup:
raise TypeError(
"{}: nodes should be "
@@ -1380,9 +1334,7 @@ def write(self, f=None):
for sd, nodes in zip(self.subdivisiondata, self.nodedata):
# item 3
- f.write(
- f"{sd.templatesubdivisiontype} {nodes.shape[0]} {sd.drape}\n"
- )
+ f.write(f"{sd.templatesubdivisiontype} {nodes.shape[0]} {sd.drape}\n")
# item 4 or 5
sd.write(f)
@@ -1417,9 +1369,7 @@ def to_coords(self, grid, localz=False) -> Iterator[tuple]:
for sd in self.subdivisiondata:
for nd in self.nodedata:
- for rpt in get_release_points(
- sd, grid, nn=int(nd[0]), localz=localz
- ):
+ for rpt in get_release_points(sd, grid, nn=int(nd[0]), localz=localz):
yield (*rpt[1:4],)
def to_prp(self, grid, localz=False) -> Iterator[tuple]:
diff --git a/flopy/modpath/mp7particlegroup.py b/flopy/modpath/mp7particlegroup.py
index 5148138536..37df446465 100644
--- a/flopy/modpath/mp7particlegroup.py
+++ b/flopy/modpath/mp7particlegroup.py
@@ -86,9 +86,7 @@ def __init__(self, particlegroupname, filename, releasedata):
releasetimecount = int(releasedata[0])
releaseinterval = 0
# convert releasetimes list or tuple to a numpy array
- if isinstance(releasedata[1], list) or isinstance(
- releasedata[1], tuple
- ):
+ if isinstance(releasedata[1], list) or isinstance(releasedata[1], tuple):
releasedata[1] = np.array(releasedata[1])
if releasedata[1].shape[0] != releasetimecount:
raise ValueError(
@@ -144,9 +142,7 @@ def write(self, fp=None, ws="."):
# item 29
fp.write(
"{} {} {}\n".format(
- self.releasetimecount,
- self.releasetimes[0],
- self.releaseinterval,
+ self.releasetimecount, self.releasetimes[0], self.releaseinterval
)
)
elif self.releaseoption == 3:
@@ -154,9 +150,7 @@ def write(self, fp=None, ws="."):
fp.write(f"{self.releasetimecount}\n")
# item 31
tp = self.releasetimes
- v = Util2d(
- self, (tp.shape[0],), np.float32, tp, name="temp", locat=0
- )
+ v = Util2d(self, (tp.shape[0],), np.float32, tp, name="temp", locat=0)
fp.write(v.string)
# item 32
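
For reference, the three releasedata forms handled here (a minimal sketch;
unspecified arguments take flopy defaults):

    import flopy

    pg1 = flopy.modpath.ParticleGroup(releasedata=0.0)  # releaseoption 1
    pg2 = flopy.modpath.ParticleGroup(releasedata=[10, 0.0, 5.0])  # option 2
    pg3 = flopy.modpath.ParticleGroup(releasedata=[2, [0.0, 100.0]])  # option 3
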
@@ -220,9 +214,7 @@ def __init__(
"""
# instantiate base class
- _Modpath7ParticleGroup.__init__(
- self, particlegroupname, filename, releasedata
- )
+ _Modpath7ParticleGroup.__init__(self, particlegroupname, filename, releasedata)
self.name = "ParticleGroup"
# create default node-based particle data if not passed
@@ -305,9 +297,7 @@ def __init__(self, particlegroupname, filename, releasedata):
"""
# instantiate base class
- _Modpath7ParticleGroup.__init__(
- self, particlegroupname, filename, releasedata
- )
+ _Modpath7ParticleGroup.__init__(self, particlegroupname, filename, releasedata)
def write(self, fp=None, ws="."):
"""
@@ -370,9 +360,7 @@ def __init__(
self.name = "ParticleGroupLRCTemplate"
# instantiate base class
- _ParticleGroupTemplate.__init__(
- self, particlegroupname, filename, releasedata
- )
+ _ParticleGroupTemplate.__init__(self, particlegroupname, filename, releasedata)
# validate particledata
if particledata is None:
particledata = NodeParticleData()
@@ -468,9 +456,7 @@ def __init__(
self.name = "ParticleGroupNodeTemplate"
# instantiate base class
- _ParticleGroupTemplate.__init__(
- self, particlegroupname, filename, releasedata
- )
+ _ParticleGroupTemplate.__init__(self, particlegroupname, filename, releasedata)
# validate particledata
if particledata is None:
particledata = NodeParticleData()
diff --git a/flopy/modpath/mp7sim.py b/flopy/modpath/mp7sim.py
index 0a561f2e6e..3df35c1398 100644
--- a/flopy/modpath/mp7sim.py
+++ b/flopy/modpath/mp7sim.py
@@ -315,9 +315,7 @@ def __init__(
except:
sim_enum_error("weaksourceoption", weaksourceoption, weakOpt)
try:
- self.budgetoutputoption = budgetOpt[
- budgetoutputoption.lower()
- ].value
+ self.budgetoutputoption = budgetOpt[budgetoutputoption.lower()].value
except:
sim_enum_error("budgetoutputoption", budgetoutputoption, budgetOpt)
# tracemode
@@ -520,16 +518,9 @@ def __init__(
)
self.stopzone = stopzone
if zones is None:
- raise ValueError(
- "zones must be specified if zonedataoption='on'."
- )
+ raise ValueError("zones must be specified if zonedataoption='on'.")
self.zones = Util3d(
- model,
- shape3d,
- np.int32,
- zones,
- name="zones",
- locat=self.unit_number[0],
+ model, shape3d, np.int32, zones, name="zones", locat=self.unit_number[0]
)
# retardationfactoroption
@@ -538,14 +529,11 @@ def __init__(
retardationfactoroption.lower()
].value
except:
- sim_enum_error(
- "retardationfactoroption", retardationfactoroption, onoffOpt
- )
+ sim_enum_error("retardationfactoroption", retardationfactoroption, onoffOpt)
if self.retardationfactoroption == 2:
if retardation is None:
raise ValueError(
- "retardation must be specified if "
- "retardationfactoroption='on'."
+ "retardation must be specified if retardationfactoroption='on'."
)
self.retardation = Util3d(
model,
@@ -560,11 +548,7 @@ def __init__(
particlegroups = [ParticleGroup()]
elif isinstance(
particlegroups,
- (
- ParticleGroup,
- ParticleGroupLRCTemplate,
- ParticleGroupNodeTemplate,
- ),
+ (ParticleGroup, ParticleGroupLRCTemplate, ParticleGroupNodeTemplate),
):
particlegroups = [particlegroups]
self.particlegroups = particlegroups
@@ -615,9 +599,7 @@ def write_file(self, check=False):
# item 7 and 8
if self.tracemode == 1:
f.write(f"{self.tracefilename}\n")
- f.write(
- f"{self.traceparticlegroup + 1} {self.traceparticleid + 1}\n"
- )
+ f.write(f"{self.traceparticlegroup + 1} {self.traceparticleid + 1}\n")
# item 9
f.write(f"{self.BudgetCellCount}\n")
# item 10
@@ -657,9 +639,7 @@ def write_file(self, check=False):
f.write(f"{self.timepointoption}\n")
if self.timepointoption == 1:
# item 17
- f.write(
- f"{self.timepointdata[0]} {self.timepointdata[1][0]}\n"
- )
+ f.write(f"{self.timepointdata[0]} {self.timepointdata[1][0]}\n")
elif self.timepointoption == 2:
# item 18
f.write(f"{self.timepointdata[0]}\n")
diff --git a/flopy/mt3d/mt.py b/flopy/mt3d/mt.py
index 45599aceeb..acaf90e123 100644
--- a/flopy/mt3d/mt.py
+++ b/flopy/mt3d/mt.py
@@ -147,9 +147,7 @@ def __init__(
# Check whether specified ftlfile exists in model directory; if not,
# warn user
- if os.path.isfile(
- os.path.join(self.model_ws, f"{modelname}.{namefile_ext}")
- ):
+ if os.path.isfile(os.path.join(self.model_ws, f"{modelname}.{namefile_ext}")):
with open(
os.path.join(self.model_ws, f"{modelname}.{namefile_ext}")
) as nm_file:
@@ -180,10 +178,7 @@ def __init__(
):
pass
else:
- print(
- "Specified value of ftlfree conflicts with FTL "
- "file format"
- )
+ print("Specified value of ftlfree conflicts with FTL file format")
print(
f"Switching ftlfree from {self.ftlfree} to {not self.ftlfree}"
)
@@ -387,18 +382,14 @@ def write_name_file(self):
f_nam.write(f"{self.heading}\n")
f_nam.write(
"{:14s} {:5d} {}\n".format(
- self.lst.name[0],
- self.lst.unit_number[0],
- self.lst.file_name[0],
+ self.lst.name[0], self.lst.unit_number[0], self.lst.file_name[0]
)
)
if self.ftlfilename is not None:
ftlfmt = ""
if self.ftlfree:
ftlfmt = "FREE"
- f_nam.write(
- f"{'FTL':14s} {self.ftlunit:5d} {self.ftlfilename} {ftlfmt}\n"
- )
+ f_nam.write(f"{'FTL':14s} {self.ftlunit:5d} {self.ftlfilename} {ftlfmt}\n")
# write file entries in name file
f_nam.write(str(self.get_name_file_entries()))
@@ -407,9 +398,7 @@ def write_name_file(self):
f_nam.write(f"DATA {u:5d} {f}\n")
# write the output files
- for u, f, b in zip(
- self.output_units, self.output_fnames, self.output_binflag
- ):
+ for u, f, b in zip(self.output_units, self.output_fnames, self.output_binflag):
if u == 0:
continue
if b:
@@ -504,9 +493,7 @@ def load(
namefile_path, mt.mfnam_packages, verbose=verbose
)
except Exception as e:
- raise Exception(
- f"error loading name file entries from file:\n{e!s}"
- )
+ raise Exception(f"error loading name file entries from file:\n{e!s}")
if mt.verbose:
print(
@@ -552,9 +539,7 @@ def load(
return None
try:
- pck = btn.package.load(
- btn.filename, mt, ext_unit_dict=ext_unit_dict
- )
+ pck = btn.package.load(btn.filename, mt, ext_unit_dict=ext_unit_dict)
except Exception as e:
raise Exception(f"error loading BTN: {e!s}")
files_successfully_loaded.append(btn.filename)
@@ -602,15 +587,11 @@ def load(
if forgive:
try:
pck = item.package.load(
- item.filehandle,
- mt,
- ext_unit_dict=ext_unit_dict,
+ item.filehandle, mt, ext_unit_dict=ext_unit_dict
)
files_successfully_loaded.append(item.filename)
if mt.verbose:
- print(
- f" {pck.name[0]:4s} package load...success"
- )
+ print(f" {pck.name[0]:4s} package load...success")
except BaseException as o:
if mt.verbose:
print(
@@ -624,9 +605,7 @@ def load(
)
files_successfully_loaded.append(item.filename)
if mt.verbose:
- print(
- f" {pck.name[0]:4s} package load...success"
- )
+ print(f" {pck.name[0]:4s} package load...success")
else:
if mt.verbose:
print(f" {item.filetype:4s} package load...skipped")
@@ -651,9 +630,7 @@ def load(
elif key not in mt.pop_key_list:
mt.external_fnames.append(item.filename)
mt.external_units.append(key)
- mt.external_binflag.append(
- "binary" in item.filetype.lower()
- )
+ mt.external_binflag.append("binary" in item.filetype.lower())
mt.external_output.append(False)
# pop binary output keys and any external file units that are now
@@ -674,8 +651,9 @@ def load(
# write message indicating packages that were successfully loaded
if mt.verbose:
print(
- "\n The following {} packages were "
- "successfully loaded.".format(len(files_successfully_loaded))
+ "\n The following {} packages were successfully loaded.".format(
+ len(files_successfully_loaded)
+ )
)
for fname in files_successfully_loaded:
print(f" {os.path.basename(fname)}")
@@ -738,7 +716,9 @@ def load_obs(fname):
r : np.ndarray
"""
- firstline = "STEP TOTAL TIME LOCATION OF OBSERVATION POINTS (K,I,J)"
+ firstline = (
+ "STEP TOTAL TIME LOCATION OF OBSERVATION POINTS (K,I,J)"
+ )
dtype = [("step", int), ("time", float)]
nobs = 0
obs = []
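
A usage sketch for this static loader (the observation file name is
hypothetical):

    import flopy

    r = flopy.mt3d.Mt3dms.load_obs("MT3D001.OBS")
    print(r.dtype.names)  # ('step', 'time', ...) plus one column per obs point
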
diff --git a/flopy/mt3d/mtadv.py b/flopy/mt3d/mtadv.py
index 5227a75f36..1d961139f2 100644
--- a/flopy/mt3d/mtadv.py
+++ b/flopy/mt3d/mtadv.py
@@ -232,27 +232,17 @@ def write_file(self):
"""
f_adv = open(self.fn_path, "w")
f_adv.write(
- "%10i%10f%10i%10i\n"
- % (self.mixelm, self.percel, self.mxpart, self.nadvfd)
+ "%10i%10f%10i%10i\n" % (self.mixelm, self.percel, self.mxpart, self.nadvfd)
)
if self.mixelm > 0:
f_adv.write("%10i%10f\n" % (self.itrack, self.wd))
if (self.mixelm == 1) or (self.mixelm == 3):
f_adv.write(
"%10.4e%10i%10i%10i%10i%10i\n"
- % (
- self.dceps,
- self.nplane,
- self.npl,
- self.nph,
- self.npmin,
- self.npmax,
- )
+ % (self.dceps, self.nplane, self.npl, self.nph, self.npmin, self.npmax)
)
if (self.mixelm == 2) or (self.mixelm == 3):
- f_adv.write(
- "%10i%10i%10i\n" % (self.interp, self.nlsink, self.npsink)
- )
+ f_adv.write("%10i%10i%10i\n" % (self.interp, self.nlsink, self.npsink))
if self.mixelm == 3:
f_adv.write("%10f\n" % (self.dchmoc))
f_adv.close()
diff --git a/flopy/mt3d/mtbtn.py b/flopy/mt3d/mtbtn.py
index 1ddc994170..f74a62342a 100644
--- a/flopy/mt3d/mtbtn.py
+++ b/flopy/mt3d/mtbtn.py
@@ -294,9 +294,7 @@ def __init__(
if isinstance(obs, list):
obs = np.array(obs)
if obs.ndim != 2:
- raise Exception(
- "obs must be (or be convertible to) a 2d array"
- )
+ raise Exception("obs must be (or be convertible to) a 2d array")
self.obs = obs
self.nprobs = nprobs
self.chkmas = chkmas
@@ -324,22 +322,11 @@ def __init__(
)
self.ssflag = ssflag
self.dt0 = Util2d(
- model,
- (self.nper,),
- np.float32,
- dt0,
- name="dt0",
- array_free_format=False,
- )
- self.mxstrn = Util2d(
- model, (self.nper,), np.int32, mxstrn, name="mxstrn"
- )
- self.ttsmult = Util2d(
- model, (self.nper,), np.float32, ttsmult, name="ttmult"
- )
- self.ttsmax = Util2d(
- model, (self.nper,), np.float32, ttsmax, name="ttsmax"
+ model, (self.nper,), np.float32, dt0, name="dt0", array_free_format=False
)
+ self.mxstrn = Util2d(model, (self.nper,), np.int32, mxstrn, name="mxstrn")
+        self.ttsmult = Util2d(model, (self.nper,), np.float32, ttsmult, name="ttsmult")
+ self.ttsmax = Util2d(model, (self.nper,), np.float32, ttsmax, name="ttsmax")
# Do some fancy stuff for multi-species concentrations
self.sconc = []
@@ -677,9 +664,7 @@ def write_file(self):
# A3; Keywords
# Build a string of the active keywords
- if (
- self.parent.version == "mt3d-usgs"
- ): # Keywords not supported by MT3Dms
+ if self.parent.version == "mt3d-usgs": # Keywords not supported by MT3Dms
str1 = ""
if self.MFStyleArr:
str1 += " MODFLOWSTYLEARRAYS"
@@ -702,12 +687,7 @@ def write_file(self):
# A3
f_btn.write(
"{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}\n".format(
- self.nlay,
- self.nrow,
- self.ncol,
- self.nper,
- self.ncomp,
- self.mcomp,
+ self.nlay, self.nrow, self.ncol, self.nper, self.ncomp, self.mcomp
)
)
@@ -799,9 +779,7 @@ def write_file(self):
for i in range(nobs):
f_btn.write(
"{:10d}{:10d}{:10d}\n".format(
- self.obs[i, 0] + 1,
- self.obs[i, 1] + 1,
- self.obs[i, 2] + 1,
+ self.obs[i, 0] + 1, self.obs[i, 1] + 1, self.obs[i, 2] + 1
)
)
@@ -821,10 +799,7 @@ def write_file(self):
f_btn.write(s)
f_btn.write(
"{:10.4G}{:10d}{:10.4G}{:10.4G}\n".format(
- self.dt0[t],
- self.mxstrn[t],
- self.ttsmult[t],
- self.ttsmax[t],
+ self.dt0[t], self.mxstrn[t], self.ttsmult[t], self.ttsmax[t]
)
)
f_btn.close()
@@ -891,9 +866,8 @@ def load(cls, f, model, ext_unit_dict=None):
NoWetDryPrint = False
OmitDryBud = False
AltWTSorb = False
- if (
- m_arr[0].strip().isdigit() is not True
- ): # If m_arr[0] is not a digit, it is a keyword
+ if m_arr[0].strip().isdigit() is not True:
+ # If m_arr[0] is not a digit, it is a keyword
if model.verbose:
print(f" loading optional keywords: {line.strip()}")
for i in range(0, len(m_arr)):
@@ -967,13 +941,7 @@ def load(cls, f, model, ext_unit_dict=None):
if model.verbose:
print(" loading DELR...")
delr = Util2d.load(
- f,
- model,
- (ncol,),
- np.float32,
- "delr",
- ext_unit_dict,
- array_format="mt3d",
+ f, model, (ncol,), np.float32, "delr", ext_unit_dict, array_format="mt3d"
)
if model.verbose:
print(f" DELR {delr}")
@@ -981,13 +949,7 @@ def load(cls, f, model, ext_unit_dict=None):
if model.verbose:
print(" loading DELC...")
delc = Util2d.load(
- f,
- model,
- (nrow,),
- np.float32,
- "delc",
- ext_unit_dict,
- array_format="mt3d",
+ f, model, (nrow,), np.float32, "delc", ext_unit_dict, array_format="mt3d"
)
if model.verbose:
print(f" DELC {delc}")
@@ -1165,7 +1127,7 @@ def load(cls, f, model, ext_unit_dict=None):
if model.verbose:
print(
- " loading PERLEN, NSTP, TSMULT, TSLNGH, DT0, MXSTRN, TTSMULT, TTSMAX..."
+ " loading PERLEN, NSTP, TSMULT, TSLNGH, DT0, MXSTRN, TTSMULT, TTSMAX..." # noqa
)
dt0, mxstrn, ttsmult, ttsmax = [], [], [], []
perlen = []
diff --git a/flopy/mt3d/mtcts.py b/flopy/mt3d/mtcts.py
index 6e2eba0453..33cbb0503b 100644
--- a/flopy/mt3d/mtcts.py
+++ b/flopy/mt3d/mtcts.py
@@ -140,9 +140,7 @@ class Mt3dCts(Package):
"""
- def __init__(
- self,
- ):
+ def __init__(self):
raise NotImplementedError()
@classmethod
diff --git a/flopy/mt3d/mtdsp.py b/flopy/mt3d/mtdsp.py
index c7811c0133..afe9375af1 100644
--- a/flopy/mt3d/mtdsp.py
+++ b/flopy/mt3d/mtdsp.py
@@ -209,8 +209,8 @@ def __init__(
val = kwargs.pop(name)
else:
print(
- "DSP: setting dmcoef for component {} "
- "to zero, kwarg name {}".format(icomp, name)
+ f"DSP: setting dmcoef for component {icomp} "
+ f"to zero, kwarg name {name}"
)
u2or3 = utype(
model,
@@ -225,8 +225,7 @@ def __init__(
if len(list(kwargs.keys())) > 0:
raise Exception(
- "DSP error: unrecognized kwargs: "
- + " ".join(list(kwargs.keys()))
+ "DSP error: unrecognized kwargs: " + " ".join(list(kwargs.keys()))
)
self.parent.add_package(self)
return
@@ -271,9 +270,7 @@ def write_file(self):
return
@classmethod
- def load(
- cls, f, model, nlay=None, nrow=None, ncol=None, ext_unit_dict=None
- ):
+ def load(cls, f, model, nlay=None, nrow=None, ncol=None, ext_unit_dict=None):
"""
Load an existing package.
diff --git a/flopy/mt3d/mtlkt.py b/flopy/mt3d/mtlkt.py
index 749744b8d0..7b51af32b0 100644
--- a/flopy/mt3d/mtlkt.py
+++ b/flopy/mt3d/mtlkt.py
@@ -222,8 +222,7 @@ def __init__(
# Check to make sure that all kwargs have been consumed
if len(list(kwargs.keys())) > 0:
raise Exception(
- "LKT error: unrecognized kwargs: "
- + " ".join(list(kwargs.keys()))
+ "LKT error: unrecognized kwargs: " + " ".join(list(kwargs.keys()))
)
self.parent.add_package(self)
@@ -265,9 +264,7 @@ def write_file(self):
# (Evap, precip, specified runoff into the lake, specified
# withdrawal directly from the lake
if self.lk_stress_period_data is not None:
- self.lk_stress_period_data.write_transient(
- f_lkt, single_per=kper
- )
+ self.lk_stress_period_data.write_transient(f_lkt, single_per=kper)
else:
f_lkt.write("0\n")
@@ -275,9 +272,7 @@ def write_file(self):
return
@classmethod
- def load(
- cls, f, model, nlak=None, nper=None, ncomp=None, ext_unit_dict=None
- ):
+ def load(cls, f, model, nlak=None, nper=None, ncomp=None, ext_unit_dict=None):
"""
Load an existing package.
@@ -366,23 +361,15 @@ def load(
" Mass does not exit the model via simulated lake evaporation "
)
else:
- print(
- " Mass exits the lake via simulated lake evaporation "
- )
+ print(" Mass exits the lake via simulated lake evaporation ")
# Item 2 (COLDLAK - Initial concentration in this instance)
if model.verbose:
print(" loading initial concentration (COLDLAK) ")
if model.free_format:
- print(
- " Using MODFLOW style array reader utilities to "
- "read COLDLAK"
- )
+ print(" Using MODFLOW style array reader utilities to read COLDLAK")
elif model.array_format == "mt3d":
- print(
- " Using historic MT3DMS array reader utilities to "
- "read COLDLAK"
- )
+ print(" Using historic MT3DMS array reader utilities to read COLDLAK")
kwargs = {}
coldlak = Util2d.load(
@@ -419,9 +406,7 @@ def load(
for iper in range(nper):
if model.verbose:
- print(
- f" loading lkt boundary condition data for kper {iper + 1:5d}"
- )
+ print(f" loading lkt boundary condition data for kper {iper + 1:5d}")
# Item 3: NTMP: An integer value corresponding to the number of
# specified lake boundary conditions to follow.
@@ -437,7 +422,8 @@ def load(
print(" ntmp < 0 not allowed for first stress period ")
if (iper > 0) and (ntmp < 0):
print(
- " use lkt boundary conditions specified in last stress period "
+ " use lkt boundary conditions "
+ "specified in last stress period "
)
# Item 4: Read ntmp boundary conditions
@@ -453,9 +439,7 @@ def load(
if cbclk > 0:
for ilkvar in range(cbclk):
t.append(m_arr[ilkvar + 2])
- current_lk[ilkbnd] = tuple(
- t[: len(current_lk.dtype.names)]
- )
+ current_lk[ilkbnd] = tuple(t[: len(current_lk.dtype.names)])
# Convert ILKBC (node) index to zero-based
current_lk["node"] -= 1
current_lk = current_lk.view(np.recarray)
@@ -478,9 +462,7 @@ def load(
ext_unit_dict, filetype=Mt3dLkt._ftype()
)
if icbclk > 0:
- iu, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=icbclk
- )
+ iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=icbclk)
model.add_pop_key_list(icbclk)
# Construct and return LKT package
diff --git a/flopy/mt3d/mtrct.py b/flopy/mt3d/mtrct.py
index 5ae9b35763..b4e008f909 100644
--- a/flopy/mt3d/mtrct.py
+++ b/flopy/mt3d/mtrct.py
@@ -284,8 +284,8 @@ def __init__(
val = kwargs.pop(name)
else:
print(
- "RCT: setting sp1 for component {} to zero, "
- "kwarg name {}".format(icomp, name)
+ f"RCT: setting sp1 for component {icomp} to zero, "
+ f"kwarg name {name}"
)
u3d = Util3d(
model,
@@ -320,8 +320,8 @@ def __init__(
val = kwargs.pop(name)
else:
print(
- "RCT: setting sp2 for component {} to zero, "
- "kwarg name {}".format(icomp, name)
+ f"RCT: setting sp2 for component {icomp} to zero, "
+ f"kwarg name {name}"
)
u3d = Util3d(
model,
@@ -356,8 +356,8 @@ def __init__(
val = kwargs.pop(name)
else:
print(
- "RCT: setting rc1 for component {} to zero, "
- "kwarg name {}".format(icomp, name)
+ f"RCT: setting rc1 for component {icomp} to zero, "
+ f"kwarg name {name}"
)
u3d = Util3d(
model,
@@ -392,8 +392,8 @@ def __init__(
val = kwargs.pop(name)
else:
print(
- "RCT: setting rc2 for component {} to zero, "
- "kwarg name {}".format(icomp, name)
+ f"RCT: setting rc2 for component {icomp} to zero, "
+ f"kwarg name {name}"
)
u3d = Util3d(
model,
@@ -409,8 +409,7 @@ def __init__(
# Check to make sure that all kwargs have been consumed
if len(list(kwargs.keys())) > 0:
raise Exception(
- "RCT error: unrecognized kwargs: "
- + " ".join(list(kwargs.keys()))
+ "RCT error: unrecognized kwargs: " + " ".join(list(kwargs.keys()))
)
self.parent.add_package(self)
@@ -431,8 +430,7 @@ def write_file(self):
# Open file for writing
f_rct = open(self.fn_path, "w")
f_rct.write(
- "%10i%10i%10i%10i\n"
- % (self.isothm, self.ireact, self.irctop, self.igetsc)
+ "%10i%10i%10i%10i\n" % (self.isothm, self.ireact, self.irctop, self.igetsc)
)
if self.isothm in [1, 2, 3, 4, 6]:
f_rct.write(self.rhob.get_file_entry())
diff --git a/flopy/mt3d/mtsft.py b/flopy/mt3d/mtsft.py
index 1aeade5b9a..2bebd4aa80 100644
--- a/flopy/mt3d/mtsft.py
+++ b/flopy/mt3d/mtsft.py
@@ -382,11 +382,7 @@ def write_file(self):
# Item 1
f.write(
"{:10d}{:10d}{:10d}{:10d}{:10d}".format(
- self.nsfinit,
- self.mxsfbc,
- self.icbcsf,
- self.ioutobs,
- self.ietsfr,
+ self.nsfinit, self.mxsfbc, self.icbcsf, self.ioutobs, self.ietsfr
)
+ 30 * " "
+ "# nsfinit, mxsfbc, icbcsf, ioutobs, ietsfr\n"
@@ -428,7 +424,8 @@ def write_file(self):
f.write(line)
# Items 7, 8
- # Loop through each stress period and assign source & sink concentrations to stream features
+ # Loop through each stress period and assign source & sink
+ # concentrations to stream features
nper = self.parent.nper
for kper in range(nper):
if f.closed:
@@ -445,9 +442,7 @@ def write_file(self):
return
@classmethod
- def load(
- cls, f, model, nsfinit=None, nper=None, ncomp=None, ext_unit_dict=None
- ):
+ def load(cls, f, model, nsfinit=None, nper=None, ncomp=None, ext_unit_dict=None):
"""
Load an existing package.
@@ -606,15 +601,9 @@ def load(
print(" loading COLDSF...")
if model.free_format:
- print(
- " Using MODFLOW style array reader utilities to "
- "read COLDSF"
- )
+ print(" Using MODFLOW style array reader utilities to read COLDSF")
elif model.array_format == "mt3d":
- print(
- " Using historic MT3DMS array reader utilities to "
- "read COLDSF"
- )
+ print(" Using historic MT3DMS array reader utilities to read COLDSF")
coldsf = Util2d.load(
f,
@@ -646,15 +635,9 @@ def load(
# Item 4 (DISPSF(NRCH)) Reach-by-reach dispersion
if model.verbose:
if model.free_format:
- print(
- " Using MODFLOW style array reader utilities to "
- "read DISPSF"
- )
+ print(" Using MODFLOW style array reader utilities to read DISPSF")
elif model.array_format == "mt3d":
- print(
- " Using historic MT3DMS array reader utilities to "
- "read DISPSF"
- )
+ print(" Using historic MT3DMS array reader utilities to read DISPSF")
dispsf = Util2d.load(
f,
diff --git a/flopy/mt3d/mtssm.py b/flopy/mt3d/mtssm.py
index 8887c57cdb..d92e40c489 100644
--- a/flopy/mt3d/mtssm.py
+++ b/flopy/mt3d/mtssm.py
@@ -205,9 +205,8 @@ def __init__(
for i, label in enumerate(SsmLabels):
mfpack = mf.get_package(label)
ssmpack = SsmPackage(label, mfpack, (i < 6))
- self.__SsmPackages.append(
- ssmpack
- ) # First 6 need T/F flag in file line 1
+ # First 6 need T/F flag in file line 1
+ self.__SsmPackages.append(ssmpack)
if dtype is not None:
self.dtype = dtype
@@ -218,10 +217,7 @@ def __init__(
self.stress_period_data = None
else:
self.stress_period_data = MfList(
- self,
- model=model,
- data=stress_period_data,
- list_free_format=False,
+ self, model=model, data=stress_period_data, list_free_format=False
)
if mxss is None and mf is None:
@@ -242,9 +238,7 @@ def __init__(
if self.stress_period_data is not None:
for i in range(nper):
if i in self.stress_period_data.data:
- mxss_kper += np.sum(
- self.stress_period_data.data[i].itype == -1
- )
+ mxss_kper += np.sum(self.stress_period_data.data[i].itype == -1)
mxss_kper += np.sum(
self.stress_period_data.data[i].itype == -15
)
@@ -307,12 +301,8 @@ def __init__(
self.cevt = None
try:
- if cevt is None and (
- model.mf.evt is not None or model.mf.ets is not None
- ):
- print(
- "found 'ets'/'evt' in modflow model, resetting cevt to 0.0"
- )
+ if cevt is None and (model.mf.evt is not None or model.mf.ets is not None):
+ print("found 'ets'/'evt' in modflow model, resetting cevt to 0.0")
cevt = 0.0
except:
if model.verbose:
@@ -355,8 +345,7 @@ def __init__(
if len(list(kwargs.keys())) > 0:
raise Exception(
- "SSM error: unrecognized kwargs: "
- + " ".join(list(kwargs.keys()))
+ "SSM error: unrecognized kwargs: " + " ".join(list(kwargs.keys()))
)
# Add self to parent and return
@@ -548,9 +537,7 @@ def load(
# Item D1: Dummy input line - line already read above
if model.verbose:
- print(
- " loading FWEL, FDRN, FRCH, FEVT, FRIV, FGHB, (FNEW(n), n=1,4)..."
- )
+ print(" loading FWEL, FDRN, FRCH, FEVT, FRIV, FGHB, (FNEW(n), n=1,4)...")
fwel = line[0:2]
fdrn = line[2:4]
frch = line[4:6]
@@ -728,10 +715,7 @@ def load(
# Item D8: KSS, ISS, JSS, CSS, ITYPE, (CSSMS(n),n=1,NCOMP)
if model.verbose:
- print(
- " loading KSS, ISS, JSS, CSS, ITYPE, "
- "(CSSMS(n),n=1,NCOMP)..."
- )
+ print(" loading KSS, ISS, JSS, CSS, ITYPE, (CSSMS(n),n=1,NCOMP)...")
if nss > 0:
current = np.empty((nss), dtype=dtype)
for ibnd in range(nss):
diff --git a/flopy/mt3d/mttob.py b/flopy/mt3d/mttob.py
index 1d7927e489..6631ff2327 100644
--- a/flopy/mt3d/mttob.py
+++ b/flopy/mt3d/mttob.py
@@ -67,15 +67,11 @@ def write_file(self):
MaxFluxCells = MaxFluxCells + len(FluxGroup[1])
MaxFluxObs = MaxFluxObs + 1
f_tob.write("%10d%10d%10d\n" % (MaxConcObs, MaxFluxObs, MaxFluxCells))
- f_tob.write(
- "%s%10d%10d%10d\n" % (self.outnam, inConcObs, inFluxObs, inSaveObs)
- )
+ f_tob.write("%s%10d%10d%10d\n" % (self.outnam, inConcObs, inFluxObs, inSaveObs))
if inFluxObs:
nFluxGroup = len(self.FluxGroups)
- f_tob.write(
- "%10d%10f%10d\n" % (nFluxGroup, self.FScale, self.iOutFlux)
- )
+ f_tob.write("%10d%10f%10d\n" % (nFluxGroup, self.FScale, self.iOutFlux))
for FluxGroup in self.FluxGroups:
nFluxTimeObs, FluxTimeObs = self.assign_layer_row_column_data(
FluxGroup[0], 5, zerobase=False
@@ -94,9 +90,7 @@ def write_file(self):
)
for c in Cells:
c = c[0] # Still to fix this!
- f_tob.write(
- "%10d%10d%10d%10f\n" % (c[0], c[1], c[2], c[3])
- )
+ f_tob.write("%10d%10d%10d%10f\n" % (c[0], c[1], c[2], c[3]))
f_tob.close()
return
diff --git a/flopy/mt3d/mtuzt.py b/flopy/mt3d/mtuzt.py
index e979aa9472..d363c0b1bb 100644
--- a/flopy/mt3d/mtuzt.py
+++ b/flopy/mt3d/mtuzt.py
@@ -352,9 +352,7 @@ def write_file(self):
incuzinf = max(incuzinf, incuzinficomp)
if incuzinf == 1:
break
- f_uzt.write(
- f"{incuzinf:10d} # INCUZINF - SP {kper + 1:5d}\n"
- )
+ f_uzt.write(f"{incuzinf:10d} # INCUZINF - SP {kper + 1:5d}\n")
if incuzinf == 1:
for t2d in self.cuzinf:
u2d = t2d[kper]
@@ -497,9 +495,7 @@ def load(
cuzinf = None
# At least one species being simulated, so set up a place holder
- t2d = Transient2d(
- model, (nrow, ncol), np.float32, 0.0, name="cuzinf", locat=0
- )
+ t2d = Transient2d(model, (nrow, ncol), np.float32, 0.0, name="cuzinf", locat=0)
cuzinf = {0: t2d}
if ncomp > 1:
for icomp in range(2, ncomp + 1):
@@ -520,12 +516,7 @@ def load(
for icomp in range(2, ncomp + 1):
name = f"cuzet{icomp}"
t2d = Transient2d(
- model,
- (nrow, ncol),
- np.float32,
- 0.0,
- name=name,
- locat=0,
+ model, (nrow, ncol), np.float32, 0.0, name=name, locat=0
)
kwargs[name] = {0: t2d}
@@ -539,12 +530,7 @@ def load(
for icomp in range(2, ncomp + 1):
name = f"cgwet{icomp}"
t2d = Transient2d(
- model,
- (nrow, ncol),
- np.float32,
- 0.0,
- name=name,
- locat=0,
+ model, (nrow, ncol), np.float32, 0.0, name=name, locat=0
)
kwargs[name] = {0: t2d}
elif iet == 0:
@@ -577,12 +563,7 @@ def load(
if model.verbose:
print(f" loading {name}...")
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- name,
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, name, ext_unit_dict
)
cuzinficomp = kwargs[name]
cuzinficomp[iper] = t
@@ -617,12 +598,7 @@ def load(
if model.verbose:
print(f" Reading CUZET array for kper {iper + 1:5d}")
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- "cuzet",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, "cuzet", ext_unit_dict
)
cuzet[iper] = t
@@ -633,12 +609,7 @@ def load(
if model.verbose:
print(f" loading {name}")
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- name,
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, name, ext_unit_dict
)
cuzeticomp = kwargs[name]
cuzeticomp[iper] = t
@@ -671,12 +642,7 @@ def load(
if incuzet >= 0:
print(f" Reading CGWET array for kper {iper + 1:5d}")
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- "cgwet",
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, "cgwet", ext_unit_dict
)
cgwet[iper] = t
@@ -687,12 +653,7 @@ def load(
if model.verbose:
print(f" loading {name}...")
t = Util2d.load(
- f,
- model,
- (nrow, ncol),
- np.float32,
- name,
- ext_unit_dict,
+ f, model, (nrow, ncol), np.float32, name, ext_unit_dict
)
cgweticomp = kwargs[name]
cgweticomp[iper] = t
@@ -726,9 +687,7 @@ def load(
ext_unit_dict, filetype=Mt3dUzt._ftype()
)
if icbcuz > 0:
- iu, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=icbcuz
- )
+ iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=icbcuz)
model.add_pop_key_list(icbcuz)
# Construct and return uzt package
diff --git a/flopy/pakbase.py b/flopy/pakbase.py
index bf153da5d6..127596c932 100644
--- a/flopy/pakbase.py
+++ b/flopy/pakbase.py
@@ -211,18 +211,14 @@ def _get_kparams(self):
kparams[kp] = name
if "hk" in self.__dict__:
if self.hk.shape[1] is None:
- hk = np.asarray(
- [a.array.flatten() for a in self.hk], dtype=object
- )
+ hk = np.asarray([a.array.flatten() for a in self.hk], dtype=object)
else:
hk = self.hk.array.copy()
else:
hk = self.k.array.copy()
if "vka" in self.__dict__ and self.layvka.sum() > 0:
if self.vka.shape[1] is None:
- vka = np.asarray(
- [a.array.flatten() for a in self.vka], dtype=object
- )
+ vka = np.asarray([a.array.flatten() for a in self.vka], dtype=object)
else:
vka = self.vka.array
vka_param = kparams.pop("vka")
@@ -263,11 +259,7 @@ def _check_flowp(self, f=None, verbose=True, level=1, checktype=None):
for l in range(vka.shape[0]):
vka[l] *= hk[l] if self.layvka.array[l] != 0 else 1
self._check_thresholds(
- chk,
- vka,
- active,
- chk.property_threshold_values["vka"],
- vka_param,
+ chk, vka, active, chk.property_threshold_values["vka"], vka_param
)
for kp, name in kparams.items():
@@ -330,9 +322,7 @@ def check(self, f=None, verbose=True, level=1, checktype=None):
):
chk = self._check_oc(f, verbose, level, checktype)
# check property values in upw and lpf packages
- elif self.name[0] in ["UPW", "LPF"] or self.package_type.upper() in [
- "NPF"
- ]:
+ elif self.name[0] in ["UPW", "LPF"] or self.package_type.upper() in ["NPF"]:
chk = self._check_flowp(f, verbose, level, checktype)
elif self.package_type.upper() in ["STO"]:
chk = self._get_check(f, verbose, level, checktype)
@@ -386,7 +376,7 @@ def _check_storage(self, chk, storage_coeff):
[
(
True
- if l > 0 or l < 0 and "THICKSTRT" in self.options
+ if l > 0 or (l < 0 and "THICKSTRT" in self.options)
else False
)
for l in self.laytyp
@@ -394,7 +384,8 @@ def _check_storage(self, chk, storage_coeff):
)
if inds.any():
if self.sy.shape[1] is None:
- # unstructured; build flat nodal property array slicers (by layer)
+ # unstructured;
+ # build flat nodal property array slicers (by layer)
node_to = np.cumsum([s.array.size for s in self.ss])
node_from = np.array([0] + list(node_to[:-1]))
node_k_slices = np.array(
@@ -417,19 +408,14 @@ def _check_storage(self, chk, storage_coeff):
else:
iconvert = self.iconvert.array
inds = np.array(
- [
- True if l > 0 or l < 0 else False
- for l in iconvert.flatten()
- ]
+ [True if l > 0 or l < 0 else False for l in iconvert.flatten()]
)
if not inds.any():
skip_sy_check = True
for ishape in np.ndindex(active.shape):
if active[ishape]:
- active[ishape] = (
- iconvert[ishape] > 0 or iconvert[ishape] < 0
- )
+ active[ishape] = iconvert[ishape] > 0 or iconvert[ishape] < 0
if not skip_sy_check:
chk.values(
sarrays["sy"],
@@ -528,21 +514,18 @@ def __getitem__(self, item):
spd = getattr(self, "stress_period_data")
if isinstance(item, MfList):
if not isinstance(item, list) and not isinstance(item, tuple):
- msg = (
- f"package.__getitem__() kper {item} not in data.keys()"
- )
+ msg = f"package.__getitem__() kper {item} not in data.keys()"
assert item in list(spd.data.keys()), msg
return spd[item]
if item[1] not in self.dtype.names:
raise Exception(
- "package.__getitem(): item {} not in dtype names "
- "{}".format(item, self.dtype.names)
+ "package.__getitem(): item {} not in dtype names {}".format(
+ item, self.dtype.names
+ )
)
- msg = (
- f"package.__getitem__() kper {item[0]} not in data.keys()"
- )
+ msg = f"package.__getitem__() kper {item[0]} not in data.keys()"
assert item[0] in list(spd.data.keys()), msg
if spd.vtype[item[0]] == np.recarray:
@@ -756,7 +739,7 @@ def _confined_layer_check(self, chk):
if option.lower() == "thickstrt":
thickstrt = True
for i, l in enumerate(self.laytyp.array.tolist()):
- if l == 0 or l < 0 and thickstrt:
+ if l == 0 or (l < 0 and thickstrt):
confined = True
continue
if confined and l > 0:
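This hunk and the THICKSTRT hunk earlier change nothing behaviorally: `and` already binds tighter than `or` in Python, so the added parentheses only make the existing grouping explicit. A quick standalone check (values hypothetical):

```python
# `and` binds tighter than `or`; the parentheses document the grouping
l, thickstrt = 0, False
assert (l == 0 or l < 0 and thickstrt) is True       # original form
assert (l == 0 or (l < 0 and thickstrt)) is True     # parenthesized form
assert ((l == 0 or l < 0) and thickstrt) is False    # NOT this grouping
```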
@@ -925,9 +908,7 @@ def load(
if nppak > 0:
mxl = int(t[2])
if model.verbose:
- print(
- f" Parameters detected. Number of parameters = {nppak}"
- )
+ print(f" Parameters detected. Number of parameters = {nppak}")
line = f.readline()
# dataset 2a
@@ -950,9 +931,7 @@ def load(
mxl = int(t[3])
imax += 1
if model.verbose:
- print(
- f" Parameters detected. Number of parameters = {nppak}"
- )
+ print(f" Parameters detected. Number of parameters = {nppak}")
options = []
aux_names = []
@@ -1024,9 +1003,7 @@ def load(
dt = pak_type.get_empty(
1, aux_names=aux_names, structured=model.structured
).dtype
- pak_parms = mfparbc.load(
- f, nppak, dt, model, ext_unit_dict, model.verbose
- )
+ pak_parms = mfparbc.load(f, nppak, dt, model, ext_unit_dict, model.verbose)
if nper is None:
nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
@@ -1070,9 +1047,7 @@ def load(
current = pak_type.get_empty(
itmp, aux_names=aux_names, structured=model.structured
)
- current = ulstrd(
- f, itmp, current, model, sfac_columns, ext_unit_dict
- )
+ current = ulstrd(f, itmp, current, model, sfac_columns, ext_unit_dict)
if model.structured:
current["k"] -= 1
current["i"] -= 1
@@ -1096,12 +1071,7 @@ def load(
itmp_cln, aux_names=aux_names, structured=False
)
current_cln = ulstrd(
- f,
- itmp_cln,
- current_cln,
- model,
- sfac_columns,
- ext_unit_dict,
+ f, itmp_cln, current_cln, model, sfac_columns, ext_unit_dict
)
current_cln["node"] -= 1
bnd_output_cln = np.recarray.copy(current_cln)
@@ -1126,16 +1096,12 @@ def load(
iname = "static"
except:
if model.verbose:
- print(
- f" implicit static instance for parameter {pname}"
- )
+ print(f" implicit static instance for parameter {pname}")
par_dict, current_dict = pak_parms.get(pname)
data_dict = current_dict[iname]
- par_current = pak_type.get_empty(
- par_dict["nlst"], aux_names=aux_names
- )
+ par_current = pak_type.get_empty(par_dict["nlst"], aux_names=aux_names)
# get appropriate parval
if model.mfpar.pval is None:
@@ -1149,9 +1115,7 @@ def load(
# fill current parameter data (par_current)
for ibnd, t in enumerate(data_dict):
t = tuple(t)
- par_current[ibnd] = tuple(
- t[: len(par_current.dtype.names)]
- )
+ par_current[ibnd] = tuple(t[: len(par_current.dtype.names)])
if model.structured:
par_current["k"] -= 1
@@ -1196,9 +1160,7 @@ def load(
ext_unit_dict, filetype=pak_type._ftype()
)
if ipakcb > 0:
- iu, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=ipakcb
- )
+ iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
model.add_pop_key_list(ipakcb)
if "mfusgwel" in pak_type_str:
@@ -1227,11 +1189,7 @@ def load(
filenames=filenames,
)
if check:
- pak.check(
- f=f"{pak.name[0]}.chk",
- verbose=pak.parent.verbose,
- level=0,
- )
+ pak.check(f=f"{pak.name[0]}.chk", verbose=pak.parent.verbose, level=0)
return pak
def set_cbc_output_file(self, ipakcb, model, fname):
diff --git a/flopy/pest/templatewriter.py b/flopy/pest/templatewriter.py
index ad0eb12534..7b8e6f19e0 100644
--- a/flopy/pest/templatewriter.py
+++ b/flopy/pest/templatewriter.py
@@ -46,8 +46,8 @@ def write_template(self):
# Check to make sure pak has p.type as an attribute
if not hasattr(pak, p.type.lower()):
msg = (
- "Parameter named {} of type {} not found in "
- "package {}".format(p.name, p.type.lower(), ftype)
+ f"Parameter named {p.name} of type {p.type.lower()} "
+ f"not found in package {ftype}"
)
raise Exception(msg)
@@ -85,9 +85,8 @@ def write_template(self):
# Write the file
paktpl.heading = "ptf ~\n" + paktpl.heading
paktpl.fn_path += ".tpl"
- paktpl.write_file(
- check=False
- ) # for now, turn off checks for template files
+ # for now, turn off checks for template files
+ paktpl.write_file(check=False)
# Destroy the template version of the package
paktpl = None
diff --git a/flopy/plot/__init__.py b/flopy/plot/__init__.py
index c5a61d12c6..cebcc751cb 100644
--- a/flopy/plot/__init__.py
+++ b/flopy/plot/__init__.py
@@ -23,10 +23,5 @@
from .crosssection import PlotCrossSection
from .map import PlotMapView
-from .plotutil import (
- PlotUtilities,
- SwiConcentration,
- plot_shapefile,
- shapefile_extents,
-)
+from .plotutil import PlotUtilities, SwiConcentration, plot_shapefile, shapefile_extents
from .styles import styles
diff --git a/flopy/plot/crosssection.py b/flopy/plot/crosssection.py
index 41231211ce..2480fa647a 100644
--- a/flopy/plot/crosssection.py
+++ b/flopy/plot/crosssection.py
@@ -39,10 +39,13 @@ class PlotCrossSection:
(xmin, xmax, ymin, ymax) will be used to specify axes limits. If None
then these will be calculated based on grid, coordinates, and rotation.
geographic_coords : bool
- boolean flag to allow the user to plot cross section lines in
- geographic coordinates. If False (default), cross section is plotted
- as the distance along the cross section line.
-
+ boolean flag to allow the user to plot cross-section lines in
+ geographic coordinates. If False (default), cross-section is plotted
+ as the distance along the cross-section line.
+    min_segment_length : float
+        minimum cross-sectional width for a grid cell polygon to be
+        plotted. Cells narrower than min_segment_length are excluded
+        from the plot. Default is 1e-02.
"""
def __init__(
@@ -53,6 +56,7 @@ def __init__(
line=None,
extent=None,
geographic_coords=False,
+ min_segment_length=1e-02,
):
self.ax = ax
self.geographic_coords = geographic_coords
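A minimal sketch of how the new keyword is used, assuming `gwf` is an already-loaded flopy model on a structured grid (the model object and row index are hypothetical):

```python
import flopy

# cells whose cross-sectional width falls below min_segment_length
# are dropped from the section (default 1e-02)
xsect = flopy.plot.PlotCrossSection(
    model=gwf,
    line={"row": 4},
    min_segment_length=1e-2,
)
xsect.plot_grid()
```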
@@ -89,7 +93,7 @@ def __init__(
else:
self.ax = ax
- onkey = list(line.keys())[0]
+ onkey = next(iter(line.keys()))
self.__geographic_xpts = None
# un-translate model grid into model coordinates
@@ -107,9 +111,7 @@ def __init__(
(
xverts,
yverts,
- ) = plotutil.UnstructuredPlotUtilities.irregular_shape_patch(
- xverts, yverts
- )
+ ) = plotutil.UnstructuredPlotUtilities.irregular_shape_patch(xverts, yverts)
self.xvertices, self.yvertices = geometry.transform(
xverts,
@@ -172,7 +174,7 @@ def __init__(
xp[idx2] += 1e-03
self.direction = "y"
- pts = [(xt, yt) for xt, yt in zip(xp, yp)]
+ pts = list(zip(xp, yp))
self.pts = np.array(pts)
@@ -180,8 +182,24 @@ def __init__(
self.pts, self.xvertices, self.yvertices
)
+ self.xypts = plotutil.UnstructuredPlotUtilities.filter_line_segments(
+ self.xypts, threshold=min_segment_length
+ )
+        # ensure the ordering of vertices in xypts is consistent with
+        # the projection; in some cases the vertex pair must be reversed
+ for node, points in self.xypts.items():
+ if self.direction == "y":
+ if points[0][-1] < points[1][-1]:
+ points = points[::-1]
+ else:
+ if points[0][0] > points[1][0]:
+ points = points[::-1]
+
+ self.xypts[node] = points
+
if len(self.xypts) < 2:
- if len(list(self.xypts.values())[0]) < 2:
+ if len(next(iter(self.xypts.values()))) < 2:
s = (
"cross-section cannot be created\n."
" less than 2 points intersect the model grid\n"
@@ -197,13 +215,9 @@ def __init__(
xp = [t[0] for t in pt]
yp = [t[1] for t in pt]
xp, yp = geometry.transform(
- xp,
- yp,
- self.mg.xoffset,
- self.mg.yoffset,
- self.mg.angrot_radians,
+ xp, yp, self.mg.xoffset, self.mg.yoffset, self.mg.angrot_radians
)
- xypts[nn] = [(xt, yt) for xt, yt in zip(xp, yp)]
+ xypts[nn] = list(zip(xp, yp))
self.xypts = xypts
@@ -224,9 +238,7 @@ def __init__(
else:
self.active = np.ones(self.mg.nlay, dtype=int)
- self._nlay, self._ncpl, self.ncb = self.mg.cross_section_lay_ncpl_ncb(
- self.ncb
- )
+ self._nlay, self._ncpl, self.ncb = self.mg.cross_section_lay_ncpl_ncb(self.ncb)
top = self.mg.top.reshape(1, self._ncpl)
botm = self.mg.botm.reshape(self._nlay + self.ncb, self._ncpl)
@@ -238,6 +250,7 @@ def __init__(
self.idomain = np.ones(botm.shape, dtype=int)
self.projpts = self.set_zpts(None)
+ self.projctr = None
# Create cross-section extent
if extent is None:
@@ -271,24 +284,11 @@ def __init__(
def _is_valid(line):
shapely_geo = import_optional_dependency("shapely.geometry")
- if isinstance(
- line,
- (
- list,
- tuple,
- np.ndarray,
- ),
- ):
+ if isinstance(line, (list, tuple, np.ndarray)):
a = np.array(line)
if (len(a.shape) < 2 or a.shape[0] < 2) or a.shape[1] != 2:
return False
- elif not isinstance(
- line,
- (
- geometry.LineString,
- shapely_geo.LineString,
- ),
- ):
+ elif not isinstance(line, (geometry.LineString, shapely_geo.LineString)):
return False
return True
@@ -329,9 +329,7 @@ def polygons(self):
if cell not in self._polygons:
self._polygons[cell] = [Polygon(verts, closed=True)]
else:
- self._polygons[cell].append(
- Polygon(verts, closed=True)
- )
+ self._polygons[cell].append(Polygon(verts, closed=True))
return copy.copy(self._polygons)
@@ -476,9 +474,7 @@ def plot_surface(self, a, masked_values=None, **kwargs):
elif a[cell] is np.ma.masked:
continue
else:
- line = ax.plot(
- d[cell], [a[cell], a[cell]], color=color, **kwargs
- )
+ line = ax.plot(d[cell], [a[cell], a[cell]], color=color, **kwargs)
surface.append(line)
ax = self._set_axes_limits(ax)
@@ -534,9 +530,7 @@ def plot_fill_between(
else:
projpts = self.projpts
- pc = self.get_grid_patch_collection(
- a, projpts, fill_between=True, **kwargs
- )
+ pc = self.get_grid_patch_collection(a, projpts, fill_between=True, **kwargs)
if pc is not None:
ax.add_collection(pc)
ax = self._set_axes_limits(ax)
@@ -578,13 +572,10 @@ def contour_array(self, a, masked_values=None, head=None, **kwargs):
xcenters = self.xcenters
plotarray = np.array([a[cell] for cell in sorted(self.projpts)])
- (
- plotarray,
- xcenters,
- zcenters,
- mplcontour,
- ) = self.mg.cross_section_set_contour_arrays(
- plotarray, xcenters, head, self.elev, self.projpts
+ (plotarray, xcenters, zcenters, mplcontour) = (
+ self.mg.cross_section_set_contour_arrays(
+ plotarray, xcenters, head, self.elev, self.projpts
+ )
)
if not mplcontour:
@@ -592,10 +583,7 @@ def contour_array(self, a, masked_values=None, head=None, **kwargs):
zcenters = self.set_zcentergrid(np.ravel(head))
else:
zcenters = np.array(
- [
- np.mean(np.array(v).T[1])
- for i, v in sorted(self.projpts.items())
- ]
+ [np.mean(np.array(v).T[1]) for i, v in sorted(self.projpts.items())]
)
# work around for tri-contour ignore vmin & vmax
@@ -645,13 +633,9 @@ def contour_array(self, a, masked_values=None, head=None, **kwargs):
if mplcontour:
plotarray = np.ma.masked_array(plotarray, ismasked)
if filled:
- contour_set = ax.contourf(
- xcenters, zcenters, plotarray, **kwargs
- )
+ contour_set = ax.contourf(xcenters, zcenters, plotarray, **kwargs)
else:
- contour_set = ax.contour(
- xcenters, zcenters, plotarray, **kwargs
- )
+ contour_set = ax.contour(xcenters, zcenters, plotarray, **kwargs)
else:
triang = tri.Triangulation(xcenters, zcenters)
analyze = tri.TriAnalyzer(triang)
@@ -762,19 +746,12 @@ def plot_ibound(
plotarray[idx1] = 1
plotarray[idx2] = 2
plotarray = np.ma.masked_equal(plotarray, 0)
- cmap = matplotlib.colors.ListedColormap(
- ["none", color_noflow, color_ch]
- )
+ cmap = matplotlib.colors.ListedColormap(["none", color_noflow, color_ch])
bounds = [0, 1, 2, 3]
norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
# mask active cells
patches = self.plot_array(
- plotarray,
- masked_values=[0],
- head=head,
- cmap=cmap,
- norm=norm,
- **kwargs,
+ plotarray, masked_values=[0], head=head, cmap=cmap, norm=norm, **kwargs
)
return patches
@@ -799,9 +776,7 @@ def plot_grid(self, **kwargs):
ax.add_collection(col)
return col
- def plot_bc(
- self, name=None, package=None, kper=0, color=None, head=None, **kwargs
- ):
+ def plot_bc(self, name=None, package=None, kper=0, color=None, head=None, **kwargs):
"""
Plot boundary conditions locations for a specific boundary
type from a flopy model
@@ -857,15 +832,11 @@ def plot_bc(
try:
mflist = pp.stress_period_data.array[kper]
except Exception as e:
- raise Exception(
- f"Not a list-style boundary package: {e!s}"
- )
+ raise Exception(f"Not a list-style boundary package: {e!s}")
if mflist is None:
return
- t = np.array(
- [list(i) for i in mflist["cellid"]], dtype=int
- ).T
+ t = np.array([list(i) for i in mflist["cellid"]], dtype=int).T
if len(idx) == 0:
idx = np.copy(t)
@@ -880,9 +851,7 @@ def plot_bc(
try:
mflist = p.stress_period_data[kper]
except Exception as e:
- raise Exception(
- f"Not a list-style boundary package: {e!s}"
- )
+ raise Exception(f"Not a list-style boundary package: {e!s}")
if mflist is None:
return
if len(self.mg.shape) == 3:
@@ -891,9 +860,7 @@ def plot_bc(
idx = mflist["node"]
if len(self.mg.shape) == 3:
- plotarray = np.zeros(
- (self.mg.nlay, self.mg.nrow, self.mg.ncol), dtype=int
- )
+ plotarray = np.zeros((self.mg.nlay, self.mg.nrow, self.mg.ncol), dtype=int)
plotarray[idx[0], idx[1], idx[2]] = 1
elif len(self.mg.shape) == 2:
plotarray = np.zeros((self._nlay, self._ncpl), dtype=int)
@@ -916,16 +883,116 @@ def plot_bc(
bounds = [0, 1, 2]
norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
patches = self.plot_array(
- plotarray,
- masked_values=[0],
- head=head,
- cmap=cmap,
- norm=norm,
- **kwargs,
+ plotarray, masked_values=[0], head=head, cmap=cmap, norm=norm, **kwargs
)
return patches
+ def plot_centers(
+ self, a=None, s=None, masked_values=None, inactive=False, **kwargs
+ ):
+ """
+        Method to plot cell centers on a cross-section using matplotlib
+        scatter. This method accepts optional data arrays for
+        coloring and scaling the cell centers. Cell centers in inactive
+        nodes are not plotted by default.
+
+ Parameters
+ ----------
+ a : None, np.ndarray
+ optional numpy nd.array of size modelgrid.nnodes
+ s : None, float, numpy array
+ optional point size parameter
+ masked_values : None, iterable
+ optional list, tuple, or np array of array (a) values to mask
+ inactive : bool
+ boolean flag to include inactive cell centers in the plot.
+ Default is False
+ **kwargs :
+ matplotlib ax.scatter() keyword arguments
+
+ Returns
+ -------
+ matplotlib ax.scatter() object
+ """
+ ax = kwargs.pop("ax", self.ax)
+
+ projpts = self.projpts
+ nodes = list(projpts.keys())
+ xcs = self.mg.xcellcenters.ravel()
+ ycs = self.mg.ycellcenters.ravel()
+ projctr = {}
+
+ if not self.geographic_coords:
+ xcs, ycs = geometry.transform(
+ xcs,
+ ycs,
+ self.mg.xoffset,
+ self.mg.yoffset,
+ self.mg.angrot_radians,
+ inverse=True,
+ )
+
+ for node, points in self.xypts.items():
+ projpt = projpts[node]
+ d0 = np.min(np.array(projpt).T[0])
+
+ xc_dist = geometry.project_point_onto_xc_line(
+ points[:2], [xcs[node], ycs[node]], d0=d0, calc_dist=True
+ )
+ projctr[node] = xc_dist
+
+ else:
+ projctr = {}
+ for node in nodes:
+ if self.direction == "x":
+ projctr[node] = xcs[node]
+ else:
+ projctr[node] = ycs[node]
+
+ # pop off any centers that are outside the "visual field"
+ # for a given cross-section.
+ removed = {}
+ for node, points in projpts.items():
+ center = projctr[node]
+ points = np.array(points[:2]).T
+ if np.min(points[0]) > center or np.max(points[0]) < center:
+ removed[node] = (np.min(points[0]), center, np.max(points[0]))
+ projctr.pop(node)
+
+ # filter out inactive cells
+ if not inactive:
+ idomain = self.mg.idomain.ravel()
+ for node, points in projpts.items():
+ if idomain[node] == 0:
+ if node in projctr:
+ projctr.pop(node)
+
+ self.projctr = projctr
+ nodes = list(projctr.keys())
+ xcenters = list(projctr.values())
+ zcenters = [np.mean(np.array(projpts[node]).T[1]) for node in nodes]
+
+ if a is not None:
+ if not isinstance(a, np.ndarray):
+ a = np.array(a)
+ a = a.ravel().astype(float)
+
+ if masked_values is not None:
+ self._masked_values.extend(list(masked_values))
+
+ for mval in self._masked_values:
+ a[a == mval] = np.nan
+
+ a = a[nodes]
+
+ if s is not None:
+ if not isinstance(s, (int, float)):
+ s = s[nodes]
+ scat = ax.scatter(xcenters, zcenters, c=a, s=s, **kwargs)
+ return scat
+
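A minimal usage sketch for the new cross-section `plot_centers` method, assuming `gwf` is a loaded model and `head` holds `modelgrid.nnodes` values (both hypothetical):

```python
import flopy

xsect = flopy.plot.PlotCrossSection(model=gwf, line={"row": 0})
xsect.plot_grid()
# color the centers by head; inactive cells are skipped by default
scat = xsect.plot_centers(a=head, masked_values=[1e30], s=10)
```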
def plot_vector(
self,
vx,
@@ -984,19 +1051,16 @@ def plot_vector(
arbitrary = False
pts = self.pts
xuniform = [
- True if abs(pts.T[0, 0] - i) < self.mean_dy else False
- for i in pts.T[0]
+ True if abs(pts.T[0, 0] - i) < self.mean_dy else False for i in pts.T[0]
]
yuniform = [
- True if abs(pts.T[1, 0] - i) < self.mean_dx else False
- for i in pts.T[1]
+ True if abs(pts.T[1, 0] - i) < self.mean_dx else False for i in pts.T[1]
]
if not np.all(xuniform) and not np.all(yuniform):
arbitrary = True
if arbitrary:
err_msg = (
- "plot_specific_discharge() does not "
- "support arbitrary cross-sections"
+ "plot_specific_discharge() does not support arbitrary cross-sections"
)
raise AssertionError(err_msg)
@@ -1024,9 +1088,7 @@ def plot_vector(
zcenters = self.set_zcentergrid(np.ravel(head), kstep=kstep)
else:
- zcenters = [
- np.mean(np.array(v).T[1]) for i, v in sorted(projpts.items())
- ]
+ zcenters = [np.mean(np.array(v).T[1]) for i, v in sorted(projpts.items())]
xcenters = np.array(
[np.mean(np.array(v).T[0]) for i, v in sorted(projpts.items())]
@@ -1067,9 +1129,7 @@ def plot_vector(
return quiver
- def plot_pathline(
- self, pl, travel_time=None, method="cell", head=None, **kwargs
- ):
+ def plot_pathline(self, pl, travel_time=None, method="cell", head=None, **kwargs):
"""
Plot particle pathlines. Compatible with MODFLOW 6 PRT particle track
data format, or MODPATH 6 or 7 pathline data format.
@@ -1209,9 +1269,7 @@ def plot_pathline(
return lc
- def plot_timeseries(
- self, ts, travel_time=None, method="cell", head=None, **kwargs
- ):
+ def plot_timeseries(self, ts, travel_time=None, method="cell", head=None, **kwargs):
"""
Plot the MODPATH timeseries. Not compatible with MODFLOW 6 PRT.
@@ -1350,6 +1408,7 @@ def plot_endpoint(
self.xvertices,
self.yvertices,
self.direction,
+ self._ncpl,
method=method,
starting=istart,
)
@@ -1362,6 +1421,7 @@ def plot_endpoint(
self.xypts,
self.direction,
self.mg,
+ self._ncpl,
self.geographic_coords,
starting=istart,
)
@@ -1369,8 +1429,8 @@ def plot_endpoint(
arr = []
c = []
for node, epl in sorted(epdict.items()):
- c.append(cd[node])
for xy in epl:
+ c.append(cd[node])
arr.append(xy)
arr = np.array(arr)
@@ -1404,9 +1464,7 @@ def get_grid_line_collection(self, **kwargs):
facecolor = kwargs.pop("facecolor", "none")
facecolor = kwargs.pop("fc", facecolor)
- polygons = [
- p for _, polys in sorted(self.polygons.items()) for p in polys
- ]
+ polygons = [p for _, polys in sorted(self.polygons.items()) for p in polys]
if len(polygons) > 0:
patches = PatchCollection(
polygons, edgecolor=edgecolor, facecolor=facecolor, **kwargs
@@ -1643,9 +1701,7 @@ def get_grid_patch_collection(
data.append(plotarray[cell])
if len(rectcol) > 0:
- patches = PatchCollection(
- rectcol, match_original=match_original, **kwargs
- )
+ patches = PatchCollection(rectcol, match_original=match_original, **kwargs)
if not fill_between:
patches.set_array(np.array(data))
patches.set_clim(vmin, vmax)
diff --git a/flopy/plot/map.py b/flopy/plot/map.py
index 06473bf7f0..f27208eb50 100644
--- a/flopy/plot/map.py
+++ b/flopy/plot/map.py
@@ -42,9 +42,7 @@ class PlotMapView:
"""
- def __init__(
- self, model=None, modelgrid=None, ax=None, layer=0, extent=None
- ):
+ def __init__(self, model=None, modelgrid=None, ax=None, layer=0, extent=None):
self.model = model
self.layer = layer
self.mg = None
@@ -149,9 +147,7 @@ def plot_array(self, a, masked_values=None, **kwargs):
return
if not isinstance(polygons[0], Path):
- collection = ax.pcolormesh(
- self.mg.xvertices, self.mg.yvertices, plotarray
- )
+ collection = ax.pcolormesh(self.mg.xvertices, self.mg.yvertices, plotarray)
else:
plotarray = plotarray.ravel()
@@ -506,15 +502,11 @@ def plot_bc(
try:
mflist = pp.stress_period_data.array[kper]
except Exception as e:
- raise Exception(
- f"Not a list-style boundary package: {e!s}"
- )
+ raise Exception(f"Not a list-style boundary package: {e!s}")
if mflist is None:
return
- t = np.array(
- [list(i) for i in mflist["cellid"]], dtype=int
- ).T
+ t = np.array([list(i) for i in mflist["cellid"]], dtype=int).T
if len(idx) == 0:
idx = np.copy(t)
@@ -529,9 +521,7 @@ def plot_bc(
try:
mflist = p.stress_period_data[kper]
except Exception as e:
- raise Exception(
- f"Not a list-style boundary package: {e!s}"
- )
+ raise Exception(f"Not a list-style boundary package: {e!s}")
if mflist is None:
return
if len(self.mg.shape) == 3:
@@ -624,6 +614,65 @@ def plot_shapes(self, obj, **kwargs):
ax = self._set_axes_limits(ax)
return patch_collection
+ def plot_centers(
+ self, a=None, s=None, masked_values=None, inactive=False, **kwargs
+ ):
+ """
+        Method to plot cell centers in map view using matplotlib
+        scatter. This method accepts optional data arrays for
+        coloring and scaling the cell centers. Cell centers in inactive
+        nodes are not plotted by default.
+
+ Parameters
+ ----------
+ a : None, np.ndarray
+ optional numpy nd.array of size modelgrid.nnodes
+ s : None, float, numpy array
+ optional point size parameter
+ masked_values : None, iterable
+ optional list, tuple, or np array of array (a) values to mask
+ inactive : bool
+ boolean flag to include inactive cell centers in the plot.
+ Default is False
+ **kwargs :
+ matplotlib ax.scatter() keyword arguments
+
+ Returns
+ -------
+ matplotlib ax.scatter() object
+ """
+ ax = kwargs.pop("ax", self.ax)
+
+ xcenters = self.mg.get_xcellcenters_for_layer(self.layer).ravel()
+ ycenters = self.mg.get_ycellcenters_for_layer(self.layer).ravel()
+ idomain = self.mg.get_plottable_layer_array(self.mg.idomain, self.layer).ravel()
+
+ active_ixs = list(range(len(xcenters)))
+ if not inactive:
+ active_ixs = np.where(idomain != 0)[0]
+
+ xcenters = xcenters[active_ixs]
+ ycenters = ycenters[active_ixs]
+
+ if a is not None:
+            a = self.mg.get_plottable_layer_array(a, self.layer).ravel()
+
+ if masked_values is not None:
+ self._masked_values.extend(list(masked_values))
+
+ for mval in self._masked_values:
+ a[a == mval] = np.nan
+
+ a = a[active_ixs]
+
+ if s is not None:
+ if not isinstance(s, (int, float)):
+                s = self.mg.get_plottable_layer_array(s, self.layer).ravel()
+ s = s[active_ixs]
+
+ scat = ax.scatter(xcenters, ycenters, c=a, s=s, **kwargs)
+ return scat
+
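The map-view counterpart takes the same arguments; a sketch under the same assumptions (`gwf` and `head` hypothetical):

```python
import flopy

pmv = flopy.plot.PlotMapView(model=gwf, layer=0)
pmv.plot_grid()
scat = pmv.plot_centers(a=head, s=8)
```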
def plot_vector(
self,
vx,
@@ -983,11 +1032,7 @@ def plot_endpoint(
# transform data!
x0r, y0r = geometry.transform(
- tep[xp],
- tep[yp],
- self.mg.xoffset,
- self.mg.yoffset,
- self.mg.angrot_radians,
+ tep[xp], tep[yp], self.mg.xoffset, self.mg.yoffset, self.mg.angrot_radians
)
# build array to plot
arr = np.vstack((x0r, y0r)).T
diff --git a/flopy/plot/plotutil.py b/flopy/plot/plotutil.py
index 4efab156a6..7771bc59d0 100644
--- a/flopy/plot/plotutil.py
+++ b/flopy/plot/plotutil.py
@@ -356,10 +356,7 @@ def _plot_package_helper(package, **kwargs):
if defaults["key"] is None:
names = [
"{} {} location stress period {} layer {}".format(
- model_name,
- package.name[0],
- defaults["kper"] + 1,
- k + 1,
+ model_name, package.name[0], defaults["kper"] + 1, k + 1
)
for k in range(package.parent.modelgrid.nlay)
]
@@ -623,11 +620,7 @@ def _plot_mflist_helper(
else:
names = [
"{}{} {} stress period: {} layer: {}".format(
- model_name,
- mflist.package.name[0],
- key,
- kper + 1,
- k + 1,
+ model_name, mflist.package.name[0], key, kper + 1, k + 1
)
for k in range(mflist.model.modelgrid.nlay)
]
@@ -841,8 +834,7 @@ def _plot_util3d_helper(
name = [name] * nplottable_layers
names = [
- f"{model_name}{name[k]} layer {k + 1}"
- for k in range(nplottable_layers)
+ f"{model_name}{name[k]} layer {k + 1}" for k in range(nplottable_layers)
]
filenames = None
@@ -988,9 +980,7 @@ def _plot_transient2d_helper(
return axes
@staticmethod
- def _plot_scalar_helper(
- scalar, filename_base=None, file_extension=None, **kwargs
- ):
+ def _plot_scalar_helper(scalar, filename_base=None, file_extension=None, **kwargs):
"""
Helper method to plot scalar objects
@@ -1153,9 +1143,7 @@ def _plot_array_helper(
for idx, k in enumerate(range(i0, i1)):
fig = plt.figure(num=fignum[idx])
- pmv = PlotMapView(
- ax=axes[idx], model=model, modelgrid=modelgrid, layer=k
- )
+ pmv = PlotMapView(ax=axes[idx], model=model, modelgrid=modelgrid, layer=k)
if defaults["pcolor"]:
cm = pmv.plot_array(
plotarray,
@@ -1283,11 +1271,7 @@ def _plot_bc_helper(
pmv = PlotMapView(ax=axes[idx], model=model, layer=k)
fig = plt.figure(num=fignum[idx])
pmv.plot_bc(
- ftype=ftype,
- package=package,
- kper=kper,
- ax=axes[idx],
- color=color,
+ ftype=ftype, package=package, kper=kper, ax=axes[idx], color=color
)
if defaults["grid"]:
@@ -1659,12 +1643,7 @@ def line_intersect_grid(ptsin, xgrid, ygrid):
for iix, cell in enumerate(cells):
xc = x[cell]
yc = y[cell]
- verts = [
- (xt, yt)
- for xt, yt in zip(
- xc[cell_vertex_ix[iix]], yc[cell_vertex_ix[iix]]
- )
- ]
+ verts = list(zip(xc[cell_vertex_ix[iix]], yc[cell_vertex_ix[iix]]))
if cell in vdict:
for i in verts:
@@ -1711,6 +1690,47 @@ def line_intersect_grid(ptsin, xgrid, ygrid):
return vdict
+ @staticmethod
+ def filter_line_segments(vdict, threshold=1e-2):
+ """
+        Method to filter out artifact intersections caused by epsilon
+        perturbation of line segments. This method computes the length
+        of each intersection segment and filters out segments shorter
+        than a user-provided threshold
+
+ Parameters
+ ----------
+ vdict : dict
+ dictionary of node number, intersection vertices (line segment)
+ threshold : float
+ user provided thresholding value
+
+ Returns
+ -------
+ vdict
+ """
+ from ..utils.geometry import distance
+
+ nodes = list(vdict.keys())
+ dists = []
+
+ for node in nodes:
+ points = vdict[node]
+ if len(points) < 2:
+ dist = 0
+ else:
+ pt0 = points[0]
+ pt1 = points[1]
+ dist = distance(pt0[0], pt0[1], pt1[0], pt1[1])
+
+ dists.append(dist)
+
+ dists = np.array(dists)
+ ixs = np.where(dists < threshold)[0]
+ for ix in ixs:
+ node = nodes[ix]
+ vdict.pop(node)
+ return vdict
+
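A self-contained sketch of the new filter, with a hypothetical `vdict` shaped like the output of `line_intersect_grid()` (node number mapped to a pair of intersection vertices):

```python
from flopy.plot.plotutil import UnstructuredPlotUtilities

vdict = {
    0: [(0.0, 0.0), (1.0, 0.0)],   # segment length 1.0 -> kept
    1: [(1.0, 0.0), (1.0, 1e-5)],  # sliver below threshold -> dropped
}
vdict = UnstructuredPlotUtilities.filter_line_segments(vdict, threshold=1e-2)
assert list(vdict) == [0]
```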
@staticmethod
def irregular_shape_patch(xverts, yverts=None):
"""
@@ -1874,9 +1894,7 @@ def calc_conc(self, zeta, layer=None):
pct = {}
for isrf in range(self.__nsrf):
z = zeta[isrf]
- pct[isrf] = (self.__botm[:-1, :, :] - z[:, :, :]) / self.__b[
- :, :, :
- ]
+ pct[isrf] = (self.__botm[:-1, :, :] - z[:, :, :]) / self.__b[:, :, :]
for isrf in range(self.__nsrf):
p = pct[isrf]
if self.__istrat == 1:
@@ -1981,9 +1999,7 @@ def shapefile_get_vertices(shp):
return vertices
-def shapefile_to_patch_collection(
- shp: Union[str, os.PathLike], radius=500.0, idx=None
-):
+def shapefile_to_patch_collection(shp: Union[str, os.PathLike], radius=500.0, idx=None):
"""
Create a patch collection from the shapes in a shapefile
@@ -2344,9 +2360,7 @@ def intersect_modpath_with_crosssection(
xp, yp, zp = "x0", "y0", "z0"
if not isinstance(recarrays, list):
- recarrays = [
- recarrays,
- ]
+ recarrays = [recarrays]
if projection == "x":
v_opp = yvertices
@@ -2407,9 +2421,7 @@ def intersect_modpath_with_crosssection(
oppts[cell],
)
idx = [
- i
- for i, (x, y) in enumerate(zip(m0[0], m1[0]))
- if x == y == True
+ i for i, (x, y) in enumerate(zip(m0[0], m1[0])) if x == y == True
]
else:
idx = [i for i, x in enumerate(m0[0]) if x == True]
@@ -2478,17 +2490,13 @@ def reproject_modpath_to_crosssection(
line = xypts[tcell]
if len(line) < 2:
continue
- if projection == "x":
- d0 = np.min([i[0] for i in projpts[cell]])
- else:
- d0 = np.max([i[0] for i in projpts[cell]])
+ d0 = np.min([i[0] for i in projpts[cell]])
for rec in recarrays:
pts = list(zip(rec[xp], rec[yp]))
- x, y = geometry.project_point_onto_xc_line(
- line, pts, d0, projection
+ xc_dist = geometry.project_point_onto_xc_line(
+ line, pts, d0=d0, calc_dist=True
)
- rec[xp] = x
- rec[yp] = y
+ rec[proj] = xc_dist
pid = rec["particleid"][0]
pline = list(zip(rec[proj], rec[zp], rec["time"]))
if pid not in ptdict:
@@ -2552,7 +2560,7 @@ def parse_modpath_selection_options(
# selection of endpoints
if selection is not None:
if isinstance(selection, int):
- selection = tuple((selection,))
+ selection = (selection,)
try:
if len(selection) == 1:
node = selection[0]
@@ -2712,9 +2720,7 @@ def to_mp7_pathlines(
# return early if already in MP7 format
if "t" not in dt:
- return (
- data if ret_type == pd.DataFrame else data.to_records(index=False)
- )
+ return data if ret_type == pd.DataFrame else data.to_records(index=False)
# return early if empty
if data.empty:
@@ -2785,9 +2791,7 @@ def to_mp7_endpoints(
# check format
dt = data.dtypes
if all(n in dt for n in MP7_ENDPOINT_DTYPE.names):
- return (
- data if ret_type == pd.DataFrame else data.to_records(index=False)
- )
+ return data if ret_type == pd.DataFrame else data.to_records(index=False)
if not (
all(n in dt for n in MIN_PARTICLE_TRACK_DTYPE.names)
or all(n in dt for n in PRT_PATHLINE_DTYPE.names)
@@ -2811,12 +2815,8 @@ def to_mp7_endpoints(
data[seqn_key] = particles.ngroup()
# select startpoints and endpoints, sorting by sequencenumber
- startpts = (
- data.sort_values("t").groupby(seqn_key).head(1).sort_values(seqn_key)
- )
- endpts = (
- data.sort_values("t").groupby(seqn_key).tail(1).sort_values(seqn_key)
- )
+ startpts = data.sort_values("t").groupby(seqn_key).head(1).sort_values(seqn_key)
+ endpts = data.sort_values("t").groupby(seqn_key).tail(1).sort_values(seqn_key)
# add columns for
pairings = [
@@ -2915,9 +2915,7 @@ def to_prt_pathlines(
# return early if already in PRT format
if "t" in dt:
- return (
- data if ret_type == pd.DataFrame else data.to_records(index=False)
- )
+ return data if ret_type == pd.DataFrame else data.to_records(index=False)
# return early if empty
if data.empty:
diff --git a/flopy/plot/styles.py b/flopy/plot/styles.py
index 48e916f07f..0b27ce999f 100644
--- a/flopy/plot/styles.py
+++ b/flopy/plot/styles.py
@@ -95,9 +95,7 @@ def heading(
if letter is None and idx is not None:
letter = chr(ord("A") + idx)
- font = styles.__set_fontspec(
- bold=True, italic=False, fontsize=fontsize
- )
+ font = styles.__set_fontspec(bold=True, italic=False, fontsize=fontsize)
if letter is not None:
if heading is None:
@@ -114,13 +112,7 @@ def heading(
return
text = ax.text(
- x,
- y,
- text,
- va="bottom",
- ha="left",
- fontdict=font,
- transform=ax.transAxes,
+ x, y, text, va="bottom", ha="left", fontdict=font, transform=ax.transAxes
)
return text
@@ -148,9 +140,7 @@ def xlabel(cls, ax=None, label="", bold=False, italic=False, **kwargs):
if ax is None:
ax = plt.gca()
fontsize = kwargs.pop("fontsize", 9)
- fontspec = styles.__set_fontspec(
- bold=bold, italic=italic, fontsize=fontsize
- )
+ fontspec = styles.__set_fontspec(bold=bold, italic=italic, fontsize=fontsize)
ax.set_xlabel(label, fontdict=fontspec, **kwargs)
@classmethod
@@ -178,9 +168,7 @@ def ylabel(cls, ax=None, label="", bold=False, italic=False, **kwargs):
ax = plt.gca()
fontsize = kwargs.pop("fontsize", 9)
- fontspec = styles.__set_fontspec(
- bold=bold, italic=italic, fontsize=fontsize
- )
+ fontspec = styles.__set_fontspec(bold=bold, italic=italic, fontsize=fontsize)
ax.set_ylabel(label, fontdict=fontspec, **kwargs)
@classmethod
@@ -311,19 +299,10 @@ def add_text(
else:
transform = ax.transData
- font = styles.__set_fontspec(
- bold=bold, italic=italic, fontsize=fontsize
- )
+ font = styles.__set_fontspec(bold=bold, italic=italic, fontsize=fontsize)
text_obj = ax.text(
- x,
- y,
- text,
- va=va,
- ha=ha,
- fontdict=font,
- transform=transform,
- **kwargs,
+ x, y, text, va=va, ha=ha, fontdict=font, transform=transform, **kwargs
)
return text_obj
@@ -381,9 +360,7 @@ def add_annotation(
if xytext is None:
xytext = (0.0, 0.0)
- fontspec = styles.__set_fontspec(
- bold=bold, italic=italic, fontsize=fontsize
- )
+ fontspec = styles.__set_fontspec(bold=bold, italic=italic, fontsize=fontsize)
# add font information to kwargs
if kwargs is None:
kwargs = fontspec
diff --git a/flopy/seawat/swt.py b/flopy/seawat/swt.py
index 7339b69914..f2de3c9382 100644
--- a/flopy/seawat/swt.py
+++ b/flopy/seawat/swt.py
@@ -141,9 +141,7 @@ def __init__(
# the starting external data unit number
self._next_ext_unit = 3000
if external_path is not None:
- assert (
- model_ws == "."
- ), "ERROR: external cannot be used with model_ws"
+ assert model_ws == ".", "ERROR: external cannot be used with model_ws"
if os.path.exists(external_path):
print(f"Note: external_path {external_path} already exists")
@@ -295,13 +293,9 @@ def _set_name(self, value):
def change_model_ws(self, new_pth=None, reset_external=False):
# if hasattr(self,"_mf"):
if self._mf is not None:
- self._mf.change_model_ws(
- new_pth=new_pth, reset_external=reset_external
- )
+ self._mf.change_model_ws(new_pth=new_pth, reset_external=reset_external)
if self._mt is not None:
- self._mt.change_model_ws(
- new_pth=new_pth, reset_external=reset_external
- )
+ self._mt.change_model_ws(new_pth=new_pth, reset_external=reset_external)
super().change_model_ws(new_pth=new_pth, reset_external=reset_external)
def write_name_file(self):
@@ -323,17 +317,13 @@ def write_name_file(self):
if self.glo.unit_number[0] > 0:
f_nam.write(
"{:14s} {:5d} {}\n".format(
- self.glo.name[0],
- self.glo.unit_number[0],
- self.glo.file_name[0],
+ self.glo.name[0], self.glo.unit_number[0], self.glo.file_name[0]
)
)
# Write list file entry
f_nam.write(
"{:14s} {:5d} {}\n".format(
- self.lst.name[0],
- self.lst.unit_number[0],
- self.lst.file_name[0],
+ self.lst.name[0], self.lst.unit_number[0], self.lst.file_name[0]
)
)
@@ -400,9 +390,7 @@ def write_name_file(self):
f_nam.write(f"{tag:14s} {u:5d} {f}\n")
# write the output files
- for u, f, b in zip(
- self.output_units, self.output_fnames, self.output_binflag
- ):
+ for u, f, b in zip(self.output_units, self.output_fnames, self.output_binflag):
if u == 0:
continue
if b:
diff --git a/flopy/seawat/swtvdf.py b/flopy/seawat/swtvdf.py
index 914fe92c4e..8b890d1105 100644
--- a/flopy/seawat/swtvdf.py
+++ b/flopy/seawat/swtvdf.py
@@ -284,14 +284,11 @@ def write_file(self):
elif self.mtdnconc == -1:
f_vdf.write(
- "%10.4f%10.4f%10.4f\n"
- % (self.denseref, self.drhodprhd, self.prhdref)
+ "%10.4f%10.4f%10.4f\n" % (self.denseref, self.drhodprhd, self.prhdref)
)
f_vdf.write("%10i\n" % self.nsrhoeos)
if self.nsrhoeos == 1:
- f_vdf.write(
- "%10i%10.4f%10.4f\n" % (1, self.denseslp, self.crhoref)
- )
+ f_vdf.write("%10i%10.4f%10.4f\n" % (1, self.denseslp, self.crhoref))
else:
for i in range(self.nsrhoeos):
mtrhospec = 1 + i
@@ -467,9 +464,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
for iper in range(nper):
if model.verbose:
- print(
- f" loading INDENSE for stress period {iper + 1}..."
- )
+ print(f" loading INDENSE for stress period {iper + 1}...")
line = f.readline()
t = line.strip().split()
indense = int(t[0])
@@ -477,12 +472,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
if indense > 0:
name = f"DENSE_StressPeriod_{iper}"
t = Util3d.load(
- f,
- model,
- (nlay, nrow, ncol),
- np.float32,
- name,
- ext_unit_dict,
+ f, model, (nlay, nrow, ncol), np.float32, name, ext_unit_dict
)
if indense == 2:
t = t.array
diff --git a/flopy/seawat/swtvsc.py b/flopy/seawat/swtvsc.py
index 368bd1f6a6..fc9afc5fa8 100644
--- a/flopy/seawat/swtvsc.py
+++ b/flopy/seawat/swtvsc.py
@@ -149,8 +149,7 @@ def __init__(
):
if len(list(kwargs.keys())) > 0:
raise Exception(
- "VSC error: unrecognized kwargs: "
- + " ".join(list(kwargs.keys()))
+ "VSC error: unrecognized kwargs: " + " ".join(list(kwargs.keys()))
)
if unitnumber is None:
@@ -431,12 +430,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None):
if invisc > 0:
name = f"VISC_StressPeriod_{iper}"
t = Util3d.load(
- f,
- model,
- (nlay, nrow, ncol),
- np.float32,
- name,
- ext_unit_dict,
+ f, model, (nlay, nrow, ncol), np.float32, name, ext_unit_dict
)
if invisc == 2:
t = t.array
diff --git a/flopy/utils/__init__.py b/flopy/utils/__init__.py
index 87e0b6e5c6..acce927a4f 100644
--- a/flopy/utils/__init__.py
+++ b/flopy/utils/__init__.py
@@ -23,13 +23,7 @@
from .utl_import import import_optional_dependency # isort:skip
from . import get_modflow as get_modflow_module
-from .binaryfile import (
- BinaryHeader,
- CellBudgetFile,
- HeadFile,
- HeadUFile,
- UcnFile,
-)
+from .binaryfile import BinaryHeader, CellBudgetFile, HeadFile, HeadUFile, UcnFile
from .check import check
from .flopy_io import read_fixed_var, write_fixed_var
from .formattedfile import FormattedHeadFile
@@ -53,13 +47,7 @@
from .recarray_utils import create_empty_recarray, ra_slice, recarray
from .reference import TemporalReference
from .sfroutputfile import SfrFile
-from .swroutputfile import (
- SwrBudget,
- SwrExchange,
- SwrFlow,
- SwrStage,
- SwrStructure,
-)
+from .swroutputfile import SwrBudget, SwrExchange, SwrFlow, SwrStage, SwrStructure
from .util_array import Transient2d, Transient3d, Util2d, Util3d, read1d
from .util_list import MfList
from .utils_def import FlopyBinaryData, totim_to_datetime
diff --git a/flopy/utils/binaryfile.py b/flopy/utils/binaryfile/__init__.py
similarity index 92%
rename from flopy/utils/binaryfile.py
rename to flopy/utils/binaryfile/__init__.py
index ce95d500f8..194c1140f8 100644
--- a/flopy/utils/binaryfile.py
+++ b/flopy/utils/binaryfile/__init__.py
@@ -14,13 +14,13 @@
import warnings
from pathlib import Path
from shutil import move
-from typing import List, Optional, Union
+from typing import Optional, Union
import numpy as np
import pandas as pd
-from ..utils.datafile import Header, LayerFile
-from .gridutil import get_lni
+from ..datafile import Header, LayerFile
+from ..gridutil import get_lni
HEAD_TEXT = " HEAD"
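Because `binaryfile.py` becomes `binaryfile/__init__.py`, the public import path is unchanged:

```python
# existing imports keep working after the module -> package conversion
from flopy.utils.binaryfile import CellBudgetFile, HeadFile, HeadUFile
```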
@@ -89,18 +89,7 @@ def write_budget(
ndim2 = 1
ndim3 = -1
h = np.array(
- (
- kstp,
- kper,
- text,
- ndim1,
- ndim2,
- ndim3,
- imeth,
- delt,
- pertim,
- totim,
- ),
+ (kstp, kper, text, ndim1, ndim2, ndim3, imeth, delt, pertim, totim),
dtype=dt,
)
h.tofile(fbin)
@@ -111,18 +100,7 @@ def write_budget(
ndim2 = 1
ndim3 = -1
h = np.array(
- (
- kstp,
- kper,
- text,
- ndim1,
- ndim2,
- ndim3,
- imeth,
- delt,
- pertim,
- totim,
- ),
+ (kstp, kper, text, ndim1, ndim2, ndim3, imeth, delt, pertim, totim),
dtype=dt,
)
h.tofile(fbin)
@@ -209,19 +187,13 @@ def set_values(self, **kwargs):
try:
self.header[0][k] = int(kwargs[k])
except:
- print(
- f"{k} key not available in {self.header_type} "
- "header dtype"
- )
+ print(f"{k} key not available in {self.header_type} header dtype")
for k in fkey:
if k in kwargs.keys():
try:
self.header[0][k] = float(kwargs[k])
except:
- print(
- f"{k} key not available "
- f"in {self.header_type} header dtype"
- )
+ print(f"{k} key not available in {self.header_type} header dtype")
for k in ckey:
if k in kwargs.keys():
# Convert to upper case to be consistent case used by MODFLOW
@@ -460,9 +432,7 @@ class BinaryLayerFile(LayerFile):
pointing to the 1st byte of data for the corresponding data arrays.
"""
- def __init__(
- self, filename: Union[str, os.PathLike], precision, verbose, **kwargs
- ):
+ def __init__(self, filename: Union[str, os.PathLike], precision, verbose, **kwargs):
super().__init__(filename, precision, verbose, **kwargs)
def _build_index(self):
@@ -483,7 +453,8 @@ def _build_index(self):
warn_threshold = 10000000
if self.nrow > 1 and self.nrow * self.ncol > warn_threshold:
warnings.warn(
- f"Very large grid, ncol ({self.ncol}) * nrow ({self.nrow}) > {warn_threshold}"
+ f"Very large grid, ncol ({self.ncol}) * nrow ({self.nrow})"
+ f" > {warn_threshold}"
)
self.file.seek(0, 2)
self.totalbytes = self.file.tell()
@@ -590,9 +561,7 @@ def get_ts(self, idx):
for k, i, j in kijlist:
ioffset = (i * self.ncol + j) * self.realtype(1).nbytes
for irec, header in enumerate(self.recordarray):
- ilay = (
- header["ilay"] - 1
- ) # change ilay from header to zero-based
+ ilay = header["ilay"] - 1 # change ilay from header to zero-based
if ilay != k:
continue
ipos = self.iposarray[irec].item()
@@ -659,9 +628,7 @@ def __init__(
s = f"Error. Precision could not be determined for {filename}"
print(s)
raise Exception()
- self.header_dtype = BinaryHeader.set_dtype(
- bintype="Head", precision=precision
- )
+ self.header_dtype = BinaryHeader.set_dtype(bintype="Head", precision=precision)
super().__init__(filename, precision, verbose, **kwargs)
def reverse(self, filename: Optional[os.PathLike] = None):
@@ -690,47 +657,34 @@ def get_max_kper_kstp_tsim():
kstp = {0: 0}
for i in range(len(self) - 1, -1, -1):
header = self.recordarray[i]
- if (
- header["kper"] in kstp
- and header["kstp"] > kstp[header["kper"]]
- ):
+ if header["kper"] in kstp and header["kstp"] > kstp[header["kper"]]:
kstp[header["kper"]] += 1
else:
kstp[header["kper"]] = 0
return kper, kstp, tsim
- # get max period and time from the head file
maxkper, maxkstp, maxtsim = get_max_kper_kstp_tsim()
- # if we have tdis, get max period number and simulation time from it
- tdis_maxkper, tdis_maxtsim = None, None
- if self.tdis is not None:
- pd = self.tdis.perioddata.get_data()
- if any(pd):
- tdis_maxkper = len(pd) - 1
- tdis_maxtsim = sum([p[0] for p in pd])
- # if we have both, check them against each other
- if tdis_maxkper is not None:
- assert maxkper == tdis_maxkper, (
- f"Max stress period in binary head file ({maxkper}) != "
- f"max stress period in provided tdis ({tdis_maxkper})"
- )
- assert maxtsim == tdis_maxtsim, (
- f"Max simulation time in binary head file ({maxtsim}) != "
- f"max simulation time in provided tdis ({tdis_maxtsim})"
- )
+ prev_kper = None
+ perlen = None
def reverse_header(header):
"""Reverse period, step and time fields in the record header"""
+ nonlocal prev_kper
+ nonlocal perlen
+
# reverse kstp and kper headers
kstp = header["kstp"] - 1
kper = header["kper"] - 1
header["kstp"] = maxkstp[kper] - kstp + 1
header["kper"] = maxkper - kper + 1
+ if kper != prev_kper:
+ perlen = header["pertim"]
+ prev_kper = kper
+
# reverse totim and pertim headers
header["totim"] = maxtsim - header["totim"]
- perlen = pd[kper][0]
header["pertim"] = perlen - header["pertim"]
return header
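A standalone sketch of the reversed-time arithmetic in `reverse_header`, with hypothetical values: a single 10-day stress period with records at `pertim`/`totim` of 4.0 and 10.0, iterated newest-first so the period length is recovered from the first header seen for that period:

```python
maxtsim = 10.0  # total simulation time, taken from the last record
perlen = None
for pertim, totim in [(10.0, 10.0), (4.0, 4.0)]:  # reverse record order
    if perlen is None:
        perlen = pertim  # last pertim in a period equals its length
    print(maxtsim - totim, perlen - pertim)
# 0.0 0.0   (the end of the run becomes time zero)
# 6.0 6.0   (day 4 of the period becomes day 6)
```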
@@ -828,9 +782,7 @@ def __init__(
s = f"Error. Precision could not be determined for {filename}"
print(s)
raise Exception()
- self.header_dtype = BinaryHeader.set_dtype(
- bintype="Ucn", precision=precision
- )
+ self.header_dtype = BinaryHeader.set_dtype(bintype="Ucn", precision=precision)
super().__init__(filename, precision, verbose, **kwargs)
return
@@ -898,9 +850,7 @@ def __init__(
s = f"Error. Precision could not be determined for {filename}"
print(s)
raise Exception()
- self.header_dtype = BinaryHeader.set_dtype(
- bintype="Head", precision=precision
- )
+ self.header_dtype = BinaryHeader.set_dtype(bintype="Head", precision=precision)
super().__init__(filename, precision, verbose, **kwargs)
def _get_data_array(self, totim=0.0):
@@ -911,9 +861,7 @@ def _get_data_array(self, totim=0.0):
"""
if totim >= 0.0:
- keyindices = np.asarray(
- self.recordarray["totim"] == totim
- ).nonzero()[0]
+ keyindices = np.asarray(self.recordarray["totim"] == totim).nonzero()[0]
if len(keyindices) == 0:
msg = f"totim value ({totim}) not found in file..."
raise Exception(msg)
@@ -1064,7 +1012,6 @@ def __init__(
self.paknamlist_from = []
self.paknamlist_to = []
self.compact = True # compact budget file flag
-
self.dis = None
self.modelgrid = None
if "model" in kwargs.keys():
@@ -1317,9 +1264,7 @@ def _build_index(self):
ipos # store the position right after header2
)
self.recordarray.append(header)
- self.iposarray.append(
- ipos
- ) # store the position right after header2
+ self.iposarray.append(ipos) # store the position right after header2
# skip over the data to the next record and set ipos
self._skip_record(header)
@@ -1348,9 +1293,7 @@ def _build_index(self):
dtype = self.header_dtype[name]
if np.issubdtype(dtype, bytes): # convert to str
self.headers[name] = (
- self.headers[name]
- .str.decode("ascii", "strict")
- .str.strip()
+ self.headers[name].str.decode("ascii", "strict").str.strip()
)
def _skip_record(self, header):
@@ -1476,8 +1419,7 @@ def _find_paknam(self, paknam, to=False):
break
if paknam16 is None:
raise Exception(
- "The specified package name string is not "
- "in the budget file."
+ "The specified package name string is not in the budget file."
)
return paknam16
@@ -1630,9 +1572,7 @@ def get_indices(self, text=None):
# check and make sure that text is in file
if text is not None:
text16 = self._find_text(text)
- select_indices = np.asarray(
- self.recordarray["text"] == text16
- ).nonzero()
+ select_indices = np.asarray(self.recordarray["text"] == text16).nonzero()
if isinstance(select_indices, tuple):
select_indices = select_indices[0]
else:
@@ -1675,7 +1615,7 @@ def get_data(
paknam=None,
paknam2=None,
full3D=False,
- ) -> Union[List, np.ndarray]:
+ ) -> Union[list, np.ndarray]:
"""
Get data from the binary budget file.
@@ -1755,22 +1695,14 @@ def get_data(
if kstpkper is not None:
kstp1 = kstpkper[0] + 1
kper1 = kstpkper[1] + 1
- select_indices = select_indices & (
- self.recordarray["kstp"] == kstp1
- )
- select_indices = select_indices & (
- self.recordarray["kper"] == kper1
- )
+ select_indices = select_indices & (self.recordarray["kstp"] == kstp1)
+ select_indices = select_indices & (self.recordarray["kper"] == kper1)
selected = True
if text16 is not None:
- select_indices = select_indices & (
- self.recordarray["text"] == text16
- )
+ select_indices = select_indices & (self.recordarray["text"] == text16)
selected = True
if paknam16 is not None:
- select_indices = select_indices & (
- self.recordarray["paknam"] == paknam16
- )
+ select_indices = select_indices & (self.recordarray["paknam"] == paknam16)
selected = True
if paknam16_2 is not None:
select_indices = select_indices & (
@@ -1832,8 +1764,7 @@ def get_ts(self, idx, text=None, times=None):
# issue exception if text not provided
if text is None:
raise Exception(
- "text keyword must be provided to CellBudgetFile "
- "get_ts() method."
+ "text keyword must be provided to CellBudgetFile get_ts() method."
)
kijlist = self._build_kijlist(idx)
@@ -1883,8 +1814,7 @@ def get_ts(self, idx, text=None, times=None):
if self.modelgrid.grid_type == "structured":
ndx = [
- lrc[0]
- * (self.modelgrid.nrow * self.modelgrid.ncol)
+ lrc[0] * (self.modelgrid.nrow * self.modelgrid.ncol)
+ lrc[1] * self.modelgrid.ncol
+ (lrc[2] + 1)
for lrc in kijlist
@@ -1923,8 +1853,9 @@ def _build_kijlist(self, idx):
fail = True
if fail:
raise Exception(
- "Invalid cell index. Cell {} not within model grid: "
- "{}".format((k, i, j), (self.nlay, self.nrow, self.ncol))
+ "Invalid cell index. Cell {} not within model grid: {}".format(
+ (k, i, j), (self.nlay, self.nrow, self.ncol)
+ )
)
return kijlist
@@ -1936,9 +1867,7 @@ def _get_nstation(self, idx, kijlist):
def _init_result(self, nstation):
# Initialize result array and put times in first column
- result = np.empty(
- (len(self.kstpkper), nstation + 1), dtype=self.realtype
- )
+ result = np.empty((len(self.kstpkper), nstation + 1), dtype=self.realtype)
result[:, :] = np.nan
if len(self.times) == result.shape[0]:
result[:, 0] = np.array(self.times)
@@ -1998,17 +1927,13 @@ def get_record(self, idx, full3D=False):
if self.verbose:
s += f"an array of shape {(nlay, nrow, ncol)}"
print(s)
- return binaryread(
- self.file, self.realtype(1), shape=(nlay, nrow, ncol)
- )
+ return binaryread(self.file, self.realtype(1), shape=(nlay, nrow, ncol))
# imeth 1
elif imeth == 1:
if self.verbose:
s += f"an array of shape {(nlay, nrow, ncol)}"
print(s)
- return binaryread(
- self.file, self.realtype(1), shape=(nlay, nrow, ncol)
- )
+ return binaryread(self.file, self.realtype(1), shape=(nlay, nrow, ncol))
# imeth 2
elif imeth == 2:
@@ -2016,10 +1941,7 @@ def get_record(self, idx, full3D=False):
dtype = np.dtype([("node", np.int32), ("q", self.realtype)])
if self.verbose:
if full3D:
- s += (
- f"a numpy masked array of "
- f"size ({nlay}, {nrow}, {ncol})"
- )
+ s += f"a numpy masked array of size ({nlay}, {nrow}, {ncol})"
else:
s += f"a numpy recarray of size ({nlist}, 2)"
print(s)
@@ -2035,10 +1957,7 @@ def get_record(self, idx, full3D=False):
data = binaryread(self.file, self.realtype(1), shape=(nrow, ncol))
if self.verbose:
if full3D:
- s += (
- "a numpy masked array of size "
- f"({nlay}, {nrow}, {ncol})"
- )
+ s += f"a numpy masked array of size ({nlay}, {nrow}, {ncol})"
else:
s += (
"a list of two 2D numpy arrays. The first is an "
@@ -2204,9 +2123,7 @@ def get_residual(self, totim, scaled=False):
residual = np.zeros((nlay, nrow, ncol), dtype=float)
if scaled:
inflow = np.zeros((nlay, nrow, ncol), dtype=float)
- select_indices = np.asarray(
- self.recordarray["totim"] == totim
- ).nonzero()[0]
+ select_indices = np.asarray(self.recordarray["totim"] == totim).nonzero()[0]
for i in select_indices:
text = self.recordarray[i]["text"].decode()
@@ -2309,26 +2226,46 @@ def reverse(self, filename: Optional[os.PathLike] = None):
]
)
- # make sure we have tdis
- if self.tdis is None or not any(self.tdis.perioddata.get_data()):
- raise ValueError(
- "tdis must be known to reverse a cell budget file"
- )
+ nrecords = len(self)
+ target = filename
- # extract perioddata
- pd = self.tdis.perioddata.get_data()
+ def get_max_kper_kstp_tsim():
+ header = self.recordarray[-1]
+ kper = header["kper"] - 1
+ tsim = header["totim"]
+ kstp = {0: 0}
+ for i in range(len(self) - 1, -1, -1):
+ header = self.recordarray[i]
+ if header["kper"] in kstp and header["kstp"] > kstp[header["kper"]]:
+ kstp[header["kper"]] += 1
+ else:
+ kstp[header["kper"]] = 0
+ return kper, kstp, tsim
- # get maximum period number and total simulation time
- nper = len(pd)
- kpermx = nper - 1
- tsimtotal = 0.0
- for tpd in pd:
- tsimtotal += tpd[0]
+ maxkper, maxkstp, maxtsim = get_max_kper_kstp_tsim()
+ prev_kper = None
+ perlen = None
- # get number of records
- nrecords = len(self)
+ def reverse_header(header):
+ """Reverse period, step and time fields in the record header"""
- target = filename
+ nonlocal prev_kper
+ nonlocal perlen
+
+ # reverse kstp and kper headers
+ kstp = header["kstp"] - 1
+ kper = header["kper"] - 1
+ header["kstp"] = maxkstp[kper] - kstp + 1
+ header["kper"] = maxkper - kper + 1
+
+ if kper != prev_kper:
+            perlen = header["pertim"]
+ prev_kper = kper
+
+ # reverse totim and pertim headers
+ header["totim"] = maxtsim - header["totim"]
+ header["pertim"] = perlen - header["pertim"]
+ return header
# if rewriting the same file, write
# temp file then copy it into place
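With timing now inferred from the budget headers, a budget file can be reversed without supplying tdis; a minimal sketch (file names hypothetical):

```python
from flopy.utils.binaryfile import CellBudgetFile

CellBudgetFile("model.cbc").reverse("model_rev.cbc")
```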
@@ -2343,18 +2280,7 @@ def reverse(self, filename: Optional[os.PathLike] = None):
for idx in range(nrecords - 1, -1, -1):
# load header array
header = self.recordarray[idx]
-
- # reverse kstp and kper in the header array
- (kstp, kper) = (header["kstp"] - 1, header["kper"] - 1)
- kstpmx = pd[kper][1] - 1
- kstpb = kstpmx - kstp
- kperb = kpermx - kper
- (header["kstp"], header["kper"]) = (kstpb + 1, kperb + 1)
-
- # reverse totim and pertim in the header array
- header["totim"] = tsimtotal - header["totim"]
- perlen = pd[kper][0]
- header["pertim"] = perlen - header["pertim"]
+ header = reverse_header(header)
# Write main header information to backward budget file
h = header[
@@ -2376,14 +2302,7 @@ def reverse(self, filename: Optional[os.PathLike] = None):
h.tofile(f)
if header["imeth"] == 6:
# Write additional header information to the backward budget file
- h = header[
- [
- "modelnam",
- "paknam",
- "modelnam2",
- "paknam2",
- ]
- ]
+ h = header[["modelnam", "paknam", "modelnam2", "paknam2"]]
h = np.array(h, dtype=dt2)
h.tofile(f)
# Load data
@@ -2400,13 +2319,9 @@ def reverse(self, filename: Optional[os.PathLike] = None):
# Write auxiliary column names
naux = ndat - 1
if naux > 0:
- auxtxt = [
- "{:16}".format(colname) for colname in colnames[3:]
- ]
+ auxtxt = ["{:16}".format(colname) for colname in colnames[3:]]
auxtxt = tuple(auxtxt)
- dt = np.dtype(
- [(colname, "S16") for colname in colnames[3:]]
- )
+ dt = np.dtype([(colname, "S16") for colname in colnames[3:]])
h = np.array(auxtxt, dtype=dt)
h.tofile(f)
# Write nlist
diff --git a/flopy/utils/binaryfile/reverse.py b/flopy/utils/binaryfile/reverse.py
new file mode 100644
index 0000000000..f69e00f5d2
--- /dev/null
+++ b/flopy/utils/binaryfile/reverse.py
@@ -0,0 +1,31 @@
+import argparse
+from pathlib import Path
+
+from flopy.utils.binaryfile import CellBudgetFile, HeadFile
+
+if __name__ == "__main__":
+ """Reverse head or budget files."""
+
+ parser = argparse.ArgumentParser(description="Reverse head or budget files.")
+ parser.add_argument(
+ "--infile",
+ "-i",
+ type=str,
+ help="Input file.",
+ )
+ parser.add_argument(
+ "--outfile",
+ "-o",
+ type=str,
+ help="Output file.",
+ )
+ args = parser.parse_args()
+ infile = Path(args.infile)
+ outfile = Path(args.outfile)
+ suffix = infile.suffix.lower()
+ if suffix in [".hds", ".hed"]:
+ HeadFile(infile).reverse(outfile)
+ elif suffix in [".bud", ".cbc"]:
+ CellBudgetFile(infile).reverse(outfile)
+ else:
+ raise ValueError(f"Unrecognized file suffix: {suffix}")
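A usage sketch for the new helper module (file names hypothetical); the CLI wraps the same `reverse` calls exercised above:

```python
# command line:
#   python -m flopy.utils.binaryfile.reverse -i model.hds -o model_rev.hds
# equivalent programmatic form:
from flopy.utils.binaryfile import HeadFile

HeadFile("model.hds").reverse("model_rev.hds")
```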
diff --git a/flopy/utils/check.py b/flopy/utils/check.py
index 432dc52214..7d9801a481 100644
--- a/flopy/utils/check.py
+++ b/flopy/utils/check.py
@@ -1,6 +1,6 @@
import os
-from pathlib import Path
from typing import Optional, Union
+from warnings import warn
import numpy as np
from numpy.lib import recfunctions
@@ -149,9 +149,7 @@ def _add_to_summary(
col_list += [k, i, j] if self.structured else [node]
col_list += [value, desc]
sa = self._get_summary_array(np.array(col_list))
- self.summary_array = np.append(self.summary_array, sa).view(
- np.recarray
- )
+ self.summary_array = np.append(self.summary_array, sa).view(np.recarray)
def _boolean_compare(
self,
@@ -229,17 +227,12 @@ def _boolean_compare(
cols = [
c
for c in failed_info.dtype.names
- if failed_info[c].sum() != 0
- and c != "diff"
- and "tmp" not in c
+ if failed_info[c].sum() != 0 and c != "diff" and "tmp" not in c
]
# currently failed_info[cols] results in a warning. Not sure
# how to do this properly with a recarray.
failed_info = recfunctions.append_fields(
- failed_info[cols].copy(),
- names="diff",
- data=diff,
- asrecarray=True,
+ failed_info[cols].copy(), names="diff", data=diff, asrecarray=True
)
failed_info.sort(order="diff", axis=0)
if not sort_ascending:
@@ -256,9 +249,7 @@ def _get_summary_array(self, array=None):
ra = recarray(array, dtype)
return ra
- def _txt_footer(
- self, headertxt, txt, testname, passed=False, warning=True
- ):
+ def _txt_footer(self, headertxt, txt, testname, passed=False, warning=True):
"""
if len(txt) == 0 or passed:
txt += 'passed.'
@@ -286,9 +277,7 @@ def _stress_period_data_valid_indices(self, stress_period_data):
error_name="invalid BC index",
error_type="Error",
)
- self.summary_array = np.append(self.summary_array, sa).view(
- np.recarray
- )
+ self.summary_array = np.append(self.summary_array, sa).view(np.recarray)
spd_inds_valid = False
self.remove_passed("BC indices valid")
if spd_inds_valid:
@@ -313,9 +302,7 @@ def _stress_period_data_nans(self, stress_period_data, nan_excl_list):
error_name="Not a number",
error_type="Error",
)
- self.summary_array = np.append(self.summary_array, sa).view(
- np.recarray
- )
+ self.summary_array = np.append(self.summary_array, sa).view(np.recarray)
self.remove_passed("not a number (Nan) entries")
else:
self.append_passed("not a number (Nan) entries")
@@ -332,14 +319,9 @@ def _stress_period_data_inactivecells(self, stress_period_data):
if np.any(ibnd == 0):
sa = self._list_spd_check_violations(
- stress_period_data,
- ibnd == 0,
- error_name=msg,
- error_type="Warning",
- )
- self.summary_array = np.append(self.summary_array, sa).view(
- np.recarray
+ stress_period_data, ibnd == 0, error_name=msg, error_type="Warning"
)
+ self.summary_array = np.append(self.summary_array, sa).view(np.recarray)
self.remove_passed(f"{msg}s")
else:
self.append_passed(f"{msg}s")
@@ -453,9 +435,7 @@ def get_active(self, include_cbd=False):
# make ibound of same shape as thicknesses/botm for quasi-3D models
active = self.model.bas6.ibound.array != 0
if include_cbd and dis.laycbd.sum() > 0:
- laycbd = np.flatnonzero(
- dis.laycbd.array > 0
- ) # cbd layer index
+ laycbd = np.flatnonzero(dis.laycbd.array > 0) # cbd layer index
active = np.insert(active, laycbd, active[laycbd], axis=0)
else: # if bas package is missing
@@ -493,9 +473,7 @@ def stress_period_data_values(
error_name=error_name,
error_type=error_type,
)
- self.summary_array = np.append(self.summary_array, sa).view(
- np.recarray
- )
+ self.summary_array = np.append(self.summary_array, sa).view(np.recarray)
self.remove_passed(error_name)
else:
self.append_passed(error_name)
@@ -517,14 +495,10 @@ def values(self, a, criteria, error_name="", error_type="Warning"):
# but indsT will only have two columns if a 2-D array is being compared
# pad indsT with a column of zeros for k
if indsT.shape[1] == 2:
- indsT = np.column_stack(
- [np.zeros(indsT.shape[0], dtype=int), indsT]
- )
+ indsT = np.column_stack([np.zeros(indsT.shape[0], dtype=int), indsT])
sa = np.column_stack([tp, pn, indsT, v, en])
sa = self._get_summary_array(sa)
- self.summary_array = np.append(self.summary_array, sa).view(
- np.recarray
- )
+ self.summary_array = np.append(self.summary_array, sa).view(np.recarray)
self.remove_passed(error_name)
else:
self.append_passed(error_name)
@@ -573,11 +547,7 @@ def summarize(self, scrub: bool = False):
if txt == "":
txt += " No errors or warnings encountered.\n"
- elif (
- self.f is not None
- and self.verbose
- and self.summary_array.shape[0] > 0
- ):
+ elif self.f is not None and self.verbose and self.summary_array.shape[0] > 0:
txt += f" see {relpath_safe(self.summaryfile, scrub=scrub)} for details.\n"
# print checks that passed for higher levels
@@ -603,7 +573,7 @@ def summarize(self, scrub: bool = False):
print("Errors and/or Warnings encountered.")
if self.f is not None:
print(
- f" see {relpath_safe(self.summaryfile, scrub=scrub)} for details.\n"
+ f" see {relpath_safe(self.summaryfile, scrub=scrub)} for details.\n" # noqa
)
# start of older model specific code
@@ -613,8 +583,7 @@ def _has_cell_indices(self, stress_period_data):
) != {"k", "i", "j"}:
self._add_to_summary(
type="Error",
- desc="\r Stress period data missing k, "
- "i, j for structured grid.",
+ desc="\r Stress period data missing k, i, j for structured grid.",
)
return False
elif (
@@ -695,9 +664,9 @@ def get_neighbors(self, a):
tmp[1:-1, 0:-2, 1:-1].ravel(), # i-1
tmp[1:-1, 2:, 1:-1].ravel(), # i+1
tmp[1:-1, 1:-1, :-2].ravel(), # j-1
- tmp[1:-1, 1:-1, 2:].ravel(),
+ tmp[1:-1, 1:-1, 2:].ravel(), # j+1
]
- ) # j+1
+ )
return neighbors.reshape(6, nk, ni, nj)
else:
if "DISU" in self.model.get_package_list():
@@ -706,9 +675,7 @@ def get_neighbors(self, a):
if isinstance(a, Util3d):
a = a.array
pad_value = int(-1e9)
- n_max = (
- np.max(disu.iac.array) - 1
- ) # -1 for self, removed below
+ n_max = np.max(disu.iac.array) - 1 # -1 for self, removed below
arr_neighbors = [
np.pad(
a[n - 1],
@@ -718,9 +685,7 @@ def get_neighbors(self, a):
)
for n in neighbors
]
- arr_neighbors = np.where(
- arr_neighbors == -1e9, np.nan, arr_neighbors
- )
+ arr_neighbors = np.where(arr_neighbors == -1e9, np.nan, arr_neighbors)
neighbors = arr_neighbors.T
else:
# if no disu, we can't define neighbours for this ugrid
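
The DISU branch above pads ragged neighbor lists to a common width with a sentinel and then masks it out; a small self-contained sketch of that idiom (values hypothetical):

import numpy as np

pad_value = int(-1e9)
row = np.array([3.0, 4.0])                        # hypothetical neighbor values
padded = np.pad(row, (0, 3), constant_values=pad_value)   # pad to common width
masked = np.where(padded == -1e9, np.nan, padded)         # sentinel -> NaN
print(masked)                                     # [ 3.  4. nan nan nan]
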
@@ -745,9 +710,7 @@ def _fmt_string_list(array, float_format="{}"):
"recarray to file - change to 'object' type"
)
else:
- raise Exception(
- f"MfList.fmt_string error: unknown vtype in dtype:{vtype}"
- )
+ raise Exception(f"MfList.fmt_string error: unknown vtype in dtype:{vtype}")
return fmt_string
@@ -799,6 +762,15 @@ def fields_view(arr, fields):
class mf6check(check):
+ """
+ Check an mf6 package for common errors.
+
+ .. deprecated:: 3.9
+ The MF6 check mechanism is deprecated pending reimplementation
+ in a future release. While the checks API will remain in place
+ through 3.x, it may be unstable, and will likely change in 4.x.
+ """
+
def __init__(
self,
package,
@@ -807,6 +779,12 @@ def __init__(
level=1,
property_threshold_values={},
):
+ warn(
+ "The MF6 check mechanism is deprecated pending reimplementation "
+ "in a future release. While the checks API will remain in place "
+ "through 3.x, it may be unstable, and will likely change in 4.x.",
+ category=DeprecationWarning,
+ )
super().__init__(package, f, verbose, level, property_threshold_values)
if hasattr(package, "model_or_sim"):
self.model = package.model_or_sim
@@ -827,9 +805,7 @@ def _get_cell_inds(self, spd):
for item in zip(*cellid):
hnames += (
- np.ndarray(
- shape=(len(item),), buffer=np.array(item), dtype=np.int32
- ),
+ np.ndarray(shape=(len(item),), buffer=np.array(item), dtype=np.int32),
)
return hnames
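
Since mf6check now emits a DeprecationWarning on construction, callers who want to fail fast on deprecated usage can escalate it with the standard warnings machinery; a sketch, where pkg is a hypothetical MF6 package object:

import warnings

from flopy.utils.check import mf6check

with warnings.catch_warnings():
    warnings.simplefilter("error", DeprecationWarning)
    try:
        chk = mf6check(pkg)  # pkg: hypothetical MF6 package object
    except DeprecationWarning as err:
        print(f"deprecated API used: {err}")
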
diff --git a/flopy/utils/compare.py b/flopy/utils/compare.py
index 2e5f3fd6b9..5c763178ee 100644
--- a/flopy/utils/compare.py
+++ b/flopy/utils/compare.py
@@ -1,6 +1,6 @@
import os
import textwrap
-from typing import List, Optional, Union
+from typing import Optional, Union
import numpy as np
@@ -84,12 +84,8 @@ def compare_budget(
max_cumpd=0.01,
max_incpd=0.01,
outfile: Optional[Union[str, os.PathLike]] = None,
- files1: Optional[
- Union[str, os.PathLike, List[Union[str, os.PathLike]]]
- ] = None,
- files2: Optional[
- Union[str, os.PathLike, List[Union[str, os.PathLike]]]
- ] = None,
+ files1: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None,
+ files2: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None,
):
"""Compare the budget results from two simulations.
@@ -218,10 +214,7 @@ def compare_budget(
maxcolname = max(maxcolname, len(colname))
s = 2 * "\n"
- s += (
- f"STRESS PERIOD: {kper[jdx] + 1} "
- + f"TIME STEP: {kstp[jdx] + 1}"
- )
+ s += f"STRESS PERIOD: {kper[jdx] + 1} " + f"TIME STEP: {kstp[jdx] + 1}"
f.write(s)
if idx == 0:
@@ -291,12 +284,8 @@ def compare_swrbudget(
max_cumpd=0.01,
max_incpd=0.01,
outfile: Optional[Union[str, os.PathLike]] = None,
- files1: Optional[
- Union[str, os.PathLike, List[Union[str, os.PathLike]]]
- ] = None,
- files2: Optional[
- Union[str, os.PathLike, List[Union[str, os.PathLike]]]
- ] = None,
+ files1: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None,
+ files2: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None,
):
"""Compare the SWR budget results from two simulations.
@@ -418,10 +407,7 @@ def compare_swrbudget(
maxcolname = max(maxcolname, len(colname))
s = 2 * "\n"
- s += (
- f"STRESS PERIOD: {kper[jdx] + 1} "
- + f"TIME STEP: {kstp[jdx] + 1}"
- )
+ s += f"STRESS PERIOD: {kper[jdx] + 1} " + f"TIME STEP: {kstp[jdx] + 1}"
f.write(s)
if idx == 0:
@@ -492,12 +478,8 @@ def compare_heads(
text2=None,
htol=0.001,
outfile: Optional[Union[str, os.PathLike]] = None,
- files1: Optional[
- Union[str, os.PathLike, List[Union[str, os.PathLike]]]
- ] = None,
- files2: Optional[
- Union[str, os.PathLike, List[Union[str, os.PathLike]]]
- ] = None,
+ files1: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None,
+ files2: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None,
difftol=False,
verbose=False,
exfile: Optional[Union[str, os.PathLike]] = None,
@@ -687,10 +669,7 @@ def compare_heads(
try:
exd = np.genfromtxt(exfile).flatten()
except:
- e = (
- "Could not read exclusion "
- + f"file {os.path.basename(exfile)}"
- )
+ e = "Could not read exclusion " + f"file {os.path.basename(exfile)}"
print(e)
return False
else:
@@ -715,9 +694,7 @@ def compare_heads(
status1 = status1.upper()
unstructured1 = False
if status1 == dbs:
- headobj1 = HeadFile(
- hfpth1, precision=precision, verbose=verbose, text=text
- )
+ headobj1 = HeadFile(hfpth1, precision=precision, verbose=verbose, text=text)
txt = headobj1.recordarray["text"][0]
if isinstance(txt, bytes):
txt = txt.decode("utf-8")
@@ -730,9 +707,7 @@ def compare_heads(
status2 = status2.upper()
unstructured2 = False
if status2 == dbs:
- headobj2 = HeadFile(
- hfpth2, precision=precision, verbose=verbose, text=text2
- )
+ headobj2 = HeadFile(hfpth2, precision=precision, verbose=verbose, text=text2)
txt = headobj2.recordarray["text"][0]
if isinstance(txt, bytes):
txt = txt.decode("utf-8")
@@ -883,12 +858,8 @@ def compare_concentrations(
precision="auto",
ctol=0.001,
outfile: Optional[Union[str, os.PathLike]] = None,
- files1: Optional[
- Union[str, os.PathLike, List[Union[str, os.PathLike]]]
- ] = None,
- files2: Optional[
- Union[str, os.PathLike, List[Union[str, os.PathLike]]]
- ] = None,
+ files1: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None,
+ files2: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None,
difftol=False,
verbose=False,
):
@@ -1112,14 +1083,10 @@ def compare_concentrations(
def compare_stages(
- namefile1: Union[str, os.PathLike] = None,
- namefile2: Union[str, os.PathLike] = None,
- files1: Optional[
- Union[str, os.PathLike, List[Union[str, os.PathLike]]]
- ] = None,
- files2: Optional[
- Union[str, os.PathLike, List[Union[str, os.PathLike]]]
- ] = None,
+ namefile1: Optional[Union[str, os.PathLike]] = None,
+ namefile2: Optional[Union[str, os.PathLike]] = None,
+ files1: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None,
+ files2: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None,
htol=0.001,
outfile: Optional[Union[str, os.PathLike]] = None,
difftol=False,
@@ -1328,20 +1295,16 @@ def compare_stages(
def compare(
- namefile1: Union[str, os.PathLike] = None,
- namefile2: Union[str, os.PathLike] = None,
+ namefile1: Optional[Union[str, os.PathLike]] = None,
+ namefile2: Optional[Union[str, os.PathLike]] = None,
precision="auto",
max_cumpd=0.01,
max_incpd=0.01,
htol=0.001,
outfile1: Optional[Union[str, os.PathLike]] = None,
outfile2: Optional[Union[str, os.PathLike]] = None,
- files1: Optional[
- Union[str, os.PathLike, List[Union[str, os.PathLike]]]
- ] = None,
- files2: Optional[
- Union[str, os.PathLike, List[Union[str, os.PathLike]]]
- ] = None,
+ files1: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None,
+ files2: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None,
):
"""Compare the budget and head results for two MODFLOW-based model
simulations.
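
The signature changes throughout compare.py are a mechanical move from typing.List to the PEP 585 builtin generic (available on Python >= 3.9); a sketch of the pattern, with a hypothetical alias and stub:

import os
from typing import Optional, Union

PathLike = Union[str, os.PathLike]  # hypothetical alias for brevity

def stub(
    files1: Optional[Union[PathLike, list[PathLike]]] = None,  # builtin list
) -> bool:
    """Hypothetical stub mirroring the new signatures in compare.py."""
    return files1 is None or isinstance(files1, (str, os.PathLike, list))

print(stub(["a.lst", "b.lst"]))  # True
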
diff --git a/flopy/utils/crs.py b/flopy/utils/crs.py
index 69d7b8856f..e64f638735 100644
--- a/flopy/utils/crs.py
+++ b/flopy/utils/crs.py
@@ -132,7 +132,8 @@ def get_crs(prjfile=None, crs=None, **kwargs):
raise ValueError(
"Different coordinate reference systems "
f"in crs argument and supplied projection file: {prjfile}\n"
- f"\nuser supplied crs: {crs} !=\ncrs from projection file: {prjfile_crs}"
+ f"\nuser supplied crs: {crs} !=\n"
+ f"crs from projection file: {prjfile_crs}"
)
else:
crs = prjfile_crs
diff --git a/flopy/utils/cvfdutil.py b/flopy/utils/cvfdutil.py
index 3a59031d2b..76d31a84ac 100644
--- a/flopy/utils/cvfdutil.py
+++ b/flopy/utils/cvfdutil.py
@@ -360,9 +360,7 @@ def get_disv_gridprops(verts, iverts, xcyc=None):
if xcyc is None:
xcyc = np.empty((ncpl, 2), dtype=float)
for icell in range(ncpl):
- vlist = [
- (verts[ivert, 0], verts[ivert, 1]) for ivert in iverts[icell]
- ]
+ vlist = [(verts[ivert, 0], verts[ivert, 1]) for ivert in iverts[icell]]
xcyc[icell, 0], xcyc[icell, 1] = centroid_of_polygon(vlist)
else:
assert xcyc.shape == (ncpl, 2)
@@ -371,10 +369,7 @@ def get_disv_gridprops(verts, iverts, xcyc=None):
vertices.append((i, verts[i, 0], verts[i, 1]))
cell2d = []
for i in range(ncpl):
- cell2d.append(
- [i, xcyc[i, 0], xcyc[i, 1], len(iverts[i])]
- + [iv for iv in iverts[i]]
- )
+ cell2d.append([i, xcyc[i, 0], xcyc[i, 1], len(iverts[i])] + list(iverts[i]))
gridprops = {}
gridprops["ncpl"] = ncpl
gridprops["nvert"] = nvert
diff --git a/flopy/utils/datafile.py b/flopy/utils/datafile.py
index 5c32b72920..91037258ba 100644
--- a/flopy/utils/datafile.py
+++ b/flopy/utils/datafile.py
@@ -4,6 +4,15 @@
"""
+# in LayerFile, the recordarray attribute begins its life as
+# a list, which is appended to in subclasses' build_index(),
+# then finally becomes an array, after which it's accessed
+# in this file by column name. this probably deserves some
+# attention, but in the meantime, disable the pylint rule
+# to appease codacy.
+#
+# pylint: disable=invalid-sequence-index
+
import os
import warnings
from pathlib import Path
@@ -120,8 +129,9 @@ def __init__(self, filetype=None, precision="single"):
self.dtype = None
self.header = None
print(
- "Specified {} type is not available. "
- "Available types are:".format(self.header_type)
+ "Specified {} type is not available. Available types are:".format(
+ self.header_type
+ )
)
for idx, t in enumerate(self.header_types):
print(f" {idx + 1} {t}")
@@ -156,9 +166,7 @@ class LayerFile:
"""
- def __init__(
- self, filename: Union[str, os.PathLike], precision, verbose, **kwargs
- ):
+ def __init__(self, filename: Union[str, os.PathLike], precision, verbose, **kwargs):
from ..discretization.structuredgrid import StructuredGrid
self.filename = Path(filename).expanduser().absolute()
@@ -213,9 +221,7 @@ def __init__(
if self.mg is None:
self.mg = StructuredGrid(
delc=np.ones((self.nrow,)),
- delr=np.ones(
- self.ncol,
- ),
+ delr=np.ones(self.ncol),
nlay=self.nlay,
xoff=0.0,
yoff=0.0,
@@ -283,9 +289,7 @@ def to_shapefile(
"""
plotarray = np.atleast_3d(
- self.get_data(
- kstpkper=kstpkper, totim=totim, mflay=mflay
- ).transpose()
+ self.get_data(kstpkper=kstpkper, totim=totim, mflay=mflay).transpose()
).transpose()
if mflay is not None:
attrib_dict = {f"{attrib_name}{mflay}": plotarray[0, :, :]}
@@ -394,15 +398,11 @@ def plot(
else:
i0 = 0
i1 = self.nlay
- filenames = [
- f"{filename_base}_Layer{k + 1}.{fext}" for k in range(i0, i1)
- ]
+ filenames = [f"{filename_base}_Layer{k + 1}.{fext}" for k in range(i0, i1)]
# make sure we have a (lay,row,col) shape plotarray
plotarray = np.atleast_3d(
- self.get_data(
- kstpkper=kstpkper, totim=totim, mflay=mflay
- ).transpose()
+ self.get_data(kstpkper=kstpkper, totim=totim, mflay=mflay).transpose()
).transpose()
from ..plot.plotutil import PlotUtilities
@@ -463,9 +463,7 @@ def _get_data_array(self, totim=0):
"""
if totim >= 0.0:
- keyindices = np.asarray(
- self.recordarray["totim"] == totim
- ).nonzero()[0]
+ keyindices = np.asarray(self.recordarray["totim"] == totim).nonzero()[0]
if len(keyindices) == 0:
msg = f"totim value ({totim}) not found in file..."
raise Exception(msg)
@@ -552,9 +550,7 @@ def get_data(self, kstpkper=None, idx=None, totim=None, mflay=None):
& (self.recordarray["kper"] == kper1)
).nonzero()
if idx[0].shape[0] == 0:
- raise Exception(
- f"get_data() error: kstpkper not found:{kstpkper}"
- )
+ raise Exception(f"get_data() error: kstpkper not found:{kstpkper}")
totim1 = self.recordarray[idx]["totim"][0]
elif totim is not None:
totim1 = totim
@@ -637,8 +633,9 @@ def _build_kijlist(self, idx):
fail = True
if fail:
raise Exception(
- "Invalid cell index. Cell {} not within model grid: "
- "{}".format((k, i, j), (self.nlay, self.nrow, self.ncol))
+ "Invalid cell index. Cell {} not within model grid: {}".format(
+ (k, i, j), (self.nlay, self.nrow, self.ncol)
+ )
)
return kijlist
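
The new comment at the top of datafile.py describes a list-then-recarray life cycle for LayerFile headers; a compact sketch of that pattern, with hypothetical header fields:

import numpy as np

records = []                                  # list while building the index
records.append((1, 1, 1.0))                   # (kstp, kper, totim)
records.append((2, 1, 2.0))
dtype = [("kstp", "i4"), ("kper", "i4"), ("totim", "f8")]
recordarray = np.array(records, dtype=dtype).view(np.recarray)
print(recordarray["totim"])                   # column access by name: [1. 2.]
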
diff --git a/flopy/utils/datautil.py b/flopy/utils/datautil.py
index 0fba1bdb4b..011a4323df 100644
--- a/flopy/utils/datautil.py
+++ b/flopy/utils/datautil.py
@@ -5,10 +5,7 @@
def clean_filename(file_name):
- if (
- file_name[0] in PyListUtil.quote_list
- and file_name[-1] in PyListUtil.quote_list
- ):
+ if file_name[0] in PyListUtil.quote_list and file_name[-1] in PyListUtil.quote_list:
# quoted string
# keep entire string and remove the quotes
f_name = file_name.strip('"')
@@ -83,11 +80,7 @@ def is_float(v):
@staticmethod
def is_basic_type(obj):
- if (
- isinstance(obj, str)
- or isinstance(obj, int)
- or isinstance(obj, float)
- ):
+ if isinstance(obj, str) or isinstance(obj, int) or isinstance(obj, float):
return True
return False
@@ -103,9 +96,9 @@ def cellid_model_num(data_item_name, model_data, model_dim):
model_num = data_item_name[7:]
if DatumUtil.is_int(model_num):
return int(model_num) - 1
- if (
- data_item_name == "cellidn" or data_item_name == "cellidsj"
- ) and len(model_dim) > 0:
+ if (data_item_name == "cellidn" or data_item_name == "cellidsj") and len(
+ model_dim
+ ) > 0:
return 0
elif data_item_name == "cellidm" and len(model_dim) > 1:
return 1
@@ -192,8 +185,7 @@ def has_one_item(current_list):
if len(current_list) != 1:
return False
if (
- isinstance(current_list[0], list)
- or isinstance(current_list, np.ndarray)
+ isinstance(current_list[0], list) or isinstance(current_list, np.ndarray)
) and len(current_list[0] != 0):
return False
return True
@@ -246,9 +238,7 @@ def first_item(current_list):
return item
@staticmethod
- def next_item(
- current_list, new_list=True, nesting_change=0, end_of_list=True
- ):
+ def next_item(current_list, new_list=True, nesting_change=0, end_of_list=True):
# returns the next item in a nested list along with other information:
        # (<next item>, <new list>, <nesting change>,
        #  <end of list>)
@@ -259,9 +249,7 @@ def next_item(
else:
list_size = 1
for item in current_list:
- if isinstance(item, list) or isinstance(
- current_list, np.ndarray
- ):
+ if isinstance(item, list) or isinstance(current_list, np.ndarray):
# still in a list of lists, recurse
for item in PyListUtil.next_item(
item,
@@ -317,10 +305,7 @@ def reset_delimiter_used():
@staticmethod
def split_data_line(line, external_file=False, delimiter_conf_length=15):
- if (
- PyListUtil.line_num > delimiter_conf_length
- and PyListUtil.consistent_delim
- ):
+ if PyListUtil.line_num > delimiter_conf_length and PyListUtil.consistent_delim:
# consistent delimiter has been found. continue using that
# delimiter without doing further checks
if PyListUtil.delimiter_used is None:
@@ -328,9 +313,7 @@ def split_data_line(line, external_file=False, delimiter_conf_length=15):
clean_line = comment_split[0].strip().split()
else:
comment_split = line.split("#", 1)
- clean_line = (
- comment_split[0].strip().split(PyListUtil.delimiter_used)
- )
+ clean_line = comment_split[0].strip().split(PyListUtil.delimiter_used)
if len(comment_split) > 1:
clean_line.append("#")
clean_line.append(comment_split[1].strip())
@@ -525,8 +508,7 @@ def __init__(self, mdlist=None, shape=None, callback=None):
self.build_list(callback)
else:
raise Exception(
- "MultiList requires either a mdlist or a shape "
- "at initialization."
+ "MultiList requires either a mdlist or a shape at initialization."
)
def __getitem__(self, k):
@@ -568,21 +550,15 @@ def increment_dimension(self, dimension, callback):
new_row_idx = len(self.multi_dim_list)
self.multi_dim_list.append([])
for index in range(0, self.list_shape[1]):
- self.multi_dim_list[-1].append(
- callback((new_row_idx, index))
- )
+ self.multi_dim_list[-1].append(callback((new_row_idx, index)))
self.list_shape = (self.list_shape[0] + 1, self.list_shape[1])
elif dimension == 2:
new_col_idx = len(self.multi_dim_list[0])
for index in range(0, self.list_shape[0]):
- self.multi_dim_list[index].append(
- callback((index, new_col_idx))
- )
+ self.multi_dim_list[index].append(callback((index, new_col_idx)))
self.list_shape = (self.list_shape[0], self.list_shape[1] + 1)
else:
- raise Exception(
- 'For two dimensional lists "dimension" must ' "be 1 or 2."
- )
+ raise Exception('For two dimensional lists "dimension" must be 1 or 2.')
def build_list(self, callback):
entry_points = [(self.multi_dim_list, self.first_index())]
@@ -602,9 +578,7 @@ def build_list(self, callback):
new_location = (len(entry_point) - 1,)
else:
new_location = ((len(entry_point[0]) - 1), val)
- new_entry_points.append(
- (entry_point[0][-1], new_location)
- )
+ new_entry_points.append((entry_point[0][-1], new_location))
else:
entry_point[0].append(
callback(tuple(i + val for i in entry_point[1]))
diff --git a/flopy/utils/flopy_io.py b/flopy/utils/flopy_io.py
index 403f3e7a97..88e5a2f255 100644
--- a/flopy/utils/flopy_io.py
+++ b/flopy/utils/flopy_io.py
@@ -45,9 +45,7 @@ def _fmt_string(array, float_format="{}"):
"recarray to file - change to 'object' type"
)
else:
- raise Exception(
- f"MfList.fmt_string error: unknown vtype in dtype:{vtype}"
- )
+ raise Exception(f"MfList.fmt_string error: unknown vtype in dtype:{vtype}")
return fmt_string
@@ -324,9 +322,7 @@ def flux_to_wel(cbc_file, text, precision="single", model=None, verbose=False):
return wel
-def loadtxt(
- file, delimiter=" ", dtype=None, skiprows=0, use_pandas=True, **kwargs
-):
+def loadtxt(file, delimiter=" ", dtype=None, skiprows=0, use_pandas=True, **kwargs):
"""
Use pandas to load a text file
(significantly faster than n.loadtxt or genfromtxt see
diff --git a/flopy/utils/formattedfile.py b/flopy/utils/formattedfile.py
index 8d87cf6464..9f3074bfb4 100644
--- a/flopy/utils/formattedfile.py
+++ b/flopy/utils/formattedfile.py
@@ -65,10 +65,7 @@ def read_header(self, text_file):
arrheader = header_text.split()
# Verify header exists and is in the expected format
- if (
- len(arrheader) >= 5
- and arrheader[4].upper() != self.text_ident.upper()
- ):
+ if len(arrheader) >= 5 and arrheader[4].upper() != self.text_ident.upper():
raise Exception(
"Expected header not found. Make sure the file being "
"processed includes headers (LABEL output control option): "
@@ -84,9 +81,7 @@ def read_header(self, text_file):
or not is_int(arrheader[6])
or not is_int(arrheader[7])
):
- raise Exception(
- f"Unexpected format for FHDTextHeader: {header_text}"
- )
+ raise Exception(f"Unexpected format for FHDTextHeader: {header_text}")
headerinfo = np.empty([8], dtype=self.dtype)
headerinfo["kstp"] = int(arrheader[0])
@@ -159,9 +154,7 @@ def _build_index(self):
# provide headers as a pandas frame
self.headers = pd.DataFrame(self.recordarray, index=self.iposarray)
- self.headers["text"] = self.headers["text"].str.decode(
- "ascii", "strict"
- )
+ self.headers["text"] = self.headers["text"].str.decode("ascii", "strict")
def _store_record(self, header, ipos):
"""
diff --git a/flopy/utils/geometry.py b/flopy/utils/geometry.py
index 578040d559..3c58bf534b 100644
--- a/flopy/utils/geometry.py
+++ b/flopy/utils/geometry.py
@@ -36,9 +36,7 @@ def __init__(
if shapetype == "Polygon":
self.exterior = tuple(map(tuple, exterior))
self.interiors = (
- tuple()
- if interiors is None
- else (tuple(map(tuple, i)) for i in interiors)
+ () if interiors is None else (tuple(map(tuple, i)) for i in interiors)
)
self.interiors = tuple(self.interiors)
@@ -57,7 +55,7 @@ def __init__(
else:
err = (
"Supported shape types are Polygon, LineString, "
- "and Point: Supplied shape type {}".format(shapetype)
+ f"and Point: Supplied shape type {shapetype}"
)
raise TypeError(err)
@@ -74,23 +72,15 @@ def __geo_interface__(self):
if self.__type == "Polygon":
geo_interface = {
- "coordinates": tuple(
- [self.exterior] + [i for i in self.interiors]
- ),
+ "coordinates": tuple([self.exterior] + list(self.interiors)),
"type": self.__type,
}
elif self.__type == "LineString":
- geo_interface = {
- "coordinates": tuple(self.coords),
- "type": self.__type,
- }
+ geo_interface = {"coordinates": tuple(self.coords), "type": self.__type}
elif self.__type == "Point":
- geo_interface = {
- "coordinates": tuple(self.coords),
- "type": self.__type,
- }
+ geo_interface = {"coordinates": tuple(self.coords), "type": self.__type}
return geo_interface
@@ -135,9 +125,7 @@ def from_geojson(geo_interface):
shape = LineString(geo_interface["coordinates"])
elif geo_interface["type"] == "MultiLineString":
- geoms = [
- LineString(coords) for coords in geo_interface["coordinates"]
- ]
+ geoms = [LineString(coords) for coords in geo_interface["coordinates"]]
shape = MultiLineString(geoms)
elif geo_interface["type"] == "Point":
@@ -357,10 +345,7 @@ def __init__(self, exterior, interiors=None):
z information is only stored if it was entered.
"""
super().__init__(
- self.type,
- coordinates=None,
- exterior=exterior,
- interiors=interiors,
+ self.type, coordinates=None, exterior=exterior, interiors=interiors
)
def __eq__(self, other):
@@ -663,14 +648,10 @@ def rotate(x, y, xoff, yoff, angrot_radians):
y = np.array(y)
xrot = (
- xoff
- + np.cos(angrot_radians) * (x - xoff)
- - np.sin(angrot_radians) * (y - yoff)
+ xoff + np.cos(angrot_radians) * (x - xoff) - np.sin(angrot_radians) * (y - yoff)
)
yrot = (
- yoff
- + np.sin(angrot_radians) * (x - xoff)
- + np.cos(angrot_radians) * (y - yoff)
+ yoff + np.sin(angrot_radians) * (x - xoff) + np.cos(angrot_radians) * (y - yoff)
)
return xrot, yrot
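
The reflowed rotate() expressions are the standard 2-D rotation about (xoff, yoff); a quick check of the identity with hypothetical values:

import numpy as np

def rotate_pt(x, y, xoff, yoff, angrot_radians):
    # same expressions as rotate() above, for a single point
    xrot = xoff + np.cos(angrot_radians) * (x - xoff) - np.sin(angrot_radians) * (y - yoff)
    yrot = yoff + np.sin(angrot_radians) * (x - xoff) + np.cos(angrot_radians) * (y - yoff)
    return xrot, yrot

print(rotate_pt(1.0, 0.0, 0.0, 0.0, np.pi / 2))  # ~(0.0, 1.0)
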
@@ -868,9 +849,9 @@ def point_in_polygon(xc, yc, polygon):
num = len(polygon)
j = num - 1
for i in range(num):
- tmp = polygon[i][0] + (polygon[j][0] - polygon[i][0]) * (
- yc - polygon[i][1]
- ) / (polygon[j][1] - polygon[i][1])
+ tmp = polygon[i][0] + (polygon[j][0] - polygon[i][0]) * (yc - polygon[i][1]) / (
+ polygon[j][1] - polygon[i][1]
+ )
comp = np.asarray(
((polygon[i][1] > yc) ^ (polygon[j][1] > yc)) & (xc < tmp)
@@ -886,7 +867,7 @@ def point_in_polygon(xc, yc, polygon):
return mask
-def project_point_onto_xc_line(line, pts, d0=0, direction="x"):
+def project_point_onto_xc_line(line, pts, d0=0, calc_dist=False):
"""
Method to project points onto a cross sectional line
that is defined by distance. Used for plotting MODPATH results
@@ -898,46 +879,80 @@ def project_point_onto_xc_line(line, pts, d0=0, direction="x"):
pts : list or np.ndarray
numpy array of [(x, y),] points to be projected
d0 : distance offset along line of min(xl)
- direction : string
- projection direction "x" or "y"
+    calc_dist : bool
+        if True, return distances along the cross-sectional line, offset
+        by d0 and measured from line[0]; if False, return the projected
+        (x, y) locations on the cross-sectional line
+
Returns:
- np.ndarray of projected [(x, y),] points
+        tuple of (x, y) arrays, or distance when calc_dist=True
"""
if isinstance(line, list):
line = np.array(line)
if isinstance(pts, list):
pts = np.array(pts)
+ if pts.ndim == 1:
+ pts = np.expand_dims(pts, axis=0)
x0, x1 = line.T[0, :]
y0, y1 = line.T[1, :]
- dx = np.abs(x0 - x1)
- dy = np.abs(y0 - y1)
+ dx = x1 - x0
+ dy = y1 - y0
+ if dx == 0:
+ dx = 1e-10 # trap the vertical line condition to avoid inf slope
m = dy / dx
b = y0 - (m * x0)
x = pts.T[0]
y = pts.T[1]
- if direction == "x":
- if dy == 0:
- pass
- else:
- y = (x * m) + b
+ bx = ((m * y) + (x - m * b)) / (1 + m**2)
+ by = ((m**2 * y) + (m * x) + b) / (1 + m**2)
+
+ if calc_dist:
+ # get distance between bxy, xy0
+ dist = distance(bx, by, x0, y0)
+ # get distance between xy0, xy1
+ dist0 = distance(x0, y0, x1, y1)
+ # get distance between bxy, xy1
+ dist1 = distance(bx, by, x1, y1)
+
+ adj = np.full((dist.size,), 1)
+ for ix, d in enumerate(dist):
+ if d <= dist0:
+ if dist1[ix] > dist0:
+ adj[ix] = -1
+ else:
+ if dist1[ix] > d:
+ adj[ix] = -1
- else:
- if dx == 0:
- pass
- else:
- x = (y - b) / m
+ dist *= adj
+ dist += d0
+ if len(dist) == 1:
+ dist = dist[0]
+ return dist
- # now do distance equation on pts from x0, y0
- asq = (x - x0) ** 2
- bsq = (y - y0) ** 2
- dist = np.sqrt(asq + bsq)
- if direction == "x":
- x = dist + d0
else:
- y = d0 - dist
+ return bx, by
+
+
+def distance(x0, y0, x1, y1):
+ """
+    General Euclidean distance equation.
+
- return (x, y)
+ Parameters
+ ----------
+ x0 : float
+ y0 : float
+ x1 : np.array or float
+ y1 : np.array or float
+
+ Returns
+ -------
+    dist : np.ndarray or float
+        Euclidean distance between (x0, y0) and (x1, y1)
+ """
+ asq = (x0 - x1) ** 2
+ bsq = (y0 - y1) ** 2
+ dist = np.sqrt(asq + bsq)
+ return dist
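
The rewritten projection replaces the old axis-aligned shortcut with the foot of the perpendicular onto y = m*x + b; a worked sketch of the same bx/by formulas, with hypothetical values:

import numpy as np

line = np.array([(0.0, 0.0), (10.0, 10.0)])   # hypothetical cross-section line
pts = np.array([(4.0, 6.0)])                  # hypothetical point to project
(x0, y0), (x1, y1) = line
m = (y1 - y0) / (x1 - x0)                     # slope (vertical case trapped upstream)
b = y0 - m * x0                               # intercept
px, py = pts.T
bx = ((m * py) + (px - m * b)) / (1 + m**2)   # foot of perpendicular, x
by = ((m**2 * py) + (m * px) + b) / (1 + m**2)  # foot of perpendicular, y
print(bx, by)                                 # [5.] [5.]
print(np.hypot(bx - x0, by - y0))             # distance from line[0]: ~[7.071]
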
diff --git a/flopy/utils/geospatial_utils.py b/flopy/utils/geospatial_utils.py
index 6101f951c6..80b02644a2 100644
--- a/flopy/utils/geospatial_utils.py
+++ b/flopy/utils/geospatial_utils.py
@@ -49,9 +49,7 @@ class GeoSpatialUtil:
"""
def __init__(self, obj, shapetype=None):
- self.__shapefile = import_optional_dependency(
- "shapefile", errors="silent"
- )
+ self.__shapefile = import_optional_dependency("shapefile", errors="silent")
self.__obj = obj
self.__geo_interface = {}
self._geojson = None
@@ -212,9 +210,7 @@ def shape(self):
"""
if self.__shapefile is not None:
if self._shape is None:
- self._shape = self.__shapefile.Shape._from_geojson(
- self.__geo_interface
- )
+ self._shape = self.__shapefile.Shape._from_geojson(self.__geo_interface)
return self._shape
@property
@@ -260,14 +256,10 @@ class GeoSpatialCollection:
"""
def __init__(self, obj, shapetype=None):
- self.__shapefile = import_optional_dependency(
- "shapefile", errors="silent"
- )
+ self.__shapefile = import_optional_dependency("shapefile", errors="silent")
gpd = import_optional_dependency("geopandas", errors="silent")
- shapely_geo = import_optional_dependency(
- "shapely.geometry", errors="silent"
- )
+ shapely_geo = import_optional_dependency("shapely.geometry", errors="silent")
self.__obj = obj
self.__collection = []
@@ -317,9 +309,7 @@ def __init__(self, obj, shapetype=None):
shapetype = [shapetype] * len(obj)
for ix, geom in enumerate(obj):
- self.__collection.append(
- GeoSpatialUtil(geom, shapetype[ix])
- )
+ self.__collection.append(GeoSpatialUtil(geom, shapetype[ix]))
elif self.__shapefile is not None:
if isinstance(obj, (str, os.PathLike)):
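
The condensed one-liners above all use flopy's optional-import helper; a sketch of the errors="silent" contract:

from flopy.utils.utl_import import import_optional_dependency

shapefile = import_optional_dependency("shapefile", errors="silent")
if shapefile is None:
    print("pyshp is not installed; shapefile support is disabled")
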
diff --git a/flopy/utils/get_modflow.py b/flopy/utils/get_modflow.py
index 6bc75ad4de..d13035a21a 100755
--- a/flopy/utils/get_modflow.py
+++ b/flopy/utils/get_modflow.py
@@ -35,7 +35,15 @@
"modflow6-nightly-build": "modflow6_nightly",
}
available_repos = list(renamed_prefix.keys())
-available_ostags = ["linux", "mac", "macarm", "win32", "win64", "win64par"]
+available_ostags = [
+ "linux",
+ "mac",
+ "macarm",
+ "win32",
+ "win64",
+ "win64ext",
+ "win64par",
+]
max_http_tries = 3
# Check if this is running from flopy
@@ -68,7 +76,7 @@ def get_ostag() -> str:
def get_suffixes(ostag) -> Tuple[str, str]:
- if ostag in ["win32", "win64", "win64par"]:
+ if ostag.startswith("win"):
return ".exe", ".dll"
elif ostag == "linux":
return "", ".so"
@@ -97,9 +105,7 @@ def get_request(url, params={}):
return urllib.request.Request(url, headers=headers)
-def get_releases(
- owner=None, repo=None, quiet=False, per_page=None
-) -> List[str]:
+def get_releases(owner=None, repo=None, quiet=False, per_page=None) -> List[str]:
"""Get list of available releases."""
owner = default_owner if owner is None else owner
repo = default_repo if repo is None else repo
@@ -207,9 +213,7 @@ def columns_str(items, line_chars=79) -> str:
lines = []
for row_num in range(num_rows):
row_items = items[row_num::num_rows]
- lines.append(
- " ".join(item.ljust(item_chars) for item in row_items).rstrip()
- )
+ lines.append(" ".join(item.ljust(item_chars) for item in row_items).rstrip())
return "\n".join(lines)
@@ -222,9 +226,7 @@ def get_bindir_options(previous=None) -> Dict[str, Tuple[Path, str]]:
if within_flopy: # don't check is_dir() or access yet
options[":flopy"] = (flopy_appdata_path / "bin", "used by FloPy")
# Python bin (same for standard or conda varieties)
- py_bin = Path(sys.prefix) / (
- "Scripts" if get_ostag().startswith("win") else "bin"
- )
+ py_bin = Path(sys.prefix) / ("Scripts" if get_ostag().startswith("win") else "bin")
if py_bin.is_dir() and os.access(py_bin, os.W_OK):
options[":python"] = (py_bin, "used by Python")
home_local_bin = Path.home() / ".local" / "bin"
@@ -234,9 +236,7 @@ def get_bindir_options(previous=None) -> Dict[str, Tuple[Path, str]]:
if local_bin.is_dir() and os.access(local_bin, os.W_OK):
options[":system"] = (local_bin, "system local bindir")
# Windows user
- windowsapps_dir = Path(
- os.path.expandvars(r"%LOCALAPPDATA%\Microsoft\WindowsApps")
- )
+ windowsapps_dir = Path(os.path.expandvars(r"%LOCALAPPDATA%\Microsoft\WindowsApps"))
if windowsapps_dir.is_dir() and os.access(windowsapps_dir, os.W_OK):
options[":windowsapps"] = (windowsapps_dir, "User App path")
@@ -248,28 +248,25 @@ def get_bindir_options(previous=None) -> Dict[str, Tuple[Path, str]]:
def select_bindir(bindir, previous=None, quiet=False, is_cli=False) -> Path:
- """Resolve an install location if provided, or prompt interactive user to select one."""
+ """Resolve an install location if provided, or prompt interactive user to
+ select one."""
options = get_bindir_options(previous)
if len(bindir) > 1: # auto-select mode
# match one option that starts with input, e.g. :Py -> :python
- sel = list(opt for opt in options if opt.startswith(bindir.lower()))
+ sel = [opt for opt in options if opt.startswith(bindir.lower())]
if len(sel) != 1:
opt_avail = ", ".join(
- f"'{opt}' for '{optpath}'"
- for opt, (optpath, _) in options.items()
- )
- raise ValueError(
- f"invalid option '{bindir}', choose from: {opt_avail}"
+ f"'{opt}' for '{optpath}'" for opt, (optpath, _) in options.items()
)
+ raise ValueError(f"invalid option '{bindir}', choose from: {opt_avail}")
if not quiet:
print(f"auto-selecting option {sel[0]!r} for 'bindir'")
return Path(options[sel[0]][0]).resolve()
else:
if not is_cli:
opt_avail = ", ".join(
- f"'{opt}' for '{optpath}'"
- for opt, (optpath, _) in options.items()
+ f"'{opt}' for '{optpath}'" for opt, (optpath, _) in options.items()
)
raise ValueError(f"specify the option, choose from: {opt_avail}")
@@ -290,9 +287,7 @@ def select_bindir(bindir, previous=None, quiet=False, is_cli=False) -> Path:
if num_tries < 2:
print("invalid option, try choosing option again")
else:
- raise RuntimeError(
- "invalid option, too many attempts"
- ) from None
+ raise RuntimeError("invalid option, too many attempts") from None
def run_main(
@@ -376,6 +371,12 @@ def run_main(
if ostag is None:
ostag = get_ostag()
+ if ostag == "win64par":
+ warnings.warn(
+ "The parallel build is deprecated and will no longer "
+ "be published: 'win64ext' replaces 'win64par'."
+ )
+
exe_suffix, lib_suffix = get_suffixes(ostag)
# select bindir if path not provided
@@ -401,9 +402,7 @@ def run_main(
# make sure repo option is valid
if repo not in available_repos:
- raise KeyError(
- f"repo {repo!r} not supported; choose one of {available_repos}"
- )
+ raise KeyError(f"repo {repo!r} not supported; choose one of {available_repos}")
# get the selected release
release = get_release(owner, repo, release_id, quiet)
@@ -424,9 +423,7 @@ def run_main(
dst_fname = "-".join([repo, release["tag_name"], ostag]) + asset_suffix
else:
# change local download name so it is more unique
- dst_fname = "-".join(
- [renamed_prefix[repo], release["tag_name"], asset_name]
- )
+ dst_fname = "-".join([renamed_prefix[repo], release["tag_name"], asset_name])
tmpdir = None
if downloads_dir is None:
downloads_dir = Path.home() / "Downloads"
@@ -436,13 +433,9 @@ def run_main(
else: # check user-defined
downloads_dir = Path(downloads_dir)
if not downloads_dir.is_dir():
- raise OSError(
- f"downloads directory '{downloads_dir}' does not exist"
- )
+ raise OSError(f"downloads directory '{downloads_dir}' does not exist")
elif not os.access(downloads_dir, os.W_OK):
- raise OSError(
- f"downloads directory '{downloads_dir}' is not writable"
- )
+ raise OSError(f"downloads directory '{downloads_dir}' is not writable")
download_pth = downloads_dir / dst_fname
if download_pth.is_file() and not force:
if not quiet:
@@ -537,25 +530,18 @@ def add_item(key, fname, do_chmod):
for key in sorted(code):
if code[key].get("shared_object"):
fname = f"{key}{lib_suffix}"
- if nosub or (
- subset and (key in subset or fname in subset)
- ):
+ if nosub or (subset and (key in subset or fname in subset)):
add_item(key, fname, do_chmod=False)
else:
fname = f"{key}{exe_suffix}"
- if nosub or (
- subset and (key in subset or fname in subset)
- ):
+ if nosub or (subset and (key in subset or fname in subset)):
add_item(key, fname, do_chmod=True)
# check if double version exists
fname = f"{key}dbl{exe_suffix}"
if (
code[key].get("double_switch", True)
and fname in files
- and (
- nosub
- or (subset and (key in subset or fname in subset))
- )
+ and (nosub or (subset and (key in subset or fname in subset)))
):
add_item(key, fname, do_chmod=True)
@@ -590,7 +576,7 @@ def add_item(key, fname, do_chmod):
bindir_path.replace(bindir / fpath.name)
rmdirs.add(fpath.parent)
# clean up directories, starting with the longest
- for rmdir in reversed(sorted(rmdirs)):
+ for rmdir in sorted(rmdirs, reverse=True):
bindir_path = bindir / rmdir
bindir_path.rmdir()
for subdir in rmdir.parents:
@@ -731,9 +717,7 @@ def cli_main():
help="Force re-download archive. Default behavior will use archive if "
"previously downloaded in downloads-dir.",
)
- parser.add_argument(
- "--quiet", action="store_true", help="Show fewer messages."
- )
+ parser.add_argument("--quiet", action="store_true", help="Show fewer messages.")
args = vars(parser.parse_args())
try:
run_main(**args, _is_cli=True)
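
With the new win64ext tag, the suffix lookup above keys on the "win" prefix instead of an explicit list; a sketch of that rule (the mac fallback is an assumption of this sketch):

def get_suffixes_stub(ostag: str) -> tuple[str, str]:
    # mirrors the simplified prefix rule in get_suffixes()
    if ostag.startswith("win"):
        return ".exe", ".dll"
    if ostag == "linux":
        return "", ".so"
    return "", ".dylib"  # mac/macarm (assumed here)

for tag in ("win32", "win64", "win64ext", "win64par"):
    assert get_suffixes_stub(tag) == (".exe", ".dll")
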
diff --git a/flopy/utils/gridgen.py b/flopy/utils/gridgen.py
index 437f3a0be5..badd181559 100644
--- a/flopy/utils/gridgen.py
+++ b/flopy/utils/gridgen.py
@@ -2,7 +2,7 @@
import subprocess
import warnings
from pathlib import Path
-from typing import List, Union
+from typing import Union
import numpy as np
@@ -38,9 +38,7 @@ def read1d(f, a):
return a
-def features_to_shapefile(
- features, featuretype, filename: Union[str, os.PathLike]
-):
+def features_to_shapefile(features, featuretype, filename: Union[str, os.PathLike]):
"""
Write a shapefile for the features of type featuretype.
@@ -74,12 +72,7 @@ def features_to_shapefile(
features = GeoSpatialCollection(features, featuretype).flopy_geometry
- if featuretype.lower() not in [
- "point",
- "line",
- "linestring",
- "polygon",
- ]:
+ if featuretype.lower() not in ["point", "line", "linestring", "polygon"]:
raise ValueError(f"Unrecognized feature type: {featuretype}")
if featuretype.lower() in ("line", "linestring"):
@@ -106,9 +99,7 @@ def features_to_shapefile(
wr.close()
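
The flattened guard in features_to_shapefile() accepts exactly four feature types; a sketch of the same validation:

def check_featuretype(featuretype: str) -> str:
    # mirrors the guard in features_to_shapefile()
    if featuretype.lower() not in ["point", "line", "linestring", "polygon"]:
        raise ValueError(f"Unrecognized feature type: {featuretype}")
    return featuretype.lower()

print(check_featuretype("LineString"))  # linestring
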
-def ndarray_to_asciigrid(
- fname: Union[str, os.PathLike], a, extent, nodata=1.0e30
-):
+def ndarray_to_asciigrid(fname: Union[str, os.PathLike], a, extent, nodata=1.0e30):
# extent info
xmin, xmax, ymin, ymax = extent
ncol, nrow = a.shape
@@ -238,9 +229,7 @@ def __init__(
self.modelgrid = modelgrid.parent.modelgrid
else:
- raise TypeError(
- "A StructuredGrid object must be supplied to Gridgen"
- )
+ raise TypeError("A StructuredGrid object must be supplied to Gridgen")
self.nlay = self.modelgrid.nlay
self.nrow = self.modelgrid.nrow
@@ -269,12 +258,8 @@ def __init__(
if vertical_pass_through:
self.vertical_pass_through = "True"
- self.smoothing_level_vertical = kwargs.pop(
- "smoothing_level_vertical", 1
- )
- self.smoothing_level_horizontal = kwargs.pop(
- "smoothing_level_horizontal", 1
- )
+ self.smoothing_level_vertical = kwargs.pop("smoothing_level_vertical", 1)
+ self.smoothing_level_horizontal = kwargs.pop("smoothing_level_horizontal", 1)
# Set up a blank _active_domain list with None for each layer
self._addict = {}
self._active_domain = []
@@ -289,9 +274,7 @@ def __init__(
# Set up blank _elev and _elev_extent dictionaries
self._asciigrid_dict = {}
- def set_surface_interpolation(
- self, isurf, type, elev=None, elev_extent=None
- ):
+ def set_surface_interpolation(self, isurf, type, elev=None, elev_extent=None):
"""
Parameters
----------
@@ -326,14 +309,13 @@ def set_surface_interpolation(
if type == "ASCIIGRID":
if isinstance(elev, np.ndarray):
if elev_extent is None:
- raise ValueError(
- "ASCIIGRID was specified but elev_extent was not."
- )
+ raise ValueError("ASCIIGRID was specified but elev_extent was not.")
try:
xmin, xmax, ymin, ymax = elev_extent
except:
raise ValueError(
- f"Cannot unpack elev_extent as tuple (xmin, xmax, ymin, ymax): {elev_extent}"
+ "Cannot unpack elev_extent as tuple (xmin, xmax, ymin, ymax): "
+ f"{elev_extent}"
)
nm = f"_gridgen.lay{isurf}.asc"
@@ -349,7 +331,8 @@ def set_surface_interpolation(
self._asciigrid_dict[isurf] = elev
else:
raise ValueError(
- "ASCIIGRID was specified but elevation was not provided as a numpy ndarray or asciigrid file."
+ "ASCIIGRID was specified but elevation was not provided "
+ "as a numpy ndarray or asciigrid file."
)
def resolve_shapefile_path(self, p):
@@ -360,9 +343,7 @@ def _resolve(p):
return path if path.is_file() else self.model_ws / p
path = _resolve(p)
- path = (
- path if path.is_file() else _resolve(Path(p).with_suffix(".shp"))
- )
+ path = path if path.is_file() else _resolve(Path(p).with_suffix(".shp"))
return path if path.is_file() else None
def add_active_domain(self, feature, layers):
@@ -408,9 +389,7 @@ def add_active_domain(self, feature, layers):
), f"Shapefile does not exist: {shapefile_path}"
# store shapefile info
- self._addict[shapefile_path.stem] = relpath_safe(
- shapefile_path, self.model_ws
- )
+ self._addict[shapefile_path.stem] = relpath_safe(shapefile_path, self.model_ws)
for k in layers:
self._active_domain[k] = shapefile_path.stem
@@ -509,11 +488,7 @@ def build(self, verbose=False):
qtgfname = os.path.join(self.model_ws, "quadtreegrid.dfn")
if os.path.isfile(qtgfname):
os.remove(qtgfname)
- cmds = [
- self.exe_name,
- "quadtreebuilder",
- "_gridgen_build.dfn",
- ]
+ cmds = [self.exe_name, "quadtreebuilder", "_gridgen_build.dfn"]
buff = subprocess.check_output(cmds, cwd=self.model_ws)
if verbose:
print(buff)
@@ -593,16 +568,10 @@ def export(self, verbose=False):
f.write("\n")
f.write(self._grid_export_blocks())
f.close()
- assert os.path.isfile(
- fname
- ), f"Could not create export dfn file: {fname}"
+ assert os.path.isfile(fname), f"Could not create export dfn file: {fname}"
# Export shapefiles
- cmds = [
- self.exe_name,
- "grid_to_shapefile_poly",
- "_gridgen_export.dfn",
- ]
+ cmds = [self.exe_name, "grid_to_shapefile_poly", "_gridgen_export.dfn"]
buff = []
try:
buff = subprocess.check_output(cmds, cwd=self.model_ws)
@@ -611,16 +580,9 @@ def export(self, verbose=False):
fn = os.path.join(self.model_ws, "qtgrid.shp")
assert os.path.isfile(fn)
except:
- print(
- "Error. Failed to export polygon shapefile of grid",
- buff,
- )
+ print("Error. Failed to export polygon shapefile of grid", buff)
- cmds = [
- self.exe_name,
- "grid_to_shapefile_point",
- "_gridgen_export.dfn",
- ]
+ cmds = [self.exe_name, "grid_to_shapefile_point", "_gridgen_export.dfn"]
buff = []
try:
buff = subprocess.check_output(cmds, cwd=self.model_ws)
@@ -629,17 +591,10 @@ def export(self, verbose=False):
fn = os.path.join(self.model_ws, "qtgrid_pt.shp")
assert os.path.isfile(fn)
except:
- print(
- "Error. Failed to export polygon shapefile of grid",
- buff,
- )
+ print("Error. Failed to export polygon shapefile of grid", buff)
# Export the usg data
- cmds = [
- self.exe_name,
- "grid_to_usgdata",
- "_gridgen_export.dfn",
- ]
+ cmds = [self.exe_name, "grid_to_usgdata", "_gridgen_export.dfn"]
buff = []
try:
buff = subprocess.check_output(cmds, cwd=self.model_ws)
@@ -662,11 +617,7 @@ def export(self, verbose=False):
except:
print("Error. Failed to export vtk file", buff)
- cmds = [
- self.exe_name,
- "grid_to_vtk_sv",
- "_gridgen_export.dfn",
- ]
+ cmds = [self.exe_name, "grid_to_vtk_sv", "_gridgen_export.dfn"]
buff = []
try:
buff = subprocess.check_output(cmds, cwd=self.model_ws)
@@ -675,10 +626,7 @@ def export(self, verbose=False):
fn = os.path.join(self.model_ws, "qtg_sv.vtu")
assert os.path.isfile(fn)
except:
- print(
- "Error. Failed to export shared vertex vtk file",
- buff,
- )
+ print("Error. Failed to export shared vertex vtk file", buff)
def plot(
self,
@@ -815,9 +763,7 @@ def get_disu(
self.nodes = nodes
# nodelay
- nodelay = self.read_qtg_nodesperlay_dat(
- model_ws=self.model_ws, nlay=nlay
- )
+ nodelay = self.read_qtg_nodesperlay_dat(model_ws=self.model_ws, nlay=nlay)
# top
top = [0] * nlay
@@ -964,9 +910,7 @@ def get_nodelay(self):
"""
nlay = self.get_nlay()
- nodelay = self.read_qtg_nodesperlay_dat(
- model_ws=self.model_ws, nlay=nlay
- )
+ nodelay = self.read_qtg_nodesperlay_dat(model_ws=self.model_ws, nlay=nlay)
return nodelay
def get_top(self):
@@ -1302,9 +1246,7 @@ def get_verts_iverts(self, ncells, verbose=False):
"""
from .cvfdutil import to_cvfd
- verts, iverts = to_cvfd(
- self._vertdict, nodestop=ncells, verbose=verbose
- )
+ verts, iverts = to_cvfd(self._vertdict, nodestop=ncells, verbose=verbose)
return verts, iverts
def get_cellxy(self, ncells):
@@ -1690,11 +1632,7 @@ def intersect(self, features, featuretype, layer):
# Load the intersection results as a recarray, convert nodenumber
# to zero-based and return
result = np.genfromtxt(
- fn,
- dtype=None,
- names=True,
- delimiter=",",
- usecols=tuple(range(ncol)),
+ fn, dtype=None, names=True, delimiter=",", usecols=tuple(range(ncol))
)
result = np.atleast_1d(result)
result = result.view(np.recarray)
@@ -1763,9 +1701,7 @@ def _mfgrid_block(self):
if bot.min() == bot.max():
s += f" BOTTOM LAYER {k + 1} = CONSTANT {bot.min()}\n"
else:
- s += " BOTTOM LAYER {0} = OPEN/CLOSE bot{0}.dat\n".format(
- k + 1
- )
+ s += " BOTTOM LAYER {0} = OPEN/CLOSE bot{0}.dat\n".format(k + 1)
fname = os.path.join(self.model_ws, f"bot{k + 1}.dat")
np.savetxt(fname, bot)
@@ -1894,9 +1830,7 @@ def _mkvertdict(self):
self._vertdict[nodenumber] = shapes[i].points
@staticmethod
- def read_qtg_nod(
- model_ws: Union[str, os.PathLike], nodes_only: bool = False
- ):
+ def read_qtg_nod(model_ws: Union[str, os.PathLike], nodes_only: bool = False):
"""Read qtg.nod file
Parameters
@@ -1979,7 +1913,7 @@ def read_qtg_nodesperlay_dat(model_ws: Union[str, os.PathLike], nlay: int):
@staticmethod
def read_quadtreegrid_top_dat(
- model_ws: Union[str, os.PathLike], nodelay: List[int], lay: int
+ model_ws: Union[str, os.PathLike], nodelay: list[int], lay: int
):
"""Read quadtreegrid.top_.dat file
@@ -2002,7 +1936,7 @@ def read_quadtreegrid_top_dat(
@staticmethod
def read_quadtreegrid_bot_dat(
- model_ws: Union[str, os.PathLike], nodelay: List[int], lay: int
+ model_ws: Union[str, os.PathLike], nodelay: list[int], lay: int
):
"""Read quadtreegrid.bot_.dat file
diff --git a/flopy/utils/gridintersect.py b/flopy/utils/gridintersect.py
index eed25d3102..a294884480 100644
--- a/flopy/utils/gridintersect.py
+++ b/flopy/utils/gridintersect.py
@@ -1,79 +1,26 @@
-import contextlib
import warnings
-from itertools import product
import numpy as np
+from pandas import DataFrame
from .geometry import transform
from .geospatial_utils import GeoSpatialUtil
-from .parse_version import Version
from .utl_import import import_optional_dependency
-NUMPY_GE_121 = Version(np.__version__) >= Version("1.21")
-
shapely = import_optional_dependency("shapely", errors="silent")
-if shapely is not None:
- SHAPELY_GE_20 = Version(shapely.__version__) >= Version("2.0a1")
- # shapely > 1.8 required
- if Version(shapely.__version__) < Version("1.8"):
- warnings.warn("GridIntersect requires shapely>=1.8.")
- shapely = None
- if SHAPELY_GE_20:
- from shapely import unary_union
- else:
- from shapely.ops import unary_union
-else:
- SHAPELY_GE_20 = False
-
-shapely_warning = None
-if shapely is not None:
- try:
- from shapely.errors import ShapelyDeprecationWarning as shapely_warning
- except ImportError:
- pass
-
-if shapely_warning is not None and not SHAPELY_GE_20:
-
- @contextlib.contextmanager
- def ignore_shapely_warnings_for_object_array():
- with warnings.catch_warnings():
- warnings.filterwarnings(
- "ignore",
- "Iteration|The array interface|__len__",
- shapely_warning,
- )
- if NUMPY_GE_121:
- # warning from numpy for existing Shapely releases (this is
- # fixed with Shapely 1.8)
- warnings.filterwarnings(
- "ignore",
- "An exception was ignored while fetching",
- DeprecationWarning,
- )
- yield
-
- @contextlib.contextmanager
- def ignore_shapely2_strtree_warning():
- with warnings.catch_warnings():
- warnings.filterwarnings(
- "ignore",
- (
- "STRtree will be changed in 2.0.0 and "
- "will not be compatible with versions < 2."
- ),
- shapely_warning,
- )
- yield
-
-else:
-
- @contextlib.contextmanager
- def ignore_shapely_warnings_for_object_array():
- yield
- @contextlib.contextmanager
- def ignore_shapely2_strtree_warning():
- yield
+# TODO: remove the following methods and classes in version 3.10.0
+# - ModflowGridIndices
+# - GridIntersect:
+# - remove method kwarg from __init__
+# - remove structured methods from intersect() and intersects()
+# - _intersect_point_structured()
+# - _intersect_linestring_structured()
+# - _get_nodes_intersecting_linestring()
+# - _check_adjacent_cells_intersecting_line()
+# - _intersect_rectangle_structured()
+# - _intersect_polygon_structured()
+# - _transform_geo_interface_polygon()
def parse_shapely_ix_result(collection, ix_result, shptyps=None):
@@ -121,9 +68,7 @@ def parse_shapely_ix_result(collection, ix_result, shptyps=None):
class GridIntersect:
- """Class for intersecting shapely geometries (Point, Linestring, Polygon,
- or their Multi variants) with MODFLOW grids. Contains optimized search
- routines for structured grids.
+ """Class for intersecting shapely geometries with MODFLOW grids.
Notes
-----
@@ -135,11 +80,6 @@ class GridIntersect:
with the whole collection at once.
- Building the STR-tree can take a while for large grids. Once built the
intersect routines (for individual shapes) should be pretty fast.
- - The optimized routines for structured grids can outperform the shapely
- routines for point and linestring intersections because of the reduced
- overhead of building and parsing the STR-tree. However, for polygons
- the STR-tree implementation is often faster than the optimized
- structured routines, especially for larger grids.
"""
def __init__(self, mfgrid, method=None, rtree=True, local=False):
@@ -150,27 +90,45 @@ def __init__(self, mfgrid, method=None, rtree=True, local=False):
mfgrid : flopy modflowgrid
MODFLOW grid as implemented in flopy
method : str, optional
- Options are either 'vertex' which uses shapely intersection operations
- or 'structured' which uses optimized methods that only work for structured
- grids. The default is None, which determines intersection method based on
- the grid type.
+            Method to use for intersecting shapes with the grid. Method 'vertex'
+ will be the only option in the future. Method 'structured' is deprecated.
+ This keyword argument will be removed in a future release.
+
+ .. deprecated:: 3.9.0
+ method="vertex" will be the only option from 3.10.0
+
rtree : bool, optional
whether to build an STR-Tree, default is True. If False no STR-tree
is built, but intersects will loop through all model gridcells
- (which is generally slower). Only read when `method='vertex'`.
+ (which is generally slower).
local : bool, optional
use local model coordinates from model grid to build grid geometries,
- default is False and uses real-world coordinates (with offset and rotation),
- if specified.
+ default is False and uses real-world coordinates (with offset and rotation).
"""
+ import_optional_dependency(
+ "shapely", error_message="GridIntersect requires shapely"
+ )
self.mfgrid = mfgrid
self.local = local
+ # TODO: remove method kwarg in version v3.10.0
+ # keep default behavior for v3.9.0, but warn if method is not vertex
+ # allow silencing of warning with method="vertex" in v3.9.0
if method is None:
# determine method from grid_type
self.method = self.mfgrid.grid_type
else:
# set method
self.method = method
+ if self.method != "vertex":
+ warnings.warn(
+ (
+ 'Note `method="structured"` is deprecated. '
+ 'Pass `method="vertex"` to silence this warning. '
+ "This will be the new default in a future release and this "
+ "keyword argument will be removed."
+ ),
+ category=DeprecationWarning,
+ )
self.rtree = rtree
# really only necessary for method=='vertex' as structured methods
@@ -188,8 +146,7 @@ def __init__(self, mfgrid, method=None, rtree=True, local=False):
"shapely.strtree",
error_message="STRTree requires shapely",
)
- with ignore_shapely2_strtree_warning():
- self.strtree = strtree.STRtree(self.geoms)
+ self.strtree = strtree.STRtree(self.geoms)
elif self.method == "structured" and mfgrid.grid_type == "structured":
# geoms and cellids do not need to be assigned for structured
@@ -212,7 +169,8 @@ def intersect(
return_all_intersections=False,
contains_centroid=False,
min_area_fraction=None,
- shapely2=True,
+ geo_dataframe=False,
+ shapely2=None,
):
"""Method to intersect a shape with a model grid.
@@ -244,93 +202,76 @@ def intersect(
float defining minimum intersection area threshold, if intersection
area is smaller than min_frac_area * cell_area, do not store
intersection result, only used if shape type is "polygon"
- shapely2 : bool, optional
- temporary flag to determine whether to use methods optimized for
- shapely 2.0. Useful for comparison performance between the old
- (shapely 1.8) and new (shapely 2.0) implementations.
+ geo_dataframe : bool, optional
+ if True, return a geopandas GeoDataFrame, default is False
Returns
-------
- numpy.recarray
- a record array containing information about the intersection
+    numpy.recarray or geopandas.GeoDataFrame
+ a record array containing information about the intersection or
+ a geopandas.GeoDataFrame if geo_dataframe=True
"""
+ if shapely2 is not None:
+ warnings.warn(
+ "The shapely2 keyword argument is deprecated. "
+ "Shapely<2 support was dropped in flopy version 3.9.0."
+ )
gu = GeoSpatialUtil(shp, shapetype=shapetype)
shp = gu.shapely
if gu.shapetype in ("Point", "MultiPoint"):
- if (
- self.method == "structured"
- and self.mfgrid.grid_type == "structured"
- ):
+ if self.method == "structured" and self.mfgrid.grid_type == "structured":
rec = self._intersect_point_structured(
shp, return_all_intersections=return_all_intersections
)
else:
- if SHAPELY_GE_20 and shapely2:
- rec = self._intersect_point_shapely2(
- shp,
- sort_by_cellid=sort_by_cellid,
- return_all_intersections=return_all_intersections,
- )
- else:
- rec = self._intersect_point_shapely(
- shp,
- sort_by_cellid=sort_by_cellid,
- return_all_intersections=return_all_intersections,
- )
+ rec = self._intersect_point_shapely(
+ shp,
+ sort_by_cellid=sort_by_cellid,
+ return_all_intersections=return_all_intersections,
+ )
elif gu.shapetype in ("LineString", "MultiLineString"):
- if (
- self.method == "structured"
- and self.mfgrid.grid_type == "structured"
- ):
+ if self.method == "structured" and self.mfgrid.grid_type == "structured":
rec = self._intersect_linestring_structured(
shp,
keepzerolengths,
return_all_intersections=return_all_intersections,
)
else:
- if SHAPELY_GE_20 and shapely2:
- rec = self._intersect_linestring_shapely2(
- shp,
- keepzerolengths,
- sort_by_cellid=sort_by_cellid,
- return_all_intersections=return_all_intersections,
- )
- else:
- rec = self._intersect_linestring_shapely(
- shp,
- keepzerolengths,
- sort_by_cellid=sort_by_cellid,
- return_all_intersections=return_all_intersections,
- )
+ rec = self._intersect_linestring_shapely(
+ shp,
+ keepzerolengths,
+ sort_by_cellid=sort_by_cellid,
+ return_all_intersections=return_all_intersections,
+ )
elif gu.shapetype in ("Polygon", "MultiPolygon"):
- if (
- self.method == "structured"
- and self.mfgrid.grid_type == "structured"
- ):
+ if self.method == "structured" and self.mfgrid.grid_type == "structured":
rec = self._intersect_polygon_structured(
shp,
contains_centroid=contains_centroid,
min_area_fraction=min_area_fraction,
)
else:
- if SHAPELY_GE_20 and shapely2:
- rec = self._intersect_polygon_shapely2(
- shp,
- sort_by_cellid=sort_by_cellid,
- contains_centroid=contains_centroid,
- min_area_fraction=min_area_fraction,
- )
- else:
- rec = self._intersect_polygon_shapely(
- shp,
- sort_by_cellid=sort_by_cellid,
- contains_centroid=contains_centroid,
- min_area_fraction=min_area_fraction,
- )
+ rec = self._intersect_polygon_shapely(
+ shp,
+ sort_by_cellid=sort_by_cellid,
+ contains_centroid=contains_centroid,
+ min_area_fraction=min_area_fraction,
+ )
else:
raise TypeError(f"Shapetype {gu.shapetype} is not supported")
+ if geo_dataframe:
+ gpd = import_optional_dependency("geopandas")
+ gdf = (
+ gpd.GeoDataFrame(rec)
+ .rename(columns={"ixshapes": "geometry"})
+ .set_geometry("geometry")
+ )
+ if self.mfgrid.crs is not None:
+ gdf = gdf.set_crs(self.mfgrid.crs)
+ return gdf
+
return rec
def _set_method_get_gridshapes(self):
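
Putting the new intersect() options together: passing method="vertex" silences the deprecation warning above, and geo_dataframe=True returns a GeoDataFrame instead of a recarray. A sketch assuming a modelgrid mg plus shapely and geopandas are installed:

from shapely.geometry import LineString

from flopy.utils.gridintersect import GridIntersect

ix = GridIntersect(mg, method="vertex")           # mg: hypothetical modelgrid
line = LineString([(0.0, 0.0), (100.0, 100.0)])
rec = ix.intersect(line)                          # numpy recarray
gdf = ix.intersect(line, geo_dataframe=True)      # GeoDataFrame, "geometry" column
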
@@ -386,22 +327,14 @@ def _rect_grid_to_geoms_cellids(self):
]
).transpose((1, 2, 0))
- if SHAPELY_GE_20:
- # use array-based methods for speed
- geoms = shapely.polygons(
- shapely.linearrings(
- xverts.flatten(),
- y=yverts.flatten(),
- indices=np.repeat(cellids, 4),
- )
+ # use array-based methods for speed
+ geoms = shapely.polygons(
+ shapely.linearrings(
+ xverts.flatten(),
+ y=yverts.flatten(),
+ indices=np.repeat(cellids, 4),
)
- else:
- from shapely.geometry import Polygon
-
- geoms = []
- for i, j in product(range(nrow), range(ncol)):
- geoms.append(Polygon(zip(xverts[i, j], yverts[i, j])))
- geoms = np.array(geoms)
+ )
return geoms, cellids
@@ -436,9 +369,7 @@ def _vtx_grid_to_geoms_cellids(self):
list(
zip(
*self.mfgrid.get_local_coords(
- *np.array(
- self.mfgrid.get_cell_vertices(node)
- ).T
+ *np.array(self.mfgrid.get_cell_vertices(node)).T
)
)
)
@@ -452,42 +383,6 @@ def _vtx_grid_to_geoms_cellids(self):
]
return np.array(geoms), np.arange(self.mfgrid.ncpl)
- def _rect_grid_to_shape_list(self):
- """internal method, list of shapely polygons for structured grid cells.
-
- .. deprecated:: 3.3.6
- use _rect_grid_to_geoms_cellids() instead.
-
- Returns
- -------
- list
- list of shapely Polygons
- """
- warnings.warn(
- "`_rect_grid_to_shape_list()` is deprecated, please"
- "use `_rect_grid_to_geoms_cellids()` instead.",
- DeprecationWarning,
- )
- return self._rect_grid_to_geoms_cellids()[0].tolist()
-
- def _vtx_grid_to_shape_list(self):
- """internal method, list of shapely polygons for vertex grids.
-
- .. deprecated:: 3.3.6
- use _vtx_grid_to_geoms_cellids() instead.
-
- Returns
- -------
- list
- list of shapely Polygons
- """
- warnings.warn(
- "`_vtx_grid_to_shape_list()` is deprecated, please"
- "use `_vtx_grid_to_geoms_cellids()` instead.",
- DeprecationWarning,
- )
- return self._vtx_grid_to_geoms_cellids()[0].tolist()
-
def query_grid(self, shp):
"""Perform spatial query on grid with shapely geometry. If no spatial
query is possible returns all grid cells.
@@ -503,10 +398,7 @@ def query_grid(self, shp):
array containing cellids of grid cells in query result
"""
if self.rtree:
- if SHAPELY_GE_20:
- result = self.strtree.query(shp)
- else:
- result = np.array(self.strtree.query_items(shp))
+ result = self.strtree.query(shp)
else:
# no spatial query
result = self.cellids
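
[Editorial note] A sketch of the shapely 2.x STRtree behavior relied on above: `query()` returns integer positions into the indexed geometry array, so the result can index `self.geoms` / `self.cellids` directly.

import numpy as np
import shapely
from shapely import STRtree

cells = shapely.box(np.arange(3.0), 0.0, np.arange(1.0, 4.0), 1.0)
tree = STRtree(cells)
idx = tree.query(shapely.box(0.5, 0.0, 1.5, 1.0))
print(np.sort(idx))  # [0 1]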
@@ -533,343 +425,12 @@ def filter_query_result(self, cellids, shp):
filter or generator containing polygons that intersect with shape
"""
# get only gridcells that intersect
- if SHAPELY_GE_20:
- if not shapely.is_prepared(shp):
- shapely.prepare(shp)
- qcellids = cellids[shapely.intersects(self.geoms[cellids], shp)]
- else:
- # prepare shape for efficient batch intersection check
- prepared = import_optional_dependency("shapely.prepared")
- prepshp = prepared.prep(shp)
- qfiltered = filter(
- lambda tup: prepshp.intersects(tup[0]),
- zip(self.geoms[cellids], cellids),
- )
- try:
- _, qcellids = zip(*qfiltered)
- qcellids = np.array(qcellids)
- except ValueError:
- # catch empty filter result (i.e. when rtree=False)
- qcellids = np.empty(0, dtype=int)
+ if not shapely.is_prepared(shp):
+ shapely.prepare(shp)
+ qcellids = cellids[shapely.intersects(self.geoms[cellids], shp)]
return qcellids
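
[Editorial note] A sketch of the prepared-geometry fast path above: `prepare()` works in place, and `intersects()` evaluates the predicate over an array of cell polygons in a single vectorized call.

import numpy as np
import shapely

cells = shapely.box(np.arange(5.0), 0.0, np.arange(1.0, 6.0), 1.0)
shp = shapely.box(1.25, 0.25, 2.75, 0.75)
shapely.prepare(shp)  # in place; speeds up repeated predicate tests
qcellids = np.flatnonzero(shapely.intersects(cells, shp))
print(qcellids)  # [1 2]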
- @staticmethod
- def sort_gridshapes(geoms, cellids):
- """Sort geometries (from i.e. query result) by cell id.
-
- .. deprecated:: 3.3.6
- sorting is now performed on cellids.
-
- Parameters
- ----------
- geoms : iterable
- list or iterable of geometries
-
- Returns
- -------
- list
- sorted list of gridcells
- """
- warnings.warn(
- "`sort_gridshapes()` is deprecated, sort cellids"
- " and use that to select geometries, i.e. "
- "`GridIntersect.geoms[sorted_cellids]`.",
- DeprecationWarning,
- )
- return [
- igeom
- for _, igeom in sorted(
- zip(cellids, geoms), key=lambda pair: pair[0]
- )
- ]
-
def _intersect_point_shapely(
- self, shp, sort_by_cellid=True, return_all_intersections=False
- ):
- """intersect grid with Point or MultiPoint.
-
- Parameters
- ----------
- shp : Point or MultiPoint
-
- shapely Point or MultiPoint to intersect with grid. Note, it is
- generally faster to loop over a MultiPoint and intersect per point
- than to intersect a MultiPoint directly.
- sort_by_cellid : bool, optional
- flag whether to sort cells by id, used to ensure node with lowest
- id is returned, by default True
- return_all_intersections : bool, optional
- if True, return multiple intersection results for points on grid
- cell boundaries (e.g. returns 2 intersection results if a point
- lies on the boundary between two grid cells). The default is
- False, which will return a single intersection result for boundary
- cases.
-
- Returns
- -------
- numpy.recarray
- a record array containing information about the intersection
- """
- shapely_geo = import_optional_dependency("shapely.geometry")
-
- # query grid
- qcellids = self.query_grid(shp) # returns cellids
- if len(qcellids) > 0:
- qfiltered = self.filter_query_result(qcellids, shp)
- else:
- # query result is empty
- qfiltered = qcellids
- # sort cells to ensure lowest cell ids are returned
- if sort_by_cellid:
- qfiltered.sort()
-
- isectshp = []
- cellids = []
- vertices = []
- parsed_points = [] # for keeping track of points
-
- # loop over cells returned by filtered spatial query
- for cid in qfiltered:
- r = self.geoms[cid]
- # do intersection
- intersect = shp.intersection(r)
- # parse result per Point
- collection = parse_shapely_ix_result(
- [], intersect, shptyps=["Point"]
- )
- # loop over intersection result and store information
- cell_verts = []
- cell_shps = []
- for c in collection:
- verts = c.__geo_interface__["coordinates"]
- # avoid returning multiple cells for points on boundaries
- # if return_all_intersections is False
- if not return_all_intersections:
- if verts in parsed_points:
- continue
- parsed_points.append(verts)
- cell_shps.append(c) # collect points
- cell_verts.append(verts)
- # if any new ix found
- if len(cell_shps) > 0:
- # combine new points in MultiPoint
- isectshp.append(
- shapely_geo.MultiPoint(cell_shps)
- if len(cell_shps) > 1
- else cell_shps[0]
- )
- vertices.append(tuple(cell_verts))
- # if structured calculated (i, j) cell address
- if self.mfgrid.grid_type == "structured":
- cid = self.mfgrid.get_lrc([cid])[0][1:]
- cellids.append(cid)
-
- rec = np.recarray(
- len(isectshp),
- names=["cellids", "vertices", "ixshapes"],
- formats=["O", "O", "O"],
- )
- with ignore_shapely_warnings_for_object_array():
- rec.ixshapes = isectshp
- rec.vertices = vertices
- rec.cellids = cellids
-
- return rec
-
- def _intersect_linestring_shapely(
- self,
- shp,
- keepzerolengths=False,
- sort_by_cellid=True,
- return_all_intersections=False,
- ):
- """intersect with LineString or MultiLineString.
-
- Parameters
- ----------
- shp : shapely.geometry.LineString or MultiLineString
- LineString to intersect with the grid
- keepzerolengths : bool, optional
- keep linestrings with length zero, default is False
- sort_by_cellid : bool, optional
- flag whether to sort cells by id, used to ensure node
- with lowest id is returned, by default True
- return_all_intersections : bool, optional
- if True, return multiple intersection results for linestrings on
- grid cell boundaries (e.g. returns 2 intersection results if a
- linestring lies on the boundary between two grid cells). The
- default is False, which will return a single intersection result
- for boundary cases.
-
- Returns
- -------
- numpy.recarray
- a record array containing information about the intersection
- """
- # query grid
- qcellids = self.query_grid(shp)
- if len(qcellids) > 0:
- # filter result further if possible (only strtree and filter methods)
- qfiltered = self.filter_query_result(qcellids, shp)
- else:
- # query result is empty
- qfiltered = qcellids
- # sort cells to ensure lowest cell ids are returned
- if sort_by_cellid:
- qfiltered.sort()
-
- # initialize empty lists for storing results
- isectshp = []
- cellids = []
- vertices = []
- vertices_check = []
- lengths = []
-
- # loop over cells returned by filtered spatial query
- for cid in qfiltered:
- r = self.geoms[cid]
- # do intersection
- intersect = shp.intersection(r)
- # parse result
- collection = parse_shapely_ix_result(
- [], intersect, shptyps=["LineString", "MultiLineString"]
- )
- # loop over intersection result and store information
- for c in collection:
- verts = c.__geo_interface__["coordinates"]
- # test if linestring was already processed (if on boundary),
- # ignore if return_all_intersections is True
- if not return_all_intersections:
- if verts in vertices_check:
- continue
- # if keep zero don't check length
- if not keepzerolengths:
- if c.length == 0.0:
- continue
- isectshp.append(c)
- lengths.append(c.length)
- vertices.append(verts)
- # unpack mutlilinestring for checking if linestring already parsed
- if c.geom_type.startswith("Multi"):
- vertices_check += [iv for iv in verts]
- else:
- vertices_check.append(verts)
- # if structured calculate (i, j) cell address
- if self.mfgrid.grid_type == "structured":
- cid = self.mfgrid.get_lrc([cid])[0][1:]
- cellids.append(cid)
-
- rec = np.recarray(
- len(isectshp),
- names=["cellids", "vertices", "lengths", "ixshapes"],
- formats=["O", "O", "f8", "O"],
- )
- with ignore_shapely_warnings_for_object_array():
- rec.ixshapes = isectshp
- rec.vertices = vertices
- rec.lengths = lengths
- rec.cellids = cellids
-
- return rec
-
- def _intersect_polygon_shapely(
- self,
- shp,
- sort_by_cellid=True,
- contains_centroid=False,
- min_area_fraction=None,
- ):
- """intersect with Polygon or MultiPolygon.
-
- Parameters
- ----------
- shp : shapely.geometry.Polygon or MultiPolygon
- shape to intersect with the grid
- sort_by_cellid : bool, optional
- flag whether to sort cells by id, used to ensure node
- with lowest id is returned, by default True
- contains_centroid : bool, optional
- if True, only store intersection result if cell centroid is
- contained within intersection shape
- min_area_fraction : float, optional
- float defining minimum intersection area threshold, if
- intersection area is smaller than min_frac_area * cell_area, do
- not store intersection result
-
- Returns
- -------
- numpy.recarray
- a record array containing information about the intersection
- """
- shapely_geo = import_optional_dependency("shapely.geometry")
-
- # query grid
- qcellids = self.query_grid(shp)
- if len(qcellids) > 0:
- # filter result further if possible (only strtree and filter methods)
- qfiltered = self.filter_query_result(qcellids, shp)
- else:
- # query result is empty
- qfiltered = qcellids
- # sort cells to ensure lowest cell ids are returned
- if sort_by_cellid:
- qfiltered.sort()
-
- isectshp = []
- cellids = []
- vertices = []
- areas = []
-
- # loop over cells returned by filtered spatial query
- for cid in qfiltered:
- r = self.geoms[cid]
- # do intersection
- intersect = shp.intersection(r)
- # parse result
- collection = parse_shapely_ix_result(
- [], intersect, shptyps=["Polygon", "MultiPolygon"]
- )
- if len(collection) > 1:
- collection = [shapely_geo.MultiPolygon(collection)]
- # loop over intersection result and store information
- for c in collection:
- # don't store intersections with 0 area
- if c.area == 0.0:
- continue
- # option: only store result if cell centroid is contained
- # within intersection result
- if contains_centroid:
- if not c.intersects(r.centroid):
- continue
- # option: min_area_fraction, only store if intersected area
- # is larger than fraction * cell_area
- if min_area_fraction:
- if c.area < (min_area_fraction * r.area):
- continue
-
- verts = c.__geo_interface__["coordinates"]
- isectshp.append(c)
- areas.append(c.area)
- vertices.append(verts)
- # if structured calculate (i, j) cell address
- if self.mfgrid.grid_type == "structured":
- cid = self.mfgrid.get_lrc([cid])[0][1:]
- cellids.append(cid)
-
- rec = np.recarray(
- len(isectshp),
- names=["cellids", "vertices", "areas", "ixshapes"],
- formats=["O", "O", "f8", "O"],
- )
- with ignore_shapely_warnings_for_object_array():
- rec.ixshapes = isectshp
- rec.vertices = vertices
- rec.areas = areas
- rec.cellids = cellids
-
- return rec
-
- def _intersect_point_shapely2(
self,
shp,
sort_by_cellid=True,
@@ -898,9 +459,10 @@ def _intersect_point_shapely2(
for ishp, cid in zip(ixresult, qcellids):
points = []
for pnt in shapely.get_parts(ishp):
- if tuple(pnt.coords)[0] not in parsed:
+ next_pnt = next(iter(pnt.coords))
+ if next_pnt not in parsed:
points.append(pnt)
- parsed.append(tuple(pnt.coords)[0])
+ parsed.append(next_pnt)
if len(points) > 1:
keep_pts.append(shapely.MultiPoint(points))
@@ -927,7 +489,7 @@ def _intersect_point_shapely2(
return rec
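
[Editorial note] Why the `parsed` bookkeeping above is needed: a point on a shared cell edge intersects both neighbors, so without deduplication it would be reported twice. A minimal sketch:

import shapely

left = shapely.box(0, 0, 1, 1)
right = shapely.box(1, 0, 2, 1)
pt = shapely.Point(1.0, 0.5)  # on the shared edge

parsed = []
for cid, cell in enumerate([left, right]):
    if not pt.intersects(cell):
        continue
    xy = next(iter(pt.coords))
    if xy not in parsed:  # skip the second (higher-id) cell
        parsed.append(xy)
        print("keep point in cell", cid)  # prints only for cell 0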
- def _intersect_linestring_shapely2(
+ def _intersect_linestring_shapely(
self,
shp,
keepzerolengths=False,
@@ -954,31 +516,48 @@ def _intersect_linestring_shapely2(
mask_empty = shapely.is_empty(ixresult)
# keep only Linestring and MultiLineString
geomtype_ids = shapely.get_type_id(ixresult)
- mask_type = np.isin(geomtype_ids, [1, 5, 7])
+ all_ids = [
+ shapely.GeometryType.LINESTRING,
+ shapely.GeometryType.MULTILINESTRING,
+ shapely.GeometryType.GEOMETRYCOLLECTION,
+ ]
+ line_ids = [
+ shapely.GeometryType.LINESTRING,
+ shapely.GeometryType.MULTILINESTRING,
+ ]
+ mask_type = np.isin(geomtype_ids, all_ids)
ixresult = ixresult[~mask_empty & mask_type]
qcellids = qcellids[~mask_empty & mask_type]
# parse geometry collections (i.e. when part of linestring touches a cell edge,
# resulting in a point intersection result)
- if 7 in geomtype_ids:
+ if shapely.GeometryType.GEOMETRYCOLLECTION in geomtype_ids:
def parse_linestrings_in_geom_collection(gc):
parts = shapely.get_parts(gc)
- parts = parts[np.isin(shapely.get_type_id(parts), [1, 5])]
+ parts = parts[np.isin(shapely.get_type_id(parts), line_ids)]
if len(parts) > 1:
- p = shapely.multilinestring(parts)
+ p = shapely.multilinestrings(parts)
elif len(parts) == 0:
p = shapely.LineString()
else:
p = parts[0]
return p
- mask_gc = geomtype_ids[~mask_empty & mask_type] == 7
- ixresult[mask_gc] = np.apply_along_axis(
- parse_linestrings_in_geom_collection,
- axis=0,
- arr=ixresult[mask_gc],
+ mask_gc = (
+ geomtype_ids[~mask_empty & mask_type]
+ == shapely.GeometryType.GEOMETRYCOLLECTION
)
+ # NOTE: not working for multiple geometry collections, result is reduced
+ # to a single multilinestring, which causes duplicates in the result
+ # ixresult[mask_gc] = np.apply_along_axis(
+ # parse_linestrings_in_geom_collection,
+ # axis=0,
+ # arr=ixresult[mask_gc],
+ # )
+ ixresult[mask_gc] = [
+ parse_linestrings_in_geom_collection(gc) for gc in ixresult[mask_gc]
+ ]
if not return_all_intersections:
# intersection with grid cell boundaries
@@ -986,7 +565,7 @@ def parse_linestrings_in_geom_collection(gc):
shp, shapely.get_exterior_ring(self.geoms[qcellids])
)
mask_bnds_empty = shapely.is_empty(ixbounds)
- mask_bnds_type = np.isin(shapely.get_type_id(ixbounds), [1, 5])
+ mask_bnds_type = np.isin(shapely.get_type_id(ixbounds), all_ids)
# get ids of boundary intersections
idxs = np.nonzero(~mask_bnds_empty & mask_bnds_type)[0]
@@ -999,10 +578,8 @@ def parse_linestrings_in_geom_collection(gc):
# masks to obtain overlapping intersection result
mask_self = idxs == i # select not self
- mask_bnds_empty = shapely.is_empty(
- isect
- ) # select boundary ix result
- mask_overlap = np.isin(shapely.get_type_id(isect), [1, 5])
+ mask_bnds_empty = shapely.is_empty(isect) # select boundary ix result
+ mask_overlap = np.isin(shapely.get_type_id(isect), all_ids)
# calculate difference between self and overlapping result
diff = shapely.difference(
@@ -1034,7 +611,7 @@ def parse_linestrings_in_geom_collection(gc):
return rec
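
[Editorial note] The replacement of hard-coded type ids with `shapely.GeometryType` above is safe because the enum is an IntEnum and compares equal to the raw ids returned by `get_type_id()`:

import shapely

ls = shapely.LineString([(0, 0), (1, 1)])
print(shapely.get_type_id(ls))  # 1
print(shapely.get_type_id(ls) == shapely.GeometryType.LINESTRING)  # True
print(int(shapely.GeometryType.MULTILINESTRING))  # 5
print(int(shapely.GeometryType.GEOMETRYCOLLECTION))  # 7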
- def _intersect_polygon_shapely2(
+ def _intersect_polygon_shapely(
self,
shp,
sort_by_cellid=True,
@@ -1075,17 +652,15 @@ def parse_polygons_in_geom_collection(gc):
mask_gc = geomtype_ids[~mask_empty & mask_type] == 7
ixresult[mask_gc] = np.apply_along_axis(
- parse_polygons_in_geom_collection,
- axis=0,
- arr=ixresult[mask_gc],
+ parse_polygons_in_geom_collection, axis=0, arr=ixresult[mask_gc]
)
# check centroids
if contains_centroid:
centroids = shapely.centroid(self.geoms[qcellids])
- mask_centroid = shapely.contains(
+ mask_centroid = shapely.contains(ixresult, centroids) | shapely.touches(
ixresult, centroids
- ) | shapely.touches(ixresult, centroids)
+ )
ixresult = ixresult[mask_centroid]
qcellids = qcellids[mask_centroid]
@@ -1113,7 +688,7 @@ def parse_polygons_in_geom_collection(gc):
return rec
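
[Editorial note] A sketch of the vectorized `contains_centroid` filter above: a cell is kept only if its centroid lies within (or touches) its own intersection result.

import numpy as np
import shapely

cells = shapely.box(np.arange(3.0), 0.0, np.arange(1.0, 4.0), 1.0)
shp = shapely.box(0.6, 0.0, 2.1, 1.0)
ixresult = shapely.intersection(cells, shp)
centroids = shapely.centroid(cells)
mask = shapely.contains(ixresult, centroids) | shapely.touches(ixresult, centroids)
print(mask)  # [False  True False]: only cell 1's result covers its centroid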
- def intersects(self, shp, shapetype=None):
+ def intersects(self, shp, shapetype=None, dataframe=False):
"""Return cellids for grid cells that intersect with shape.
Parameters
@@ -1125,26 +700,17 @@ def intersects(self, shp, shapetype=None):
type of shape (i.e. "point", "linestring", "polygon" or
their multi-variants), used by GeoSpatialUtil if shp is
passed as a list of vertices, default is None
+ dataframe : bool, optional
+ if True, return a pandas.DataFrame, default is False
Returns
-------
- numpy.recarray
- a record array containing cell IDs of the gridcells
- the shape intersects with
+ numpy.recarray or pandas.DataFrame
+ a record array or pandas.DataFrame containing cell IDs of the gridcells
+ the shape intersects with.
"""
shp = GeoSpatialUtil(shp, shapetype=shapetype).shapely
-
- if SHAPELY_GE_20:
- qfiltered = self.strtree.query(shp, predicate="intersects")
- else:
- # query grid
- qcellids = self.query_grid(shp)
- if len(qcellids) > 0:
- # filter result further if possible (only strtree and filter methods)
- qfiltered = self.filter_query_result(qcellids, shp)
- else:
- # query result is empty
- qfiltered = qcellids
+ qfiltered = self.strtree.query(shp, predicate="intersects")
# build rec-array
rec = np.recarray(len(qfiltered), names=["cellids"], formats=["O"])
@@ -1152,11 +718,17 @@ def intersects(self, shp, shapetype=None):
rec.cellids = list(zip(*self.mfgrid.get_lrc([qfiltered])[0][1:]))
else:
rec.cellids = qfiltered
+
+ if dataframe:
+ return DataFrame(rec)
return rec
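
[Editorial note] The new `dataframe=True` option above simply wraps the cellids recarray; `pandas.DataFrame` accepts a recarray directly:

import numpy as np
from pandas import DataFrame

rec = np.recarray(2, names=["cellids"], formats=["O"])
rec.cellids = [(0, 1), (0, 2)]
print(DataFrame(rec))  # one "cellids" column holding the (i, j) tuples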
def _intersect_point_structured(self, shp, return_all_intersections=False):
"""intersection method for intersecting points with structured grids.
+ .. deprecated:: 3.9.0
+ use _intersect_point_shapely() or set method="vertex" in GridIntersect.
+
Parameters
----------
shp : shapely.geometry.Point or MultiPoint
@@ -1274,9 +846,7 @@ def _intersect_point_structured(self, shp, return_all_intersections=False):
tempnodes.append(node)
tempshapes.append(ixs)
else:
- tempshapes[-1] = shapely_geo.MultiPoint(
- [tempshapes[-1], ixs]
- )
+ tempshapes[-1] = shapely_geo.MultiPoint([tempshapes[-1], ixs])
ixshapes = tempshapes
nodelist = tempnodes
@@ -1285,8 +855,7 @@ def _intersect_point_structured(self, shp, return_all_intersections=False):
len(nodelist), names=["cellids", "ixshapes"], formats=["O", "O"]
)
rec.cellids = nodelist
- with ignore_shapely_warnings_for_object_array():
- rec.ixshapes = ixshapes
+ rec.ixshapes = ixshapes
return rec
def _intersect_linestring_structured(
@@ -1294,6 +863,9 @@ def _intersect_linestring_structured(
):
"""method for intersecting linestrings with structured grids.
+ .. deprecated:: 3.9.0
+ use _intersect_linestring_shapely() or set method="vertex" in GridIntersect.
+
Parameters
----------
shp : shapely.geometry.Linestring or MultiLineString
@@ -1314,6 +886,7 @@ def _intersect_linestring_structured(
numpy.recarray
a record array containing information about the intersection
"""
+ shapely = import_optional_dependency("shapely")
shapely_geo = import_optional_dependency("shapely.geometry")
affinity_loc = import_optional_dependency("shapely.affinity")
@@ -1339,9 +912,7 @@ def _intersect_linestring_structured(
shp, xoff=-self.mfgrid.xoffset, yoff=-self.mfgrid.yoffset
)
if self.mfgrid.angrot != 0.0 and not self.local:
- shp = affinity_loc.rotate(
- shp, -self.mfgrid.angrot, origin=(0.0, 0.0)
- )
+ shp = affinity_loc.rotate(shp, -self.mfgrid.angrot, origin=(0.0, 0.0))
# clip line to mfgrid bbox
lineclip = shp.intersection(pl)
@@ -1386,9 +957,7 @@ def _intersect_linestring_structured(
ix, self.mfgrid.angrot, origin=(0.0, 0.0)
)
ix_realworld = affinity_loc.translate(
- ix_realworld,
- self.mfgrid.xoffset,
- self.mfgrid.yoffset,
+ ix_realworld, self.mfgrid.xoffset, self.mfgrid.yoffset
)
ixs_realworld.append(ix_realworld)
else:
@@ -1397,13 +966,10 @@ def _intersect_linestring_structured(
vertices += v_realworld
ixshapes += ixs_realworld
else: # linestring is fully within grid
- (
- nodelist,
- lengths,
- vertices,
- ixshapes,
- ) = self._get_nodes_intersecting_linestring(
- lineclip, return_all_intersections=return_all_intersections
+ (nodelist, lengths, vertices, ixshapes) = (
+ self._get_nodes_intersecting_linestring(
+ lineclip, return_all_intersections=return_all_intersections
+ )
)
# if necessary, transform coordinates back to real
# world coordinates
@@ -1452,9 +1018,7 @@ def _intersect_linestring_structured(
templengths.append(
sum([l for l, i in zip(lengths, nodelist) if i == inode])
)
- tempverts.append(
- [v for v, i in zip(vertices, nodelist) if i == inode]
- )
+ tempverts.append([v for v, i in zip(vertices, nodelist) if i == inode])
tempshapes.append(
[ix for ix, i in zip(ixshapes, nodelist) if i == inode]
)
@@ -1478,7 +1042,7 @@ def _intersect_linestring_structured(
tempverts.append(vertices[i])
ishp = ixshapes[i]
if isinstance(ishp, list):
- ishp = unary_union(ishp)
+ ishp = shapely.unary_union(ishp)
tempshapes.append(ishp)
nodelist = tempnodes
lengths = templengths
@@ -1493,8 +1057,7 @@ def _intersect_linestring_structured(
rec.vertices = vertices
rec.lengths = lengths
rec.cellids = nodelist
- with ignore_shapely_warnings_for_object_array():
- rec.ixshapes = ixshapes
+ rec.ixshapes = ixshapes
return rec
@@ -1505,6 +1068,9 @@ def _get_nodes_intersecting_linestring(
and return a list of node indices and the length of the line in that
node.
+ .. deprecated:: 3.9.0
+ method="structured" is deprecated.
+
Parameters
----------
linestring: shapely.geometry.LineString or MultiLineString
@@ -1576,13 +1142,10 @@ def _get_nodes_intersecting_linestring(
n = 0
while True:
(i, j) = nodelist[n]
- (
- node,
- length,
- verts,
- ixshape,
- ) = self._check_adjacent_cells_intersecting_line(
- linestring, (i, j), nodelist
+ (node, length, verts, ixshape) = (
+ self._check_adjacent_cells_intersecting_line(
+ linestring, (i, j), nodelist
+ )
)
for inode, ilength, ivert, ix in zip(node, length, verts, ixshape):
@@ -1605,11 +1168,12 @@ def _get_nodes_intersecting_linestring(
return nodelist, lengths, vertices, ixshapes
- def _check_adjacent_cells_intersecting_line(
- self, linestring, i_j, nodelist
- ):
+ def _check_adjacent_cells_intersecting_line(self, linestring, i_j, nodelist):
"""helper method that follows a line through a structured grid.
+ .. deprecated:: 3.9.0
+ method="structured" is deprecated.
+
Parameters
----------
linestring : shapely.geometry.LineString
@@ -1782,6 +1346,9 @@ def _intersect_rectangle_structured(self, rectangle):
"""intersect a rectangle with a structured grid to retrieve node ids of
intersecting grid cells.
+ .. deprecated:: 3.9.0
+ method="structured" is deprecated.
+
Note: only works in local coordinates (i.e. non-rotated grid
with origin at (0, 0))
@@ -1867,6 +1434,10 @@ def _intersect_polygon_structured(
"""intersect polygon with a structured grid. Uses bounding box of the
Polygon to limit search space.
+ .. deprecated:: 3.9.0
+ method="structured" is deprecated. Use `_intersect_polygon_shapely()`.
+
Notes
-----
If performance is slow, try setting the method to 'vertex'
@@ -1907,9 +1478,7 @@ def _intersect_polygon_structured(
shp, xoff=-self.mfgrid.xoffset, yoff=-self.mfgrid.yoffset
)
if self.mfgrid.angrot != 0.0 and not self.local:
- shp = affinity_loc.rotate(
- shp, -self.mfgrid.angrot, origin=(0.0, 0.0)
- )
+ shp = affinity_loc.rotate(shp, -self.mfgrid.angrot, origin=(0.0, 0.0))
# use the bounds of the polygon to restrict the cell search
minx, miny, maxx, maxy = shp.bounds
@@ -1925,10 +1494,7 @@ def _intersect_polygon_structured(
cell_coords = [
(self.mfgrid.xyedges[0][j], self.mfgrid.xyedges[1][i]),
(self.mfgrid.xyedges[0][j + 1], self.mfgrid.xyedges[1][i]),
- (
- self.mfgrid.xyedges[0][j + 1],
- self.mfgrid.xyedges[1][i + 1],
- ),
+ (self.mfgrid.xyedges[0][j + 1], self.mfgrid.xyedges[1][i + 1]),
(self.mfgrid.xyedges[0][j], self.mfgrid.xyedges[1][i + 1]),
]
else:
@@ -1957,9 +1523,7 @@ def _intersect_polygon_structured(
# option: min_area_fraction, only store if intersected area
# is larger than fraction * cell_area
if min_area_fraction:
- if intersect.area < (
- min_area_fraction * cell_polygon.area
- ):
+ if intersect.area < (min_area_fraction * cell_polygon.area):
continue
nodelist.append((i, j))
@@ -1975,20 +1539,14 @@ def _intersect_polygon_structured(
v_realworld = []
if intersect.geom_type.startswith("Multi"):
for ipoly in intersect.geoms:
- v_realworld += (
- self._transform_geo_interface_polygon(ipoly)
- )
+ v_realworld += self._transform_geo_interface_polygon(ipoly)
else:
- v_realworld += self._transform_geo_interface_polygon(
- intersect
- )
+ v_realworld += self._transform_geo_interface_polygon(intersect)
intersect_realworld = affinity_loc.rotate(
intersect, self.mfgrid.angrot, origin=(0.0, 0.0)
)
intersect_realworld = affinity_loc.translate(
- intersect_realworld,
- self.mfgrid.xoffset,
- self.mfgrid.yoffset,
+ intersect_realworld, self.mfgrid.xoffset, self.mfgrid.yoffset
)
else:
v_realworld = intersect.__geo_interface__["coordinates"]
@@ -2004,8 +1562,7 @@ def _intersect_polygon_structured(
rec.vertices = vertices
rec.areas = areas
rec.cellids = nodelist
- with ignore_shapely_warnings_for_object_array():
- rec.ixshapes = ixshapes
+ rec.ixshapes = ixshapes
return rec
@@ -2013,6 +1570,10 @@ def _transform_geo_interface_polygon(self, polygon):
"""Internal method, helper function to transform geometry
__geo_interface__.
+ .. deprecated:: 3.9.0
+ method="structured" is deprecated. Only used by
+ `_intersect_polygon_structured()`.
+
Used for translating intersection result coordinates back into
real-world coordinates.
@@ -2076,17 +1637,16 @@ def _transform_geo_interface_polygon(self, polygon):
return geom_list
@staticmethod
- def plot_polygon(rec, ax=None, **kwargs):
+ def plot_polygon(result, ax=None, **kwargs):
"""method to plot the polygon intersection results from the resulting
numpy.recarray.
- Note: only works when recarray has 'intersects' column!
+ Note: only works when recarray has 'ixshapes' column!
Parameters
----------
- rec : numpy.recarray
- record array containing intersection results
- (the resulting shapes)
+ result : numpy.recarray or geopandas.GeoDataFrame
+ record array or GeoDataFrame containing intersection results
ax : matplotlib.pyplot.axes, optional
axes to plot onto, if not provided, creates a new figure
**kwargs:
@@ -2103,6 +1663,10 @@ def plot_polygon(rec, ax=None, **kwargs):
if ax is None:
_, ax = plt.subplots()
+ ax.set_aspect("equal", adjustable="box")
+ autoscale = True
+ else:
+ autoscale = False
patches = []
if "facecolor" in kwargs:
@@ -2117,7 +1681,11 @@ def add_poly_patch(poly):
ppi = _polygon_patch(poly, facecolor=fc, **kwargs)
patches.append(ppi)
- for i, ishp in enumerate(rec.ixshapes):
+ # allow for result to be geodataframe
+ geoms = (
+ result.ixshapes if isinstance(result, np.rec.recarray) else result.geometry
+ )
+ for i, ishp in enumerate(geoms):
if hasattr(ishp, "geoms"):
for geom in ishp.geoms:
add_poly_patch(geom)
@@ -2127,20 +1695,22 @@ def add_poly_patch(poly):
pc = PatchCollection(patches, match_original=True)
ax.add_collection(pc)
+ if autoscale:
+ ax.autoscale_view()
+
return ax
@staticmethod
- def plot_linestring(rec, ax=None, cmap=None, **kwargs):
+ def plot_linestring(result, ax=None, cmap=None, **kwargs):
"""method to plot the linestring intersection results from the
resulting numpy.recarray.
- Note: only works when recarray has 'intersects' column!
+ Note: only works when recarray has 'ixshapes' column!
Parameters
----------
- rec : numpy.recarray
- record array containing intersection results
- (the resulting shapes)
+ result : numpy.recarray or geopandas.GeoDataFrame
+ record array or GeoDataFrame containing intersection results
ax : matplotlib.pyplot.axes, optional
axes to plot onto, if not provided, creates a new figure
cmap : str
@@ -2157,6 +1727,7 @@ def plot_linestring(rec, ax=None, cmap=None, **kwargs):
if ax is None:
_, ax = plt.subplots()
+ ax.set_aspect("equal", adjustable="box")
specified_color = True
if "c" in kwargs:
@@ -2168,9 +1739,13 @@ def plot_linestring(rec, ax=None, cmap=None, **kwargs):
if cmap is not None:
colormap = plt.get_cmap(cmap)
- colors = colormap(np.linspace(0, 1, rec.shape[0]))
+ colors = colormap(np.linspace(0, 1, result.shape[0]))
- for i, ishp in enumerate(rec.ixshapes):
+ # allow for result to be geodataframe
+ geoms = (
+ result.ixshapes if isinstance(result, np.rec.recarray) else result.geometry
+ )
+ for i, ishp in enumerate(geoms):
if not specified_color:
if cmap is None:
c = f"C{i % 10}"
@@ -2185,16 +1760,16 @@ def plot_linestring(rec, ax=None, cmap=None, **kwargs):
return ax
@staticmethod
- def plot_point(rec, ax=None, **kwargs):
+ def plot_point(result, ax=None, **kwargs):
"""method to plot the point intersection results from the resulting
numpy.recarray.
- Note: only works when recarray has 'intersects' column!
+ Note: only works when recarray has 'ixshapes' column!
Parameters
----------
- rec : numpy.recarray
- record array containing intersection results
+ result : numpy.recarray or geopandas.GeoDataFrame
+ record array or GeoDataFrame containing intersection results
ax : matplotlib.pyplot.axes, optional
axes to plot onto, if not provided, creates a new figure
**kwargs:
@@ -2213,7 +1788,11 @@ def plot_point(rec, ax=None, **kwargs):
_, ax = plt.subplots()
x, y = [], []
- geo_coll = shapely_geo.GeometryCollection(list(rec.ixshapes))
+ # allow for result to be geodataframe
+ geoms = (
+ result.ixshapes if isinstance(result, np.rec.recarray) else result.geometry
+ )
+ geo_coll = shapely_geo.GeometryCollection(list(geoms))
collection = parse_shapely_ix_result([], geo_coll, ["Point"])
for c in collection:
x.append(c.x)
@@ -2222,10 +1801,62 @@ def plot_point(rec, ax=None, **kwargs):
return ax
+ def plot_intersection_result(self, result, plot_grid=True, ax=None, **kwargs):
+ """Plot intersection result.
+
+ Parameters
+ ----------
+ result : numpy.rec.recarray or geopandas.GeoDataFrame
+ result of intersect()
+ plot_grid : bool, optional
+ plot model grid, by default True
+ ax : matplotlib.Axes, optional
+ axes to plot on, by default None which creates a new axis
+
+ Returns
+ -------
+ ax : matplotlib.Axes
+ returns axes handle
+ """
+ shapely = import_optional_dependency("shapely")
+
+ if plot_grid:
+ self.mfgrid.plot(ax=ax)
+
+ geoms = (
+ result["ixshapes"]
+ if isinstance(result, np.rec.recarray)
+ else result["geometry"]
+ )
+ if np.isin(
+ shapely.get_type_id(geoms),
+ [shapely.GeometryType.POINT, shapely.GeometryType.MULTIPOINT],
+ ).all():
+ ax = GridIntersect.plot_point(result, ax=ax, **kwargs)
+ elif np.isin(
+ shapely.get_type_id(geoms),
+ [
+ shapely.GeometryType.LINESTRING,
+ shapely.GeometryType.MULTILINESTRING,
+ ],
+ ).all():
+ ax = GridIntersect.plot_linestring(result, ax=ax, **kwargs)
+ elif np.isin(
+ shapely.get_type_id(geoms),
+ [shapely.GeometryType.POLYGON, shapely.GeometryType.MULTIPOLYGON],
+ ).all():
+ ax = GridIntersect.plot_polygon(result, ax=ax, **kwargs)
+
+ return ax
+
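[Editorial note] A usage sketch of the new plot_intersection_result() above, which dispatches to plot_point/plot_linestring/plot_polygon based on the geometry types in the result. The grid construction is illustrative, and the method exists only with this patch applied.

import numpy as np
from shapely.geometry import LineString

import flopy
from flopy.utils.gridintersect import GridIntersect

grid = flopy.discretization.StructuredGrid(delr=np.ones(10), delc=np.ones(10))
ix = GridIntersect(grid, method="vertex")
result = ix.intersect(LineString([(0.5, 0.5), (9.5, 9.5)]))
ax = ix.plot_intersection_result(result)  # grid plus colored segments
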
class ModflowGridIndices:
"""Collection of methods that can be used to find cell indices for a
- structured, but irregularly spaced MODFLOW grid."""
+ structured, but irregularly spaced MODFLOW grid.
+
+ .. deprecated:: 3.9.0
+ This class is deprecated and will be removed in version 3.10.0.
+ """
@staticmethod
def find_position_in_array(arr, x):
@@ -2366,10 +1997,7 @@ def _polygon_patch(polygon, **kwargs):
patch = PathPatch(
Path.make_compound_path(
Path(np.asarray(polygon.exterior.coords)[:, :2]),
- *[
- Path(np.asarray(ring.coords)[:, :2])
- for ring in polygon.interiors
- ],
+ *[Path(np.asarray(ring.coords)[:, :2]) for ring in polygon.interiors],
),
**kwargs,
)
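
[Editorial note] What `_polygon_patch` does, sketched standalone: the exterior and interior rings are merged into one compound matplotlib Path so holes render as holes rather than as filled rings.

import numpy as np
from matplotlib.patches import PathPatch
from matplotlib.path import Path
from shapely.geometry import Polygon

poly = Polygon(
    [(0, 0), (4, 0), (4, 4), (0, 4)],
    holes=[[(1, 1), (2, 1), (2, 2), (1, 2)]],
)
patch = PathPatch(
    Path.make_compound_path(
        Path(np.asarray(poly.exterior.coords)[:, :2]),
        *[Path(np.asarray(ring.coords)[:, :2]) for ring in poly.interiors],
    ),
    facecolor="C0",
    edgecolor="k",
)
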
diff --git a/flopy/utils/gridutil.py b/flopy/utils/gridutil.py
index 4c1366aa4d..fa8fccb124 100644
--- a/flopy/utils/gridutil.py
+++ b/flopy/utils/gridutil.py
@@ -2,15 +2,15 @@
Grid utilities
"""
+from collections.abc import Collection, Iterable, Sequence
from math import floor
-from typing import Collection, Iterable, List, Sequence, Tuple, Union
import numpy as np
from .cvfdutil import centroid_of_polygon, get_disv_gridprops
-def get_lni(ncpl, nodes) -> List[Tuple[int, int]]:
+def get_lni(ncpl, nodes) -> list[tuple[int, int]]:
"""
Get layer index and within-layer node index (both 0-based).
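
[Editorial note] A quick illustration of get_lni(): flat node numbers are mapped to (layer, within-layer index) tuples using the cells-per-layer count.

from flopy.utils.gridutil import get_lni

# 10 cells per layer: node 15 is node 5 of layer 1
print(get_lni(10, [5, 15]))  # [(0, 5), (1, 5)]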
@@ -292,10 +292,8 @@ def get_disv_kwargs(
# botm check
if np.isscalar(botm):
botm = botm * np.ones((nlay, nrow, ncol), dtype=float)
- elif isinstance(botm, List):
- assert (
- len(botm) == nlay
- ), "if botm provided as a list it must have length nlay"
+ elif isinstance(botm, list):
+ assert len(botm) == nlay, "if botm provided as a list it must have length nlay"
b = np.empty((nlay, nrow, ncol), dtype=float)
for k in range(nlay):
b[k] = botm[k]
diff --git a/flopy/utils/lgrutil.py b/flopy/utils/lgrutil.py
index 467f46a702..b40736eaee 100644
--- a/flopy/utils/lgrutil.py
+++ b/flopy/utils/lgrutil.py
@@ -155,9 +155,7 @@ def __init__(
self.delrp = Util2d(m, (ncolp,), np.float32, delrp, "delrp").array
self.delcp = Util2d(m, (nrowp,), np.float32, delcp, "delcp").array
self.topp = Util2d(m, (nrowp, ncolp), np.float32, topp, "topp").array
- self.botmp = Util3d(
- m, (nlayp, nrowp, ncolp), np.float32, botmp, "botmp"
- ).array
+ self.botmp = Util3d(m, (nlayp, nrowp, ncolp), np.float32, botmp, "botmp").array
# idomain
assert idomainp.shape == (nlayp, nrowp, ncolp)
@@ -264,12 +262,7 @@ def get_top_botm(self):
dz = (top - bot) / self.ncppl[kp]
for _ in range(self.ncppl[kp]):
botm[kc, icrowstart:icrowend, iccolstart:iccolend] = (
- botm[
- kc - 1,
- icrowstart:icrowend,
- iccolstart:iccolend,
- ]
- - dz
+ botm[kc - 1, icrowstart:icrowend, iccolstart:iccolend] - dz
)
kc += 1
return botm[0], botm[1:]
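
[Editorial note] The refinement arithmetic in get_top_botm(), reduced to numbers: a parent layer is split into ncppl equal child layers by repeatedly subtracting dz.

top, bot, ncppl = 10.0, 4.0, 3
dz = (top - bot) / ncppl
botms = [top - dz * (k + 1) for k in range(ncppl)]
print(botms)  # [8.0, 6.0, 4.0]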
@@ -293,9 +286,7 @@ def get_replicated_parent_array(self, parent_array):
"""
assert parent_array.shape == (self.nrowp, self.ncolp)
- child_array = np.empty(
- (self.nrow, self.ncol), dtype=parent_array.dtype
- )
+ child_array = np.empty((self.nrow, self.ncol), dtype=parent_array.dtype)
for ip in range(self.nprbeg, self.nprend + 1):
for jp in range(self.npcbeg, self.npcend + 1):
icrowstart = (ip - self.nprbeg) * self.ncpp
@@ -512,14 +503,7 @@ def get_exchange_data(self, angldegx=False, cdist=False):
y2 = float(yp[ip, jp])
cd = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
- exg = [
- (kp, ip, jp),
- (kc, ic, jc),
- ihc,
- cl1,
- cl2,
- hwva,
- ]
+ exg = [(kp, ip, jp), (kc, ic, jc), ihc, cl1, cl2, hwva]
if angldegx:
exg.append(float(angle))
if cdist:
@@ -574,16 +558,7 @@ def child(self):
xorigin = self.xll
yorigin = self.yll
simple_regular_grid = SimpleRegularGrid(
- nlayc,
- nrowc,
- ncolc,
- delrc,
- delcc,
- topc,
- botmc,
- idomainc,
- xorigin,
- yorigin,
+ nlayc, nrowc, ncolc, delrc, delcc, topc, botmc, idomainc, xorigin, yorigin
)
return simple_regular_grid
@@ -593,7 +568,7 @@ def to_disv_gridprops(self):
used to create a disv grid (instead of a separate parent
and child representation). The gridprops dictionary can
be unpacked into the flopy.mf6.Modflowdisv() constructor
- and flopy.discretization.VertexGrid() contructor.
+ and flopy.discretization.VertexGrid() constructor.
Note that export capability will only work if the parent
and child models have corresponding layers.
@@ -706,9 +681,7 @@ def find_hanging_vertices(self):
if cidomain[kc, ic, jc] == 0:
continue
- if (
- idir == -1
- ): # left child face connected to right parent face
+ if idir == -1: # left child face connected to right parent face
# child vertices 0 and 3 added as hanging nodes
if (ip, jp) in self.right_face_hanging:
hlist = self.right_face_hanging.pop((ip, jp))
@@ -919,14 +892,10 @@ def get_xcyc(self):
cidx = self.cgrid.idomain[0] > 0
px = self.pgrid.xcellcenters[pidx].flatten()
cx = self.cgrid.xcellcenters[cidx].flatten()
- xcyc[:, 0] = np.vstack(
- (np.atleast_2d(px).T, np.atleast_2d(cx).T)
- ).flatten()
+ xcyc[:, 0] = np.vstack((np.atleast_2d(px).T, np.atleast_2d(cx).T)).flatten()
py = self.pgrid.ycellcenters[pidx].flatten()
cy = self.cgrid.ycellcenters[cidx].flatten()
- xcyc[:, 1] = np.vstack(
- (np.atleast_2d(py).T, np.atleast_2d(cy).T)
- ).flatten()
+ xcyc[:, 1] = np.vstack((np.atleast_2d(py).T, np.atleast_2d(cy).T)).flatten()
return xcyc
def get_top(self):
@@ -974,7 +943,7 @@ def get_disv_gridprops(self):
used to create a disv grid (instead of a separate parent
and child representation). The gridprops dictionary can
be unpacked into the flopy.mf6.Modflowdisv() constructor
- and flopy.discretization.VertexGrid() contructor.
+ and flopy.discretization.VertexGrid() constructor.
Note that export capability will only work if the parent
and child models have corresponding layers.
@@ -991,13 +960,15 @@ def get_disv_gridprops(self):
assert (
self.lgr.ncppl.min() == self.lgr.ncppl.max()
), "Exporting disv grid properties requires ncppl to be 1."
- assert (
- self.lgr.nlayp == self.lgr.nlay
- ), "Exporting disv grid properties requires parent and child models to have the same number of layers."
+ assert self.lgr.nlayp == self.lgr.nlay, (
+ "Exporting disv grid properties requires parent and child models "
+ "to have the same number of layers."
+ )
for k in range(self.lgr.nlayp - 1):
- assert np.allclose(
- self.lgr.idomain[k], self.lgr.idomain[k + 1]
- ), "Exporting disv grid properties requires parent idomain is same for all layers."
+ assert np.allclose(self.lgr.idomain[k], self.lgr.idomain[k + 1]), (
+ "Exporting disv grid properties requires parent idomain "
+ "is same for all layers."
+ )
# get information and build gridprops
xcyc = self.get_xcyc()
diff --git a/flopy/utils/mflistfile.py b/flopy/utils/mflistfile.py
index 33f13b77f4..f15fee32c6 100644
--- a/flopy/utils/mflistfile.py
+++ b/flopy/utils/mflistfile.py
@@ -197,9 +197,7 @@ def get_kstpkper(self):
if not self._isvalid:
return None
kstpkper = []
- for kstp, kper in zip(
- self.inc["time_step"], self.inc["stress_period"]
- ):
+ for kstp, kper in zip(self.inc["time_step"], self.inc["stress_period"]):
kstpkper.append((kstp, kper))
return kstpkper
@@ -301,11 +299,7 @@ def get_model_runtime(self, units="seconds"):
# reopen the file
self.f = open(self.file_name, "r", encoding="ascii", errors="replace")
units = units.lower()
- if (
- not units == "seconds"
- and not units == "minutes"
- and not units == "hours"
- ):
+ if units not in ("seconds", "minutes", "hours"):
raise AssertionError(
'"units" input variable must be "minutes", "hours", '
f'or "seconds": {units} was specified'
@@ -429,16 +423,12 @@ def get_data(self, kstpkper=None, idx=None, totim=None, incremental=False):
try:
ipos = self.get_kstpkper().index(kstpkper)
except:
- print(
- f" could not retrieve kstpkper {kstpkper} from the lst file"
- )
+ print(f" could not retrieve kstpkper {kstpkper} from the lst file")
elif totim is not None:
try:
ipos = self.get_times().index(totim)
except:
- print(
- f" could not retrieve totime {totim} from the lst file"
- )
+ print(f" could not retrieve totime {totim} from the lst file")
elif idx is not None:
ipos = idx
else:
@@ -456,9 +446,7 @@ def get_data(self, kstpkper=None, idx=None, totim=None, incremental=False):
else:
t = self.cum[ipos]
- dtype = np.dtype(
- [("index", np.int32), ("value", np.float32), ("name", "|S25")]
- )
+ dtype = np.dtype([("index", np.int32), ("value", np.float32), ("name", "|S25")])
v = np.recarray(shape=(len(self.inc.dtype.names[3:])), dtype=dtype)
for i, name in enumerate(self.inc.dtype.names[3:]):
mult = 1.0
@@ -500,9 +488,7 @@ def get_dataframes(self, start_datetime="1-1-1970", diff=False):
if start_datetime is not None:
try:
totim = totim_to_datetime(
- totim,
- start=pd.to_datetime(start_datetime),
- timeunit=self.timeunit,
+ totim, start=pd.to_datetime(start_datetime), timeunit=self.timeunit
)
except:
pass # if totim can't be cast to pd.datetime return in native units
@@ -645,10 +631,7 @@ def _get_index(self, maxentries):
ts, sp = get_ts_sp(line)
except:
print(
- "unable to cast ts,sp on line number",
- l_count,
- " line: ",
- line,
+ "unable to cast ts,sp on line number", l_count, " line: ", line
)
break
@@ -695,8 +678,7 @@ def _set_entries(self):
)
except:
raise Exception(
- "unable to read budget information from first "
- "entry in list file"
+ "unable to read budget information from first entry in list file"
)
self.entries = incdict.keys()
null_entries = {}
@@ -773,7 +755,7 @@ def _get_sp(self, ts, sp, seekpoint):
if line == "":
print(
"end of file found while seeking budget "
- "information for ts,sp: {} {}".format(ts, sp)
+ f"information for ts,sp: {ts} {sp}"
)
return self.null_entries
@@ -789,7 +771,7 @@ def _get_sp(self, ts, sp, seekpoint):
if line == "":
print(
"end of file found while seeking budget "
- "information for ts,sp: {} {}".format(ts, sp)
+ f"information for ts,sp: {ts} {sp}"
)
return self.null_entries
if len(re.findall("=", line)) == 2:
@@ -800,20 +782,12 @@ def _get_sp(self, ts, sp, seekpoint):
return self.null_entries
if flux is None:
print(
- "error casting in flux for",
- entry,
- " to float in ts,sp",
- ts,
- sp,
+ "error casting in flux for", entry, " to float in ts,sp", ts, sp
)
return self.null_entries
if cumu is None:
print(
- "error casting in cumu for",
- entry,
- " to float in ts,sp",
- ts,
- sp,
+ "error casting in cumu for", entry, " to float in ts,sp", ts, sp
)
return self.null_entries
if entry.endswith(tag.upper()):
@@ -880,19 +854,15 @@ def _get_totim(self, ts, sp, seekpoint):
if line == "":
print(
"end of file found while seeking budget "
- "information for ts,sp: {} {}".format(ts, sp)
+ f"information for ts,sp: {ts} {sp}"
)
return np.nan, np.nan, np.nan
elif (
ihead == 2
- and "SECONDS MINUTES HOURS DAYS YEARS"
- not in line
+ and "SECONDS MINUTES HOURS DAYS YEARS" not in line
):
break
- elif (
- "-----------------------------------------------------------"
- in line
- ):
+ elif "-----------------------------------------------------------" in line:
line = self.f.readline()
break
diff --git a/flopy/utils/mfreadnam.py b/flopy/utils/mfreadnam.py
index ba833b842d..0e3f96cdb2 100644
--- a/flopy/utils/mfreadnam.py
+++ b/flopy/utils/mfreadnam.py
@@ -11,7 +11,7 @@
import os
from os import PathLike
from pathlib import Path, PurePosixPath, PureWindowsPath
-from typing import List, Tuple, Union
+from typing import Optional, Union
class NamData:
@@ -295,11 +295,12 @@ def attribs_from_namfile_header(namefile):
def get_entries_from_namefile(
path: Union[str, PathLike],
- ftype: str = None,
- unit: int = None,
- extension: str = None,
-) -> List[Tuple]:
- """Get entries from an MF6 namefile. Can select using FTYPE, UNIT, or file extension.
+ ftype: Optional[str] = None,
+ unit: Optional[int] = None,
+ extension: Optional[str] = None,
+) -> list[tuple]:
+ """Get entries from an MF6 namefile. Can select using FTYPE, UNIT, or file
+ extension.
This function only supports MF6 namefiles.
Parameters
@@ -480,7 +481,7 @@ def get_mf6_nper(tdisfile):
"""
with open(tdisfile) as f:
lines = f.readlines()
- line = [line for line in lines if "NPER" in line.upper()][0]
+ line = next(line for line in lines if "NPER" in line.upper())
nper = line.strip().split()[1]
return nper
diff --git a/flopy/utils/modpathfile.py b/flopy/utils/modpathfile.py
index 5bc3e20609..6fbb930d28 100644
--- a/flopy/utils/modpathfile.py
+++ b/flopy/utils/modpathfile.py
@@ -4,7 +4,7 @@
import itertools
import os
-from typing import List, Optional, Tuple, Union
+from typing import Optional, Union
import numpy as np
from numpy.lib.recfunctions import append_fields, repack_fields
@@ -17,23 +17,17 @@
class ModpathFile(ParticleTrackFile):
"""Provides MODPATH output file support."""
- def __init__(
- self, filename: Union[str, os.PathLike], verbose: bool = False
- ):
+ def __init__(self, filename: Union[str, os.PathLike], verbose: bool = False):
super().__init__(filename, verbose)
self.output_type = self.__class__.__name__.lower().replace("file", "")
- (
- self.modpath,
- self.compact,
- self.skiprows,
- self.version,
- self.direction,
- ) = self.parse(filename, self.output_type)
+ (self.modpath, self.compact, self.skiprows, self.version, self.direction) = (
+ self.parse(filename, self.output_type)
+ )
@staticmethod
def parse(
file_path: Union[str, os.PathLike], file_type: str
- ) -> Tuple[bool, int, int, Optional[int]]:
+ ) -> tuple[bool, int, int, Optional[int]]:
"""
Extract preliminary information from a MODPATH output file:
- whether in compact format
@@ -67,10 +61,7 @@ def parse(
if skiprows < 1:
if f"MODPATH_{file_type.upper()}_FILE 6" in line.upper():
version = 6
- elif (
- f"MODPATH_{file_type.upper()}_FILE 7"
- in line.upper()
- ):
+ elif f"MODPATH_{file_type.upper()}_FILE 7" in line.upper():
version = 7
elif "MODPATH 5.0" in line.upper():
version = 5
@@ -95,16 +86,15 @@ def parse(
return modpath, compact, skiprows, version, direction
- def intersect(
- self, cells, to_recarray
- ) -> Union[List[np.recarray], np.recarray]:
+ def intersect(self, cells, to_recarray) -> Union[list[np.recarray], np.recarray]:
if self.version < 7:
try:
raslice = self._data[["k", "i", "j"]]
except (KeyError, ValueError):
raise KeyError(
- "could not extract 'k', 'i', and 'j' keys "
- "from {} data".format(self.output_type.lower())
+ "could not extract 'k', 'i', and 'j' keys from {} data".format(
+ self.output_type.lower()
+ )
)
else:
try:
@@ -232,14 +222,12 @@ class PathlineFile(ModpathFile):
"sequencenumber",
]
- def __init__(
- self, filename: Union[str, os.PathLike], verbose: bool = False
- ):
+ def __init__(self, filename: Union[str, os.PathLike], verbose: bool = False):
super().__init__(filename, verbose=verbose)
self.dtype, self._data = self._load()
self.nid = np.unique(self._data["particleid"])
- def _load(self) -> Tuple[np.dtype, np.ndarray]:
+ def _load(self) -> tuple[np.dtype, np.ndarray]:
dtype = self.dtypes[self.version]
if self.version == 7:
dtyper = np.dtype(
@@ -278,16 +266,8 @@ def _load(self) -> Tuple[np.dtype, np.ndarray]:
sequencenumber, group, particleid, pathlinecount = t[0:4]
nrows += pathlinecount
# read in the particle data
- d = np.loadtxt(
- itertools.islice(f, 0, pathlinecount), dtype=dtyper
- )
- key = (
- idx,
- sequencenumber,
- group,
- particleid,
- pathlinecount,
- )
+ d = np.loadtxt(itertools.islice(f, 0, pathlinecount), dtype=dtyper)
+ key = (idx, sequencenumber, group, particleid, pathlinecount)
particle_pathlines[key] = d.copy()
idx += 1
@@ -297,9 +277,7 @@ def _load(self) -> Tuple[np.dtype, np.ndarray]:
# fill data
ipos0 = 0
for key, value in particle_pathlines.items():
- idx, sequencenumber, group, particleid, pathlinecount = key[
- 0:5
- ]
+ idx, sequencenumber, group, particleid, pathlinecount = key[0:5]
ipos1 = ipos0 + pathlinecount
# fill constant items for particle
# particleid is not necessarily unique for all pathlines - use
@@ -387,7 +365,8 @@ def write_shapefile(
.get_alldata() (if None, .get_alldata() is exported).
.. deprecated:: 3.7
- The ``timeseries_data`` option will be removed for FloPy 4. Use ``data`` instead.
+ The ``timeseries_data`` option will be removed for FloPy 4.
+ Use ``data`` instead.
one_per_particle : boolean (default True)
True writes a single LineString with a single set of attribute
data for each particle. False writes a record/geometry for each
@@ -556,14 +535,12 @@ class EndpointFile(ModpathFile):
"zone",
]
- def __init__(
- self, filename: Union[str, os.PathLike], verbose: bool = False
- ):
+ def __init__(self, filename: Union[str, os.PathLike], verbose: bool = False):
super().__init__(filename, verbose)
self.dtype, self._data = self._load()
self.nid = np.unique(self._data["particleid"])
- def _load(self) -> Tuple[np.dtype, np.ndarray]:
+ def _load(self) -> tuple[np.dtype, np.ndarray]:
dtype = self.dtypes[self.version]
data = loadtxt(self.fname, dtype=dtype, skiprows=self.skiprows)
@@ -665,9 +642,7 @@ def get_destination_endpoint_data(self, dest_cells, source=False):
raslice = repack_fields(data[keys])
except (KeyError, ValueError):
raise KeyError(
- "could not extract "
- + "', '".join(keys)
- + " from endpoint data."
+ "could not extract " + "', '".join(keys) + " from endpoint data."
)
else:
if source:
@@ -717,7 +692,8 @@ def write_shapefile(
(if none, EndpointFile.get_alldata() is exported).
.. deprecated:: 3.7
- The ``endpoint_data`` option will be removed for FloPy 4. Use ``data`` instead.
+ The ``endpoint_data`` option will be removed for FloPy 4.
+ Use ``data`` instead.
shpname : str
File path for shapefile
direction : str
@@ -754,8 +730,7 @@ def write_shapefile(
xcol, ycol, zcol = "x0", "y0", "z0"
else:
raise Exception(
- 'flopy.map.plot_endpoint direction must be "ending" '
- 'or "starting".'
+ 'flopy.map.plot_endpoint direction must be "ending" or "starting".'
)
if mg is None:
raise ValueError("A modelgrid object was not provided.")
@@ -873,7 +848,7 @@ def __init__(self, filename, verbose=False):
self.dtype, self._data = self._load()
self.nid = np.unique(self._data["particleid"])
- def _load(self) -> Tuple[np.dtype, np.ndarray]:
+ def _load(self) -> tuple[np.dtype, np.ndarray]:
dtype = self.dtypes[self.version]
if self.version in [3, 5] and not self.compact:
dtype = np.dtype(
@@ -956,7 +931,8 @@ def write_shapefile(
is exported).
.. deprecated:: 3.7
- The ``timeseries_data`` option will be removed for FloPy 4. Use ``data`` instead.
+ The ``timeseries_data`` option will be removed for FloPy 4.
+ Use ``data`` instead.
one_per_particle : boolean (default True)
True writes a single LineString with a single set of attribute
data for each particle. False writes a record/geometry for each
diff --git a/flopy/utils/mtlistfile.py b/flopy/utils/mtlistfile.py
index 7bae5ea049..06c54e3017 100644
--- a/flopy/utils/mtlistfile.py
+++ b/flopy/utils/mtlistfile.py
@@ -51,9 +51,7 @@ def __init__(self, file_name):
return
- def parse(
- self, forgive=True, diff=True, start_datetime=None, time_unit="d"
- ):
+ def parse(self, forgive=True, diff=True, start_datetime=None, time_unit="d"):
"""
Main entry point for parsing the list file.
@@ -111,10 +109,8 @@ def parse(
self._parse_sw(f, line)
elif self.tkstp_key in line:
try:
- self.tkstp_overflow = (
- self._extract_number_between_strings(
- line, self.tkstp_key, "in"
- )
+ self.tkstp_overflow = self._extract_number_between_strings(
+ line, self.tkstp_key, "in"
)
except Exception as e:
warnings.warn(
@@ -175,15 +171,9 @@ def parse(
return df_gw, df_sw
def _diff(self, df):
- out_cols = [
- c for c in df.columns if "_out" in c and not c.startswith("net_")
- ]
- in_cols = [
- c for c in df.columns if "_in" in c and not c.startswith("net_")
- ]
- add_cols = [
- c for c in df.columns if c not in out_cols + in_cols + ["totim"]
- ]
+ out_cols = [c for c in df.columns if "_out" in c and not c.startswith("net_")]
+ in_cols = [c for c in df.columns if "_in" in c and not c.startswith("net_")]
+ add_cols = [c for c in df.columns if c not in out_cols + in_cols + ["totim"]]
out_base = [c.replace("_out_", "_") for c in out_cols]
in_base = [c.replace("_in_", "_") for c in in_cols]
map_names = {
@@ -202,8 +192,8 @@ def _diff(self, df):
else:
out_base_mapped.append(base)
out_base = out_base_mapped
- in_dict = {ib: ic for ib, ic in zip(in_base, in_cols)}
- out_dict = {ib: ic for ib, ic in zip(out_base, out_cols)}
+ in_dict = dict(zip(in_base, in_cols))
+ out_dict = dict(zip(out_base, out_cols))
in_base = set(in_base)
out_base = set(out_base)
out_base.update(in_base)
@@ -240,15 +230,11 @@ def _parse_gw(self, f, line):
for _ in range(7):
line = self._readline(f)
if line is None:
- raise Exception(
- "EOF while reading from component header to totim"
- )
+ raise Exception("EOF while reading from component header to totim")
try:
totim = float(line.split()[-2])
except Exception as e:
- raise Exception(
- f"error parsing totim on line {self.lcount}: {e!s}"
- )
+ raise Exception(f"error parsing totim on line {self.lcount}: {e!s}")
for _ in range(3):
line = self._readline(f)
@@ -259,9 +245,7 @@ def _parse_gw(self, f, line):
for _ in range(4):
line = self._readline(f)
if line is None:
- raise Exception(
- "EOF while reading from time step to particles"
- )
+ raise Exception("EOF while reading from time step to particles")
try:
kper = int(line[-6:-1])
@@ -301,9 +285,7 @@ def _parse_gw(self, f, line):
try:
item, ival, oval = self._parse_gw_line(line)
except Exception as e:
- raise Exception(
- f"error parsing GW items on line {self.lcount}: {e!s}"
- )
+ raise Exception(f"error parsing GW items on line {self.lcount}: {e!s}")
self._add_to_gw_data(item, ival, oval, comp)
if break_next:
break
@@ -328,9 +310,7 @@ def _parse_gw(self, f, line):
try:
item, ival, oval = self._parse_gw_line(line)
except Exception as e:
- raise Exception(
- f"error parsing GW items on line {self.lcount}: {e!s}"
- )
+ raise Exception(f"error parsing GW items on line {self.lcount}: {e!s}")
self._add_to_gw_data(item, ival, oval, comp)
if "discrepancy" in item:
# can't rely on blank lines following block
@@ -477,12 +457,8 @@ def _add_to_sw_data(self, inout, item, cval, fval, comp):
self.sw_data[iitem].append(val)
@staticmethod
- def _extract_number_between_strings(
- input_string, start_string, end_string
- ):
- pattern = (
- rf"{re.escape(start_string)}\s*(\d+)\s*{re.escape(end_string)}"
- )
+ def _extract_number_between_strings(input_string, start_string, end_string):
+ pattern = rf"{re.escape(start_string)}\s*(\d+)\s*{re.escape(end_string)}"
match = re.search(pattern, input_string)
if match:
@@ -490,5 +466,6 @@ def _extract_number_between_strings(
return extracted_number
else:
raise Exception(
- f"Error extracting number between {start_string} and {end_string} in {input_string}"
+ "Error extracting number between "
+ f"{start_string} and {end_string} in {input_string}"
)
diff --git a/flopy/utils/observationfile.py b/flopy/utils/observationfile.py
index 3e5308b3aa..e416bbb386 100644
--- a/flopy/utils/observationfile.py
+++ b/flopy/utils/observationfile.py
@@ -496,9 +496,7 @@ class CsvFile:
"""
- def __init__(
- self, csvfile, delimiter=",", deletechars="", replace_space=""
- ):
+ def __init__(self, csvfile, delimiter=",", deletechars="", replace_space=""):
with open(csvfile) as self.file:
self.delimiter = delimiter
self.deletechars = deletechars
diff --git a/flopy/utils/optionblock.py b/flopy/utils/optionblock.py
index f999fef28a..95628556e0 100644
--- a/flopy/utils/optionblock.py
+++ b/flopy/utils/optionblock.py
@@ -29,22 +29,17 @@ class OptionBlock:
vars = "vars"
optional = "optional"
- simple_flag = dict([(dtype, np.bool_), (nested, False), (optional, False)])
- simple_str = dict([(dtype, str), (nested, False), (optional, False)])
- simple_float = dict([(dtype, float), (nested, False), (optional, False)])
- simple_int = dict([(dtype, int), (nested, False), (optional, False)])
-
- simple_tabfile = dict(
- [
- (dtype, np.bool_),
- (nested, True),
- (n_nested, 2),
- (
- vars,
- dict([("numtab", simple_int), ("maxval", simple_int)]),
- ),
- ]
- )
+ simple_flag = {dtype: np.bool_, nested: False, optional: False}
+ simple_str = {dtype: str, nested: False, optional: False}
+ simple_float = {dtype: float, nested: False, optional: False}
+ simple_int = {dtype: int, nested: False, optional: False}
+
+ simple_tabfile = {
+ dtype: np.bool_,
+ nested: True,
+ n_nested: 2,
+ vars: {"numtab": simple_int, "maxval": simple_int},
+ }
def __init__(self, options_line, package, block=True):
self._context = package._options
@@ -59,10 +54,10 @@ def __init__(self, options_line, package, block=True):
self._set_attributes()
def __getattr__(self, key):
- if key == "auxillary": # catch typo from older version
+ if key == "auxillary": # catch typo from older version - codespell:ignore
key = "auxiliary"
warnings.warn(
- "the atttribute 'auxillary' is deprecated, use 'auxiliary' instead",
+ "the attribute 'auxillary' is deprecated, use 'auxiliary' instead",
category=DeprecationWarning,
)
return super().__getattribute__(key)
@@ -132,9 +127,7 @@ def __repr__(self):
if v == "None" and d[OptionBlock.optional]:
pass
else:
- val.append(
- str(object.__getattribute__(self, k))
- )
+ val.append(str(object.__getattribute__(self, k)))
if "None" in val:
pass
@@ -161,10 +154,10 @@ def __setattr__(self, key, value):
is consistent with the attribute data type
"""
- if key == "auxillary": # catch typo from older version
+ if key == "auxillary": # catch typo from older version - codespell:ignore
key = "auxiliary"
warnings.warn(
- "the atttribute 'auxillary' is deprecated, use 'auxiliary' instead",
+ "the attribute 'auxillary' is deprecated, use 'auxiliary' instead",
category=DeprecationWarning,
)
@@ -406,7 +399,9 @@ def load_options(cls, options, package):
valid = True
if not valid:
- err_msg = f"Invalid type set to variable {k} in option block"
+ err_msg = (
+ f"Invalid type set to variable {k} in option block"
+ )
raise TypeError(err_msg)
option_line += t[ix] + " "
diff --git a/flopy/utils/parse_version.py b/flopy/utils/parse_version.py
index cd07d4a3ec..da85abc1ef 100644
--- a/flopy/utils/parse_version.py
+++ b/flopy/utils/parse_version.py
@@ -12,14 +12,15 @@
import itertools
import re
import warnings
-from typing import Callable, Iterator, SupportsInt, Tuple, Union
+from collections.abc import Iterator
+from typing import Callable, SupportsInt, Union
__all__ = [
- "parse",
- "Version",
- "LegacyVersion",
- "InvalidVersion",
"VERSION_PATTERN",
+ "InvalidVersion",
+ "LegacyVersion",
+ "Version",
+ "parse",
]
@@ -88,28 +89,28 @@ def __neg__(self: object) -> InfinityType:
InfiniteTypes = Union[InfinityType, NegativeInfinityType]
-PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
+PrePostDevType = Union[InfiniteTypes, tuple[str, int]]
SubLocalType = Union[InfiniteTypes, int, str]
LocalType = Union[
NegativeInfinityType,
- Tuple[
+ tuple[
Union[
SubLocalType,
- Tuple[SubLocalType, str],
- Tuple[NegativeInfinityType, SubLocalType],
+ tuple[SubLocalType, str],
+ tuple[NegativeInfinityType, SubLocalType],
],
...,
],
]
-CmpKey = Tuple[
+CmpKey = tuple[
int,
- Tuple[int, ...],
+ tuple[int, ...],
PrePostDevType,
PrePostDevType,
PrePostDevType,
LocalType,
]
-LegacyCmpKey = Tuple[int, Tuple[str, ...]]
+LegacyCmpKey = tuple[int, tuple[str, ...]]
VersionComparisonMethod = Callable[
[Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
]
@@ -245,9 +246,7 @@ def is_devrelease(self) -> bool:
return False
-_legacy_version_component_re = re.compile(
- r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE
-)
+_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
_legacy_version_replacement_map = {
"pre": "c",
@@ -336,9 +335,7 @@ def _legacy_cmpkey(version: str) -> LegacyCmpKey:
class Version(_BaseVersion):
- _regex = re.compile(
- r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE
- )
+ _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
def __init__(self, version: str) -> None:
# Validate the version and parse it into pieces
@@ -350,16 +347,11 @@ def __init__(self, version: str) -> None:
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
- pre=_parse_letter_version(
- match.group("pre_l"), match.group("pre_n")
- ),
+ pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
post=_parse_letter_version(
- match.group("post_l"),
- match.group("post_n1") or match.group("post_n2"),
- ),
- dev=_parse_letter_version(
- match.group("dev_l"), match.group("dev_n")
+ match.group("post_l"), match.group("post_n1") or match.group("post_n2")
),
+ dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
local=_parse_local_version(match.group("local")),
)
@@ -540,9 +532,7 @@ def _cmpkey(
# re-reverse it back into the correct order and make it a tuple and use
# that for our sorting key.
_release = tuple(
- reversed(
- list(itertools.dropwhile(lambda x: x == 0, reversed(release)))
- )
+ reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
@@ -584,8 +574,7 @@ def _cmpkey(
# - Shorter versions sort before longer versions when the prefixes
# match exactly
_local = tuple(
- (i, "") if isinstance(i, int) else (NegativeInfinity, i)
- for i in local
+ (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
)
return epoch, _release, _pre, _post, _dev, _local
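The typing changes in this file are the PEP 585 cleanup: `typing.Tuple` and `typing.Iterator` become the subscriptable builtins available since Python 3.9. A small self-contained illustration (the generator here is invented for the example):

```python
from collections.abc import Iterator  # replaces the deprecated typing.Iterator

# PEP 585 (Python 3.9+): builtin containers are subscriptable in annotations,
# so aliases like LegacyCmpKey need no typing.Tuple import
LegacyCmpKey = tuple[int, tuple[str, ...]]

def cmp_keys() -> Iterator[LegacyCmpKey]:
    # toy generator; only the annotations matter here
    yield (-1, ("*final",))

assert isinstance(next(cmp_keys()), tuple)
```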
diff --git a/flopy/utils/particletrackfile.py b/flopy/utils/particletrackfile.py
index 378ff830ea..c5df347c02 100644
--- a/flopy/utils/particletrackfile.py
+++ b/flopy/utils/particletrackfile.py
@@ -79,9 +79,7 @@ def get_maxtime(self) -> float:
"""
return self._data["time"].max()
- def get_data(
- self, partid=0, totim=None, ge=True, minimal=False
- ) -> np.recarray:
+ def get_data(self, partid=0, totim=None, ge=True, minimal=False) -> np.recarray:
"""
Get a single particle track, optionally filtering by time.
@@ -153,9 +151,7 @@ def get_alldata(self, totim=None, ge=True, minimal=False):
data = data[idx]
return [data[data["particleid"] == i] for i in nids]
- def get_destination_data(
- self, dest_cells, to_recarray=True
- ) -> np.recarray:
+ def get_destination_data(self, dest_cells, to_recarray=True) -> np.recarray:
"""
Get data for set of destination cells.
@@ -318,9 +314,7 @@ def write_shapefile(
x, y = geometry.transform(ra.x, ra.y, 0, 0, 0)
z = ra.z
geoms += [
- LineString(
- [(x[i - 1], y[i - 1], z[i - 1]), (x[i], y[i], z[i])]
- )
+ LineString([(x[i - 1], y[i - 1], z[i - 1]), (x[i], y[i], z[i])])
for i in np.arange(1, (len(ra)))
]
sdata += ra[1:].tolist()
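The `write_shapefile` hunk builds one `LineString` per consecutive pair of particle positions. An equivalent `zip`-based spelling of that index arithmetic, with toy coordinates (requires shapely):

```python
from shapely.geometry import LineString

x = [0.0, 1.0, 2.0]
y = [0.0, 1.0, 0.5]
z = [5.0, 4.5, 4.0]
pts = list(zip(x, y, z))
# one segment per consecutive pair of points, as in the comprehension above
geoms = [LineString([p0, p1]) for p0, p1 in zip(pts[:-1], pts[1:])]
assert len(geoms) == len(pts) - 1
```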
diff --git a/flopy/utils/postprocessing.py b/flopy/utils/postprocessing.py
index 0f204cb7e8..3e7728357f 100644
--- a/flopy/utils/postprocessing.py
+++ b/flopy/utils/postprocessing.py
@@ -295,9 +295,7 @@ def get_extended_budget(
matched_name = [s for s in rec_names if budget_term in s]
if not matched_name:
raise RuntimeError(budget_term + err_msg)
- frf = cbf.get_data(
- idx=idx, kstpkper=kstpkper, totim=totim, text=budget_term
- )
+ frf = cbf.get_data(idx=idx, kstpkper=kstpkper, totim=totim, text=budget_term)
Qx_ext[:, :, 1:] = frf[0]
# SWI2 package
budget_term_swi = "SWIADDTOFRF"
@@ -315,9 +313,7 @@ def get_extended_budget(
matched_name = [s for s in rec_names if budget_term in s]
if not matched_name:
raise RuntimeError(budget_term + err_msg)
- fff = cbf.get_data(
- idx=idx, kstpkper=kstpkper, totim=totim, text=budget_term
- )
+ fff = cbf.get_data(idx=idx, kstpkper=kstpkper, totim=totim, text=budget_term)
Qy_ext[:, 1:, :] = -fff[0]
# SWI2 package
budget_term_swi = "SWIADDTOFFF"
@@ -335,9 +331,7 @@ def get_extended_budget(
matched_name = [s for s in rec_names if budget_term in s]
if not matched_name:
raise RuntimeError(budget_term + err_msg)
- flf = cbf.get_data(
- idx=idx, kstpkper=kstpkper, totim=totim, text=budget_term
- )
+ flf = cbf.get_data(idx=idx, kstpkper=kstpkper, totim=totim, text=budget_term)
Qz_ext[1:, :, :] = -flf[0]
# SWI2 package
budget_term_swi = "SWIADDTOFLF"
@@ -352,9 +346,7 @@ def get_extended_budget(
if boundary_ifaces is not None:
# need calculated heads for some stresses and to check hnoflo and hdry
if hdsfile is None:
- raise ValueError(
- "hdsfile must be provided when using boundary_ifaces"
- )
+ raise ValueError("hdsfile must be provided when using boundary_ifaces")
if isinstance(hdsfile, (bf.HeadFile, fm.FormattedHeadFile)):
hds = hdsfile
else:
@@ -366,9 +358,7 @@ def get_extended_budget(
# get hnoflo and hdry values
if model is None:
- raise ValueError(
- "model must be provided when using boundary_ifaces"
- )
+ raise ValueError("model must be provided when using boundary_ifaces")
noflo_or_dry = np.logical_or(head == model.hnoflo, head == model.hdry)
for budget_term, iface_info in boundary_ifaces.items():
@@ -410,9 +400,7 @@ def get_extended_budget(
np.logical_not(noflo_or_dry[lay, :, :]),
np.logical_not(already_found),
)
- already_found = np.logical_or(
- already_found, water_table[lay, :, :]
- )
+ already_found = np.logical_or(already_found, water_table[lay, :, :])
Q_stress[np.logical_not(water_table)] = 0.0
# case where the same iface is assigned to all cells
@@ -532,9 +520,7 @@ def get_extended_budget(
elif iface == 6:
Qz_ext[lay, row, col] -= Q_stress_cell
else:
- raise TypeError(
- "boundary_ifaces value must be either int or list."
- )
+ raise TypeError("boundary_ifaces value must be either int or list.")
return Qx_ext, Qy_ext, Qz_ext
@@ -600,16 +586,13 @@ def get_specific_discharge(
if vectors[ix].shape == modelgrid.shape:
tqx = np.zeros(
- (modelgrid.nlay, modelgrid.nrow, modelgrid.ncol + 1),
- dtype=np.float32,
+ (modelgrid.nlay, modelgrid.nrow, modelgrid.ncol + 1), dtype=np.float32
)
tqy = np.zeros(
- (modelgrid.nlay, modelgrid.nrow + 1, modelgrid.ncol),
- dtype=np.float32,
+ (modelgrid.nlay, modelgrid.nrow + 1, modelgrid.ncol), dtype=np.float32
)
tqz = np.zeros(
- (modelgrid.nlay + 1, modelgrid.nrow, modelgrid.ncol),
- dtype=np.float32,
+ (modelgrid.nlay + 1, modelgrid.nrow, modelgrid.ncol), dtype=np.float32
)
if vectors[0] is not None:
tqx[:, :, 1:] = vectors[0]
@@ -652,9 +635,7 @@ def get_specific_discharge(
if modelgrid._idomain is None:
modelgrid._idomain = model.dis.ibound
if head is not None:
- noflo_or_dry = np.logical_or(
- head == model.hnoflo, head == model.hdry
- )
+ noflo_or_dry = np.logical_or(head == model.hnoflo, head == model.hdry)
modelgrid._idomain[noflo_or_dry] = 0
# get cross section areas along x
@@ -675,26 +656,16 @@ def get_specific_discharge(
cross_area_x = (
delc[:]
* 0.5
- * (
- saturated_thickness[:, :, :-1]
- + saturated_thickness[:, :, 1:]
- )
+ * (saturated_thickness[:, :, :-1] + saturated_thickness[:, :, 1:])
)
cross_area_y = (
delr
* 0.5
- * (
- saturated_thickness[:, 1:, :]
- + saturated_thickness[:, :-1, :]
- )
- )
- qx[:, :, 1:] = (
- 0.5 * (tqx[:, :, 2:] + tqx[:, :, 1:-1]) / cross_area_x
+ * (saturated_thickness[:, 1:, :] + saturated_thickness[:, :-1, :])
)
+ qx[:, :, 1:] = 0.5 * (tqx[:, :, 2:] + tqx[:, :, 1:-1]) / cross_area_x
qx[:, :, 0] = 0.5 * tqx[:, :, 1] / cross_area_x[:, :, 0]
- qy[:, 1:, :] = (
- 0.5 * (tqy[:, 2:, :] + tqy[:, 1:-1, :]) / cross_area_y
- )
+ qy[:, 1:, :] = 0.5 * (tqy[:, 2:, :] + tqy[:, 1:-1, :]) / cross_area_y
qy[:, 0, :] = 0.5 * tqy[:, 1, :] / cross_area_y[:, 0, :]
qz = 0.5 * (tqz[1:, :, :] + tqz[:-1, :, :]) / cross_area_z
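The reflowed `qx`/`qy`/`qz` expressions implement cell-centered specific discharge: average a cell's two bounding face flows, then divide by the saturated cross-sectional area. A one-dimensional sketch of the same arithmetic:

```python
import numpy as np

Q_face = np.array([0.0, 2.0, 4.0, 2.0, 0.0])  # flow through the ncol + 1 faces
area = np.array([10.0, 10.0, 10.0, 10.0])     # saturated face area per cell
# cell-centered discharge = mean of the two bounding faces / cross-section area
q_cell = 0.5 * (Q_face[1:] + Q_face[:-1]) / area
print(q_cell)  # [0.1 0.3 0.3 0.1]
```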
diff --git a/flopy/utils/rasters.py b/flopy/utils/rasters.py
index 4fabf0482b..ee7da72b7c 100644
--- a/flopy/utils/rasters.py
+++ b/flopy/utils/rasters.py
@@ -114,9 +114,7 @@ def __init__(
self._meta = meta
self._dataset = None
- self.__arr_dict = {
- self._bands[b]: arr for b, arr in enumerate(self._array)
- }
+ self.__arr_dict = {self._bands[b]: arr for b, arr in enumerate(self._array)}
self.__xcenters = None
self.__ycenters = None
@@ -255,11 +253,7 @@ def __transform(self, dst_crs, inplace):
"""
import rasterio
from rasterio.io import MemoryFile
- from rasterio.warp import (
- Resampling,
- calculate_default_transform,
- reproject,
- )
+ from rasterio.warp import Resampling, calculate_default_transform, reproject
height = self._meta["height"]
width = self._meta["width"]
@@ -303,7 +297,7 @@ def __transform(self, dst_crs, inplace):
self.__xcenters = None
self.__ycenters = None
- self._meta.update({k: v for k, v in kwargs.items()})
+ self._meta.update(kwargs)
self._dataset = None
else:
@@ -537,20 +531,8 @@ def resample_to_grid(
arr = self.get_array(band, masked=True)
arr = arr.flatten()
- # filter out nan values from the original dataset
- if np.isnan(np.sum(arr)):
- idx = np.isfinite(arr)
- rxc = rxc[idx]
- ryc = ryc[idx]
- arr = arr[idx]
-
# step 3: use griddata interpolation to snap to grid
- data = griddata(
- (rxc, ryc),
- arr,
- (xc, yc),
- method=method,
- )
+ data = griddata((rxc, ryc), arr, (xc, yc), method=method)
elif method in ("median", "mean", "min", "max", "mode"):
# these methods are slow and could use speed ups
@@ -574,7 +556,7 @@ def resample_to_grid(
else:
raise TypeError(f"{method} method not supported")
- if extrapolate_edges and method != "nearest":
+ if extrapolate_edges:
xc = modelgrid.xcellcenters
yc = modelgrid.ycellcenters
@@ -598,12 +580,7 @@ def resample_to_grid(
ryc = ryc[idx]
arr = arr[idx]
- extrapolate = griddata(
- (rxc, ryc),
- arr,
- (xc, yc),
- method="nearest",
- )
+ extrapolate = griddata((rxc, ryc), arr, (xc, yc), method="nearest")
data = np.where(np.isnan(data), extrapolate, data)
# step 4: return grid to user in shape provided
@@ -705,12 +682,7 @@ def crop(self, polygon, invert=False):
self._meta["width"] = crp_mask.shape[1]
transform = self._meta["transform"]
self._meta["transform"] = self._affine.Affine(
- transform[0],
- transform[1],
- xmin,
- transform[3],
- transform[4],
- ymax,
+ transform[0], transform[1], xmin, transform[3], transform[4], ymax
)
self.__xcenters = None
self.__ycenters = None
@@ -953,9 +925,7 @@ def raster_from_array(
crs = modelgrid.crs
if modelgrid.grid_type != "structured":
- raise TypeError(
- f"{type(modelgrid)} discretizations are not supported"
- )
+ raise TypeError(f"{type(modelgrid)} discretizations are not supported")
if not np.all(modelgrid.delc == modelgrid.delc[0]):
raise AssertionError("DELC must have a uniform spacing")
@@ -966,9 +936,7 @@ def raster_from_array(
yul = modelgrid.yvertices[0, 0]
xul = modelgrid.xvertices[0, 0]
angrot = modelgrid.angrot
- transform = Affine(
- modelgrid.delr[0], 0, xul, 0, -modelgrid.delc[0], yul
- )
+ transform = Affine(modelgrid.delr[0], 0, xul, 0, -modelgrid.delc[0], yul)
if angrot != 0:
transform *= Affine.rotation(angrot)
@@ -1024,12 +992,7 @@ def plot(self, ax=None, contour=False, **kwargs):
from rasterio.plot import show
if self._dataset is not None:
- ax = show(
- self._dataset,
- ax=ax,
- contour=contour,
- **kwargs,
- )
+ ax = show(self._dataset, ax=ax, contour=contour, **kwargs)
else:
d0 = len(self.__arr_dict)
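With the up-front NaN filter removed in `resample_to_grid`, even `method="nearest"` can return NaN where the closest raster pixel is nodata, which is presumably why the `method != "nearest"` guard on the extrapolation pass was dropped; that pass still restricts itself to finite pixels before backfilling. A sketch of the backfill pattern with synthetic points (requires scipy):

```python
import numpy as np
from scipy.interpolate import griddata

rng = np.random.default_rng(0)
rxc, ryc = rng.random(50), rng.random(50)
arr = rxc + ryc
arr[:5] = np.nan                      # pretend some raster pixels are nodata
xc, yc = np.meshgrid(np.linspace(-0.1, 1.1, 8), np.linspace(-0.1, 1.1, 8))
data = griddata((rxc, ryc), arr, (xc, yc), method="linear")
# backfill NaNs (outside the hull, or fed by nodata) from valid pixels only
idx = np.isfinite(arr)
extrapolate = griddata((rxc[idx], ryc[idx]), arr[idx], (xc, yc), method="nearest")
data = np.where(np.isnan(data), extrapolate, data)
assert np.isfinite(data).all()
```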
diff --git a/flopy/utils/sfroutputfile.py b/flopy/utils/sfroutputfile.py
index ba23a9b5b4..faf5f31338 100644
--- a/flopy/utils/sfroutputfile.py
+++ b/flopy/utils/sfroutputfile.py
@@ -96,9 +96,7 @@ def __init__(self, filename, geometries=None, verbose=False):
"Cond",
]
if has_gradient and has_delUzstor:
- raise ValueError(
- "column 16 should be either 'gradient' or 'Qwt', not both"
- )
+ raise ValueError("column 16 should be either 'gradient' or 'Qwt', not both")
elif has_gradient:
self.names.append("gradient")
elif has_delUzstor:
@@ -147,9 +145,7 @@ def get_nstrm(df):
Number of SFR cells
"""
- wherereach1 = np.asarray(
- (df.segment == 1) & (df.reach == 1)
- ).nonzero()[0]
+ wherereach1 = np.asarray((df.segment == 1) & (df.reach == 1)).nonzero()[0]
if len(wherereach1) == 1:
return len(df)
elif len(wherereach1) > 1:
diff --git a/flopy/utils/swroutputfile.py b/flopy/utils/swroutputfile.py
index eff2488688..c1091346d7 100644
--- a/flopy/utils/swroutputfile.py
+++ b/flopy/utils/swroutputfile.py
@@ -41,9 +41,7 @@ class SwrFile(FlopyBinaryData):
"""
- def __init__(
- self, filename, swrtype="stage", precision="double", verbose=False
- ):
+ def __init__(self, filename, swrtype="stage", precision="double", verbose=False):
"""
Class constructor.
@@ -327,9 +325,7 @@ def get_ts(self, irec=0, iconn=0, klay=0, istr=0):
return gage_record
def _read_connectivity(self):
- self.conn_dtype = np.dtype(
- [("reach", "i4"), ("from", "i4"), ("to", "i4")]
- )
+ self.conn_dtype = np.dtype([("reach", "i4"), ("from", "i4"), ("to", "i4")])
conn = np.zeros((self.nrecord, 3), int)
icount = 0
for nrg in range(self.flowitems):
@@ -607,9 +603,7 @@ def _build_index(self):
totim, dt, kper, kstp, kswr, success = self._read_header()
if success:
if self.type == "exchange":
- bytes = self.nitems * (
- self.integerbyte + 8 * self.realbyte
- )
+ bytes = self.nitems * (self.integerbyte + 8 * self.realbyte)
elif self.type == "structure":
bytes = self.nitems * (5 * self.realbyte)
else:
@@ -626,9 +620,7 @@ def _build_index(self):
else:
if self.verbose:
print()
- self._recordarray = np.array(
- self._recordarray, dtype=self.header_dtype
- )
+ self._recordarray = np.array(self._recordarray, dtype=self.header_dtype)
self._times = np.array(self._times)
self._kswrkstpkper = np.array(self._kswrkstpkper)
return
@@ -748,15 +740,14 @@ class SwrFlow(SwrFile):
"""
def __init__(self, filename, precision="double", verbose=False):
- super().__init__(
- filename, swrtype="flow", precision=precision, verbose=verbose
- )
+ super().__init__(filename, swrtype="flow", precision=precision, verbose=verbose)
return
class SwrExchange(SwrFile):
"""
- Read binary SWR surface-water groundwater exchange output from MODFLOW SWR Process binary output files
+ Read binary SWR surface-water/groundwater exchange output from
+ MODFLOW SWR Process binary output files
Parameters
----------
diff --git a/flopy/utils/triangle.py b/flopy/utils/triangle.py
index e378fb3184..aa93bbd823 100644
--- a/flopy/utils/triangle.py
+++ b/flopy/utils/triangle.py
@@ -248,22 +248,14 @@ def plot(
vertices = self.get_vertices()
ncpl = len(cell2d)
- modelgrid = VertexGrid(
- vertices=vertices, cell2d=cell2d, ncpl=ncpl, nlay=1
- )
+ modelgrid = VertexGrid(vertices=vertices, cell2d=cell2d, ncpl=ncpl, nlay=1)
pmv = PlotMapView(modelgrid=modelgrid, ax=ax, layer=layer)
if a is None:
- pc = pmv.plot_grid(
- facecolor=facecolor, edgecolor=edgecolor, **kwargs
- )
+ pc = pmv.plot_grid(facecolor=facecolor, edgecolor=edgecolor, **kwargs)
else:
pc = pmv.plot_array(
- a,
- masked_values=masked_values,
- cmap=cmap,
- edgecolor=edgecolor,
- **kwargs,
+ a, masked_values=masked_values, cmap=cmap, edgecolor=edgecolor, **kwargs
)
return pc
diff --git a/flopy/utils/util_array.py b/flopy/utils/util_array.py
index 8d341f51a2..41c0e44101 100644
--- a/flopy/utils/util_array.py
+++ b/flopy/utils/util_array.py
@@ -415,9 +415,7 @@ def read1d(f, a):
"""
if len(a.shape) != 1:
- raise ValueError(
- f"read1d: expected 1 dimension, found shape {a.shape}"
- )
+ raise ValueError(f"read1d: expected 1 dimension, found shape {a.shape}")
values = []
while len(values) < a.shape[0]:
line = f.readline()
@@ -548,9 +546,7 @@ def __init__(
return
if len(shape) != 3:
- raise ValueError(
- f"Util3d: expected 3 dimensions, found shape {shape}"
- )
+ raise ValueError(f"Util3d: expected 3 dimensions, found shape {shape}")
self._model = model
self.shape = shape
self._dtype = dtype
@@ -585,28 +581,21 @@ def __init__(
for k in range(shape[0]):
self.ext_filename_base.append(
os.path.join(
- model.external_path,
- self.name_base[k].replace(" ", "_"),
+ model.external_path, self.name_base[k].replace(" ", "_")
)
)
else:
for k in range(shape[0]):
- self.ext_filename_base.append(
- self.name_base[k].replace(" ", "_")
- )
+ self.ext_filename_base.append(self.name_base[k].replace(" ", "_"))
self.util_2ds = self.build_2d_instances()
def __setitem__(self, k, value):
if isinstance(k, int):
- assert k in range(
- 0, self.shape[0]
- ), "Util3d error: k not in range nlay"
+ assert k in range(0, self.shape[0]), "Util3d error: k not in range nlay"
self.util_2ds[k] = new_u2d(self.util_2ds[k], value)
else:
- raise NotImplementedError(
- f"Util3d doesn't support setitem indices: {k}"
- )
+ raise NotImplementedError(f"Util3d doesn't support setitem indices: {k}")
def __setattr__(self, key, value):
if hasattr(self, "util_2ds") and key == "cnstnt":
@@ -616,9 +605,7 @@ def __setattr__(self, key, value):
elif hasattr(self, "util_2ds") and key == "fmtin":
for u2d in self.util_2ds:
u2d.format = ArrayFormat(
- u2d,
- fortran=value,
- array_free_format=self.array_free_format,
+ u2d, fortran=value, array_free_format=self.array_free_format
)
super().__setattr__("fmtin", value)
elif hasattr(self, "util_2ds") and key == "how":
@@ -735,9 +722,7 @@ def plot(
return axes
def __getitem__(self, k):
- if isinstance(k, int) or np.issubdtype(
- getattr(k, "dtype", None), np.integer
- ):
+ if isinstance(k, int) or np.issubdtype(getattr(k, "dtype", None), np.integer):
return self.util_2ds[k]
elif len(k) == 3:
return self.array[k[0], k[1], k[2]]
@@ -795,9 +780,7 @@ def build_2d_instances(self):
and isinstance(self.shape[2], (np.ndarray, list))
and len(self.__value) == np.sum(self.shape[2])
):
- self.__value = np.split(
- self.__value, np.cumsum(self.shape[2])[:-1]
- )
+ self.__value = np.split(self.__value, np.cumsum(self.shape[2])[:-1])
# if this is a list or 1-D array with constant values per layer
if isinstance(self.__value, list) or (
@@ -813,9 +796,7 @@ def build_2d_instances(self):
if isinstance(item, Util2d):
# we need to reset the external name because most of the
# load() methods don't use layer-specific names
- item._ext_filename = (
- f"{self.ext_filename_base[i]}{i + 1}.ref"
- )
+ item._ext_filename = f"{self.ext_filename_base[i]}{i + 1}.ref"
# reset the model instance in cases these Util2d's
# came from another model instance
item.model = self._model
@@ -824,9 +805,7 @@ def build_2d_instances(self):
name = self.name_base[i] + str(i + 1)
ext_filename = None
if self._model.external_path is not None:
- ext_filename = (
- f"{self.ext_filename_base[i]}{i + 1}.ref"
- )
+ ext_filename = f"{self.ext_filename_base[i]}{i + 1}.ref"
shape = self.shape[1:]
if shape[0] is None:
# allow for unstructured so that ncol changes by layer
@@ -893,9 +872,7 @@ def load(
array_format=None,
):
if len(shape) != 3:
- raise ValueError(
- f"Util3d: expected 3 dimensions, found shape {shape}"
- )
+ raise ValueError(f"Util3d: expected 3 dimensions, found shape {shape}")
nlay, nrow, ncol = shape
u2ds = []
for k in range(nlay):
@@ -1172,9 +1149,7 @@ def build_transient_sequence(self):
f"Transient3d error: can't cast key: {key} to kper integer"
)
if key < 0:
- raise Exception(
- f"Transient3d error: key can't be negative: {key}"
- )
+ raise Exception(f"Transient3d error: key can't be negative: {key}")
try:
u3d = self.__get_3d_instance(key, val)
except Exception as e:
@@ -1435,9 +1410,7 @@ def __setattr__(self, key, value):
elif hasattr(self, "transient_2ds") and key == "fmtin":
# set fmtin for each u2d
for kper, u2d in self.transient_2ds.items():
- self.transient_2ds[kper].format = ArrayFormat(
- u2d, fortran=value
- )
+ self.transient_2ds[kper].format = ArrayFormat(u2d, fortran=value)
elif hasattr(self, "transient_2ds") and key == "how":
# set how for each u2d
for kper, u2d in self.transient_2ds.items():
@@ -1579,8 +1552,7 @@ def __setitem__(self, key, value):
@property
def array(self):
arr = np.zeros(
- (self._model.nper, 1, self.shape[0], self.shape[1]),
- dtype=self._dtype,
+ (self._model.nper, 1, self.shape[0], self.shape[1]), dtype=self._dtype
)
for kper in range(self._model.nper):
u2d = self[kper]
@@ -1620,9 +1592,7 @@ def build_transient_sequence(self):
f"Transient2d error: can't cast key: {key} to kper integer"
)
if key < 0:
- raise Exception(
- f"Transient2d error: key can't be negative: {key}"
- )
+ raise Exception(f"Transient2d error: key can't be negative: {key}")
try:
u2d = self.__get_2d_instance(key, val)
except Exception as e:
@@ -1847,8 +1817,7 @@ def __init__(
self._model = model
if len(shape) not in (1, 2):
raise ValueError(
- "Util2d: shape must describe 1- or 2-dimensions, "
- "e.g. (nrow, ncol)"
+ "Util2d: shape must describe 1- or 2-dimensions, e.g. (nrow, ncol)"
)
if min(shape) < 1:
raise ValueError("Util2d: each shape dimension must be at least 1")
@@ -1870,12 +1839,7 @@ def __init__(
self.ext_filename = ext_filename
self._ext_filename = self._name.replace(" ", "_") + ".ref"
- self._acceptable_hows = [
- "constant",
- "internal",
- "external",
- "openclose",
- ]
+ self._acceptable_hows = ["constant", "internal", "external", "openclose"]
if how is not None:
how = how.lower()
@@ -2005,9 +1969,7 @@ def export(self, f, **kwargs):
def set_fmtin(self, fmtin):
self._format = ArrayFormat(
- self,
- fortran=fmtin,
- array_free_format=self.format.array_free_format,
+ self, fortran=fmtin, array_free_format=self.format.array_free_format
)
def get_value(self):
@@ -2141,9 +2103,7 @@ def python_file_path(self):
if self._model.model_ws != ".":
python_file_path = os.path.join(self._model.model_ws)
if self._model.external_path is not None:
- python_file_path = os.path.join(
- python_file_path, self._model.external_path
- )
+ python_file_path = os.path.join(python_file_path, self._model.external_path)
python_file_path = os.path.join(python_file_path, self.filename)
return python_file_path
@@ -2171,9 +2131,7 @@ def model_file_path(self):
model_file_path = ""
if self._model.external_path is not None:
- model_file_path = os.path.join(
- model_file_path, self._model.external_path
- )
+ model_file_path = os.path.join(model_file_path, self._model.external_path)
model_file_path = os.path.join(model_file_path, self.filename)
return model_file_path
@@ -2197,8 +2155,7 @@ def _get_fixed_cr(self, locat, value=None):
if self.format.binary:
if locat is None:
raise Exception(
- "Util2d._get_fixed_cr(): locat is None but "
- "format is binary"
+ "Util2d._get_fixed_cr(): locat is None but format is binary"
)
if not self.format.array_free_format:
locat = -1 * np.abs(locat)
@@ -2249,16 +2206,10 @@ def get_openclose_cr(self):
def get_external_cr(self):
locat = self._model.next_ext_unit()
- self._model.add_external(
- self.model_file_path, locat, self.format.binary
- )
+ self._model.add_external(self.model_file_path, locat, self.format.binary)
if self.format.array_free_format:
cr = "EXTERNAL {:>30d} {:15} {:>10s} {:2.0f} {:<30s}\n".format(
- locat,
- self.cnstnt_str,
- self.format.fortran,
- self.iprn,
- self._name,
+ locat, self.cnstnt_str, self.format.fortran, self.iprn, self._name
)
return cr
else:
@@ -2313,10 +2264,7 @@ def get_file_entry(self, how=None):
if self.vtype != str:
if self.format.binary:
self.write_bin(
- self.shape,
- self.python_file_path,
- self._array,
- bintype="head",
+ self.shape, self.python_file_path, self._array, bintype="head"
)
else:
self.write_txt(
@@ -2366,9 +2314,7 @@ def get_file_entry(self, how=None):
return self.get_constant_cr(value)
else:
- raise Exception(
- f"Util2d.get_file_entry() error: unrecognized 'how':{how}"
- )
+ raise Exception(f"Util2d.get_file_entry() error: unrecognized 'how':{how}")
@property
def string(self):
@@ -2377,7 +2323,8 @@ def string(self):
Note:
the string representation DOES NOT include the effects of the control
- record multiplier - this method is used primarily for writing model input files
+ record multiplier - this method is used primarily for writing model
+ input files
"""
# convert array to string with specified format
@@ -2424,9 +2371,9 @@ def _array(self):
if value is a string or a constant, the array is loaded/built only once
Note:
- the return array representation DOES NOT include the effect of the multiplier
- in the control record. To get the array as the model sees it (with the multiplier applied),
- use the Util2d.array method.
+ the return array representation DOES NOT include the effect of the
+ multiplier in the control record. To get the array as the model
+ sees it (with the multiplier applied), use the Util2d.array method.
"""
if self.vtype == str:
if self.__value_built is None:
@@ -2570,9 +2517,7 @@ def load_txt(shape, file_in, dtype, fmtin):
return data.reshape(shape)
@staticmethod
- def write_txt(
- shape, file_out, data, fortran_format="(FREE)", python_format=None
- ):
+ def write_txt(shape, file_out, data, fortran_format="(FREE)", python_format=None):
if fortran_format.upper() == "(FREE)" and python_format is None:
np.savetxt(
file_out,
@@ -2585,10 +2530,7 @@ def write_txt(
file_out = open(file_out, "w")
file_out.write(
Util2d.array2string(
- shape,
- data,
- fortran_format=fortran_format,
- python_format=python_format,
+ shape, data, fortran_format=fortran_format, python_format=python_format
)
)
@@ -2609,22 +2551,16 @@ def array2string(shape, data, fortran_format="(FREE)", python_format=None):
ncol = shape[0]
data = np.atleast_2d(data)
if python_format is None:
- (
- column_length,
- fmt,
- width,
- decimal,
- ) = ArrayFormat.decode_fortran_descriptor(fortran_format)
+ (column_length, fmt, width, decimal) = (
+ ArrayFormat.decode_fortran_descriptor(fortran_format)
+ )
if decimal is None:
output_fmt = f"{{0:{width}d}}"
else:
output_fmt = f"{{0:{width}.{decimal}{fmt}}}"
else:
try:
- column_length, output_fmt = (
- int(python_format[0]),
- python_format[1],
- )
+ column_length, output_fmt = (int(python_format[0]), python_format[1])
except:
raise Exception(
"Util2d.write_txt: \nunable to parse "
@@ -2734,9 +2670,7 @@ def parse_value(self, value):
f'Util2d:could not cast boolean value to type "bool": {value}'
)
else:
- raise Exception(
- "Util2d:value type is bool, but dtype not set as bool"
- )
+ raise Exception("Util2d:value type is bool, but dtype not set as bool")
elif isinstance(value, (str, os.PathLike)):
if os.path.exists(value):
self.__value = str(value)
@@ -2790,9 +2724,7 @@ def parse_value(self, value):
self.__value = value
else:
- raise Exception(
- f"Util2d:unsupported type in util_array: {type(value)}"
- )
+ raise Exception(f"Util2d:unsupported type in util_array: {type(value)}")
@classmethod
def load(
@@ -2866,9 +2798,7 @@ def load(
)
else:
f = open(fname, "rb")
- header_data, data = Util2d.load_bin(
- shape, f, dtype, bintype="Head"
- )
+ header_data, data = Util2d.load_bin(shape, f, dtype, bintype="Head")
f.close()
u2d = cls(
model,
@@ -2990,9 +2920,7 @@ def parse_control_record(
try:
fname = ext_unit_dict[nunit].filename.strip()
except:
- print(
- f" could not determine filename for unit {raw[1]}"
- )
+ print(f" could not determine filename for unit {raw[1]}")
if isfloat:
cnstnt = float(raw[2].lower().replace("d", "e"))
@@ -3017,9 +2945,7 @@ def parse_control_record(
locat = int(line[0:10].strip())
if isfloat:
if len(line) >= 20:
- cnstnt = float(
- line[10:20].strip().lower().replace("d", "e")
- )
+ cnstnt = float(line[10:20].strip().lower().replace("d", "e"))
else:
cnstnt = 0.0
else:
@@ -3059,9 +2985,7 @@ def parse_control_record(
freefmt = "block"
nunit = current_unit
elif locat == 102:
- raise NotImplementedError(
- "MT3D zonal format not supported..."
- )
+ raise NotImplementedError("MT3D zonal format not supported...")
elif locat == 103:
freefmt = "internal"
nunit = current_unit
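`array2string` unpacks `ArrayFormat.decode_fortran_descriptor` into `(column_length, fmt, width, decimal)` and turns that into a Python format spec. A toy decoder for the common `(nEw.d)` case showing the round trip (flopy's real decoder handles more descriptor variants):

```python
import re

def decode(descriptor: str):
    # handles simple descriptors like "(10E12.4)" or "(20I4)" only
    m = re.match(r"\((\d+)([A-Za-z])(\d+)(?:\.(\d+))?\)", descriptor.strip())
    n, fmt, width, decimal = m.groups()
    return int(n), fmt.lower(), int(width), int(decimal) if decimal else None

column_length, fmt, width, decimal = decode("(10E12.4)")
output_fmt = f"{{0:{width}.{decimal}{fmt}}}"  # -> "{0:12.4e}"
print(output_fmt.format(3.14159))             # "  3.1416e+00"
```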
diff --git a/flopy/utils/util_list.py b/flopy/utils/util_list.py
index 8616cf11df..8593e5e79a 100644
--- a/flopy/utils/util_list.py
+++ b/flopy/utils/util_list.py
@@ -194,16 +194,12 @@ def drop(self, fields):
if not isinstance(fields, list):
fields = [fields]
names = [n for n in self.dtype.names if n not in fields]
- dtype = np.dtype(
- [(k, d) for k, d in self.dtype.descr if k not in fields]
- )
+ dtype = np.dtype([(k, d) for k, d in self.dtype.descr if k not in fields])
spd = {}
for k, v in self.data.items():
# because np 1.9 doesn't support indexing by list of columns
newarr = np.array([self.data[k][n] for n in names]).transpose()
- newarr = np.array(list(map(tuple, newarr)), dtype=dtype).view(
- np.recarray
- )
+ newarr = np.array(list(map(tuple, newarr)), dtype=dtype).view(np.recarray)
for n in dtype.names:
newarr[n] = self.data[k][n]
spd[k] = newarr
@@ -315,9 +311,7 @@ def __cast_data(self, data):
try:
data = np.array(data)
except Exception as e:
- raise ValueError(
- f"MfList error: casting list to ndarray: {e!s}"
- )
+ raise ValueError(f"MfList error: casting list to ndarray: {e!s}")
# If data is a dict, the we have to assume it is keyed on kper
if isinstance(data, dict):
@@ -336,9 +330,7 @@ def __cast_data(self, data):
try:
d = np.array(d)
except Exception as e:
- raise ValueError(
- f"MfList error: casting list to ndarray: {e}"
- )
+ raise ValueError(f"MfList error: casting list to ndarray: {e}")
if isinstance(d, np.recarray):
self.__cast_recarray(kper, d)
@@ -375,9 +367,7 @@ def __cast_data(self, data):
elif isinstance(data, str):
self.__cast_str(0, data)
else:
- raise ValueError(
- f"MfList error: unsupported data type: {type(data)}"
- )
+ raise ValueError(f"MfList error: unsupported data type: {type(data)}")
def __cast_str(self, kper, d):
# If d is a string, assume it is a filename and check that it exists
@@ -419,19 +409,13 @@ def __cast_ndarray(self, kper, d):
f"dtype len: {len(self.dtype)}"
)
try:
- self.__data[kper] = np.rec.fromarrays(
- d.transpose(), dtype=self.dtype
- )
+ self.__data[kper] = np.rec.fromarrays(d.transpose(), dtype=self.dtype)
except Exception as e:
- raise ValueError(
- f"MfList error: casting ndarray to recarray: {e!s}"
- )
+ raise ValueError(f"MfList error: casting ndarray to recarray: {e!s}")
self.__vtype[kper] = np.recarray
def __cast_dataframe(self, kper, d):
- self.__cast_recarray(
- kper, d.to_records(index=False).astype(self.dtype)
- )
+ self.__cast_recarray(kper, d.to_records(index=False).astype(self.dtype))
def get_dataframe(self, squeeze=False):
"""
@@ -463,9 +447,7 @@ def get_dataframe(self, squeeze=False):
# may have to iterate over the first stress period
for per in range(self._model.nper):
if hasattr(self.data[per], "dtype"):
- varnames = list(
- [n for n in self.data[per].dtype.names if n not in names]
- )
+ varnames = [n for n in self.data[per].dtype.names if n not in names]
break
# create list of dataframes for each stress period
@@ -494,22 +476,13 @@ def get_dataframe(self, squeeze=False):
# squeeze: remove duplicate periods
if squeeze:
- changed = (
- df.groupby(["k", "i", "j", "no"]).diff().ne(0.0).any(axis=1)
- )
+ changed = df.groupby(["k", "i", "j", "no"]).diff().ne(0.0).any(axis=1)
changed = changed.groupby("per").transform(lambda s: s.any())
df = df.loc[changed, :]
df = df.reset_index()
df.loc[:, "node"] = df.loc[:, "i"] * self._model.ncol + df.loc[:, "j"]
- df = df.loc[
- :,
- names
- + [
- "node",
- ]
- + [v for v in varnames if not v == "node"],
- ]
+ df = df.loc[:, names + ["node"] + [v for v in varnames if not v == "node"]]
return df
def add_record(self, kper, index, values):
@@ -536,9 +509,7 @@ def add_record(self, kper, index, values):
self.__vtype[kper] = np.recarray
elif self.vtype[kper] == np.recarray:
# Extend the recarray
- self.__data[kper] = np.append(
- self.__data[kper], self.get_empty(1)
- )
+ self.__data[kper] = np.append(self.__data[kper], self.get_empty(1))
else:
self.__data[kper] = self.get_empty(1)
self.__vtype[kper] = np.recarray
@@ -588,9 +559,7 @@ def __setitem__(self, kper, data):
try:
data = np.array(data)
except Exception as e:
- raise ValueError(
- f"MfList error: casting list to ndarray: {e!s}"
- )
+ raise ValueError(f"MfList error: casting list to ndarray: {e!s}")
# cast data
if isinstance(data, int):
self.__cast_int(kper, data)
@@ -603,9 +572,7 @@ def __setitem__(self, kper, data):
elif isinstance(data, str):
self.__cast_str(kper, data)
else:
- raise ValueError(
- f"MfList error: unsupported data type: {type(data)}"
- )
+ raise ValueError(f"MfList error: unsupported data type: {type(data)}")
def __fromfile(self, f):
try:
@@ -629,10 +596,7 @@ def get_filenames(self):
elif kper in kpers:
kper_vtype = self.__vtype[kper]
- if (
- self._model.array_free_format
- and self._model.external_path is not None
- ):
+ if self._model.array_free_format and self._model.external_path is not None:
filename = f"{self.package.name[0]}_{kper:04d}.dat"
filenames.append(filename)
return filenames
@@ -664,14 +628,8 @@ def write_transient(
), "MfList.write() error: f argument must be a file handle"
kpers = list(self.data.keys())
pak_name_str = self.package.__class__.__name__.lower()
- if (len(kpers) == 0) and (
- pak_name_str == "mfusgwel"
- ): # must be cln wels
- kpers += [
- kper
- for kper in list(cln_data.data.keys())
- if kper not in kpers
- ]
+ if (len(kpers) == 0) and (pak_name_str == "mfusgwel"): # must be CLN wells
+ kpers += [kper for kper in list(cln_data.data.keys()) if kper not in kpers]
kpers.sort()
first = kpers[0]
if single_per is None:
@@ -764,9 +722,7 @@ def write_transient(
if cln_data is not None:
if cln_data.get_itmp(kper) is not None:
- cln_data.write_transient(
- f, single_per=kper, write_header=False
- )
+ cln_data.write_transient(f, single_per=kper, write_header=False)
def __tofile(self, f, data):
# Write the recarray (data) to the file (or file handle) f
@@ -801,9 +757,7 @@ def check_kij(self):
return
nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper()
if nl == 0:
- warnings.warn(
- "MfList.check_kij(): unable to get dis info from model"
- )
+ warnings.warn("MfList.check_kij(): unable to get dis info from model")
return
for kper in list(self.data.keys()):
out_idx = []
@@ -887,9 +841,7 @@ def attribute_by_kper(self, attr, function=np.mean, idx_val=None):
kper_data = self.__data[kper]
if idx_val is not None:
kper_data = kper_data[
- np.asarray(
- kper_data[idx_val[0]] == idx_val[1]
- ).nonzero()
+ np.asarray(kper_data[idx_val[0]] == idx_val[1]).nonzero()
]
v = function(kper_data[attr])
values.append(v)
@@ -1078,13 +1030,10 @@ def to_array(self, kper=0, mask=False):
for name, arr in arrays.items():
if unstructured:
- cnt = np.zeros(
- (self._model.nlay * self._model.ncpl,), dtype=float
- )
+ cnt = np.zeros((self._model.nlay * self._model.ncpl,), dtype=float)
else:
cnt = np.zeros(
- (self._model.nlay, self._model.nrow, self._model.ncol),
- dtype=float,
+ (self._model.nlay, self._model.nrow, self._model.ncol), dtype=float
)
for rec in sarr:
if unstructured:
@@ -1106,49 +1055,23 @@ def to_array(self, kper=0, mask=False):
@property
def masked_4D_arrays(self):
- # get the first kper
- arrays = self.to_array(kper=0, mask=True)
-
- # initialize these big arrays
- m4ds = {}
- for name, array in arrays.items():
- m4d = np.zeros(
- (
- self._model.nper,
- self._model.nlay,
- self._model.nrow,
- self._model.ncol,
- )
- )
- m4d[0, :, :, :] = array
- m4ds[name] = m4d
- for kper in range(1, self._model.nper):
- arrays = self.to_array(kper=kper, mask=True)
- for name, array in arrays.items():
- m4ds[name][kper, :, :, :] = array
- return m4ds
+ return dict(self.masked_4D_arrays_itr())
def masked_4D_arrays_itr(self):
- # get the first kper
- arrays = self.to_array(kper=0, mask=True)
-
- # initialize these big arrays
- for name, array in arrays.items():
- m4d = np.zeros(
- (
- self._model.nper,
- self._model.nlay,
- self._model.nrow,
- self._model.ncol,
- )
- )
- m4d[0, :, :, :] = array
- for kper in range(1, self._model.nper):
- arrays = self.to_array(kper=kper, mask=True)
- for tname, array in arrays.items():
- if tname == name:
- m4d[kper, :, :, :] = array
- yield name, m4d
+ nper = self._model.nper
+
+ # get the first kper array to extract array shape and names
+ arrays_kper_0 = self.to_array(kper=0, mask=True)
+ shape_per_spd = next(iter(arrays_kper_0.values())).shape
+
+ for name in arrays_kper_0.keys():
+ ma = np.zeros((nper, *shape_per_spd))
+ for kper in range(nper):
+                # reuse the previous period's arrays when to_array returns an empty dict
+ if new_arrays := self.to_array(kper=kper, mask=True):
+ arrays = new_arrays
+ ma[kper] = arrays[name]
+ yield name, ma
@property
def array(self):
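The rewritten `masked_4D_arrays_itr` carries the previous period's arrays forward whenever `to_array` returns an empty dict, using the walrus operator. A toy version of that control flow, with a plain dict standing in for `self.to_array(kper=..., mask=True)`:

```python
import numpy as np

spd = {0: {"flux": np.ones((1, 2, 2))}, 2: {"flux": np.full((1, 2, 2), 3.0)}}
nper = 3

def masked_4d_itr():
    first = spd[0]
    shape = next(iter(first.values())).shape
    for name in first:
        m4d = np.zeros((nper, *shape))
        arrays = first
        for kper in range(nper):
            # an empty dict is falsy, so the previous arrays are reused
            if new_arrays := spd.get(kper, {}):
                arrays = new_arrays
            m4d[kper] = arrays[name]
        yield name, m4d

name, m4d = next(masked_4d_itr())
print(m4d[:, 0, 0, 0])  # [1. 1. 3.] -- period 1 reuses period 0's array
```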
diff --git a/flopy/utils/utils_def.py b/flopy/utils/utils_def.py
index 4212223947..31798a0a3d 100644
--- a/flopy/utils/utils_def.py
+++ b/flopy/utils/utils_def.py
@@ -123,10 +123,10 @@ def get_pak_vals_shape(model, vals):
if nrow is None: # unstructured
if isinstance(vals, dict):
try: # check for iterable
- _ = (v for v in list(vals.values())[0])
+ _ = (v for v in next(iter(vals.values())))
except:
return (1, ncol[0]) # default to layer 1 node count
- return np.array(list(vals.values())[0], ndmin=2).shape
+ return np.array(next(iter(vals.values())), ndmin=2).shape
else:
# check for single iterable
try:
@@ -166,9 +166,7 @@ def get_util2d_shape_for_layer(model, layer=0):
return (nrow, ncol)
-def get_unitnumber_from_ext_unit_dict(
- model, pak_class, ext_unit_dict=None, ipakcb=0
-):
+def get_unitnumber_from_ext_unit_dict(model, pak_class, ext_unit_dict=None, ipakcb=0):
"""
For a given modflow package, defines input file unit number,
plus package input and (optionally) output (budget) save file names.
@@ -198,9 +196,7 @@ def get_unitnumber_from_ext_unit_dict(
ext_unit_dict, filetype=pak_class._ftype()
)
if ipakcb > 0:
- _, filenames[1] = model.get_ext_dict_attr(
- ext_unit_dict, unit=ipakcb
- )
+ _, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
model.add_pop_key_list(ipakcb)
return unitnumber, filenames
@@ -233,9 +229,7 @@ def type_from_iterable(_iter, index=0, _type=int, default_val=0):
def get_open_file_object(fname_or_fobj, read_write="rw"):
"""Returns an open file object for either a file name or open file object."""
- openfile = not (
- hasattr(fname_or_fobj, "read") or hasattr(fname_or_fobj, "write")
- )
+ openfile = not (hasattr(fname_or_fobj, "read") or hasattr(fname_or_fobj, "write"))
if openfile:
filename = fname_or_fobj
f_obj = open(filename, read_write)
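`get_pak_vals_shape` swaps `list(vals.values())[0]` for `next(iter(vals.values()))`: the same first value, without materializing every value in a throwaway list.

```python
vals = {"layer1": [1, 2, 3], "layer2": [4, 5]}
first = next(iter(vals.values()))  # O(1); no intermediate list
assert first == list(vals.values())[0] == [1, 2, 3]
```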
diff --git a/flopy/utils/utl_import.py b/flopy/utils/utl_import.py
index a7943e6b50..51ebacad01 100644
--- a/flopy/utils/utl_import.py
+++ b/flopy/utils/utl_import.py
@@ -1,3 +1,4 @@
+# ruff: noqa: E501
# Vendored from https://github.com/pandas-dev/pandas/blob/master/pandas/compat/_optional.py
# changeset d30aeeba0c79fb8e4b651a8f528e87c3de8cb898
# 10/11/2021
@@ -140,9 +141,7 @@ def import_optional_dependency(
module_to_get = sys.modules[install_name]
else:
module_to_get = module
- minimum_version = (
- min_version if min_version is not None else VERSIONS.get(parent)
- )
+ minimum_version = min_version if min_version is not None else VERSIONS.get(parent)
if minimum_version:
version = get_version(module_to_get)
if Version(version) < Version(minimum_version):
diff --git a/flopy/utils/voronoi.py b/flopy/utils/voronoi.py
index d3f52566ae..c9085e2c46 100644
--- a/flopy/utils/voronoi.py
+++ b/flopy/utils/voronoi.py
@@ -1,5 +1,5 @@
+from collections.abc import Iterator
from math import sqrt
-from typing import Iterator, Tuple
import numpy as np
@@ -10,7 +10,7 @@
def get_sorted_vertices(
icell_vertices, vertices, verbose=False
-) -> Iterator[Tuple[float, int]]:
+) -> Iterator[tuple[float, int]]:
centroid = vertices[icell_vertices].mean(axis=0)
tlist = []
for i, iv in enumerate(icell_vertices):
@@ -143,14 +143,14 @@ def tri2vor(tri, **kwargs):
nvertices = vor.vertices.shape[0]
xc = vor.vertices[:, 0].reshape((nvertices, 1))
yc = vor.vertices[:, 1].reshape((nvertices, 1))
- domain_polygon = [(x, y) for x, y in tri._polygons[0]]
+ domain_polygon = list(tri._polygons[0])
vor_vert_indomain = point_in_polygon(xc, yc, domain_polygon)
vor_vert_indomain = vor_vert_indomain.flatten()
nholes = len(tri._holes)
if nholes > 0:
for ihole in range(nholes):
ipolygon = ihole + 1
- polygon = [(x, y) for x, y in tri._polygons[ipolygon]]
+ polygon = list(tri._polygons[ipolygon])
vor_vert_notindomain = point_in_polygon(xc, yc, polygon)
vor_vert_notindomain = vor_vert_notindomain.flatten()
idx = np.asarray(vor_vert_notindomain == True).nonzero()
@@ -165,7 +165,7 @@ def tri2vor(tri, **kwargs):
# Create new lists for the voronoi grid vertices and the
# voronoi grid incidence list. There should be one voronoi
# cell for each vertex point in the triangular grid
- vor_verts = [(x, y) for x, y in vor.vertices[idx_filtered]]
+ vor_verts = list(vor.vertices[idx_filtered])
vor_iverts = [[] for i in range(npoints)]
# step 1 -- go through voronoi ridge vertices
@@ -281,9 +281,7 @@ def __init__(self, tri, **kwargs):
if isinstance(tri, Triangle):
verts, iverts, points = tri2vor(tri, **kwargs)
else:
- raise TypeError(
- "The tri argument must be of type flopy.utils.Triangle"
- )
+ raise TypeError("The tri argument must be of type flopy.utils.Triangle")
self.points = points
self.verts = verts
self.iverts = iverts
@@ -303,9 +301,7 @@ def get_disv_gridprops(self):
flopy.mf6.ModflowGwfdisv constructor
"""
- disv_gridprops = get_disv_gridprops(
- self.verts, self.iverts, xcyc=self.points
- )
+ disv_gridprops = get_disv_gridprops(self.verts, self.iverts, xcyc=self.points)
return disv_gridprops
def get_disu5_gridprops(self):
diff --git a/flopy/utils/zonbud.py b/flopy/utils/zonbud.py
index 39949a1b0a..871046594f 100644
--- a/flopy/utils/zonbud.py
+++ b/flopy/utils/zonbud.py
@@ -61,17 +61,13 @@ def __init__(
if isinstance(cbc_file, CellBudgetFile):
self.cbc = cbc_file
- elif isinstance(cbc_file, (str, os.PathLike)) and os.path.isfile(
- cbc_file
- ):
+ elif isinstance(cbc_file, (str, os.PathLike)) and os.path.isfile(cbc_file):
self.cbc = CellBudgetFile(cbc_file)
else:
raise Exception(f"Cannot load cell budget file: {cbc_file}.")
if isinstance(z, np.ndarray):
- assert np.issubdtype(
- z.dtype, np.integer
- ), "Zones dtype must be integer"
+ assert np.issubdtype(z.dtype, np.integer), "Zones dtype must be integer"
else:
e = (
"Please pass zones as a numpy ndarray of (positive)"
@@ -81,9 +77,7 @@ def __init__(
# Check for negative zone values
if np.any(z < 0):
- raise Exception(
- "Negative zone value(s) found:", np.unique(z[z < 0])
- )
+ raise Exception("Negative zone value(s) found:", np.unique(z[z < 0]))
self.dis = None
if "model" in kwargs.keys():
@@ -130,9 +124,7 @@ def __init__(
# Check dimensions of input zone array
s = (
"Row/col dimensions of zone array {}"
- " do not match model row/col dimensions {}".format(
- z.shape, self.cbc_shape
- )
+ " do not match model row/col dimensions {}".format(z.shape, self.cbc_shape)
)
assert z.shape[-2] == self.nrow and z.shape[-1] == self.ncol, s
@@ -163,9 +155,7 @@ def __init__(
for z, a in iter(aliases.items()):
if z != 0 and z in self._zonenamedict.keys():
if z in seen:
- raise Exception(
- "Zones may not have more than 1 alias."
- )
+ raise Exception("Zones may not have more than 1 alias.")
self._zonenamedict[z] = "_".join(a.split())
seen.append(z)
@@ -177,9 +167,7 @@ def __init__(
# Get imeth for each record in the CellBudgetFile record list
self.imeth = {}
for record in self.cbc.recordarray:
- self.imeth[record["text"].strip().decode("utf-8")] = record[
- "imeth"
- ]
+ self.imeth[record["text"].strip().decode("utf-8")] = record["imeth"]
# INTERNAL FLOW TERMS ARE USED TO CALCULATE FLOW BETWEEN ZONES.
# CONSTANT-HEAD TERMS ARE USED TO IDENTIFY WHERE CONSTANT-HEAD CELLS
@@ -225,9 +213,7 @@ def __init__(
if verbose:
s = (
"Computing the budget for"
- " time step {} in stress period {}".format(
- kk[0] + 1, kk[1] + 1
- )
+ " time step {} in stress period {}".format(kk[0] + 1, kk[1] + 1)
)
print(s)
self._compute_budget(kstpkper=kk)
@@ -269,10 +255,7 @@ def _compute_budget(self, kstpkper=None, totim=None):
C-----FLOW. STORE CONSTANT-HEAD LOCATIONS IN ICH ARRAY.
"""
chd = self.cbc.get_data(
- text="CONSTANT HEAD",
- full3D=True,
- kstpkper=kstpkper,
- totim=totim,
+ text="CONSTANT HEAD", full3D=True, kstpkper=kstpkper, totim=totim
)[0]
ich[np.ma.where(chd != 0.0)] = 1
if "FLOW RIGHT FACE" in self.record_names:
@@ -304,9 +287,7 @@ def _compute_budget(self, kstpkper=None, totim=None):
return
- def _add_empty_record(
- self, recordarray, recname, kstpkper=None, totim=None
- ):
+ def _add_empty_record(self, recordarray, recname, kstpkper=None, totim=None):
"""
        Build an empty record based on the specified flow direction and
        record name for the given list of zones.
@@ -366,9 +347,7 @@ def _initialize_budget_recordarray(self, kstpkper=None, totim=None):
("stress_period", "= 2:
- data = self.cbc.get_data(
- text=recname, kstpkper=kstpkper, totim=totim
- )[0]
+ data = self.cbc.get_data(text=recname, kstpkper=kstpkper, totim=totim)[
+ 0
+ ]
# "FLOW RIGHT FACE" COMPUTE FLOW BETWEEN ZONES ACROSS COLUMNS.
# COMPUTE FLOW ONLY BETWEEN A ZONE AND A HIGHER ZONE -- FLOW FROM
@@ -617,9 +577,7 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1))
).nonzero()
fzi, tzi, fi = sum_flux_tuples(nzl[idx], nz[idx], q[idx])
- self._update_budget_fromfaceflow(
- fzi, tzi, np.abs(fi), kstpkper, totim
- )
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
# Get indices with negative flow face values (into higher zone)
# Don't include CH to CH flow (can occur if CHTOCH option is used)
@@ -629,9 +587,7 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1))
).nonzero()
fzi, tzi, fi = sum_flux_tuples(nz[idx], nzl[idx], q[idx])
- self._update_budget_fromfaceflow(
- fzi, tzi, np.abs(fi), kstpkper, totim
- )
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
# FLOW BETWEEN NODE J,I,K AND J+1,I,K
k, i, j = np.asarray(
@@ -656,9 +612,7 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1))
).nonzero()
fzi, tzi, fi = sum_flux_tuples(nz[idx], nzr[idx], q[idx])
- self._update_budget_fromfaceflow(
- fzi, tzi, np.abs(fi), kstpkper, totim
- )
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
# Get indices with negative flow face values (into higher zone)
# Don't include CH to CH flow (can occur if CHTOCH option is used)
@@ -668,9 +622,7 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1))
).nonzero()
fzi, tzi, fi = sum_flux_tuples(nzr[idx], nz[idx], q[idx])
- self._update_budget_fromfaceflow(
- fzi, tzi, np.abs(fi), kstpkper, totim
- )
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
# CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
k, i, j = np.asarray(ich == 1).nonzero()
@@ -685,9 +637,7 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
fzi, tzi, f = sum_flux_tuples(nzl[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
- self._update_budget_fromssst(
- fz, tz, np.abs(f), kstpkper, totim
- )
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
idx = np.asarray(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1))
@@ -695,9 +645,7 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
fzi, tzi, f = sum_flux_tuples(nzl[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi[tzi != 0]]
- self._update_budget_fromssst(
- fz, tz, np.abs(f), kstpkper, totim
- )
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
k, i, j = np.asarray(ich == 1).nonzero()
k, i, j = (
@@ -715,9 +663,7 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
fzi, tzi, f = sum_flux_tuples(nzr[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
- self._update_budget_fromssst(
- fz, tz, np.abs(f), kstpkper, totim
- )
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
idx = np.asarray(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1))
@@ -725,9 +671,7 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
fzi, tzi, f = sum_flux_tuples(nzr[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
- self._update_budget_fromssst(
- fz, tz, np.abs(f), kstpkper, totim
- )
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
except Exception as e:
print(e)
@@ -749,9 +693,9 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
"""
try:
if self.nrow >= 2:
- data = self.cbc.get_data(
- text=recname, kstpkper=kstpkper, totim=totim
- )[0]
+ data = self.cbc.get_data(text=recname, kstpkper=kstpkper, totim=totim)[
+ 0
+ ]
# "FLOW FRONT FACE"
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I-1,K
@@ -767,17 +711,13 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1))
).nonzero()
fzi, tzi, fi = sum_flux_tuples(nza[idx], nz[idx], q[idx])
- self._update_budget_fromfaceflow(
- fzi, tzi, np.abs(fi), kstpkper, totim
- )
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
idx = np.asarray(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1))
).nonzero()
fzi, tzi, fi = sum_flux_tuples(nz[idx], nza[idx], q[idx])
- self._update_budget_fromfaceflow(
- fzi, tzi, np.abs(fi), kstpkper, totim
- )
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I+1,K.
k, i, j = np.asarray(
@@ -791,17 +731,13 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1))
).nonzero()
fzi, tzi, fi = sum_flux_tuples(nz[idx], nzb[idx], q[idx])
- self._update_budget_fromfaceflow(
- fzi, tzi, np.abs(fi), kstpkper, totim
- )
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
idx = np.asarray(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1))
).nonzero()
fzi, tzi, fi = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
- self._update_budget_fromfaceflow(
- fzi, tzi, np.abs(fi), kstpkper, totim
- )
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
# CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
k, i, j = np.asarray(ich == 1).nonzero()
@@ -816,9 +752,7 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
- self._update_budget_fromssst(
- fz, tz, np.abs(f), kstpkper, totim
- )
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
idx = np.asarray(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1))
@@ -826,9 +760,7 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
- self._update_budget_fromssst(
- fz, tz, np.abs(f), kstpkper, totim
- )
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
k, i, j = np.asarray(ich == 1).nonzero()
k, i, j = (
@@ -846,9 +778,7 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
- self._update_budget_fromssst(
- fz, tz, np.abs(f), kstpkper, totim
- )
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
idx = np.asarray(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1))
@@ -856,9 +786,7 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
- self._update_budget_fromssst(
- fz, tz, np.abs(f), kstpkper, totim
- )
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
except Exception as e:
print(e)
@@ -881,9 +809,9 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
"""
try:
if self.nlay >= 2:
- data = self.cbc.get_data(
- text=recname, kstpkper=kstpkper, totim=totim
- )[0]
+ data = self.cbc.get_data(text=recname, kstpkper=kstpkper, totim=totim)[
+ 0
+ ]
# "FLOW LOWER FACE"
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K-1
@@ -899,17 +827,13 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
(q > 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1))
).nonzero()
fzi, tzi, fi = sum_flux_tuples(nza[idx], nz[idx], q[idx])
- self._update_budget_fromfaceflow(
- fzi, tzi, np.abs(fi), kstpkper, totim
- )
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
idx = np.asarray(
(q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1))
).nonzero()
fzi, tzi, fi = sum_flux_tuples(nz[idx], nza[idx], q[idx])
- self._update_budget_fromfaceflow(
- fzi, tzi, np.abs(fi), kstpkper, totim
- )
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K+1
k, i, j = np.asarray(
@@ -923,17 +847,13 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
(q > 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1))
).nonzero()
fzi, tzi, fi = sum_flux_tuples(nz[idx], nzb[idx], q[idx])
- self._update_budget_fromfaceflow(
- fzi, tzi, np.abs(fi), kstpkper, totim
- )
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
idx = np.asarray(
(q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1))
).nonzero()
fzi, tzi, fi = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
- self._update_budget_fromfaceflow(
- fzi, tzi, np.abs(fi), kstpkper, totim
- )
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
# CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
k, i, j = np.asarray(ich == 1).nonzero()
@@ -948,9 +868,7 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
- self._update_budget_fromssst(
- fz, tz, np.abs(f), kstpkper, totim
- )
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
idx = np.asarray(
(q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1))
@@ -958,9 +876,7 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
- self._update_budget_fromssst(
- fz, tz, np.abs(f), kstpkper, totim
- )
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
k, i, j = np.asarray(ich == 1).nonzero()
k, i, j = (
@@ -978,9 +894,7 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
- self._update_budget_fromssst(
- fz, tz, np.abs(f), kstpkper, totim
- )
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
idx = np.asarray(
(q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1))
@@ -988,9 +902,7 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
- self._update_budget_fromssst(
- fz, tz, np.abs(f), kstpkper, totim
- )
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
except Exception as e:
print(e)
@@ -1013,12 +925,8 @@ def _accumulate_flow_ssst(self, recname, kstpkper, totim):
if imeth == 2 or imeth == 5:
# LIST
- qin = np.ma.zeros(
- (self.nlay * self.nrow * self.ncol), self.float_type
- )
- qout = np.ma.zeros(
- (self.nlay * self.nrow * self.ncol), self.float_type
- )
+ qin = np.ma.zeros((self.nlay * self.nrow * self.ncol), self.float_type)
+ qout = np.ma.zeros((self.nlay * self.nrow * self.ncol), self.float_type)
for [node, q] in zip(data["node"], data["q"]):
idx = node - 1
if q > 0:
@@ -1053,9 +961,7 @@ def _accumulate_flow_ssst(self, recname, kstpkper, totim):
qout[0, r, c] = data[r, c]
else:
# Should not happen
- raise Exception(
- f'Unrecognized "imeth" for {recname} record: {imeth}'
- )
+ raise Exception(f'Unrecognized "imeth" for {recname} record: {imeth}')
# Inflows
fz = []
@@ -1111,13 +1017,9 @@ def _compute_mass_balance(self, kstpkper, totim):
(self._budget["totim"] == totim)
& np.in1d(self._budget["name"], innames)
).nonzero()
- a = _numpyvoid2numeric(
- self._budget[list(self._zonenamedict.values())][rowidx]
- )
+ a = _numpyvoid2numeric(self._budget[list(self._zonenamedict.values())][rowidx])
intot = np.array(a.sum(axis=0))
- tz = np.array(
- list([n for n in self._budget.dtype.names if n not in skipcols])
- )
+ tz = np.array([n for n in self._budget.dtype.names if n not in skipcols])
fz = np.array(["TOTAL_IN"] * len(tz))
self._update_budget_fromssst(fz, tz, intot, kstpkper, totim)
@@ -1133,28 +1035,20 @@ def _compute_mass_balance(self, kstpkper, totim):
(self._budget["totim"] == totim)
& np.in1d(self._budget["name"], outnames)
).nonzero()
- a = _numpyvoid2numeric(
- self._budget[list(self._zonenamedict.values())][rowidx]
- )
+ a = _numpyvoid2numeric(self._budget[list(self._zonenamedict.values())][rowidx])
outot = np.array(a.sum(axis=0))
- tz = np.array(
- list([n for n in self._budget.dtype.names if n not in skipcols])
- )
+ tz = np.array([n for n in self._budget.dtype.names if n not in skipcols])
fz = np.array(["TOTAL_OUT"] * len(tz))
self._update_budget_fromssst(fz, tz, outot, kstpkper, totim)
# Compute IN-OUT
- tz = np.array(
- list([n for n in self._budget.dtype.names if n not in skipcols])
- )
+ tz = np.array([n for n in self._budget.dtype.names if n not in skipcols])
f = intot - outot
fz = np.array(["IN-OUT"] * len(tz))
self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
# Compute percent discrepancy
- tz = np.array(
- list([n for n in self._budget.dtype.names if n not in skipcols])
- )
+ tz = np.array([n for n in self._budget.dtype.names if n not in skipcols])
fz = np.array(["PERCENT_DISCREPANCY"] * len(tz))
in_minus_out = intot - outot
in_plus_out = intot + outot
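The hunk above ends by assembling the inputs for ZoneBudget's budget-closure record. For reference, the conventional MODFLOW percent-discrepancy these variables feed, sketched with the names from the context lines (a sketch of the conventional formula, not a verbatim excerpt):

    # percent discrepancy = 100 * (IN - OUT) / ((IN + OUT) / 2)
    percent_discrepancy = 100.0 * in_minus_out / (in_plus_out / 2.0)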
@@ -1233,9 +1127,7 @@ def get_budget(self, names=None, zones=None, net=False, pivot=False):
return recarray
- def get_volumetric_budget(
- self, modeltime, recarray=None, extrapolate_kper=False
- ):
+ def get_volumetric_budget(self, modeltime, recarray=None, extrapolate_kper=False):
"""
Method to generate a volumetric budget table based on flux information
@@ -1288,10 +1180,7 @@ def to_csv(self, fname):
f.write(",".join(self._budget.dtype.names) + "\n")
# Write rows
for rowidx in range(self._budget.shape[0]):
- s = (
- ",".join([str(i) for i in list(self._budget[:][rowidx])])
- + "\n"
- )
+ s = ",".join([str(i) for i in list(self._budget[:][rowidx])]) + "\n"
f.write(s)
return
@@ -1632,10 +1521,7 @@ def write_zone_file(cls, fname, array, fmtin=None, iprn=None):
end = start + fmtin
vals = rowvals[start:end]
while len(vals) > 0:
- s = (
- "".join([formatter(int(val)) for val in vals])
- + "\n"
- )
+ s = "".join([formatter(int(val)) for val in vals]) + "\n"
f.write(s)
start = end
end = start + fmtin
@@ -1644,10 +1530,7 @@ def write_zone_file(cls, fname, array, fmtin=None, iprn=None):
elif fmtin == ncol:
for row in range(nrow):
vals = array[lay, row, :].ravel()
- f.write(
- "".join([formatter(int(val)) for val in vals])
- + "\n"
- )
+ f.write("".join([formatter(int(val)) for val in vals]) + "\n")
def copy(self):
"""
@@ -1679,8 +1562,7 @@ def export(self, f, ml, **kwargs):
if isinstance(f, str):
if not f.endswith(".nc"):
raise AssertionError(
- "File extension must end with .nc to "
- "export a netcdf file"
+ "File extension must end with .nc to export a netcdf file"
)
zbncfobj = dataframe_to_netcdf_fmt(
@@ -1711,7 +1593,7 @@ def __deepcopy__(self, memo):
def __mul__(self, other):
newbud = self._budget.copy()
for f in self._zonenamedict.values():
- newbud[f] = np.array([r for r in newbud[f]]) * other
+ newbud[f] = np.array(list(newbud[f])) * other
idx = np.isin(self._budget["name"], "PERCENT_DISCREPANCY")
newbud[:][idx] = self._budget[:][idx]
newobj = self.copy()
@@ -1721,7 +1603,7 @@ def __mul__(self, other):
def __truediv__(self, other):
newbud = self._budget.copy()
for f in self._zonenamedict.values():
- newbud[f] = np.array([r for r in newbud[f]]) / float(other)
+ newbud[f] = np.array(list(newbud[f])) / float(other)
idx = np.isin(self._budget["name"], "PERCENT_DISCREPANCY")
newbud[:][idx] = self._budget[:][idx]
newobj = self.copy()
@@ -1731,7 +1613,7 @@ def __truediv__(self, other):
def __div__(self, other):
newbud = self._budget.copy()
for f in self._zonenamedict.values():
- newbud[f] = np.array([r for r in newbud[f]]) / float(other)
+ newbud[f] = np.array(list(newbud[f])) / float(other)
idx = np.isin(self._budget["name"], "PERCENT_DISCREPANCY")
newbud[:][idx] = self._budget[:][idx]
newobj = self.copy()
@@ -1741,7 +1623,7 @@ def __div__(self, other):
def __add__(self, other):
newbud = self._budget.copy()
for f in self._zonenamedict.values():
- newbud[f] = np.array([r for r in newbud[f]]) + other
+ newbud[f] = np.array(list(newbud[f])) + other
idx = np.isin(self._budget["name"], "PERCENT_DISCREPANCY")
newbud[:][idx] = self._budget[:][idx]
newobj = self.copy()
@@ -1751,7 +1633,7 @@ def __add__(self, other):
def __sub__(self, other):
newbud = self._budget.copy()
for f in self._zonenamedict.values():
- newbud[f] = np.array([r for r in newbud[f]]) - other
+ newbud[f] = np.array(list(newbud[f])) - other
idx = np.isin(self._budget["name"], "PERCENT_DISCREPANCY")
newbud[:][idx] = self._budget[:][idx]
newobj = self.copy()
@@ -1834,9 +1716,7 @@ def run_model(self, exe_name=None, nam_file=None, silent=False):
exe_name = self._exe_name
if nam_file is None:
nam_file = os.path.join(self._name + self._extension)
- return run_model(
- exe_name, nam_file, model_ws=self._model_ws, silent=silent
- )
+ return run_model(exe_name, nam_file, model_ws=self._model_ws, silent=silent)
def __setattr__(self, key, value):
if key in ("zon", "bud", "grb", "cbc"):
@@ -1870,9 +1750,7 @@ def add_package(self, pkg_name, pkg):
if pkg_name == "cbc":
pkg_name = "bud"
else:
- raise KeyError(
- f"{pkg_name} package is not valid for zonebudget"
- )
+ raise KeyError(f"{pkg_name} package is not valid for zonebudget")
if isinstance(pkg, str):
if os.path.exists(os.path.join(self._model_ws, pkg)):
@@ -1965,9 +1843,7 @@ def get_dataframes(
>>> df = zb6.get_dataframes()
"""
- recarray = self.get_budget(
- names=names, zones=zones, net=net, pivot=pivot
- )
+ recarray = self.get_budget(names=names, zones=zones, net=net, pivot=pivot)
return _recarray_to_dataframe(
recarray,
@@ -1979,9 +1855,7 @@ def get_dataframes(
pivot=pivot,
)
- def get_budget(
- self, f=None, names=None, zones=None, net=False, pivot=False
- ):
+ def get_budget(self, f=None, names=None, zones=None, net=False, pivot=False):
"""
Method to read and get zonebudget output
@@ -2009,15 +1883,11 @@ def get_budget(
if f is None and self._recarray is None:
f = os.path.join(self._model_ws, f"{self._name}.csv")
- self._recarray = _read_zb_csv2(
- f, add_prefix=False, aliases=aliases
- )
+ self._recarray = _read_zb_csv2(f, add_prefix=False, aliases=aliases)
elif f is None:
pass
else:
- self._recarray = _read_zb_csv2(
- f, add_prefix=False, aliases=aliases
- )
+ self._recarray = _read_zb_csv2(f, add_prefix=False, aliases=aliases)
recarray = _get_budget(
self._recarray,
@@ -2032,9 +1902,7 @@ def get_budget(
return recarray
- def get_volumetric_budget(
- self, modeltime, recarray=None, extrapolate_kper=False
- ):
+ def get_volumetric_budget(self, modeltime, recarray=None, extrapolate_kper=False):
"""
Method to generate a volumetric budget table based on flux information
@@ -2147,8 +2015,7 @@ def export(self, f, ml, **kwargs):
f = str(f)
if not f.endswith(".nc"):
raise AssertionError(
- "File extension must end with .nc to "
- "export a netcdf file"
+ "File extension must end with .nc to export a netcdf file"
)
zbncfobj = dataframe_to_netcdf_fmt(
@@ -2229,8 +2096,7 @@ def write_input(self, f=None, line_length=20):
with open(f, "w") as foo:
bfmt = [" {:d}"]
foo.write(
- f"BEGIN DIMENSIONS\n NCELLS {self.ncells}\n"
- "END DIMENSIONS\n\n"
+ f"BEGIN DIMENSIONS\n NCELLS {self.ncells}\nEND DIMENSIONS\n\n"
)
foo.write("BEGIN GRIDDATA\n IZONE\n")
@@ -2289,7 +2155,7 @@ def load(f: Union[str, os.PathLike], model):
if method == "open/close":
fobj = open(os.path.join(pkg_ws, t[1]))
while i < ncells:
- t = multi_line_strip(fobj)
+ t = multi_line_strip(fobj).split()
if t[0] == "open/close":
if fobj != foo:
fobj.close()
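Unlike the reflow-only hunks, this change is behavioral: multi_line_strip returns a single lowercased string, so the old t[0] == "open/close" test compared just the first character of the line and could never match; calling .split() first restores token-wise parsing of OPEN/CLOSE records in zone files.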
@@ -2393,17 +2259,13 @@ def _recarray_to_dataframe(
elif timeunit.upper() == "YEARS":
timeunit = "Y"
- errmsg = (
- f"Specified time units ({timeunit}) not recognized. Please use one of "
- )
+ errmsg = f"Specified time units ({timeunit}) not recognized. Please use one of "
assert timeunit in valid_timeunit, errmsg + ", ".join(valid_timeunit) + "."
df = pd.DataFrame().from_records(recarray)
if start_datetime is not None and "totim" in list(df):
totim = totim_to_datetime(
- df.totim,
- start=pd.to_datetime(start_datetime),
- timeunit=timeunit,
+ df.totim, start=pd.to_datetime(start_datetime), timeunit=timeunit
)
df["datetime"] = totim
if pivot:
@@ -2565,12 +2427,8 @@ def _compute_net_budget(recarray, zonenamedict):
:return:
"""
recnames = _get_record_names(recarray)
- innames = [
- n for n in recnames if n.startswith("FROM_") or n.endswith("_IN")
- ]
- outnames = [
- n for n in recnames if n.startswith("TO_") or n.endswith("_OUT")
- ]
+ innames = [n for n in recnames if n.startswith("FROM_") or n.endswith("_IN")]
+ outnames = [n for n in recnames if n.startswith("TO_") or n.endswith("_OUT")]
select_fields = ["totim", "time_step", "stress_period", "name"] + list(
zonenamedict.values()
)
@@ -2583,9 +2441,7 @@ def _compute_net_budget(recarray, zonenamedict):
out_budget = recarray[select_fields][select_records_out]
net_budget = in_budget.copy()
for f in [n for n in zonenamedict.values() if n in select_fields]:
- net_budget[f] = np.array([r for r in in_budget[f]]) - np.array(
- [r for r in out_budget[f]]
- )
+ net_budget[f] = np.array(list(in_budget[f])) - np.array(list(out_budget[f]))
newnames = []
for n in net_budget["name"]:
if n.endswith("_IN") or n.endswith("_OUT"):
@@ -2944,8 +2800,7 @@ def _pivot_recarray(recarray):
n = 0
for kstp, kper in kstp_kper:
idxs = np.asarray(
- (recarray["time_step"] == kstp)
- & (recarray["stress_period"] == kper)
+ (recarray["time_step"] == kstp) & (recarray["stress_period"] == kper)
).nonzero()
if len(idxs) == 0:
pass
@@ -3110,13 +2965,7 @@ def dataframe_to_netcdf_fmt(df, zone_array, flux=True):
data[col] = np.zeros((totim.size, zones.size), dtype=float)
for i, time in enumerate(totim):
- tdf = df.loc[
- df.totim.isin(
- [
- time,
- ]
- )
- ]
+ tdf = df.loc[df.totim.isin([time])]
tdf = tdf.sort_values(by=["zone"])
for col in df.columns:
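Apart from the multi_line_strip fix noted above, the zonbud.py hunks are mechanical reflows to the new 88-character line length. For orientation, a minimal sketch of the ZoneBudget API these methods belong to (the budget file name and zone layout are placeholders, not part of this changeset):

    import numpy as np
    import flopy

    # Hypothetical two-zone array for a 1-layer, 40-row, 20-column grid.
    zon = np.ones((1, 40, 20), dtype=int)
    zon[0, 20:, :] = 2
    zb = flopy.utils.ZoneBudget("model.cbc", zon, kstpkper=(0, 0))
    df = zb.get_dataframes()     # budget records as a pandas DataFrame
    zb_days = zb * 86400.0       # the __mul__ overload scales every zone column
    zb.to_csv("zonebudget.csv")  # the reflowed CSV writer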
diff --git a/flopy/version.py b/flopy/version.py
index cd0fda2a16..8763a751ec 100644
--- a/flopy/version.py
+++ b/flopy/version.py
@@ -1,4 +1,4 @@
# flopy version file automatically created using
-# update_version.py on October 03, 2024 12:12:12
+# update_version.py on December 20, 2024 01:37:30

-__version__ = "3.8.2"
+__version__ = "3.9.0"
diff --git a/pyproject.toml b/pyproject.toml
index f18d5cd164..ade0dbaf16 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -20,14 +20,14 @@ classifiers = [
"Intended Audience :: Science/Research",
"License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication",
"Programming Language :: Python :: 3 :: Only",
- "Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
+ "Programming Language :: Python :: 3.13",
"Topic :: Scientific/Engineering :: Hydrology",
]
-requires-python = ">=3.8"
+requires-python = ">=3.9"
dependencies = [
"numpy>=1.20.3",
"matplotlib >=1.4.0",
@@ -36,14 +36,11 @@ dependencies = [
dynamic = ["version", "readme"]

[project.optional-dependencies]
-dev = ["flopy[lint,test,optional,doc]"]
-lint = [
- "cffconvert",
- "ruff"
-]
+dev = ["flopy[lint,test,optional,doc]", "tach"]
+lint = ["cffconvert", "codespell[toml] >=2.2.2", "ruff"]
test = [
"flopy[lint]",
- "coverage",
+ "coverage !=7.6.5",
"flaky",
"filelock",
"jupyter",
@@ -65,8 +62,10 @@ optional = [
"fiona",
"geojson",
"geopandas",
+ "GitPython",
"imageio",
"netcdf4",
+ "pooch",
"pymetis ; platform_system != 'Windows'",
"pyproj",
"pyshp",
@@ -74,7 +73,7 @@ optional = [
"rasterio",
"rasterstats",
"scipy",
- "shapely >=1.8",
+ "shapely >=2.0",
"vtk",
"xmipy",
]
@@ -116,8 +115,7 @@ include = ["flopy", "flopy.*"]
"flopy.plot" = ["mplstyle/*.mplstyle"]
[tool.ruff]
-line-length = 79
-target-version = "py38"
+line-length = 88
include = [
"pyproject.toml",
"flopy/**/*.py",
@@ -130,16 +128,23 @@ extend-include = [
"examples/**/*.ipynb"
]

+[tool.ruff.format]
+exclude = [
+ "flopy/mf6/**/*.py",
+]
+
[tool.ruff.lint]
select = [
+ "C4", # flake8 comprehensions
"D409", # pydocstyle - section-underline-matches-section-length
"E", # pycodestyle error
"F", # Pyflakes
"I001", # isort - unsorted-imports
+ # "ISC001", # implicitly concatenated string literals
+ "RUF", # Ruff-specific rules
]
ignore = [
"E402", # module level import not at top of file
- "E501", # line too long TODO FIXME
"E712", # Avoid equality comparisons to `True`
"E722", # do not use bare `except`
"E721", # use `is`/`is not` for type comparisons
@@ -148,6 +153,37 @@ ignore = [
"F403", # unable to detect undefined names (star imports)
"F524", # `.format` missing argument(s) for placeholder(s)
"F811", # Redefinition of unused variable
- "F821", # undefined name TODO FIXME
"F841", # local variable assigned but never used
-]
\ No newline at end of file
+ "RUF005", # collection literal concatenation
+ "RUF012", # mutable class default
+ "RUF017", # quadratic-list-summation
+]
+
+[tool.ruff.lint.per-file-ignores]
+".docs/**/*.py" = ["E501"]
+"flopy/mf6/**/*.py" = ["C4", "E", "F", "ISC", "RUF"]
+
+[tool.codespell]
+skip = "cliff.toml,./examples/data/*"
+ignore-words-list = [
+ "alltime",
+ "dum",
+ "inout",
+ "intot",
+ "delt",
+ "gage",
+ "gages",
+ "datbase",
+ "wel",
+ "nam",
+ "lke",
+ "ist",
+ "ninj",
+ "drob",
+ "thck",
+ "vor",
+ "yur",
+ "localy",
+ "vertx",
+ "nd",
+]
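requires-python now excludes 3.8 and the classifiers advertise 3.13; ruff gains a formatter section and extra lint rules, and codespell is configured directly in pyproject.toml. Downstream scripts that pin flopy >= 3.9.0 can assert the new floor explicitly; a defensive sketch (only the version floor is taken from this changeset):

    import sys

    # flopy 3.9.0 drops Python 3.8 support (requires-python = ">=3.9").
    if sys.version_info < (3, 9):
        raise RuntimeError("flopy >= 3.9.0 requires Python 3.9 or newer")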
diff --git a/scripts/process_benchmarks.py b/scripts/process_benchmarks.py
index b934c5b336..c781753334 100644
--- a/scripts/process_benchmarks.py
+++ b/scripts/process_benchmarks.py
@@ -19,7 +19,7 @@
def get_benchmarks(paths):
- benchmarks = list()
+ benchmarks = []
num_benchmarks = 0
for path in paths:
@@ -34,10 +34,7 @@ def get_benchmarks(paths):
for benchmark in bmarks:
num_benchmarks += 1
fullname = benchmark["fullname"]
- included = [
- "min",
- "mean",
- ]
+ included = ["min", "mean"]
for stat, value in benchmark["stats"].items():
if stat not in included:
continue
@@ -73,9 +70,7 @@ def matplotlib_plot(stats):
# markers according to system
systems = np.unique(benchmarks_df["system"])
markers = dict(zip(systems, ["x", "o", "s"])) # osx, linux, windows
- benchmarks_df["marker"] = benchmarks_df["system"].apply(
- lambda x: markers[x]
- )
+ benchmarks_df["marker"] = benchmarks_df["system"].apply(lambda x: markers[x])
for i, (stat_name, stat_group) in enumerate(stats):
stat_df = pd.DataFrame(stat_group)
@@ -91,15 +86,8 @@ def matplotlib_plot(stats):
for pi, python in enumerate(pythons):
psub = ssub[ssub["python"] == python]
color = colors[python]
- ax.scatter(
- psub["time"], psub["value"], color=color, marker=marker
- )
- ax.plot(
- psub["time"],
- psub["value"],
- linestyle="dotted",
- color=color,
- )
+ ax.scatter(psub["time"], psub["value"], color=color, marker=marker)
+ ax.plot(psub["time"], psub["value"], linestyle="dotted", color=color)
# configure legend
patches = []
diff --git a/scripts/update_version.py b/scripts/update_version.py
index 619a9e133f..c30164efab 100644
--- a/scripts/update_version.py
+++ b/scripts/update_version.py
@@ -146,11 +146,7 @@ def update_citation_cff(timestamp: datetime, version: Version):
# write CITATION.cff
with open(fpth, "w") as f:
yaml.safe_dump(
- citation,
- f,
- allow_unicode=True,
- default_flow_style=False,
- sort_keys=False,
+ citation, f, allow_unicode=True, default_flow_style=False, sort_keys=False
)
print(f"Updated {fpth} to version {version}")
@@ -226,7 +222,5 @@ def update_version(
else:
update_version(
timestamp=datetime.now(),
- version=(
- Version(args.version) if args.version else _current_version
- ),
+ version=(Version(args.version) if args.version else _current_version),
)
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 1dc2c579d6..0000000000
--- a/setup.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from setuptools import setup
-
-# See pyproject.toml for project metadata
-setup(name="flopy")
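With the setup.py shim deleted, packaging metadata lives entirely in pyproject.toml; plain and editable installs (python -m pip install -e .) should keep working through the pyproject build backend, assuming a setuptools recent enough for PEP 660.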
diff --git a/tach.toml b/tach.toml
new file mode 100644
index 0000000000..10330b5a2e
--- /dev/null
+++ b/tach.toml
@@ -0,0 +1,694 @@
+exclude = [
+ ".*__pycache__",
+ ".*egg-info",
+ "autotest",
+ "docs",
+]
+source_roots = [
+ ".",
+]
+root_module = "ignore"
+
+[[modules]]
+path = "flopy.datbase"
+depends_on = []
+
+[[modules]]
+path = "flopy.discretization.grid"
+depends_on = [
+ { path = "flopy.export.shapefile_utils" },
+ { path = "flopy.utils.crs" },
+ { path = "flopy.utils.geometry" },
+ { path = "flopy.utils.geospatial_utils" },
+ { path = "flopy.utils.gridutil" },
+]
+
+[[modules]]
+path = "flopy.discretization.modeltime"
+depends_on = []
+
+[[modules]]
+path = "flopy.discretization.structuredgrid"
+depends_on = [
+ { path = "flopy.discretization.grid" },
+ { path = "flopy.mf6.utils.binarygrid_util" },
+]
+
+[[modules]]
+path = "flopy.discretization.unstructuredgrid"
+depends_on = [
+ { path = "flopy.discretization.grid" },
+ { path = "flopy.mf6.utils.binarygrid_util" },
+ { path = "flopy.utils.geometry" },
+ { path = "flopy.utils.gridgen" },
+]
+
+[[modules]]
+path = "flopy.discretization.vertexgrid"
+depends_on = [
+ { path = "flopy.discretization.grid" },
+ { path = "flopy.mf6.utils.binarygrid_util" },
+ { path = "flopy.utils.geometry" },
+]
+
+[[modules]]
+path = "flopy.export.longnames"
+depends_on = []
+
+[[modules]]
+path = "flopy.export.metadata"
+depends_on = [
+ { path = "flopy.utils.flopy_io" },
+]
+
+[[modules]]
+path = "flopy.export.netcdf"
+depends_on = [
+ { path = "flopy.export.longnames" },
+ { path = "flopy.export.metadata" },
+ { path = "flopy.utils.crs" },
+ { path = "flopy.utils.parse_version" },
+]
+
+[[modules]]
+path = "flopy.export.shapefile_utils"
+depends_on = [
+ { path = "flopy.datbase" },
+ { path = "flopy.discretization.grid" },
+ { path = "flopy.utils.crs" },
+ { path = "flopy.utils.flopy_io" },
+ { path = "flopy.utils.geospatial_utils" },
+]
+
+[[modules]]
+path = "flopy.export.unitsformat"
+depends_on = []
+
+[[modules]]
+path = "flopy.export.utils"
+depends_on = [
+ { path = "flopy.datbase" },
+ { path = "flopy.export.longnames" },
+ { path = "flopy.export.netcdf" },
+ { path = "flopy.export.shapefile_utils" },
+ { path = "flopy.export.unitsformat" },
+ { path = "flopy.export.vtk" },
+ { path = "flopy.mbase" },
+ { path = "flopy.pakbase" },
+ { path = "flopy.utils.crs" },
+ { path = "flopy.utils.flopy_io" },
+ { path = "flopy.utils.geometry" },
+]
+
+[[modules]]
+path = "flopy.export.vtk"
+depends_on = [
+ { path = "flopy.datbase" },
+]
+
+[[modules]]
+path = "flopy.mbase"
+depends_on = [
+ { path = "flopy.discretization.grid" },
+ { path = "flopy.export.utils" },
+ { path = "flopy.utils.flopy_io" },
+]
+
+[[modules]]
+path = "flopy.mf6.coordinates.modeldimensions"
+depends_on = [
+ { path = "flopy.mf6.coordinates.modelgrid" },
+ { path = "flopy.mf6.coordinates.simulationtime" },
+ { path = "flopy.mf6.data.mfstructure" },
+ { path = "flopy.mf6.mfbase" },
+ { path = "flopy.mf6.utils.mfenums" },
+ { path = "flopy.utils.datautil" },
+]
+
+[[modules]]
+path = "flopy.mf6.coordinates.modelgrid"
+depends_on = [
+ { path = "flopy.mf6.data.mfstructure" },
+ { path = "flopy.mf6.utils.mfenums" },
+]
+
+[[modules]]
+path = "flopy.mf6.coordinates.simulationtime"
+depends_on = [
+ { path = "flopy.mf6.mfbase" },
+]
+
+[[modules]]
+path = "flopy.mf6.data.mfdata"
+depends_on = [
+ { path = "flopy.datbase" },
+ { path = "flopy.export.utils" },
+ { path = "flopy.mbase" },
+ { path = "flopy.mf6.coordinates.modeldimensions" },
+ { path = "flopy.mf6.data.mfdatastorage" },
+ { path = "flopy.mf6.data.mfdatautil" },
+ { path = "flopy.mf6.data.mfstructure" },
+ { path = "flopy.mf6.mfbase" },
+ { path = "flopy.utils.datautil" },
+]
+
+[[modules]]
+path = "flopy.mf6.data.mfdataarray"
+depends_on = [
+ { path = "flopy.datbase" },
+ { path = "flopy.mf6.data.mfdata" },
+ { path = "flopy.mf6.data.mfdatastorage" },
+ { path = "flopy.mf6.data.mffileaccess" },
+ { path = "flopy.mf6.data.mfstructure" },
+ { path = "flopy.mf6.mfbase" },
+ { path = "flopy.mf6.utils.mfenums" },
+ { path = "flopy.plot.plotutil" },
+ { path = "flopy.utils.datautil" },
+]
+
+[[modules]]
+path = "flopy.mf6.data.mfdatalist"
+depends_on = [
+ { path = "flopy.datbase" },
+ { path = "flopy.mbase" },
+ { path = "flopy.mf6.data.mfdata" },
+ { path = "flopy.mf6.data.mfdatastorage" },
+ { path = "flopy.mf6.data.mfdatautil" },
+ { path = "flopy.mf6.data.mffileaccess" },
+ { path = "flopy.mf6.data.mfstructure" },
+ { path = "flopy.mf6.mfbase" },
+ { path = "flopy.mf6.utils.mfenums" },
+ { path = "flopy.utils.datautil" },
+]
+
+[[modules]]
+path = "flopy.mf6.data.mfdataplist"
+depends_on = [
+ { path = "flopy.datbase" },
+ { path = "flopy.discretization.structuredgrid" },
+ { path = "flopy.discretization.unstructuredgrid" },
+ { path = "flopy.discretization.vertexgrid" },
+ { path = "flopy.mf6.data.mfdata" },
+ { path = "flopy.mf6.data.mfdatalist" },
+ { path = "flopy.mf6.data.mfdatastorage" },
+ { path = "flopy.mf6.data.mfdatautil" },
+ { path = "flopy.mf6.data.mffileaccess" },
+ { path = "flopy.mf6.data.mfstructure" },
+ { path = "flopy.mf6.mfbase" },
+ { path = "flopy.mf6.utils.mfenums" },
+ { path = "flopy.utils.datautil" },
+]
+
+[[modules]]
+path = "flopy.mf6.data.mfdatascalar"
+depends_on = [
+ { path = "flopy.datbase" },
+ { path = "flopy.mf6.data.mfdata" },
+ { path = "flopy.mf6.data.mfdatastorage" },
+ { path = "flopy.mf6.data.mfdatautil" },
+ { path = "flopy.mf6.data.mffileaccess" },
+ { path = "flopy.mf6.data.mfstructure" },
+ { path = "flopy.mf6.mfbase" },
+ { path = "flopy.plot.plotutil" },
+ { path = "flopy.utils.datautil" },
+]
+
+[[modules]]
+path = "flopy.mf6.data.mfdatastorage"
+depends_on = [
+ { path = "flopy.mf6.data.mfdatautil" },
+ { path = "flopy.mf6.data.mffileaccess" },
+ { path = "flopy.mf6.data.mfstructure" },
+ { path = "flopy.mf6.mfbase" },
+ { path = "flopy.utils.datautil" },
+]
+
+[[modules]]
+path = "flopy.mf6.data.mfdatautil"
+depends_on = [
+ { path = "flopy.mf6.coordinates.modeldimensions" },
+ { path = "flopy.mf6.data.mfdatastorage" },
+ { path = "flopy.mf6.data.mfstructure" },
+ { path = "flopy.mf6.mfbase" },
+ { path = "flopy.utils.datautil" },
+]
+
+[[modules]]
+path = "flopy.mf6.data.mffileaccess"
+depends_on = [
+ { path = "flopy.mf6.data.mfdatautil" },
+ { path = "flopy.mf6.data.mfstructure" },
+ { path = "flopy.mf6.mfbase" },
+ { path = "flopy.utils.binaryfile" },
+ { path = "flopy.utils.datautil" },
+]
+
+[[modules]]
+path = "flopy.mf6.data.mfstructure"
+depends_on = [
+ { path = "flopy.mf6.mfbase" },
+]
+
+[[modules]]
+path = "flopy.mf6.mfbase"
+depends_on = []
+
+[[modules]]
+path = "flopy.mf6.mfmodel"
+depends_on = [
+ { path = "flopy.discretization.grid" },
+ { path = "flopy.discretization.modeltime" },
+ { path = "flopy.discretization.structuredgrid" },
+ { path = "flopy.discretization.unstructuredgrid" },
+ { path = "flopy.discretization.vertexgrid" },
+ { path = "flopy.export.utils" },
+ { path = "flopy.mbase" },
+ { path = "flopy.mf6.coordinates.modeldimensions" },
+ { path = "flopy.mf6.data.mfdata" },
+ { path = "flopy.mf6.data.mfdatalist" },
+ { path = "flopy.mf6.data.mfdatautil" },
+ { path = "flopy.mf6.data.mfstructure" },
+ { path = "flopy.mf6.mfbase" },
+ { path = "flopy.mf6.mfpackage" },
+ { path = "flopy.mf6.utils.mfenums" },
+ { path = "flopy.mf6.utils.output_util" },
+ { path = "flopy.plot.plotutil" },
+ { path = "flopy.utils.check" },
+ { path = "flopy.utils.datautil" },
+]
+
+[[modules]]
+path = "flopy.mf6.mfpackage"
+depends_on = [
+ { path = "flopy.mbase" },
+ { path = "flopy.mf6.coordinates.modeldimensions" },
+ { path = "flopy.mf6.data.mfdata" },
+ { path = "flopy.mf6.data.mfdataarray" },
+ { path = "flopy.mf6.data.mfdatalist" },
+ { path = "flopy.mf6.data.mfdataplist" },
+ { path = "flopy.mf6.data.mfdatascalar" },
+ { path = "flopy.mf6.data.mfdatautil" },
+ { path = "flopy.mf6.data.mfstructure" },
+ { path = "flopy.mf6.mfbase" },
+ { path = "flopy.mf6.utils.output_util" },
+ { path = "flopy.pakbase" },
+ { path = "flopy.plot.plotutil" },
+ { path = "flopy.utils.check" },
+ { path = "flopy.utils.datautil" },
+]
+
+[[modules]]
+path = "flopy.mf6.mfsimbase"
+depends_on = [
+ { path = "flopy.mbase" },
+ { path = "flopy.mf6.data.mfdata" },
+ { path = "flopy.mf6.data.mfdatalist" },
+ { path = "flopy.mf6.data.mfdatautil" },
+ { path = "flopy.mf6.data.mfstructure" },
+ { path = "flopy.mf6.mfbase" },
+ { path = "flopy.mf6.mfmodel" },
+ { path = "flopy.mf6.mfpackage" },
+ { path = "flopy.mf6.utils.binaryfile_utils" },
+ { path = "flopy.mf6.utils.mfobservation" },
+]
+
+[[modules]]
+path = "flopy.mf6.modflow.mfgwe"
+depends_on = [
+ { path = "flopy.mf6.data.mfdatautil" },
+ { path = "flopy.mf6.mfmodel" },
+]
+
+[[modules]]
+path = "flopy.mf6.modflow.mfgwedisu"
+depends_on = [
+ { path = "flopy.mf6.data.mfdatautil" },
+ { path = "flopy.mf6.mfpackage" },
+]
+
+[[modules]]
+path = "flopy.mf6.modflow.mfsimulation"
+depends_on = [
+ { path = "flopy.mf6.mfsimbase" },
+]
+
+[[modules]]
+path = "flopy.mf6.utils.binaryfile_utils"
+depends_on = [
+ { path = "flopy.utils.binaryfile" },
+]
+
+[[modules]]
+path = "flopy.mf6.utils.binarygrid_util"
+depends_on = [
+ { path = "flopy.discretization.structuredgrid" },
+ { path = "flopy.discretization.unstructuredgrid" },
+ { path = "flopy.discretization.vertexgrid" },
+ { path = "flopy.utils.utils_def" },
+]
+
+[[modules]]
+path = "flopy.mf6.utils.codegen"
+depends_on = []
+
+[[modules]]
+path = "flopy.mf6.utils.createpackages"
+depends_on = [
+ { path = "flopy.mf6.data.mfdatautil" },
+ { path = "flopy.mf6.data.mfstructure" },
+ { path = "flopy.utils.datautil" },
+]
+
+[[modules]]
+path = "flopy.mf6.utils.generate_classes"
+depends_on = [
+ { path = "flopy.mf6.utils.createpackages" },
+]
+
+[[modules]]
+path = "flopy.mf6.utils.lakpak_utils"
+depends_on = []
+
+[[modules]]
+path = "flopy.mf6.utils.mfenums"
+depends_on = []
+
+[[modules]]
+path = "flopy.mf6.utils.mfobservation"
+depends_on = []
+
+[[modules]]
+path = "flopy.mf6.utils.mfsimlistfile"
+depends_on = []
+
+[[modules]]
+path = "flopy.mf6.utils.model_splitter"
+depends_on = [
+ { path = "flopy.mf6.data.mfdataarray" },
+ { path = "flopy.mf6.data.mfdatalist" },
+ { path = "flopy.mf6.data.mfdataplist" },
+ { path = "flopy.mf6.data.mfdatascalar" },
+ { path = "flopy.mf6.mfbase" },
+ { path = "flopy.plot.plotutil" },
+]
+
+[[modules]]
+path = "flopy.mf6.utils.output_util"
+depends_on = [
+ { path = "flopy.mbase" },
+ { path = "flopy.pakbase" },
+ { path = "flopy.utils.observationfile" },
+]
+
+[[modules]]
+path = "flopy.mf6.utils.postprocessing"
+depends_on = [
+ { path = "flopy.mf6.utils.binarygrid_util" },
+]
+
+[[modules]]
+path = "flopy.mf6.utils.reference"
+depends_on = []
+
+[[modules]]
+path = "flopy.mf6.utils.testutils"
+depends_on = [
+ { path = "flopy.utils.datautil" },
+]
+
+[[modules]]
+path = "flopy.pakbase"
+depends_on = [
+ { path = "flopy.utils.check" },
+ { path = "flopy.utils.flopy_io" },
+]
+
+[[modules]]
+path = "flopy.plot.crosssection"
+depends_on = [
+ { path = "flopy.plot.plotutil" },
+ { path = "flopy.utils.geometry" },
+ { path = "flopy.utils.geospatial_utils" },
+]
+
+[[modules]]
+path = "flopy.plot.map"
+depends_on = [
+ { path = "flopy.plot.plotutil" },
+ { path = "flopy.utils.geometry" },
+]
+
+[[modules]]
+path = "flopy.plot.mplstyle"
+depends_on = []
+
+[[modules]]
+path = "flopy.plot.plotutil"
+depends_on = [
+ { path = "flopy.datbase" },
+ { path = "flopy.plot.map" },
+ { path = "flopy.utils.geometry" },
+ { path = "flopy.utils.geospatial_utils" },
+ { path = "flopy.utils.particletrackfile" },
+]
+
+[[modules]]
+path = "flopy.plot.styles"
+depends_on = []
+
+[[modules]]
+path = "flopy.utils.binaryfile"
+depends_on = [
+ { path = "flopy.utils.datafile" },
+ { path = "flopy.utils.gridutil" },
+]
+
+[[modules]]
+path = "flopy.utils.check"
+depends_on = [
+ { path = "flopy.utils.flopy_io" },
+ { path = "flopy.utils.recarray_utils" },
+ { path = "flopy.utils.util_array" },
+]
+
+[[modules]]
+path = "flopy.utils.compare"
+depends_on = [
+ { path = "flopy.utils.mfreadnam" },
+]
+
+[[modules]]
+path = "flopy.utils.crs"
+depends_on = []
+
+[[modules]]
+path = "flopy.utils.cvfdutil"
+depends_on = [
+ { path = "flopy.utils.utl_import" },
+]
+
+[[modules]]
+path = "flopy.utils.datafile"
+depends_on = [
+ { path = "flopy.discretization.structuredgrid" },
+ { path = "flopy.export.shapefile_utils" },
+ { path = "flopy.plot.plotutil" },
+]
+
+[[modules]]
+path = "flopy.utils.datautil"
+depends_on = []
+
+[[modules]]
+path = "flopy.utils.flopy_io"
+depends_on = [
+ { path = "flopy.utils.util_list" },
+]
+
+[[modules]]
+path = "flopy.utils.formattedfile"
+depends_on = [
+ { path = "flopy.utils.datafile" },
+]
+
+[[modules]]
+path = "flopy.utils.geometry"
+depends_on = [
+ { path = "flopy.utils.geospatial_utils" },
+]
+
+[[modules]]
+path = "flopy.utils.geospatial_utils"
+depends_on = [
+ { path = "flopy.utils.geometry" },
+]
+
+[[modules]]
+path = "flopy.utils.get_modflow"
+depends_on = []
+
+[[modules]]
+path = "flopy.utils.gridgen"
+depends_on = [
+ { path = "flopy.export.shapefile_utils" },
+ { path = "flopy.mbase" },
+ { path = "flopy.utils.cvfdutil" },
+ { path = "flopy.utils.flopy_io" },
+ { path = "flopy.utils.geospatial_utils" },
+ { path = "flopy.utils.util_array" },
+]
+
+[[modules]]
+path = "flopy.utils.gridintersect"
+depends_on = [
+ { path = "flopy.utils.geometry" },
+ { path = "flopy.utils.geospatial_utils" },
+ { path = "flopy.utils.utl_import" },
+]
+
+[[modules]]
+path = "flopy.utils.gridutil"
+depends_on = [
+ { path = "flopy.utils.cvfdutil" },
+]
+
+[[modules]]
+path = "flopy.utils.lgrutil"
+depends_on = [
+ { path = "flopy.utils.cvfdutil" },
+ { path = "flopy.utils.util_array" },
+]
+
+[[modules]]
+path = "flopy.utils.mflistfile"
+depends_on = [
+ { path = "flopy.utils.flopy_io" },
+ { path = "flopy.utils.observationfile" },
+ { path = "flopy.utils.utils_def" },
+]
+
+[[modules]]
+path = "flopy.utils.mfreadnam"
+depends_on = []
+
+[[modules]]
+path = "flopy.utils.modpathfile"
+depends_on = [
+ { path = "flopy.export.shapefile_utils" },
+ { path = "flopy.utils.flopy_io" },
+ { path = "flopy.utils.geometry" },
+ { path = "flopy.utils.particletrackfile" },
+]
+
+[[modules]]
+path = "flopy.utils.mtlistfile"
+depends_on = []
+
+[[modules]]
+path = "flopy.utils.observationfile"
+depends_on = [
+ { path = "flopy.utils.flopy_io" },
+ { path = "flopy.utils.utils_def" },
+]
+
+[[modules]]
+path = "flopy.utils.optionblock"
+depends_on = [
+ { path = "flopy.utils.flopy_io" },
+]
+
+[[modules]]
+path = "flopy.utils.parse_version"
+depends_on = []
+
+[[modules]]
+path = "flopy.utils.particletrackfile"
+depends_on = [
+ { path = "flopy.export.shapefile_utils" },
+ { path = "flopy.utils.geometry" },
+]
+
+[[modules]]
+path = "flopy.utils.postprocessing"
+depends_on = [
+ { path = "flopy.utils.binaryfile" },
+ { path = "flopy.utils.formattedfile" },
+]
+
+[[modules]]
+path = "flopy.utils.rasters"
+depends_on = [
+ { path = "flopy.utils.geometry" },
+ { path = "flopy.utils.geospatial_utils" },
+ { path = "flopy.utils.utl_import" },
+]
+
+[[modules]]
+path = "flopy.utils.recarray_utils"
+depends_on = []
+
+[[modules]]
+path = "flopy.utils.reference"
+depends_on = []
+
+[[modules]]
+path = "flopy.utils.sfroutputfile"
+depends_on = []
+
+[[modules]]
+path = "flopy.utils.swroutputfile"
+depends_on = [
+ { path = "flopy.utils.utils_def" },
+]
+
+[[modules]]
+path = "flopy.utils.triangle"
+depends_on = [
+ { path = "flopy.mbase" },
+ { path = "flopy.utils.cvfdutil" },
+ { path = "flopy.utils.geospatial_utils" },
+]
+
+[[modules]]
+path = "flopy.utils.util_array"
+depends_on = [
+ { path = "flopy.datbase" },
+ { path = "flopy.utils.binaryfile" },
+ { path = "flopy.utils.flopy_io" },
+]
+
+[[modules]]
+path = "flopy.utils.util_list"
+depends_on = [
+ { path = "flopy.datbase" },
+ { path = "flopy.utils.recarray_utils" },
+]
+
+[[modules]]
+path = "flopy.utils.utils_def"
+depends_on = []
+
+[[modules]]
+path = "flopy.utils.utl_import"
+depends_on = [
+ { path = "flopy.utils.parse_version" },
+]
+
+[[modules]]
+path = "flopy.utils.voronoi"
+depends_on = [
+ { path = "flopy.utils.cvfdutil" },
+ { path = "flopy.utils.geometry" },
+ { path = "flopy.utils.triangle" },
+ { path = "flopy.utils.utl_import" },
+]
+
+[[modules]]
+path = "flopy.utils.zonbud"
+depends_on = [
+ { path = "flopy.export.utils" },
+ { path = "flopy.mbase" },
+ { path = "flopy.utils.binaryfile" },
+ { path = "flopy.utils.flopy_io" },
+ { path = "flopy.utils.utils_def" },
+]
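The new tach.toml makes flopy's internal layering explicit: each [[modules]] entry lists the only flopy modules that module is allowed to import. tach ships in the dev extra added above and is normally run from the command line; a sketch invoking it from Python (assumes tach is on PATH, e.g. after pip install "flopy[dev]"):

    import subprocess

    # Exits non-zero if any module imports outside its declared depends_on list.
    subprocess.run(["tach", "check"], check=True)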
diff --git a/version.txt b/version.txt
index 00e897bdae..b72ad011fa 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-3.8.2
\ No newline at end of file
+3.9.0
\ No newline at end of file