diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 9b7251a5..d9af02c7 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -16,7 +16,7 @@ repos:
     rev: v0.6.4
     hooks:
       - id: ruff
-        args: [ --extend-select, I, --fix ]
+        args: [ --fix ]
       - id: ruff-format
         exclude: "tests/data/testensemble-reek001"
diff --git a/pyproject.toml b/pyproject.toml
index f2b83cdc..69ce4792 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -67,3 +67,42 @@ write_to = "src/fmu/ensemble/version.py"
 ignore_directives = ["argparse", "automodule"]
 # This looks like a bug in rstcheck:
 ignore_messages = "Hyperlink target .* is not referenced"
+
+[tool.ruff]
+src = ["src"]
+line-length = 88
+
+[tool.ruff.lint]
+select = [
+    "W",  # pycodestyle
+    "I",  # isort
+    "B",  # flake8-bugbear
+    "SIM",  # flake8-simplify
+    "F",  # pyflakes
+    "PL",  # pylint
+    "NPY",  # numpy-specific rules
+    "C4",  # flake8-comprehensions
+]
+ignore = ["PLW2901",  # redefined-loop-name
+    "PLR2004",  # magic-value-comparison
+    "PLR0915",  # too-many-statements
+    "PLR0912",  # too-many-branches
+    "PLR0911",  # too-many-return-statements
+    "PLC2701",  # import-private-name
+    "PLR6201",  # literal-membership
+    "PLR0914",  # too-many-locals
+    "PLR6301",  # no-self-use
+    "PLW1641",  # eq-without-hash
+    "PLR0904",  # too-many-public-methods
+    "PLR1702",  # too-many-nested-blocks
+    "PLW3201",  # bad-dunder-method-name
+    "B028",  # no-explicit-stacklevel
+]
+
+[tool.ruff.lint.extend-per-file-ignores]
+"tests/*" = [
+    "PLW0603",  # global-statement
+]
+
+[tool.ruff.lint.pylint]
+max-args = 20
diff --git a/src/fmu/ensemble/ensemble.py b/src/fmu/ensemble/ensemble.py
index 5928f36c..3120673c 100644
--- a/src/fmu/ensemble/ensemble.py
+++ b/src/fmu/ensemble/ensemble.py
@@ -112,14 +112,12 @@ def __init__(
         globbedpaths = [glob.glob(path) for path in paths]
         globbedpaths = list({item for sublist in globbedpaths for item in sublist})
         if not globbedpaths:
-            if isinstance(runpathfile, str):
-                if not runpathfile:
-                    logger.warning("Initialized empty ScratchEnsemble")
-                    return
-            if isinstance(runpathfile, pd.DataFrame):
-                if runpathfile.empty:
-                    logger.warning("Initialized empty ScratchEnsemble")
-                    return
+            if isinstance(runpathfile, str) and not runpathfile:
+                logger.warning("Initialized empty ScratchEnsemble")
+                return
+            if isinstance(runpathfile, pd.DataFrame) and runpathfile.empty:
+                logger.warning("Initialized empty ScratchEnsemble")
+                return
 
         count = None
         if globbedpaths:
@@ -893,9 +891,8 @@ def filter(self, localpath, inplace=True, **kwargs):
             if inplace:
                 if not realization.contains(localpath, **kwargs):
                     deletethese.append(realidx)
-            else:
-                if realization.contains(localpath, **kwargs):
-                    keepthese.append(realidx)
+            elif realization.contains(localpath, **kwargs):
+                keepthese.append(realidx)
 
         if inplace:
             logger.info("Removing realizations %s", deletethese)
@@ -932,7 +929,7 @@ def drop(self, localpath, **kwargs):
         if shortcut2path(self.keys(), localpath) not in self.keys():
             raise ValueError("%s not found" % localpath)
         for _, realization in self.realizations.items():
-            try:
+            try:  # noqa: SIM105
                 realization.drop(localpath, **kwargs)
             except ValueError:
                 pass  # Allow localpath to be missing in some realizations
@@ -1176,7 +1173,7 @@ def get_wellnames(self, well_match=None):
             for well in well_match:
                 result = result.union(set(eclsum.wells(well)))
 
-        return sorted(list(result))
+        return sorted(result)
 
     def get_groupnames(self, group_match=None):
         """
@@ -1213,7 +1210,7 @@
             for group in group_match:
                 result = result.union(set(eclsum.groups(group)))
 
-        return sorted(list(result))
+        return sorted(result)
 
     def agg(self, aggregation, keylist=None, excludekeys=None):
         """Aggregate the ensemble data into one VirtualRealization
diff --git a/src/fmu/ensemble/ensembleset.py b/src/fmu/ensemble/ensembleset.py
index 8f3ea240..c03e430e 100644
--- a/src/fmu/ensemble/ensembleset.py
+++ b/src/fmu/ensemble/ensembleset.py
@@ -439,7 +439,7 @@ def drop(self, localpath, **kwargs):
         if self.shortcut2path(localpath) not in self.keys():
             raise ValueError("%s not found" % localpath)
         for _, ensemble in self._ensembles.items():
-            try:
+            try:  # noqa: SIM105
                 ensemble.drop(localpath, **kwargs)
             except ValueError:
                 pass  # Allow localpath to be missing in some ensembles.
@@ -781,4 +781,4 @@ def get_wellnames(self, well_match=None):
         result = set()
         for _, ensemble in self._ensembles.items():
             result = result.union(ensemble.get_wellnames(well_match))
-        return sorted(list(result))
+        return sorted(result)
diff --git a/src/fmu/ensemble/observations.py b/src/fmu/ensemble/observations.py
index e711a3a2..b348a02d 100644
--- a/src/fmu/ensemble/observations.py
+++ b/src/fmu/ensemble/observations.py
@@ -75,7 +75,7 @@ def __init__(self, observations):
             observations: dict with observation structure or string
                 with path to a yaml file.
         """
-        self.observations = dict()
+        self.observations = {}
 
         if isinstance(observations, str):
             with open(observations) as yamlfile:
@@ -183,7 +183,7 @@ def load_smry(self, realization, smryvector, time_index="yearly", smryerror=None
         # it is ok (assuming ISO-datestrings)
 
         # Modify the observation object (self)
-        if "smry" not in self.observations.keys():
+        if "smry" not in self.observations:
             self.observations["smry"] = []  # Empty list
 
         # Construct a virtual observation with observation units
@@ -251,7 +251,7 @@ def _realization_mismatch(self, real):
        # mismatch_df = pd.DataFrame(columns=['OBSTYPE', 'OBSKEY',
        #     'DATE', 'OBSINDEX', 'MISMATCH', 'L1', 'L2', 'SIGN'])
         mismatches = []
-        for obstype in self.observations.keys():
+        for obstype in self.observations:
             for obsunit in self.observations[obstype]:  # (list)
                 if obstype == "txt":
                     try:
@@ -267,20 +267,20 @@
                         measerror = 1
                     sign = (mismatch > 0) - (mismatch < 0)
                     mismatches.append(
-                        dict(
-                            OBSTYPE=obstype,
-                            OBSKEY=str(obsunit["localpath"])
+                        {
+                            "OBSTYPE": obstype,
+                            "OBSKEY": str(obsunit["localpath"])
                             + "/"
                             + str(obsunit["key"]),
-                            LABEL=obsunit.get("label", ""),
-                            MISMATCH=mismatch,
-                            L1=abs(mismatch),
-                            L2=abs(mismatch) ** 2,
-                            SIMVALUE=sim_value,
-                            OBSVALUE=obsunit["value"],
-                            MEASERROR=measerror,
-                            SIGN=sign,
-                        )
+                            "LABEL": obsunit.get("label", ""),
+                            "MISMATCH": mismatch,
+                            "L1": abs(mismatch),
+                            "L2": abs(mismatch) ** 2,
+                            "SIMVALUE": sim_value,
+                            "OBSVALUE": obsunit["value"],
+                            "MEASERROR": measerror,
+                            "SIGN": sign,
+                        }
                     )
                 if obstype == "scalar":
                     try:
@@ -294,18 +294,18 @@
                         measerror = 1
                     sign = (mismatch > 0) - (mismatch < 0)
                     mismatches.append(
-                        dict(
-                            OBSTYPE=obstype,
-                            OBSKEY=str(obsunit["key"]),
-                            LABEL=obsunit.get("label", ""),
-                            MISMATCH=mismatch,
-                            L1=abs(mismatch),
-                            SIMVALUE=sim_value,
-                            OBSVALUE=obsunit["value"],
-                            MEASERROR=measerror,
-                            L2=abs(mismatch) ** 2,
-                            SIGN=sign,
-                        )
+                        {
+                            "OBSTYPE": obstype,
+                            "OBSKEY": str(obsunit["key"]),
+                            "LABEL": obsunit.get("label", ""),
+                            "MISMATCH": mismatch,
+                            "L1": abs(mismatch),
+                            "SIMVALUE": sim_value,
+                            "OBSVALUE": obsunit["value"],
+                            "MEASERROR": measerror,
+                            "L2": abs(mismatch) ** 2,
+                            "SIGN": sign,
+                        }
                     )
                 if obstype == "smryh":
                     if "time_index" in obsunit:
@@ -352,16 +352,16 @@ def _realization_mismatch(self, real):
                         )
                         measerror = 1
                     mismatches.append(
-                        dict(
-                            OBSTYPE="smryh",
-                            OBSKEY=obsunit["key"],
-                            LABEL=obsunit.get("label", ""),
-                            MISMATCH=sim_hist["mismatch"].sum(),
-                            MEASERROR=measerror,
-                            L1=sim_hist["mismatch"].abs().sum(),
-                            L2=math.sqrt((sim_hist["mismatch"] ** 2).sum()),
-                            TIME_INDEX=time_index_str,
-                        )
+                        {
+                            "OBSTYPE": "smryh",
+                            "OBSKEY": obsunit["key"],
+                            "LABEL": obsunit.get("label", ""),
+                            "MISMATCH": sim_hist["mismatch"].sum(),
+                            "MEASERROR": measerror,
+                            "L1": sim_hist["mismatch"].abs().sum(),
+                            "L2": math.sqrt((sim_hist["mismatch"] ** 2).sum()),
+                            "TIME_INDEX": time_index_str,
+                        }
                     )
                 if obstype == "smry":
                     # For 'smry', there is a list of
@@ -381,19 +381,19 @@
                         mismatch = float(sim_value - unit["value"])
                         sign = (mismatch > 0) - (mismatch < 0)
                         mismatches.append(
-                            dict(
-                                OBSTYPE="smry",
-                                OBSKEY=obsunit["key"],
-                                DATE=unit["date"],
-                                MEASERROR=unit["error"],
-                                LABEL=unit.get("label", ""),
-                                MISMATCH=mismatch,
-                                OBSVALUE=unit["value"],
-                                SIMVALUE=sim_value,
-                                L1=abs(mismatch),
-                                L2=abs(mismatch) ** 2,
-                                SIGN=sign,
-                            )
+                            {
+                                "OBSTYPE": "smry",
+                                "OBSKEY": obsunit["key"],
+                                "DATE": unit["date"],
+                                "MEASERROR": unit["error"],
+                                "LABEL": unit.get("label", ""),
+                                "MISMATCH": mismatch,
+                                "OBSVALUE": unit["value"],
+                                "SIMVALUE": sim_value,
+                                "L1": abs(mismatch),
+                                "L2": abs(mismatch) ** 2,
+                                "SIGN": sign,
+                            }
                         )
 
         return pd.DataFrame(mismatches)
@@ -422,13 +422,12 @@ def _realization_misfit(self, real, defaulterrors=False, corr=None):
         zeroerrors = mismatch["MEASERROR"] < 1e-7
         if defaulterrors:
             mismatch[zeroerrors]["MEASERROR"] = 1
-        else:
-            if zeroerrors.any():
-                print(mismatch[zeroerrors])
-                raise ValueError(
-                    "Zero measurement error in observation set"
-                    + ". can't be used to calculate misfit"
-                )
+        elif zeroerrors.any():
+            print(mismatch[zeroerrors])
+            raise ValueError(
+                "Zero measurement error in observation set"
+                + ". can't be used to calculate misfit"
+            )
 
         if "MISFIT" not in mismatch.columns:
             mismatch["MISFIT"] = mismatch["L2"] / (mismatch["MEASERROR"] ** 2)
@@ -460,7 +459,7 @@ def _clean_observations(self):
                 )
                 self.observations.pop(key)
         # Check smryh observations for validity
-        if "smryh" in self.observations.keys():
+        if "smryh" in self.observations:
             smryhunits = self.observations["smryh"]
             if not isinstance(smryhunits, list):
                 logger.warning(
@@ -484,8 +483,10 @@
                     continue
                 # If time_index is not a supported mnemonic,
                 # parse it to a date object
-                if "time_index" in unit:
-                    if unit["time_index"] not in [
+                if (
+                    "time_index" in unit
+                    and unit["time_index"]
+                    not in [
                         "raw",
                         "report",
                         "yearly",
@@ -493,23 +494,25 @@
                         "first",
                         "last",
                         "monthly",
-                    ] and not isinstance(unit["time_index"], datetime.datetime):
-                        try:
-                            unit["time_index"] = dateutil.parser.isoparse(
-                                unit["time_index"]
-                            ).date()
-                        except (TypeError, ValueError) as exception:
-                            logger.warning(
-                                "Parsing date %s failed with error",
-                                (str(unit["time_index"]), str(exception)),
-                            )
-                            del smryhunits[smryhunits.index(unit)]
-                            continue
+                    ]
+                    and not isinstance(unit["time_index"], datetime.datetime)
+                ):
+                    try:
+                        unit["time_index"] = dateutil.parser.isoparse(
+                            unit["time_index"]
+                        ).date()
+                    except (TypeError, ValueError) as exception:
+                        logger.warning(
+                            "Parsing date %s failed with error %s",
+                            str(unit["time_index"]), str(exception),
+                        )
+                        del smryhunits[smryhunits.index(unit)]
+                        continue
             # If everything has been deleted through cleanup, delete the section
             if not smryhunits:
                 del self.observations["smryh"]
         # Check smry observations for validity
-        if "smry" in self.observations.keys():
+        if "smry" in self.observations:
             # We already know that observations['smry'] is a list
             # Each list element must be a dict with
             # the mandatory keys 'key' and 'observation'
diff --git a/src/fmu/ensemble/realization.py b/src/fmu/ensemble/realization.py
index ee41a574..2f9b2fb0 100644
--- a/src/fmu/ensemble/realization.py
+++ b/src/fmu/ensemble/realization.py
@@ -345,11 +345,8 @@ def load_scalar(
             value = parse_number(value)
             if not isinstance(value, str):
                 self.data[localpath] = value
-            else:
-                # In case we are re-reading, we must
-                # ensure there is no value present now:
-                if localpath in self.data:
-                    del self.data[localpath]
+            elif localpath in self.data:
+                del self.data[localpath]
         else:
             self.data[localpath] = value
         return value
@@ -463,12 +460,9 @@ def load_csv(self, localpath, convert_numeric=True, force_reread=False):
                 [self.files, pd.DataFrame([filerow])], ignore_index=True
             )
         try:
-            if convert_numeric:
-                # Trust that Pandas will determine sensible datatypes
-                # faster than the convert_numeric() function
-                dtype = None
-            else:
-                dtype = str
+            # Trust that Pandas will determine sensible datatypes
+            # faster than the convert_numeric() function
+            dtype = None if convert_numeric else str
             dframe = pd.read_csv(fullpath, dtype=dtype)
             if "REAL" in dframe:
                 dframe.rename(columns={"REAL": "REAL_ORIG"}, inplace=True)
@@ -696,7 +690,7 @@ def get_df(self, localpath, merge=None):
             TypeError if data in localpath or merge is not of a mergeable type
         """
         fullpath = shortcut2path(self.keys(), localpath)
-        if fullpath not in self.data.keys():
+        if fullpath not in self.data:
             raise KeyError("Could not find {}".format(localpath))
         data = self.data[shortcut2path(self.keys(), localpath)]
         if not isinstance(merge, list):
@@ -706,9 +700,7 @@
         # this function happily returns references to the internal
         # dataframes in the realization object. So ensure
         # we copy dataframes if any merging is about to happen.
-        if isinstance(data, pd.DataFrame):
-            data = data.copy()
-        elif isinstance(data, dict):
+        if isinstance(data, (pd.DataFrame, dict)):
             data = data.copy()
         elif isinstance(data, (str, int, float, np.number)):
             # Convert scalar data into something mergeable
@@ -958,9 +950,9 @@ def get_eclsum(self, cache=True, include_restart=True):
             EclSum: object representing the summary file. None if
                 nothing was found.
         """
-        if cache and self._eclsum:  # Return cached object if available
-            if self._eclsum_include_restart == include_restart:
-                return self._eclsum
+        # Return cached object if available
+        if cache and self._eclsum and self._eclsum_include_restart == include_restart:
+            return self._eclsum
 
         unsmry_file_row = self.files[self.files.FILETYPE == "UNSMRY"]
         unsmry_filename = None
@@ -1368,9 +1360,12 @@ def contains(self, localpath, **kwargs):
             return False
         if not kwargs:
             return localpath in self.keys()
-        if isinstance(self.data[localpath], dict):
-            if "key" in kwargs and "value" not in kwargs:
-                return kwargs["key"] in self.data[localpath]
+        if (
+            isinstance(self.data[localpath], dict)
+            and "key" in kwargs
+            and "value" not in kwargs
+        ):
+            return kwargs["key"] in self.data[localpath]
         if isinstance(self.data[localpath], pd.DataFrame):
             if "key" in kwargs:
                 raise ValueError("Don't use key for tabular data")
@@ -1455,10 +1450,7 @@ def drop(self, localpath, **kwargs):
     def __repr__(self):
         """Represent the realization. Show only the last part of the path"""
         pathsummary = self._origpath[-50:]
-        if self.index is not None:
-            indexstr = str(self.index)
-        else:
-            indexstr = "Error"
+        indexstr = str(self.index) if self.index is not None else "Error"
         return "<Realization, index={}, path=...{}>".format(indexstr, pathsummary)
 
     def __sub__(self, other):
diff --git a/src/fmu/ensemble/util/rates.py b/src/fmu/ensemble/util/rates.py
index 2b27e2ca..ef9b4fd0 100644
--- a/src/fmu/ensemble/util/rates.py
+++ b/src/fmu/ensemble/util/rates.py
@@ -47,11 +47,8 @@ def compute_volumetric_rates(realization, column_keys, time_index, time_unit):
     Returns:
         A dataframe indexed by DATE with cumulative columns.
""" - if isinstance(time_unit, str): - if time_unit not in ["days", "months", "years"]: - raise ValueError( - "Unsupported time_unit " + time_unit + " for volumetric rates" - ) + if isinstance(time_unit, str) and time_unit not in ["days", "months", "years"]: + raise ValueError("Unsupported time_unit " + time_unit + " for volumetric rates") # pylint: disable=protected-access column_keys = realization._glob_smry_keys(column_keys) diff --git a/src/fmu/ensemble/virtualensemble.py b/src/fmu/ensemble/virtualensemble.py index 7db04dd5..aa307f23 100644 --- a/src/fmu/ensemble/virtualensemble.py +++ b/src/fmu/ensemble/virtualensemble.py @@ -120,7 +120,7 @@ def update_realindices(self): # Check all dataframes: idxset = set() - for key in self.data.keys(): + for key in self.data: if key != "__smry_metadata": idxset = idxset | set(self.data[key]["REAL"].unique()) self.realindices = list(idxset) @@ -189,7 +189,7 @@ def get_realization(self, realindex): vreal = VirtualRealization( description="Realization %d from %s" % (realindex, self._name) ) - for key in self.data.keys(): + for key in self.data: data = self.get_df(key) if key != "__smry_metadata": # Special treatment of the internal special frame @@ -243,7 +243,7 @@ def add_realization(self, realization, realidx=None, overwrite=False): self.remove_realizations(realidx) # Add the data from the incoming realization key by key - for key in realization.keys(): + for key in realization: dframe = realization.get_df(key) if isinstance(dframe, dict): # dicts to go to one-row dataframes dframe = pd.DataFrame(index=[1], data=dframe) @@ -252,7 +252,7 @@ def add_realization(self, realization, realidx=None, overwrite=False): dframe["REAL"] = realidx if key not in self.data and key in self.lazy_frames: self.get_df(key) # Trigger load from disk. 
-            if key not in self.data.keys():
+            if key not in self.data:
                 self.data[key] = dframe
             else:
                 self.data[key] = pd.concat(
@@ -392,10 +392,7 @@ def agg(self, aggregation, keylist=None, excludekeys=None):
             if not (int in dtypes or float in dtypes):
                 logger.info("No numerical data to aggregate in %s", key)
                 continue
-            if groupby:
-                aggobject = data.groupby(groupby)
-            else:
-                aggobject = data
+            aggobject = data.groupby(groupby) if groupby else data
 
             if quantilematcher.match(aggregation):
                 quantile = int(quantilematcher.match(aggregation).group(1))
@@ -431,7 +428,7 @@ def append(self, key, dataframe, overwrite=False):
             raise ValueError("Can only append dataframes")
         if "REAL" not in dataframe.columns and not key.startswith("__"):
             raise ValueError("REAL column not in incoming dataframe")
-        if key in self.data.keys() and not overwrite:
+        if key in self.data and not overwrite:
             logger.warning("Ignoring %s data already exists", key)
             return
         self.data[key] = dataframe
@@ -503,15 +500,14 @@ def prepare_vens_directory(filesystempath, delete=False):
                 logger.info(" - Deleted existing directory")
                 shutil.rmtree(filesystempath)
                 os.mkdir(filesystempath)
-            else:
-                if os.listdir(filesystempath):
-                    logger.critical(
-                        (
-                            "Refusing to write virtual ensemble "
-                            " to non-empty directory"
-                        )
-                    )
-                    raise IOError("Directory %s not empty" % filesystempath)
+            elif os.listdir(filesystempath):
+                logger.critical(
+                    (
+                        "Refusing to write virtual ensemble "
+                        " to non-empty directory"
+                    )
+                )
+                raise IOError("Directory %s not empty" % filesystempath)
         else:
             os.mkdir(filesystempath)
 
@@ -592,20 +588,16 @@ def prepare_vens_directory(filesystempath, delete=False):
 
         for key in self.keys():
             dirname = os.path.join(filesystempath, os.path.dirname(key))
-            if dirname:
-                if not os.path.exists(dirname):
-                    os.makedirs(dirname)
+            if dirname and not os.path.exists(dirname):
+                os.makedirs(dirname)
 
             data = self.get_df(key)
             filename = os.path.join(dirname, os.path.basename(key))
 
             # Trim .csv from end of dict-key
             # .csv will be reinstated by logic in from_disk()
-            if filename[-4:] == ".csv":
-                filebase = filename[:-4]
-            else:
-                # parameters.txt or STATUS ends here:
-                filebase = filename
+            # parameters.txt or STATUS ends here
+            filebase = filename[:-4] if filename[-4:] == ".csv" else filename
 
             if not isinstance(data, pd.DataFrame):
                 raise ValueError("VirtualEnsembles should " + "only store DataFrames")
@@ -675,9 +667,8 @@ def from_disk(self, filesystempath, fmt="parquet", lazy_load=False):
             for filename in filenames:
                 # Special treatment of the filename "_name"
                 if filename == "_name":
-                    self._name = "".join(
-                        open(os.path.join(root, filename), "r").readlines()
-                    ).strip()
+                    with open(os.path.join(root, filename), "r") as f:
+                        self._name = "".join(f.readlines()).strip()
 
                 if filename == "_manifest.yml":
                     self.manifest = os.path.join(root, "_manifest.yml")
@@ -732,10 +723,7 @@
 
         self.update_realindices()
         end_time = datetime.datetime.now()
-        if lazy_load:
-            lazy_str = "(lazy) "
-        else:
-            lazy_str = ""
+        lazy_str = "(lazy) " if lazy_load else ""
         logger.info(
             "Loading ensemble from disk %stook %g seconds",
             lazy_str,
@@ -784,7 +772,7 @@ def get_df(self, localpath, merge=None):
         )
         allfullpaths = list(self.data.keys()) + list(self.lazy_frames.keys())
         fullpath = self.shortcut2path(localpath, keys=allfullpaths)
-        if fullpath not in self.data.keys():
+        if fullpath not in self.data:
             # Need to lazy load it:
             logger.warning("Loading %s from disk, was lazy", fullpath)
             self._load_frame_fromdisk(fullpath, self.lazy_frames[fullpath])
diff --git a/src/fmu/ensemble/virtualrealization.py b/src/fmu/ensemble/virtualrealization.py
index 1dbbcccf..32514ec9 100644
--- a/src/fmu/ensemble/virtualrealization.py
+++ b/src/fmu/ensemble/virtualrealization.py
@@ -58,7 +58,7 @@ def append(self, key, dataframe, overwrite=False):
         No checks performed on the dataframe coming in. If key exists,
         nothing will be appended unless overwrite is set to True
         """
-        if key in self.data.keys() and not overwrite:
+        if key in self.data and not overwrite:
             logger.warning("Ignoring %s, data already exists", key)
             return
         self.data[key] = dataframe
@@ -84,15 +84,15 @@ def to_disk(self, filesystempath, delete=False):
             "to_disk() is considered deprecated and might be "
             "removed in fmu-ensemble v2.0.0",
             FutureWarning,
+            stacklevel=1,
         )
         if os.path.exists(filesystempath):
             if delete:
                 shutil.rmtree(filesystempath)
                 os.mkdir(filesystempath)
-            else:
-                if os.listdir(filesystempath):
-                    logger.critical("Refusing to write to non-empty directory")
-                    raise IOError("Directory %s not empty" % filesystempath)
+            elif os.listdir(filesystempath):
+                logger.critical("Refusing to write to non-empty directory")
+                raise IOError("Directory %s not empty" % filesystempath)
         else:
             os.mkdir(filesystempath)
 
@@ -107,9 +107,8 @@ def to_disk(self, filesystempath, delete=False):
 
         for key in self.keys():
            dirname = os.path.join(filesystempath, os.path.dirname(key))
-            if dirname:
-                if not os.path.exists(dirname):
-                    os.makedirs(dirname)
+            if dirname and not os.path.exists(dirname):
+                os.makedirs(dirname)
 
             data = self.get_df(key)
             filename = os.path.join(dirname, os.path.basename(key))
@@ -118,7 +117,7 @@ def to_disk(self, filesystempath, delete=False):
                 data.to_csv(filename, index=False)
             elif isinstance(data, dict):
                 with open(filename, "w") as fhandle:
-                    for paramkey in data.keys():
+                    for paramkey in data:
                         fhandle.write(paramkey + " " + str(data[paramkey]) + "\n")
             elif isinstance(data, (str, float, int, np.number)):
                 with open(filename, "w") as fhandle:
@@ -152,14 +151,14 @@ def load_disk(self, filesystempath):
             "load_disk() is considered deprecated and might be "
             "removed in fmu-ensemble v2.0.0",
             FutureWarning,
+            stacklevel=1,
         )
         logger.info("Loading virtual realization from %s", filesystempath)
         for root, _, filenames in os.walk(filesystempath):
             for filename in filenames:
                 if filename == "_description":
-                    self._description = " ".join(
-                        open(os.path.join(root, filename)).readlines()
-                    )
+                    with open(os.path.join(root, filename)) as f:
+                        self._description = " ".join(f.readlines())
                     logger.info("got name as %s", self._description)
                 elif filename == "STATUS":
                     self.append("STATUS", pd.read_csv(os.path.join(root, filename)))
diff --git a/tests/test_batch.py b/tests/test_batch.py
index 6bed5204..801d4714 100644
--- a/tests/test_batch.py
+++ b/tests/test_batch.py
@@ -74,7 +74,7 @@ def test_yaml():
     ensset.add_ensemble(ScratchEnsemble(ensname, paths=enspath))
     ensset.process_batch(ymlconfig["batch"])
 
-    assert "parameters.txt" in ensset.keys()
-    assert "OK" in ensset.keys()
-    assert "npv.txt" in ensset.keys()
+    assert "parameters.txt" in ensset
+    assert "OK" in ensset
+    assert "npv.txt" in ensset
     assert not ensset.get_df("unsmry--yearly").empty
diff --git a/tests/test_ensemble.py b/tests/test_ensemble.py
index 761fa296..24408bc3 100644
--- a/tests/test_ensemble.py
+++ b/tests/test_ensemble.py
@@ -87,7 +87,7 @@ def test_reek001(tmpdir):
     assert "NPV" in reekensemble.load_txt("outputs.txt").columns
     # Check implicit discovery
     assert "outputs.txt" in reekensemble.files["LOCALPATH"].values
-    assert all([os.path.isabs(x) for x in reekensemble.files["FULLPATH"]])
+    assert all(os.path.isabs(x) for x in reekensemble.files["FULLPATH"])
 
     # File discovery:
     csvvolfiles = reekensemble.find_files(
@@ -117,7 +117,7 @@
     assert len(newfiles.columns) + 1 == len(csvvolfiles.columns)
 
     # FULLPATH should always contain absolute paths
-    assert all([os.path.isabs(x) for x in reekensemble.files["FULLPATH"]])
+    assert all(os.path.isabs(x) for x in reekensemble.files["FULLPATH"])
 
     # The metadata in the rediscovered files should have been removed
     assert reekensemble.files[reekensemble.files["GRID"] == "simgrid"].empty
@@ -274,7 +274,7 @@
         "reektest", testdir + "/data/testensemble-reek001/" + "realization-*/iter-0"
     )
 
-    assert "OK" in reekensemble.keys()
+    assert "OK" in reekensemble
     assert isinstance(reekensemble.get_df("OK"), pd.DataFrame)
     assert len(reekensemble.get_df("OK")) == 5
 
@@ -297,7 +297,7 @@
 
     reekensemble.load_scalar("emptyscalarfile")  # missing in real-4
     assert len(reekensemble.get_df("emptyscalarfile")) == 4
-    assert "emptyscalarfile" in reekensemble.keys()
+    assert "emptyscalarfile" in reekensemble
 
     # Use when filter is merged.
     # assert len(reekensemble.filter('emptyscalarfile', inplace=True)) == 4
@@ -340,14 +340,14 @@ def test_noautodiscovery():
     assert not reekensemble.get_smry(column_keys="FOPT").empty
 
     # Some very basic data is discovered even though we have autodiscovery=False
-    assert "parameters.txt" in reekensemble.keys()
-    assert "STATUS" in reekensemble.keys()
+    assert "parameters.txt" in reekensemble
+    assert "STATUS" in reekensemble
 
     # If these are unwanted, we can delete explicitly:
     reekensemble.remove_data("parameters.txt")
     reekensemble.remove_data(["STATUS"])
-    assert "parameters.txt" not in reekensemble.keys()
-    assert "STATUS" not in reekensemble.keys()
+    assert "parameters.txt" not in reekensemble
+    assert "STATUS" not in reekensemble
 
 
 def test_ensemble_ecl():
@@ -807,7 +807,7 @@ def test_ertrunpathfile():
     )
     assert len(ens) == 5
 
-    assert all([os.path.isabs(x) for x in ens.files["FULLPATH"]])
+    assert all(os.path.isabs(x) for x in ens.files["FULLPATH"])
     # Check that the UNSMRY files has been discovered, they should always be
     # because ECLBASE is given in the runpathfile
     assert sum(["UNSMRY" in x for x in ens.files["BASENAME"].unique()]) == 5
@@ -850,35 +850,35 @@ def test_eclsumcaching():
     ens.load_smry()
 
     # Default is to do caching, so these will not be None:
-    assert all([x._eclsum for (idx, x) in ens.realizations.items()])
+    assert all(x._eclsum for (idx, x) in ens.realizations.items())
 
     # If we redo this operation, the same objects should all
     # be None afterwards:
     ens.load_smry(cache_eclsum=False)
 
     # cache_eclsum==None is from v1.1.5 no longer equivalent to False
-    assert not any([x._eclsum for (idx, x) in ens.realizations.items()])
+    assert not any(x._eclsum for (idx, x) in ens.realizations.items())
 
     ens.get_smry()
-    assert all([x._eclsum for (idx, x) in ens.realizations.items()])
+    assert all(x._eclsum for (idx, x) in ens.realizations.items())
 
     ens.get_smry(cache_eclsum=False)
-    assert not any([x._eclsum for (idx, x) in ens.realizations.items()])
+    assert not any(x._eclsum for (idx, x) in ens.realizations.items())
 
     ens.get_smry_stats()
-    assert all([x._eclsum for (idx, x) in ens.realizations.items()])
+    assert all(x._eclsum for (idx, x) in ens.realizations.items())
 
     ens.get_smry_stats(cache_eclsum=False)
-    assert not any([x._eclsum for (idx, x) in ens.realizations.items()])
+    assert not any(x._eclsum for (idx, x) in ens.realizations.items())
 
     ens.get_smry_dates()
-    assert all([x._eclsum for (idx, x) in ens.realizations.items()])
+    assert all(x._eclsum for (idx, x) in ens.realizations.items())
 
     # Clear the cached objects because the statement above has cached it..
     for _, realization in ens.realizations.items():
         realization._eclsum = None
 
     ens.get_smry_dates(cache_eclsum=False)
-    assert not any([x._eclsum for (idx, x) in ens.realizations.items()])
+    assert not any(x._eclsum for (idx, x) in ens.realizations.items())
 
 
 def test_filedescriptors():
diff --git a/tests/test_ensemble_agg.py b/tests/test_ensemble_agg.py
index da6de1b2..16ef1594 100644
--- a/tests/test_ensemble_agg.py
+++ b/tests/test_ensemble_agg.py
@@ -98,12 +98,12 @@ def test_ensemble_aggregations(tmpdir):
     )
 
     # job 49 is the Eclipse forward model
-    assert "npv.txt" in stats["mean"].keys()
+    assert "npv.txt" in stats["mean"]
     assert stats["mean"]["npv.txt"] == 3382.5
 
     # Test agg(excludekeys=..)
-    assert "STATUS" not in reekensemble.agg("mean", excludekeys="STATUS").keys()
-    assert "STATUS" not in reekensemble.agg("mean", keylist=["parameters.txt"]).keys()
+    assert "STATUS" not in reekensemble.agg("mean", excludekeys="STATUS")
+    assert "STATUS" not in reekensemble.agg("mean", keylist=["parameters.txt"])
 
     assert (
         reekensemble.agg("p01")["parameters"]["RMS_SEED"]
@@ -114,26 +114,22 @@
         reekensemble.agg("foobar")
 
     # Check that include/exclude functionality in agg() works:
-    assert (
-        "parameters.txt"
-        not in reekensemble.agg("mean", excludekeys="parameters.txt").keys()
+    assert "parameters.txt" not in reekensemble.agg(
+        "mean", excludekeys="parameters.txt"
     )
-    assert (
-        "parameters.txt"
-        not in reekensemble.agg("mean", excludekeys=["parameters.txt"]).keys()
+    assert "parameters.txt" not in reekensemble.agg(
+        "mean", excludekeys=["parameters.txt"]
     )
-    assert "parameters.txt" not in reekensemble.agg("mean", keylist="STATUS").keys()
-    assert "parameters.txt" not in reekensemble.agg("mean", keylist=["STATUS"]).keys()
+    assert "parameters.txt" not in reekensemble.agg("mean", keylist="STATUS")
+    assert "parameters.txt" not in reekensemble.agg("mean", keylist=["STATUS"])
 
     # Shorthand notion works for keys to include, but they
     # should get returned with fully qualified paths.
-    assert (
-        "share/results/tables/unsmry--yearly.csv"
-        in reekensemble.agg("mean", keylist="unsmry--yearly").keys()
+    assert "share/results/tables/unsmry--yearly.csv" in reekensemble.agg(
+        "mean", keylist="unsmry--yearly"
     )
-    assert (
-        "share/results/tables/unsmry--yearly.csv"
-        in reekensemble.agg("mean", keylist=["unsmry--yearly"]).keys()
+    assert "share/results/tables/unsmry--yearly.csv" in reekensemble.agg(
+        "mean", keylist=["unsmry--yearly"]
     )
     assert isinstance(
         reekensemble.agg("mean", keylist="unsmry--yearly").get_df("unsmry--yearly"),
diff --git a/tests/test_ensemblecombination.py b/tests/test_ensemblecombination.py
index e4d0dee8..90c6d033 100644
--- a/tests/test_ensemblecombination.py
+++ b/tests/test_ensemblecombination.py
@@ -74,7 +74,7 @@ def test_ensemblecombination_basic():
     assert not vhalf_filtered2.get_df("unsmry--yearly").empty
     with pytest.raises((KeyError, ValueError)):
         # pylint: disable=pointless-statement
-        vhalf_filtered2.parameters
+        _ = vhalf_filtered2.parameters
 
     # Get summary data with parameters:
     smry_params = vhalf.get_df("unsmry--yearly", merge="parameters.txt")
diff --git a/tests/test_ensembleset.py b/tests/test_ensembleset.py
index dfed25bb..431585f0 100644
--- a/tests/test_ensembleset.py
+++ b/tests/test_ensembleset.py
@@ -1,5 +1,6 @@
 """Testing fmu-ensemble, EnsembleSet class."""
 
+import contextlib
 import glob
 import logging
 import os
@@ -63,10 +64,8 @@ def test_ensembleset_reek001(tmpdir):
     assert len(ensset["iter-1"].get_df("STATUS")) == 250
 
     # Try adding the same object over again
-    try:
+    with contextlib.suppress(ValueError):
         ensset.add_ensemble(iter0)
-    except ValueError:
-        pass
     assert len(ensset) == 2  # Unchanged!
 
     # Initializing nothing, we get warning about the missing name
@@ -385,7 +384,7 @@ def test_mangling_data(tmpdir):
     assert isinstance(ensset["iter-0"], ScratchEnsemble)
     assert isinstance(ensset["iter-1"], ScratchEnsemble)
 
-    assert "parameters.txt" in ensset.keys()
+    assert "parameters.txt" in ensset
 
     # We should only have parameters in iter-0
     params = ensset.get_df("parameters.txt")
@@ -393,7 +392,7 @@
     assert params["ENSEMBLE"].unique() == "iter-0"
 
     ensset.load_txt("outputs.txt")
-    assert "outputs.txt" in ensset.keys()
+    assert "outputs.txt" in ensset
     assert len(ensset.get_df("outputs.txt")) == 4
 
     # When it does not exist in any of the ensembles, we
@@ -422,12 +421,10 @@ def test_filestructures(tmpdir):
             )
             os.makedirs(runpath1)
             os.makedirs(runpath2)
-            open(os.path.join(runpath1, "parameters.txt"), "w").write(
-                "REALTIMESITER " + str(real * iterr) + "\n"
-            )
-            open(os.path.join(runpath1, "parameters.txt"), "w").write(
-                "REALTIMESITERX2 " + str(real * iterr * 2) + "\n"
-            )
+            with open(os.path.join(runpath1, "parameters.txt"), "w") as fhandle:
+                fhandle.write("REALTIMESITER " + str(real * iterr) + "\n")
+            with open(os.path.join(runpath2, "parameters.txt"), "w") as fhandle:
+                fhandle.write("REALTIMESITERX2 " + str(real * iterr * 2) + "\n")
 
     # Initializing from this ensemble root should give nothing,
     # we do not recognize this iter_*/real_* by default
@@ -525,23 +522,23 @@ def test_ertrunpathfile(tmp="TMP"):
 
     # Also construct an artificial ert runpathfile with iter-0 and iter-1,
     # by modifying a copy of the runpath for iter-0
-    iter0runpath = open(testdir + "/data/ert-runpath-file", "r").readlines()
+    with open(testdir + "/data/ert-runpath-file", "r") as fhandle:
+        iter0runpath = fhandle.readlines()
     if not os.path.exists(tmp):
         os.mkdir(tmp)
 
-    enssetrunpathfile = open(tmp + "/ensset-runpath-file", "w")
-    print(iter0runpath)
-    enssetrunpathfile.write("".join(iter0runpath))
-    for line in iter0runpath:
-        (real, path, eclname, _) = line.split()
-        enssetrunpathfile.write(real + " ")  # CHECK THIS!
-        # Could the first column just be the line number?
-        # Iterate on the ERT official doc when determined.
-        enssetrunpathfile.write(path.replace("iter-0", "iter-1") + " ")
-        enssetrunpathfile.write(eclname + " ")
-        enssetrunpathfile.write("001" + "\n")
-    enssetrunpathfile.close()
+    with open(tmp + "/ensset-runpath-file", "w") as enssetrunpathfile:
+        print(iter0runpath)
+        enssetrunpathfile.write("".join(iter0runpath))
+        for line in iter0runpath:
+            (real, path, eclname, _) = line.split()
+            enssetrunpathfile.write(real + " ")  # CHECK THIS!
+            # Could the first column just be the line number?
+            # Iterate on the ERT official doc when determined.
+            enssetrunpathfile.write(path.replace("iter-0", "iter-1") + " ")
+            enssetrunpathfile.write(eclname + " ")
+            enssetrunpathfile.write("001" + "\n")
 
     ensset = EnsembleSet("ensfromrunpath", runpathfile=tmp + "/ensset-runpath-file")
     assert len(ensset) == 2
diff --git a/tests/test_observations.py b/tests/test_observations.py
index c7299b40..15b3dd7d 100644
--- a/tests/test_observations.py
+++ b/tests/test_observations.py
@@ -275,18 +275,18 @@ def test_errormessages():
         Observations(3)
 
     # Unsupported observation category, this foobar will be wiped
-    emptyobs = Observations(dict(foobar="foo"))
+    emptyobs = Observations({"foobar": "foo"})
     assert emptyobs.empty
     # (there will be logged a warning)
 
     # Empty observation set should be ok, but it must be a dict
-    empty2 = Observations(dict())
+    empty2 = Observations({})
     assert empty2.empty
 
     with pytest.raises(ValueError):
         Observations([])
 
     # Check that the dict is a dict of lists:
-    assert Observations(dict(smry="not_a_list")).empty
+    assert Observations({"smry": "not_a_list"}).empty
     # (warning will be printed)
 
     # This should give a warning because 'observation' is missing
diff --git a/tests/test_realization.py b/tests/test_realization.py
index 91c490b5..adca08be 100644
--- a/tests/test_realization.py
+++ b/tests/test_realization.py
@@ -87,14 +87,14 @@ def test_single_realization(tmpdir):
     assert vol_df2["STOIIP_TOTAL"].sum() > 0
 
     # Test scalar import
-    assert "OK" in real.keys()  # Imported in __init__
+    assert "OK" in real  # Imported in __init__
     assert real["OK"] == "All jobs complete 22:47:54"
     # NB: Trailing whitespace from the OK-file is removed.
     assert isinstance(real["OK"], str)
 
     # Check that we can "reimport" the OK file
     real.load_scalar("OK", force_reread=True)
-    assert "OK" in real.keys()  # Imported in __init__
+    assert "OK" in real  # Imported in __init__
     assert real["OK"] == "All jobs complete 22:47:54"
     assert isinstance(real["OK"], str)
     assert len(real.files[real.files.LOCALPATH == "OK"]) == 1
@@ -114,7 +114,7 @@
     assert "emptyscalarfile" in real.files["LOCALPATH"].values
 
     # Check that FULLPATH always has absolute paths
-    assert all([os.path.isabs(x) for x in real.files["FULLPATH"]])
+    assert all(os.path.isabs(x) for x in real.files["FULLPATH"])
 
     with pytest.raises(IOError):
         real.load_scalar("notexisting.txt")
@@ -328,7 +328,8 @@ def test_volumetric_rates():
 
     # Pick 10 **random** dates to get the volumetric rates between:
     daily_dates = real.get_smry_dates(freq="daily", normalize=False)
-    subset_dates = np.random.choice(daily_dates, size=10, replace=False)
+    rng = np.random.default_rng()
+    subset_dates = rng.choice(daily_dates, size=10, replace=False)
     subset_dates.sort()
     dcum = real.get_smry(column_keys="FOPT", time_index=subset_dates)
     ddcum = real.get_volumetric_rates(column_keys="FOPT", time_index=subset_dates)
@@ -606,13 +607,13 @@ def test_singlereal_ecl(tmp="TMP"):
 
     # Test caching/internalization of summary files
 
     # This should be false, since only the full localpath is in keys():
-    assert "unsmry--raw.csv" not in real.keys()
-    assert "share/results/tables/unsmry--raw.csv" in real.keys()
+    assert "unsmry--raw.csv" not in real
+    assert "share/results/tables/unsmry--raw.csv" in real
     assert "FOPT" in real["unsmry--raw"]
     with pytest.raises((ValueError, KeyError)):
         # This does not exist before we have asked for it
         # pylint: disable=pointless-statement
-        "FOPT" in real["unsmry--yearly"]
+        _ = "FOPT" in real["unsmry--yearly"]
 
 
 def test_can_import_summary_files_beyond_2262(tmpdir, monkeypatch):
@@ -798,8 +799,8 @@ def test_filesystem_changes():
     # Should not fail
 
     # Try with an empty STATUS file:
-    fhandle = open(realdir + "/STATUS", "w")
-    fhandle.close()
+    with open(realdir + "/STATUS", "w") as fhandle:
+        pass
     real = ensemble.ScratchRealization(realdir)
     assert real.get_df("STATUS").empty
     # This demonstrates we can fool the Realization object, and
@@ -807,35 +808,33 @@
     # Try with a STATUS file with error message on first job
     # the situation where there is one successful job.
-    fhandle = open(realdir + "/STATUS", "w")
-    fhandle.write(
-        (
-            "Current host : st-rst16-02-03/x86_64 "
-            "file-server:10.14.10.238\n"
-            "LSF JOBID: not running LSF\n"
-            "COPY_FILE : 20:58:57 .... 20:59:00 "
-            "EXIT: 1/Executable: /project/res/komodo/2018.02/root/etc/ERT/"
-            "Config/jobs/util/script/copy_file.py failed with exit code: 1\n"
-        )
-    )
-    fhandle.close()
+    with open(realdir + "/STATUS", "w") as fhandle:
+        fhandle.write(
+            (
+                "Current host : st-rst16-02-03/x86_64 "
+                "file-server:10.14.10.238\n"
+                "LSF JOBID: not running LSF\n"
+                "COPY_FILE : 20:58:57 .... 20:59:00 "
+                "EXIT: 1/Executable: /project/res/komodo/2018.02/root/etc/ERT/"
+                "Config/jobs/util/script/copy_file.py failed with exit code: 1\n"
+            )
+        )
     real = ensemble.ScratchRealization(realdir)
     # When issue 37 is resolved, update this to 1 and check the
     # error message is picked up.
     assert len(real.get_df("STATUS")) == 1
 
-    fhandle = open(realdir + "/STATUS", "w")
-    fhandle.write(
-        (
-            "Current host : st-rst16-02-03/x86_64 "
-            "file-server:10.14.10.238\n"
-            "LSF JOBID: not running LSF\n"
-            "COPY_FILE : 20:58:55 .... 20:58:57\n"
-            "COPY_FILE : 20:58:57 .... 20:59:00 "
-            " EXIT: 1/Executable: /project/res/komodo/2018.02/root/etc/ERT/"
-            "Config/jobs/util/script/copy_file.py failed with exit code: 1 "
-        )
-    )
-    fhandle.close()
+    with open(realdir + "/STATUS", "w") as fhandle:
+        fhandle.write(
+            (
+                "Current host : st-rst16-02-03/x86_64 "
+                "file-server:10.14.10.238\n"
+                "LSF JOBID: not running LSF\n"
+                "COPY_FILE : 20:58:55 .... 20:58:57\n"
+                "COPY_FILE : 20:58:57 .... 20:59:00 "
+                " EXIT: 1/Executable: /project/res/komodo/2018.02/root/etc/ERT/"
+                "Config/jobs/util/script/copy_file.py failed with exit code: 1 "
+            )
+        )
     real = ensemble.ScratchRealization(realdir)
     assert len(real.get_df("STATUS")) == 2
     # Check that we have the error string picked up:
@@ -865,10 +864,9 @@
     # Unquoted valued with spaces will be truncated,
     # quoted valued will be correctly parsed
     # (read_csv(sep='\s+') is the parser)
-    param_file = open(realdir + "/parameters.txt", "a")
-    param_file.write("FOOBAR 1 2 3 4 5 6\n")
-    param_file.write('FOOSPACES "1 2 3 4 5 6"\n')
-    param_file.close()
+    with open(realdir + "/parameters.txt", "a") as param_file:
+        param_file.write("FOOBAR 1 2 3 4 5 6\n")
+        param_file.write('FOOSPACES "1 2 3 4 5 6"\n')
 
     real = ensemble.ScratchRealization(realdir)
     assert real.parameters["FOOBAR"] == 1
@@ -1002,7 +1000,7 @@ def test_drop():
     assert len(real.get_df("unsmry--monthly")) == datecount - 1
 
     real.drop("parameters")
-    assert "parameters.txt" not in real.keys()
+    assert "parameters.txt" not in real
 
 
 def test_find_files_comps():
@@ -1069,7 +1067,7 @@ def test_find_files_yml():
             fileh.write("baah")
         yamlfile = "." + filename + ".yml"
         with open(os.path.join(realdir, yamlfile), "w") as fileh:
-            fileh.write(yaml.dump(dict(a=dict(x=1, y=2), b="bar")))
+            fileh.write(yaml.dump({"a": {"x": 1, "y": 2}, "b": "bar"}))
 
     # Now find the gri files, and add metadata:
     files_df = real.find_files("*.gri", metayaml=True)
@@ -1120,7 +1118,7 @@ def test_get_smry_meta():
     # Can create dataframes like this:
     meta_df = pd.DataFrame.from_dict(meta, orient="index")
     hist_keys = meta_df[meta_df["is_historical"]].index
-    assert all([key.split(":")[0].endswith("H") for key in hist_keys])
+    assert all(key.split(":")[0].endswith("H") for key in hist_keys)
 
     # When virtualizing a realization, smry data must be loaded
     # for smry metadata to be conserved
@@ -1197,7 +1195,7 @@ def test_get_df_merge():
     assert "top_structure" in scalar_dict
 
     # Inject a random dict and merge with:
-    real.data["foodict"] = dict(BAR="COM")
+    real.data["foodict"] = {"BAR": "COM"}
     dframe = real.get_df("parameters", merge="foodict")
     assert "BAR" in dframe
     assert "SORG1" in dframe
diff --git a/tests/test_realizationcombination.py b/tests/test_realizationcombination.py
index 18f17426..6443c0d2 100644
--- a/tests/test_realizationcombination.py
+++ b/tests/test_realizationcombination.py
@@ -56,12 +56,12 @@ def test_realizationcombination_basic():
     assert "FWL" in vdiff["parameters"]
     assert vdiff["npv.txt"] == real1["npv.txt"] - real0["npv.txt"]
     vdiff_filtered = vdiff.to_virtual(keyfilter="parameters")
-    assert "parameters.txt" in vdiff_filtered.keys()
+    assert "parameters.txt" in vdiff_filtered
     with pytest.raises((KeyError, ValueError)):
         vdiff_filtered.get_df("unsmry--yearly")
 
     vdiff_filtered2 = vdiff.to_virtual(keyfilter="unsmry--yearly")
-    assert "parameters.txt" not in vdiff_filtered2.keys()
+    assert "parameters.txt" not in vdiff_filtered2
     assert "FWPR" in vdiff_filtered2.get_df("unsmry--yearly")
 
     smrymeta = realdiff.get_smry_meta(["FO*"])
diff --git a/tests/test_virtualensemble.py b/tests/test_virtualensemble.py
index 9a522fcc..e6eb9782 100644
--- a/tests/test_virtualensemble.py
+++ b/tests/test_virtualensemble.py
@@ -83,9 +83,9 @@ def test_virtualensemble():
         == "share/results/tables/unsmry--yearly.csv"
     )
 
-    assert "npv.txt" in vens.keys()
+    assert "npv.txt" in vens
     assert len(vens["npv.txt"]) == 5  # includes the 'error!' string in real4
-    assert "outputs.txt" in vens.keys()
+    assert "outputs.txt" in vens
     assert len(vens["outputs.txt"]) == 4
 
     # Check that get_smry() works
@@ -165,7 +165,7 @@
 
     # Test data removal:
     vens.remove_data("parameters.txt")
-    assert "parameters.txt" not in vens.keys()
+    assert "parameters.txt" not in vens
     vens.remove_data("bogus")  # This should only give warning
 
     # Test data addition. It should(?) work also for earlier nonexisting
@@ -178,7 +178,7 @@
             }
         ),
     )
-    assert "betterdata" in vens.keys()
+    assert "betterdata" in vens
     assert "REAL" in vens["betterdata"].columns
     assert "NPV" in vens["betterdata"].columns
 
@@ -250,7 +250,7 @@ def test_todisk(tmpdir):
     # but change of order is fine
     assert set(vens.keys()) == set(fromdisk.keys())
 
-    for frame in vens.keys():
+    for frame in vens:
         if frame == "STATUS":
             continue
 
@@ -283,11 +283,11 @@
     lazyfromdisk = VirtualEnsemble(fromdisk="vens_dumped_csv", lazy_load=True)
     assert set(vens.keys()) == set(fromcsvdisk.keys())
     assert set(vens.keys()) == set(lazyfromdisk.keys())
-    assert "OK" in lazyfromdisk.lazy_frames.keys()
-    assert "OK" not in lazyfromdisk.data.keys()
+    assert "OK" in lazyfromdisk.lazy_frames
+    assert "OK" not in lazyfromdisk.data
     assert len(fromcsvdisk.get_df("OK")) == len(lazyfromdisk.get_df("OK"))
-    assert "OK" not in lazyfromdisk.lazy_frames.keys()
-    assert "OK" in lazyfromdisk.data.keys()
+    assert "OK" not in lazyfromdisk.lazy_frames
+    assert "OK" in lazyfromdisk.data
     assert len(fromcsvdisk.parameters) == len(lazyfromdisk.parameters)
     assert len(fromcsvdisk.get_df("unsmry--yearly")) == len(
         lazyfromdisk.get_df("unsmry--yearly")
@@ -312,26 +312,31 @@
     assert set(vens.keys()) == set(fromcsvdisk2.keys())
 
     # Test manual intervention:
-    fooframe = pd.DataFrame(data=np.random.randn(3, 3), columns=["FOO", "BAR", "COM"])
+    rng = np.random.default_rng()
+    fooframe = pd.DataFrame(
+        data=rng.standard_normal(size=(3, 3)), columns=["FOO", "BAR", "COM"]
+    )
     fooframe.to_csv(os.path.join("vens_dumped", "share/results/tables/randomdata.csv"))
     manualens = VirtualEnsemble(fromdisk="vens_dumped")
-    assert "share/results/tables/randomdata.csv" not in manualens.keys()
+    assert "share/results/tables/randomdata.csv" not in manualens
 
     # Now with correct column header,
     # but floating point data for realizations..
-    fooframe = pd.DataFrame(data=np.random.randn(3, 3), columns=["REAL", "BAR", "COM"])
+    fooframe = pd.DataFrame(
+        data=rng.standard_normal(size=(3, 3)), columns=["REAL", "BAR", "COM"]
+    )
     fooframe.to_csv(os.path.join("vens_dumped", "share/results/tables/randomdata.csv"))
     manualens = VirtualEnsemble(fromdisk="vens_dumped")
-    assert "share/results/tables/randomdata.csv" not in manualens.keys()
+    assert "share/results/tables/randomdata.csv" not in manualens
 
     # Now with correct column header, and with integer data for REAL..
     fooframe = pd.DataFrame(
-        data=np.random.randint(low=0, high=100, size=(3, 3)),
+        data=rng.integers(low=0, high=100, size=(3, 3)),
         columns=["REAL", "BAR", "COM"],
     )
     fooframe.to_csv(os.path.join("vens_dumped", "share/results/tables/randomdata.csv"))
     manualens = VirtualEnsemble(fromdisk="vens_dumped")
-    assert "share/results/tables/randomdata.csv" in manualens.keys()
+    assert "share/results/tables/randomdata.csv" in manualens
 
 
 def test_todisk_includefile(tmpdir):
@@ -381,12 +386,12 @@ def test_get_smry_meta(tmpdir):
         "reekmetatest", testdir + "/data/testensemble-reek001/" + "realization-*/iter-0"
     )
     # If no smry loaded before virtualization, nothing should be there:
-    assert "__smry_metadata" not in reekensemble.to_virtual().keys()
+    assert "__smry_metadata" not in reekensemble.to_virtual()
 
     reekensemble.load_smry(time_index="yearly", column_keys=["F*"])
     origmeta = reekensemble.get_smry_meta()
     vens = reekensemble.to_virtual()
-    assert "__smry_metadata" in vens.keys()
+    assert "__smry_metadata" in vens
     meta = vens.get_df("__smry_metadata")
     # Internally it is stored as a DataFrame, we check that
     # since it is possible to get it using get_df(), and thereby
@@ -449,7 +454,7 @@ def test_get_smry_interpolation():
     reekensemble.load_smry(time_index="monthly", column_keys=["F*"])
     # Create a vens that contains both monthly and yearly:
     vens_monthly = reekensemble.to_virtual()
-    assert "npv.txt" in vens_monthly.keys()
+    assert "npv.txt" in vens_monthly
     reekensemble.load_smry(time_index="daily", column_keys=["F*"])
     _ = reekensemble.to_virtual()  # monthly, yearly *and* daily
diff --git a/tests/test_virtualrealization.py b/tests/test_virtualrealization.py
index c33f1cfe..3f8262a0 100644
--- a/tests/test_virtualrealization.py
+++ b/tests/test_virtualrealization.py
@@ -32,17 +32,17 @@ def test_virtual_realization():
     # Check deepcopy(), first prove a bad situation
     vreal = real.to_virtual(deepcopy=False)
-    assert "parameters.txt" in real.keys()
+    assert "parameters.txt" in real
     del vreal["parameters.txt"]
 
     # This is a bad situation:
-    assert "parameters.txt" not in real.keys()
+    assert "parameters.txt" not in real
 
     # Now confirm that we can fix the bad
     # situation with the default to_virtual()
     real = ensemble.ScratchRealization(realdir)
     vreal = real.to_virtual()
     del vreal["parameters.txt"]
-    assert "parameters.txt" in real.keys()
+    assert "parameters.txt" in real
 
     real = ensemble.ScratchRealization(realdir)
     vreal = real.to_virtual()
@@ -76,7 +76,7 @@ def test_virtual_todisk(tmpdir):
     real.load_scalar("npv.txt")
 
     vreal = real.to_virtual()
-    assert "npv.txt" in vreal.keys()
+    assert "npv.txt" in vreal
 
     tmpdir.chdir()
@@ -116,7 +116,7 @@ def test_virtual_fromdisk(tmpdir):
     vreal = ensemble.VirtualRealization("foo")
     vreal.load_disk("virtreal2")
 
-    for key in vreal.keys():
+    for key in vreal:
         if key != "__smry_metadata":
             if isinstance(real.get_df(key), (pd.DataFrame, dict)):
                 assert len(real.get_df(key)) == len(vreal.get_df(key))
@@ -352,7 +352,7 @@ def test_glob_smry_keys():
     assert len(vreal._glob_smry_keys(["FOP*"])) == 9
 
     assert len(vreal._glob_smry_keys("WOPT:*")) == 8
-    assert all([x.startswith("WOPT:") for x in vreal._glob_smry_keys("WOPT:*")])
+    assert all(x.startswith("WOPT:") for x in vreal._glob_smry_keys("WOPT:*"))
 
     assert not vreal._glob_smry_keys("FOOBAR")