Skip to content

Commit

Permalink
Merge branch 'main' into dependabot/github_actions/peter-evans/find-comment-3.1.0
Browse files Browse the repository at this point in the history
  • Loading branch information
aulemahal authored Apr 11, 2024
2 parents 85f363d + 13bb070 commit 3aac41c
Show file tree
Hide file tree
Showing 17 changed files with 121 additions and 34 deletions.
2 changes: 1 addition & 1 deletion .cruft.json
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
"project_slug": "xscen",
"project_short_description": "A climate change scenario-building analysis framework, built with xclim/xarray.",
"pypi_username": "RondeauG",
"version": "0.8.4-dev.6",
"version": "0.8.4-dev.9",
"use_pytest": "y",
"use_black": "y",
"use_conda": "y",
Expand Down
5 changes: 4 additions & 1 deletion CHANGES.rst
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ Changelog

v0.9.0 (unreleased)
-------------------
Contributors to this version: Trevor James Smith (:user:`Zeitsperre`), Pascal Bourgault (:user:`aulemahal`), Gabriel Rondeau-Genesse (:user:`RondeauG`), Juliette Lavoie (:user: `juliettelavoie`).
Contributors to this version: Trevor James Smith (:user:`Zeitsperre`), Pascal Bourgault (:user:`aulemahal`), Gabriel Rondeau-Genesse (:user:`RondeauG`), Juliette Lavoie (:user:`juliettelavoie`), Marco Braun (:user:`vindelico`).

Breaking changes
^^^^^^^^^^^^^^^^
Expand All @@ -14,13 +14,15 @@ Breaking changes

Internal changes
^^^^^^^^^^^^^^^^
* Modified ``xscen.utils.change_unit`` to always adopt the name from the `variables_and_units dictionary` if the physical units are equal but their names are not (ex. degC <-> °C) (:pull:`373`).
* Updated the `cookiecutter` template to the latest version. (:pull:`358`):
* Addresses a handful of misconfigurations in the GitHub Workflows.
* Added a few free `grep`-based hooks for finding unwanted artifacts in the code base.
* Updated `ruff` to v0.2.0 and `black` to v24.2.0.
* Added more tests. (:pull:`366`, :pull:`367`, :pull:`372`).
* Refactored ``xs.spatial.subset`` into smaller functions. (:pull:`367`).
* An `encoding` argument was added to ``xs.config.load_config``. (:pull:`370`).
* Various small fixes to the code to address FutureWarnings. (:pull:`380`).

Bug fixes
^^^^^^^^^
Expand All @@ -31,6 +33,7 @@ Bug fixes
* Fixed a bug to accept `group = False` in `adjust` function. (:pull:`366`).
* `creep_weights` now correctly handles the case where the grid is small, `n` is large, and `mode=wrap`. (:issue:`367`).
* Fixed a bug in ``tasmin_from_dtr`` and ``tasmax_from_dtr``, when `dtr` units differed from tasmin/max. (:pull:`372`).
* Fixed a bug where the requested chunking would be ignored when saving a dataset (:pull:`379`).

v0.8.3 (2024-02-28)
-------------------
Expand Down
1 change: 0 additions & 1 deletion docs/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -181,7 +181,6 @@
"_build",
"Thumbs.db",
".DS_Store",
"notebooks/global_tas_average_obs.ipynb"
]

# The name of the Pygments (syntax highlighting) style to use.
Expand Down
21 changes: 20 additions & 1 deletion docs/notebooks/2_getting_started.ipynb
Original file line number Diff line number Diff line change
@@ -1,5 +1,24 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "eb10a72a-9ea1-4414-922b-0ea1aaea0648",
"metadata": {
"nbsphinx": "hidden"
},
"outputs": [],
"source": [
"# Remove flox spam\n",
"\n",
"import logging\n",
"\n",
"# Get the logger for the 'flox' package\n",
"logger = logging.getLogger(\"flox\")\n",
"# Set the logging level to WARNING\n",
"logger.setLevel(logging.WARNING)"
]
},
{
"cell_type": "markdown",
"id": "4f220a85",
Expand Down Expand Up @@ -1481,7 +1500,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
"version": "3.12.2"
}
},
"nbformat": 4,
Expand Down
21 changes: 20 additions & 1 deletion docs/notebooks/3_diagnostics.ipynb
Original file line number Diff line number Diff line change
@@ -1,5 +1,24 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "d513b8c4-0cb4-429b-b169-e0d8d40c795f",
"metadata": {
"nbsphinx": "hidden"
},
"outputs": [],
"source": [
"# Remove flox spam\n",
"\n",
"import logging\n",
"\n",
"# Get the logger for the 'flox' package\n",
"logger = logging.getLogger(\"flox\")\n",
"# Set the logging level to WARNING\n",
"logger.setLevel(logging.WARNING)"
]
},
{
"cell_type": "code",
"execution_count": null,
Expand Down Expand Up @@ -484,7 +503,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
"version": "3.12.2"
}
},
"nbformat": 4,
Expand Down
24 changes: 22 additions & 2 deletions docs/notebooks/4_ensembles.ipynb
Original file line number Diff line number Diff line change
@@ -1,5 +1,23 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"nbsphinx": "hidden"
},
"outputs": [],
"source": [
"# Remove flox spam\n",
"\n",
"import logging\n",
"\n",
"# Get the logger for the 'flox' package\n",
"logger = logging.getLogger(\"flox\")\n",
"# Set the logging level to WARNING\n",
"logger.setLevel(logging.WARNING)"
]
},
{
"cell_type": "markdown",
"metadata": {},
Expand Down Expand Up @@ -36,7 +54,9 @@
"\n",
"for d in datasets:\n",
" ds = open_dataset(datasets[d]).isel(lon=slice(0, 4), lat=slice(0, 4))\n",
" ds = xs.climatological_mean(ds, window=30, periods=[[1981, 2010], [2021, 2050]])\n",
" ds = xs.climatological_op(\n",
" ds, op=\"mean\", window=30, periods=[[1981, 2010], [2021, 2050]]\n",
" )\n",
" datasets[d] = xs.compute_deltas(ds, reference_horizon=\"1981-2010\")\n",
" datasets[d].attrs[\"cat:id\"] = d # Required by build_reduction_data\n",
" datasets[d].attrs[\"cat:xrfreq\"] = \"AS-JAN\""
Expand Down Expand Up @@ -270,7 +290,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
"version": "3.12.2"
}
},
"nbformat": 4,
Expand Down
21 changes: 20 additions & 1 deletion docs/notebooks/5_warminglevels.ipynb
Original file line number Diff line number Diff line change
@@ -1,5 +1,24 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "f1899896-70a1-4efb-80e6-8765b95f4388",
"metadata": {
"nbsphinx": "hidden"
},
"outputs": [],
"source": [
"# Remove flox spam\n",
"\n",
"import logging\n",
"\n",
"# Get the logger for the 'flox' package\n",
"logger = logging.getLogger(\"flox\")\n",
"# Set the logging level to WARNING\n",
"logger.setLevel(logging.WARNING)"
]
},
{
"cell_type": "markdown",
"id": "3e311475",
Expand Down Expand Up @@ -483,7 +502,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
"version": "3.12.2"
}
},
"nbformat": 4,
Expand Down
4 changes: 2 additions & 2 deletions docs/notebooks/6_config.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -277,7 +277,7 @@
"import xarray as xr\n",
"\n",
"# Create a dummy dataset\n",
"time = pd.date_range(\"1951-01-01\", \"2100-01-01\", freq=\"AS-JAN\")\n",
"time = pd.date_range(\"1951-01-01\", \"2100-01-01\", freq=\"YS-JAN\")\n",
"da = xr.DataArray([0] * len(time), coords={\"time\": time})\n",
"da.name = \"test\"\n",
"ds = da.to_dataset()\n",
Expand Down Expand Up @@ -378,7 +378,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
"version": "3.12.2"
}
},
"nbformat": 4,
Expand Down
2 changes: 1 addition & 1 deletion environment-dev.yml
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ dependencies:
- zarr
# Opt
- nc-time-axis >=1.3.1
- pyarrow >=1.0.0
- pyarrow >=10.0.1
# Dev
- babel
- black ==24.2.0
Expand Down
2 changes: 1 addition & 1 deletion environment.yml
Original file line number Diff line number Diff line change
Expand Up @@ -37,5 +37,5 @@ dependencies:
- babel
# Opt
- nc-time-axis >=1.3.1
- pyarrow >=1.0.0
- pyarrow >=10.0.1
- pip
4 changes: 2 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ dependencies = [
"pandas >=2.2",
"parse",
# Used when opening catalogs.
"pyarrow",
"pyarrow>=10.0.1",
"pyyaml",
"rechunker",
"scipy",
Expand Down Expand Up @@ -127,7 +127,7 @@ target-version = [
]

[tool.bumpversion]
current_version = "0.8.4-dev.6"
current_version = "0.8.4-dev.9"
commit = true
commit_args = "--no-verify"
tag = false
Expand Down
File renamed without changes.
8 changes: 1 addition & 7 deletions xscen/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@

__author__ = """Gabriel Rondeau-Genesse"""
__email__ = "[email protected]"
__version__ = "0.8.4-dev.6"
__version__ = "0.8.4-dev.9"


def warning_on_one_line(
Expand All @@ -75,9 +75,3 @@ def warning_on_one_line(
"Pass observed=False to retain current behavior or observed=True to adopt the future default "
"and silence this warning.",
)
warnings.filterwarnings(
"ignore",
category=FutureWarning,
module="intake_esm",
message="DataFrame.applymap has been deprecated. Use DataFrame.map instead.",
)
12 changes: 7 additions & 5 deletions xscen/catutils.py
Original file line number Diff line number Diff line change
Expand Up @@ -634,11 +634,13 @@ def parse_directory( # noqa: C901

# translate xrfreq into frequencies and vice-versa
if {"xrfreq", "frequency"}.issubset(df.columns):
df["xrfreq"].fillna(
df["frequency"].apply(CV.frequency_to_xrfreq, default=pd.NA), inplace=True
df.fillna(
{"xrfreq": df["frequency"].apply(CV.frequency_to_xrfreq, default=pd.NA)},
inplace=True,
)
df["frequency"].fillna(
df["xrfreq"].apply(CV.xrfreq_to_frequency, default=pd.NA), inplace=True
df.fillna(
{"frequency": df["xrfreq"].apply(CV.xrfreq_to_frequency, default=pd.NA)},
inplace=True,
)

# Parse dates
Expand Down Expand Up @@ -757,7 +759,7 @@ def parse_from_ds( # noqa: C901
attrs["variable"] = tuple(sorted(variables))
elif name in ("frequency", "xrfreq") and time is not None and time.size > 3:
# round to the minute to catch floating point imprecision
freq = xr.infer_freq(time.round("T"))
freq = xr.infer_freq(time.round("min"))
if freq:
if "xrfreq" in names:
attrs["xrfreq"] = freq
Expand Down
5 changes: 3 additions & 2 deletions xscen/extract.py
Original file line number Diff line number Diff line change
Expand Up @@ -175,7 +175,7 @@ def extract_dataset( # noqa: C901
)

out_dict = {}
for xrfreq in pd.unique([x for y in variables_and_freqs.values() for x in y]):
for xrfreq in np.unique([x for y in variables_and_freqs.values() for x in y]):
ds = xr.Dataset()
attrs = {}
# iterate on the datasets, in reverse timedelta order
Expand Down Expand Up @@ -814,7 +814,8 @@ def search_data_catalogs( # noqa: C901
valid_tp = []
for var, group in varcat.df.groupby(
varcat.esmcat.aggregation_control.groupby_attrs
+ ["variable"]
+ ["variable"],
observed=True,
):
valid_tp.append(
subset_file_coverage(
Expand Down
10 changes: 9 additions & 1 deletion xscen/io.py
Original file line number Diff line number Diff line change
Expand Up @@ -401,6 +401,8 @@ def save_to_netcdf(
for var in list(ds.data_vars.keys()):
if keepbits := _get_keepbits(bitround, var, ds[var].dtype):
ds = ds.assign({var: round_bits(ds[var], keepbits)})
# Remove original_shape from encoding, since it can cause issues with some engines.
ds[var].encoding.pop("original_shape", None)

_coerce_attrs(ds.attrs)
for var in ds.variables.values():
Expand Down Expand Up @@ -519,6 +521,8 @@ def _skip(var):
encoding.pop(var)
if keepbits := _get_keepbits(bitround, var, ds[var].dtype):
ds = ds.assign({var: round_bits(ds[var], keepbits)})
# Remove original_shape from encoding, since it can cause issues with some engines.
ds[var].encoding.pop("original_shape", None)

if len(ds.data_vars) == 0:
return None
Expand Down Expand Up @@ -904,8 +908,12 @@ def rechunk_for_saving(ds: xr.Dataset, rechunk: dict):
ds[rechunk_var] = ds[rechunk_var].chunk(
{d: chnks for d, chnks in rechunk_dims.items() if d in ds[rechunk_var].dims}
)
ds[rechunk_var].encoding.pop("chunksizes", None)
ds[rechunk_var].encoding["chunksizes"] = tuple(
rechunk_dims[d] if d in rechunk_dims else ds[d].shape[0]
for d in ds[rechunk_var].dims
)
ds[rechunk_var].encoding.pop("chunks", None)
ds[rechunk_var].encoding.pop("preferred_chunks", None)

return ds

Expand Down
13 changes: 8 additions & 5 deletions xscen/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,7 @@ def date_parser( # noqa: C901
date : str, cftime.datetime, pd.Timestamp, datetime.datetime, pd.Period
Date to be converted
end_of_period : bool or str
If 'Y' or 'M', the returned date will be the end of the year or month that contains the received date.
If 'YE' or 'ME', the returned date will be the end of the year or month that contains the received date.
If True, the period is inferred from the date's precision, but `date` must be a string, otherwise nothing is done.
out_dtype : str
Choices are 'datetime', 'period' or 'str'
Expand Down Expand Up @@ -245,12 +245,12 @@ def _parse_date(date, fmts):

if isinstance(end_of_period, str) or (end_of_period is True and fmt):
quasiday = (pd.Timedelta(1, "d") - pd.Timedelta(1, "s")).as_unit(date.unit)
if end_of_period == "Y" or "m" not in fmt:
if end_of_period in ["Y", "YE"] or "m" not in fmt:
date = (
pd.tseries.frequencies.to_offset("A-DEC").rollforward(date) + quasiday
pd.tseries.frequencies.to_offset("YE-DEC").rollforward(date) + quasiday
)
elif end_of_period == "M" or "d" not in fmt:
date = pd.tseries.frequencies.to_offset("M").rollforward(date) + quasiday
elif end_of_period in ["M", "ME"] or "d" not in fmt:
date = pd.tseries.frequencies.to_offset("ME").rollforward(date) + quasiday
# TODO: Implement subdaily ?

if out_dtype == "str":
Expand Down Expand Up @@ -718,6 +718,9 @@ def change_units(ds: xr.Dataset, variables_and_units: dict) -> xr.Dataset:
raise NotImplementedError(
f"No known transformation between {ds[v].units} and {variables_and_units[v]} (temporal dimensionality mismatch)."
)
elif (v in ds) and (ds[v].units != variables_and_units[v]):
# update unit name if physical units are equal but not their name (ex. degC vs °C)
ds[v] = ds[v].assign_attrs(units=variables_and_units[v])

return ds

Expand Down

0 comments on commit 3aac41c

Please sign in to comment.