Commit: add Optional
RondeauG committed Nov 6, 2023
1 parent f643ea8 commit 906f44b
Showing 15 changed files with 177 additions and 164 deletions.
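The change is mechanical throughout: defaults written as `param: int = None` become `param: Optional[int] = None`. PEP 484 deprecated the implicit-Optional form, and recent mypy releases reject it by default (the `no_implicit_optional` setting), so the explicit spelling is needed for clean type checking. A minimal before/after sketch, not taken from xscen:

from typing import Optional

# Before: the annotation says `int`, but the default is None. PEP 484 once
# let checkers read this as Optional[int] implicitly; strict checkers now
# report it as an error.
def before(window: int = None):  # flagged by mypy's no_implicit_optional
    ...

# After: the annotation states explicitly that None is an accepted value.
def after(window: Optional[int] = None):
    ...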
26 changes: 13 additions & 13 deletions xscen/aggregate.py
@@ -44,10 +44,10 @@ def _(s):
 def climatological_mean(
     ds: xr.Dataset,
     *,
-    window: int = None,
-    min_periods: int = None,
+    window: Optional[int] = None,
+    min_periods: Optional[int] = None,
     interval: int = 1,
-    periods: Union[list[str], list[list[str]]] = None,
+    periods: Optional[Union[list[str], list[list[str]]]] = None,
     to_level: Optional[str] = "climatology",
 ) -> xr.Dataset:
     """Compute the mean over 'year' for given time periods, respecting the temporal resolution of ds.
@@ -372,13 +372,13 @@ def spatial_mean(
     ds: xr.Dataset,
     method: str,
     *,
-    spatial_subset: bool = None,
-    call_clisops: bool = False,
-    region: Union[dict, str] = None,
-    kwargs: dict = None,
-    simplify_tolerance: float = None,
-    to_domain: str = None,
-    to_level: str = None,
+    spatial_subset: Optional[bool] = None,
+    call_clisops: Optional[bool] = False,
+    region: Optional[Union[dict, str]] = None,
+    kwargs: Optional[dict] = None,
+    simplify_tolerance: Optional[float] = None,
+    to_domain: Optional[str] = None,
+    to_level: Optional[str] = None,
 ) -> xr.Dataset:
     """Compute the spatial mean using a variety of available methods.
@@ -703,10 +703,10 @@ def produce_horizon(
         ModuleType,
     ],
     *,
-    periods: Union[list[str], list[list[str]]] = None,
-    warminglevels: dict = None,
+    periods: Optional[Union[list[str], list[list[str]]]] = None,
+    warminglevels: Optional[dict] = None,
     to_level: Optional[str] = "horizons",
-    period: list = None,
+    period: Optional[list] = None,
 ) -> xr.Dataset:
     """Compute indicators, then the climatological mean, and finally unstack dates in order to have a single dataset with all indicators of different frequencies.
18 changes: 9 additions & 9 deletions xscen/biasadjust.py
@@ -60,12 +60,12 @@ def train(
     period: list[str],
     *,
     method: str = "DetrendedQuantileMapping",
-    group: Union[sdba.Grouper, str, dict] = None,
-    xclim_train_args: dict = None,
+    group: Optional[Union[sdba.Grouper, str, dict]] = None,
+    xclim_train_args: Optional[dict] = None,
     maximal_calendar: str = "noleap",
-    adapt_freq: dict = None,
-    jitter_under: dict = None,
-    jitter_over: dict = None,
+    adapt_freq: Optional[dict] = None,
+    jitter_under: Optional[dict] = None,
+    jitter_over: Optional[dict] = None,
     align_on: Optional[str] = "year",
 ) -> xr.Dataset:
     """
@@ -191,11 +191,11 @@ def adjust(
     dsim: xr.Dataset,
     periods: Union[list[str], list[list[str]]],
     *,
-    xclim_adjust_args: dict = None,
+    xclim_adjust_args: Optional[dict] = None,
     to_level: str = "biasadjusted",
-    bias_adjust_institution: str = None,
-    bias_adjust_project: str = None,
-    moving_yearly_window: dict = None,
+    bias_adjust_institution: Optional[str] = None,
+    bias_adjust_project: Optional[str] = None,
+    moving_yearly_window: Optional[dict] = None,
     align_on: Optional[str] = "year",
 ) -> xr.Dataset:
     """
34 changes: 18 additions & 16 deletions xscen/catalog.py
@@ -191,9 +191,9 @@ def __init__(
     def from_df(
         cls,
         data: Union[pd.DataFrame, os.PathLike, Sequence[os.PathLike]],
-        esmdata: Union[os.PathLike, dict] = None,
+        esmdata: Optional[Union[os.PathLike, dict]] = None,
         *,
-        read_csv_kwargs: Mapping[str, Any] = None,
+        read_csv_kwargs: Optional[Mapping[str, Any]] = None,
         name: str = "virtual",
         **intake_kwargs,
     ):
@@ -255,7 +255,7 @@ def _find_unique(series):
         else:
             return data.apply(_find_unique, result_type="reduce").to_dict()

-    def unique(self, columns: Union[str, Sequence[str]] = None):
+    def unique(self, columns: Optional[Union[str, Sequence[str]]] = None):
         """Return a series of unique values in the catalog.

         Parameters
@@ -396,7 +396,7 @@ def exists_in_cat(self, **columns) -> bool:

     def to_dataset(
         self,
-        concat_on: Union[list[str], str] = None,
+        concat_on: Optional[Union[list[str], str]] = None,
         create_ensemble_on: Optional[Union[list[str], str]] = None,
         calendar: Optional[str] = "standard",
         **kwargs,
@@ -528,7 +528,7 @@ def create(
         cls,
         filename: Union[os.PathLike, str],
         *,
-        project: dict = None,
+        project: Optional[dict] = None,
         overwrite: bool = False,
     ):
         r"""Create a new project catalog from some project metadata.
@@ -609,8 +609,8 @@ def __init__(
         df: Union[str, dict],
         *args,
         create: bool = False,
-        overwrite: bool = None,
-        project: dict = None,
+        overwrite: bool = False,
+        project: Optional[dict] = None,
         **kwargs,
     ):
         """Open or create a project catalog.
@@ -622,7 +622,7 @@ def __init__(
             If dict, this must be a dict representation of an ESM catalog. See the notes below.
         create : bool
             If True, and if 'df' is a string, this will create an empty ProjectCatalog if none already exists.
-        overwrite : bool, optional
+        overwrite : bool
             If this and 'create' are True, this will overwrite any existing JSON and CSV file with an empty catalog.
         project : dict, optional
             Metadata to create the catalog, if required.
@@ -647,12 +647,14 @@ def __init__(
     # TODO: Implement a way to easily destroy part of the catalog to "reset" some steps
     def update(
         self,
-        df: Union[
-            DataCatalog,
-            intake_esm.esm_datastore,
-            pd.DataFrame,
-            pd.Series,
-            Sequence[pd.Series],
+        df: Optional[
+            Union[
+                DataCatalog,
+                intake_esm.esm_datastore,
+                pd.DataFrame,
+                pd.Series,
+                Sequence[pd.Series],
+            ]
         ] = None,
     ):
         """Update the catalog with new data and writes the new data to the csv file.
@@ -736,7 +738,7 @@ def update_from_ds(
         self,
         ds: xarray.Dataset,
         path: Union[os.PathLike, str],
-        info_dict: dict = None,
+        info_dict: Optional[dict] = None,
         **info_kwargs,
     ):
         """Update the catalog with new data and writes the new data to the csv file.
@@ -852,7 +854,7 @@ def _build_id(element: pd.Series, columns: list[str]):


 def generate_id(
-    df: Union[pd.DataFrame, xr.Dataset], id_columns: list = None
+    df: Union[pd.DataFrame, xr.Dataset], id_columns: Optional[list] = None
 ) -> pd.Series:
     """Create an ID from column entries.
38 changes: 19 additions & 19 deletions xscen/catutils.py
@@ -13,7 +13,7 @@
 from functools import partial, reduce
 from multiprocessing import Pool
 from pathlib import Path
-from typing import Any, Union
+from typing import Any, Optional, Union

 import cftime
 import netCDF4
@@ -109,7 +109,7 @@ def _find_assets(
     root: Union[str, os.PathLike],
     exts: set[str],
     lengths: set[int],
-    dirglob: str = None,
+    dirglob: Optional[str] = None,
 ):
     """Walk recursively over files in a directory, filtering according to a glob pattern, path depth and extensions.
@@ -180,9 +180,9 @@ def _name_parser(
     path: Union[os.PathLike, str],
     root: Union[os.PathLike, str],
     patterns: list[Union[str, parse.Parser]],
-    read_from_file: Union[list[str], dict] = None,
-    attrs_map: dict = None,
-    xr_open_kwargs: dict = None,
+    read_from_file: Optional[Union[list[str], dict]] = None,
+    attrs_map: Optional[dict] = None,
+    xr_open_kwargs: Optional[dict] = None,
 ) -> Union[dict, None]:
     """Extract metadata information from the file path.
@@ -255,11 +255,11 @@ def _name_parser(
 def _parse_dir(
     root: Union[os.PathLike, str],
     patterns: list[str],
-    dirglob: str = None,
-    checks: list[str] = None,
-    read_from_file: Union[list[str], dict] = None,
-    attrs_map: dict = None,
-    xr_open_kwargs: dict = None,
+    dirglob: Optional[str] = None,
+    checks: Optional[list[str]] = None,
+    read_from_file: Optional[Union[list[str], dict]] = None,
+    attrs_map: Optional[dict] = None,
+    xr_open_kwargs: Optional[dict] = None,
     progress: bool = False,
 ):
     """Iterate and parses files in a directory, filtering according to basic pattern properties and optional checks.
@@ -436,21 +436,21 @@ def parse_directory(
     directories: list[Union[str, os.PathLike]],
     patterns: list[str],
     *,
-    id_columns: list[str] = None,
+    id_columns: Optional[list[str]] = None,
     read_from_file: Union[
         bool,
         Sequence[str],
         tuple[Sequence[str], Sequence[str]],
         Sequence[tuple[Sequence[str], Sequence[str]]],
     ] = False,
-    homogenous_info: dict = None,
-    cvs: Union[str, os.PathLike, dict] = None,
-    dirglob: str = None,
-    xr_open_kwargs: Mapping[str, Any] = None,
+    homogenous_info: Optional[dict] = None,
+    cvs: Optional[Union[str, os.PathLike, dict]] = None,
+    dirglob: Optional[str] = None,
+    xr_open_kwargs: Optional[Mapping[str, Any]] = None,
     only_official_columns: bool = True,
     progress: bool = False,
     parallel_dirs: Union[bool, int] = False,
-    file_checks: list[str] = None,
+    file_checks: Optional[list[str]] = None,
 ) -> pd.DataFrame:
     r"""Parse files in a directory and return them as a pd.DataFrame.
@@ -698,7 +698,7 @@ def parse_directory(
 def parse_from_ds(
     obj: Union[str, os.PathLike, xr.Dataset],
     names: Sequence[str],
-    attrs_map: Mapping[str, str] = None,
+    attrs_map: Optional[Mapping[str, str]] = None,
     **xrkwargs,
 ):
     """Parse a list of catalog fields from the file/dataset itself.
@@ -1094,8 +1094,8 @@ def _build_path(
 @parse_config
 def build_path(
     data: Union[dict, xr.Dataset, xr.DataArray, pd.Series, DataCatalog, pd.DataFrame],
-    schemas: Union[str, os.PathLike, dict] = None,
-    root: Union[str, os.PathLike] = None,
+    schemas: Optional[Union[str, os.PathLike, dict]] = None,
+    root: Optional[Union[str, os.PathLike]] = None,
     **extra_facets,
 ) -> Union[Path, DataCatalog, pd.DataFrame]:
     r"""Parse the schema from a configuration and construct path using a dictionary of facets.
32 changes: 16 additions & 16 deletions xscen/diagnostics.py
@@ -6,7 +6,7 @@
 from copy import deepcopy
 from pathlib import Path
 from types import ModuleType
-from typing import Union
+from typing import Optional, Union

 import numpy as np
 import xarray as xr
@@ -45,18 +45,18 @@ def _(s):
 def health_checks(
     ds: Union[xr.Dataset, xr.DataArray],
     *,
-    structure: dict = None,
-    calendar: str = None,
-    start_date: str = None,
-    end_date: str = None,
-    variables_and_units: dict = None,
-    cfchecks: dict = None,
-    freq: str = None,
-    missing: Union[dict, str, list] = None,
-    flags: dict = None,
-    flags_kwargs: dict = None,
+    structure: Optional[dict] = None,
+    calendar: Optional[str] = None,
+    start_date: Optional[str] = None,
+    end_date: Optional[str] = None,
+    variables_and_units: Optional[dict] = None,
+    cfchecks: Optional[dict] = None,
+    freq: Optional[str] = None,
+    missing: Optional[Union[dict, str, list]] = None,
+    flags: Optional[dict] = None,
+    flags_kwargs: Optional[dict] = None,
     return_flags: bool = False,
-    raise_on: list = None,
+    raise_on: Optional[list] = None,
 ) -> Union[None, xr.Dataset]:
     """
     Perform a series of health checks on the dataset. Be aware that missing data checks and flag checks can be slow.
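Signatures like `health_checks` above, with a dozen None defaults, are exactly where implicit Optional used to slip in. A small audit helper in the spirit of this commit (illustrative only, not part of xscen) that flags parameters whose default is None but whose annotation does not admit it:

import inspect
from typing import Optional, get_args

def implicit_optional_params(func):
    """Yield parameter names whose default is None but whose annotation
    does not include None (assumes plain, non-string annotations)."""
    for name, param in inspect.signature(func).parameters.items():
        ann = func.__annotations__.get(name)
        if param.default is None and ann is not None:
            if ann is not type(None) and type(None) not in get_args(ann):
                yield name

def demo(freq: str = None, flags: Optional[dict] = None):  # 'freq' is implicit
    ...

print(list(implicit_optional_params(demo)))  # ['freq']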
@@ -299,11 +299,11 @@ def properties_and_measures(
         Sequence[tuple[str, Indicator]],
         ModuleType,
     ],
-    period: list[str] = None,
+    period: Optional[list[str]] = None,
     unstack: bool = False,
-    rechunk: dict = None,
-    dref_for_measure: xr.Dataset = None,
-    change_units_arg: dict = None,
+    rechunk: Optional[dict] = None,
+    dref_for_measure: Optional[xr.Dataset] = None,
+    change_units_arg: Optional[dict] = None,
     to_level_prop: str = "diag-properties",
     to_level_meas: str = "diag-measures",
 ) -> tuple[xr.Dataset, xr.Dataset]:
10 changes: 5 additions & 5 deletions xscen/ensembles.py
@@ -6,7 +6,7 @@
 from copy import deepcopy
 from itertools import chain, groupby
 from pathlib import Path
-from typing import Union
+from typing import Optional, Union

 import numpy as np
 import xarray as xr
@@ -27,8 +27,8 @@ def ensemble_stats(
     ],
     statistics: dict,
     *,
-    create_kwargs: dict = None,
-    weights: xr.DataArray = None,
+    create_kwargs: Optional[dict] = None,
+    weights: Optional[xr.DataArray] = None,
     common_attrs_only: bool = True,
     to_level: str = "ensemble",
 ) -> xr.Dataset:
@@ -146,9 +146,9 @@ def generate_weights(
     *,
     independence_level: str = "model",
     balance_experiments: bool = False,
-    attribute_weights: dict = None,
+    attribute_weights: Optional[dict] = None,
     skipna: bool = True,
-    v_for_skipna: str = None,
+    v_for_skipna: Optional[str] = None,
     standardize: bool = False,
     experiment_weights: bool = False,
 ) -> xr.DataArray:
(The diffs for the remaining changed files were not loaded on this page.)