help precommit
RondeauG committed Sep 18, 2024
1 parent 77d5520 commit 101837c
Showing 16 changed files with 360 additions and 374 deletions.
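Nearly all of the changes below mechanically replace typing.Optional and typing.Union annotations with the PEP 604 pipe syntax — the kind of rewrite usually enforced by a pre-commit hook such as pyupgrade or Ruff's pyupgrade rules, though the diff does not show which hook this repository uses. A minimal sketch of the pattern, with illustrative names that are not taken from the diff:

    # Before: the typing module's spelling.
    #     from typing import Optional, Union
    #     def climatology(
    #         window: Optional[int] = None,
    #         periods: Optional[Union[list[str], Sequence[str]]] = None,
    #     ) -> None: ...

    # After: PEP 604 unions; no typing imports needed on Python >= 3.10.
    from collections.abc import Sequence

    def climatology(
        window: int | None = None,
        periods: list[str] | Sequence[str] | None = None,
    ) -> None:
        pass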
60 changes: 30 additions & 30 deletions src/xscen/aggregate.py
@@ -51,11 +51,11 @@ def _(s):
 def climatological_mean(
     ds: xr.Dataset,
     *,
-    window: Optional[int] = None,
-    min_periods: Optional[int] = None,
+    window: int | None = None,
+    min_periods: int | None = None,
     interval: int = 1,
-    periods: Optional[Union[list[str], list[list[str]]]] = None,
-    to_level: Optional[str] = "climatology",
+    periods: list[str] | list[list[str]] | None = None,
+    to_level: str | None = "climatology",
 ) -> xr.Dataset:
     """Compute the mean over 'year' for given time periods, respecting the temporal resolution of ds.
@@ -109,11 +109,11 @@ def climatological_mean(
 def climatological_op(  # noqa: C901
     ds: xr.Dataset,
     *,
-    op: Union[str, dict] = "mean",
-    window: Optional[int] = None,
-    min_periods: Optional[Union[int, float]] = None,
+    op: str | dict = "mean",
+    window: int | None = None,
+    min_periods: int | float | None = None,
     stride: int = 1,
-    periods: Optional[Union[list[str], list[list[str]]]] = None,
+    periods: list[str] | list[list[str]] | None = None,
     rename_variables: bool = True,
     to_level: str = "climatology",
     horizons_as_dim: bool = False,
@@ -506,11 +506,11 @@ def _ulinregress(x, y, **kwargs):
 @parse_config
 def compute_deltas(  # noqa: C901
     ds: xr.Dataset,
-    reference_horizon: Union[str, xr.Dataset],
+    reference_horizon: str | xr.Dataset,
     *,
-    kind: Union[str, dict] = "+",
+    kind: str | dict = "+",
     rename_variables: bool = True,
-    to_level: Optional[str] = "deltas",
+    to_level: str | None = "deltas",
 ) -> xr.Dataset:
     """Compute deltas in comparison to a reference time period, respecting the temporal resolution of ds.
@@ -702,13 +702,13 @@ def spatial_mean(  # noqa: C901
     ds: xr.Dataset,
     method: str,
     *,
-    spatial_subset: Optional[bool] = None,
-    call_clisops: Optional[bool] = False,
-    region: Optional[Union[dict, str]] = None,
-    kwargs: Optional[dict] = None,
-    simplify_tolerance: Optional[float] = None,
-    to_domain: Optional[str] = None,
-    to_level: Optional[str] = None,
+    spatial_subset: bool | None = None,
+    call_clisops: bool | None = False,
+    region: dict | str | None = None,
+    kwargs: dict | None = None,
+    simplify_tolerance: float | None = None,
+    to_domain: str | None = None,
+    to_level: str | None = None,
 ) -> xr.Dataset:
     """Compute the spatial mean using a variety of available methods.
@@ -1034,18 +1034,18 @@ def spatial_mean(  # noqa: C901
 @parse_config
 def produce_horizon(  # noqa: C901
     ds: xr.Dataset,
-    indicators: Union[
-        str,
-        os.PathLike,
-        Sequence[Indicator],
-        Sequence[tuple[str, Indicator]],
-        ModuleType,
-    ],
+    indicators: (
+        str
+        | os.PathLike
+        | Sequence[Indicator]
+        | Sequence[tuple[str, Indicator]]
+        | ModuleType
+    ),
     *,
-    periods: Optional[Union[list[str], list[list[str]]]] = None,
-    warminglevels: Optional[dict] = None,
-    to_level: Optional[str] = "horizons",
-    period: Optional[list] = None,
+    periods: list[str] | list[list[str]] | None = None,
+    warminglevels: dict | None = None,
+    to_level: str | None = "horizons",
+    period: list | None = None,
 ) -> xr.Dataset:
     """
     Compute indicators, then the climatological mean, and finally unstack dates in order
@@ -1095,7 +1095,7 @@ def produce_horizon(  # noqa: C901
     if periods is not None:
         all_periods.extend(standardize_periods(periods))
     if warminglevels is not None:
-        if isinstance(warminglevels["wl"], (int, float)):
+        if isinstance(warminglevels["wl"], int | float):
             all_periods.append(warminglevels)
         elif isinstance(warminglevels["wl"], list):
             template = deepcopy(warminglevels)
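Most edits in aggregate.py touch only annotations, but the last hunk changes an isinstance() call, which is evaluated at runtime: int | float builds a types.UnionType object, so this form requires Python 3.10 or newer, whereas annotation-only changes could also be carried on older interpreters via "from __future__ import annotations". A small runnable sketch of the equivalence, assuming Python >= 3.10:

    import types

    wl = 2.5
    assert isinstance(wl, (int, float))  # tuple form: works on all versions
    assert isinstance(wl, int | float)   # PEP 604 form: Python >= 3.10 only
    assert isinstance(int | float, types.UnionType)  # the union is a runtime object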
24 changes: 12 additions & 12 deletions src/xscen/biasadjust.py
@@ -58,17 +58,17 @@ def _add_preprocessing_attr(scen, train_kwargs):
 def train(
     dref: xr.Dataset,
     dhist: xr.Dataset,
-    var: Union[str, list[str]],
+    var: str | list[str],
     period: list[str],
     *,
     method: str = "DetrendedQuantileMapping",
-    group: Optional[Union[sdba.Grouper, str, dict]] = None,
-    xclim_train_args: Optional[dict] = None,
+    group: sdba.Grouper | str | dict | None = None,
+    xclim_train_args: dict | None = None,
     maximal_calendar: str = "noleap",
-    adapt_freq: Optional[dict] = None,
-    jitter_under: Optional[dict] = None,
-    jitter_over: Optional[dict] = None,
-    align_on: Optional[str] = "year",
+    adapt_freq: dict | None = None,
+    jitter_under: dict | None = None,
+    jitter_over: dict | None = None,
+    align_on: str | None = "year",
 ) -> xr.Dataset:
     """
     Train a bias-adjustment.
@@ -194,13 +194,13 @@ def train(
 def adjust(
     dtrain: xr.Dataset,
     dsim: xr.Dataset,
-    periods: Union[list[str], list[list[str]]],
+    periods: list[str] | list[list[str]],
     *,
-    xclim_adjust_args: Optional[dict] = None,
+    xclim_adjust_args: dict | None = None,
     to_level: str = "biasadjusted",
-    bias_adjust_institution: Optional[str] = None,
-    bias_adjust_project: Optional[str] = None,
-    align_on: Optional[str] = "year",
+    bias_adjust_institution: str | None = None,
+    bias_adjust_project: str | None = None,
+    align_on: str | None = "year",
 ) -> xr.Dataset:
     """
     Adjust a simulation.
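One detail these signatures illustrate: align_on: str | None = "year" keeps a non-None default. Like Optional[X], the X | None annotation only says that None is an accepted value; it implies nothing about the default. A minimal sketch (stub function, not the real xscen API):

    def adjust_stub(align_on: str | None = "year") -> str:
        # None must be passed explicitly; the default remains "year".
        return "calendar-aligned" if align_on is not None else "unaligned"

    assert adjust_stub() == "calendar-aligned"
    assert adjust_stub(None) == "unaligned"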
58 changes: 28 additions & 30 deletions src/xscen/catalog.py
@@ -198,10 +198,10 @@ def __init__(
     @classmethod
     def from_df(
         cls,
-        data: Union[pd.DataFrame, os.PathLike, Sequence[os.PathLike]],
-        esmdata: Optional[Union[os.PathLike, dict]] = None,
+        data: pd.DataFrame | os.PathLike | Sequence[os.PathLike],
+        esmdata: os.PathLike | dict | None = None,
         *,
-        read_csv_kwargs: Optional[Mapping[str, Any]] = None,
+        read_csv_kwargs: Mapping[str, Any] | None = None,
         name: str = "virtual",
         **intake_kwargs,
     ):
@@ -263,7 +263,7 @@ def _find_unique(series):
         else:
             return data.apply(_find_unique, result_type="reduce").to_dict()

-    def unique(self, columns: Optional[Union[str, Sequence[str]]] = None):
+    def unique(self, columns: str | Sequence[str] | None = None):
         """Return a series of unique values in the catalog.

         Parameters
@@ -309,7 +309,7 @@ def search(self, **columns):
         )
         return cat

-    def drop_duplicates(self, columns: Optional[list[str]] = None):
+    def drop_duplicates(self, columns: list[str] | None = None):
         """Drop duplicates in the catalog based on a subset of columns.

         Parameters
@@ -404,10 +404,10 @@ def exists_in_cat(self, **columns) -> bool:

     def to_dataset(
         self,
-        concat_on: Optional[Union[list[str], str]] = None,
-        create_ensemble_on: Optional[Union[list[str], str]] = None,
-        ensemble_name: Optional[Union[list[str]]] = None,
-        calendar: Optional[str] = "standard",
+        concat_on: list[str] | str | None = None,
+        create_ensemble_on: list[str] | str | None = None,
+        ensemble_name: list[str] | None = None,
+        calendar: str | None = "standard",
         **kwargs,
     ) -> xr.Dataset:
         """
@@ -538,7 +538,7 @@ def preprocess(ds):

     def copy_files(
         self,
-        dest: Union[str, os.PathLike],
+        dest: str | os.PathLike,
         flat: bool = True,
         unzip: bool = False,
         zipzarr: bool = False,
@@ -636,9 +636,9 @@ class ProjectCatalog(DataCatalog):
     @classmethod
     def create(
         cls,
-        filename: Union[os.PathLike, str],
+        filename: os.PathLike | str,
         *,
-        project: Optional[dict] = None,
+        project: dict | None = None,
         overwrite: bool = False,
     ):
         r"""Create a new project catalog from some project metadata.
@@ -716,11 +716,11 @@ def create(

     def __init__(
         self,
-        df: Union[str, dict],
+        df: str | dict,
         *args,
         create: bool = False,
         overwrite: bool = False,
-        project: Optional[dict] = None,
+        project: dict | None = None,
         **kwargs,
     ):
         """
@@ -746,7 +746,7 @@ def __init__(
         The 'df' key must be a Pandas DataFrame containing content that would otherwise be in the CSV file.
         """
         if create:
-            if isinstance(df, (str, Path)) and (not Path(df).is_file() or overwrite):
+            if isinstance(df, str | Path) and (not Path(df).is_file() or overwrite):
                 self.create(df, project=project, overwrite=overwrite)
         super().__init__(df, *args, **kwargs)
         self.check_valid()
@@ -756,15 +756,13 @@ def __init__(
     # TODO: Implement a way to easily destroy part of the catalog to "reset" some steps
     def update(
         self,
-        df: Optional[
-            Union[
-                DataCatalog,
-                intake_esm.esm_datastore,
-                pd.DataFrame,
-                pd.Series,
-                Sequence[pd.Series],
-            ]
-        ] = None,
+        df: None | (
+            DataCatalog
+            | intake_esm.esm_datastore
+            | pd.DataFrame
+            | pd.Series
+            | Sequence[pd.Series]
+        ) = None,
     ):
         """Update the catalog with new data and writes the new data to the csv file.
@@ -846,8 +844,8 @@ def update(
     def update_from_ds(
         self,
         ds: xr.Dataset,
-        path: Union[os.PathLike, str],
-        info_dict: Optional[dict] = None,
+        path: os.PathLike | str,
+        info_dict: dict | None = None,
         **info_kwargs,
     ):
         """Update the catalog with new data and writes the new data to the csv file.
@@ -965,7 +963,7 @@ def _build_id(element: pd.Series, columns: list[str]):


 def generate_id(
-    df: Union[pd.DataFrame, xr.Dataset], id_columns: Optional[list] = None
+    df: pd.DataFrame | xr.Dataset, id_columns: list | None = None
 ) -> pd.Series:
     """Create an ID from column entries.
@@ -996,7 +994,7 @@ def generate_id(
     return df.apply(_build_id, axis=1, args=(id_columns,))


-def unstack_id(df: Union[pd.DataFrame, ProjectCatalog, DataCatalog]) -> dict:
+def unstack_id(df: pd.DataFrame | ProjectCatalog | DataCatalog) -> dict:
     """Reverse-engineer an ID using catalog entries.

     Parameters
@@ -1009,7 +1007,7 @@ def unstack_id(df: Union[pd.DataFrame, ProjectCatalog, DataCatalog]) -> dict:
     dict
         Dictionary with one entry per unique ID, which are themselves dictionaries of all the individual parts of the ID.
     """
-    if isinstance(df, (ProjectCatalog, DataCatalog)):
+    if isinstance(df, ProjectCatalog | DataCatalog):
         df = df.df

     out = {}
Expand Down Expand Up @@ -1038,7 +1036,7 @@ def unstack_id(df: Union[pd.DataFrame, ProjectCatalog, DataCatalog]) -> dict:

def subset_file_coverage(
df: pd.DataFrame,
periods: Union[list[str], list[list[str]]],
periods: list[str] | list[list[str]],
*,
coverage: float = 0.99,
duplicates_ok: bool = False,
Expand Down
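The update() signature above also shows how a long union can be parenthesized and wrapped across lines, and unstack_id() shows that PEP 604 unions of ordinary classes work directly in isinstance() checks. A self-contained sketch of both, using stand-in classes rather than xscen's real catalogs:

    class DataCatalog: ...            # stand-in, not the xscen class
    class ProjectCatalog(DataCatalog): ...

    def describe(
        cat: None | (
            DataCatalog
            | dict
        ) = None,
    ) -> str:
        # A class union is a valid second argument to isinstance on 3.10+.
        if isinstance(cat, ProjectCatalog | DataCatalog):
            return type(cat).__name__
        return "none" if cat is None else "dict"

    assert describe() == "none"
    assert describe(ProjectCatalog()) == "ProjectCatalog"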
(Diffs for the remaining 13 changed files were not loaded.)
