WIP - remove GL01 exception, small fixes
Zeitsperre committed Nov 25, 2024
1 parent 04caf25 commit 5205801
Showing 4 changed files with 142 additions and 81 deletions.
1 change: 0 additions & 1 deletion pyproject.toml
@@ -253,7 +253,6 @@ checks = [
"all", # report on all checks, except the below
"ES01", # "No extended summary found"
"EX01", # "No examples section found"
"GL01", # "Docstring text (summary) should start in the line immediately after the opening quotes (not in the same line, or leaving a blank line in between)"
"GL06", # "Found unknown section \"{section}\""
"SA01", # "See Also section not found",
"SS01" # "No summary found"
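Dropping the GL01 exclusion means numpydoc validation now flags any docstring whose summary sits on the same line as the opening quotes. The snippet below is a minimal illustration of the style change this commit applies; the function name and body are invented for the example and do not come from the repository.

# Old style, now flagged by GL01: the summary shares the line with the opening quotes.
def old_style(x: float) -> float:
    """Compute something from x.

    Parameters
    ----------
    x : float
        Input value.
    """
    return x


# New style, GL01-compliant: the summary starts on the line after the opening quotes.
def new_style(x: float) -> float:
    """
    Compute something from x.

    Parameters
    ----------
    x : float
        Input value.
    """
    return x

The remaining diffs apply exactly this reformatting to docstrings in xclim/analog.py and xclim/core/formatting.py.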
30 changes: 20 additions & 10 deletions xclim/analog.py
@@ -26,7 +26,8 @@ def spatial_analogs(
method: str = "kldiv",
**kwargs,
):
r"""Compute dissimilarity statistics between target points and candidate points.
r"""
Compute dissimilarity statistics between target points and candidate points.
Spatial analogues based on the comparison of climate indices. The algorithm compares
the distribution of the reference indices with the distribution of spatially
@@ -111,7 +112,8 @@ def spatial_analogs(


def standardize(x: np.ndarray, y: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
"""Standardize x and y by the square root of the product of their standard deviation.
"""
Standardize x and y by the square root of the product of their standard deviation.
Parameters
----------
@@ -130,7 +132,8 @@ def standardize(x: np.ndarray, y: np.ndarray) -> tuple[np.ndarray, np.ndarray]:


def metric(func: Callable):
"""Register a metric function in the `metrics` mapping and add some preparation/checking code.
"""
Register a metric function in the `metrics` mapping and add some preparation/checking code.
Parameters
----------
@@ -178,7 +181,8 @@ def _metric_overhead(x, y, **kwargs):

@metric
def seuclidean(x: np.ndarray, y: np.ndarray) -> float:
"""Compute the Euclidean distance between the mean of a multivariate candidate sample with respect to the mean of a reference sample.
"""
Compute the Euclidean distance between the mean of a multivariate candidate sample with respect to the mean of a reference sample.
This method is scale-invariant.
@@ -212,7 +216,8 @@ def seuclidean(x: np.ndarray, y: np.ndarray) -> float:

@metric
def nearest_neighbor(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""Compute a dissimilarity metric based on the number of points in the pooled sample whose nearest neighbor belongs to the same distribution.
"""
Compute a dissimilarity metric based on the number of points in the pooled sample whose nearest neighbor belongs to the same distribution.
This method is scale-invariant.
@@ -249,7 +254,8 @@ def nearest_neighbor(x: np.ndarray, y: np.ndarray) -> np.ndarray:

@metric
def zech_aslan(x: np.ndarray, y: np.ndarray, *, dmin: float = 1e-12) -> float:
r"""Compute a modified Zech-Aslan energy distance dissimilarity metric based on an analogy with the energy of a cloud of electrical charges.
r"""
Compute a modified Zech-Aslan energy distance dissimilarity metric based on an analogy with the energy of a cloud of electrical charges.
This method is scale-invariant.
@@ -382,7 +388,8 @@ def szekely_rizzo(x: np.ndarray, y: np.ndarray, *, standardize: bool = True) ->

@metric
def friedman_rafsky(x: np.ndarray, y: np.ndarray) -> float:
"""Compute a dissimilarity metric based on the Friedman-Rafsky runs statistics.
"""
Compute a dissimilarity metric based on the Friedman-Rafsky runs statistics.
The algorithm builds a minimal spanning tree (the subset of edges connecting all points that minimizes the total
edge length) then counts the edges linking points from the same distribution. This method is scale-dependent.
@@ -426,7 +433,8 @@ def friedman_rafsky(x: np.ndarray, y: np.ndarray) -> float:

@metric
def kolmogorov_smirnov(x: np.ndarray, y: np.ndarray) -> float:
"""Compute the Kolmogorov-Smirnov statistic applied to two multivariate samples as described by Fasano and Franceschini.
"""
Compute the Kolmogorov-Smirnov statistic applied to two multivariate samples as described by Fasano and Franceschini.
This method is scale-dependent.
@@ -448,7 +456,8 @@ def kolmogorov_smirnov(x: np.ndarray, y: np.ndarray) -> float:
"""

def pivot(_x: np.ndarray, _y: np.ndarray) -> float:
"""Pivot function to compute the KS statistic.
"""
Pivot function to compute the KS statistic.
Parameters
----------
@@ -491,7 +500,8 @@ def pivot(_x: np.ndarray, _y: np.ndarray) -> float:
def kldiv(
x: np.ndarray, y: np.ndarray, *, k: int | Sequence[int] = 1
) -> float | Sequence[float]:
r"""Compute the Kullback-Leibler divergence between two multivariate samples.
r"""
Compute the Kullback-Leibler divergence between two multivariate samples.
The formula to compute the K-L divergence from samples is given by:
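For context on the @metric decorator used throughout analog.py: its docstring above says it registers each dissimilarity function in the `metrics` mapping and adds shared preparation/checking code. The sketch below illustrates that registration pattern only; the helper names, the NaN handling, and the toy seuclidean_demo metric are assumptions for the example, not xclim's actual implementation.

from collections.abc import Callable

import numpy as np

metrics: dict[str, Callable] = {}


def _as_2d(a: np.ndarray) -> np.ndarray:
    # Coerce input to a (n_samples, n_features) float array.
    a = np.asarray(a, dtype=float)
    return a.reshape(-1, 1) if a.ndim == 1 else a


def metric(func: Callable) -> Callable:
    """
    Register a dissimilarity function in `metrics` and wrap it with input checks.
    """

    def _metric_overhead(x: np.ndarray, y: np.ndarray, **kwargs) -> float:
        x, y = _as_2d(x), _as_2d(y)
        # Drop observations containing NaNs before delegating to the metric.
        x = x[~np.isnan(x).any(axis=1)]
        y = y[~np.isnan(y).any(axis=1)]
        return func(x, y, **kwargs)

    metrics[func.__name__] = _metric_overhead
    return _metric_overhead


@metric
def seuclidean_demo(x: np.ndarray, y: np.ndarray) -> float:
    # Toy stand-in: Euclidean distance between the two sample means.
    return float(np.linalg.norm(x.mean(axis=0) - y.mean(axis=0)))

A registered metric is then reachable either directly or through the mapping, e.g. metrics["seuclidean_demo"](x, y).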
62 changes: 41 additions & 21 deletions xclim/core/formatting.py
@@ -38,14 +38,16 @@


class AttrFormatter(string.Formatter):
"""A formatter for frequently used attribute values.
"""
A formatter for frequently used attribute values.
Parameters
----------
mapping : dict[str, Sequence[str]]
mapping : dict of str, sequence of str
A mapping from values to their possible variations.
modifiers : Sequence[str]
The list of modifiers, must be the as long as the longest value of `mapping`.
modifiers : sequence of str
The list of modifiers.
Must at least match the length of the longest value of `mapping`.
Cannot include reserved modifier 'r'.
Notes
@@ -58,14 +60,16 @@ def __init__(
mapping: dict[str, Sequence[str]],
modifiers: Sequence[str],
) -> None:
"""Initialize the formatter.
"""
Initialize the formatter.
Parameters
----------
mapping : dict[str, Sequence[str]]
A mapping from values to their possible variations.
modifiers : Sequence[str]
The list of modifiers, must be the as long as the longest value of `mapping`.
The list of modifiers.
Must at least match the length of the longest value of `mapping`.
Cannot include reserved modifier 'r'.
"""
super().__init__()
@@ -75,7 +79,8 @@ def __init__(
self.mapping = mapping

def format(self, format_string: str, /, *args: Any, **kwargs: dict) -> str:
r"""Format a string.
r"""
Format a string.
Parameters
----------
@@ -97,7 +102,8 @@ def format(self, format_string: str, /, *args: Any, **kwargs: dict) -> str:
return super().format(format_string, *args, **kwargs)

def format_field(self, value, format_spec: str) -> str:
"""Format a value given a formatting spec.
"""
Format a value given a formatting spec.
If `format_spec` is in this Formatter's modifiers, the corresponding variation
of value is given. If `format_spec` is 'r' (raw), the value is returned unmodified.
@@ -232,8 +238,9 @@ def _match_value(self, value):
)


def parse_doc(doc: str) -> dict[str, str]:
"""Crude regex parsing reading an indice docstring and extracting information needed in indicator construction.
def parse_doc(doc: str) -> dict:
"""
Crude regex parsing reading an indice docstring and extracting information needed in indicator construction.
The appropriate docstring syntax is detailed in :ref:`notebooks/extendxclim:Defining new indices`.
@@ -279,7 +286,8 @@ def parse_doc(doc: str) -> dict[str, str]:


def _parse_parameters(section):
"""Parse the 'parameters' section of a docstring into a dictionary.
"""
Parse the 'parameters' section of a docstring into a dictionary.
Works by mapping the parameter name to its description and, potentially, to its set of choices.
The type annotation are not parsed, except for fixed sets of values (listed as "{'a', 'b', 'c'}").
@@ -335,7 +343,8 @@ def merge_attributes(
missing_str: str | None = None,
**inputs_kws: xr.DataArray | xr.Dataset,
) -> str:
r"""Merge attributes from several DataArrays or Datasets.
r"""
Merge attributes from several DataArrays or Datasets.
If more than one input is given, its name (if available) is prepended as: "<input name> : <input attribute>".
@@ -387,7 +396,8 @@ def update_history(
new_name: str | None = None,
**inputs_kws: xr.DataArray | xr.Dataset,
) -> str:
r"""Return a history string with the timestamped message and the combination of the history of all inputs.
r"""
Return a history string with the timestamped message and the combination of the history of all inputs.
The new history entry is formatted as "[<timestamp>] <new_name>: <hist_str> - xclim version: <xclim.__version__>."
@@ -434,7 +444,8 @@ def update_history(


def update_xclim_history(func: Callable) -> Callable:
"""Decorator that auto-generates and fills the history attribute.
"""
Decorator that auto-generates and fills the history attribute.
The history is generated from the signature of the function and added to the first output.
Because of a limitation of the `boltons` wrapper, all arguments passed to the wrapped function
@@ -487,7 +498,8 @@ def _call_and_add_history(*args, **kwargs):


def gen_call_string(funcname: str, *args, **kwargs) -> str:
r"""Generate a signature string for use in the history attribute.
r"""
Generate a signature string for use in the history attribute.
DataArrays and Dataset are replaced with their name, while Nones, floats, ints and strings are printed directly.
All other objects have their type printed between < >.
@@ -536,7 +548,8 @@ def gen_call_string(funcname: str, *args, **kwargs) -> str:


def prefix_attrs(source: dict, keys: Sequence, prefix: str) -> dict:
"""Rename some keys of a dictionary by adding a prefix.
"""
Rename some keys of a dictionary by adding a prefix.
Parameters
----------
@@ -562,7 +575,8 @@ def prefix_attrs(source: dict, keys: Sequence, prefix: str) -> dict:


def unprefix_attrs(source: dict, keys: Sequence, prefix: str) -> dict:
"""Remove prefix from keys in a dictionary.
"""
Remove prefix from keys in a dictionary.
Parameters
----------
@@ -610,7 +624,8 @@ def unprefix_attrs(source: dict, keys: Sequence, prefix: str) -> dict:
def _gen_parameters_section(
parameters: dict[str, dict[str, Any]], allowed_periods: list[str] | None = None
) -> str:
"""Generate the "parameters" section of the indicator docstring.
"""
Generate the "parameters" section of the indicator docstring.
Parameters
----------
@@ -622,6 +637,7 @@
Returns
-------
str
The formatted section.
"""
section = "Parameters\n----------\n"
for name, param in parameters.items():
@@ -656,7 +672,8 @@


def _gen_returns_section(cf_attrs: Sequence[dict[str, Any]]) -> str:
"""Generate the "Returns" section of an indicator's docstring.
"""
Generate the "Returns" section of an indicator's docstring.
Parameters
----------
@@ -666,6 +683,7 @@ def _gen_returns_section(cf_attrs: Sequence[dict[str, Any]]) -> str:
Returns
-------
str
The formatted section.
"""
section = "Returns\n-------\n"
for attrs in cf_attrs:
@@ -690,7 +708,8 @@ def _gen_returns_section(cf_attrs: Sequence[dict[str, Any]]) -> str:


def generate_indicator_docstring(ind) -> str:
"""Generate an indicator's docstring from keywords.
"""
Generate an indicator's docstring from keywords.
Parameters
----------
@@ -735,7 +754,8 @@ def generate_indicator_docstring(ind) -> str:


def get_percentile_metadata(data: xr.DataArray, prefix: str) -> dict[str, str]:
"""Get the metadata related to percentiles from the given DataArray as a dictionary.
"""
Get the metadata related to percentiles from the given DataArray as a dictionary.
Parameters
----------
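Once GL01 is enforced, the reworked docstrings can also be checked programmatically, in addition to the pyproject.toml configuration above. This is a hedged example: it assumes numpydoc is installed and that its numpydoc.validate.validate helper behaves as in recent releases, and that xclim is importable so the fully qualified name below resolves.

from numpydoc.validate import validate

# validate() takes a fully qualified object path and returns a report dict;
# report["errors"] is a list of (code, message) pairs for failed checks.
report = validate("xclim.core.formatting.merge_attributes")
gl01 = [message for code, message in report["errors"] if code == "GL01"]
print("GL01 violations:", gl01 or "none")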