diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bf52a2687..955a244a6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -40,7 +40,7 @@ repos: hooks: - id: isort - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.9 + rev: v0.1.11 hooks: - id: ruff - repo: https://github.com/pycqa/flake8 @@ -53,22 +53,30 @@ repos: rev: 1.7.1 hooks: - id: nbqa-pyupgrade + additional_dependencies: [ 'pyupgrade==3.15.0' ] args: [ '--py38-plus' ] - id: nbqa-black additional_dependencies: [ 'black==23.12.1' ] - id: nbqa-isort + additional_dependencies: [ 'isort==5.13.2' ] - repo: https://github.com/kynan/nbstripout rev: 0.6.1 hooks: - id: nbstripout files: '.ipynb' - args: [ '--extra-keys', 'metadata.kernelspec' ] + args: [ '--extra-keys=metadata.kernelspec' ] - repo: https://github.com/keewis/blackdoc rev: v0.3.9 hooks: - id: blackdoc additional_dependencies: [ 'black==23.12.1' ] exclude: '(xclim/indices/__init__.py|docs/installation.rst)' +- repo: https://github.com/codespell-project/codespell + rev: v2.2.6 + hooks: + - id: codespell + additional_dependencies: [ 'tomli' ] + args: [ '--toml=pyproject.toml' ] - repo: https://github.com/python-jsonschema/check-jsonschema rev: 0.27.3 hooks: diff --git a/CHANGES.rst b/CHANGES.rst index cc75c6ef9..a670f5556 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -25,6 +25,7 @@ Bug fixes ^^^^^^^^^ * Fixed passing ``missing=0`` to ``xclim.core.calendar.convert_calendar``. (:issue:`1562`, :pull:`1563`). * Fix wrong `window` attributes in ``xclim.indices.standardized_precipitation_index``, ``xclim.indices.standardized_precipitation_evapotranspiration_index``. (:issue:`1552` :pull:`1554`). +* Several spelling mistakes have been corrected within the documentation and codebase. (:pull:`1576`). Internal changes ^^^^^^^^^^^^^^^^ @@ -32,6 +33,7 @@ Internal changes * The `bump-version.yml` workflow has been adjusted to bump the `patch` version when the last version is determined to have been a `release` version; otherwise, the `build` version is bumped. (:issue:`1557`, :pull:`1569`). * The GitHub Workflows now use the `step-security/harden-runner` action to monitor source code, actions, and dependency safety. All workflows now employ more constrained permissions rule sets to prevent security issues. (:pull:`1577`). * Updated the CONTRIBUTING.rst directions to showcase the new versioning system. (:issue:`1557`, :pull:`1573`). +* The `codespell` library is now a development dependency for the `dev` installation recipe with configurations found within `pyproject.toml`. This is also now a linting step and integrated as a `pre-commit` hook. For more information, see the `codespell documentation <https://github.com/codespell-project/codespell>`_ (:pull:`1576`). v0.47.0 (2023-12-01) @@ -317,7 +319,7 @@ New features and enhancements * ``xclim.core.calendar.yearly_interpolated_doy`` * ``xclim.core.calendar.yearly_random_doy`` * `scipy` is no longer pinned below v1.9 and `lmoments3>=1.0.5` is now a core dependency and installed by default with `pip`. (:issue:`1142`, :pull:`1171`). -* Fix bug on number of bins in ``xclim.sdba.propeties.spatial_correlogram``. (:pull:`1336`) +* Fix bug on number of bins in ``xclim.sdba.properties.spatial_correlogram``. (:pull:`1336`) * Add `resample_before_rl` argument to control when resampling happens in `maximum_consecutive_{frost|frost_free|dry|tx}_days` and in heat indices (in `_threshold`) (:issue:`1329`, :pull:`1331`) * Add ``xclim.ensembles.make_criteria`` to help create inputs for the ensemble-reduction methods. (:issue:`1338`, :pull:`1341`).
@@ -1071,7 +1073,7 @@ Bug fixes * Dimensions in a grouper's ``add_dims`` are now taken into consideration in function wrapped with ``map_blocks/groups``. This feature is still not fully tested throughout ``sdba`` though, so use with caution. * Better dtype preservation throughout ``sdba``. * "constant" extrapolation in the quantile mappings' adjustment is now padding values just above and under the target's max and min, instead of ``±np.inf``. -* Fixes in ``sdba.LOCI`` for the case where a grouping with additionnal dimensions is used. +* Fixes in ``sdba.LOCI`` for the case where a grouping with additional dimensions is used. Internal Changes ^^^^^^^^^^^^^^^^ @@ -1139,7 +1141,7 @@ New indicators Internal Changes ^^^^^^^^^^^^^^^^ * ``aggregate_between_dates`` (introduced in v0.27.0) now accepts ``DayOfYear``-like strings for supplying start and end dates (e.g. ``start="02-01", end="10-31"``). -* The indicator call sequence now considers "variable" the inputs annoted so. Dropped the ``nvar`` attribute. +* The indicator call sequence now considers "variable" the inputs annotated so. Dropped the ``nvar`` attribute. * Default cfcheck is now to check metadata according to the variable name, using CMIP6 names in xclim/data/variable.yml. * ``Indicator.missing`` defaults to "skip" if ``freq`` is absent from the list of parameters. * Minor modifications to the GitHub Pull Requests template. @@ -1186,7 +1188,7 @@ New indicators Internal Changes ^^^^^^^^^^^^^^^^ * `run_length.rle_statistics` now accepts a `window` argument. -* Common arguments to the `op` parameter now have better adjective and noun formattings. +* Common arguments to the `op` parameter now have better adjective and noun formatting. * Added and adjusted typing in call signatures and docstrings, with grammar fixes, for many `xclim.indices` operations. * Added internal function ``aggregate_between_dates`` for array aggregation operations using xarray datetime arrays with start and end DayOfYear values. @@ -1422,7 +1424,7 @@ Breaking changes * The python library `pandoc` is no longer listed as a docs build requirement. Documentation still requires a current version of `pandoc` binaries installed at system-level. * ANUCLIM indices have seen their `input_freq` parameter renamed to `src_timestep` for clarity. -* A clean-up and harmonization of the indicators metadata has changed some of the indicator identifiers, long_names, abstracts and titles. `xclim.atmos.drought_code` and `fire_weather_indexes` now have indentifiers "dc" and "fwi" (lowercase version of the previous identifiers). +* A clean-up and harmonization of the indicators metadata has changed some of the indicator identifiers, long_names, abstracts and titles. `xclim.atmos.drought_code` and `fire_weather_indexes` now have identifiers "dc" and "fwi" (lowercase version of the previous identifiers). * `xc.indices.run_length.run_length_with_dates` becomes `xc.indices.run_length.season_length`. Its argument `date` is now optional and the default changes from "07-01" to `None`. * `xc.indices.consecutive_frost_days` becomes `xc.indices.maximum_consecutive_frost_days`. * Changed the `history` indicator output attribute to `xclim_history` in order to respect CF conventions. @@ -1569,7 +1571,7 @@ v0.14.x (2020-02-21) * Refactoring of the documentation. * Added support for pint 0.10 * Add `atmos.heat_wave_total_length` (fixing a namespace issue) -* Fixes in `utils.percentile_doy` and `indices.winter_rain_ratio` for multidimensionnal datasets. 
+* Fixes in `utils.percentile_doy` and `indices.winter_rain_ratio` for multidimensional datasets. * Rewrote the `subset.subset_shape` function to allow for dask.delayed (lazy) computation. * Added utility functions to compute `time_bnds` when resampling data encoded with `CFTimeIndex` (non-standard calendars). * Fix in `subset.subset_gridpoint` for dask array coordinates. diff --git a/Makefile b/Makefile index 7146a99ed..60b1597bc 100644 --- a/Makefile +++ b/Makefile @@ -60,6 +60,7 @@ lint: ## check style with flake8 and black nbqa black --check docs blackdoc --check --exclude=xclim/indices/__init__.py xclim blackdoc --check docs + codespell xclim tests docs yamllint --config-file=.yamllint.yaml xclim test: ## run tests quickly with the default Python diff --git a/docs/installation.rst b/docs/installation.rst index 722121b85..adb805ea5 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -24,7 +24,7 @@ Anaconda release For ease of installation across operating systems, we also offer an Anaconda Python package hosted on conda-forge. This version tends to be updated at around the same frequency as the PyPI-hosted library, but can lag by a few days at times. -`xclim` can be installed from conda-forge wth the following: +`xclim` can be installed from conda-forge with the following: .. code-block:: shell diff --git a/docs/notebooks/ensembles.ipynb b/docs/notebooks/ensembles.ipynb index 665b60d19..75c57c2eb 100644 --- a/docs/notebooks/ensembles.ipynb +++ b/docs/notebooks/ensembles.ipynb @@ -17,8 +17,6 @@ "\n", "from __future__ import annotations\n", "\n", - "from pathlib import Path\n", - "\n", "import numpy as np\n", "import pandas as pd\n", "import xarray as xr\n", @@ -290,7 +288,7 @@ "\n", "We can then divide the plotted points into categories each with its own hatching pattern, usually leaving the robust data (models agree and enough show a significant change) without hatching. \n", "\n", - "Xclim provides some tools to help in generating these hatching masks. First is [xc.ensembles.robustness_fractions](../apidoc/xclim.ensembles.rst#xclim.ensembles._robustness.robustness_fractions) that can characterize the change significance and sign agreement accross ensemble members. To demonstrate its usage, we'll first generate some fake annual mean temperature data. Here, `ref` is the data on the reference period and `fut` is a future projection. There are 5 different members in the ensemble. We tweaked the generation so that all models agree on significant change in the \"south\" while agreement and signifiance of change decreases as we go north and east." + "Xclim provides some tools to help in generating these hatching masks. First is [xc.ensembles.robustness_fractions](../apidoc/xclim.ensembles.rst#xclim.ensembles._robustness.robustness_fractions) that can characterize the change significance and sign agreement across ensemble members. To demonstrate its usage, we'll first generate some fake annual mean temperature data. Here, `ref` is the data on the reference period and `fut` is a future projection. There are 5 different members in the ensemble. We tweaked the generation so that all models agree on significant change in the \"south\" while agreement and significance of change decreases as we go north and east."
] }, { diff --git a/docs/notebooks/partitioning.ipynb b/docs/notebooks/partitioning.ipynb index 01bd60004..77820c140 100644 --- a/docs/notebooks/partitioning.ipynb +++ b/docs/notebooks/partitioning.ipynb @@ -10,7 +10,7 @@ "Here we estimate the sources of uncertainty for an ensemble of climate model projections. The data is the same as used in the [IPCC WGI AR6 Atlas](https://github.com/IPCC-WG1/Atlas). \n", "\n", "## Fetch data\n", - "We'll only fetch a small sample of the full ensemble to illustrate the logic and data structure expected by the partitioning algorith." + "We'll only fetch a small sample of the full ensemble to illustrate the logic and data structure expected by the partitioning algorithm." ] }, { diff --git a/docs/notebooks/sdba-advanced.ipynb b/docs/notebooks/sdba-advanced.ipynb index 3ee096608..5b6caa6e1 100644 --- a/docs/notebooks/sdba-advanced.ipynb +++ b/docs/notebooks/sdba-advanced.ipynb @@ -840,7 +840,7 @@ "ref_prop = sdba.properties.spell_length_distribution(\n", " da=ref_future, thresh=\"28 degC\", op=\">\", stat=\"mean\", group=\"time.season\"\n", ")\n", - "# Properties are often associated with the same measures. This correspondance is implemented in xclim:\n", + "# Properties are often associated with the same measures. This correspondence is implemented in xclim:\n", "measure = sdba.properties.spell_length_distribution.get_measure()\n", "measure_sim = measure(sim_prop, ref_prop)\n", "measure_scen = measure(scen_prop, ref_prop)\n", diff --git a/docs/notebooks/sdba.ipynb b/docs/notebooks/sdba.ipynb index 7f0e75f90..83731f22d 100644 --- a/docs/notebooks/sdba.ipynb +++ b/docs/notebooks/sdba.ipynb @@ -369,7 +369,7 @@ "metadata": {}, "outputs": [], "source": [ - "# To get an exagerated example we select different points\n", + "# To get an exaggerated example we select different points\n", "# here \"lon\" will be our dimension of two \"spatially correlated\" points\n", "reft = ds.air.isel(lat=21, lon=[40, 52]).drop_vars([\"lon\", \"lat\"])\n", "simt = ds.air.isel(lat=18, lon=[17, 35]).drop_vars([\"lon\", \"lat\"])\n", @@ -570,7 +570,7 @@ " base=sdba.QuantileDeltaMapping, # Use QDM as the univariate adjustment.\n", " base_kws={\"nquantiles\": 20, \"group\": \"time\"},\n", " n_iter=20, # perform 20 iteration\n", - " n_escore=1000, # only send 1000 points to the escore metric (it is realy slow)\n", + " n_escore=1000, # only send 1000 points to the escore metric (it is really slow)\n", " )\n", "\n", "scenh_npdft = out.scenh.rename(time_hist=\"time\") # Bias-adjusted historical period\n", diff --git a/docs/notebooks/usage.ipynb b/docs/notebooks/usage.ipynb index 69a01c629..36e278b9d 100644 --- a/docs/notebooks/usage.ipynb +++ b/docs/notebooks/usage.ipynb @@ -66,7 +66,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This computation was made using the `growing_degree_days` **indicator**. The same computation could be made through the **index**. You can see how the metadata is alot poorer here." + "This computation was made using the `growing_degree_days` **indicator**. The same computation could be made through the **index**. You can see how the metadata is a lot poorer here." 
] }, { @@ -202,7 +202,7 @@ "):\n", " # Change the missing method to \"percent\", instead of the default \"any\"\n", " # Set the tolerance to 10%, periods with more than 10% of missing data\n", - " # in the input will be masked in the ouput.\n", + " # in the input will be masked in the output.\n", " gdd = xclim.atmos.growing_degree_days(daily_ds.air, thresh=\"10.0 degC\", freq=\"MS\")\n", "gdd" ] diff --git a/environment.yml b/environment.yml index 19d78a220..9ad2acd11 100644 --- a/environment.yml +++ b/environment.yml @@ -32,6 +32,7 @@ dependencies: - blackdoc - bump-my-version - cairosvg + - codespell - coverage - distributed >=2.0 - filelock diff --git a/pyproject.toml b/pyproject.toml index a76520be7..6ea8a6d7b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -60,6 +60,7 @@ dev = [ "black >=23.3.0", "blackdoc", "bump-my-version", + "codespell", "coverage[toml]", "flake8", "flake8-alphabetize", @@ -148,6 +149,9 @@ values = [ "release" ] +[tool.codespell] +skip = 'xclim/data/*.json,docs/_build,docs/notebooks/xclim_training/*.ipynb,docs/references.bib,__pycache__,*.nc,*.png,*.gz,*.whl' +ignore-words-list = "absolue,astroid,bloc,bui,callendar,degreee,environnement,hanel,inferrable,lond,nam,nd,ressources,vas" [tool.coverage.run] relative_files = true diff --git a/tests/test_ensembles.py b/tests/test_ensembles.py index f5a30af32..d093180f1 100644 --- a/tests/test_ensembles.py +++ b/tests/test_ensembles.py @@ -412,7 +412,7 @@ def test_kmeans_variweights(self, open_dataset, random_state): make_graph=False, variable_weights=var_weights, ) - # Results here may change according to sklearn version, hence the *isin* intead of == + # Results here may change according to sklearn version, hence the *isin* instead of == assert all(np.isin([12, 13, 16], ids)) assert len(ids) == 6 diff --git a/tests/test_indices.py b/tests/test_indices.py index 8f0b14e4d..5a7e0eb5d 100644 --- a/tests/test_indices.py +++ b/tests/test_indices.py @@ -1465,7 +1465,7 @@ def test_jetstream_metric_woollings(self): # Should raise ValueError as longitude is in 0-360 instead of -180.E-180.W with pytest.raises(ValueError): _ = xci.jetstream_metric_woollings(da_ua) - # redefine longitude coordiantes to -180.E-180.W so function runs + # redefine longitude coordinates to -180.E-180.W so function runs da_ua = da_ua.cf.assign_coords( { "X": ( @@ -2888,7 +2888,7 @@ def test_humidex(tas_series): # expected values from https://en.wikipedia.org/wiki/Humidex expected = np.array([16, 29, 47, 52]) * units.degC - # Celcius + # Celsius hc = xci.humidex(tas, dtps) np.testing.assert_array_almost_equal(hc, expected, 0) diff --git a/tests/test_sdba/test_base.py b/tests/test_sdba/test_base.py index 3cbe2867e..ffba45c1f 100644 --- a/tests/test_sdba/test_base.py +++ b/tests/test_sdba/test_base.py @@ -89,7 +89,7 @@ def test_grouper_apply(tas_series, use_dask, group, n): exp = tas.mean(dim=grouper.dim).expand_dims("group").T np.testing.assert_array_equal(out_mean, exp) - # With additionnal dimension included + # With additional dimension included grouper = Grouper(group, add_dims=["lat"]) out = grouper.apply("mean", tas) assert out.ndim == 1 @@ -98,7 +98,7 @@ def test_grouper_apply(tas_series, use_dask, group, n): assert out.attrs["group_compute_dims"] == [grouper.dim, "lat"] assert out.attrs["group_window"] == 1 - # Additionnal but main_only + # Additional but main_only out = grouper.apply("mean", tas, main_only=True) np.testing.assert_array_equal(out, out_mean) diff --git a/tests/test_temperature.py b/tests/test_temperature.py index 
918df2b41..897decbc1 100644 --- a/tests/test_temperature.py +++ b/tests/test_temperature.py @@ -275,7 +275,7 @@ def test_TN_3d_data(self, open_dataset): ~np.isnan(tnmean).values & ~np.isnan(tnmax).values & ~np.isnan(tnmin).values ) - # test maxes always greater than mean and mean alwyas greater than min (non nan values only) + # test maxes always greater than mean and mean always greater than min (non nan values only) assert np.all(tnmax.values[no_nan] > tnmean.values[no_nan]) & np.all( tnmean.values[no_nan] > tnmin.values[no_nan] ) diff --git a/tests/test_utils.py b/tests/test_utils.py index 0ce3c64b3..45ec02004 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -65,7 +65,7 @@ def test_ensure_chunk_size(): class TestNanCalcPercentiles: def test_calc_perc_type7(self): - # Exemple array from: https://en.wikipedia.org/wiki/Percentile#The_nearest-rank_method + # Example array from: https://en.wikipedia.org/wiki/Percentile#The_nearest-rank_method arr = np.asarray([15.0, 20.0, 35.0, 40.0, 50.0]) res = nan_calc_percentiles(arr, percentiles=[40.0], alpha=1, beta=1) # The expected is from R `quantile(arr, probs=c(0.4), type=7)` @@ -87,7 +87,7 @@ def test_calc_perc_type8(self): assert np.all(res[0][1] == 27) def test_calc_perc_2d(self): - # Exemple array from: https://en.wikipedia.org/wiki/Percentile#The_nearest-rank_method + # Example array from: https://en.wikipedia.org/wiki/Percentile#The_nearest-rank_method arr = np.asarray( [[15.0, 20.0, 35.0, 40.0, 50.0], [15.0, 20.0, 35.0, 40.0, 50.0]] ) diff --git a/xclim/core/bootstrapping.py b/xclim/core/bootstrapping.py index f32cf8318..be5136b61 100644 --- a/xclim/core/bootstrapping.py +++ b/xclim/core/bootstrapping.py @@ -23,7 +23,7 @@ def percentile_bootstrap(func): This feature is experimental. - Bootstraping avoids discontinuities in the exceedance between the reference period over which percentiles are + Bootstrapping avoids discontinuities in the exceedance between the reference period over which percentiles are computed, and "out of reference" periods. See `bootstrap_func` for details. Declaration example: @@ -71,12 +71,12 @@ def bootstrap_func(compute_index_func: Callable, **kwargs) -> xarray.DataArray: at the beginning and end of the reference period used to calculate percentiles. The bootstrap procedure can reduce those discontinuities by iteratively computing the percentile estimate and the index on altered reference periods. - Theses altered reference periods are themselves built iteratively: When computing the index for year x, the - bootstrapping create as many altered reference period as the number of years in the reference period. - To build one altered reference period, the values of year x are replaced by the values of another year in the + These altered reference periods are themselves built iteratively: When computing the index for year `x`, the + bootstrapping creates as many altered reference periods as the number of years in the reference period. + To build one altered reference period, the values of year `x` are replaced by the values of another year in the reference period, then the index is computed on this altered period. This is repeated for each year of the reference - period, excluding year x, The final result of the index for year x, is then the average of all the index results on - altered years. + period, excluding year `x`. The final result of the index for year `x` is then the average of all the index results + on altered years. 
Parameters ---------- diff --git a/xclim/core/calendar.py b/xclim/core/calendar.py index dbe01a076..1e72fcc45 100644 --- a/xclim/core/calendar.py +++ b/xclim/core/calendar.py @@ -192,12 +192,12 @@ def common_calendar(calendars: Sequence[str], join="outer") -> str: join : {'inner', 'outer'} The criterion for the common calendar. - - 'outer': the common calendar is the smallest calendar (in number of days by year) - that will include all the dates of the other calendars. When converting - the data to this calendar, no timeseries will lose elements, but some + - 'outer': the common calendar is the smallest calendar (in number of days by year) that will include all the + dates of the other calendars. + When converting the data to this calendar, no timeseries will lose elements, but some might be missing (gaps or NaNs in the series). - - 'inner': the common calender is the smallest calendar of the list. When converting - the data to this calendar, no timeseries will have missing elements (no gaps or NaNs), + - 'inner': the common calendar is the smallest calendar of the list. + When converting the data to this calendar, no timeseries will have missing elements (no gaps or NaNs), but some might be dropped. Examples @@ -1102,7 +1102,7 @@ def time_bnds( # noqa: C901 is_on_offset = period.onOffset eps = pd.Timedelta(precision or "1U").to_pytimedelta() day = pd.Timedelta("1D").to_pytimedelta() - floor.pop("nanosecond") # unsuported by cftime + floor.pop("nanosecond") # unsupported by cftime else: period = pd.tseries.frequencies.to_offset(freq) is_on_offset = period.is_on_offset @@ -1823,14 +1823,14 @@ def stack_periods( def unstack_periods(da: xr.DataArray | xr.Dataset, dim: str = "period"): """Unstack an array constructed with :py:func:`stack_periods`. - Can only work with periods stacked with a ``stride`` that divides ``window`` in a odd number of sections. - When ``stride`` is smaller than ``window``, only the centermost stride of each window is kept, + Can only work with periods stacked with a ``stride`` that divides ``window`` in an odd number of sections. + When ``stride`` is smaller than ``window``, only the center-most stride of each window is kept, except for the beginning and end which are taken from the first and last windows. Parameters ---------- da : xr.DataArray - As constructed by :py:func:`stack_periods`, attributes of the period coordinates must have been perserved. + As constructed by :py:func:`stack_periods`, attributes of the period coordinates must have been preserved. dim : str The period dimension name. @@ -1838,7 +1838,8 @@ def unstack_periods(da: xr.DataArray | xr.Dataset, dim: str = "period"): ----- The following table shows which strides are included (``o``) in the unstacked output. in this example, ``stride`` was a fifth of ``window`` and ``min_length`` was 4 times ``stride``. - The row index ``i`` the period index in the stacked datast, columns are the stride-long section of the original timeseries. + The row index ``i`` is the period index in the stacked dataset, columns are the stride-long section of the original + timeseries. .. table:: Unstacking example with ``stride < window``. diff --git a/xclim/core/units.py b/xclim/core/units.py index aae6441f1..e8cf71fb3 100644 --- a/xclim/core/units.py +++ b/xclim/core/units.py @@ -640,7 +640,7 @@ def _rate_and_amount_converter( if base in ["M", "Q", "A"]: start = time.indexes[dim][0] if not start_anchor: - # Anchor is on the end of the period, substract 1 period. + # Anchor is on the end of the period, subtract 1 period.
start = start - xr.coding.cftime_offsets.to_offset(freq) # In the diff below, assign to upper label! label = "upper" @@ -1135,8 +1135,8 @@ def declare_relative_units(**units_by_name) -> Callable: ---------- \*\*kwargs Mapping from the input parameter names to dimensions relative to other parameters. - The dimensons can be a single parameter name as `<other_var>` or more complex expressions, - like : `<other_var> * [time]`. + The dimensions can be a single parameter name as `<other_var>` or more complex expressions, + like: `<other_var> * [time]`. Returns ------- diff --git a/xclim/ensembles/_robustness.py b/xclim/ensembles/_robustness.py index 561f821b8..3502defba 100644 --- a/xclim/ensembles/_robustness.py +++ b/xclim/ensembles/_robustness.py @@ -31,7 +31,7 @@ SIGNIFICANCE_TESTS = {} """Registry of change significance tests. -New tests must be decorated with :py:func:`significance_test` and fullfill the following requirements: +New tests must be decorated with :py:func:`significance_test` and fulfill the following requirements: - Function name should begin by "_", registered test name is the function name without its first character and with _ replaced by -. - Function must accept 2 positional arguments : fut and ref (see :py:func:`change_significance` for definitions) @@ -51,7 +51,7 @@ def significance_test(func): return func -# This function's docstring is modified to inlude the registered test names and docs. +# This function's docstring is modified to include the registered test names and docs. # See end of this file. @update_xclim_history def robustness_fractions( # noqa: C901 @@ -285,7 +285,7 @@ def change_significance( # noqa: C901 xr.DataArray | xr.Dataset | None, ] ): - """Backwards-compatible implementaton of :py:func:`robustness_fractions`.""" + """Backwards-compatible implementation of :py:func:`robustness_fractions`.""" warnings.warn( ( "Function change_significance is deprecated as of xclim 0.47 and will be removed in 0.49. " diff --git a/xclim/indicators/land/_streamflow.py b/xclim/indicators/land/_streamflow.py index 03bb0727b..9459d9995 100644 --- a/xclim/indicators/land/_streamflow.py +++ b/xclim/indicators/land/_streamflow.py @@ -30,7 +30,7 @@ def cfcheck(q): units="", long_name="Base flow index", description="Minimum of the 7-day moving average flow divided by the mean flow.", - asbtract="Minimum of the 7-day moving average flow divided by the mean flow.", + abstract="Minimum of the 7-day moving average flow divided by the mean flow.", compute=base_flow_index, ) diff --git a/xclim/indices/_agro.py b/xclim/indices/_agro.py index 96fa9f795..a3ac3eebb 100644 --- a/xclim/indices/_agro.py +++ b/xclim/indices/_agro.py @@ -1138,7 +1138,7 @@ def standardized_precipitation_index( params : xarray.DataArray Fit parameters. The `params` can be computed using ``xclim.indices.stats.standardized_index_fit_params`` in advance. - The ouput can be given here as input, and it overrides other options. + The output can be given here as input, and it overrides other options. \*\*indexer Indexing parameters to compute the indicator on a temporal subset of the data. It accepts the same arguments as :py:func:`xclim.indices.generic.select_time`.
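Note: the corrected ``params`` documentation above describes a two-step workflow: fit the distribution once, then reuse the parameters. A minimal sketch of that pattern, assuming xclim 0.47-era signatures (the ``pr_obs.nc`` file, the calibration slice, and the fitting options are illustrative assumptions, not part of this patch):

.. code-block:: python

    import xarray as xr

    from xclim.indices import standardized_precipitation_index
    from xclim.indices.stats import standardized_index_fit_params

    pr = xr.open_dataset("pr_obs.nc").pr  # hypothetical daily precipitation data

    # Fit the distribution parameters once, over a calibration period...
    params = standardized_index_fit_params(
        pr.sel(time=slice("1981", "2010")),
        freq="MS",
        window=3,
        dist="gamma",
        method="APP",
    )

    # ...then pass them as `params`, which overrides the other fitting options.
    spi = standardized_precipitation_index(pr, params=params)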
diff --git a/xclim/indices/_simple.py b/xclim/indices/_simple.py index 49a60dc56..e59db0f22 100644 --- a/xclim/indices/_simple.py +++ b/xclim/indices/_simple.py @@ -739,7 +739,7 @@ def sfcWindmax_mean(sfcWindmax: xarray.DataArray, freq: str = "YS") -> xarray.Da @declare_units(sfcWindmax="[speed]") def sfcWindmax_min(sfcWindmax: xarray.DataArray, freq: str = "YS") -> xarray.DataArray: - r"""Lowest daily maxium wind speed. + r"""Lowest daily maximum wind speed. The minimum of daily maximum wind speed. diff --git a/xclim/indices/_threshold.py b/xclim/indices/_threshold.py index f897dc206..f7a3d29a2 100644 --- a/xclim/indices/_threshold.py +++ b/xclim/indices/_threshold.py @@ -1601,7 +1601,7 @@ def first_snowfall( Notes ----- The 1 mm/day liquid water equivalent snowfall rate threshold in :cite:cts:`frei_snowfall_2018` corresponds - to the 1 cm/day snowfall rate threshold in :cite:cts:`cbcl_climate_2020` using a snow denstiy of 100 kg/m**3. + to the 1 cm/day snowfall rate threshold in :cite:cts:`cbcl_climate_2020` using a snow density of 100 kg/m**3. If threshold and prsn differ by a density (i.e. [length/time] vs. [mass/area/time]), a liquid water equivalent snowfall rate is assumed and the threshold is converted using a 1000 kg m-3 density. @@ -1656,7 +1656,7 @@ def last_snowfall( Notes ----- The 1 mm/day liquid water equivalent snowfall rate threshold in :cite:cts:`frei_snowfall_2018` corresponds - to the 1 cm/day snowfall rate threshold in :cite:cts:`cbcl_climate_2020` using a snow denstiy of 100 kg/m**3. + to the 1 cm/day snowfall rate threshold in :cite:cts:`cbcl_climate_2020` using a snow density of 100 kg/m**3. If threshold and prsn differ by a density (i.e. [length/time] vs. [mass/area/time]), a liquid water equivalent snowfall rate is assumed and the threshold is converted using a 1000 kg m-3 density. @@ -1759,7 +1759,7 @@ def snowfall_frequency( Notes ----- The 1 mm/day liquid water equivalent snowfall rate threshold in :cite:cts:`frei_snowfall_2018` corresponds - to the 1 cm/day snowfall rate threshold in :cite:cts:`cbcl_climate_2020` using a snow denstiy of 100 kg/m**3. + to the 1 cm/day snowfall rate threshold in :cite:cts:`cbcl_climate_2020` using a snow density of 100 kg/m**3. If threshold and prsn differ by a density (i.e. [length/time] vs. [mass/area/time]), a liquid water equivalent snowfall rate is assumed and the threshold is converted using a 1000 kg m-3 density. @@ -1811,7 +1811,7 @@ def snowfall_intensity( Notes ----- The 1 mm/day liquid water equivalent snowfall rate threshold in :cite:cts:`frei_snowfall_2018` corresponds - to the 1 cm/day snowfall rate threshold in :cite:cts:`cbcl_climate_2020` using a snow denstiy of 100 kg/m**3. + to the 1 cm/day snowfall rate threshold in :cite:cts:`cbcl_climate_2020` using a snow density of 100 kg/m**3. If threshold and prsn differ by a density (i.e. [length/time] vs. [mass/area/time]), a liquid water equivalent snowfall rate is assumed and the threshold is converted using a 1000 kg m-3 density. diff --git a/xclim/indices/fire/_cffwis.py b/xclim/indices/fire/_cffwis.py index ecbc406ff..531916fe5 100644 --- a/xclim/indices/fire/_cffwis.py +++ b/xclim/indices/fire/_cffwis.py @@ -35,7 +35,7 @@ Overwintering ------------- -Additionaly, overwintering of the drought code is also directly implemented in :py:func:`fire_weather_ufunc`. +Additionally, overwintering of the drought code is also directly implemented in :py:func:`fire_weather_ufunc`. 
The last drought_code of the season is kept in "winter" (where the fire season mask is False) and the precipitation is accumulated until the start of the next season. The first drought code is computed as a function of these instead of using the default DCStart value. Parameters to :py:func:`_overwintering_drought_code` are listed below. diff --git a/xclim/sdba/_processing.py b/xclim/sdba/_processing.py index 2a78c0a77..eb7a862b6 100644 --- a/xclim/sdba/_processing.py +++ b/xclim/sdba/_processing.py @@ -46,7 +46,7 @@ def _adapt_freq( Returns ------- - xr.Dataset, wth the following variables: + xr.Dataset, with the following variables: - `sim_adj`: Simulated data with the same frequency of values under threshold than ref. Adjustment is made group-wise. diff --git a/xclim/sdba/adjustment.py b/xclim/sdba/adjustment.py index 328ce91ab..c56e19c64 100644 --- a/xclim/sdba/adjustment.py +++ b/xclim/sdba/adjustment.py @@ -337,7 +337,7 @@ class EmpiricalQuantileMapping(TrainAdjust): Adjust step: interp : {'nearest', 'linear', 'cubic'} - The interpolation method to use when interpolating the adjustment factors. Defaults to "nearset". + The interpolation method to use when interpolating the adjustment factors. Defaults to "nearest". extrapolation : {'constant', 'nan'} The type of extrapolation to use. See :py:func:`xclim.sdba.utils.extrapolate_qm` for details. Defaults to "constant". @@ -1277,7 +1277,7 @@ def _parse(s): ( "The adjust method accepts ref, hist, sim and all arguments listed " 'below in "Parameters". It also accepts a `multi_dim` argument ' - "specifying the dimension accross which to take the 'features' and " + "specifying the dimension across which to take the 'features' and " "is valid for multivariate methods only. See :py:func:`xclim.sdba.stack_variables`." "In the description below, `n_features` is the size of the `multi_dim` " "dimension. There is no way of specifying parameters across other " diff --git a/xclim/sdba/base.py b/xclim/sdba/base.py index c759ca9fb..f85ff7974 100644 --- a/xclim/sdba/base.py +++ b/xclim/sdba/base.py @@ -684,7 +684,7 @@ def _call_and_transpose_on_exit(dsblock, **kwargs): # Remove all auxiliary coords on both tmpl and ds extra_coords = { - nam: crd for nam, crd in ds.coords.items() if nam not in crd.dims + name: crd for name, crd in ds.coords.items() if name not in crd.dims } ds = ds.drop_vars(extra_coords.keys()) # Coords not sharing dims with `all_dims` (like scalar aux coord on reduced 1D input) are absent from tmpl @@ -697,8 +697,8 @@ def _call_and_transpose_on_exit(dsblock, **kwargs): # Add back the extra coords, but only those which have compatible dimensions (like xarray would have done) out = out.assign_coords( { - nam: crd - for nam, crd in extra_coords.items() + name: crd + for name, crd in extra_coords.items() if set(crd.dims).issubset(out.dims) } ) diff --git a/xclim/sdba/properties.py b/xclim/sdba/properties.py index 3f6eacaab..e729a13ee 100644 --- a/xclim/sdba/properties.py +++ b/xclim/sdba/properties.py @@ -322,7 +322,7 @@ def _spell_length_distribution( and then over all years (e.g. Jan 1980-2010) group : {'time', 'time.season', 'time.month'} Grouping of the output. - E.g. If 'time.month', the spell lengths are coputed separately for each month. + E.g. If 'time.month', the spell lengths are computed separately for each month. resample_before_rl : bool Determines if the resampling should take place before or after the run length encoding (or a similar algorithm) is applied to runs. 
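Note: for the ``group`` behaviour documented in the hunk above, a usage sketch following the call pattern already shown in ``docs/notebooks/sdba-advanced.ipynb`` earlier in this patch (the synthetic ``tasmax`` series is an illustrative stand-in for real data):

.. code-block:: python

    import numpy as np
    import pandas as pd
    import xarray as xr

    from xclim import sdba

    # Synthetic daily "temperature" series, only to make the sketch self-contained.
    time = pd.date_range("2000-01-01", periods=3 * 365, freq="D")
    tasmax = xr.DataArray(
        20 + 10 * np.random.rand(time.size),
        dims="time",
        coords={"time": time},
        attrs={"units": "degC"},
    )

    # With group="time.month", spell lengths are computed separately for each
    # month (pooling all years together) rather than over the whole series.
    monthly_spells = sdba.properties.spell_length_distribution(
        da=tasmax, thresh="28 degC", op=">", stat="mean", group="time.month"
    )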
@@ -463,7 +463,7 @@ def _annual_cycle( ) -> xr.DataArray: r"""Annual cycle statistics. - A daily climatology is calculated and optionnaly smoothed with a (circular) moving average. + A daily climatology is calculated and optionally smoothed with a (circular) moving average. The requested statistic is returned. Parameters @@ -678,10 +678,10 @@ def _corr_btw_var( corr_type: {'Pearson','Spearman'} Type of correlation to calculate. output: {'correlation', 'pvalue'} - Wheter to return the correlation coefficient or the p-value. + Whether to return the correlation coefficient or the p-value. group : {'time', 'time.season', 'time.month'} Grouping of the output. - Eg. For 'time.month', the correlation would be calculated on each month separately, + e.g. For 'time.month', the correlation would be calculated on each month separately, but with all the years together. Returns diff --git a/xclim/sdba/utils.py b/xclim/sdba/utils.py index 441983b7f..f3d523967 100644 --- a/xclim/sdba/utils.py +++ b/xclim/sdba/utils.py @@ -594,11 +594,11 @@ def best_pc_orientation_simple( """ m = R.shape[0] P = np.diag(val * np.ones(m)) - signes = dict(itertools.zip_longest(itertools.product(*[[1, -1]] * m), [None])) - for orient in list(signes.keys()): + signs = dict(itertools.zip_longest(itertools.product(*[[1, -1]] * m), [None])) + for orient in list(signs.keys()): # Compute new error - signes[orient] = np.linalg.norm(P - ((orient * R) @ Hinv) @ P) - return np.array(min(signes, key=lambda o: signes[o])) + signs[orient] = np.linalg.norm(P - ((orient * R) @ Hinv) @ P) + return np.array(min(signs, key=lambda o: signs[o])) def best_pc_orientation_full( @@ -611,9 +611,9 @@ def best_pc_orientation_full( """Return best orientation vector for `A` according to the method of :cite:t:`sdba-alavoine_distinct_2022`. Eigenvectors returned by `pc_matrix` do not have a defined orientation. - Given an inverse transform Hinv, a transform R, the actual and target origins `Hmean` and `Rmean` and the matrix of - training observations hist, this computes a scenario for all possible orientations and return the orientation that - maximizes the Spearman correlation coefficient of all variables. The correlation is computed for each variable + Given an inverse transform `Hinv`, a transform `R`, the actual and target origins `Hmean` and `Rmean` and the matrix + of training observations `hist`, this computes a scenario for all possible orientations and returns the orientation + that maximizes the Spearman correlation coefficient of all variables. The correlation is computed for each variable individually, then averaged. This trick is explained in :cite:t:`sdba-alavoine_distinct_2022`.
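Note: the ``signes`` → ``signs`` rename above (and continued below) touches a mapping that enumerates every possible orientation, i.e. all ``2**m`` sign vectors for ``m`` principal components. A standalone illustration of that enumeration, using only the standard library:

.. code-block:: python

    import itertools

    m = 3  # number of principal components (illustrative)
    orientations = list(itertools.product(*[[1, -1]] * m))
    assert len(orientations) == 2**m  # one sign choice per component
    print(orientations[:2])  # [(1, 1, 1), (1, 1, -1)]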
@@ -641,11 +641,14 @@ def best_pc_orientation_full( ---------- :cite:cts:`sdba-alavoine_distinct_2022` + See Also + -------- + sdba.adjustment.PrincipalComponentAdjustment """ # All possible orientation vectors m = R.shape[0] - signes = dict(itertools.zip_longest(itertools.product(*[[1, -1]] * m), [None])) - for orient in list(signes.keys()): + signs = dict(itertools.zip_longest(itertools.product(*[[1, -1]] * m), [None])) + for orient in list(signs.keys()): # Calculate scen for hist scen = np.atleast_2d(Rmean).T + ((orient * R) @ Hinv) @ ( hist - np.atleast_2d(Hmean).T ) # Correlation for each variable corr = [spearmanr(hist[i, :], scen[i, :])[0] for i in range(hist.shape[0])] # Store mean correlation - signes[orient] = np.mean(corr) + signs[orient] = np.mean(corr) # Return orientation that maximizes the correlation - return np.array(max(signes, key=lambda o: signes[o])) + return np.array(max(signs, key=lambda o: signs[o])) def get_clusters_1d( @@ -667,7 +670,7 @@ def get_clusters_1d( Parameters ---------- - data: 1D ndarray + data : 1D ndarray Values to get clusters from. u1 : float Extreme value threshold, at least one value in the cluster must exceed this. diff --git a/xclim/testing/helpers.py b/xclim/testing/helpers.py index 93b824ca2..93397e662 100644 --- a/xclim/testing/helpers.py +++ b/xclim/testing/helpers.py @@ -257,7 +257,7 @@ def test_timeseries( def _raise_on_compute(dsk: dict): - """Raise an AssertionError mentionning the number triggered tasks.""" + """Raise an AssertionError mentioning the number of triggered tasks.""" raise AssertionError( f"Not lazy. Computation was triggered with a graph of {len(dsk)} tasks." )
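Note: ``_raise_on_compute`` is the kind of hook meant to be registered as a ``dask`` scheduler callback so tests can assert laziness. A minimal sketch of that pattern (the ``assert_lazy`` name here is illustrative; xclim's testing helpers define a similar guard):

.. code-block:: python

    import dask.array as da
    from dask.callbacks import Callback


    def _raise_on_compute(dsk: dict):
        """Raise an AssertionError mentioning the number of triggered tasks."""
        raise AssertionError(
            f"Not lazy. Computation was triggered with a graph of {len(dsk)} tasks."
        )


    # The `start` hook fires when a scheduler begins executing a graph, so any
    # computation triggered inside the `with` block raises immediately.
    assert_lazy = Callback(start=_raise_on_compute)

    lazy_mean = da.ones((10, 10), chunks=5).mean()  # graph construction only
    with assert_lazy:
        still_lazy = lazy_mean + 1  # building on the graph is fine
        # still_lazy.compute()  # would raise AssertionError inside this block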