From 9deaa5211cc4c18f1614b7c8dca7bef79ed73987 Mon Sep 17 00:00:00 2001
From: Oriol Abril-Pla
Date: Fri, 11 Mar 2022 16:44:43 +0200
Subject: [PATCH] unpin black and reformat codebase (#1994)

* unpin black and reformat codebase

* reformat benchmarks
---
 arviz/plots/backends/__init__.py              |  2 +-
 arviz/plots/backends/bokeh/autocorrplot.py    |  2 +-
 arviz/plots/backends/bokeh/elpdplot.py        |  2 +-
 arviz/plots/backends/bokeh/pairplot.py        |  2 +-
 .../plots/backends/matplotlib/autocorrplot.py |  2 +-
 arviz/plots/backends/matplotlib/elpdplot.py   |  8 +++---
 arviz/plots/backends/matplotlib/khatplot.py   |  2 +-
 arviz/plots/backends/matplotlib/pairplot.py   |  2 +-
 arviz/plots/plot_utils.py                     |  2 +-
 arviz/stats/density_utils.py                  | 26 +++++++++----------
 arviz/stats/diagnostics.py                    |  2 +-
 arviz/stats/stats.py                          |  2 +-
 arviz/tests/base_tests/test_stats.py          |  4 +--
 arviz/tests/base_tests/test_utils.py          |  2 +-
 arviz/tests/external_tests/test_data_emcee.py |  6 ++---
 arviz/tests/helpers.py                        |  4 +--
 asv_benchmarks/benchmarks/benchmarks.py       |  4 +--
 requirements-dev.txt                          |  2 +-
 requirements-docs.txt                         |  2 +-
 19 files changed, 39 insertions(+), 39 deletions(-)

diff --git a/arviz/plots/backends/__init__.py b/arviz/plots/backends/__init__.py
index 41e6584163..81c6e92913 100644
--- a/arviz/plots/backends/__init__.py
+++ b/arviz/plots/backends/__init__.py
@@ -165,7 +165,7 @@ def create_layout(ax, force_layout=False):
     if subplot_order in ("square", "square_trimmed"):
         ax = [item for item in ax.ravel().tolist() if item is not None]
         n = int(np.ceil(len(ax) ** 0.5))
-        ax = ax + [None for _ in range(n ** 2 - len(ax))]
+        ax = ax + [None for _ in range(n**2 - len(ax))]
         ax = np.array(ax).reshape(n, n)
         ax = ax.tolist()
         if (subplot_order == "square_trimmed") and any(
diff --git a/arviz/plots/backends/bokeh/autocorrplot.py b/arviz/plots/backends/bokeh/autocorrplot.py
index 97bd8d20d6..7a2326c48d 100644
--- a/arviz/plots/backends/bokeh/autocorrplot.py
+++ b/arviz/plots/backends/bokeh/autocorrplot.py
@@ -76,7 +76,7 @@ def plot_autocorr(
         x_prime = x
         if combined:
             x_prime = x.flatten()
-        c_i = 1.96 / x_prime.size ** 0.5
+        c_i = 1.96 / x_prime.size**0.5
         y = autocorr(x_prime)
 
         ax.add_layout(BoxAnnotation(bottom=-c_i, top=c_i, fill_color="gray"))
diff --git a/arviz/plots/backends/bokeh/elpdplot.py b/arviz/plots/backends/bokeh/elpdplot.py
index c18f20b1a5..ad82108149 100644
--- a/arviz/plots/backends/bokeh/elpdplot.py
+++ b/arviz/plots/backends/bokeh/elpdplot.py
@@ -73,7 +73,7 @@ def plot_elpd(
 
     else:
         max_plots = (
-            numvars ** 2 if rcParams["plot.max_subplots"] is None else rcParams["plot.max_subplots"]
+            numvars**2 if rcParams["plot.max_subplots"] is None else rcParams["plot.max_subplots"]
         )
         vars_to_plot = np.sum(np.arange(numvars).cumsum() < max_plots)
         if vars_to_plot < numvars:
diff --git a/arviz/plots/backends/bokeh/pairplot.py b/arviz/plots/backends/bokeh/pairplot.py
index ffd27c4cb7..2965278d73 100644
--- a/arviz/plots/backends/bokeh/pairplot.py
+++ b/arviz/plots/backends/bokeh/pairplot.py
@@ -133,7 +133,7 @@ def plot_pair(
         dpi = backend_kwargs.pop("dpi")
 
         max_plots = (
-            numvars ** 2 if rcParams["plot.max_subplots"] is None else rcParams["plot.max_subplots"]
+            numvars**2 if rcParams["plot.max_subplots"] is None else rcParams["plot.max_subplots"]
         )
         vars_to_plot = np.sum(np.arange(numvars).cumsum() < max_plots)
         if vars_to_plot < numvars:
diff --git a/arviz/plots/backends/matplotlib/autocorrplot.py b/arviz/plots/backends/matplotlib/autocorrplot.py
index d80b5d38fa..998720f1bf 100644
--- a/arviz/plots/backends/matplotlib/autocorrplot.py
+++ b/arviz/plots/backends/matplotlib/autocorrplot.py
@@ -50,7 +50,7 @@ def plot_autocorr(
         x_prime = x
         if combined:
             x_prime = x.flatten()
-        c_i = 1.96 / x_prime.size ** 0.5
+        c_i = 1.96 / x_prime.size**0.5
         y = autocorr(x_prime)
 
         ax.fill_between([0, max_lag], -c_i, c_i, color="0.75")
diff --git a/arviz/plots/backends/matplotlib/elpdplot.py b/arviz/plots/backends/matplotlib/elpdplot.py
index 9d018f3853..0af58fb8ff 100644
--- a/arviz/plots/backends/matplotlib/elpdplot.py
+++ b/arviz/plots/backends/matplotlib/elpdplot.py
@@ -55,7 +55,7 @@ def plot_elpd(
                 for coord, float_color in color_mapping.items()
             ]
             plot_kwargs.setdefault("cmap", cmap_name)
-            plot_kwargs.setdefault("s", markersize ** 2)
+            plot_kwargs.setdefault("s", markersize**2)
             plot_kwargs.setdefault("c", colors)
         else:
             legend = False
@@ -70,7 +70,7 @@ def plot_elpd(
         (figsize, ax_labelsize, titlesize, xt_labelsize, _, markersize) = _scale_fig_size(
             figsize, textsize, numvars - 1, numvars - 1
         )
-        plot_kwargs.setdefault("s", markersize ** 2)
+        plot_kwargs.setdefault("s", markersize**2)
         backend_kwargs.setdefault("figsize", figsize)
         backend_kwargs["squeeze"] = True
         if ax is None:
@@ -113,7 +113,7 @@ def plot_elpd(
 
     else:
         max_plots = (
-            numvars ** 2 if rcParams["plot.max_subplots"] is None else rcParams["plot.max_subplots"]
+            numvars**2 if rcParams["plot.max_subplots"] is None else rcParams["plot.max_subplots"]
         )
         vars_to_plot = np.sum(np.arange(numvars).cumsum() < max_plots)
         if vars_to_plot < numvars:
@@ -128,7 +128,7 @@ def plot_elpd(
         (figsize, ax_labelsize, titlesize, xt_labelsize, _, markersize) = _scale_fig_size(
             figsize, textsize, numvars - 2, numvars - 2
         )
-        plot_kwargs.setdefault("s", markersize ** 2)
+        plot_kwargs.setdefault("s", markersize**2)
 
         if ax is None:
             fig, ax = plt.subplots(
diff --git a/arviz/plots/backends/matplotlib/khatplot.py b/arviz/plots/backends/matplotlib/khatplot.py
index 219f90a64e..5778e2fec3 100644
--- a/arviz/plots/backends/matplotlib/khatplot.py
+++ b/arviz/plots/backends/matplotlib/khatplot.py
@@ -69,7 +69,7 @@ def plot_khat(
     hlines_kwargs["color"] = vectorized_to_hex(hlines_kwargs["color"])
 
     if markersize is None:
-        markersize = scaled_markersize ** 2  # s in scatter plot mus be markersize square
+        markersize = scaled_markersize**2  # s in scatter plot mus be markersize square
         # for dots to have the same size
 
     kwargs = matplotlib_kwarg_dealiaser(kwargs, "scatter")
diff --git a/arviz/plots/backends/matplotlib/pairplot.py b/arviz/plots/backends/matplotlib/pairplot.py
index 336b3f8375..5090602a31 100644
--- a/arviz/plots/backends/matplotlib/pairplot.py
+++ b/arviz/plots/backends/matplotlib/pairplot.py
@@ -222,7 +222,7 @@ def plot_pair(
         not_marginals = int(not marginals)
         num_subplot_cols = numvars - not_marginals
         max_plots = (
-            num_subplot_cols ** 2
+            num_subplot_cols**2
             if rcParams["plot.max_subplots"] is None
             else rcParams["plot.max_subplots"]
         )
diff --git a/arviz/plots/plot_utils.py b/arviz/plots/plot_utils.py
index 4b5441b0f6..26ab168f5a 100644
--- a/arviz/plots/plot_utils.py
+++ b/arviz/plots/plot_utils.py
@@ -125,7 +125,7 @@ def in_bounds(val):
     if n_items <= max_cols:
         return 1, n_items
 
-    ideal = in_bounds(round(n_items ** 0.5))
+    ideal = in_bounds(round(n_items**0.5))
 
     for offset in (0, 1, -1, 2, -2):
         cols = in_bounds(ideal + offset)
diff --git a/arviz/stats/density_utils.py b/arviz/stats/density_utils.py
index 70592fd5d4..d857bee1b7 100644
--- a/arviz/stats/density_utils.py
+++ b/arviz/stats/density_utils.py
@@ -71,7 +71,7 @@ def _bw_isj(x, grid_counts=None, x_std=None, x_range=None):
     a_sq = a_k[range(1, grid_len)] ** 2
 
     t = _root(_fixed_point, x_len, args=(x_len, k_sq, a_sq), x=x)
-    h = t ** 0.5 * x_range
+    h = t**0.5 * x_range
 
     return h
 
@@ -100,8 +100,8 @@ def _bw_taylor(x):
     """
     x_len = len(x)
    kappa = _kappa_mle(x)
-    num = 3 * x_len * kappa ** 2 * ive(2, 2 * kappa)
-    den = 4 * np.pi ** 0.5 * ive(0, kappa) ** 2
+    num = 3 * x_len * kappa**2 * ive(2, 2 * kappa)
+    den = 4 * np.pi**0.5 * ive(0, kappa) ** 2
     return (num / den) ** 0.4
 
 
@@ -181,11 +181,11 @@ def _a1inv(x):
     Returns the value k, such that a1inv(x) = k, i.e. a1(k) = x.
     """
     if 0 <= x < 0.53:
-        return 2 * x + x ** 3 + (5 * x ** 5) / 6
+        return 2 * x + x**3 + (5 * x**5) / 6
     elif x < 0.85:
         return -0.4 + 1.39 * x + 0.43 / (1 - x)
     else:
-        return 1 / (x ** 3 - 4 * x ** 2 + 3 * x)
+        return 1 / (x**3 - 4 * x**2 + 3 * x)
 
 
 def _kappa_mle(x):
@@ -235,7 +235,7 @@ def _fixed_point(t, N, k_sq, a_sq):
     a_sq = np.asfarray(a_sq, dtype=np.float64)
 
     l = 7
-    f = np.sum(np.power(k_sq, l) * a_sq * np.exp(-k_sq * np.pi ** 2 * t))
+    f = np.sum(np.power(k_sq, l) * a_sq * np.exp(-k_sq * np.pi**2 * t))
     f *= 0.5 * np.pi ** (2.0 * l)
 
     for j in np.arange(l - 1, 2 - 1, -1):
@@ -243,10 +243,10 @@ def _fixed_point(t, N, k_sq, a_sq):
         c2 = np.product(np.arange(1.0, 2 * j + 1, 2, dtype=np.float64))
         c2 /= (np.pi / 2) ** 0.5
         t_j = np.power((c1 * (c2 / (N * f))), (2.0 / (3.0 + 2.0 * j)))
-        f = np.sum(k_sq ** j * a_sq * np.exp(-k_sq * np.pi ** 2.0 * t_j))
+        f = np.sum(k_sq**j * a_sq * np.exp(-k_sq * np.pi**2.0 * t_j))
         f *= 0.5 * np.pi ** (2 * j)
 
-    out = t - (2 * N * np.pi ** 0.5 * f) ** (-0.4)
+    out = t - (2 * N * np.pi**0.5 * f) ** (-0.4)
 
     return out
 
@@ -777,13 +777,13 @@ def _kde_adaptive(x, bw, grid_edges, grid_counts, grid_len, bound_correction, **
             [bw_adj[grid_npad - 1 :: -1], bw_adj, bw_adj[grid_len : grid_len - grid_npad - 1 : -1]]
         )
         pdf_mat = (grid_padded - grid_padded[:, None]) / bw_adj[:, None]
-        pdf_mat = np.exp(-0.5 * pdf_mat ** 2) * grid_counts[:, None]
+        pdf_mat = np.exp(-0.5 * pdf_mat**2) * grid_counts[:, None]
         pdf_mat /= (2 * np.pi) ** 0.5 * bw_adj[:, None]
         pdf = np.sum(pdf_mat[:, grid_npad : grid_npad + grid_len], axis=0) / len(x)
 
     else:
         pdf_mat = (grid - grid[:, None]) / bw_adj[:, None]
-        pdf_mat = np.exp(-0.5 * pdf_mat ** 2) * grid_counts[:, None]
+        pdf_mat = np.exp(-0.5 * pdf_mat**2) * grid_counts[:, None]
         pdf_mat /= (2 * np.pi) ** 0.5 * bw_adj[:, None]
         pdf = np.sum(pdf_mat, axis=0) / len(x)
 
@@ -838,7 +838,7 @@ def _fast_kde_2d(x, y, gridsize=(128, 128), circular=False):
     std_devs = np.diag(cov) ** 0.5
     kern_nx, kern_ny = np.round(scotts_factor * 2 * np.pi * std_devs)
 
-    inv_cov = np.linalg.inv(cov * scotts_factor ** 2)
+    inv_cov = np.linalg.inv(cov * scotts_factor**2)
     x_x = np.arange(kern_nx) - kern_nx / 2
     y_y = np.arange(kern_ny) - kern_ny / 2
 
@@ -854,8 +854,8 @@ def _fast_kde_2d(x, y, gridsize=(128, 128), circular=False):
     grid = coo_matrix((weights, xyi), shape=(n_x, n_y)).toarray()
     grid = convolve2d(grid, kernel, mode="same", boundary=boundary)
 
-    norm_factor = np.linalg.det(2 * np.pi * cov * scotts_factor ** 2)
-    norm_factor = len_x * d_x * d_y * norm_factor ** 0.5
+    norm_factor = np.linalg.det(2 * np.pi * cov * scotts_factor**2)
+    norm_factor = len_x * d_x * d_y * norm_factor**0.5
 
     grid /= norm_factor
diff --git a/arviz/stats/diagnostics.py b/arviz/stats/diagnostics.py
index 67ac811766..f37492c0ab 100644
--- a/arviz/stats/diagnostics.py
+++ b/arviz/stats/diagnostics.py
@@ -735,7 +735,7 @@ def _ess_sd(ary, relative=False):
     if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
         return np.nan
     ary = _split_chains(ary)
-    return min(_ess(ary, relative=relative), _ess(ary ** 2, relative=relative))
+    return min(_ess(ary, relative=relative), _ess(ary**2, relative=relative))
 
 
 def _ess_quantile(ary, prob, relative=False):
diff --git a/arviz/stats/stats.py b/arviz/stats/stats.py
index d06cbcae7c..a81b45cd96 100644
--- a/arviz/stats/stats.py
+++ b/arviz/stats/stats.py
@@ -929,7 +929,7 @@ def _gpdfit(ary):
     prior_bs = 3
     prior_k = 10
     n = len(ary)
-    m_est = 30 + int(n ** 0.5)
+    m_est = 30 + int(n**0.5)
 
     b_ary = 1 - np.sqrt(m_est / (np.arange(1, m_est + 1, dtype=float) - 0.5))
     b_ary /= prior_bs * ary[int(n / 4 + 0.5) - 1]
diff --git a/arviz/tests/base_tests/test_stats.py b/arviz/tests/base_tests/test_stats.py
index 30d6333962..66b832194f 100644
--- a/arviz/tests/base_tests/test_stats.py
+++ b/arviz/tests/base_tests/test_stats.py
@@ -163,7 +163,7 @@ def test_r2_score():
     y = np.random.normal(x, 1)
     y_pred = x + np.random.randn(300, 100)
     res = linregress(x, y)
-    assert_allclose(res.rvalue ** 2, r2_score(y, y_pred).r2, 2)
+    assert_allclose(res.rvalue**2, r2_score(y, y_pred).r2, 2)
 
 
 @pytest.mark.parametrize("method", ["stacking", "BB-pseudo-BMA", "pseudo-BMA"])
@@ -627,7 +627,7 @@ def test_loo_pit_multi_lik():
         posterior={"a": np.random.randn(4, 100)},
         posterior_predictive={"y": post_pred},
         observed_data={"y": obs},
-        log_likelihood={"y": -(post_pred ** 2), "decoy": np.zeros_like(post_pred)},
+        log_likelihood={"y": -(post_pred**2), "decoy": np.zeros_like(post_pred)},
     )
     loo_pit_data = loo_pit(idata, y="y")
     assert np.all((loo_pit_data >= 0) & (loo_pit_data <= 1))
diff --git a/arviz/tests/base_tests/test_utils.py b/arviz/tests/base_tests/test_utils.py
index 595bab613b..a4e05026b7 100644
--- a/arviz/tests/base_tests/test_utils.py
+++ b/arviz/tests/base_tests/test_utils.py
@@ -360,7 +360,7 @@ def test_find_hdi_contours(mean, cov, contour_sigma):
     for idx, sigma in enumerate(contour_sigma):
         contour_sp[idx] = prob_dist.pdf(mean + sigma * stdevs[0] * eigenvecs[0])
 
-    hdi_probs = 1 - np.exp(-0.5 * contour_sigma ** 2)
+    hdi_probs = 1 - np.exp(-0.5 * contour_sigma**2)
     contour_az = _find_hdi_contours(density, hdi_probs)
 
     np.testing.assert_allclose(contour_sp, contour_az, rtol=1e-2, atol=1e-4)
diff --git a/arviz/tests/external_tests/test_data_emcee.py b/arviz/tests/external_tests/test_data_emcee.py
index 978e4f74c2..d1735ce89f 100644
--- a/arviz/tests/external_tests/test_data_emcee.py
+++ b/arviz/tests/external_tests/test_data_emcee.py
@@ -123,13 +123,13 @@ def test_slices_warning(self, data, slices):
             from_emcee(data.obj, slices=slices)
 
     def test_no_blobs_error(self):
-        sampler = emcee.EnsembleSampler(6, 1, lambda x: -(x ** 2))
+        sampler = emcee.EnsembleSampler(6, 1, lambda x: -(x**2))
         sampler.run_mcmc(np.random.normal(size=(6, 1)), 20)
         with pytest.raises(ValueError):
             from_emcee(sampler, blob_names=["inexistent"])
 
     def test_peculiar_blobs(self, data):
-        sampler = emcee.EnsembleSampler(6, 1, lambda x: (-(x ** 2), (np.random.normal(x), 3)))
+        sampler = emcee.EnsembleSampler(6, 1, lambda x: (-(x**2), (np.random.normal(x), 3)))
         sampler.run_mcmc(np.random.normal(size=(6, 1)), 20)
         inference_data = from_emcee(sampler, blob_names=["normal", "threes"])
         fails = check_multiple_attrs({"log_likelihood": ["normal", "threes"]}, inference_data)
@@ -139,7 +139,7 @@ def test_peculiar_blobs(self, data):
         assert not fails
 
     def test_single_blob(self):
-        sampler = emcee.EnsembleSampler(6, 1, lambda x: (-(x ** 2), 3))
+        sampler = emcee.EnsembleSampler(6, 1, lambda x: (-(x**2), 3))
         sampler.run_mcmc(np.random.normal(size=(6, 1)), 20)
         inference_data = from_emcee(sampler, blob_names=["blob"], blob_groups=["blob_group"])
         fails = check_multiple_attrs({"blob_group": ["blob"]}, inference_data)
diff --git a/arviz/tests/helpers.py b/arviz/tests/helpers.py
index 72b76f4106..57b6af9164 100644
--- a/arviz/tests/helpers.py
+++ b/arviz/tests/helpers.py
@@ -291,9 +291,9 @@ def _emcee_lnprior(theta):
     # Half-cauchy prior, hwhm=25
     if tau < 0:
         return -np.inf
-    prior_tau = -np.log(tau ** 2 + 25 ** 2)
+    prior_tau = -np.log(tau**2 + 25**2)
     prior_mu = -((mu / 10) ** 2)  # normal prior, loc=0, scale=10
-    prior_eta = -np.sum(eta ** 2)  # normal prior, loc=0, scale=1
+    prior_eta = -np.sum(eta**2)  # normal prior, loc=0, scale=1
     return prior_mu + prior_tau + prior_eta
 
 
diff --git a/asv_benchmarks/benchmarks/benchmarks.py b/asv_benchmarks/benchmarks/benchmarks.py
index aa19ca24da..f8a82242b5 100644
--- a/asv_benchmarks/benchmarks/benchmarks.py
+++ b/asv_benchmarks/benchmarks/benchmarks.py
@@ -55,7 +55,7 @@ def time_circ_std(self, numba_flag):
 
 
 class Kde_1d:
-    params = [(True, False), (10 ** 5, 10 ** 6, 10 ** 7)]
+    params = [(True, False), (10**5, 10**6, 10**7)]
     param_names = ("Numba", "n")
 
     def setup(self, numba_flag, n):
@@ -70,7 +70,7 @@ def time_fast_kde_normal(self, numba_flag, n):
 
 
 class Fast_KDE_2d:
-    params = [(True, False), ((100, 10 ** 4), (10 ** 4, 100), (1000, 1000))]
+    params = [(True, False), ((100, 10**4), (10**4, 100), (1000, 1000))]
     param_names = ("Numba", "shape")
 
     def setup(self, numba_flag, shape):
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 93ca01e2c1..7f8615631b 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -6,7 +6,7 @@ pydocstyle
 pylint
 pytest
 pytest-cov
-black==21.12b0 ; python_version >= '3.6'
+black ; python_version >= '3.6'
 typing_copilot ; python_version >= '3.7'
 mypy<0.800
 cloudpickle<1.5.0
diff --git a/requirements-docs.txt b/requirements-docs.txt
index e663d54311..83b41bed39 100644
--- a/requirements-docs.txt
+++ b/requirements-docs.txt
@@ -12,5 +12,5 @@ sphinx-notfound-page
 sphinx-copybutton
 bokeh
 sphinx_design
-sphinx-codeautolink @ git+https://github.com/felix-hilden/sphinx-codeautolink
+sphinx-codeautolink>=0.9.0
 jupyter-sphinx
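Note (outside the patch): a minimal sketch of the power-operator rule that black >= 22 applies throughout this reformat. Spaces around ** are removed only when both operands are "simple" (plain names, numeric literals, or attribute chains such as np.pi); they are kept when an operand is a call, subscript, or parenthesized expression. The variable values below are illustrative only, not taken from the ArviZ code.

    import numpy as np

    n = 300
    markersize = 6.0
    m_est = 30 + int(n**0.5)                 # simple name and literal: operands hug **
    s = markersize**2                        # same rule for plain floats
    scale = np.pi**0.5                       # attribute access counts as simple
    norm = (2 * n * np.pi**0.5) ** (-0.4)    # parenthesized operand: spaces kept
    var = np.diag(np.eye(2)) ** 0.5          # call result as operand: spaces kept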