From 188895a8d2d4db6dafed172c71019c7e6f3ac377 Mon Sep 17 00:00:00 2001 From: jacopok Date: Thu, 23 May 2024 14:18:32 +0200 Subject: [PATCH 1/9] docs: draft of results dict explanation --- ultranest/integrator.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/ultranest/integrator.py b/ultranest/integrator.py index f74560b0..54b38abe 100644 --- a/ultranest/integrator.py +++ b/ultranest/integrator.py @@ -2438,6 +2438,42 @@ def run_iter( Yields ------ results: dict + - niter (int): number of sampler iterations (not likelihood evaluations!) + - logz (float64): natural logarithm of the evidence $Z = \int p(d|\theta) p(\theta) \text{d}\theta$ + - logzerr (float64): $1\sigma$ error on $\log Z$ ([can be safely assumed to be Gaussian](https://github.com/JohannesBuchner/UltraNest/issues/63)) + - logz_bs (float64): estimate of $\log Z$ from bootstrapping + - logzerr_bs (float64): error on the estimate of $\log Z$ from bootstrapping + - logz_single (float64): (?) + - logzerr_single (float64): (?) + - logzerr_tail (float64): remainder integral contribution (?) + - ess (float64): effective sample size, i.e. number of samples divided by the estimated correlation length + - H (float64): [information gained](https://arxiv.org/abs/2205.00009) + - Herr (float64): (Gaussian) $1\sigma$ error on $H$ + - posterior (dict): summary information on the posterior marginals - a dictionary of lists each with as many items as the fit parameters, indexed as $\theta_i$ in the following: + - mean (list): expectation value of $\theta_i$ + - stdev (list): standard deviation of $\theta_i$ + - median (list): median of $\theta_i$ + - errlo (list): one-sigma lower quantile of the marginal for $\theta_i$, i.e. $15.8655$% quantile + - errup (list): one-sigma upper quantile of the marginal for $\theta_i$, i.e. $84.1345$% quantile + - information_gain_bits (list): information gain from the marginal prior on $\theta_i$ to the posterior + - weighted_samples (dict): weighted samples from the posterior, as computed during sampling, sorted by their log-likelihood value + - upoints (ndarray): sample locations in the unit cube $[0, 1]^{d}$, where $d$ is the number of parameters - the shape is `n_iter` by $d$ + - points (ndarray): sample locations in the physical, user-provided space (same shape as `upoints`) + - weights (ndarray): sample weights - shape `n_iter`, they add to 1 + - logw (ndarray): ? + - bootstrapped_weights (ndarray): ? + - logl (ndarray): log-likelihood values at the sample points (?) + - samples (ndarray): re-weighted posterior samples: distributed according to $p(\theta | d)$ - these points are not sorted, and can be assumed to have been randomly shuffled (?) + - maximum_likelihood (dict): summary information on the maximum likelihood value $\theta_{ML}$ found by the posterior exploration + - logl (float64): value of the log-likelihood at this point: $p(d | \theta_{ML})$ + - point (list): coordinates of $\theta_{ML}$ in the physical space + - point_untransformed (list): coordinates of $\theta_{ML}$ in the unit cube + - ncall (int): total number of likelihood evaluations (accepted and not) + - paramnames (list): input parameter names + - insertion_order_MWW_test (dict): results for the MWW test (?, what is [Buchner+21 in prep](https://johannesbuchner.github.io/UltraNest/performance.html#output-files)?) 
+ - independent_iterations (float) + - converged (bool) + """ # frac_remain=1 means 1:1 -> dlogz=log(0.5) # frac_remain=0.1 means 1:10 -> dlogz=log(0.1) From 81d87f17afb50603c69b24d2fd8865d2be2ada39 Mon Sep 17 00:00:00 2001 From: jacopok Date: Tue, 28 May 2024 15:19:20 +0200 Subject: [PATCH 2/9] Update draft of results dictionary documentation --- ultranest/integrator.py | 88 +++++++++++++++++++++++------------------ 1 file changed, 49 insertions(+), 39 deletions(-) diff --git a/ultranest/integrator.py b/ultranest/integrator.py index 54b38abe..8a664ce6 100644 --- a/ultranest/integrator.py +++ b/ultranest/integrator.py @@ -2292,7 +2292,7 @@ def run( widen_before_initial_plateau_num_warn=10000, widen_before_initial_plateau_num_max=50000, ): - """Run until target convergence criteria are fulfilled. + r"""Run until target convergence criteria are fulfilled. Parameters ---------- @@ -2376,6 +2376,48 @@ def run( of initial live points so that once the plateau is traversed, *min_num_live_points* live points remain, but not more than *widen_before_initial_plateau_num_warn*. + + + Returns + ------ + results (dict): Results dictionary, with the following entries: + + - samples (ndarray): re-weighted posterior samples: distributed according to :math:`p(\theta | d)` - these points are not sorted, and can be assumed to have been randomly shuffled. See :py:func:`ultranest.utils.resample_equal` for more details. + - niter (int): number of sampler iterations + - ncall (int): total number of likelihood evaluations (accepted and not) + - logz (float64): natural logarithm of the evidence :math:`\log Z = \log \int p(d|\theta) p(\theta) \text{d}\theta` + - logzerr (float64): :math:`1\sigma` error on :math:`\log Z` (`can be safely assumed to be Gaussian `_) + - logz_bs (float64): estimate of :math:`\log Z` from bootstrapping - for details, see the `ultranest paper `_ + - logzerr_bs (float64): estimate of the error on the of :math:`\log Z` from bootstrapping + - logz_single (float64): estimate of :math:`\log Z` from a single sampler + - logzerr_single (float64): estimate of the error :math:`\log Z` from a single sampler + - logzerr_tail (float64): contribution of the tail (i.e. the terminal leaves of the tree) to the error on :math:`\log Z` (?) + - ess (float64): effective sample size, i.e. number of samples divided by the estimated correlation length + - H (float64): `information gained `_ + - Herr (float64): (Gaussian) :math:`1\sigma` error on :math:`H` + - posterior (dict): summary information on the posterior marginal distributions for each parameter - a dictionary of lists each with as many items as the fit parameters, indexed as :math:`\theta_i` in the following: + - mean (list): expectation value of :math:`\theta_i` + - stdev (list): standard deviation of :math:`\theta_i` + - median (list): median of :math:`\theta_i` + - errlo (list): one-sigma lower quantile of the marginal for :math:`\theta_i`, i.e. 15.8655% quantile + - errup (list): one-sigma upper quantile of the marginal for :math:`\theta_i`, i.e. 
84.1345% quantile + - information_gain_bits (list): information gain from the marginal prior on :math:`\theta_i` to the posterior + - weighted_samples (dict): weighted samples from the posterior, as computed during sampling, sorted by their log-likelihood value + - upoints (ndarray): sample locations in the unit cube :math:`[0, 1]^{d}`, where $d$ is the number of parameters - the shape is `n_iter` by :math:`d` + - points (ndarray): sample locations in the physical, user-provided space (same shape as `upoints`) + - weights (ndarray): sample weights - shape `n_iter`, they add up to 1 + - logw (ndarray): ? + - bootstrapped_weights (ndarray): ? + - logl (ndarray): log-likelihood values at the sample points (?) + - maximum_likelihood (dict): summary information on the maximum likelihood value :math:`\theta_{ML}` found by the posterior exploration + - logl (float64): value of the log-likelihood at this point: :math:`\log p(d | \theta_{ML})` + - point (list): coordinates of :math:`\theta_{ML}` in the physical space + - point_untransformed (list): coordinates of :math:`\theta_{ML}` in the unit cube :math:`[0, 1]^{d}` + - paramnames (list): input parameter names + - insertion_order_MWW_test (dict): results for the `Mann-Whitney U-test `_; for more information, see the :py:class:`ultranest.netiter.MultiCounter` class + - independent_iterations (float): shortest insertion order test run length + - converged (bool): whether the run is converged according to the MWW test, at the given threshold + """ for _result in self.run_iter( update_interval_volume_fraction=update_interval_volume_fraction, @@ -2426,7 +2468,7 @@ def run_iter( widen_before_initial_plateau_num_warn=10000, widen_before_initial_plateau_num_max=50000, ): - """Iterate towards convergence. + r"""Iterate towards convergence. Use as an iterator like so:: @@ -2437,43 +2479,11 @@ def run_iter( Yields ------ - results: dict - - niter (int): number of sampler iterations (not likelihood evaluations!) - - logz (float64): natural logarithm of the evidence $Z = \int p(d|\theta) p(\theta) \text{d}\theta$ - - logzerr (float64): $1\sigma$ error on $\log Z$ ([can be safely assumed to be Gaussian](https://github.com/JohannesBuchner/UltraNest/issues/63)) - - logz_bs (float64): estimate of $\log Z$ from bootstrapping - - logzerr_bs (float64): error on the estimate of $\log Z$ from bootstrapping - - logz_single (float64): (?) - - logzerr_single (float64): (?) - - logzerr_tail (float64): remainder integral contribution (?) - - ess (float64): effective sample size, i.e. number of samples divided by the estimated correlation length - - H (float64): [information gained](https://arxiv.org/abs/2205.00009) - - Herr (float64): (Gaussian) $1\sigma$ error on $H$ - - posterior (dict): summary information on the posterior marginals - a dictionary of lists each with as many items as the fit parameters, indexed as $\theta_i$ in the following: - - mean (list): expectation value of $\theta_i$ - - stdev (list): standard deviation of $\theta_i$ - - median (list): median of $\theta_i$ - - errlo (list): one-sigma lower quantile of the marginal for $\theta_i$, i.e. $15.8655$% quantile - - errup (list): one-sigma upper quantile of the marginal for $\theta_i$, i.e. 
$84.1345$% quantile - - information_gain_bits (list): information gain from the marginal prior on $\theta_i$ to the posterior - - weighted_samples (dict): weighted samples from the posterior, as computed during sampling, sorted by their log-likelihood value - - upoints (ndarray): sample locations in the unit cube $[0, 1]^{d}$, where $d$ is the number of parameters - the shape is `n_iter` by $d$ - - points (ndarray): sample locations in the physical, user-provided space (same shape as `upoints`) - - weights (ndarray): sample weights - shape `n_iter`, they add to 1 - - logw (ndarray): ? - - bootstrapped_weights (ndarray): ? - - logl (ndarray): log-likelihood values at the sample points (?) - - samples (ndarray): re-weighted posterior samples: distributed according to $p(\theta | d)$ - these points are not sorted, and can be assumed to have been randomly shuffled (?) - - maximum_likelihood (dict): summary information on the maximum likelihood value $\theta_{ML}$ found by the posterior exploration - - logl (float64): value of the log-likelihood at this point: $p(d | \theta_{ML})$ - - point (list): coordinates of $\theta_{ML}$ in the physical space - - point_untransformed (list): coordinates of $\theta_{ML}$ in the unit cube - - ncall (int): total number of likelihood evaluations (accepted and not) - - paramnames (list): input parameter names - - insertion_order_MWW_test (dict): results for the MWW test (?, what is [Buchner+21 in prep](https://johannesbuchner.github.io/UltraNest/performance.html#output-files)?) - - independent_iterations (float) - - converged (bool) - + + results (dict): + + Results dictionary computed at the current iteration, with the same + keys as discussed in the :py:meth:`run` method. """ # frac_remain=1 means 1:1 -> dlogz=log(0.5) # frac_remain=0.1 means 1:10 -> dlogz=log(0.1) From d84ddfe0b8e2fd1d4c8750612954bb5f3a837421 Mon Sep 17 00:00:00 2001 From: jacopok Date: Tue, 28 May 2024 15:43:28 +0200 Subject: [PATCH 3/9] docs: update information on logged files --- docs/performance.rst | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/docs/performance.rst b/docs/performance.rst index e3ee853a..4e075531 100644 --- a/docs/performance.rst +++ b/docs/performance.rst @@ -98,16 +98,7 @@ If a `log_dir` directory was specified, you will find these files: * info folder: machine-readable summaries of the posterior * **post_summary.csv**: for each parameter: mean, std, median, upper and lower 1 sigma error. Can be read with `pandas.read_csv `_. - * **results.json**: Contains detailed output of the nested sampling run. Can be read with `json.load `_. - - * paramnames: parameter names - * ncall, niter: Number of likelihood calls, nested sampling iterations - * maximum_likelihood: highest loglikelihood point found so far - * H, Herr: (global) information gain - * ess: effective sample size - * logz, logzerr: ln(Z) and its uncertainty. logzerr_tail is the remainder integral contribution, logzerr_bs is from bootstrapping - * posterior: for each parameter: mean, std, median, upper and lower 1 sigma error, and `information gain `_. - * insertion_order_MWW_test: MWW test results (see Buchner+21 in prep) + * **results.json**: Contains detailed output of the nested sampling run, with all the same keys as the result dictionary in :py:meth:`ultranest.integrator.ReactiveNestedSampler.run`, except for ``samples`` and ``weighted_samples`` (as the sample information is saved in separate files - see the following entries in this list). Can be read with `json.load `_. 
* chains: machine-readable chains From 65d5ca0951b1429752ef45764cc47ea396d28006 Mon Sep 17 00:00:00 2001 From: jacopok Date: Tue, 28 May 2024 15:43:54 +0200 Subject: [PATCH 4/9] add details on the computation of logzerr and ess --- ultranest/integrator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ultranest/integrator.py b/ultranest/integrator.py index 8a664ce6..4ad5d28e 100644 --- a/ultranest/integrator.py +++ b/ultranest/integrator.py @@ -2386,13 +2386,13 @@ def run( - niter (int): number of sampler iterations - ncall (int): total number of likelihood evaluations (accepted and not) - logz (float64): natural logarithm of the evidence :math:`\log Z = \log \int p(d|\theta) p(\theta) \text{d}\theta` - - logzerr (float64): :math:`1\sigma` error on :math:`\log Z` (`can be safely assumed to be Gaussian `_) + - logzerr (float64): global estimate of the :math:`1\sigma` error on :math:`\log Z` (`can be safely assumed to be Gaussian `_); obtained as the quadratic mean of ``logz_bs`` and ``logz_tail`` - logz_bs (float64): estimate of :math:`\log Z` from bootstrapping - for details, see the `ultranest paper `_ - logzerr_bs (float64): estimate of the error on the of :math:`\log Z` from bootstrapping - logz_single (float64): estimate of :math:`\log Z` from a single sampler - logzerr_single (float64): estimate of the error :math:`\log Z` from a single sampler - logzerr_tail (float64): contribution of the tail (i.e. the terminal leaves of the tree) to the error on :math:`\log Z` (?) - - ess (float64): effective sample size, i.e. number of samples divided by the estimated correlation length + - ess (float64): effective sample size, i.e. number of samples divided by the estimated correlation length, estimated as :math:`N / (1 + N^{-1} \sum_i (N w_i - 1)^2)` - H (float64): `information gained `_ - Herr (float64): (Gaussian) :math:`1\sigma` error on :math:`H` - posterior (dict): summary information on the posterior marginal distributions for each parameter - a dictionary of lists each with as many items as the fit parameters, indexed as :math:`\theta_i` in the following: From 629900aa82a5ad363634d41a6cd4f5e611206453 Mon Sep 17 00:00:00 2001 From: jacopok Date: Tue, 28 May 2024 15:45:12 +0200 Subject: [PATCH 5/9] docs: small formatting fixes --- ultranest/integrator.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ultranest/integrator.py b/ultranest/integrator.py index 4ad5d28e..5117b9fb 100644 --- a/ultranest/integrator.py +++ b/ultranest/integrator.py @@ -2392,7 +2392,7 @@ def run( - logz_single (float64): estimate of :math:`\log Z` from a single sampler - logzerr_single (float64): estimate of the error :math:`\log Z` from a single sampler - logzerr_tail (float64): contribution of the tail (i.e. the terminal leaves of the tree) to the error on :math:`\log Z` (?) - - ess (float64): effective sample size, i.e. number of samples divided by the estimated correlation length, estimated as :math:`N / (1 + N^{-1} \sum_i (N w_i - 1)^2)` + - ess (float64): effective sample size, i.e. 
number of samples divided by the estimated correlation length, estimated as :math:`N / (1 + N^{-1} \sum_i (N w_i - 1)^2)` where :math:`w_i` are the sample weights - H (float64): `information gained `_ - Herr (float64): (Gaussian) :math:`1\sigma` error on :math:`H` - posterior (dict): summary information on the posterior marginal distributions for each parameter - a dictionary of lists each with as many items as the fit parameters, indexed as :math:`\theta_i` in the following: @@ -2403,9 +2403,9 @@ def run( - errup (list): one-sigma upper quantile of the marginal for :math:`\theta_i`, i.e. 84.1345% quantile - information_gain_bits (list): information gain from the marginal prior on :math:`\theta_i` to the posterior - weighted_samples (dict): weighted samples from the posterior, as computed during sampling, sorted by their log-likelihood value - - upoints (ndarray): sample locations in the unit cube :math:`[0, 1]^{d}`, where $d$ is the number of parameters - the shape is `n_iter` by :math:`d` - - points (ndarray): sample locations in the physical, user-provided space (same shape as `upoints`) - - weights (ndarray): sample weights - shape `n_iter`, they add up to 1 + - upoints (ndarray): sample locations in the unit cube :math:`[0, 1]^{d}`, where $d$ is the number of parameters - the shape is ``n_iter`` by :math:`d` + - points (ndarray): sample locations in the physical, user-provided space (same shape as ``upoints``) + - weights (ndarray): sample weights - shape ``n_iter``, they add up to 1 - logw (ndarray): ? - bootstrapped_weights (ndarray): ? - logl (ndarray): log-likelihood values at the sample points (?) From c7e66aa031c9651b0dd9ca686bc5b39f9f98f3f9 Mon Sep 17 00:00:00 2001 From: jacopok Date: Tue, 28 May 2024 16:24:49 +0200 Subject: [PATCH 6/9] docs: small tweaks to results explanation --- ultranest/integrator.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/ultranest/integrator.py b/ultranest/integrator.py index 5117b9fb..3430e5e8 100644 --- a/ultranest/integrator.py +++ b/ultranest/integrator.py @@ -2381,16 +2381,16 @@ def run( Returns ------ results (dict): Results dictionary, with the following entries: - + - samples (ndarray): re-weighted posterior samples: distributed according to :math:`p(\theta | d)` - these points are not sorted, and can be assumed to have been randomly shuffled. See :py:func:`ultranest.utils.resample_equal` for more details. + - logz (float64): natural logarithm of the evidence :math:`\log Z = \log \int p(d|\theta) p(\theta) \text{d}\theta` + - logzerr (float64): global estimate of the :math:`1\sigma` error on :math:`\log Z` (`can be safely assumed to be Gaussian `_); obtained as the quadratic sum of ``logz_bs`` and ``logz_tail``. Users are advised to use ``logz`` :math:`\pm` ``logzerr`` as the best estimate for the evidence and its error. 
- niter (int): number of sampler iterations - ncall (int): total number of likelihood evaluations (accepted and not) - - logz (float64): natural logarithm of the evidence :math:`\log Z = \log \int p(d|\theta) p(\theta) \text{d}\theta` - - logzerr (float64): global estimate of the :math:`1\sigma` error on :math:`\log Z` (`can be safely assumed to be Gaussian `_); obtained as the quadratic mean of ``logz_bs`` and ``logz_tail`` - logz_bs (float64): estimate of :math:`\log Z` from bootstrapping - for details, see the `ultranest paper `_ - logzerr_bs (float64): estimate of the error on the of :math:`\log Z` from bootstrapping - logz_single (float64): estimate of :math:`\log Z` from a single sampler - - logzerr_single (float64): estimate of the error :math:`\log Z` from a single sampler + - logzerr_single (float64): estimate of the error :math:`\log Z` from a single sampler, obtained as :math:`\sqrt{H / n_{\text{live}}}` - logzerr_tail (float64): contribution of the tail (i.e. the terminal leaves of the tree) to the error on :math:`\log Z` (?) - ess (float64): effective sample size, i.e. number of samples divided by the estimated correlation length, estimated as :math:`N / (1 + N^{-1} \sum_i (N w_i - 1)^2)` where :math:`w_i` are the sample weights - H (float64): `information gained `_ @@ -2405,19 +2405,18 @@ def run( - weighted_samples (dict): weighted samples from the posterior, as computed during sampling, sorted by their log-likelihood value - upoints (ndarray): sample locations in the unit cube :math:`[0, 1]^{d}`, where $d$ is the number of parameters - the shape is ``n_iter`` by :math:`d` - points (ndarray): sample locations in the physical, user-provided space (same shape as ``upoints``) - - weights (ndarray): sample weights - shape ``n_iter``, they add up to 1 - - logw (ndarray): ? - - bootstrapped_weights (ndarray): ? - - logl (ndarray): log-likelihood values at the sample points (?) + - weights (ndarray): sample weights - shape ``n_iter``, they sum to 1 + - logw (ndarray): logs of the sample weights (?) 
+ - bootstrapped_weights (ndarray): bootstrapped estimate of the sample weights + - logl (ndarray): log-likelihood values at the sample points - maximum_likelihood (dict): summary information on the maximum likelihood value :math:`\theta_{ML}` found by the posterior exploration - logl (float64): value of the log-likelihood at this point: :math:`\log p(d | \theta_{ML})` - point (list): coordinates of :math:`\theta_{ML}` in the physical space - point_untransformed (list): coordinates of :math:`\theta_{ML}` in the unit cube :math:`[0, 1]^{d}` - paramnames (list): input parameter names - - insertion_order_MWW_test (dict): results for the `Mann-Whitney U-test `_; for more information, see the :py:class:`ultranest.netiter.MultiCounter` class + - insertion_order_MWW_test (dict): results for the Mann-Whitney U-test; for more information, see the :py:class:`ultranest.netiter.MultiCounter` class or `section 4.5.2 of Buchner 2023 `_ - independent_iterations (float): shortest insertion order test run length - converged (bool): whether the run is converged according to the MWW test, at the given threshold - """ for _result in self.run_iter( update_interval_volume_fraction=update_interval_volume_fraction, From c0006be2d7b0b325f7d58237e1102150a05de8a4 Mon Sep 17 00:00:00 2001 From: jacopok Date: Wed, 29 May 2024 16:32:25 +0200 Subject: [PATCH 7/9] fix whitespace --- ultranest/integrator.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/ultranest/integrator.py b/ultranest/integrator.py index 3430e5e8..982b4606 100644 --- a/ultranest/integrator.py +++ b/ultranest/integrator.py @@ -2376,12 +2376,12 @@ def run( of initial live points so that once the plateau is traversed, *min_num_live_points* live points remain, but not more than *widen_before_initial_plateau_num_warn*. - - + + Returns ------ results (dict): Results dictionary, with the following entries: - + - samples (ndarray): re-weighted posterior samples: distributed according to :math:`p(\theta | d)` - these points are not sorted, and can be assumed to have been randomly shuffled. See :py:func:`ultranest.utils.resample_equal` for more details. - logz (float64): natural logarithm of the evidence :math:`\log Z = \log \int p(d|\theta) p(\theta) \text{d}\theta` - logzerr (float64): global estimate of the :math:`1\sigma` error on :math:`\log Z` (`can be safely assumed to be Gaussian `_); obtained as the quadratic sum of ``logz_bs`` and ``logz_tail``. Users are advised to use ``logz`` :math:`\pm` ``logzerr`` as the best estimate for the evidence and its error. @@ -2478,12 +2478,11 @@ def run_iter( Yields ------ - + results (dict): - Results dictionary computed at the current iteration, with the same - keys as discussed in the :py:meth:`run` method. - """ + keys as discussed in the :py:meth:`run` method. 
+"""
         # frac_remain=1 means 1:1 -> dlogz=log(0.5)
         # frac_remain=0.1 means 1:10 -> dlogz=log(0.1)
         # dlogz_min = log(1./(1 + frac_remain))

From 6a61fe1a438b9657021b97658fbe779cdfe06a21 Mon Sep 17 00:00:00 2001
From: jacopok
Date: Wed, 29 May 2024 17:25:32 +0200
Subject: [PATCH 8/9] split long lines

---
 ultranest/integrator.py | 50 ++++++++++++++++++++++++++++++-----------
 1 file changed, 37 insertions(+), 13 deletions(-)

diff --git a/ultranest/integrator.py b/ultranest/integrator.py
index 982b4606..990abf60 100644
--- a/ultranest/integrator.py
+++ b/ultranest/integrator.py
@@ -2382,39 +2382,63 @@ def run(
         ------
         results (dict): Results dictionary, with the following entries:
 
+        - samples (ndarray): re-weighted posterior samples: distributed according
+          to :math:`p(\theta | d)` - these points are not sorted, and can be assumed
+          to have been randomly shuffled.
+          See :py:func:`ultranest.utils.resample_equal` for more details.
+        - logz (float64): natural logarithm of the evidence
+          :math:`\log Z = \log \int p(d|\theta) p(\theta) \text{d}\theta`
+        - logzerr (float64): global estimate of the :math:`1\sigma` error on
+          :math:`\log Z`
+          (`can be safely assumed to be Gaussian <https://github.com/JohannesBuchner/UltraNest/issues/63>`_);
+          obtained by adding ``logzerr_bs`` and ``logzerr_tail`` in quadrature.
+          Users are advised to use ``logz`` :math:`\pm` ``logzerr``
+          as the best estimate for the evidence and its error.
         - niter (int): number of sampler iterations
         - ncall (int): total number of likelihood evaluations (accepted and not)
+        - logz_bs (float64): estimate of :math:`\log Z` from bootstrapping -
+          for details, see the
+          `ultranest paper `_
+        - logzerr_bs (float64): estimate of the error on :math:`\log Z`
+          from bootstrapping
         - logz_single (float64): estimate of :math:`\log Z` from a single sampler
-        - logzerr_single (float64): estimate of the error :math:`\log Z` from a single sampler, obtained as :math:`\sqrt{H / n_{\text{live}}}`
+        - logzerr_single (float64): estimate of the error on :math:`\log Z` from a
+          single sampler, obtained as :math:`\sqrt{H / n_{\text{live}}}`
+        - logzerr_tail (float64): contribution of the tail (i.e. the remainder
+          integral over the final live points, the terminal leaves of the tree)
+          to the error on :math:`\log Z`
+        - ess (float64): effective sample size, i.e. number of samples divided by
+          the estimated correlation length, estimated as
+          :math:`N / (1 + N^{-1} \sum_i (N w_i - 1)^2)` where :math:`w_i` are
+          the sample weights while :math:`N` is the number of samples
         - H (float64): `information gained <https://arxiv.org/abs/2205.00009>`_
         - Herr (float64): (Gaussian) :math:`1\sigma` error on :math:`H`
+        - posterior (dict): summary information on the posterior marginal distributions for each parameter -
+          a dictionary of lists each with as many items as the fit parameters,
+          indexed as :math:`\theta_i` in the following:
             - mean (list): expectation value of :math:`\theta_i`
             - stdev (list): standard deviation of :math:`\theta_i`
             - median (list): median of :math:`\theta_i`
             - errlo (list): one-sigma lower quantile of the marginal for :math:`\theta_i`, i.e. 15.8655% quantile
             - errup (list): one-sigma upper quantile of the marginal for :math:`\theta_i`, i.e. 84.1345% quantile
             - information_gain_bits (list): information gain from the marginal prior on :math:`\theta_i` to the posterior
+        - weighted_samples (dict): weighted samples from the posterior, as computed during sampling,
+          sorted by their log-likelihood value
+            - upoints (ndarray): sample locations in the unit cube :math:`[0, 1]^{d}`,
+              where :math:`d` is the number of parameters - the shape is ``n_iter`` by :math:`d`
             - points (ndarray): sample locations in the physical, user-provided space (same shape as ``upoints``)
             - weights (ndarray): sample weights - shape ``n_iter``, they sum to 1
             - logw (ndarray): logs of the sample weights (?)
             - bootstrapped_weights (ndarray): bootstrapped estimate of the sample weights
             - logl (ndarray): log-likelihood values at the sample points
+        - maximum_likelihood (dict): summary information on the maximum likelihood value
+          :math:`\theta_{ML}` found by the posterior exploration
             - logl (float64): value of the log-likelihood at this point: :math:`\log p(d | \theta_{ML})`
             - point (list): coordinates of :math:`\theta_{ML}` in the physical space
             - point_untransformed (list): coordinates of :math:`\theta_{ML}` in the unit cube :math:`[0, 1]^{d}`
         - paramnames (list): input parameter names
+        - insertion_order_MWW_test (dict): results for the Mann-Whitney U-test;
+          for more information, see the :py:class:`ultranest.netiter.MultiCounter` class
+          or `section 4.5.2 of Buchner 2023 `_
             - independent_iterations (float): shortest insertion order test run length
             - converged (bool): whether the run is converged according to the MWW test, at the given threshold
         """

From af444ea760d47b3dc08d956a65d5275ec02056e2 Mon Sep 17 00:00:00 2001
From: jacopok
Date: Wed, 29 May 2024 17:55:04 +0200
Subject: [PATCH 9/9] Comply with pydocstyle

---
 ultranest/integrator.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/ultranest/integrator.py b/ultranest/integrator.py
index 990abf60..2d4d1c19 100644
--- a/ultranest/integrator.py
+++ b/ultranest/integrator.py
@@ -2502,11 +2502,10 @@ def run_iter(
         Yields
         ------
-
         results (dict):
             Results dictionary computed at the current iteration, with the same
             keys as discussed in the :py:meth:`run` method.
-"""
+        """
         # frac_remain=1 means 1:1 -> dlogz=log(0.5)
         # frac_remain=0.1 means 1:10 -> dlogz=log(0.1)
         # dlogz_min = log(1./(1 + frac_remain))
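As a usage sketch of the results dictionary documented above (not part of the patch series; it assumes the standard ``ultranest.ReactiveNestedSampler`` interface with a toy two-parameter Gaussian likelihood, and the parameter names ``a`` and ``b`` are purely illustrative), the documented keys can be accessed like this::

    import numpy as np
    from ultranest import ReactiveNestedSampler

    def loglike(theta):
        # toy Gaussian likelihood centred on 0.5 in every parameter
        return -0.5 * (((theta - 0.5) / 0.1) ** 2).sum()

    def transform(cube):
        # flat prior: the unit cube is already the parameter space
        return cube

    sampler = ReactiveNestedSampler(["a", "b"], loglike, transform=transform)
    results = sampler.run(min_num_live_points=400)

    print(results["logz"], "+/-", results["logzerr"])  # evidence and its global error
    print(results["posterior"]["mean"])                # posterior mean of each parameter
    samples = results["samples"]                       # equally weighted posterior samples

    # effective sample size recomputed from the weighted samples,
    # following the formula quoted in the docstring
    w = results["weighted_samples"]["weights"]
    N = len(w)
    ess = N / (1.0 + ((N * w - 1.0) ** 2).sum() / N)
    print(ess, results["ess"])

The same keys (except ``samples`` and ``weighted_samples``) are what ``results.json`` stores when a ``log_dir`` is passed to the sampler, as noted in the ``docs/performance.rst`` change above.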