From f7cf05cccb59811c7a458da1192243aa22131d0d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 09:58:29 +0200 Subject: [PATCH 01/37] Bump actions/checkout from 3 to 4 (#1436) Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e9847bc68..d8746a3f6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,7 +28,7 @@ jobs: steps: - name: Check out repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} uses: actions/setup-python@v4 @@ -67,7 +67,7 @@ jobs: steps: - name: Check out repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} uses: actions/setup-python@v4 @@ -103,7 +103,7 @@ jobs: steps: - name: Check out repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} uses: actions/setup-python@v4 @@ -136,7 +136,7 @@ jobs: steps: - name: Check out repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} uses: actions/setup-python@v4 @@ -180,7 +180,7 @@ jobs: steps: - name: Check out repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} uses: actions/setup-python@v4 @@ -229,7 +229,7 @@ jobs: steps: - name: Check out repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} uses: actions/setup-python@v4 @@ -265,7 +265,7 @@ jobs: steps: - name: Check out repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} uses: actions/setup-python@v4 @@ -301,7 +301,7 @@ jobs: steps: - name: Check out repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} uses: actions/setup-python@v4 @@ -337,7 +337,7 @@ jobs: steps: - name: Check out repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} uses: actions/setup-python@v4 @@ -369,7 +369,7 @@ jobs: steps: - name: Check out repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} uses: actions/setup-python@v4 @@ -402,7 +402,7 @@ jobs: steps: - name: Check out repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} uses: actions/setup-python@v4 @@ -432,7 +432,7 @@ jobs: steps: - name: Check out repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} uses: actions/setup-python@v4 From 04bf3f602174766fff4ee9ef9e49525778b7fd50 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 10:23:47 +0200 Subject: [PATCH 02/37] Bump actions/setup-python from 4 to 5 (#1437) Bumps [actions/setup-python](https://github.com/actions/setup-python) from 4 to 5. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d8746a3f6..80f17209d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -31,7 +31,7 @@ jobs: uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -70,7 +70,7 @@ jobs: uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -106,7 +106,7 @@ jobs: uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -139,7 +139,7 @@ jobs: uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -183,7 +183,7 @@ jobs: uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -232,7 +232,7 @@ jobs: uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -268,7 +268,7 @@ jobs: uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -304,7 +304,7 @@ jobs: uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -340,7 +340,7 @@ jobs: uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -372,7 +372,7 @@ jobs: uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -405,7 +405,7 @@ jobs: uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -435,7 +435,7 @@ jobs: uses: actions/checkout@v4 - name: Prepare python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} From 
4998509d8c6b5bc94727a0f007b6a37c6be06691 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 11:34:36 +0200 Subject: [PATCH 03/37] Bump codecov/codecov-action from 3 to 4 (#1438) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 3 to 4. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v3...v4) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 80f17209d..4f0ba3fa3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -54,7 +54,7 @@ jobs: CXX: clang++ - name: Coverage - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} file: ./coverage.xml @@ -90,7 +90,7 @@ jobs: run: ulimit -n 65536 65536 && tox -e base - name: Coverage - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} file: ./coverage.xml @@ -162,7 +162,7 @@ jobs: CXX: clang++ - name: Coverage - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} file: ./coverage.xml @@ -216,7 +216,7 @@ jobs: run: tox -e julia - name: Coverage - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} file: ./coverage.xml @@ -252,7 +252,7 @@ jobs: run: tox -e optimize - name: Coverage - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} file: ./coverage.xml @@ -288,7 +288,7 @@ jobs: run: tox -e hierarchical - name: Coverage - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} file: ./coverage.xml @@ -324,7 +324,7 @@ jobs: run: tox -e select - name: Coverage - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} file: ./coverage.xml From c4b1252f29d2597502ee9f74e56144e4d711cc4c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 12:01:24 +0200 Subject: [PATCH 04/37] Bump julia-actions/cache from 1 to 2 (#1439) Bumps [julia-actions/cache](https://github.com/julia-actions/cache) from 1 to 2. - [Release notes](https://github.com/julia-actions/cache/releases) - [Changelog](https://github.com/julia-actions/cache/blob/main/devdocs/making_a_new_release.md) - [Commits](https://github.com/julia-actions/cache/compare/v1...v2) --- updated-dependencies: - dependency-name: julia-actions/cache dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4f0ba3fa3..332019025 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -201,7 +201,7 @@ jobs: version: 1.9 - name: Cache Julia - uses: julia-actions/cache@v1 + uses: julia-actions/cache@v2 - name: Install dependencies run: .github/workflows/install_deps.sh From bdddf13c3bced6047e8fe21f7b27e1f6b0b93745 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 12:28:37 +0200 Subject: [PATCH 05/37] Bump julia-actions/setup-julia from 1 to 2 (#1440) Bumps [julia-actions/setup-julia](https://github.com/julia-actions/setup-julia) from 1 to 2. - [Release notes](https://github.com/julia-actions/setup-julia/releases) - [Changelog](https://github.com/julia-actions/setup-julia/blob/master/devdocs/making_a_new_release.md) - [Commits](https://github.com/julia-actions/setup-julia/compare/v1...v2) --- updated-dependencies: - dependency-name: julia-actions/setup-julia dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 332019025..92817bfb8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -196,7 +196,7 @@ jobs: key: "${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-ci-${{ github.job }}" - name: Install julia - uses: julia-actions/setup-julia@v1 + uses: julia-actions/setup-julia@v2 with: version: 1.9 From 4fb1f9e3962b5dbcd905c70d459916f0bc9d79ca Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Aug 2024 13:28:53 +0200 Subject: [PATCH 06/37] Bump actions/cache from 3 to 4 (#1443) Bumps [actions/cache](https://github.com/actions/cache) from 3 to 4. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 92817bfb8..58bf8b6cc 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -36,7 +36,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~/.cache/pip @@ -75,7 +75,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~/.cache/pip @@ -111,7 +111,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~\AppData\Local\pip\Cache @@ -144,7 +144,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~/.cache/pip @@ -188,7 +188,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Cache tox and cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~/.cache/pip @@ -237,7 +237,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~/.cache/pip @@ -273,7 +273,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~/.cache/pip @@ -309,7 +309,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~/.cache/pip @@ -345,7 +345,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~/.cache/pip @@ -377,7 +377,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~/.cache/pip @@ -410,7 +410,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~/.cache/pip @@ -440,7 +440,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~/.cache/pip From bd4b81b8e5867fdaafbef491d55c0ce1c3dc1c87 Mon Sep 17 00:00:00 2001 From: Doresic <85789271+Doresic@users.noreply.github.com> Date: Tue, 13 Aug 2024 16:40:19 +0200 Subject: [PATCH 07/37] Visualize: allow for log scale of hierarchical parameters for visualization (#1435) * Implement hierarchical log scale for visualization * Add scaling info in parameter description * Fix test, had wrong inner par scale in test problem * review changes, scale in x axis, log noise parameters * Change docstring slightly --- pypesto/hierarchical/base_parameter.py | 17 +++ pypesto/hierarchical/base_problem.py | 37 +++++- .../inner_calculator_collector.py | 8 ++ pypesto/hierarchical/ordinal/problem.py | 7 ++ pypesto/hierarchical/petab.py | 26 +++- pypesto/hierarchical/relative/solver.py | 10 +- .../hierarchical/semiquantitative/problem.py | 15 ++- pypesto/problem/hierarchical.py | 9 ++ pypesto/visualize/observable_mapping.py | 13 ++ pypesto/visualize/parameters.py | 111 +++++++++++++++--- test/hierarchical/test_hierarchical.py | 4 +- 11 files changed, 233 insertions(+), 24 deletions(-) diff --git a/pypesto/hierarchical/base_parameter.py b/pypesto/hierarchical/base_parameter.py index e58cc7d8c..6692f8728 
100644 --- a/pypesto/hierarchical/base_parameter.py +++ b/pypesto/hierarchical/base_parameter.py @@ -68,6 +68,23 @@ def __init__( if scale not in {LIN, LOG, LOG10}: raise ValueError(f"Scale not recognized: {scale}.") + + if ( + scale in [LOG, LOG10] + and inner_parameter_type == InnerParameterType.SIGMA + ): + raise ValueError( + f"Inner parameter type `{inner_parameter_type}` " + f"cannot be log-scaled." + ) + + if scale in [LOG, LOG10] and lb <= 0: + raise ValueError( + f"Lower bound of inner parameter `{inner_parameter_id}` " + f"cannot be non-positive for log-scaled parameters. " + f"Provide a positive lower bound." + ) + self.scale = scale if inner_parameter_type not in ( diff --git a/pypesto/hierarchical/base_problem.py b/pypesto/hierarchical/base_problem.py index 8adb5c721..7482e95c9 100644 --- a/pypesto/hierarchical/base_problem.py +++ b/pypesto/hierarchical/base_problem.py @@ -8,6 +8,7 @@ import numpy as np import pandas as pd +from ..C import LIN, LOG, LOG10 from .base_parameter import InnerParameter try: @@ -82,6 +83,10 @@ def get_interpretable_x_ids(self) -> list[str]: """ return list(self.xs.keys()) + def get_interpretable_x_scales(self) -> list[str]: + """Get scales of interpretable inner parameters.""" + return [x.scale for x in self.xs.values()] + def get_xs_for_type( self, inner_parameter_type: str ) -> list[InnerParameter]: @@ -119,7 +124,9 @@ def get_for_id(self, inner_parameter_id: str) -> InnerParameter: try: return self.xs[inner_parameter_id] except KeyError: - raise KeyError(f"Cannot find parameter with id {id}.") from None + raise KeyError( + f"Cannot find parameter with id {inner_parameter_id}." + ) from None def is_empty(self) -> bool: """Check for emptiness. @@ -222,15 +229,37 @@ def scale_value_dict( def scale_value(val: float | np.array, scale: str) -> float | np.array: """Scale a single value.""" - if scale == "lin": + if scale == LIN: return val - if scale == "log": + if scale == LOG: return np.log(val) - if scale == "log10": + if scale == LOG10: return np.log10(val) raise ValueError(f"Scale {scale} not recognized.") +def scale_back_value_dict( + dct: dict[str, float], problem: InnerProblem +) -> dict[str, float]: + """Scale back a value dictionary.""" + scaled_dct = {} + for key, val in dct.items(): + x = problem.get_for_id(key) + scaled_dct[key] = scale_back_value(val, x.scale) + return scaled_dct + + +def scale_back_value(val: float | np.array, scale: str) -> float | np.array: + """Scale back a single value.""" + if scale == LIN: + return val + if scale == LOG: + return np.exp(val) + if scale == LOG10: + return 10**val + raise ValueError(f"Scale {scale} not recognized.") + + def ix_matrices_from_arrays( ixs: dict[str, list[tuple[int, int, int]]], edatas: list[np.array] ) -> dict[str, list[np.array]]: diff --git a/pypesto/hierarchical/inner_calculator_collector.py b/pypesto/hierarchical/inner_calculator_collector.py index 8713ca33c..e89f4ecd1 100644 --- a/pypesto/hierarchical/inner_calculator_collector.py +++ b/pypesto/hierarchical/inner_calculator_collector.py @@ -286,6 +286,14 @@ def get_interpretable_inner_par_bounds( ub.extend(ub_i) return np.asarray(lb), np.asarray(ub) + def get_interpretable_inner_par_scales(self) -> list[str]: + """Return the scales of interpretable inner parameters of all inner problems.""" + return [ + scale + for inner_calculator in self.inner_calculators + for scale in inner_calculator.inner_problem.get_interpretable_x_scales() + ] + def __call__( self, x_dct: dict, diff --git a/pypesto/hierarchical/ordinal/problem.py 
b/pypesto/hierarchical/ordinal/problem.py index e7967d3bf..85d46312a 100644 --- a/pypesto/hierarchical/ordinal/problem.py +++ b/pypesto/hierarchical/ordinal/problem.py @@ -193,6 +193,13 @@ def get_interpretable_x_ids(self) -> list[str]: """ return [] + def get_interpretable_x_scales(self) -> list[str]: + """Get scales of interpretable inner parameters. + + There are no interpretable inner parameters for the ordinal problem. + """ + return [] + def get_groups_for_xs(self, inner_parameter_type: str) -> list[int]: """Get unique list of ``OptimalScalingParameter.group`` values.""" groups = [x.group for x in self.get_xs_for_type(inner_parameter_type)] diff --git a/pypesto/hierarchical/petab.py b/pypesto/hierarchical/petab.py index 278f01116..c1b85b3be 100644 --- a/pypesto/hierarchical/petab.py +++ b/pypesto/hierarchical/petab.py @@ -1,5 +1,6 @@ """Helper methods for hierarchical optimization with PEtab.""" +import warnings from typing import Literal import pandas as pd @@ -94,14 +95,35 @@ def validate_hierarchical_petab_problem(petab_problem: petab.Problem) -> None: and not ( inner_parameter_table[petab.PARAMETER_SCALE].isna() | (inner_parameter_table[petab.PARAMETER_SCALE] == petab.LIN) + | ( + inner_parameter_table[PARAMETER_TYPE] + != InnerParameterType.SIGMA + ) ).all() ): sub_df = inner_parameter_table.loc[ :, [PARAMETER_TYPE, petab.PARAMETER_SCALE] ] raise NotImplementedError( - "Only parameterScale=lin supported for parameters of the inner " - f"subproblem.\n{sub_df}" + "LOG and LOG10 parameter scale of inner parameters is not supported " + "for sigma parameters. Inner parameter table:\n" + f"{sub_df}" + ) + elif ( + petab.PARAMETER_SCALE in inner_parameter_table + and not ( + inner_parameter_table[petab.PARAMETER_SCALE].isna() + | (inner_parameter_table[petab.PARAMETER_SCALE] == petab.LIN) + ).all() + ): + sub_df = inner_parameter_table.loc[ + :, [PARAMETER_TYPE, petab.PARAMETER_SCALE] + ] + warnings.warn( + f"LOG and LOG10 parameter scale of inner parameters is used only " + f"for their visualization, and does not affect their optimization. 
" + f"Inner parameter table:\n{sub_df}", + stacklevel=1, ) inner_parameter_df = validate_measurement_formulae( diff --git a/pypesto/hierarchical/relative/solver.py b/pypesto/hierarchical/relative/solver.py index 00b7c93ed..930fdeba2 100644 --- a/pypesto/hierarchical/relative/solver.py +++ b/pypesto/hierarchical/relative/solver.py @@ -11,7 +11,11 @@ from ...optimize import minimize from ...problem import Problem from ..base_parameter import InnerParameter -from ..base_problem import InnerProblem, scale_value_dict +from ..base_problem import ( + InnerProblem, + scale_back_value_dict, + scale_value_dict, +) from ..base_solver import InnerSolver from .util import ( apply_offset, @@ -62,6 +66,8 @@ def calculate_obj_function( relevant_data = copy.deepcopy(problem.data) sim = copy.deepcopy(sim) sigma = copy.deepcopy(sigma) + inner_parameters = copy.deepcopy(inner_parameters) + inner_parameters = scale_back_value_dict(inner_parameters, problem) for x in problem.get_xs_for_type(InnerParameterType.OFFSET): apply_offset( @@ -140,6 +146,8 @@ def calculate_gradients( relevant_data = copy.deepcopy(problem.data) sim = copy.deepcopy(sim) sigma = copy.deepcopy(sigma) + inner_parameters = copy.deepcopy(inner_parameters) + inner_parameters = scale_back_value_dict(inner_parameters, problem) # restructure sensitivities to have parameter index as second index ssim = [ diff --git a/pypesto/hierarchical/semiquantitative/problem.py b/pypesto/hierarchical/semiquantitative/problem.py index 3b4705278..27a54ffba 100644 --- a/pypesto/hierarchical/semiquantitative/problem.py +++ b/pypesto/hierarchical/semiquantitative/problem.py @@ -39,6 +39,7 @@ NOISE_PARAMETERS, OBSERVABLE_ID, PARAMETER_ID, + PARAMETER_SCALE, UPPER_BOUND, ) except ImportError: @@ -149,6 +150,18 @@ def get_interpretable_x_ids(self) -> list[str]: if x.inner_parameter_type == InnerParameterType.SIGMA ] + def get_interpretable_x_scales(self) -> list[str]: + """Get scales of interpretable inner parameters. + + The interpretable inner parameters of the semiquantitative + problem are the noise parameters. + """ + return [ + x.scale + for x in self.xs.values() + if x.inner_parameter_type == InnerParameterType.SIGMA + ] + def get_semiquant_observable_ids(self) -> list[str]: """Get the IDs of semiquantitative observables.""" return list( @@ -420,7 +433,7 @@ def noise_inner_parameters_from_parameter_df( SplineInnerParameter( inner_parameter_id=row[PARAMETER_ID], inner_parameter_type=InnerParameterType.SIGMA, - scale=LIN, + scale=row[PARAMETER_SCALE], lb=row[LOWER_BOUND], ub=row[UPPER_BOUND], observable_id=observable_id, diff --git a/pypesto/problem/hierarchical.py b/pypesto/problem/hierarchical.py index 07442d015..b6ed8f4f4 100644 --- a/pypesto/problem/hierarchical.py +++ b/pypesto/problem/hierarchical.py @@ -35,6 +35,11 @@ class HierarchicalProblem(Problem): Only relevant if hierarchical is True. Contains the bounds of easily interpretable inner parameters only, e.g. noise parameters, scaling factors, offsets. + inner_scales: + The scales for the inner optimization parameters. Only relevant if + hierarchical is True. Contains the scales of easily interpretable inner + parameters only, e.g. noise parameters, scaling factors, offsets. Can + be pypesto.C.{LIN,LOG,LOG10}. Used only for visualization purposes. semiquant_observable_ids: The ids of semiquantitative observables. Only relevant if hierarchical is True. 
If not None, the optimization result's `spline_knots` will be @@ -77,6 +82,10 @@ def __init__( self.inner_lb = np.array(inner_lb) self.inner_ub = np.array(inner_ub) + self.inner_scales = ( + self.objective.calculator.get_interpretable_inner_par_scales() + ) + self.semiquant_observable_ids = ( self.objective.calculator.semiquant_observable_ids ) diff --git a/pypesto/visualize/observable_mapping.py b/pypesto/visualize/observable_mapping.py index 5c8ceba41..ec378fd1c 100644 --- a/pypesto/visualize/observable_mapping.py +++ b/pypesto/visualize/observable_mapping.py @@ -28,6 +28,7 @@ from amici.petab.conditions import fill_in_parameters from ..hierarchical import InnerCalculatorCollector + from ..hierarchical.base_problem import scale_back_value_dict from ..hierarchical.relative.calculator import RelativeAmiciCalculator from ..hierarchical.relative.problem import RelativeInnerProblem from ..hierarchical.semiquantitative.calculator import SemiquantCalculator @@ -301,6 +302,18 @@ def plot_linear_observable_mappings_from_pypesto_result( ) ) + # Remove inner parameters not belonging to the relative inner problem. + inner_parameter_values = { + key: value + for key, value in inner_parameter_values.items() + if key in inner_problem.get_x_ids() + } + + # Scale the inner parameters back to linear scale. + inner_parameter_values = scale_back_value_dict( + inner_parameter_values, inner_problem + ) + ###################################### # Plot the linear observable mappings. ###################################### diff --git a/pypesto/visualize/parameters.py b/pypesto/visualize/parameters.py index c50f4fdeb..61269e3fc 100644 --- a/pypesto/visualize/parameters.py +++ b/pypesto/visualize/parameters.py @@ -11,7 +11,13 @@ from pypesto.util import delete_nan_inf -from ..C import INNER_PARAMETERS, RGBA, WATERFALL_MAX_VALUE +from ..C import ( + INNER_PARAMETERS, + LOG10, + RGBA, + WATERFALL_MAX_VALUE, + InnerParameterType, +) from ..result import Result from .clust_color import assign_colors from .misc import ( @@ -21,6 +27,13 @@ ) from .reference_points import ReferencePoint, create_references +try: + from ..hierarchical.base_problem import scale_value + from ..hierarchical.relative import RelativeInnerProblem + from ..hierarchical.semiquantitative import SemiquantProblem +except ImportError: + pass + logger = logging.getLogger(__name__) @@ -38,6 +51,7 @@ def parameters( start_indices: Optional[Union[int, Iterable[int]]] = None, scale_to_interval: Optional[tuple[float, float]] = None, plot_inner_parameters: bool = True, + log10_scale_hier_sigma: bool = True, ) -> matplotlib.axes.Axes: """ Plot parameter values. @@ -77,6 +91,9 @@ def parameters( ``None`` to use bounds as determined by ``lb, ub``. plot_inner_parameters: Flag indicating whether to plot inner parameters (default: True). + log10_scale_hier_sigma: + Flag indicating whether to scale inner parameters of type + ``InnerParameterType.SIGMA`` to log10 (default: True). 
Returns ------- @@ -108,13 +125,14 @@ def scale_parameters(x): for j, result in enumerate(results): # handle results and bounds - (lb, ub, x_labels, fvals, xs) = handle_inputs( + (lb, ub, x_labels, fvals, xs, x_axis_label) = handle_inputs( result=result, lb=lb, ub=ub, parameter_indices=parameter_indices, start_indices=start_indices, plot_inner_parameters=plot_inner_parameters, + log10_scale_hier_sigma=log10_scale_hier_sigma, ) # parse fvals and parameters @@ -136,6 +154,7 @@ def scale_parameters(x): lb=lb, ub=ub, x_labels=x_labels, + x_axis_label=x_axis_label, ax=ax, size=size, colors=colors[j], @@ -240,6 +259,7 @@ def parameters_lowlevel( lb: Optional[Union[np.ndarray, list[float]]] = None, ub: Optional[Union[np.ndarray, list[float]]] = None, x_labels: Optional[Iterable[str]] = None, + x_axis_label: str = "Parameter value", ax: Optional[matplotlib.axes.Axes] = None, size: Optional[tuple[float, float]] = None, colors: Optional[Sequence[Union[np.ndarray, list[float]]]] = None, @@ -327,7 +347,7 @@ def parameters_lowlevel( ub = np.array(ub, dtype="float64") ax.plot(ub.flatten(), parameters_ind, "k--", marker="+") - ax.set_xlabel("Parameter value") + ax.set_xlabel(x_axis_label) ax.set_ylabel("Parameter") ax.set_title("Estimated parameters") if legend_text is not None: @@ -343,6 +363,7 @@ def handle_inputs( ub: Optional[Union[np.ndarray, list[float]]] = None, start_indices: Optional[Union[int, Iterable[int]]] = None, plot_inner_parameters: bool = False, + log10_scale_hier_sigma: bool = True, ) -> tuple[np.ndarray, np.ndarray, list[str], np.ndarray, list[np.ndarray]]: """ Compute the correct bounds for the parameter indices to be plotted. @@ -363,6 +384,9 @@ def handle_inputs( int specifying up to which start index should be plotted plot_inner_parameters: Flag indicating whether inner parameters should be plotted. + log10_scale_hier_sigma: + Flag indicating whether to scale inner parameters of type + ``InnerParameterType.SIGMA`` to log10 (default: True). 
Returns ------- @@ -374,13 +398,21 @@ def handle_inputs( objective function values which are needed for plotting later xs: parameter values which will be plotted later + x_axis_label: + label for the x-axis """ # retrieve results fvals = result.optimize_result.fval xs = result.optimize_result.x # retrieve inner parameters in case of hierarchical optimization - inner_xs, inner_xs_names, inner_lb, inner_ub = _handle_inner_inputs(result) + ( + inner_xs, + inner_xs_names, + inner_xs_scales, + inner_lb, + inner_ub, + ) = _handle_inner_inputs(result, log10_scale_hier_sigma) # parse indices which should be plotted if start_indices is not None: @@ -404,8 +436,8 @@ def handle_inputs( if ub is None: ub = result.problem.ub_full - # get labels - x_labels = result.problem.x_names + # get labels as x_names and scales + x_labels = list(zip(result.problem.x_names, result.problem.x_scales)) # handle fixed and free indices if len(parameter_indices) < result.problem.dim_full: @@ -423,20 +455,30 @@ def handle_inputs( if inner_xs is not None and plot_inner_parameters: lb = np.concatenate([lb, inner_lb]) ub = np.concatenate([ub, inner_ub]) - x_labels = x_labels + inner_xs_names + inner_xs_labels = list(zip(inner_xs_names, inner_xs_scales)) + x_labels = x_labels + inner_xs_labels xs_out = [ np.concatenate([x, inner_x]) if x is not None else None for x, inner_x in zip(xs_out, inner_xs_out) ] - return lb, ub, x_labels, fvals_out, xs_out + # If all the scales are the same, put it in the x_axis_label + if len({x_scale for _, x_scale in x_labels}) == 1: + x_axis_label = "Parameter value (" + x_labels[0][1] + ")" + x_labels = [x_name for x_name, _ in x_labels] + else: + x_axis_label = "Parameter value" + x_labels = [f"{x_name} ({x_scale})" for x_name, x_scale in x_labels] + + return lb, ub, x_labels, fvals_out, xs_out, x_axis_label def _handle_inner_inputs( result: Result, + log10_scale_hier_sigma: bool = True, ) -> Union[ - tuple[None, None, None, None], - tuple[list[np.ndarray], list[str], np.ndarray, np.ndarray], + tuple[None, None, None, None, None], + tuple[list[np.ndarray], list[str], list[str], np.ndarray, np.ndarray], ]: """Handle inner parameters from hierarchical optimization, if available. @@ -444,6 +486,9 @@ def _handle_inner_inputs( ---------- result: Optimization result obtained by 'optimize.py'. + log10_scale_hier_sigma: + Flag indicating whether to scale inner parameters of type + ``InnerParameterType.SIGMA`` to log10 (default: True). Returns ------- @@ -451,6 +496,8 @@ def _handle_inner_inputs( Inner parameter values which will be appended to xs. inner_xs_names: Inner parameter names. + inner_xs_scales: + Inner parameter scales. inner_lb: Inner parameter lower bounds. 
inner_ub: @@ -460,6 +507,7 @@ def _handle_inner_inputs( res.get(INNER_PARAMETERS, None) for res in result.optimize_result.list ] inner_xs_names = None + inner_xs_scales = None inner_lb = None inner_ub = None @@ -473,19 +521,54 @@ def _handle_inner_inputs( inner_xs = [ ( np.full(len(inner_xs_names), np.nan) - if inner_xs_idx is None - else np.asarray(inner_xs_idx) + if inner_xs_for_start is None + else np.asarray(inner_xs_for_start) ) - for inner_xs_idx in inner_xs + for inner_xs_for_start in inner_xs ] # set bounds for inner parameters inner_lb = result.problem.inner_lb inner_ub = result.problem.inner_ub + # Scale inner parameter bounds according to their parameters scales + inner_xs_scales = result.problem.inner_scales + + if log10_scale_hier_sigma: + inner_problems_with_sigma = [ + inner_calculator.inner_problem + for inner_calculator in result.problem.objective.calculator.inner_calculators + if isinstance( + inner_calculator.inner_problem, RelativeInnerProblem + ) + or isinstance(inner_calculator.inner_problem, SemiquantProblem) + ] + for inner_problem in inner_problems_with_sigma: + for inner_x_idx, inner_x_name in enumerate(inner_xs_names): + if (inner_x_name in inner_problem.get_x_ids()) and ( + inner_problem.get_for_id( + inner_x_name + ).inner_parameter_type + == InnerParameterType.SIGMA + ): + # Scale all values, lower and upper bounds + for inner_x_for_start in inner_xs: + inner_x_for_start[inner_x_idx] = scale_value( + inner_x_for_start[inner_x_idx], LOG10 + ) + inner_xs_scales[inner_x_idx] = LOG10 + + for inner_x_idx, inner_scale in enumerate(inner_xs_scales): + inner_lb[inner_x_idx] = scale_value( + inner_lb[inner_x_idx], inner_scale + ) + inner_ub[inner_x_idx] = scale_value( + inner_ub[inner_x_idx], inner_scale + ) + if inner_xs_names is None: inner_xs = None - return inner_xs, inner_xs_names, inner_lb, inner_ub + return inner_xs, inner_xs_names, inner_xs_scales, inner_lb, inner_ub def parameters_correlation_matrix( diff --git a/test/hierarchical/test_hierarchical.py b/test/hierarchical/test_hierarchical.py index bb3645df0..3c749cc60 100644 --- a/test/hierarchical/test_hierarchical.py +++ b/test/hierarchical/test_hierarchical.py @@ -11,7 +11,7 @@ import pypesto from pypesto.C import ( INNER_PARAMETER_BOUNDS, - LOG10, + LIN, LOWER_BOUND, MODE_FUN, UPPER_BOUND, @@ -348,7 +348,7 @@ def inner_problem_exp(add_scaling: bool = True, add_offset: bool = True): InnerParameter( inner_parameter_id=inner_parameter_id, inner_parameter_type=inner_parameter_type, - scale=LOG10, + scale=LIN, lb=INNER_PARAMETER_BOUNDS[inner_parameter_type][LOWER_BOUND], ub=INNER_PARAMETER_BOUNDS[inner_parameter_type][UPPER_BOUND], ixs=mask, From 6c6cdfe17c9910ba461c9c1336c684ecc5916b69 Mon Sep 17 00:00:00 2001 From: Paul Jonas Jost <70631928+PaulJonasJost@users.noreply.github.com> Date: Wed, 4 Sep 2024 13:53:11 +0200 Subject: [PATCH 08/37] Removing Aesara from readthedocs (#1453) * Aesara with numpy 2.0 thre ImportError * Removed aesara from the documentation for now --- doc/api.rst | 1 - tox.ini | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/api.rst b/doc/api.rst index f0ff709b6..d728e3bb5 100644 --- a/doc/api.rst +++ b/doc/api.rst @@ -13,7 +13,6 @@ API reference pypesto.history pypesto.logging pypesto.objective - pypesto.objective.aesara pypesto.objective.jax pypesto.objective.julia pypesto.objective.roadrunner diff --git a/tox.ini b/tox.ini index 94479aced..027aa27e6 100644 --- a/tox.ini +++ b/tox.ini @@ -148,6 +148,8 @@ description = [testenv:doc] extras = 
doc,amici,petab,aesara,jax,select,roadrunner +deps = + numpy < 2.0 commands = sphinx-build -W -b html doc/ doc/_build/html description = From 08a11746842cbb980a4fc2175e9e99fcba18916e Mon Sep 17 00:00:00 2001 From: Paul Jonas Jost <70631928+PaulJonasJost@users.noreply.github.com> Date: Thu, 5 Sep 2024 09:37:30 +0200 Subject: [PATCH 09/37] FD objective with fixed parameters (#1446) * Fixed an issue when the objective passed to finite differences was not fixed value free * corrected usage in tests * Fixed tests in hierarchical --- pypesto/objective/finite_difference.py | 1 + test/base/test_objective.py | 15 +++++++++++---- test/hierarchical/test_censored.py | 4 ++-- test/hierarchical/test_ordinal.py | 4 ++-- test/hierarchical/test_spline.py | 6 ++++-- 5 files changed, 20 insertions(+), 10 deletions(-) diff --git a/pypesto/objective/finite_difference.py b/pypesto/objective/finite_difference.py index 4bf7a505f..fe1a1fdac 100644 --- a/pypesto/objective/finite_difference.py +++ b/pypesto/objective/finite_difference.py @@ -327,6 +327,7 @@ def __init__( self.delta_grad: FDDelta = to_delta(delta_grad) self.delta_res: FDDelta = to_delta(delta_res) self.method: str = method + self.pre_post_processor = obj.pre_post_processor if method not in FD.METHODS: raise ValueError( diff --git a/test/base/test_objective.py b/test/base/test_objective.py index b5d3ee930..e09cb0e74 100644 --- a/test/base/test_objective.py +++ b/test/base/test_objective.py @@ -342,12 +342,20 @@ def fd_delta(request): return request.param -def test_fds(fd_method, fd_delta): +# add a fixture for fixed and unfixed parameters +@pytest.mark.parametrize("fixed", [True, False]) +def test_fds(fd_method, fd_delta, fixed): """Test finite differences.""" problem = CRProblem() - # reference objective - obj = problem.get_objective() + if fixed: + fixed_problem = problem.get_problem() + fixed_problem.fix_parameters([1], problem.p_true[1]) + obj = fixed_problem.objective + p = problem.p_true[0] + else: + obj = problem.get_objective() + p = problem.p_true # FDs for everything obj_fd = pypesto.FD( @@ -394,7 +402,6 @@ def test_fds(fd_method, fd_delta): delta_grad=fd_delta, delta_res=fd_delta, ) - p = problem.p_true # check that function values coincide (call delegated) for attr in ["fval", "res"]: diff --git a/test/hierarchical/test_censored.py b/test/hierarchical/test_censored.py index 2538e050f..bdb1ae6eb 100644 --- a/test/hierarchical/test_censored.py +++ b/test/hierarchical/test_censored.py @@ -88,7 +88,7 @@ def calculate(problem, x_dct): problem.objective, ) finite_differences_results = finite_differences( - petab_problem.x_nominal_scaled, + petab_problem.x_nominal_free_scaled, ( 0, 1, @@ -100,7 +100,7 @@ def calculate(problem, x_dct): # with finite differences. assert np.allclose( finite_differences_results[1], - calculator_result["grad"], + calculator_result["grad"][petab_problem.x_free_indices], ) diff --git a/test/hierarchical/test_ordinal.py b/test/hierarchical/test_ordinal.py index 9a316cb30..79d775ecb 100644 --- a/test/hierarchical/test_ordinal.py +++ b/test/hierarchical/test_ordinal.py @@ -175,7 +175,7 @@ def inner_calculate(problem, x_dct): problem.objective, ) finite_differences_results = finite_differences( - petab_problem.x_nominal_scaled, + petab_problem.x_nominal_free_scaled, ( 0, 1, @@ -209,7 +209,7 @@ def inner_calculate(problem, x_dct): # with finite differences. 
assert np.allclose( finite_differences_results[1], - calculator_results[STANDARD]["grad"], + calculator_results[STANDARD]["grad"][petab_problem.x_free_indices], ) # Since the nominal parameters are close to true ones, diff --git a/test/hierarchical/test_spline.py b/test/hierarchical/test_spline.py index dec410df2..424d5fa3f 100644 --- a/test/hierarchical/test_spline.py +++ b/test/hierarchical/test_spline.py @@ -172,7 +172,7 @@ def inner_calculate(problem, x_dct): finite_differences = pypesto.objective.FD(problem.objective) FD_results = finite_differences( - x=petab_problem.x_nominal_scaled, + x=petab_problem.x_nominal_free_scaled, sensi_orders=(0, 1), mode=MODE_FUN, ) @@ -210,7 +210,9 @@ def inner_calculate(problem, x_dct): # The gradient should be close to the one calculated using # finite differences. assert np.allclose( - calculator_results["minimal_diff_on"]["grad"], + calculator_results["minimal_diff_on"]["grad"][ + petab_problem.x_free_indices + ], FD_results[1], atol=atol, ) From 2df7e86b34fcf6a77a1095808110038d0735dcad Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Mon, 9 Sep 2024 11:12:19 +0200 Subject: [PATCH 10/37] Update references (#1404) * Update references * LakrisenkoPat2024 * Add SchmiesterBra2024 * JacksonCha2023 #1450 ... --- doc/using_pypesto.bib | 86 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 70 insertions(+), 16 deletions(-) diff --git a/doc/using_pypesto.bib b/doc/using_pypesto.bib index ebaa06055..890cedc88 100644 --- a/doc/using_pypesto.bib +++ b/doc/using_pypesto.bib @@ -11,7 +11,6 @@ @Article{FalcoCoh2023 timestamp = {2023-07-20}, doi = {10.1098/rsif.2023.0184}, publisher = {The Royal Society}, - url = {https://doi.org/10.1098/rsif.2023.0184}, } @Article{LakrisenkoSta2023, @@ -27,7 +26,6 @@ @Article{LakrisenkoSta2023 creationdate = {2023-01-26T11:19:52}, doi = {10.1371/journal.pcbi.1010783}, publisher = {Public Library of Science}, - url = {https://doi.org/10.1371/journal.pcbi.1010783}, } @Article{SchmiesterSch2021, @@ -44,7 +42,6 @@ @Article{SchmiesterSch2021 doi = {10.1371/journal.pcbi.1008646}, publisher = {Public Library of Science}, timestamp = {2021-01-30}, - url = {https://doi.org/10.1371/journal.pcbi.1008646}, } @Article{MishraWan2023, @@ -59,7 +56,6 @@ @Article{MishraWan2023 creationdate = {2023-01-26T11:31:17}, doi = {https://doi.org/10.1016/j.ymben.2022.11.003}, keywords = {Lipid metabolism, Kinetic model, Free fatty acid, Fatty alcohol}, - url = {https://www.sciencedirect.com/science/article/pii/S1096717622001380}, } @Article{FroehlichSor2022, @@ -75,7 +71,6 @@ @Article{FroehlichSor2022 creationdate = {2023-01-26T11:31:44}, doi = {10.1371/journal.pcbi.1010322}, publisher = {Public Library of Science}, - url = {https://doi.org/10.1371/journal.pcbi.1010322}, } @Article{FroehlichGer2022, @@ -91,7 +86,6 @@ @Article{FroehlichGer2022 modificationdate = {2024-05-13T09:29:21}, publisher = {Cold Spring Harbor Laboratory}, ranking = {rank1}, - url = {https://www.biorxiv.org/content/early/2022/02/18/2022.02.17.480899}, } @Article{GerosaChi2020, @@ -109,7 +103,6 @@ @Article{GerosaChi2020 creationdate = {2023-01-26T11:32:57}, doi = {10.1016/j.cels.2020.10.002}, publisher = {Elsevier}, - url = {https://doi.org/10.1016/j.cels.2020.10.002}, } @Article{SchmiesterWei2021, @@ -126,7 +119,6 @@ @Article{SchmiesterWei2021 creationdate = {2023-01-26T11:33:16}, doi = {10.1093/bioinformatics/btab512}, eprint = {https://academic.oup.com/bioinformatics/article-pdf/37/23/4493/41641709/btab512.pdf}, - url = {https://doi.org/10.1093/bioinformatics/btab512}, } 
@Article{SchmiesterWei2020, @@ -143,7 +135,6 @@ @Article{SchmiesterWei2020 doi = {10.1007/s00285-020-01522-w}, refid = {Schmiester2020}, timestamp = {2021-01-30}, - url = {https://doi.org/10.1007/s00285-020-01522-w}, } @InProceedings{DuttaShi2021, @@ -162,7 +153,6 @@ @InProceedings{DuttaShi2021 keywords = {Machine Learning, Flaky tests, Extreme Value Theory}, location = {Athens, Greece}, numpages = {12}, - url = {https://doi.org/10.1145/3468264.3468615}, } @Article{ContentoCas2021, @@ -176,7 +166,6 @@ @Article{ContentoCas2021 elocation-id = {2021.10.01.21263052}, eprint = {https://www.medrxiv.org/content/early/2021/10/01/2021.10.01.21263052.full.pdf}, publisher = {Cold Spring Harbor Laboratory Press}, - url = {https://www.medrxiv.org/content/early/2021/10/01/2021.10.01.21263052}, } @Article{AlbadryHoe2022, @@ -192,7 +181,6 @@ @Article{AlbadryHoe2022 creationdate = {2023-01-26T11:34:50}, doi = {10.1038/s41598-022-26483-6}, refid = {Albadry2022}, - url = {https://doi.org/10.1038/s41598-022-26483-6}, } @Article{FischerHolzhausenRoe2023, @@ -206,7 +194,6 @@ @Article{FischerHolzhausenRoe2023 elocation-id = {2023.01.17.523407}, eprint = {https://www.biorxiv.org/content/early/2023/01/19/2023.01.17.523407.full.pdf}, publisher = {Cold Spring Harbor Laboratory}, - url = {https://www.biorxiv.org/content/early/2023/01/19/2023.01.17.523407}, } @Article{KissVen2024, @@ -222,7 +209,6 @@ @Article{KissVen2024 doi = {10.1093/nar/gkae123}, eprint = {https://academic.oup.com/nar/advance-article-pdf/doi/10.1093/nar/gkae123/56756494/gkae123.pdf}, modificationdate = {2024-02-28T18:27:01}, - url = {https://doi.org/10.1093/nar/gkae123}, } @Article{DoresicGre2024, @@ -237,7 +223,6 @@ @Article{DoresicGre2024 eprint = {https://www.biorxiv.org/content/early/2024/01/30/2024.01.26.577371.full.pdf}, modificationdate = {2024-04-20T13:06:42}, publisher = {Cold Spring Harbor Laboratory}, - url = {https://www.biorxiv.org/content/early/2024/01/30/2024.01.26.577371}, } @Article{ArrudaSch2023, @@ -252,7 +237,6 @@ @Article{ArrudaSch2023 eprint = {https://www.biorxiv.org/content/early/2023/08/23/2023.08.22.554273.full.pdf}, modificationdate = {2024-04-22T12:56:00}, publisher = {Cold Spring Harbor Laboratory}, - url = {https://www.biorxiv.org/content/early/2023/08/23/2023.08.22.554273}, } @Article{MerktAli2024, @@ -283,4 +267,74 @@ @Article{FalcoCoh2024a publisher = {Elsevier BV}, } +@Article{HoepflAlb2024, + author = {Höpfl, Sebastian and Albadry, Mohamed and Dahmen, Uta and Herrmann, Karl-Heinz and Kindler, Eva Marie and König, Matthias and Reichenbach, Jürgen Rainer and Tautenhahn, Hans-Michael and Wei, Weiwei and Zhao, Wan-Ting and Radde, Nicole Erika}, + journal = {Bioinformatics}, + title = {{Bayesian modelling of time series data (BayModTS) - a FAIR workflow to process sparse and highly variable data}}, + year = {2024}, + issn = {1367-4811}, + month = {05}, + pages = {btae312}, + abstract = {{Systems biology aims to better understand living systems through mathematical modelling of experimental and clinical data. A pervasive challenge in quantitative dynamical modelling is the integration of time series measurements, which often have high variability and low sampling resolution. Approaches are required to utilise such information while consistently handling uncertainties.We present BayModTS (Bayesian Modelling of Time Series data), a new FAIR (Findable, Accessible, Interoperable and Reusable) workflow for processing and analysing sparse and highly variable time series data. 
BayModTS consistently transfers uncertainties from data to model predictions, including process knowledge via parameterised models. Further, credible differences in the dynamics of different conditions can be identified by filtering noise. To demonstrate the power and versatility of BayModTS, we applied it to three hepatic datasets gathered from three different species and with different measurement techniques: (i) blood perfusion measurements by magnetic resonance imaging in rat livers after portal vein ligation, (ii) pharmacokinetic time series of different drugs in normal and steatotic mice, and (iii) CT-based volumetric assessment of human liver remnants after clinical liver resection.The BayModTS codebase is available on GitHub at https://github.com/Systems-Theory-in-Systems-Biology/BayModTS. The repository contains a Python script for the executable BayModTS workflow and a widely applicable SBML (Systems Biology Markup Language) model for retarded transient functions. In addition, all examples from the paper are included in the repository. Data and code of the application examples are stored on DaRUS https://doi.org/10.18419/darus-3876. The raw MRI ROI voxel data were uploaded to DaRUS https://doi.org/10.18419/darus-3878. The steatosis metabolite data are published on FairdomHub 10.15490/fairdomhub.1.study.1070.1.}}, + creationdate = {2024-05-16T07:58:55}, + doi = {10.1093/bioinformatics/btae312}, + eprint = {https://academic.oup.com/bioinformatics/advance-article-pdf/doi/10.1093/bioinformatics/btae312/57572667/btae312.pdf}, + modificationdate = {2024-05-16T07:58:55}, +} + +@Misc{LakrisenkoPat2024, + author = {Polina Lakrisenko and Dilan Pathirana and Daniel Weindl and Jan Hasenauer}, + title = {Exploration of methods for computing sensitivities in ODE models at dynamic and steady states}, + year = {2024}, + archiveprefix = {arXiv}, + creationdate = {2024-05-30T09:47:51}, + eprint = {2405.16524}, + modificationdate = {2024-05-30T09:47:51}, + primaryclass = {q-bio.QM}, +} + +@Misc{PhilippsKoe2024, + author = {Maren Philipps and Antonia Körner and Jakob Vanhoefer and Dilan Pathirana and Jan Hasenauer}, + title = {Non-Negative Universal Differential Equations With Applications in Systems Biology}, + year = {2024}, + archiveprefix = {arXiv}, + creationdate = {2024-06-28T08:40:06}, + eprint = {2406.14246}, + modificationdate = {2024-06-28T08:40:06}, + primaryclass = {q-bio.QM}, + url = {https://arxiv.org/abs/2406.14246}, +} + +@Article{SchmiesterBra2024, + author = {Schmiester, Leonard and Brasó-Maristany, Fara and González-Farré, Blanca and Pascual, Tomás and Gavilá, Joaquín and Tekpli, Xavier and Geisler, Jürgen and Kristensen, Vessela N. and Frigessi, Arnoldo and Prat, Aleix and Köhn-Luque, Alvaro}, + journal = {Clinical Cancer Research}, + title = {{Computational Model Predicts Patient Outcomes in Luminal B Breast Cancer Treated with Endocrine Therapy and CDK4/6 Inhibition}}, + year = {2024}, + issn = {1078-0432}, + month = {07}, + pages = {OF1-OF9}, + abstract = {{Development of a computational biomarker to predict, prior to treatment, the response to CDK4/6 inhibition (CDK4/6i) in combination with endocrine therapy in patients with breast cancer.A mechanistic mathematical model that accounts for protein signaling and drug mechanisms of action was developed and trained on extensive, publicly available data from breast cancer cell lines. The model was built to provide a patient-specific response score based on the expression of six genes (CCND1, CCNE1, ESR1, RB1, MYC, and CDKN1A). 
The model was validated in five independent cohorts of 148 patients in total with early-stage or advanced breast cancer treated with endocrine therapy and CDK4/6i. Response was measured either by evaluating Ki67 levels and PAM50 risk of relapse (ROR) after neoadjuvant treatment or by evaluating progression-free survival (PFS).The model showed significant association with patient’s outcomes in all five cohorts. The model predicted high Ki67 [area under the curve; AUC (95\\% confidence interval, CI) of 0.80 (0.64–0.92), 0.81 (0.60–1.00) and 0.80 (0.65–0.93)] and high PAM50 ROR [AUC of 0.78 (0.64–0.89)]. This observation was not obtained in patients treated with chemotherapy. In the other cohorts, patient stratification based on the model prediction was significantly associated with PFS [hazard ratio (HR) = 2.92 (95\\% CI, 1.08–7.86), P = 0.034 and HR = 2.16 (1.02 4.55), P = 0.043].A mathematical modeling approach accurately predicts patient outcome following CDK4/6i plus endocrine therapy that marks a step toward more personalized treatments in patients with Luminal B breast cancer.}}, + creationdate = {2024-08-01T09:44:04}, + doi = {10.1158/1078-0432.CCR-24-0244}, + eprint = {https://aacrjournals.org/clincancerres/article-pdf/doi/10.1158/1078-0432.CCR-24-0244/3478451/ccr-24-0244.pdf}, + modificationdate = {2024-08-01T09:44:04}, + url = {https://doi.org/10.1158/1078-0432.CCR-24-0244}, +} + +@InProceedings{JacksonCha2023, + author = {Jackson, Clayton and Chardon, Matthieu and Wang, Y. Curtis and Rudi, Johann and Tresch, Matthew and Heckman, Charles J. and Quinn, Roger D.}, + booktitle = {Biomimetic and Biohybrid Systems}, + title = {Multimodal Parameter Inference for a Canonical Motor Microcircuit Controlling Rat Hindlimb Motion}, + year = {2023}, + address = {Cham}, + editor = {Meder, Fabian and Hunt, Alexander and Margheri, Laura and Mura, Anna and Mazzolai, Barbara}, + pages = {38--51}, + publisher = {Springer Nature Switzerland}, + abstract = {This work explored synaptic strengths in a computational neuroscience model of a controller for the hip joint of a rat which consists of Ia interneurons, Renshaw cells, and the associated motor neurons. This circuit has been referred to as the Canonical Motor Microcircuit (CMM). It is thought that the CMM acts to modulate motor neuron activity at the output stage. We first created a biomechanical model of a rat hindlimb consisting of a pelvis, femur, shin, foot, and flexor-extensor muscle pairs modeled with a Hill muscle model. We then modeled the CMM using non-spiking leaky-integrator neural models connected with conductance-based synapses. To tune the parameters in the network, we implemented an automated approach for parameter search using the Markov chain Monte Carlo (MCMC) method to solve a parameter estimation problem in a Bayesian inference framework. As opposed to traditional optimization techniques, the MCMC method identifies probability densities over the multidimensional space of parameters. This allows us to see a range of likely parameters that produce model outcomes consistent with animal data, determine if the distribution of likely parameters is uni- or multi-modal, as well as evaluate the significance and sensitivity of each parameter. 
This approach will allow for further analysis of the circuit, specifically, the function and significance of Ia feedback and Renshaw cells.}, + creationdate = {2024-09-06T15:49:21}, + doi = {10.1007/978-3-031-39504-8_3}, + isbn = {978-3-031-39504-8}, + modificationdate = {2024-09-06T15:49:47}, +} + @Comment{jabref-meta: databaseType:bibtex;} From 0465922f320abc4c10d14173a8e440a37505c345 Mon Sep 17 00:00:00 2001 From: Paul Jonas Jost <70631928+PaulJonasJost@users.noreply.github.com> Date: Wed, 11 Sep 2024 14:06:46 +0200 Subject: [PATCH 11/37] Removed aesara as supported objective. (#1455) --- pypesto/objective/aesara/__init__.py | 6 - pypesto/objective/aesara/base.py | 282 --------------------------- setup.cfg | 4 - test/base/test_objective.py | 44 ----- 4 files changed, 336 deletions(-) delete mode 100644 pypesto/objective/aesara/__init__.py delete mode 100644 pypesto/objective/aesara/base.py diff --git a/pypesto/objective/aesara/__init__.py b/pypesto/objective/aesara/__init__.py deleted file mode 100644 index 6f2fd79d1..000000000 --- a/pypesto/objective/aesara/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -""" -Aesara objective -================ -""" - -from .base import AesaraObjective diff --git a/pypesto/objective/aesara/base.py b/pypesto/objective/aesara/base.py deleted file mode 100644 index ce7ad8c22..000000000 --- a/pypesto/objective/aesara/base.py +++ /dev/null @@ -1,282 +0,0 @@ -""" -Aesara models interface. - -Adds an interface for the construction of loss functions -incorporating aesara models. This permits computation of derivatives using a -combination of objective based methods and aesara based backpropagation. -""" - -import copy -from collections.abc import Sequence -from typing import Optional - -import numpy as np - -from ...C import FVAL, GRAD, HESS, MODE_FUN, RDATAS, ModeType -from ..base import ObjectiveBase, ResultDict - -try: - import aesara - import aesara.tensor as aet - from aesara.tensor import Op - from aesara.tensor.var import TensorVariable -except ImportError: - raise ImportError( - "Using an aeasara objective requires an installation of " - "the python package aesara. Please install aesara via " - "`pip install aesara`." - ) from None - - -class AesaraObjective(ObjectiveBase): - """ - Wrapper around an ObjectiveBase. - - Computes the gradient at each evaluation, caching it for later calls. - Caching is only enabled after the first time the gradient is asked for - and disabled whenever the cached gradient is not used, in order not to - increase computation time for derivative-free samplers. - - Parameters - ---------- - objective: - The `pypesto.ObjectiveBase` to wrap. 
- aet_x: - Tensor variables that define the variables of `aet_fun` - aet_fun: - Aesara function that maps `aet_x` to the variables of `objective` - coeff: - Multiplicative coefficient for objective - """ - - def __init__( - self, - objective: ObjectiveBase, - aet_x: TensorVariable, - aet_fun: TensorVariable, - coeff: Optional[float] = 1.0, - x_names: Sequence[str] = None, - ): - if not isinstance(objective, ObjectiveBase): - raise TypeError("objective must be an ObjectiveBase instance") - if not objective.check_mode(MODE_FUN): - raise NotImplementedError( - f"objective must support mode={MODE_FUN}" - ) - super().__init__(x_names) - self.base_objective = objective - - self.aet_x = aet_x - self.aet_fun = aet_fun - self._coeff = coeff - - self.obj_op = AesaraObjectiveOp(self, self._coeff) - - # compiled function - if objective.has_fun: - self.afun = aesara.function([aet_x], self.obj_op(aet_fun)) - - # compiled gradient - if objective.has_grad: - self.agrad = aesara.function( - [aet_x], aesara.grad(self.obj_op(aet_fun), [aet_x]) - ) - - # compiled hessian - if objective.has_hess: - self.ahess = aesara.function( - [aet_x], aesara.gradient.hessian(self.obj_op(aet_fun), [aet_x]) - ) - - # compiled input mapping - self.infun = aesara.function([aet_x], aet_fun) - - # temporary storage for evaluation results of objective - self.cached_base_ret: ResultDict = {} - - def check_mode(self, mode: ModeType) -> bool: - """See `ObjectiveBase` documentation.""" - return mode == MODE_FUN - - def check_sensi_orders(self, sensi_orders, mode: ModeType) -> bool: - """See `ObjectiveBase` documentation.""" - if not self.check_mode(mode): - return False - else: - return self.base_objective.check_sensi_orders(sensi_orders, mode) - - def call_unprocessed( - self, - x: np.ndarray, - sensi_orders: tuple[int, ...], - mode: ModeType, - return_dict: bool, - **kwargs, - ) -> ResultDict: - """ - See `ObjectiveBase` for more documentation. - - Main method to overwrite from the base class. It handles and - delegates the actual objective evaluation. - """ - # hess computation in aesara requires grad - if 2 in sensi_orders and 1 not in sensi_orders: - sensi_orders = (1, *sensi_orders) - - # this computes all the results from the inner objective, rendering - # them accessible to aesara compiled functions - - set_return_dict, return_dict = ( - "return_dict" in kwargs, - kwargs.pop("return_dict", False), - ) - self.cached_base_ret = self.base_objective( - self.infun(x), sensi_orders, mode, return_dict=True, **kwargs - ) - if set_return_dict: - kwargs["return_dict"] = return_dict - ret = {} - if RDATAS in self.cached_base_ret: - ret[RDATAS] = self.cached_base_ret[RDATAS] - if 0 in sensi_orders: - ret[FVAL] = float(self.afun(x)) - if 1 in sensi_orders: - ret[GRAD] = self.agrad(x)[0] - if 2 in sensi_orders: - ret[HESS] = self.ahess(x)[0] - - return ret - - def __deepcopy__(self, memodict=None): - other = AesaraObjective( - copy.deepcopy(self.base_objective), - self.aet_x, - self.aet_fun, - self._coeff, - ) - - return other - - -class AesaraObjectiveOp(Op): - """ - Aesara wrapper around a (non-normalized) log-probability function. 
- - Parameters - ---------- - obj: - Base aesara objective - coeff: - Multiplicative coefficient for the objective function value - """ - - itypes = [aet.dvector] # expects a vector of parameter values when called - otypes = [aet.dscalar] # outputs a single scalar value (the log prob) - - def __init__(self, obj: AesaraObjective, coeff: Optional[float] = 1.0): - self._objective: AesaraObjective = obj - self._coeff: float = coeff - - # initialize the sensitivity Op - if obj.has_grad: - self._log_prob_grad = AesaraObjectiveGradOp(obj, coeff) - else: - self._log_prob_grad = None - - def perform(self, node, inputs, outputs, params=None): # noqa - # note that we use precomputed values from the outer - # AesaraObjective.call_unprocessed here, which means we can - # ignore inputs here - log_prob = self._coeff * self._objective.cached_base_ret[FVAL] - outputs[0][0] = np.array(log_prob) - - def grad(self, inputs, g): - """ - Calculate the hessian. - - Actually returns the vector-hessian product - g[0] is a vector of - parameter values. - """ - if self._log_prob_grad is None: - return super().grad(inputs, g) - (theta,) = inputs - log_prob_grad = self._log_prob_grad(theta) - return [g[0] * log_prob_grad] - - -class AesaraObjectiveGradOp(Op): - """ - Aesara wrapper around a (non-normalized) log-probability gradient function. - - This Op will be called with a vector of values and also return a vector of - values - the gradients in each dimension. - - Parameters - ---------- - obj: - Base aesara objective - coeff: - Multiplicative coefficient for the objective function value - """ - - itypes = [aet.dvector] # expects a vector of parameter values when called - otypes = [aet.dvector] # outputs a vector (the log prob grad) - - def __init__(self, obj: AesaraObjective, coeff: Optional[float] = 1.0): - self._objective: AesaraObjective = obj - self._coeff: float = coeff - - if obj.has_hess: - self._log_prob_hess = AesaraObjectiveHessOp(obj, coeff) - else: - self._log_prob_hess = None - - def perform(self, node, inputs, outputs, params=None): # noqa - # note that we use precomputed values from the outer - # AesaraObjective.call_unprocessed here, which means we can - # ignore inputs here - log_prob_grad = self._coeff * self._objective.cached_base_ret[GRAD] - outputs[0][0] = log_prob_grad - - def grad(self, inputs, g): - """ - Calculate the hessian. - - Actually returns the vector-hessian product - g[0] is a vector of - parameter values. - """ - if self._log_prob_hess is None: - return super().grad(inputs, g) - (theta,) = inputs - log_prob_hess = self._log_prob_hess(theta) - return [g[0].dot(log_prob_hess)] - - -class AesaraObjectiveHessOp(Op): - """ - Aesara wrapper around a (non-normalized) log-probability Hessian function. - - This Op will be called with a vector of values and also return a matrix of - values - the Hessian in each dimension. 
- - Parameters - ---------- - obj: - Base aesara objective - coeff: - Multiplicative coefficient for the objective function value - """ - - itypes = [aet.dvector] - otypes = [aet.dmatrix] - - def __init__(self, obj: AesaraObjective, coeff: Optional[float] = 1.0): - self._objective: AesaraObjective = obj - self._coeff: float = coeff - - def perform(self, node, inputs, outputs, params=None): # noqa - # note that we use precomputed values from the outer - # AesaraObjective.call_unprocessed here, which means we can - # ignore inputs here - log_prob_hess = self._coeff * self._objective.cached_base_ret[HESS] - outputs[0][0] = log_prob_hess diff --git a/setup.cfg b/setup.cfg index f0f618828..0dd09beba 100644 --- a/setup.cfg +++ b/setup.cfg @@ -74,7 +74,6 @@ all = %(all_optimizers)s %(mpi)s %(pymc)s - %(aesara)s %(jax)s %(julia)s %(emcee)s @@ -121,8 +120,6 @@ pymc = arviz >= 0.12.1 aesara >= 2.8.6 pymc >= 4.2.1 -aesara = - aesara >= 2.0.5 jax = jax >= 0.4.1 jaxlib >= 0.4.1 @@ -153,7 +150,6 @@ doc = %(fides)s %(amici)s %(petab)s - %(aesara)s %(jax)s %(roadrunner)s example = diff --git a/test/base/test_objective.py b/test/base/test_objective.py index e09cb0e74..703cb69f7 100644 --- a/test/base/test_objective.py +++ b/test/base/test_objective.py @@ -2,7 +2,6 @@ import copy import numbers -import sys from functools import partial import numpy as np @@ -13,11 +12,6 @@ from ..util import CRProblem, poly_for_sensi, rosen_for_sensi -pytest_skip_aesara = pytest.mark.skipif( - sys.version_info >= (3, 12), - reason="Skipped Aesara tests on Python 3.12 or higher", -) - @pytest.fixture(params=[True, False]) def integrated(request): @@ -184,44 +178,6 @@ def rel_err(eps_): ) -@pytest_skip_aesara -def test_aesara(max_sensi_order, integrated): - """Test function composition and gradient computation via aesara""" - import aesara.tensor as aet - - from pypesto.objective.aesara import AesaraObjective - - prob = rosen_for_sensi(max_sensi_order, integrated, [0, 1]) - - # create aesara specific symbolic tensor variables - x = aet.specify_shape(aet.vector("x"), (2,)) - - # apply inverse transform such that we evaluate at prob['x'] - x_ref = np.arcsinh(prob["x"]) - - # compose rosenbrock function with sinh transformation - obj = AesaraObjective(prob["obj"], x, aet.sinh(x)) - - # check function values and derivatives, also after copy - for _obj in (obj, copy.deepcopy(obj)): - # function value - assert _obj(x_ref) == prob["fval"] - - # gradient - if max_sensi_order > 0: - assert np.allclose( - _obj(x_ref, sensi_orders=(1,)), prob["grad"] * np.cosh(x_ref) - ) - - # hessian - if max_sensi_order > 1: - assert np.allclose( - prob["hess"] * (np.diag(np.power(np.cosh(x_ref), 2))) - + np.diag(prob["grad"] * np.sinh(x_ref)), - _obj(x_ref, sensi_orders=(2,)), - ) - - @pytest.mark.parametrize("enable_x64", [True, False]) @pytest.mark.parametrize("fix_parameters", [True, False]) def test_jax(max_sensi_order, integrated, enable_x64, fix_parameters): From fd652e80f72ba83246ecd70980be22b4a6c5fad1 Mon Sep 17 00:00:00 2001 From: Jonas Arruda <69197639+arrjon@users.noreply.github.com> Date: Thu, 12 Sep 2024 12:38:08 +0200 Subject: [PATCH 12/37] Bayes factor Tutorial (#1444) * adding utilities to compute Bayes Factor * adding tutorial on Bayes Factors --- doc/authors.rst | 3 +- doc/example.rst | 1 + .../model_evidence_and_bayes_factors.ipynb | 943 ++++++++++++++++++ pypesto/C.py | 3 + pypesto/sample/__init__.py | 6 + pypesto/sample/adaptive_parallel_tempering.py | 18 - pypesto/sample/diagnostics.py | 8 + pypesto/sample/dynesty.py | 57 +- 
pypesto/sample/evidence.py | 466 +++++++++ pypesto/sample/parallel_tempering.py | 93 +- pypesto/sample/util.py | 2 + test/optimize/test_optimize.py | 4 +- test/run_notebook.sh | 1 + test/sample/test_sample.py | 139 ++- 14 files changed, 1603 insertions(+), 141 deletions(-) create mode 100644 doc/example/model_evidence_and_bayes_factors.ipynb create mode 100644 pypesto/sample/evidence.py diff --git a/doc/authors.rst b/doc/authors.rst index afd83dd8e..f08b783e4 100644 --- a/doc/authors.rst +++ b/doc/authors.rst @@ -9,7 +9,7 @@ This package was mainly developed by: - Paul Jonas Jost - Jakob Vanhoefer -with major contributions by (status 2023): +with major contributions by (status 2024): - Daniel Weindl - Dilan Pathirana @@ -23,3 +23,4 @@ with major contributions by (status 2023): - Stephan Grein - Erika Dudkin - Domagoj Doresic +- Jonas Arruda diff --git a/doc/example.rst b/doc/example.rst index bd65951ed..7cbbbdb38 100644 --- a/doc/example.rst +++ b/doc/example.rst @@ -50,6 +50,7 @@ Algorithms and features example/store.ipynb example/history_usage.ipynb example/model_selection.ipynb + example/model_evidence_and_bayes_factors.ipynb example/julia.ipynb example/relative_data.ipynb example/ordinal_data.ipynb diff --git a/doc/example/model_evidence_and_bayes_factors.ipynb b/doc/example/model_evidence_and_bayes_factors.ipynb new file mode 100644 index 000000000..9b5d43031 --- /dev/null +++ b/doc/example/model_evidence_and_bayes_factors.ipynb @@ -0,0 +1,943 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "2db98323fb940c7f", + "metadata": {}, + "source": [ + "# Bayes Factor Tutorial\n", + "\n", + "Bayes factors are a key concept in Bayesian model comparison, allowing us to compare the relative likelihood of different models given the data. They are computed using the marginal likelihoods (or evidence) of the models. This tutorial will cover various methods for computing marginal likelihoods.\n", + "\n", + "You find an introduction and extensive review here: [Llorente et al. (2023)](https://doi.org/10.1137/20M1310849)." + ] + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "\n", + "## Marginal Likelihood\n", + "\n", + "The marginal likelihood (or evidence) of a model $\\mathcal{M}$ given data $\\mathcal{D}$ is defined as:\n", + "\n", + "$$\n", + "P(\\mathcal{D} \\mid \\mathcal{M}) = \\int P(\\mathcal{D} \\mid \\theta, \\mathcal{M}) P(\\theta \\mid \\mathcal{M}) \\, d\\theta\n", + "$$\n", + "\n", + "where $\\theta$ are the parameters of the model. This integral averages the likelihood over the prior distribution of the parameters, providing a measure of how well the model explains the data, considering all possible parameter values." 
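As a minimal illustration of this integral (a sketch under assumed toy values, not part of the tutorial models below), the evidence of a one-parameter Gaussian model can be approximated naively by drawing parameters from the prior and averaging the likelihood in log space:

```python
# Minimal sketch, assuming a toy model: one unknown mean, Gaussian prior,
# Gaussian likelihood with known sigma. All names and values are illustrative.
import numpy as np
from scipy import stats
from scipy.special import logsumexp

rng = np.random.default_rng(1)
toy_data = rng.normal(loc=1.0, scale=2.0, size=20)
prior = stats.norm(loc=0.0, scale=3.0)  # prior over the unknown mean


def log_likelihood(mu: float, data: np.ndarray, sigma: float = 2.0) -> float:
    return float(np.sum(stats.norm.logpdf(data, loc=mu, scale=sigma)))


# draw parameters from the prior and average the likelihood in log space
thetas = prior.rvs(size=5_000, random_state=rng)
log_liks = np.array([log_likelihood(mu, toy_data) for mu in thetas])
log_evidence_mc = logsumexp(log_liks) - np.log(log_liks.size)
print("Monte Carlo log evidence (toy model):", log_evidence_mc)
```

Such brute-force averaging is rarely practical, which is what motivates the estimators discussed in this tutorial.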
+ ], + "id": "5c56f766bcf7ab48" + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "## Bayes Factor\n", + "\n", + "The Bayes factor comparing two models $\\mathcal{M}_1$ and $\\mathcal{M}_2$ given data $\\mathcal{D}$ is the ratio of their marginal likelihoods:\n", + "\n", + "$$\n", + "\\operatorname{BF}_{12} = \\frac{P(\\mathcal{D} \\mid \\mathcal{M}_1)}{P(\\mathcal{D} \\mid \\mathcal{M}_2)}\n", + "$$\n", + "\n", + "A $\\operatorname{BF}_{12} > 1$ indicates that the data favors model $\\mathcal{M}_1$ over model $\\mathcal{M}_2$, while $\\operatorname{BF}_{12} < 1$ indicates the opposite.\n", + "\n", + "Jeffreys (1961) suggested interpreting Bayes factors in half-units on the log10 scale (this was further simplified in Kass and Raftery (1995)):\n", + "- Not worth more than a bare mention: $0 < \\log_{10} \\operatorname{BF}_{12} \\leq 0.5$\n", + "- Substantial: $0.5 < \\log_{10}\\operatorname{BF}_{12} \\leq 1$\n", + "- Strong: $1 < \\log_{10}\\operatorname{BF}_{12} \\leq 2$\n", + "- Decisive: $2 < \\log_{10}\\operatorname{BF}_{12}$" + ], + "id": "6337b6a3" + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "## Example\n", + "\n", + "To illustrate different methods to compute marginal likelihoods we introduce two toy models, for which we can compute the marginal likelihoods analytically:\n", + "\n", + "1. **Mixture of Two Gaussians (True Data Generator)**: Composed of two Gaussian distributions, $\\mathcal{N}(\\mu_1, \\sigma_1^2)$ and $\\mathcal{N}(\\mu_2, \\sigma_2^2)$, with mixing coefficient $\\pi=0.7$.\n", + "\n", + "2. **Single Gaussian (Alternative Model)**: A single Gaussian distribution, $\\mathcal{N}(\\mu, \\sigma^2)$.\n", + "\n", + "We sample synthetic data from the first model and create pypesto problems for both models with the same data. \n", + "The free parameters are the means of both models. \n", + "For this example, we assume that the standard deviation is known and fixed to the true value.\n", + "As priors, we assume normal distributions." 
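Before walking through the estimators, the following sketch (with placeholder log-evidence values, not results from this example) shows how two log evidences are combined into a Bayes factor and read off the scale above:

```python
# Sketch with placeholder log evidences; thresholds follow the scale quoted above.
import numpy as np

log_evidence_m1 = -23.4  # hypothetical ln P(D | M1)
log_evidence_m2 = -26.9  # hypothetical ln P(D | M2)

log10_bf_12 = (log_evidence_m1 - log_evidence_m2) / np.log(10)

scale = [
    (0.5, "not worth more than a bare mention"),
    (1.0, "substantial"),
    (2.0, "strong"),
    (np.inf, "decisive"),
]
strength = next(label for bound, label in scale if abs(log10_bf_12) <= bound)
favored = "M1 over M2" if log10_bf_12 > 0 else "M2 over M1"
print(f"log10 BF_12 = {log10_bf_12:.2f}: {strength} evidence for {favored}")
```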
+ ], + "id": "a6b7640cff0280de" + }, + { + "cell_type": "code", + "id": "6eb930b7", + "metadata": {}, + "source": [ + "from functools import partial\n", + "from typing import Union\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "from scipy import stats\n", + "from scipy.special import logsumexp\n", + "\n", + "from pypesto import sample, optimize, visualize, variational\n", + "from pypesto.objective import (\n", + " AggregatedObjective,\n", + " NegLogParameterPriors,\n", + " Objective,\n", + ")\n", + "from pypesto.problem import Problem" + ], + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "# model hyperparameters\n", + "N = 10\n", + "N2_1 = 3\n", + "N2_2 = N - N2_1\n", + "sigma2 = 2.\n", + "true_params = np.array([-2., 2.])\n", + "\n", + "rng = np.random.default_rng(seed=0)\n", + "# Alternative Model\n", + "Y1 = rng.normal(loc=0., scale=1., size=N)\n", + "\n", + "# True Model\n", + "Y2_1 = rng.normal(loc=true_params[0], scale=sigma2, size=N2_1)\n", + "Y2_2 = rng.normal(loc=true_params[1], scale=sigma2, size=N2_2)\n", + "Y2 = np.concatenate([Y2_1, Y2_2])\n", + "mixture_data, sigma = Y2, sigma2\n", + "n_obs = len(mixture_data)\n", + "\n", + "# plot the alternative model distribution as a normal distribution\n", + "plt.figure()\n", + "x = np.linspace(-10, 10, 100)\n", + "plt.plot(x, stats.norm.pdf(x, loc=0., scale=1.), label='Alternative Model', color='red')\n", + "plt.plot(x, stats.norm.pdf(x, loc=true_params[0], scale=sigma2), label='True Model Y2_1', color='blue')\n", + "plt.plot(x, stats.norm.pdf(x, loc=true_params[1], scale=sigma2), label='True Model Y2_2', color='green')\n", + "\n", + "\n", + "# Plot the data of the alternative and true model as dots on the x-axis for each model\n", + "plt.scatter(Y1, np.zeros_like(Y1), label='Y1 samples', color='red')\n", + "plt.scatter(Y2_1, np.full(len(Y2_1), 0.05), label='Y2_1 samples', color='blue')\n", + "plt.scatter(Y2_2, np.full(len(Y2_2), 0.1), label='Y2_2 samples', color='green')\n", + "plt.legend()\n", + "plt.show()" + ], + "id": "ad47e3f67a7896d3", + "outputs": [], + "execution_count": null + }, + { + "metadata": { + "tags": [ + "hide-input" + ] + }, + "cell_type": "code", + "source": [ + "# evidence\n", + "def log_evidence_alt(data: np.ndarray, std: float):\n", + " n = int(data.size)\n", + " y_sum = np.sum(data)\n", + " y_sq_sum = np.sum(data**2)\n", + "\n", + " term1 = 1 / (np.sqrt(2 * np.pi) * std)\n", + " log_term2 = -0.5 * np.log(n + 1)\n", + " inside_exp = -0.5 / (std**2) * (y_sq_sum - (y_sum**2) / (n + 1))\n", + " return n * np.log(term1) + log_term2 + inside_exp\n", + "\n", + "\n", + "def log_evidence_true(data: np.ndarray, std: float):\n", + " y1 = data[:N2_1]\n", + " y2 = data[N2_1:]\n", + " n = N2_1 + N2_2\n", + "\n", + " y_mean_1 = np.mean(y1)\n", + " y_mean_2 = np.mean(y2)\n", + " y_sq_sum = np.sum(y1**2) + np.sum(y2**2)\n", + "\n", + " term1 = (1 / (np.sqrt(2 * np.pi) * std)) ** n\n", + " term2 = 1 / (np.sqrt(N2_1 + 1) * np.sqrt(N2_2 + 1))\n", + "\n", + " inside_exp = (\n", + " -1\n", + " / (2 * std**2)\n", + " * (\n", + " y_sq_sum\n", + " + 8\n", + " - (N2_1 * y_mean_1 - 2) ** 2 / (N2_1 + 1)\n", + " - (N2_2 * y_mean_2 + 2) ** 2 / (N2_2 + 1)\n", + " )\n", + " )\n", + " return np.log(term1) + np.log(term2) + inside_exp\n", + "\n", + "\n", + "true_log_evidence_alt = log_evidence_alt(mixture_data, sigma)\n", + "true_log_evidence_true = log_evidence_true(mixture_data, sigma)\n", + "\n", + "print(\"True log evidence, true model:\", 
true_log_evidence_true)\n", + "print(\"True log evidence, alternative model:\", true_log_evidence_alt)" + ], + "id": "2143410833d86594", + "outputs": [], + "execution_count": null + }, + { + "metadata": { + "tags": [ + "hide-input" + ] + }, + "cell_type": "code", + "source": [ + "# define likelihood for each model, and build the objective functions for the pyPESTO problem\n", + "def neg_log_likelihood(params: Union[np.ndarray, list], data: np.ndarray):\n", + " # normal distribution\n", + " mu, std = params\n", + " n = int(data.size)\n", + " return (\n", + " 0.5 * n * np.log(2 * np.pi)\n", + " + n * np.log(std)\n", + " + np.sum((data - mu) ** 2) / (2 * std**2)\n", + " )\n", + "\n", + "\n", + "def neg_log_likelihood_grad(params: Union[np.ndarray, list], data: np.ndarray):\n", + " mu, std = params\n", + " n = int(data.size)\n", + " grad_mu = -np.sum(data - mu) / (std**2)\n", + " grad_std = n / std - np.sum((data - mu) ** 2) / (std**3)\n", + " return np.array([grad_mu, grad_std])\n", + "\n", + "\n", + "def neg_log_likelihood_hess(params: Union[np.ndarray, list], data: np.ndarray):\n", + " mu, std = params\n", + " n = int(data.size)\n", + " hess_mu_mu = n / (std**2)\n", + " hess_mu_std = 2 * np.sum(data - mu) / (std**3)\n", + " hess_std_std = -n / (std**2) + 3 * np.sum((data - mu) ** 2) / (std**4)\n", + " return np.array([[hess_mu_mu, hess_mu_std], [hess_mu_std, hess_std_std]])\n", + "\n", + "\n", + "def neg_log_likelihood_m2(\n", + " params: Union[np.ndarray, list], data: np.ndarray, n_mix: int\n", + "):\n", + " # normal distribution\n", + " y1 = data[:n_mix]\n", + " y2 = data[n_mix:]\n", + " m1, m2, std = params\n", + "\n", + " neg_log_likelihood([m1, std], y1)\n", + " term1 = neg_log_likelihood([m1, std], y1)\n", + " term2 = neg_log_likelihood([m2, std], y2)\n", + " return term1 + term2\n", + "\n", + "\n", + "def neg_log_likelihood_m2_grad(params: np.ndarray, data: np.ndarray, n_mix: int):\n", + " m1, m2, std = params\n", + " y1 = data[:n_mix]\n", + " y2 = data[n_mix:]\n", + "\n", + " grad_m1, grad_std1 = neg_log_likelihood_grad([m1, std], y1)\n", + " grad_m2, grad_std2 = neg_log_likelihood_grad([m2, std], y2)\n", + " return np.array([grad_m1, grad_m2, grad_std1 + grad_std2])\n", + "\n", + "\n", + "def neg_log_likelihood_m2_hess(params: np.ndarray, data: np.ndarray, n_mix: int):\n", + " m1, m2, std = params\n", + " y1 = data[:n_mix]\n", + " y2 = data[n_mix:]\n", + "\n", + " [[hess_m1_m1, hess_m1_std], [_, hess_std_std1]] = neg_log_likelihood_hess(\n", + " [m1, std], y1\n", + " )\n", + " [[hess_m2_m2, hess_m2_std], [_, hess_std_std2]] = neg_log_likelihood_hess(\n", + " [m2, std], y2\n", + " )\n", + " hess_m1_m2 = 0\n", + "\n", + " return np.array(\n", + " [\n", + " [hess_m1_m1, hess_m1_m2, hess_m1_std],\n", + " [hess_m1_m2, hess_m2_m2, hess_m2_std],\n", + " [hess_m1_std, hess_m2_std, hess_std_std1 + hess_std_std2],\n", + " ]\n", + " )\n", + "\n", + "\n", + "nllh_true = Objective(\n", + " fun=partial(neg_log_likelihood_m2, data=mixture_data, n_mix=N2_1),\n", + " grad=partial(neg_log_likelihood_m2_grad, data=mixture_data, n_mix=N2_1),\n", + " hess=partial(neg_log_likelihood_m2_hess, data=mixture_data, n_mix=N2_1),\n", + ")\n", + "nllh_alt = Objective(\n", + " fun=partial(neg_log_likelihood, data=mixture_data),\n", + " grad=partial(neg_log_likelihood_grad, data=mixture_data),\n", + " hess=partial(neg_log_likelihood_hess, data=mixture_data),\n", + ")\n", + "\n", + "\n", + "def log_normal_density(x: float, mu: float, std: float):\n", + " return (\n", + " -1 / 2 * np.log(2 * np.pi)\n", + " - 1 / 2 
* np.log(std**2)\n", + " - (x - mu) ** 2 / (2 * std**2)\n", + " )\n", + "\n", + "\n", + "def log_normal_density_grad(x: float, mu: float, std: float):\n", + " return -(x - mu) / (std**2)\n", + "\n", + "\n", + "def log_normal_density_hess(x: float, mu: float, std: float):\n", + " return -1 / (std**2)\n", + "\n", + "\n", + "prior_true = NegLogParameterPriors(\n", + " [\n", + " {\n", + " \"index\": 0,\n", + " \"density_fun\": partial(log_normal_density, mu=true_params[0], std=sigma2),\n", + " \"density_dx\": partial(\n", + " log_normal_density_grad, mu=true_params[0], std=sigma2\n", + " ),\n", + " \"density_ddx\": partial(\n", + " log_normal_density_hess, mu=true_params[0], std=sigma2\n", + " ),\n", + " },\n", + " {\n", + " \"index\": 1,\n", + " \"density_fun\": partial(log_normal_density, mu=true_params[1], std=sigma2),\n", + " \"density_dx\": partial(\n", + " log_normal_density_grad, mu=true_params[1], std=sigma2\n", + " ),\n", + " \"density_ddx\": partial(\n", + " log_normal_density_hess, mu=true_params[1], std=sigma2\n", + " ),\n", + " },\n", + " ]\n", + ")\n", + "\n", + "prior_alt = NegLogParameterPriors(\n", + " [\n", + " {\n", + " \"index\": 0,\n", + " \"density_fun\": partial(log_normal_density, mu=0., std=1.),\n", + " \"density_dx\": partial(log_normal_density_grad, mu=0., std=1.),\n", + " \"density_ddx\": partial(\n", + " log_normal_density_hess, mu=0., std=1.\n", + " ),\n", + " }\n", + " ]\n", + ")\n", + "\n", + "\n", + "mixture_problem_true = Problem(\n", + " objective=AggregatedObjective(objectives=[nllh_true, prior_true]),\n", + " lb=[-10, -10, 0],\n", + " ub=[10, 10, 10],\n", + " x_names=[\"mu1\", \"mu2\", \"sigma\"],\n", + " x_scales=[\"lin\", \"lin\", \"lin\"],\n", + " x_fixed_indices=[2],\n", + " x_fixed_vals=[sigma],\n", + " x_priors_defs=prior_true,\n", + ")\n", + "\n", + "mixture_problem_alt = Problem(\n", + " objective=AggregatedObjective(objectives=[nllh_alt, prior_alt]),\n", + " lb=[-10, 0],\n", + " ub=[10, 10],\n", + " x_names=[\"mu\", \"sigma\"],\n", + " x_scales=[\"lin\", \"lin\"],\n", + " x_fixed_indices=[1],\n", + " x_fixed_vals=[sigma],\n", + " x_priors_defs=prior_alt,\n", + ")" + ], + "id": "33659ff5eba21345", + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "source": [ + "# to make the code more readable, we define a dictionary with all models\n", + "# from here on, we use the pyPESTO problem objects, so the code can be reused for any other problem\n", + "models = {\n", + " 'mixture_model1': {\n", + " 'name': 'True-Model',\n", + " 'true_log_evidence': true_log_evidence_true,\n", + " 'prior_mean': np.array([-2, 2]),\n", + " 'prior_std': np.array([2, 2]),\n", + " 'prior_cov': np.diag([4, 4]),\n", + " 'true_params': true_params,\n", + " 'problem': mixture_problem_true,\n", + " },\n", + " 'mixture_model2': {\n", + " 'name': 'Alternative-Model',\n", + " 'true_log_evidence': true_log_evidence_alt,\n", + " 'prior_mean': np.array([0]),\n", + " 'prior_std': np.array([1]),\n", + " 'prior_cov': np.diag([1]),\n", + " 'problem': mixture_problem_alt,\n", + " }\n", + "}\n", + "\n", + "for m in models.values(): \n", + " # neg_log_likelihood is called with full vector, parameters might be still in log space\n", + " m['neg_log_likelihood'] = lambda x: m['problem'].objective._objectives[0](\n", + " m['problem'].get_full_vector(x=x, x_fixed_vals=m['problem'].x_fixed_vals)\n", + " ) " + ], + "metadata": { + "collapsed": false + }, + "id": "cf9af2fa37f3a0cf", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "markdown", 
+ "source": "## Methods for Computing Marginal Likelihoods", + "id": "e273503367e8bf4d" + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "# run optimization for each model\n", + "for m in models.values(): \n", + " m['results'] = optimize.minimize(problem=m['problem'], n_starts=100)\n", + " \n", + " if 'true_params' in m.keys():\n", + " visualize.parameters(\n", + " results=m['results'], reference={'x': m[\"true_params\"], 'fval': m['problem'].objective(m[\"true_params\"])})\n", + " else:\n", + " visualize.parameters(m['results'])" + ], + "id": "95ec6b53c9133332", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "### 1. Bayesian Information Criterion (BIC)\n", + "\n", + "The BIC is a simple and widely-used approximation to the marginal likelihood. It is computed as:\n", + "\n", + "$$\n", + "\\operatorname{BIC} = k \\ln(n) - 2 \\ln(\\hat{L})\n", + "$$\n", + "\n", + "where $k$ is the number of parameters, $n$ is the number of data points, and $\\hat{L}$ is the maximum likelihood estimate. $-\\frac12 \\operatorname{BIC}$ approximates the marginal likelihood under the assumption that the prior is non-informative and the sample size is large.\n", + "\n", + "\n", + "BIC is easy to compute and converges to the marginal likelihood, but it may not capture the full complexity of model selection, especially for complex models or significant prior information as the prior is completely ignored." + ], + "id": "ffd895262133fe00" + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "for m in models.values(): \n", + " m['BIC'] = len(m['problem'].x_free_indices) * np.log(n_obs) + 2 * m['neg_log_likelihood'](m['results'].optimize_result.x[0])\n", + " print(m['name'], 'BIC marginal likelihood approximation:', -1/2*m['BIC'])" + ], + "id": "1b40d72091d00e9f", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "### 2. Laplace Approximation\n", + "\n", + "The Laplace approximation estimates the marginal likelihood by approximating the posterior distribution as a Gaussian centered at the maximum a posteriori (MAP) estimate $\\hat{\\theta}$ using the Hessian of the posterior distribution. The marginal likelihood is then approximated as:\n", + "\n", + "$$\n", + "P(\\mathcal{D} \\mid \\mathcal{M}) \\approx (2\\pi)^{k/2} \\left| \\Sigma \\right|^{1/2} P(\\mathcal{D} \\mid \\hat{\\theta}, \\mathcal{M}) P(\\hat{\\theta} \\mid \\mathcal{M})\n", + "$$\n", + "\n", + "where $\\Sigma$ is the covariance matrix of the posterior distribution (unnormalized, so likelihood $\\times$ prior).\n", + "\n", + "\n", + "The Laplace approximation is accurate if the posterior is unimodal and roughly Gaussian." + ], + "id": "67cb4a7bb781d42" + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "%%time\n", + "for m in models.values(): \n", + " laplace_evidences = []\n", + " for x in m['results'].optimize_result.x:\n", + " log_evidence = sample.evidence.laplace_approximation_log_evidence(m['problem'], x)\n", + " laplace_evidences.append(log_evidence)\n", + " \n", + " m['laplace_evidences'] = np.array(laplace_evidences)\n", + " print(m['name'], f'laplace approximation: {m[\"laplace_evidences\"][0]}')" + ], + "id": "548513d76b8887dd", + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "### 3. 
Sampling-Based Methods\n", + "\n", + "Sampling-based methods, such as Markov Chain Monte Carlo (MCMC) or nested sampling, do not make assumptions about the shape of the posterior and can provide more accurate estimates of the marginal likelihood. However, they can be computationally very intensive." + ], + "metadata": { + "collapsed": false + }, + "id": "b5ac29500e0e678b" + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "\n", + "#### Arithmetic Mean Estimator\n", + "\n", + "The arithmetic mean estimator also uses samples from the prior evaluated at the likelihood function to approximate the marginal likelihood:\n", + "\n", + "$$\n", + "P(\\mathcal{D} \\mid \\mathcal{M}) \\approx \\frac{1}{N} \\sum_{i=1}^N P(\\mathcal{D} \\mid \\theta_i, \\mathcal{M})\n", + "$$\n", + "\n", + "The arithmetic mean estimator requires a large number of samples and is very inefficient. It approximates the marginal likelihood from below." + ], + "id": "212297d07ef90600" + }, + { + "cell_type": "code", + "source": [ + "%%time\n", + "for m in models.values(): \n", + " prior_sample = np.random.multivariate_normal(mean=m['prior_mean'], \n", + " cov=m['prior_cov'],\n", + " size=1000)\n", + " log_likelihoods = np.array([-m['neg_log_likelihood'](x) for x in prior_sample])\n", + " m['arithmetic_log_evidence'] = logsumexp(log_likelihoods) - np.log(log_likelihoods.size)\n", + " \n", + " print(m['name'], f'arithmetic mean: {m[\"arithmetic_log_evidence\"]}')" + ], + "metadata": { + "collapsed": false + }, + "id": "ec2f000c836abad6", + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "#### Harmonic Mean\n", + "\n", + "The harmonic mean estimator uses posterior samples to estimate the marginal likelihood:\n", + "\n", + "$$\n", + "P(\\mathcal{D} \\mid \\mathcal{M}) \\approx \\left( \\frac{1}{N} \\sum_{i=1}^N \\frac{1}{P(\\mathcal{D} \\mid \\theta_i, \\mathcal{M})} \\right)^{-1}\n", + "$$\n", + "\n", + "where $\\theta_i$ are samples from the posterior distribution.\n", + "\n", + "The harmonic mean estimator approximates the evidence from above since it tends to ignore low likelihood regions, such as those comprising the prior, leading to overestimates of the marginal likelihoods, even when asymptotically unbiased.\n", + "Moreover, the estimator can have a high variance due to evaluating the likelihood at low probability regions and inverting it.\n", + "Hence, it can be very unstable and even fail catastrophically. A more stable version, the stabilized harmonic mean, also uses samples from the prior (see [Newton and Raftery (1994)](https://doi.org/10.1111/j.2517-6161.1994.tb01956.x)). However, there more efficient methods available.\n", + "\n", + "A reliable sampling method is bridge sampling (see [\"A Tutorial on Bridge Sampling\" by Gronau et al. (2017)](https://api.semanticscholar.org/CorpusID:5447695) for a nice introduction). It uses samples from a proposal and the posterior to estimate the marginal likelihood. The proposal distribution should be chosen to have a high overlap with the posterior (we construct it from half of the posterior samples by fitting a Gaussian distribution with the same mean and covariance). This method is more stable than the harmonic mean estimator. However, its accuracy may depend on the choice of the proposal distribution.\n", + "\n", + "A different approach, the learnt harmonic mean estimator, was proposed by [McEwen et al. (2021)](https://api.semanticscholar.org/CorpusID:244709474). 
The estimator solves the large variance problem by interpreting the harmonic mean estimator as importance sampling and introducing a new target distribution, which is learned from the posterior samples. The method can be applied just using samples from the posterior and is implemented in software package accompanying the paper.\n" + ], + "metadata": { + "collapsed": false + }, + "id": "77ec3e1ec016d0d1" + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "%%time\n", + "for m in models.values(): \n", + " results = sample.sample(\n", + " problem=m['problem'], \n", + " n_samples=1000, \n", + " result=m['results'],\n", + " )\n", + " # compute harmonic mean\n", + " m['harmonic_log_evidence'] = sample.evidence.harmonic_mean_log_evidence(results)\n", + " print(m['name'], f'harmonic mean: {m[\"harmonic_log_evidence\"]}') " + ], + "id": "ba4cc742f71fad4", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "%%time\n", + "for m in models.values(): \n", + " results = sample.sample(\n", + " problem=m['problem'], \n", + " n_samples=800, \n", + " result=m['results'],\n", + " )\n", + " # compute stabilized harmonic mean\n", + " prior_samples = np.random.multivariate_normal(mean=m['prior_mean'], \n", + " cov=m['prior_cov'],\n", + " size=200)\n", + " m['harmonic_stabilized_log_evidence'] = sample.evidence.harmonic_mean_log_evidence(\n", + " result=results,\n", + " prior_samples=prior_samples,\n", + " neg_log_likelihood_fun=m['neg_log_likelihood']\n", + " )\n", + " print(m['name'], f'stabilized harmonic mean: {m[\"harmonic_stabilized_log_evidence\"]}')" + ], + "id": "a7272997b60de2e2", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "%%time\n", + "for m in models.values(): \n", + " results = sample.sample(\n", + " problem=m['problem'], \n", + " n_samples=1000, \n", + " result=m['results'],\n", + " )\n", + " m['bridge_log_evidence'] = sample.evidence.bridge_sampling_log_evidence(results)\n", + " print(m['name'], f'bridge sampling: {m[\"bridge_log_evidence\"]}')" + ], + "id": "ce38f1a4975cd72a", + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "#### Nested Sampling\n", + "\n", + "Nested sampling is specifically designed for estimating marginal likelihoods. The static nested sampler is optimized for evidence computation and provides accurate estimates but may give less accurate posterior samples unless dynamic nested sampling is used. \n", + "\n", + "Dynamic nested sampling can improve the accuracy of posterior samples. The package [dynesty](https://dynesty.readthedocs.io/en/stable/) offers a lot of hyperparameters to tune accuracy and efficiency of computing samples from the posterior vs. estimating the marginal likelihood." 
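For context, nested sampling targets the evidence directly because it rewrites the multi-dimensional integral as a one-dimensional integral over the enclosed prior mass (a standard identity, stated here only as background):

$$
P(\mathcal{D} \mid \mathcal{M}) = \int_0^1 L(X) \, dX,
$$

where $X(\lambda)$ is the prior mass of the region with likelihood above $\lambda$ and $L(X)$ its inverse. The sampler estimates this integral from the likelihood values of successively discarded live points, which is why the log evidence (`logz` in dynesty) is a primary output rather than a by-product.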
+ ], + "metadata": { + "collapsed": false + }, + "id": "443bf17c8ae27a15" + }, + { + "cell_type": "code", + "source": [ + "%%time\n", + "for m in models.values():\n", + " # define prior transformation needed for nested sampling\n", + " def prior_transform(u):\n", + " \"\"\"Transform prior sample from unit cube to normal prior.\"\"\"\n", + " t = stats.norm.ppf(u) # convert to standard normal\n", + " c_sqrt = np.linalg.cholesky(m['prior_cov']) # Cholesky decomposition\n", + " u_new = np.dot(c_sqrt, t) # correlate with appropriate covariance\n", + " u_new += m['prior_mean'] # add mean\n", + " return u_new\n", + "\n", + " # initialize nested sampler\n", + " nested_sampler = sample.DynestySampler(\n", + " #sampler_args={'nlive': 250},\n", + " run_args={'maxcall': 1000},\n", + " dynamic=False, # static nested sampler is optimized for evidence computation\n", + " prior_transform=prior_transform\n", + " )\n", + " \n", + " # run nested sampling\n", + " result_dynesty_sample = sample.sample(\n", + " problem=m['problem'],\n", + " n_samples=None,\n", + " sampler=nested_sampler\n", + " )\n", + "\n", + " # extract log evidence\n", + " m[f'nested_log_evidence'] = nested_sampler.sampler.results.logz[-1]\n", + " print(m['name'], f'nested sampling: {m[\"nested_log_evidence\"]}')" + ], + "metadata": { + "collapsed": false + }, + "id": "c0236f455dfc64d5", + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "#### Thermodynamic Integration and Steppingstone Sampling\n", + "\n", + "These methods are based on the power posterior, where the posterior is raised to a power $t$ and integrated over $t$:\n", + "\n", + "$$\n", + "P(\\mathcal{D} \\mid \\mathcal{M}) = \\int_0^1 \\frac1{Z_t} P(\\mathcal{D} \\mid \\theta, \\mathcal{M})^t P(\\theta \\mid \\mathcal{M}) \\, dt\n", + "$$\n", + "\n", + "Parallel tempering is a sampling algorithm that improves accuracy for multimodal posteriors by sampling from different temperatures simultaneously and exchanging samples between parallel chains. It can be used to sample from all power posteriors simultaneously allowing for thermodynamic integration and steppingstone sampling [(Annis et al., 2019)](https://doi.org/10.1016/j.jmp.2019.01.005). These methods can be seen as path sampling methods, hence related to bridge sampling.\n", + "\n", + "These methods can be more accurate for complex posteriors but are computationally intensive. Thermodynamic integration (TI) relies on integrating the integral over the temperature $t$, while steppingstone sampling approximates the integral with a sum over a finite number of temperatures using an importance sampling estimator. Accuracy can be improved by using more temperatures. \n", + "Errors in the estimator might come from the MCMC sampler in both cases and from numerical integration when applying TI. 
Steppingstone sampling can be a biased estimator for a small number of temperatures [(Annis et al., 2019)](https://doi.org/10.1016/j.jmp.2019.01.005).\n" + ], + "metadata": { + "collapsed": false + }, + "id": "dcb16e2efcf4bf0d" + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "%%time\n", + "for m in models.values():\n", + " # initialize parallel tempering sampler\n", + " ti_sampler = sample.ParallelTemperingSampler( # not adaptive, since we want fixed temperatures\n", + " internal_sampler=sample.AdaptiveMetropolisSampler(),\n", + " n_chains=10\n", + " )\n", + "\n", + " # run mcmc with parallel tempering\n", + " result_ti = sample.sample(\n", + " problem=m['problem'], \n", + " n_samples=1000, \n", + " sampler=ti_sampler,\n", + " result=m['results']\n", + " ) \n", + " # compute log evidence via thermodynamic integration\n", + " m['thermodynamic_log_evidence'] = sample.evidence.parallel_tempering_log_evidence(result_ti)\n", + " print(m['name'], f'thermodynamic integration: {m[\"thermodynamic_log_evidence\"]}')\n", + " \n", + " # compute log evidence via steppingstone sampling\n", + " m['steppingstone_log_evidence'] = sample.evidence.parallel_tempering_log_evidence(result_ti, method='steppingstone')\n", + " print(m['name'], f'steppingstone sampling: {m[\"steppingstone_log_evidence\"]}')" + ], + "id": "13059e00c982d98d", + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "#### Variational Inference\n", + "\n", + "Variational inference approximates the posterior with a simpler distribution and can be faster than sampling methods for large problems. The marginal likelihood can be estimated using similar approaches as before, but the accuracy is limited by the choice of the variational family.\n", + "\n", + "Variational inference optimization is based on the Evidence Lower Bound (ELBO), providing an additional check for the estimator." 
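Why the ELBO provides such a check can be made explicit (a standard identity, included only for clarity): for any variational density $q(\theta)$,

$$
\log P(\mathcal{D} \mid \mathcal{M})
= \underbrace{\mathbb{E}_{q(\theta)}\!\left[\log \frac{P(\mathcal{D} \mid \theta, \mathcal{M}) \, P(\theta \mid \mathcal{M})}{q(\theta)}\right]}_{\operatorname{ELBO}(q)}
+ \operatorname{KL}\big(q(\theta) \,\|\, P(\theta \mid \mathcal{D}, \mathcal{M})\big)
\;\geq\; \operatorname{ELBO}(q),
$$

since the KL divergence is non-negative. Any evidence estimate obtained from the variational approximation can therefore be sanity-checked against the maximized ELBO, as done in the code below.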
+ ], + "metadata": { + "collapsed": false + }, + "id": "90fd0f80a9d94b7d" + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "%%time\n", + "for m in models.values():\n", + " \n", + " # one could define callbacks to check convergence during optimization\n", + " # import pymc as pm\n", + " # cb = [\n", + " # pm.callbacks.CheckParametersConvergence(\n", + " # tolerance=1e-3, diff='absolute'),\n", + " # pm.callbacks.CheckParametersConvergence(\n", + " # tolerance=1e-3, diff='relative'),\n", + " # ]\n", + "\n", + " pypesto_variational_result = variational.variational_fit(\n", + " problem=m['problem'],\n", + " method='advi',\n", + " n_iterations=10000,\n", + " n_samples=None,\n", + " result=m['results'],\n", + " #callbacks=cb,\n", + " )\n", + " \n", + " # negative elbo, this is bound to the evidence (optimization criterion)\n", + " vi_lower_bound = np.max(-pypesto_variational_result.variational_result.data.hist)\n", + " \n", + " # compute harmonic mean from posterior samples\n", + " approx_sample = pypesto_variational_result.variational_result.sample(1000)['trace_x'][0]\n", + " neg_log_likelihoods = np.array([m['neg_log_likelihood'](ps) for ps in approx_sample])\n", + " m['vi_harmonic_log_evidences'] = -logsumexp(neg_log_likelihoods) + np.log(\n", + " neg_log_likelihoods.size\n", + " )\n", + " print(m['name'], f'harmonic mean with variational inference: {m[\"vi_harmonic_log_evidences\"]}')\n", + " print(\"Evidence lower bound:\", vi_lower_bound)\n", + " \n", + " # evidence cannot be smaller than the lower bound\n", + " m[\"vi_harmonic_log_evidences\"] = max(m[\"vi_harmonic_log_evidences\"], vi_lower_bound)" + ], + "id": "c616b8a566478d0d", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "## Comparison", + "id": "5e6c53b1a6414210" + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "labels = [\n", + " '-1/2 BIC',\n", + " 'Arithmetic Mean',\n", + " 'Laplace', \n", + " 'Harmonic Mean', \n", + " 'Stabilized\\nHarmonic Mean',\n", + " 'Bridge Sampling',\n", + " 'Nested Sampling',\n", + " 'Thermodynamic\\nIntegration',\n", + " 'Steppingstone\\nSampling',\n", + " 'Variational Inference\\nHarmonic Mean'\n", + "]\n", + "\n", + "bayes_factors = [\n", + " -1/2*models['mixture_model1']['BIC']+1/2*models['mixture_model2']['BIC'],\n", + " models['mixture_model1']['arithmetic_log_evidence']-models['mixture_model2']['arithmetic_log_evidence'],\n", + " models['mixture_model1']['laplace_evidences'][0]-models['mixture_model2']['laplace_evidences'][0],\n", + " models['mixture_model1']['harmonic_log_evidence']-models['mixture_model2']['harmonic_log_evidence'],\n", + " models['mixture_model1']['harmonic_stabilized_log_evidence']-models['mixture_model2']['harmonic_stabilized_log_evidence'],\n", + " models['mixture_model1']['bridge_log_evidence']-models['mixture_model2']['bridge_log_evidence'],\n", + " models['mixture_model1']['nested_log_evidence']-models['mixture_model2']['nested_log_evidence'],\n", + " models['mixture_model1']['thermodynamic_log_evidence']-models['mixture_model2']['thermodynamic_log_evidence'],\n", + " models['mixture_model1']['steppingstone_log_evidence']-models['mixture_model2']['steppingstone_log_evidence'],\n", + " models['mixture_model1']['vi_harmonic_log_evidences']-models['mixture_model2']['vi_harmonic_log_evidences']\n", + "]\n", + "\n", + "true_bf = models['mixture_model1']['true_log_evidence'] - models['mixture_model2']['true_log_evidence']" + ], + "id": "fbb5a071645523d4", + "outputs": [], + 
"execution_count": null + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "fig, ax = plt.subplots(2, 1, tight_layout=True, sharex=True, figsize=(6, 6))\n", + "colors = ['blue', 'orange']\n", + "\n", + "for i, m in enumerate(models.values()):\n", + " m['log_evidences'] = np.array([\n", + " -1/2*m['BIC'],\n", + " m['arithmetic_log_evidence'],\n", + " m['laplace_evidences'][0],\n", + " m['harmonic_log_evidence'],\n", + " m['harmonic_stabilized_log_evidence'],\n", + " m['bridge_log_evidence'],\n", + " m['nested_log_evidence'],\n", + " m['thermodynamic_log_evidence'],\n", + " m['steppingstone_log_evidence'],\n", + " m['vi_harmonic_log_evidences']\n", + " ])\n", + " ax[0].scatter(x=np.arange(m['log_evidences'].size), y=m['log_evidences'], color=colors[i], label=m['name']) \n", + " ax[0].axhline(m['true_log_evidence'], color=colors[i], alpha=0.75, label=f'True evidence of {m[\"name\"]}')\n", + " \n", + " m['error'] = (np.exp(m['log_evidences']) - np.exp(m['true_log_evidence']))**2\n", + "mean_error = np.sum(np.array([m['error'] for m in models.values()]), axis=0)\n", + "ax[1].scatter(x=np.arange(len(labels)), y=mean_error) \n", + "\n", + "ax[1].set_xlabel('Estimator')\n", + "ax[0].set_title(f'Comparison of different evidence estimators')\n", + "ax[0].set_ylabel('Ln Evidence') \n", + "ax[1].set_ylabel('Squared Error of Evidence\\nsum of both models') \n", + "ax[1].set_yscale('log')\n", + "ax[1].set_xticks(ticks=np.arange(len(labels)), labels=labels, rotation=60)\n", + "fig.legend(ncols=1, loc='center right', bbox_to_anchor=(1.5, 0.7))\n", + "plt.show()" + ], + "id": "30fea0ed78548d6b", + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "source": [ + "fig, ax = plt.subplots(1, 1, tight_layout=True, figsize=(6, 5))\n", + "ax.axhline(true_bf, linestyle='-', color='r', label='True Bayes Factor')\n", + "plt.scatter(x=np.arange(len(bayes_factors)), y=bayes_factors, label='Estimates')\n", + "\n", + "# add decision thresholds\n", + "c = lambda x: np.log(np.power(10, x)) # usually defined in log10, convert to ln\n", + "ax.axhline(c(0), color='red', linestyle='--', label='\"Not worth more than a bare mention\"')\n", + "ax.axhline(c(0.5), color='orange', linestyle='--', label='\"Substantial\"')\n", + "ax.axhline(c(1), color='yellow', linestyle='--', label='\"Strong\"')\n", + "ax.axhline(c(2), color='green', linestyle='--', label='\"Decisive\"')\n", + "\n", + "ax.set_ylabel('ln Bayes Factor')\n", + "ax.set_xlabel('Estimator')\n", + "ax.set_title(f'Bayes Factor of {models[\"mixture_model1\"][\"name\"]} vs. {models[\"mixture_model2\"][\"name\"]}')\n", + "plt.xticks(ticks=np.arange(len(bayes_factors)), labels=labels, rotation=60)\n", + "fig.legend(ncols=1, loc='center right', bbox_to_anchor=(1.5, 0.7))\n", + "plt.show()" + ], + "metadata": { + "collapsed": false + }, + "id": "5d6590690b5c7a30", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "We recommend to use either bridge sampling, nested sampling or one of the methods using power posteriors depending on the computational resources available. \n", + "\n", + "Bayes factors and marginal likelihoods are powerful tools for Bayesian model comparison. While there are various methods to compute marginal likelihoods, each has its strengths and weaknesses. Choosing the appropriate method depends on the specific context, the complexity of the models, and the computational resources available." 
+ ], + "id": "6cbfd915823d6989" + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.16" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/pypesto/C.py b/pypesto/C.py index a3c36af31..fe36c99b2 100644 --- a/pypesto/C.py +++ b/pypesto/C.py @@ -265,6 +265,9 @@ class InnerParameterType(str, Enum): "exponential_decay" # temperature schedule for parallel tempering ) BETA_DECAY = "beta_decay" # temperature schedule for parallel tempering +TRAPEZOID = "trapezoid" # method to compute log evidence +SIMPSON = "simpson" # method to compute log evidence +STEPPINGSTONE = "steppingstone" # method to compute log evidence ############################################################################### # PREDICT diff --git a/pypesto/sample/__init__.py b/pypesto/sample/__init__.py index cb5fe4058..96529a319 100644 --- a/pypesto/sample/__init__.py +++ b/pypesto/sample/__init__.py @@ -11,6 +11,12 @@ from .diagnostics import auto_correlation, effective_sample_size, geweke_test from .dynesty import DynestySampler from .emcee import EmceeSampler +from .evidence import ( + bridge_sampling_log_evidence, + harmonic_mean_log_evidence, + laplace_approximation_log_evidence, + parallel_tempering_log_evidence, +) from .metropolis import MetropolisSampler from .parallel_tempering import ParallelTemperingSampler from .sample import sample diff --git a/pypesto/sample/adaptive_parallel_tempering.py b/pypesto/sample/adaptive_parallel_tempering.py index 1738c6ce7..a2966b46d 100644 --- a/pypesto/sample/adaptive_parallel_tempering.py +++ b/pypesto/sample/adaptive_parallel_tempering.py @@ -5,7 +5,6 @@ import numpy as np from ..C import EXPONENTIAL_DECAY -from ..result import Result from .parallel_tempering import ParallelTemperingSampler @@ -65,20 +64,3 @@ def adjust_betas(self, i_sample: int, swapped: Sequence[bool]): # fill in self.betas = betas - - def compute_log_evidence( - self, result: Result, method: str = "trapezoid" - ) -> float: - """Perform thermodynamic integration to estimate the log evidence. - - Parameters - ---------- - result: - Result object containing the samples. - method: - Integration method, either 'trapezoid' or 'simpson' (uses scipy for integration). - """ - raise NotImplementedError( - "Thermodynamic integration is not implemented for adaptive parallel tempering, " - "since the temperature schedule is adapted during the sampling process." - ) diff --git a/pypesto/sample/diagnostics.py b/pypesto/sample/diagnostics.py index c4e1fe109..223acf8fc 100644 --- a/pypesto/sample/diagnostics.py +++ b/pypesto/sample/diagnostics.py @@ -33,6 +33,14 @@ def geweke_test( Iteration where the first and the last fraction of the chain do not differ significantly regarding Geweke test -> Burn-In """ + if chain_number == 0: + # test if burn-in is already calculated + if result.sample_result.burn_in is not None: + logger.info( + f"Burn-in index ({result.sample_result.burn_in}) already calculated. Skipping Geweke test." 
+ ) + return result.sample_result.burn_in + # Get parameter samples as numpy arrays chain = np.asarray(result.sample_result.trace_x[chain_number]) diff --git a/pypesto/sample/dynesty.py b/pypesto/sample/dynesty.py index 89a14ba24..c70e92019 100644 --- a/pypesto/sample/dynesty.py +++ b/pypesto/sample/dynesty.py @@ -75,6 +75,7 @@ def __init__( run_args: dict = None, dynamic: bool = True, objective_type: str = OBJECTIVE_NEGLOGPOST, + prior_transform: callable = None, ): """ Initialize sampler. @@ -95,6 +96,9 @@ def __init__( `pypesto.C.OBJECTIVE_NEGLOGPOST`. If `pypesto.C.OBJECTIVE_NEGLOGPOST`, then `x_priors` have to be defined in the problem. + prior_transform: + A function converting a sample from the unit cube to actual prior. If not provided, the default + `prior_transform` function is used, which assumes uniform priors. """ if importlib.util.find_spec("dynesty") is None: raise SamplerImportError("dynesty") @@ -118,17 +122,41 @@ def __init__( ) self.objective_type = objective_type + if prior_transform is None: + # if priors are uniform, we can use the default prior transform (assuming that bounds are set correctly) + logger.warning( + "Assuming 'prior_transform' is correctly specified. If 'x_priors' is not uniform, 'prior_transform'" + " has to be adjusted accordingly." + ) + self.prior_transform = self.prior_transform_from_uniform + else: + self.prior_transform = prior_transform + # set in initialize self.problem: Problem | None = None self.sampler: ( dynesty.DynamicNestedSampler | dynesty.NestedSampler | None ) = None - def prior_transform(self, prior_sample: np.ndarray) -> np.ndarray: - """Transform prior sample from unit cube to pyPESTO prior. + def loglikelihood(self, x): + """Log-probability density function.""" + # check if parameter lies within bounds + if any(x < self.problem.lb) or any(x > self.problem.ub): + return -np.inf + # invert sign + if self.objective_type == OBJECTIVE_NEGLOGPOST: + # problem.objective returns negative log-posterior + # compute log-likelihood by subtracting log-prior + return -1.0 * ( + self.problem.objective(x) - self.problem.x_priors(x) + ) + # problem.objective returns negative log-likelihood + return -1.0 * self.problem.objective(x) - TODO support priors that are not uniform. - raise warning in `self.initialize` for now. + def prior_transform_from_uniform( + self, prior_sample: np.ndarray + ) -> np.ndarray: + """Transform prior sample from unit cube to pyPESTO prior. Parameters ---------- @@ -144,21 +172,6 @@ def prior_transform(self, prior_sample: np.ndarray) -> np.ndarray: + self.problem.lb ) - def loglikelihood(self, x): - """Log-probability density function.""" - # check if parameter lies within bounds - if any(x < self.problem.lb) or any(x > self.problem.ub): - return -np.inf - # invert sign - if self.objective_type == OBJECTIVE_NEGLOGPOST: - # problem.objective returns negative log-posterior - # compute log-likelihood by subtracting log-prior - return -1.0 * ( - self.problem.objective(x) - self.problem.x_priors(x) - ) - # problem.objective returns negative log-likelihood - return -1.0 * self.problem.objective(x) - def initialize( self, problem: Problem, @@ -187,12 +200,6 @@ def initialize( f"'x_priors' defined in the problem will be ignored." ) - # if priors are uniform, we can use the default prior transform (assuming that bounds are set correctly) - logger.warning( - "Assuming 'prior_transform' is correctly specified. If 'x_priors' is not uniform, 'prior_transform'" - " has to be adjusted accordingly." 
- ) - # initialize sampler self.sampler = sampler_class( loglikelihood=self.loglikelihood, diff --git a/pypesto/sample/evidence.py b/pypesto/sample/evidence.py new file mode 100644 index 000000000..07f1f7c9f --- /dev/null +++ b/pypesto/sample/evidence.py @@ -0,0 +1,466 @@ +"""Various methods for estimating the log evidence of a model.""" + + +import logging +from typing import Optional, Union + +import numpy as np +from scipy import stats +from scipy.integrate import simpson, trapezoid +from scipy.optimize import minimize_scalar +from scipy.special import logsumexp + +from ..C import SIMPSON, STEPPINGSTONE, TRAPEZOID +from ..objective import ( + AggregatedObjective, + NegLogParameterPriors, + NegLogPriors, +) +from ..problem import Problem +from ..result import Result +from .diagnostics import geweke_test + +logger = logging.getLogger(__name__) + + +def laplace_approximation_log_evidence( + problem: Problem, x: np.ndarray +) -> float: + """ + Compute the log evidence using the Laplace approximation. + + The objective in your `problem` must be a negative log posterior, and support Hessian computation. + + Parameters + ---------- + problem: + The problem to compute the log evidence for. + x: + The maximum a posteriori estimate at which to compute the log evidence. + + Returns + ------- + log_evidence: float + """ + hessian = problem.objective( + problem.get_reduced_vector(x), sensi_orders=(2,) + ) + _, log_det = np.linalg.slogdet(hessian) + log_prop_posterior = -problem.objective(problem.get_reduced_vector(x)) + log_evidence = ( + 0.5 * np.log(2 * np.pi) * len(problem.x_free_indices) + - 0.5 * log_det + + log_prop_posterior + ) + return log_evidence + + +def harmonic_mean_log_evidence( + result: Result, + prior_samples: Optional[np.ndarray] = None, + neg_log_likelihood_fun: Optional[callable] = None, +) -> float: + """ + Compute the log evidence using the harmonic mean estimator. + + Stabilized harmonic mean estimator is used if prior samples are provided. + Newton and Raftery (1994): https://doi.org/10.1111/j.2517-6161.1994.tb01956.x + + Parameters + ---------- + result: + prior_samples: + Samples from the prior distribution. If samples from the prior are provided, + the stabilized harmonic mean is computed (recommended). Then, the likelihood function must be provided as well. + neg_log_likelihood_fun: callable + Function to evaluate the negative log likelihood. Necessary if prior_samples is not `None`. + + Returns + ------- + log_evidence + """ + if result.sample_result is None: + raise ValueError("No samples available. 
Run sampling first.") + + # compute negative log likelihood from traces + burn_in = geweke_test(result) + trace_neglogpost = result.sample_result.trace_neglogpost[0, burn_in:] + trace_neglogprior = result.sample_result.trace_neglogprior[0, burn_in:] + neg_log_likelihoods_posterior = trace_neglogpost - trace_neglogprior + + if prior_samples is None: + # compute harmonic mean from samples + return -logsumexp(neg_log_likelihoods_posterior) + np.log( + neg_log_likelihoods_posterior.size + ) + + # compute stabilized harmonic mean + if prior_samples is not None and neg_log_likelihood_fun is None: + raise ValueError( + "you need to provide a likelihood function to evaluate prior samples" + ) + + # compute delta (ratio of prior to posterior samples) + n_samples_prior = len(prior_samples) + n_samples_posterior = len(trace_neglogpost) + delta = n_samples_prior / (n_samples_prior + n_samples_posterior) + neg_log_likelihoods_prior = np.array( + [neg_log_likelihood_fun(x) for x in prior_samples] + ) + log_likelihoods_stack = -np.concatenate( + [neg_log_likelihoods_prior, neg_log_likelihoods_posterior] + ) + + def _log_evidence_objective(log_p: float): + # Helper function to compute the log evidence with stabilized harmonic mean + log_w_i = logsumexp( + np.stack( + ( + log_p * np.ones_like(log_likelihoods_stack), + log_likelihoods_stack, + ), + axis=1, + ), + b=np.array([delta, 1 - delta]), + axis=1, + ) + res, sign = logsumexp( + [ + log_p, + logsumexp(log_likelihoods_stack - log_w_i) + - logsumexp(-log_w_i), + ], + b=[1, -1], + return_sign=True, + ) + return sign * res + + sol = minimize_scalar(_log_evidence_objective) + return sol.x + + +def parallel_tempering_log_evidence( + result: Result, + method: str = "trapezoid", + use_all_chains: bool = True, +) -> Union[float, None]: + """Perform thermodynamic integration or steppingstone sampling to estimate the log evidence. + + Thermodynamic integration is performed by integrating the mean log likelihood over the temperatures. + Errors might come from the samples itself or the numerical integration. + Steppingstone sampling is a form of importance sampling that uses the maximum likelihood of each temperature. + It does not require an integration, but can be biased for a small number of temperatures. + See (Annis et al., 2019), https://doi.org/10.1016/j.jmp.2019.01.005, for more details. + + This should be used with a beta decay temperature schedule and not with the adaptive version of + parallel tempering sampling as the temperature schedule is not optimal for thermodynamic integration. + + Parameters + ---------- + result: + Result object containing the samples. + method: + Integration method, either 'trapezoid' or 'simpson' to perform thermodynamic integration + (uses scipy for integration) or 'steppingstone' to perform steppingstone sampling. + use_all_chains: + If True, calculate burn-in for each chain and use the maximal burn-in for all chains for the integration. + This will fail if not all chains have converged yet. + Otherwise, use only the converged chains for the integration (might increase the integration error). 
+ """ + # compute burn in for all chains but the last one (prior only) + burn_ins = np.zeros(len(result.sample_result.betas), dtype=int) + for i_chain in range(len(result.sample_result.betas)): + burn_ins[i_chain] = geweke_test(result, chain_number=i_chain) + max_burn_in = int(np.max(burn_ins)) + + if max_burn_in >= result.sample_result.trace_x.shape[1]: + logger.warning( + f"At least {np.sum(burn_ins >= result.sample_result.trace_x.shape[1])} chains seem to not have " + f"converged yet. You may want to use a larger number of samples." + ) + if use_all_chains: + raise ValueError( + "Not all chains have converged yet. You may want to use a larger number of samples, " + "or try ´use_all_chains=False´, which might increase the integration error." + ) + + if use_all_chains: + # estimate mean of log likelihood for each beta + trace_loglike = ( + result.sample_result.trace_neglogprior[::-1, max_burn_in:] + - result.sample_result.trace_neglogpost[::-1, max_burn_in:] + ) + mean_loglike_per_beta = np.mean(trace_loglike, axis=1) + temps = result.sample_result.betas[::-1] + else: + # estimate mean of log likelihood for each beta if chain has converged + mean_loglike_per_beta = [] + trace_loglike = [] + temps = [] + for i_chain in reversed(range(len(result.sample_result.betas))): + if burn_ins[i_chain] < result.sample_result.trace_x.shape[1]: + # save temperature-chain as it is converged + temps.append(result.sample_result.betas[i_chain]) + # calculate mean log likelihood for each beta + trace_loglike_i = ( + result.sample_result.trace_neglogprior[ + i_chain, burn_ins[i_chain] : + ] + - result.sample_result.trace_neglogpost[ + i_chain, burn_ins[i_chain] : + ] + ) + trace_loglike.append(trace_loglike_i) + mean_loglike_per_beta.append(np.mean(trace_loglike_i)) + + if method == TRAPEZOID: + log_evidence = trapezoid( + # integrate from low to high temperature + y=mean_loglike_per_beta, + x=temps, + ) + elif method == SIMPSON: + log_evidence = simpson( + # integrate from low to high temperature + y=mean_loglike_per_beta, + x=temps, + ) + elif method == STEPPINGSTONE: + log_evidence = steppingstone(temps=temps, trace_loglike=trace_loglike) + else: + raise ValueError( + f"Unknown method {method}. Choose 'trapezoid', 'simpson' for thermodynamic integration or ", + "'steppingstone' for steppingstone sampling.", + ) + + return log_evidence + + +def steppingstone(temps: np.ndarray, trace_loglike: np.ndarray) -> float: + """Perform steppingstone sampling to estimate the log evidence. + + Implementation based on Annis et al. (2019): https://doi.org/10.1016/j.jmp.2019.01.005. + + Parameters + ---------- + temps: + Temperature values. + trace_loglike: + Log likelihood values for each temperature. + """ + from scipy.special import logsumexp + + ss_log_evidences = np.zeros(len(temps) - 1) + for t_i in range(1, len(temps)): + # we use the maximum likelihood times the temperature difference to stabilize the logsumexp + # original formulation uses only the maximum likelihood, this is equivalent + ss_log_evidences[t_i - 1] = logsumexp( + trace_loglike[t_i - 1] * (temps[t_i] - temps[t_i - 1]) + ) - np.log(trace_loglike[t_i - 1].size) + log_evidence = np.sum(ss_log_evidences) + return log_evidence + + +def bridge_sampling_log_evidence( + result: Result, + n_posterior_samples_init: Optional[int] = None, + initial_guess_log_evidence: Optional[float] = None, + max_iter: int = 1000, + tol: float = 1e-6, +) -> float: + """ + Compute the log evidence using bridge sampling. 
+ + Based on "A Tutorial on Bridge Sampling" by Gronau et al. (2017): https://doi.org/10.1016/j.jmp.2017.09.005. + Using the optimal bridge function by Meng and Wong (1996) which minimises the relative mean-squared error. + Proposal function is calibrated using posterior samples, which are not used for the final bridge estimate + (as this may result in an underestimation of the marginal likelihood, see Overstall and Forster (2010)). + + Parameters + ---------- + result: + The pyPESTO result object with filled sample result. + n_posterior_samples_init: + Number of samples used to calibrate the proposal function. By default, half of the posterior samples are used. + initial_guess_log_evidence: + Initial guess for the log evidence. By default, the Laplace approximation is used to compute the initial guess. + max_iter: + Maximum number of iterations. Default is 1000. + tol: + Tolerance for convergence. Default is 1e-6. + + + Returns + ------- + log_evidence + """ + if result.sample_result is None: + raise ValueError("No samples available. Run sampling first.") + if not isinstance(result.problem.objective, AggregatedObjective): + raise ValueError("Objective must be an AggregatedObjective.") + + # use Laplace approximation to get initial guess for p(y) + if initial_guess_log_evidence is None: + initial_guess_log_evidence = laplace_approximation_log_evidence( + problem=result.problem, x=result.optimize_result.x[0] + ) + # extract posterior samples + burn_in = geweke_test(result) + posterior_samples = result.sample_result.trace_x[0, burn_in:] + + # build proposal function from posterior samples + if n_posterior_samples_init is None: + n_posterior_samples_init = int(posterior_samples.shape[0] * 0.5) + # randomly select samples for calibration + calibration_index = np.random.choice( + np.arange(posterior_samples.shape[0]), + n_posterior_samples_init, + replace=False, + ) + samples_calibration = posterior_samples[calibration_index] + # remove calibration samples from posterior samples + posterior_samples = posterior_samples[ + [ + j + for j in range(posterior_samples.shape[0]) + if j not in calibration_index + ] + ] + # generate proposal samples and define proposal function + n_proposal_samples = posterior_samples.shape[0] + posterior_mean = np.mean(samples_calibration, axis=0) + posterior_cov = np.cov(samples_calibration.T) + # if covariance matrix is not positive definite (numerically), use diagonal covariance matrix only + try: + # proposal density function + log_proposal_fun = stats.multivariate_normal( + mean=posterior_mean, cov=posterior_cov + ).logpdf + except np.linalg.LinAlgError: + posterior_cov = np.diag(np.diag(posterior_cov)) + log_proposal_fun = stats.multivariate_normal( + mean=posterior_mean, cov=posterior_cov + ).logpdf + + # generate proposal samples + if posterior_cov.size == 1: + # univariate case + proposal_samples = np.random.normal( + loc=posterior_mean, + scale=np.sqrt(posterior_cov), + size=n_proposal_samples, + ) + proposal_samples = proposal_samples.reshape(-1, 1) + else: + # multivariate case + proposal_samples = np.random.multivariate_normal( + mean=posterior_mean, cov=posterior_cov, size=n_proposal_samples + ) + + # Compute the weights for the bridge sampling estimate + log_s1 = np.log( + posterior_samples.shape[0] + / (posterior_samples.shape[0] + n_proposal_samples) + ) + log_s2 = np.log( + n_proposal_samples / (posterior_samples.shape[0] + n_proposal_samples) + ) + + # Start with the initial guess for p(y) + log_p_y = initial_guess_log_evidence + + # Compute the 
log-likelihood, log-prior, and log-proposal for the posterior and proposal samples + # assumes that the objective function is the negative log-likelihood + negative log-prior + + # get index of prior in the objective function + likelihood_fun_indices = [] + for i, obj in enumerate(result.problem.objective._objectives): + if not isinstance(obj, NegLogParameterPriors) and not isinstance( + obj, NegLogPriors + ): + likelihood_fun_indices.append(i) + + def log_likelihood_fun(x_array): + return np.array( + [ + np.sum( + [ + -obj( + result.problem.get_full_vector( + x=x, x_fixed_vals=result.problem.x_fixed_vals + ) + ) + for obj_i, obj in enumerate( + result.problem.objective._objectives + ) + if obj_i in likelihood_fun_indices + ] + ) + for x in x_array + ] + ) + + def log_prior_fun(x_array): + return np.array( + [ + np.sum( + [ + -obj( + result.problem.get_full_vector( + x=x, x_fixed_vals=result.problem.x_fixed_vals + ) + ) + for obj_i, obj in enumerate( + result.problem.objective._objectives + ) + if obj_i not in likelihood_fun_indices + ] + ) + for x in x_array + ] + ) + + log_likelihood_posterior = log_likelihood_fun(posterior_samples) + log_prior_posterior = log_prior_fun(posterior_samples) + log_proposal_posterior = log_proposal_fun(posterior_samples) + + log_likelihood_proposal = log_likelihood_fun(proposal_samples) + log_prior_proposal = log_prior_fun(proposal_samples) + log_proposal_proposal = log_proposal_fun(proposal_samples) + + log_h_posterior_1 = log_s1 + log_likelihood_posterior + log_prior_posterior + log_h_proposal_1 = log_s1 + log_likelihood_proposal + log_prior_proposal + for i in range(max_iter): + # Compute h(θ) for posterior samples + log_h_posterior_2 = log_s2 + log_p_y + log_proposal_posterior + log_h_posterior = logsumexp([log_h_posterior_1, log_h_posterior_2]) + + # Compute h(θ) for proposal samples + log_h_proposal_2 = log_s2 + log_p_y + log_proposal_proposal + log_h_proposal = logsumexp([log_h_proposal_1, log_h_proposal_2]) + + # Calculate the numerator and denominator for the bridge sampling estimate + temp = log_likelihood_proposal + log_prior_proposal + log_h_proposal + log_numerator = logsumexp(temp) - np.log( + temp.size + ) # compute mean in log space + temp = log_proposal_posterior + log_h_posterior + log_denominator = logsumexp(temp) - np.log( + temp.size + ) # compute mean in log space + + # Update p(y) + log_p_y_new = log_numerator - log_denominator + + # Check for convergence + if abs(log_p_y_new - log_p_y) < tol: + break + + log_p_y = log_p_y_new + + if i == max_iter - 1: + logger.warning( + "Bridge sampling did not converge in the given number of iterations." + ) + + return log_p_y diff --git a/pypesto/sample/parallel_tempering.py b/pypesto/sample/parallel_tempering.py index 306774c46..6e81777f1 100644 --- a/pypesto/sample/parallel_tempering.py +++ b/pypesto/sample/parallel_tempering.py @@ -7,9 +7,8 @@ from ..C import BETA_DECAY, EXPONENTIAL_DECAY from ..problem import Problem -from ..result import McmcPtResult, Result +from ..result import McmcPtResult from ..util import tqdm -from .diagnostics import geweke_test from .sampler import InternalSampler, Sampler logger = logging.getLogger(__name__) @@ -178,96 +177,6 @@ def swap_samples(self) -> Sequence[bool]: def adjust_betas(self, i_sample: int, swapped: Sequence[bool]): """Adjust temperature values. 
Default: Do nothing.""" - def compute_log_evidence( - self, - result: Result, - method: str = "trapezoid", - use_all_chains: bool = True, - ) -> Union[float, None]: - """Perform thermodynamic integration to estimate the log evidence. - - Parameters - ---------- - result: - Result object containing the samples. - method: - Integration method, either 'trapezoid' or 'simpson' (uses scipy for integration). - use_all_chains: - If True, calculate burn-in for each chain and use the maximal burn-in for all chains for the integration. - This will fail if not all chains have converged yet. - Otherwise, use only the converged chains for the integration (might increase the integration error). - """ - from scipy.integrate import simpson, trapezoid - - if self.options["beta_init"] == EXPONENTIAL_DECAY: - logger.warning( - "The temperature schedule is not optimal for thermodynamic integration. " - f"Carefully check the results. Consider using beta_init='{BETA_DECAY}' for better results." - ) - - # compute burn in for all chains but the last one (prior only) - burn_ins = np.zeros(len(self.betas), dtype=int) - for i_chain in range(len(self.betas)): - burn_ins[i_chain] = geweke_test(result, chain_number=i_chain) - max_burn_in = int(np.max(burn_ins)) - - if max_burn_in >= result.sample_result.trace_x.shape[1]: - logger.warning( - f"At least {np.sum(burn_ins >= result.sample_result.trace_x.shape[1])} chains seem to not have " - f"converged yet. You may want to use a larger number of samples." - ) - if use_all_chains: - raise ValueError( - "Not all chains have converged yet. You may want to use a larger number of samples, " - "or try ´use_all_chains=False´, which might increase the integration error." - ) - - if use_all_chains: - # estimate mean of log likelihood for each beta - trace_loglike = ( - result.sample_result.trace_neglogprior[::-1, max_burn_in:] - - result.sample_result.trace_neglogpost[::-1, max_burn_in:] - ) - mean_loglike_per_beta = np.mean(trace_loglike, axis=1) - temps = self.betas[::-1] - else: - # estimate mean of log likelihood for each beta if chain has converged - mean_loglike_per_beta = [] - temps = [] - for i_chain in reversed(range(len(self.betas))): - if burn_ins[i_chain] < result.sample_result.trace_x.shape[1]: - # save temperature-chain as it is converged - temps.append(self.betas[i_chain]) - # calculate mean log likelihood for each beta - trace_loglike_i = ( - result.sample_result.trace_neglogprior[ - i_chain, burn_ins[i_chain] : - ] - - result.sample_result.trace_neglogpost[ - i_chain, burn_ins[i_chain] : - ] - ) - mean_loglike_per_beta.append(np.mean(trace_loglike_i)) - - if method == "trapezoid": - log_evidence = trapezoid( - # integrate from low to high temperature - y=mean_loglike_per_beta, - x=temps, - ) - elif method == "simpson": - log_evidence = simpson( - # integrate from low to high temperature - y=mean_loglike_per_beta, - x=temps, - ) - else: - raise ValueError( - f"Unknown method {method}. Choose 'trapezoid' or 'simpson'." - ) - - return log_evidence - def beta_decay_betas(n_chains: int, alpha: float) -> np.ndarray: """Initialize betas to the (j-1)th quantile of a Beta(alpha, 1) distribution. diff --git a/pypesto/sample/util.py b/pypesto/sample/util.py index 30e322659..2824aaeee 100644 --- a/pypesto/sample/util.py +++ b/pypesto/sample/util.py @@ -25,6 +25,8 @@ def calculate_ci_mcmc_sample( The pyPESTO result object with filled sample result. ci_level: Lower tail probability, defaults to 95% interval. + exclude_burn_in: + Whether to exclude the burn-in samples. 
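Editorial note: for context on the `exclude_burn_in` flag documented above, a percentile credible interval is usually computed after discarding the burn-in portion of a trace. A minimal sketch (not part of the patch; the helper name is made up):

import numpy as np


def percentile_ci(trace: np.ndarray, ci_level: float = 0.95, burn_in: int = 0):
    # drop burn-in samples, then take the central ci_level interval
    samples = trace[burn_in:]
    tail = 100.0 * (1.0 - ci_level) / 2.0
    return np.percentile(samples, [tail, 100.0 - tail])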
Returns ------- diff --git a/test/optimize/test_optimize.py b/test/optimize/test_optimize.py index 38ee80612..3a9098662 100644 --- a/test/optimize/test_optimize.py +++ b/test/optimize/test_optimize.py @@ -24,7 +24,9 @@ SacessOptimizer, get_default_ess_options, ) -from pypesto.optimize.util import assign_ids +from pypesto.optimize.util import ( + assign_ids, +) from pypesto.store import read_result from ..base.test_x_fixed import create_problem diff --git a/test/run_notebook.sh b/test/run_notebook.sh index 297c2a9d3..693baaa73 100755 --- a/test/run_notebook.sh +++ b/test/run_notebook.sh @@ -38,6 +38,7 @@ nbs_2=( 'sampler_study.ipynb' 'sampling_diagnostics.ipynb' 'model_selection.ipynb' + 'model_evidence_and_bayes_factors.ipynb' ) # All tested notebooks diff --git a/test/sample/test_sample.py b/test/sample/test_sample.py index b2d246111..252afc0fe 100644 --- a/test/sample/test_sample.py +++ b/test/sample/test_sample.py @@ -12,12 +12,27 @@ import pypesto.optimize as optimize import pypesto.sample as sample from pypesto.C import OBJECTIVE_NEGLOGLIKE, OBJECTIVE_NEGLOGPOST +from pypesto.objective import ( + AggregatedObjective, + NegLogParameterPriors, + Objective, +) def gaussian_llh(x): return float(norm.logpdf(x).item()) +def gaussian_nllh_grad(x): + mu, sigma = 0, 1 + return np.array([((x - mu) / (sigma**2))]) + + +def gaussian_nllh_hess(x): + sigma = 1 + return np.array([(1 / (sigma**2))]) + + def gaussian_problem(): def nllh(x): return -gaussian_llh(x) @@ -797,7 +812,7 @@ def test_thermodynamic_integration(): # approximation should be better for more chains n_chains = 10 - tol = 1 + tol = 2 sampler = sample.ParallelTemperingSampler( internal_sampler=sample.AdaptiveMetropolisSampler(), options={"show_progress": False, "beta_init": "beta_decay"}, @@ -817,11 +832,22 @@ def test_thermodynamic_integration(): ) # compute the log evidence using trapezoid and simpson rule - log_evidence = sampler.compute_log_evidence(result, method="trapezoid") - log_evidence_not_all = sampler.compute_log_evidence( + log_evidence = sample.evidence.parallel_tempering_log_evidence( + result, method="trapezoid" + ) + log_evidence_not_all = sample.evidence.parallel_tempering_log_evidence( result, method="trapezoid", use_all_chains=False ) - log_evidence_simps = sampler.compute_log_evidence(result, method="simpson") + log_evidence_simps = sample.evidence.parallel_tempering_log_evidence( + result, method="simpson" + ) + + # use steppingstone sampling + log_evidence_steppingstone = ( + sample.evidence.parallel_tempering_log_evidence( + result, method="steppingstone" + ) + ) # compute evidence evidence = quad( @@ -836,3 +862,108 @@ def test_thermodynamic_integration(): assert np.isclose(log_evidence, np.log(evidence[0]), atol=tol) assert np.isclose(log_evidence_not_all, np.log(evidence[0]), atol=tol) assert np.isclose(log_evidence_simps, np.log(evidence[0]), atol=tol) + assert np.isclose( + log_evidence_steppingstone, np.log(evidence[0]), atol=tol + ) + + +def test_laplace_approximation_log_evidence(): + """Test the laplace approximation of the log evidence.""" + log_evidence_true = 21.2 # approximated by hand + + problem = create_petab_problem() + + # hess + result = optimize.minimize( + problem=problem, + n_starts=10, + progress_bar=False, + ) + log_evidence = sample.evidence.laplace_approximation_log_evidence( + problem, result.optimize_result.x[0] + ) + assert np.isclose(log_evidence, log_evidence_true, atol=0.1) + + +@pytest.mark.flaky(reruns=2) +def test_harmonic_mean_log_evidence(): + tol = 2 + # define problem + 
problem = gaussian_problem() + + # run optimization and MCMC + result = optimize.minimize( + problem, + progress_bar=False, + n_starts=10, + ) + result = sample.sample( + problem, + n_samples=2000, + result=result, + ) + + # compute the log evidence using harmonic mean + harmonic_evidence = sample.evidence.harmonic_mean_log_evidence(result) + # compute the log evidence using stabilized harmonic mean + prior_samples = np.random.uniform(problem.lb, problem.ub, size=100) + harmonic_stabilized_evidence = sample.evidence.harmonic_mean_log_evidence( + result=result, + prior_samples=prior_samples, + neg_log_likelihood_fun=problem.objective, + ) + + # compute real evidence + evidence = quad( + lambda x: 1 + / (problem.ub[0] - problem.lb[0]) + * np.exp(gaussian_llh(x)), + a=problem.lb[0], + b=problem.ub[0], + ) + + # compare to known value + assert np.isclose(harmonic_evidence, np.log(evidence[0]), atol=tol) + assert np.isclose( + harmonic_stabilized_evidence, np.log(evidence[0]), atol=tol + ) + + +@pytest.mark.flaky(reruns=2) +def test_bridge_sampling(): + tol = 2 + # define problem + objective = Objective( + fun=lambda x: -gaussian_llh(x), + grad=gaussian_nllh_grad, + hess=gaussian_nllh_hess, + ) + prior_true = NegLogParameterPriors( + [ + { + "index": 0, + "density_fun": lambda x: (1 / (10 + 10)), + "density_dx": lambda x: 0, + "density_ddx": lambda x: 0, + }, + ] + ) + problem = pypesto.Problem( + objective=AggregatedObjective([objective, prior_true]), + lb=[-10], + ub=[10], + x_names=["x"], + ) + + # run optimization and MCMC + result = optimize.minimize(problem, progress_bar=False, n_starts=10) + result = sample.sample( + problem, + n_samples=1000, + result=result, + ) + + # compute the log evidence using harmonic mean + bridge_log_evidence = sample.evidence.bridge_sampling_log_evidence(result) + harmonic_evidence = sample.evidence.harmonic_mean_log_evidence(result) + assert np.isclose(bridge_log_evidence, harmonic_evidence, atol=tol) From f65928d2b022879670e8b1b9d28c9c3666f09096 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Fri, 13 Sep 2024 11:45:46 +0200 Subject: [PATCH 13/37] Doc: fix formatting (#1456) Fix list formatting in doc/example/model_evidence_and_bayes_factors.ipynb, get rid of ipywidgets warning, and fix typo. 
--- doc/example/model_evidence_and_bayes_factors.ipynb | 9 +++++---- setup.cfg | 1 + 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/example/model_evidence_and_bayes_factors.ipynb b/doc/example/model_evidence_and_bayes_factors.ipynb index 9b5d43031..e7d0b2af7 100644 --- a/doc/example/model_evidence_and_bayes_factors.ipynb +++ b/doc/example/model_evidence_and_bayes_factors.ipynb @@ -44,6 +44,7 @@ "A $\\operatorname{BF}_{12} > 1$ indicates that the data favors model $\\mathcal{M}_1$ over model $\\mathcal{M}_2$, while $\\operatorname{BF}_{12} < 1$ indicates the opposite.\n", "\n", "Jeffreys (1961) suggested interpreting Bayes factors in half-units on the log10 scale (this was further simplified in Kass and Raftery (1995)):\n", + "\n", "- Not worth more than a bare mention: $0 < \\log_{10} \\operatorname{BF}_{12} \\leq 0.5$\n", "- Substantial: $0.5 < \\log_{10}\\operatorname{BF}_{12} \\leq 1$\n", "- Strong: $1 < \\log_{10}\\operatorname{BF}_{12} \\leq 2$\n", @@ -57,7 +58,7 @@ "source": [ "## Example\n", "\n", - "To illustrate different methods to compute marginal likelihoods we introduce two toy models, for which we can compute the marginal likelihoods analytically:\n", + "To illustrate different methods to compute marginal likelihoods, we introduce two toy models, for which we can compute the marginal likelihoods analytically:\n", "\n", "1. **Mixture of Two Gaussians (True Data Generator)**: Composed of two Gaussian distributions, $\\mathcal{N}(\\mu_1, \\sigma_1^2)$ and $\\mathcal{N}(\\mu_2, \\sigma_2^2)$, with mixing coefficient $\\pi=0.7$.\n", "\n", @@ -560,11 +561,11 @@ "\n", "The harmonic mean estimator approximates the evidence from above since it tends to ignore low likelihood regions, such as those comprising the prior, leading to overestimates of the marginal likelihoods, even when asymptotically unbiased.\n", "Moreover, the estimator can have a high variance due to evaluating the likelihood at low probability regions and inverting it.\n", - "Hence, it can be very unstable and even fail catastrophically. A more stable version, the stabilized harmonic mean, also uses samples from the prior (see [Newton and Raftery (1994)](https://doi.org/10.1111/j.2517-6161.1994.tb01956.x)). However, there more efficient methods available.\n", + "Hence, it can be very unstable and even fail catastrophically. A more stable version, the stabilized harmonic mean, also uses samples from the prior (see [Newton and Raftery (1994)](https://doi.org/10.1111/j.2517-6161.1994.tb01956.x)). However, there are more efficient methods available.\n", "\n", "A reliable sampling method is bridge sampling (see [\"A Tutorial on Bridge Sampling\" by Gronau et al. (2017)](https://api.semanticscholar.org/CorpusID:5447695) for a nice introduction). It uses samples from a proposal and the posterior to estimate the marginal likelihood. The proposal distribution should be chosen to have a high overlap with the posterior (we construct it from half of the posterior samples by fitting a Gaussian distribution with the same mean and covariance). This method is more stable than the harmonic mean estimator. However, its accuracy may depend on the choice of the proposal distribution.\n", "\n", - "A different approach, the learnt harmonic mean estimator, was proposed by [McEwen et al. (2021)](https://api.semanticscholar.org/CorpusID:244709474). 
The estimator solves the large variance problem by interpreting the harmonic mean estimator as importance sampling and introducing a new target distribution, which is learned from the posterior samples. The method can be applied just using samples from the posterior and is implemented in software package accompanying the paper.\n" + "A different approach, the learnt harmonic mean estimator, was proposed by [McEwen et al. (2021)](https://api.semanticscholar.org/CorpusID:244709474). The estimator solves the large variance problem by interpreting the harmonic mean estimator as importance sampling and introducing a new target distribution, which is learned from the posterior samples. The method can be applied just using samples from the posterior and is implemented in the software package accompanying the paper.\n" ], "metadata": { "collapsed": false @@ -912,7 +913,7 @@ "metadata": {}, "cell_type": "markdown", "source": [ - "We recommend to use either bridge sampling, nested sampling or one of the methods using power posteriors depending on the computational resources available. \n", + "We recommend using either bridge sampling, nested sampling or one of the methods using power posteriors depending on the computational resources available. \n", "\n", "Bayes factors and marginal likelihoods are powerful tools for Bayesian model comparison. While there are various methods to compute marginal likelihoods, each has its strengths and weaknesses. Choosing the appropriate method depends on the specific context, the complexity of the models, and the computational resources available." ], diff --git a/setup.cfg b/setup.cfg index 0dd09beba..4fc1a4e1b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -160,6 +160,7 @@ example = %(nlopt)s %(pyswarm)s notebook >= 6.1.4 + ipywidgets >= 8.1.5 benchmark_models_petab @ git+https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab.git@master#subdirectory=src/python select = # Remove when vis is moved to PEtab Select version From 36eec04a09f0a1e0a34d6db097141756ef9096df Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Sun, 15 Sep 2024 09:58:17 +0200 Subject: [PATCH 14/37] SacessOptimizer: Fix acceptance threshold for objective improvement (#1457) There seems to be a difference in the acceptance threshold for objective improvement that is reported in the sacess paper and the value that is used in the implementation. Changing to the value used in the original implementation. For the manager: [Paper](https://doi.org/10.1186/s12859-016-1452-4): 10% Implementation accompanying the paper: [0.5%](https://bitbucket.org/DavidPenas/sacess-library/src/18b2027af5b97158e5d5303487455e5e529bcf5a/src/method_module_fortran/eSS/parallelscattersearchfunctions.f90#lines-388) More [recent](https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1011151) implementation: [0.01%](https://bitbucket.org/DavidPenas/sacess-library/src/508e7ac15579104731cf1f8c3969960c6e72b872/src/method_module_fortran/eSS/parallelscattersearchfunctions.f90#lines-396) Also added a check for the minimum acceptance threshold as in the original implementation. For the workers: should be 0.01% instead 0.01. --- pypesto/optimize/ess/sacess.py | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/pypesto/optimize/ess/sacess.py b/pypesto/optimize/ess/sacess.py index 0dee7482a..a6ed8915f 100644 --- a/pypesto/optimize/ess/sacess.py +++ b/pypesto/optimize/ess/sacess.py @@ -348,6 +348,9 @@ class SacessManager: adaptation of ``_rejection_threshold``. 
_rejection_threshold: Threshold for relative objective improvements that incoming solutions have to pass to be accepted + _rejection_threshold_min: ``_rejection_threshold`` will be reduced (halved) + if too few solutions are accepted. This value is the lower limit for + ``_rejection_threshold``. _lock: Lock for accessing shared state. _logger: A logger instance """ @@ -363,8 +366,13 @@ def __init__( self._best_known_fx = shmem_manager.Value("d", np.inf) self._best_known_x = shmem_manager.Array("d", [np.nan] * dim) self._rejections = shmem_manager.Value("i", 0) - # initial value from [PenasGon2017]_ p.9 - self._rejection_threshold = shmem_manager.Value("d", 0.1) + # The initial value for the acceptance/rejection threshold in + # [PenasGon2017]_ p.9 is 0.1. + # However, their implementation uses 0.1 *percent*. I assume this is a + # mistake in the paper. + self._rejection_threshold = shmem_manager.Value("d", 0.001) + self._rejection_threshold_min = 0.001 + # scores of the workers, ordered by worker-index # initial score is the worker index self._worker_scores = shmem_manager.Array( @@ -469,8 +477,11 @@ def submit_solution( ) # adapt acceptance threshold if too many solutions have been # rejected - if self._rejections.value > self._num_workers: - self._rejection_threshold.value /= 2 + if self._rejections.value >= self._num_workers: + self._rejection_threshold.value = min( + self._rejection_threshold.value / 2, + self._rejection_threshold_min, + ) self._logger.debug( "Lowered acceptance threshold to " f"{self._rejection_threshold.value}." @@ -523,7 +534,8 @@ def __init__( self._n_received_solutions = 0 self._neval = 0 self._ess_kwargs = ess_kwargs - self._acceptance_threshold = 0.005 + # Default value from original SaCeSS implementation + self._acceptance_threshold = 0.0001 self._n_sent_solutions = 0 self._max_walltime_s = max_walltime_s self._start_time = None From 5c58374fa966b0669b4270e952e066220f158959 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Mon, 16 Sep 2024 14:02:14 +0200 Subject: [PATCH 15/37] SacessOptimizer: expose more hyperparameters + minor fixes (#1459) * Introduces a new SacessOptions class to collect SacessOptimizer hyperparameters and makes some previously hard-coded values configurable. Closes #1458 * Fixes a mismatch in adaptation conditions between the original SaCeSS implementation and SacessOptimizer (adaptation condition: AND -> OR) * Log final worker configuration * Sort refset before resizing. 
--- pypesto/optimize/__init__.py | 1 + pypesto/optimize/ess/__init__.py | 1 + pypesto/optimize/ess/refset.py | 7 +- pypesto/optimize/ess/sacess.py | 162 ++++++++++++++++++++++--------- 4 files changed, 124 insertions(+), 47 deletions(-) diff --git a/pypesto/optimize/__init__.py b/pypesto/optimize/__init__.py index 968fe9e55..9c2679562 100644 --- a/pypesto/optimize/__init__.py +++ b/pypesto/optimize/__init__.py @@ -10,6 +10,7 @@ ESSOptimizer, SacessFidesFactory, SacessOptimizer, + SacessOptions, get_default_ess_options, ) from .load import ( diff --git a/pypesto/optimize/ess/__init__.py b/pypesto/optimize/ess/__init__.py index fef613895..c5f2d0df4 100644 --- a/pypesto/optimize/ess/__init__.py +++ b/pypesto/optimize/ess/__init__.py @@ -10,5 +10,6 @@ from .sacess import ( SacessFidesFactory, SacessOptimizer, + SacessOptions, get_default_ess_options, ) diff --git a/pypesto/optimize/ess/refset.py b/pypesto/optimize/ess/refset.py index 5c75d54a2..dd0556c42 100644 --- a/pypesto/optimize/ess/refset.py +++ b/pypesto/optimize/ess/refset.py @@ -80,7 +80,7 @@ def initialize_random( self, n_diverse: int, ): - """Create initial reference set from random parameters. + """Create an initial reference set from random parameters. Sample ``n_diverse`` random points, populate half of the RefSet using the best solutions and fill the rest with random points. @@ -90,7 +90,7 @@ def initialize_random( self.initialize_from_array(x_diverse=x_diverse, fx_diverse=fx_diverse) def initialize_from_array(self, x_diverse: np.array, fx_diverse: np.array): - """Create initial reference set using the provided points. + """Create an initial reference set using the provided points. Populate half of the RefSet using the best given solutions and fill the rest with a random selection from the remaining points. @@ -174,7 +174,8 @@ def resize(self, new_dim: int): If the dimension does not change, do nothing. If size is decreased, drop entries from the end (i.e., the worst values, assuming it is sorted). If size is increased, the new - entries are filled with randomly and the refset is sorted. + entries are filled with randomly sampled parameters and the refset is + sorted. NOTE: Any attributes are just truncated or filled with zeros. """ diff --git a/pypesto/optimize/ess/sacess.py b/pypesto/optimize/ess/sacess.py index a6ed8915f..775d5458f 100644 --- a/pypesto/optimize/ess/sacess.py +++ b/pypesto/optimize/ess/sacess.py @@ -1,4 +1,5 @@ """Self-adaptive cooperative enhanced scatter search (SACESS).""" +from __future__ import annotations import itertools import logging @@ -11,7 +12,7 @@ from multiprocessing import get_context from multiprocessing.managers import SyncManager from pathlib import Path -from typing import Any, Callable, Optional, Union +from typing import Any, Callable from uuid import uuid1 from warnings import warn @@ -31,6 +32,7 @@ "SacessOptimizer", "get_default_ess_options", "SacessFidesFactory", + "SacessOptions", ] logger = logging.getLogger(__name__) @@ -62,13 +64,14 @@ class SacessOptimizer: def __init__( self, - num_workers: Optional[int] = None, - ess_init_args: Optional[list[dict[str, Any]]] = None, + num_workers: int | None = None, + ess_init_args: list[dict[str, Any]] | None = None, max_walltime_s: float = np.inf, sacess_loglevel: int = logging.INFO, ess_loglevel: int = logging.WARNING, - tmpdir: Union[Path, str] = None, + tmpdir: Path | str = None, mp_start_method: str = "spawn", + options: SacessOptions = None, ): """Construct. 
@@ -110,6 +113,8 @@ def __init__( mp_start_method: The start method for the multiprocessing context. See :mod:`multiprocessing` for details. + options: + Further optimizer hyperparameters. """ if (num_workers is None and ess_init_args is None) or ( num_workers is not None and ess_init_args is not None @@ -138,10 +143,11 @@ def __init__( self._tmpdir = Path(f"SacessOptimizerTemp-{str(uuid1())[:8]}") self._tmpdir = Path(self._tmpdir).absolute() self._tmpdir.mkdir(parents=True, exist_ok=True) - self.histories: Optional[ - list["pypesto.history.memory.MemoryHistory"] - ] = None + self.histories: list[ + pypesto.history.memory.MemoryHistory + ] | None = None self.mp_ctx = get_context(mp_start_method) + self.options = options or SacessOptions() def minimize( self, @@ -212,6 +218,7 @@ def minimize( shmem_manager=shmem_manager, ess_options=ess_init_args, dim=problem.dim, + options=self.options, ) # create workers workers = [ @@ -225,6 +232,7 @@ def minimize( tmp_result_file=SacessWorker.get_temp_result_filename( worker_idx, self._tmpdir ), + options=self.options, ) for worker_idx, ess_kwargs in enumerate(ess_init_args) ] @@ -344,15 +352,13 @@ class SacessManager: more promising the respective worker is considered) _worker_comms: Number of communications received from the individual workers - _rejections: Number of rejected solutions received from workers since last - adaptation of ``_rejection_threshold``. + _rejections: Number of rejected solutions received from workers since the + last adaptation of ``_rejection_threshold``. _rejection_threshold: Threshold for relative objective improvements that incoming solutions have to pass to be accepted - _rejection_threshold_min: ``_rejection_threshold`` will be reduced (halved) - if too few solutions are accepted. This value is the lower limit for - ``_rejection_threshold``. _lock: Lock for accessing shared state. _logger: A logger instance + _options: Further optimizer hyperparameters. """ def __init__( @@ -360,7 +366,9 @@ def __init__( shmem_manager: SyncManager, ess_options: list[dict[str, Any]], dim: int, + options: SacessOptions = None, ): + self._options = options or SacessOptions() self._num_workers = len(ess_options) self._ess_options = [shmem_manager.dict(o) for o in ess_options] self._best_known_fx = shmem_manager.Value("d", np.inf) @@ -370,8 +378,9 @@ def __init__( # [PenasGon2017]_ p.9 is 0.1. # However, their implementation uses 0.1 *percent*. I assume this is a # mistake in the paper. - self._rejection_threshold = shmem_manager.Value("d", 0.001) - self._rejection_threshold_min = 0.001 + self._rejection_threshold = shmem_manager.Value( + "d", self._options.manager_initial_rejection_threshold + ) # scores of the workers, ordered by worker-index # initial score is the worker index @@ -433,7 +442,7 @@ def submit_solution( np.isfinite(fx) and not np.isfinite(self._best_known_fx.value) ) - # avoid division by 0. just accept any improvement if best + # avoid division by 0. just accept any improvement if the best # known value is 0. or (self._best_known_fx.value == 0 and fx < 0) or ( @@ -475,12 +484,12 @@ def submit_solution( f"(threshold: {self._rejection_threshold.value}) " f"(total rejections: {self._rejections.value})." 
) - # adapt acceptance threshold if too many solutions have been - # rejected + # adapt the acceptance threshold if too many solutions have + # been rejected if self._rejections.value >= self._num_workers: self._rejection_threshold.value = min( self._rejection_threshold.value / 2, - self._rejection_threshold_min, + self._options.manager_minimum_rejection_threshold, ) self._logger.debug( "Lowered acceptance threshold to " @@ -507,9 +516,6 @@ class SacessWorker: to the manager. _ess_kwargs: ESSOptimizer options for this worker (may get updated during the self-adaptive step). - _acceptance_threshold: Minimum relative improvement of the objective - compared to the best known value to be eligible for submission to the - Manager. _n_sent_solutions: Number of solutions sent to the Manager. _max_walltime_s: Walltime limit. _logger: A Logger instance. @@ -527,6 +533,7 @@ def __init__( loglevel: int = logging.INFO, ess_loglevel: int = logging.WARNING, tmp_result_file: str = None, + options: SacessOptions = None, ): self._manager = manager self._worker_idx = worker_idx @@ -534,8 +541,6 @@ def __init__( self._n_received_solutions = 0 self._neval = 0 self._ess_kwargs = ess_kwargs - # Default value from original SaCeSS implementation - self._acceptance_threshold = 0.0001 self._n_sent_solutions = 0 self._max_walltime_s = max_walltime_s self._start_time = None @@ -544,6 +549,7 @@ def __init__( self._logger = None self._tmp_result_file = tmp_result_file self._refset = None + self._options = options or SacessOptions() def run( self, @@ -618,6 +624,7 @@ def run( exit_flag=ess.exit_flag, ) self._manager._result_queue.put(worker_result) + self._logger.debug(f"Final configuration: {self._ess_kwargs}") ess._report_final() def _setup_ess(self, startpoint_method: StartpointMethod) -> ESSOptimizer: @@ -665,19 +672,23 @@ def _cooperate(self): self.replace_solution(self._refset, x=recv_x, fx=recv_fx) def _maybe_adapt(self, problem: Problem): - """Perform adaptation step. + """Perform the adaptation step if needed. Update ESS settings if conditions are met. """ # Update ESS settings if we received way more solutions than we sent - # Magic numbers from [PenasGon2017]_ algorithm 5 + # Note: [PenasGon2017]_ Algorithm 5 uses AND in the following + # condition, but the accompanying implementation uses OR. if ( - self._n_received_solutions > 10 * self._n_sent_solutions + 20 - and self._neval > problem.dim * 5000 + self._n_received_solutions + > self._options.adaptation_sent_coeff * self._n_sent_solutions + + self._options.adaptation_sent_offset + or self._neval > problem.dim * self._options.adaptation_min_evals ): self._ess_kwargs = self._manager.reconfigure_worker( self._worker_idx ) + self._refset.sort() self._refset.resize(self._ess_kwargs["dim_refset"]) self._logger.debug( f"Updated settings on worker {self._worker_idx} to " @@ -693,17 +704,17 @@ def maybe_update_best(self, x: np.array, fx: float): f"Worker {self._worker_idx} maybe sending solution {fx}. " f"best known: {self._best_known_fx}, " f"rel change: {rel_change:.4g}, " - f"threshold: {self._acceptance_threshold}" + f"threshold: {self._options.worker_acceptance_threshold}" ) - # solution improves best value by at least a factor of ... + # solution improves the best value by at least a factor of ... 
if ( (np.isfinite(fx) and not np.isfinite(self._best_known_fx)) or (self._best_known_fx == 0 and fx < 0) or ( fx < self._best_known_fx and abs((self._best_known_fx - fx) / fx) - > self._acceptance_threshold + > self._options.worker_acceptance_threshold ) ): self._logger.debug( @@ -738,7 +749,7 @@ def replace_solution(refset: RefSet, x: np.array, fx: float): refset.attributes["cooperative_solution"] ) ).size == 0: - # the attribute exists, but no member is marked as cooperative + # the attribute exists, but no member is marked as the cooperative # solution. this may happen if we shrink the refset. cooperative_solution_idx = np.argmax(refset.fx) @@ -767,9 +778,7 @@ def _keep_going(self): return True @staticmethod - def get_temp_result_filename( - worker_idx: int, tmpdir: Union[str, Path] - ) -> str: + def get_temp_result_filename(worker_idx: int, tmpdir: str | Path) -> str: return str(Path(tmpdir, f"sacess-{worker_idx:02d}_tmp.h5").absolute()) @@ -786,7 +795,7 @@ def _run_worker( # different random seeds per process np.random.seed((os.getpid() * int(time.time() * 1000)) % 2**32) - # Forward log messages to logging process + # Forward log messages to the logging process h = logging.handlers.QueueHandler(log_process_queue) worker._logger = logging.getLogger(multiprocessing.current_process().name) worker._logger.addHandler(h) @@ -797,11 +806,9 @@ def _run_worker( def get_default_ess_options( num_workers: int, dim: int, - local_optimizer: Union[ - bool, - "pypesto.optimize.Optimizer", - Callable[..., "pypesto.optimize.Optimizer"], - ] = True, + local_optimizer: bool + | pypesto.optimize.Optimizer + | Callable[..., pypesto.optimize.Optimizer] = True, ) -> list[dict]: """Get default ESS settings for (SA)CESS. @@ -1017,8 +1024,8 @@ class SacessFidesFactory: def __init__( self, - fides_options: Optional[dict[str, Any]] = None, - fides_kwargs: Optional[dict[str, Any]] = None, + fides_options: dict[str, Any] | None = None, + fides_kwargs: dict[str, Any] | None = None, ): if fides_options is None: fides_options = {} @@ -1038,7 +1045,7 @@ def __init__( def __call__( self, max_walltime_s: int, max_eval: int - ) -> "pypesto.optimize.FidesOptimizer": + ) -> pypesto.optimize.FidesOptimizer: """Create a :class:`FidesOptimizer` instance.""" from fides.constants import Options as FidesOptions @@ -1085,5 +1092,72 @@ class SacessWorkerResult: fx: float n_eval: int n_iter: int - history: "pypesto.history.memory.MemoryHistory" + history: pypesto.history.memory.MemoryHistory exit_flag: ESSExitFlag + + +@dataclass +class SacessOptions: + """Container for :class:`SacessOptimizer` hyperparameters. + + Parameters + ---------- + manager_initial_rejection_threshold, manager_minimum_rejection_threshold: + Initial and minimum threshold for relative objective improvements that + incoming solutions have to pass to be accepted. If the number of + rejected solutions exceeds the number of workers, the threshold is + halved until it reaches ``manager_minimum_rejection_threshold``. + + worker_acceptance_threshold: + Minimum relative improvement of the objective compared to the best + known value to be eligible for submission to the Manager. + + adaptation_min_evals, adaptation_sent_offset, adaptation_sent_coeff: + Hyperparameters that control when the workers will adapt their settings + based on the performance of the other workers. 
+ + The adaptation step is performed if all the following conditions are + met: + + * The number of function evaluations since the last solution was sent + to the manager times the number of optimization parameters is greater + than ``adaptation_min_evals``. + + * The number of solutions received by the worker since the last + solution it sent to the manager is greater than + ``adaptation_sent_coeff * n_sent_solutions + adaptation_sent_offset``, + where ``n_sent_solutions`` is the number of solutions sent to the + manager by the given worker. + + """ + + manager_initial_rejection_threshold: float = 0.001 + manager_minimum_rejection_threshold: float = 0.001 + + # Default value from original SaCeSS implementation + worker_acceptance_threshold: float = 0.0001 + + # Magic numbers for adaptation, taken from [PenasGon2017]_ algorithm 5 + adaptation_min_evals: int = 5000 + adaptation_sent_offset: int = 20 + adaptation_sent_coeff: int = 10 + + def __post_init__(self): + if self.adaptation_min_evals < 0: + raise ValueError("adaptation_min_evals must be non-negative.") + if self.adaptation_sent_offset < 0: + raise ValueError("adaptation_sent_offset must be non-negative.") + if self.adaptation_sent_coeff < 0: + raise ValueError("adaptation_sent_coeff must be non-negative.") + if self.manager_initial_rejection_threshold < 0: + raise ValueError( + "manager_initial_rejection_threshold must be non-negative." + ) + if self.manager_minimum_rejection_threshold < 0: + raise ValueError( + "manager_minimum_rejection_threshold must be non-negative." + ) + if self.worker_acceptance_threshold < 0: + raise ValueError( + "worker_acceptance_threshold must be non-negative." + ) From b88356f3666af8908e7796ea6fae6856bbc77319 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Mon, 16 Sep 2024 16:27:12 +0200 Subject: [PATCH 16/37] SacessOptimizer, ESSOptimizer: Bound-normalize parameters for proximity check (#1462) Previously, the distance was scaled to the second point, which didn't make too much sense. Now it's scaled to the parameter bounds. --- pypesto/optimize/ess/refset.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/pypesto/optimize/ess/refset.py b/pypesto/optimize/ess/refset.py index dd0556c42..74f5c8f6b 100644 --- a/pypesto/optimize/ess/refset.py +++ b/pypesto/optimize/ess/refset.py @@ -131,14 +131,29 @@ def prune_too_close(self): Assumes RefSet is sorted. """ + # Compare [PenasGon2007] + # Note that the main text states that distance between the two points + # is normalized to the bounds of the search space. However, + # Algorithm 1, line 9 normalizes to x_j instead. The accompanying + # code does normalize to max(abs(x_i), abs(x_j)). + # Normalizing to the bounds of the search space seems more reasonable. + # Otherwise, for a parameter with bounds [lb, ub], + # where (ub-lb)/ub < proximity_threshold, we would never find an + # admissible point. x = self.x + ub, lb = self.evaluator.problem.ub, self.evaluator.problem.lb + + def normalize(x): + """Normalize parameter vector to the bounds of the search space.""" + return (x - lb) / (ub - lb) + for i in range(self.dim): for j in range(i + 1, self.dim): # check proximity # zero-division may occur here with np.errstate(divide="ignore", invalid="ignore"): while ( - np.max(np.abs((x[i] - x[j]) / x[j])) + np.max(np.abs(normalize(x[i]) - normalize(x[j]))) <= self.proximity_threshold ): # too close. replace x_j. 
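Editorial note: to make the changed criterion concrete, a small sketch (not part of the patch) of the bound-normalized proximity check now used in prune_too_close; the bounds, points, and threshold below are made up.

import numpy as np

lb, ub = np.array([0.0, -5.0]), np.array([10.0, 5.0])
x_i, x_j = np.array([1.0, 0.0]), np.array([1.05, 0.1])
proximity_threshold = 0.02


def normalize(x):
    # scale each parameter to [0, 1] using the bounds of the search space
    return (x - lb) / (ub - lb)


# previously the distance was scaled to x_j; now it is scaled to the bounds
too_close = np.max(np.abs(normalize(x_i) - normalize(x_j))) <= proximity_threshold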
From 2e576879bfd3aa3d6b68a8a202080510c183bba0 Mon Sep 17 00:00:00 2001 From: Paul Jonas Jost <70631928+PaulJonasJost@users.noreply.github.com> Date: Mon, 23 Sep 2024 18:03:10 +0200 Subject: [PATCH 17/37] More informative info message for start indices. (#1472) --- pypesto/visualize/misc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pypesto/visualize/misc.py b/pypesto/visualize/misc.py index 5d7c1b491..fd74c0fad 100644 --- a/pypesto/visualize/misc.py +++ b/pypesto/visualize/misc.py @@ -349,7 +349,7 @@ def process_start_indices( raise ValueError( f"Permissible values for start_indices are {ALL}, " f"{ALL_CLUSTERED}, {FIRST_CLUSTER}, an integer or a " - f"list of indices." + f"list of indices. Got {start_indices}." ) # if it is an integer n, select the first n starts if isinstance(start_indices, Number): From 0264a1edd64936c43ee9e777603c65b2aa724910 Mon Sep 17 00:00:00 2001 From: Paul Jonas Jost <70631928+PaulJonasJost@users.noreply.github.com> Date: Tue, 24 Sep 2024 11:14:26 +0200 Subject: [PATCH 18/37] Petab Importer reforge (#1442) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Moved creation of the amici objective function to a separate class, leaving the importer really independent of amici * Remove check gradients, as this can be done in an objective function * Added functions with a not implementedError and moved most of them to AmiciFactory * Moved import to a factory, allowing a single PEtabImporter to function with different models. Created a basic PetabSimulator supported (inefficent most likely but as a starter) * Resolve potential annotation problems. * Solved the problem of x_names not saving the fixd parameters. similar solution to amici Objective * rewrote petab testsuite to fit new design. * Adjusted text and example code for new changes * import error with petab simulator * updated code * import petab as optional * Adjusted roadrunner example and also solved issue in PetabImporterRR * Another import * Another import and spline error * Changed the documentation that is included * added test for petabSimulator with the example of basico * Fix text for check gradients and fix roadrunner notebook. * added test for petabSimulator with the example of basico * cleared output * Hopeflly renamed everything * Temporary fix for roadrunner fd objective * correct installation of basico as petab simulator in test * Removed try-except in simulator as it is to unspecific. Now requiring the parameter update function * not basico but copasi-basico * not copasi-basico but copasi-basico[petab] * Adjusted some description text * More accurate documentation * Replaced Usage of roadrunner, amici, petab strings with constants * Update doc/example/petab_import.ipynb Co-authored-by: Dilan Pathirana <59329744+dilpath@users.noreply.github.com> * Update doc/example/petab_import.ipynb Co-authored-by: Fabian Fröhlich * Renamed factory mentions as it was not a fitting name. 
* added option to pass simulator type and simulator to create_objective_creator --------- Co-authored-by: Dilan Pathirana <59329744+dilpath@users.noreply.github.com> Co-authored-by: Fabian Fröhlich --- doc/example/censored_data.ipynb | 12 +- doc/example/ordinal_data.ipynb | 5 +- doc/example/petab_import.ipynb | 12 +- doc/example/roadrunner.ipynb | 13 +- doc/example/semiquantitative_data.ipynb | 5 +- pypesto/C.py | 4 + pypesto/objective/__init__.py | 2 + pypesto/objective/petab.py | 102 +++ pypesto/objective/roadrunner/__init__.py | 6 + .../roadrunner/petab_importer_roadrunner.py | 41 +- pypesto/objective/roadrunner/road_runner.py | 40 +- .../roadrunner/roadrunner_calculator.py | 24 +- pypesto/objective/roadrunner/utils.py | 36 +- pypesto/petab/importer.py | 823 ++++------------- pypesto/petab/objective_creator.py | 853 ++++++++++++++++++ pypesto/petab/util.py | 171 ++++ pypesto/select/misc.py | 5 +- test/base/test_engine.py | 9 +- test/base/test_roadrunner.py | 21 +- test/hierarchical/test_censored.py | 8 +- test/hierarchical/test_hierarchical.py | 3 +- test/hierarchical/test_ordinal.py | 8 +- test/hierarchical/test_spline.py | 11 +- test/petab/test_amici_objective.py | 2 +- test/petab/test_amici_predictor.py | 7 +- test/petab/test_petabSimulator.py | 108 +++ test/petab/test_petab_import.py | 39 +- test/petab/test_petab_suite.py | 7 +- tox.ini | 2 +- 29 files changed, 1595 insertions(+), 784 deletions(-) create mode 100644 pypesto/objective/petab.py create mode 100644 pypesto/petab/objective_creator.py create mode 100644 pypesto/petab/util.py create mode 100644 test/petab/test_petabSimulator.py diff --git a/doc/example/censored_data.ipynb b/doc/example/censored_data.ipynb index cfe783848..28ff83c3b 100644 --- a/doc/example/censored_data.ipynb +++ b/doc/example/censored_data.ipynb @@ -148,16 +148,6 @@ "As there are no censored data specific inner options, we will pass none to the constructor." ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "model = importer.create_model(verbose=False)\n", - "objective = importer.create_objective(model=model)" - ] - }, { "attachments": {}, "cell_type": "markdown", @@ -172,7 +162,7 @@ "metadata": {}, "outputs": [], "source": [ - "problem = importer.create_problem(objective)\n", + "problem = importer.create_problem()\n", "\n", "engine = pypesto.engine.MultiProcessEngine(n_procs=3)\n", "\n", diff --git a/doc/example/ordinal_data.ipynb b/doc/example/ordinal_data.ipynb index 2488cadf5..2b1528f61 100644 --- a/doc/example/ordinal_data.ipynb +++ b/doc/example/ordinal_data.ipynb @@ -542,7 +542,8 @@ "metadata": {}, "outputs": [], "source": [ - "objective = importer.create_objective(verbose=False)" + "factory = importer.create_objective_creator()\n", + "objective = factory.create_objective(verbose=False)" ] }, { @@ -559,7 +560,7 @@ "metadata": {}, "outputs": [], "source": [ - "objective = importer.create_objective(\n", + "objective = factory.create_objective(\n", " inner_options={\n", " \"method\": \"reduced\",\n", " \"reparameterized\": True,\n", diff --git a/doc/example/petab_import.ipynb b/doc/example/petab_import.ipynb index 4b3b5bbdc..69040e595 100644 --- a/doc/example/petab_import.ipynb +++ b/doc/example/petab_import.ipynb @@ -105,7 +105,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The model must be imported to pyPESTO and AMICI. Therefore, we create a `pypesto.PetabImporter` from the problem, and create an AMICI model." 
+ "In order to import the model into pyPESTO, we additionally need a simulator. We can specify the simulator through the `simulator_type` argument. Supported simulators are e.g.`amici` and `roadrunner`. We will use AMICI as our example simulator. Therefore, we create a `pypesto.PetabImporter` from the problem. The importer itself creates a `pypesto.petab.Factory`, which is used to create the AMICI objective and model." ] }, { @@ -116,9 +116,10 @@ }, "outputs": [], "source": [ - "importer = pypesto.petab.PetabImporter(petab_problem)\n", + "importer = pypesto.petab.PetabImporter(petab_problem, simulator_type=\"amici\")\n", + "factory = importer.create_objective_creator()\n", "\n", - "model = importer.create_model(verbose=False)\n", + "model = factory.create_model(verbose=False)\n", "\n", "# some model properties\n", "print(\"Model parameters:\", list(model.getParameterIds()), \"\\n\")\n", @@ -154,7 +155,7 @@ "metadata": {}, "outputs": [], "source": [ - "importer = pypesto.petab.PetabImporter.from_yaml(yaml_config)\n", + "importer = pypesto.petab.PetabImporter.from_yaml(yaml_config, simulator_type=\"amici\")\n", "problem = importer.create_problem() # creating the problem from the importer. The objective can be found at problem.objective" ] }, @@ -178,7 +179,8 @@ "converter_config = libsbml.SBMLLocalParameterConverter().getDefaultProperties()\n", "petab_problem.sbml_document.convert(converter_config)\n", "\n", - "obj = importer.create_objective()\n", + "factory = importer.create_objective_creator()\n", + "obj = factory.create_objective()\n", "\n", "# for some models, hyperparameters need to be adjusted\n", "# obj.amici_solver.setMaxSteps(10000)\n", diff --git a/doc/example/roadrunner.ipynb b/doc/example/roadrunner.ipynb index f666efb6e..10566c2ac 100644 --- a/doc/example/roadrunner.ipynb +++ b/doc/example/roadrunner.ipynb @@ -49,10 +49,9 @@ "import petab\n", "import pypesto.objective\n", "import pypesto.optimize as optimize\n", - "import pypesto.objective.roadrunner as pypesto_rr\n", - "import pypesto.sample as sample\n", + "import pypesto.petab\n", "import pypesto.visualize as visualize\n", - "import pypesto.profile as profile\n", + "import pypesto.objective.roadrunner as pypesto_rr\n", "from IPython.display import Markdown, display\n", "from pprint import pprint\n", "\n", @@ -80,7 +79,7 @@ "source": [ "## Creating pyPESTO problem from PEtab\n", "\n", - "The [PEtab file format](https://petab.readthedocs.io/en/latest/documentation_data_format.html) stores all the necessary information to define a parameter estimation problem. This includes the model, the experimental data, the parameters to estimate, and the experimental conditions. Using the `pypesto_rr.PetabImporterRR` class, we can create a pyPESTO problem directly from a PEtab problem." + "The [PEtab file format](https://petab.readthedocs.io/en/latest/documentation_data_format.html) stores all the necessary information to define a parameter estimation problem. This includes the model, the experimental data, the parameters to estimate, and the experimental conditions. Using the `pypesto.petab.PetabImporter` class, we can create a pyPESTO problem directly from a PEtab problem." 
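Editorial note: the backend selection introduced by this PR boils down to a single keyword argument. A consolidated sketch (not part of the patch; `petab_yaml` is assumed to point at an existing PEtab problem definition):

import petab
import pypesto.petab

petab_problem = petab.Problem.from_yaml(petab_yaml)

# AMICI backend (model/objective construction goes through an objective creator)
importer_amici = pypesto.petab.PetabImporter(petab_problem, simulator_type="amici")
problem_amici = importer_amici.create_problem()

# RoadRunner backend: same importer, different simulator
importer_rr = pypesto.petab.PetabImporter(petab_problem, simulator_type="roadrunner")
problem_rr = importer_rr.create_problem()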
] }, { @@ -97,7 +96,7 @@ "petab_yaml = f\"./{model_name}/{model_name}.yaml\"\n", "\n", "petab_problem = petab.Problem.from_yaml(petab_yaml)\n", - "importer = pypesto_rr.PetabImporterRR(petab_problem)\n", + "importer = pypesto.petab.PetabImporter(petab_problem, simulator_type=\"roadrunner\")\n", "problem = importer.create_problem()" ] }, @@ -329,7 +328,7 @@ "# no support for sensitivities\n", "try:\n", " ret = problem.objective(\n", - " petab_problem.get_x_nominal(fixed=False,scaled=True),\n", + " petab_problem.x_nominal_free_scaled,\n", " mode=\"mode_fun\",\n", " return_dict=True,\n", " sensi_orders=(1,),\n", @@ -354,7 +353,7 @@ "# support through finite differences\n", "try:\n", " ret = objective_fd(\n", - " petab_problem.get_x_nominal(fixed=False,scaled=True),\n", + " petab_problem.x_nominal_scaled,\n", " mode=\"mode_fun\",\n", " return_dict=True,\n", " sensi_orders=(1,),\n", diff --git a/doc/example/semiquantitative_data.ipynb b/doc/example/semiquantitative_data.ipynb index ee5717e54..721de8f20 100644 --- a/doc/example/semiquantitative_data.ipynb +++ b/doc/example/semiquantitative_data.ipynb @@ -193,7 +193,8 @@ "metadata": {}, "outputs": [], "source": [ - "objective = importer.create_objective(verbose=False)" + "factory = importer.create_objective_creator()\n", + "objective = factory.create_objective(verbose=False)" ] }, { @@ -210,7 +211,7 @@ "metadata": {}, "outputs": [], "source": [ - "objective = importer.create_objective(\n", + "objective = factory.create_objective(\n", " inner_options={\n", " \"spline_ratio\": 1 / 2,\n", " \"min_diff_factor\": 1 / 2,\n", diff --git a/pypesto/C.py b/pypesto/C.py index fe36c99b2..5a0e7438a 100644 --- a/pypesto/C.py +++ b/pypesto/C.py @@ -84,6 +84,10 @@ class EnsembleType(Enum): X0 = "x0" ID = "id" +AMICI = "amici" +ROADRUNNER = "roadrunner" +PETAB = "petab" + ############################################################################### # HIERARCHICAL SCALING + OFFSET diff --git a/pypesto/objective/__init__.py b/pypesto/objective/__init__.py index ad4b3ba30..48da2a97d 100644 --- a/pypesto/objective/__init__.py +++ b/pypesto/objective/__init__.py @@ -8,8 +8,10 @@ from .base import ObjectiveBase from .finite_difference import FD, FDDelta from .function import Objective +from .petab import PetabSimulatorObjective from .priors import ( NegLogParameterPriors, NegLogPriors, get_parameter_prior_dict, ) +from .roadrunner import RoadRunnerObjective diff --git a/pypesto/objective/petab.py b/pypesto/objective/petab.py new file mode 100644 index 000000000..e40513f59 --- /dev/null +++ b/pypesto/objective/petab.py @@ -0,0 +1,102 @@ +"""Objective function for PEtab models using the PEtab simulator.""" +from __future__ import annotations + +try: + import petab.v1 as petab + from petab.v1.simulate import Simulator as PetabSimulator +except ImportError: + petab = None +from collections import OrderedDict +from collections.abc import Sequence + +import numpy as np + +from ..C import FVAL, MODE_FUN, MODE_RES, RES, ModeType +from .base import ObjectiveBase, ResultDict + + +class PetabSimulatorObjective(ObjectiveBase): + """Objective function for PEtab models using the PEtab simulator.""" + + def __init__( + self, + simulator: PetabSimulator, + x_names: Sequence[str] | None = None, + ): + """Initialize the PEtab simulator objective function. + + Parameters + ---------- + petab_problem: + The PEtab problem. + simulator: + The PEtab simulator. + x_names: + Names of optimization parameters. 
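Editorial note: how a concrete simulator plugs into this class is perhaps easiest to see from a skeleton. The following is a hypothetical sketch (not part of the patch): the subclass name and its internals are made up, and the assumption is that any petab.v1.simulate.Simulator subclass that additionally provides the `set_parameters` hook used by `replace_parameters` below can be wrapped this way.

import pandas as pd
from petab.v1.simulate import Simulator
from pypesto.objective import PetabSimulatorObjective


class MyToolSimulator(Simulator):
    # hypothetical simulator wrapping some external simulation tool

    def set_parameters(self, x_unscaled: dict):
        # hypothetical hook: push unscaled parameter values into the model
        self._current_parameters = dict(x_unscaled)

    def simulate_without_noise(self) -> pd.DataFrame:
        # return simulations as a PEtab simulation table (tool specific, omitted)
        raise NotImplementedError


# petab_problem: an existing petab.Problem
# objective = PetabSimulatorObjective(MyToolSimulator(petab_problem))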
+ """ + if petab is None: + raise ImportError( + "The `petab` package is required for this objective function." + ) + self.simulator = simulator + self.petab_problem = self.simulator.petab_problem + if x_names is None: + x_names = list(self.petab_problem.get_x_ids()) + super().__init__(x_names=x_names) + + def replace_parameters(self, x: np.ndarray): + """Replace the parameters in the PEtab problem with the given values. + + Parameters + ---------- + x: + Parameter vector for optimization. + """ + x_dict = OrderedDict(zip(self._x_names, x)) + x_unscaled = self.petab_problem.unscale_parameters(x_dict) + par_df = self.petab_problem.parameter_df + par_df["nominalValue"] = par_df.index.map(x_unscaled) + self.simulator.set_parameters(x_unscaled) + + def call_unprocessed( + self, + x: np.ndarray, + sensi_orders: tuple[int, ...], + mode: ModeType, + return_dict: bool, + **kwargs, + ) -> ResultDict: + """See :meth:`ObjectiveBase.call_unprocessed`.""" + + self.replace_parameters(x) + sim_df = self.simulator.simulate(noise=False, as_measurement=False) + result = {} + result["simulations"] = sim_df + if mode == MODE_FUN: + result[FVAL] = -petab.calculate_llh( + measurement_dfs=self.petab_problem.measurement_df, + simulation_dfs=sim_df, + observable_dfs=self.petab_problem.observable_df, + parameter_dfs=self.petab_problem.parameter_df, + ) + elif mode == MODE_RES: + result[RES] = petab.calculate_residuals( + measurement_dfs=self.petab_problem.measurement_df, + simulation_dfs=sim_df, + observable_dfs=self.petab_problem.observable_df, + parameter_dfs=self.petab_problem.parameter_df, + ) + return result + + def check_sensi_orders( + self, + sensi_orders: tuple[int, ...], + mode: ModeType, + ) -> bool: + """See :class:`ObjectiveBase` documentation.""" + if not sensi_orders: + return True + sensi_order = max(sensi_orders) + max_sensi_order = 0 + + return sensi_order <= max_sensi_order diff --git a/pypesto/objective/roadrunner/__init__.py b/pypesto/objective/roadrunner/__init__.py index 3b6e800e2..b87ab9dc0 100644 --- a/pypesto/objective/roadrunner/__init__.py +++ b/pypesto/objective/roadrunner/__init__.py @@ -2,6 +2,12 @@ RoadRunner objective ==================== """ +__all__ = [ + "PetabImporterRR", + "RoadRunnerCalculator", + "ExpData", + "SolverOptions", +] from .petab_importer_roadrunner import PetabImporterRR from .road_runner import RoadRunnerObjective diff --git a/pypesto/objective/roadrunner/petab_importer_roadrunner.py b/pypesto/objective/roadrunner/petab_importer_roadrunner.py index 0b674b755..d41c31250 100644 --- a/pypesto/objective/roadrunner/petab_importer_roadrunner.py +++ b/pypesto/objective/roadrunner/petab_importer_roadrunner.py @@ -9,24 +9,25 @@ import logging import numbers import re +import warnings from collections.abc import Iterable from pathlib import Path from typing import Any -import libsbml -import petab.v1 as petab -import roadrunner -from petab.v1.C import ( - OBSERVABLE_FORMULA, - PREEQUILIBRATION_CONDITION_ID, - SIMULATION_CONDITION_ID, -) -from petab.v1.models.sbml_model import SbmlModel -from petab.v1.parameter_mapping import ParMappingDictQuadruple +try: + import petab.v1 as petab + from petab.v1.C import ( + OBSERVABLE_FORMULA, + PREEQUILIBRATION_CONDITION_ID, + SIMULATION_CONDITION_ID, + ) + from petab.v1.models.sbml_model import SbmlModel + from petab.v1.parameter_mapping import ParMappingDictQuadruple +except ImportError: + petab = None import pypesto.C -from ...petab.importer import PetabStartpoints from ...problem import Problem from ...startpoint import 
StartpointMethod from ..aggregated import AggregatedObjective @@ -35,6 +36,13 @@ from .roadrunner_calculator import RoadRunnerCalculator from .utils import ExpData +try: + import libsbml + import roadrunner +except ImportError: + roadrunner = None + libsbml = None + logger = logging.getLogger(__name__) @@ -62,6 +70,14 @@ def __init__( validate_petab: Flag indicating if the PEtab problem shall be validated. """ + warnings.warn( + "The RoadRunner importer is deprecated and will be removed in " + "future versions. Please use the generic PetabImporter instead " + "with `simulator_type='roadrunner'`. Everything else will stay " + "same.", + DeprecationWarning, + stacklevel=2, + ) self.petab_problem = petab_problem if validate_petab: if petab.lint_problem(petab_problem): @@ -288,6 +304,7 @@ def create_objective( petab_problem=self.petab_problem, calculator=calculator, x_names=x_names, + x_ids=x_names, ) def create_prior(self) -> NegLogParameterPriors | None: @@ -344,6 +361,8 @@ def create_startpoint_method(self, **kwargs) -> StartpointMethod: Additional keyword arguments passed on to :meth:`pypesto.startpoint.FunctionStartpoints.__init__`. """ + from ...petab.util import PetabStartpoints + return PetabStartpoints(petab_problem=self.petab_problem, **kwargs) def create_problem( diff --git a/pypesto/objective/roadrunner/road_runner.py b/pypesto/objective/roadrunner/road_runner.py index c2f88de80..ce2ce1820 100644 --- a/pypesto/objective/roadrunner/road_runner.py +++ b/pypesto/objective/roadrunner/road_runner.py @@ -2,20 +2,28 @@ Currently does not support sensitivities. """ +from __future__ import annotations + from collections import OrderedDict from collections.abc import Sequence -from typing import Optional, Union import numpy as np -import roadrunner -from petab.v1 import Problem as PetabProblem -from petab.v1.parameter_mapping import ParMappingDictQuadruple from ...C import MODE_FUN, MODE_RES, ROADRUNNER_INSTANCE, X_NAMES, ModeType from ..base import ObjectiveBase from .roadrunner_calculator import RoadRunnerCalculator from .utils import ExpData, SolverOptions +try: + from petab.v1 import Problem as PetabProblem + from petab.v1.parameter_mapping import ParMappingDictQuadruple +except ImportError: + petab = None +try: + import roadrunner +except ImportError: + roadrunner = None + class RoadRunnerObjective(ObjectiveBase): """Objective function for RoadRunner models. @@ -26,12 +34,13 @@ class RoadRunnerObjective(ObjectiveBase): def __init__( self, rr: roadrunner.RoadRunner, - edatas: Union[Sequence[ExpData], ExpData], + edatas: Sequence[ExpData] | ExpData, parameter_mapping: list[ParMappingDictQuadruple], petab_problem: PetabProblem, - calculator: Optional[RoadRunnerCalculator] = None, - x_names: Optional[Sequence[str]] = None, - solver_options: Optional[SolverOptions] = None, + calculator: RoadRunnerCalculator | None = None, + x_ids: Sequence[str] | None = None, + x_names: Sequence[str] | None = None, + solver_options: SolverOptions | None = None, ): """Initialize the RoadRunner objective function. @@ -52,6 +61,8 @@ def __init__( Might be removed later. calculator: The calculator to use. If None, a new instance is created. + x_ids: + IDs of Roadrunner parameters. Includes fixed parameters as well. x_names: Names of optimization parameters. 
""" @@ -68,6 +79,11 @@ def __init__( if solver_options is None: solver_options = SolverOptions() self.solver_options = solver_options + if x_ids is None: + x_ids = list(rr.model.getGlobalParameterIds()) + self.x_ids = x_ids + if x_names is None: + x_names = x_ids super().__init__(x_names=x_names) def get_config(self) -> dict: @@ -87,7 +103,7 @@ def __call__( mode: ModeType = MODE_FUN, return_dict: bool = False, **kwargs, - ) -> Union[float, np.ndarray, dict]: + ) -> float | np.ndarray | dict: """See :class:`ObjectiveBase` documentation.""" return super().__call__(x, sensi_orders, mode, return_dict, **kwargs) @@ -97,8 +113,8 @@ def call_unprocessed( sensi_orders: tuple[int, ...], mode: ModeType, return_dict: bool, - edatas: Optional[Sequence[ExpData]] = None, - parameter_mapping: Optional[list[ParMappingDictQuadruple]] = None, + edatas: Sequence[ExpData] | None = None, + parameter_mapping: list[ParMappingDictQuadruple] | None = None, ) -> dict: """ Call objective function without pre- or post-processing and formatting. @@ -114,7 +130,7 @@ def call_unprocessed( if parameter_mapping is None: parameter_mapping = self.parameter_mapping # convert x to dictionary - x = OrderedDict(zip(self.x_names, x)) + x = OrderedDict(zip(self.x_ids, x)) ret = self.calculator( x_dct=x, mode=mode, diff --git a/pypesto/objective/roadrunner/roadrunner_calculator.py b/pypesto/objective/roadrunner/roadrunner_calculator.py index f6c1cf652..3df7b10ef 100644 --- a/pypesto/objective/roadrunner/roadrunner_calculator.py +++ b/pypesto/objective/roadrunner/roadrunner_calculator.py @@ -2,14 +2,12 @@ Handles all RoadRunner.simulate calls, calculates likelihoods and residuals. """ +from __future__ import annotations + import numbers from collections.abc import Sequence -from typing import Optional import numpy as np -import petab.v1 as petab -import roadrunner -from petab.v1.parameter_mapping import ParMappingDictQuadruple from ...C import ( FVAL, @@ -28,6 +26,16 @@ unscale_parameters, ) +try: + import petab.v1 as petab + from petab.v1.parameter_mapping import ParMappingDictQuadruple +except ImportError: + petab = None +try: + import roadrunner +except ImportError: + roadrunner = None + LLH_TYPES = { "lin_normal": lambda measurement, simulation, sigma: -0.5 * ( @@ -69,7 +77,7 @@ def __call__( x_ids: Sequence[str], parameter_mapping: list[ParMappingDictQuadruple], petab_problem: petab.Problem, - solver_options: Optional[SolverOptions], + solver_options: SolverOptions | None = None, ): """Perform the RoadRunner call and obtain objective function values. @@ -240,10 +248,10 @@ def simulate_per_condition( def fill_in_parameters( self, problem_parameters: dict, - roadrunner_instance: Optional[roadrunner.RoadRunner] = None, - parameter_mapping: Optional[ParMappingDictQuadruple] = None, + roadrunner_instance: roadrunner.RoadRunner | None = None, + parameter_mapping: ParMappingDictQuadruple | None = None, preeq: bool = False, - filling_mode: Optional[str] = None, + filling_mode: str | None = None, ) -> dict: """Fill in parameters into the roadrunner instance. 
diff --git a/pypesto/objective/roadrunner/utils.py b/pypesto/objective/roadrunner/utils.py index e29291ef3..8ae7e3527 100644 --- a/pypesto/objective/roadrunner/utils.py +++ b/pypesto/objective/roadrunner/utils.py @@ -11,20 +11,28 @@ import numpy as np import pandas as pd -import petab.v1 as petab -import roadrunner -from petab.v1.C import ( - LIN, - MEASUREMENT, - NOISE_DISTRIBUTION, - NOISE_FORMULA, - NORMAL, - OBSERVABLE_ID, - OBSERVABLE_TRANSFORMATION, - SIMULATION, - SIMULATION_CONDITION_ID, - TIME, -) + +try: + import petab.v1 as petab + from petab.v1.C import ( + LIN, + MEASUREMENT, + NOISE_DISTRIBUTION, + NOISE_FORMULA, + NORMAL, + OBSERVABLE_ID, + OBSERVABLE_TRANSFORMATION, + SIMULATION, + SIMULATION_CONDITION_ID, + TIME, + ) +except ImportError: + petab = None + +try: + import roadrunner +except ImportError: + roadrunner = None class ExpData: diff --git a/pypesto/petab/importer.py b/pypesto/petab/importer.py index 6160247dd..49b8eb083 100644 --- a/pypesto/petab/importer.py +++ b/pypesto/petab/importer.py @@ -4,69 +4,57 @@ import logging import os -import shutil -import sys import tempfile import warnings -from collections.abc import Iterable, Sequence -from dataclasses import dataclass -from functools import partial +from collections.abc import Callable, Iterable, Sequence from importlib.metadata import version from typing import ( Any, - Callable, ) -import numpy as np import pandas as pd import petab.v1 as petab -from petab.v1.C import ( - ESTIMATE, - NOISE_PARAMETERS, - OBSERVABLE_ID, - PREEQUILIBRATION_CONDITION_ID, - SIMULATION_CONDITION_ID, -) -from petab.v1.models import MODEL_TYPE_SBML + +try: + import roadrunner +except ImportError: + roadrunner = None from ..C import ( + AMICI, CENSORED, - CENSORING_TYPES, - CONDITION_SEP, - MEASUREMENT_TYPE, - MODE_FUN, - MODE_RES, ORDINAL, ORDINAL_OPTIONS, - PARAMETER_TYPE, - RELATIVE, + PETAB, + ROADRUNNER, SEMIQUANTITATIVE, SPLINE_APPROXIMATION_OPTIONS, - InnerParameterType, ) from ..hierarchical.inner_calculator_collector import InnerCalculatorCollector -from ..objective import AggregatedObjective, AmiciObjective -from ..objective.amici import AmiciObjectBuilder +from ..objective import AggregatedObjective, AmiciObjective, ObjectiveBase from ..objective.priors import NegLogParameterPriors, get_parameter_prior_dict from ..predict import AmiciPredictor from ..problem import HierarchicalProblem, Problem from ..result import PredictionResult -from ..startpoint import CheckedStartpoints, StartpointMethod +from ..startpoint import StartpointMethod +from .objective_creator import ( + AmiciObjectiveCreator, + ObjectiveCreator, + PetabSimulatorObjectiveCreator, + RoadRunnerObjectiveCreator, +) +from .util import PetabStartpoints, get_petab_non_quantitative_data_types try: import amici - import amici.petab - import amici.petab.conditions - import amici.petab.parameter_mapping import amici.petab.simulations - from amici.petab.import_helpers import check_model except ImportError: amici = None logger = logging.getLogger(__name__) -class PetabImporter(AmiciObjectBuilder): +class PetabImporter: """ Importer for PEtab files. 
@@ -83,12 +71,15 @@ class PetabImporter(AmiciObjectBuilder): def __init__( self, petab_problem: petab.Problem, - output_folder: str = None, - model_name: str = None, + output_folder: str | None = None, + model_name: str | None = None, validate_petab: bool = True, validate_petab_hierarchical: bool = True, hierarchical: bool = False, - inner_options: dict = None, + inner_options: dict | None = None, + simulator_type: str = AMICI, + simulator: petab.Simulator | None = None, + rr: roadrunner.RoadRunner | None = None, ): """Initialize importer. @@ -115,6 +106,13 @@ def __init__( inner_options: Options for the inner problems and solvers. If not provided, default options will be used. + simulator_type: + The type of simulator to use. Depending on this different kinds + of objectives will be created. Allowed types are 'amici', 'petab', + and 'roadrunner'. + simulator: + In case of a ``simulator_type == 'petab'``, the simulator object + has to be provided. Otherwise, the argument is not used. """ self.petab_problem = petab_problem self._hierarchical = hierarchical @@ -171,11 +169,21 @@ def __init__( model_name = _find_model_name(self.output_folder) self.model_name = model_name + self.simulator_type = simulator_type + self.simulator = simulator + if simulator_type == PETAB and simulator is None: + raise ValueError( + "A petab simulator object must be provided if the simulator " + "type is 'petab'." + ) + self.roadrunner_instance = rr + @staticmethod def from_yaml( yaml_config: dict | str, output_folder: str = None, model_name: str = None, + simulator_type: str = AMICI, ) -> PetabImporter: """Simplified constructor using a petab yaml file.""" petab_problem = petab.Problem.from_yaml(yaml_config) @@ -184,6 +192,7 @@ def from_yaml( petab_problem=petab_problem, output_folder=output_folder, model_name=model_name, + simulator_type=simulator_type, ) def validate_inner_options(self): @@ -217,438 +226,11 @@ def check_gradients( ------- match: Whether gradients match FDs (True) or not (False) """ - par = np.asarray(self.petab_problem.x_nominal_scaled) - problem = self.create_problem() - objective = problem.objective - free_indices = par[problem.x_free_indices] - dfs = [] - modes = [] - - if mode is None: - modes = [MODE_FUN, MODE_RES] - else: - modes = [mode] - - if multi_eps is None: - multi_eps = np.array([10 ** (-i) for i in range(3, 9)]) - - for mode in modes: - try: - dfs.append( - objective.check_grad_multi_eps( - free_indices, - *args, - **kwargs, - mode=mode, - multi_eps=multi_eps, - ) - ) - except (RuntimeError, ValueError): - # Might happen in case PEtab problem not well defined or - # fails for specified tolerances in forward sensitivities - return False - - return all( - any( - [ - np.all( - (mode_df.rel_err.values < rtol) - | (mode_df.abs_err.values < atol) - ), - ] - ) - for mode_df in dfs + raise NotImplementedError( + "This function has been removed. " + "Please use `objective.check_gradients_match_finite_differences`." ) - def create_model( - self, - force_compile: bool = False, - verbose: bool = True, - **kwargs, - ) -> amici.Model: - """ - Import amici model. - - Parameters - ---------- - force_compile: - If False, the model is compiled only if the output folder does not - exist yet. If True, the output folder is deleted and the model - (re-)compiled in either case. - - .. warning:: - If `force_compile`, then an existing folder of that name will - be deleted. - verbose: - Passed to AMICI's model compilation. If True, the compilation - progress is printed. 
- kwargs: - Extra arguments passed to amici.SbmlImporter.sbml2amici - """ - # courtesy check whether target is folder - if os.path.exists(self.output_folder) and not os.path.isdir( - self.output_folder - ): - raise AssertionError( - f"Refusing to remove {self.output_folder} for model " - f"compilation: Not a folder." - ) - - # add module to path - if self.output_folder not in sys.path: - sys.path.insert(0, self.output_folder) - - # compile - if self._must_compile(force_compile): - logger.info( - f"Compiling amici model to folder " f"{self.output_folder}." - ) - if self.petab_problem.model.type_id == MODEL_TYPE_SBML: - self.compile_model( - validate=self.validate_petab, - verbose=verbose, - **kwargs, - ) - else: - self.compile_model(verbose=verbose, **kwargs) - else: - logger.debug( - f"Using existing amici model in folder " - f"{self.output_folder}." - ) - - return self._create_model() - - def _create_model(self) -> amici.Model: - """Load model module and return the model, no checks/compilation.""" - # load moduĺe - module = amici.import_model_module( - module_name=self.model_name, module_path=self.output_folder - ) - model = module.getModel() - check_model( - amici_model=model, - petab_problem=self.petab_problem, - ) - - return model - - def _must_compile(self, force_compile: bool): - """Check whether the model needs to be compiled first.""" - # asked by user - if force_compile: - return True - - # folder does not exist - if not os.path.exists(self.output_folder) or not os.listdir( - self.output_folder - ): - return True - - # try to import (in particular checks version) - try: - # importing will already raise an exception if version wrong - amici.import_model_module(self.model_name, self.output_folder) - except ModuleNotFoundError: - return True - except amici.AmiciVersionError as e: - logger.info( - "amici model will be re-imported due to version " - f"mismatch: {e}" - ) - return True - - # no need to (re-)compile - return False - - def compile_model(self, **kwargs): - """ - Compile the model. - - If the output folder exists already, it is first deleted. - - Parameters - ---------- - kwargs: - Extra arguments passed to :meth:`amici.sbml_import.SbmlImporter.sbml2amici` - or :func:`amici.pysb_import.pysb2amici`. - """ - # delete output directory - if os.path.exists(self.output_folder): - shutil.rmtree(self.output_folder) - - amici.petab.import_petab_problem( - petab_problem=self.petab_problem, - model_name=self.model_name, - model_output_dir=self.output_folder, - **kwargs, - ) - - def create_solver( - self, - model: amici.Model = None, - verbose: bool = True, - ) -> amici.Solver: - """Return model solver.""" - # create model - if model is None: - model = self.create_model(verbose=verbose) - - solver = model.getSolver() - return solver - - def create_edatas( - self, - model: amici.Model = None, - simulation_conditions=None, - verbose: bool = True, - ) -> list[amici.ExpData]: - """Create list of :class:`amici.amici.ExpData` objects.""" - # create model - if model is None: - model = self.create_model(verbose=verbose) - - return amici.petab.conditions.create_edatas( - amici_model=model, - petab_problem=self.petab_problem, - simulation_conditions=simulation_conditions, - ) - - def create_objective( - self, - model: amici.Model = None, - solver: amici.Solver = None, - edatas: Sequence[amici.ExpData] = None, - force_compile: bool = False, - verbose: bool = True, - **kwargs, - ) -> AmiciObjective: - """Create a :class:`pypesto.objective.AmiciObjective`. 
- - Parameters - ---------- - model: - The AMICI model. - solver: - The AMICI solver. - edatas: - The experimental data in AMICI format. - force_compile: - Whether to force-compile the model if not passed. - verbose: - Passed to AMICI's model compilation. If True, the compilation - progress is printed. - **kwargs: - Additional arguments passed on to the objective. In case of ordinal - or semiquantitative measurements, ``inner_options`` can optionally - be passed here. If none are given, ``inner_options`` given to the - importer constructor (or inner defaults) will be chosen. - - Returns - ------- - A :class:`pypesto.objective.AmiciObjective` for the model and the data. - """ - # get simulation conditions - simulation_conditions = petab.get_simulation_conditions( - self.petab_problem.measurement_df - ) - - # create model - if model is None: - model = self.create_model( - force_compile=force_compile, verbose=verbose - ) - # create solver - if solver is None: - solver = self.create_solver(model) - # create conditions and edatas from measurement data - if edatas is None: - edatas = self.create_edatas( - model=model, simulation_conditions=simulation_conditions - ) - - parameter_mapping = ( - amici.petab.parameter_mapping.create_parameter_mapping( - petab_problem=self.petab_problem, - simulation_conditions=simulation_conditions, - scaled_parameters=True, - amici_model=model, - fill_fixed_parameters=False, - ) - ) - - par_ids = self.petab_problem.x_ids - - # fill in dummy parameters (this is needed since some objective - # initialization e.g. checks for preeq parameters) - problem_parameters = dict( - zip(self.petab_problem.x_ids, self.petab_problem.x_nominal_scaled) - ) - amici.petab.conditions.fill_in_parameters( - edatas=edatas, - problem_parameters=problem_parameters, - scaled_parameters=True, - parameter_mapping=parameter_mapping, - amici_model=model, - ) - - calculator = None - amici_reporting = None - - if ( - self._non_quantitative_data_types is not None - and self._hierarchical - ): - inner_options = kwargs.pop("inner_options", None) - inner_options = ( - inner_options - if inner_options is not None - else self.inner_options - ) - calculator = InnerCalculatorCollector( - self._non_quantitative_data_types, - self.petab_problem, - model, - edatas, - inner_options, - ) - amici_reporting = amici.RDataReporting.full - - # FIXME: currently not supported with hierarchical - if "guess_steadystate" in kwargs and kwargs["guess_steadystate"]: - warnings.warn( - "`guess_steadystate` not supported with hierarchical " - "optimization. Disabling `guess_steadystate`.", - stacklevel=1, - ) - kwargs["guess_steadystate"] = False - inner_parameter_ids = calculator.get_inner_par_ids() - par_ids = [x for x in par_ids if x not in inner_parameter_ids] - - max_sensi_order = kwargs.get("max_sensi_order", None) - - if ( - self._non_quantitative_data_types is not None - and any( - data_type in self._non_quantitative_data_types - for data_type in [ORDINAL, CENSORED, SEMIQUANTITATIVE] - ) - and max_sensi_order is not None - and max_sensi_order > 1 - ): - raise ValueError( - "Ordinal, censored and semiquantitative data cannot be " - "used with second order sensitivities. 
Use a up to first order " - "method or disable ordinal, censored and semiquantitative " - ) - - # create objective - obj = AmiciObjective( - amici_model=model, - amici_solver=solver, - edatas=edatas, - x_ids=par_ids, - x_names=par_ids, - parameter_mapping=parameter_mapping, - amici_object_builder=self, - calculator=calculator, - amici_reporting=amici_reporting, - **kwargs, - ) - - return obj - - def create_predictor( - self, - objective: AmiciObjective = None, - amici_output_fields: Sequence[str] = None, - post_processor: Callable | None = None, - post_processor_sensi: Callable | None = None, - post_processor_time: Callable | None = None, - max_chunk_size: int | None = None, - output_ids: Sequence[str] = None, - condition_ids: Sequence[str] = None, - ) -> AmiciPredictor: - """Create a :class:`pypesto.predict.AmiciPredictor`. - - The `AmiciPredictor` facilitates generation of predictions from - parameter vectors. - - Parameters - ---------- - objective: - An objective object, which will be used to get model simulations - amici_output_fields: - keys that exist in the return data object from AMICI, which should - be available for the post-processors - post_processor: - A callable function which applies postprocessing to the simulation - results. Default are the observables of the AMICI model. - This method takes a list of ndarrays (as returned in the field - ['y'] of amici ReturnData objects) as input. - post_processor_sensi: - A callable function which applies postprocessing to the - sensitivities of the simulation results. Default are the - observable sensitivities of the AMICI model. - This method takes two lists of ndarrays (as returned in the - fields ['y'] and ['sy'] of amici ReturnData objects) as input. - post_processor_time: - A callable function which applies postprocessing to the timepoints - of the simulations. Default are the timepoints of the amici model. - This method takes a list of ndarrays (as returned in the field - ['t'] of amici ReturnData objects) as input. - max_chunk_size: - In some cases, we don't want to compute all predictions at once - when calling the prediction function, as this might not fit into - the memory for large datasets and models. - Here, the user can specify a maximum number of conditions, which - should be simulated at a time. - Default is 0 meaning that all conditions will be simulated. - Other values are only applicable, if an output file is specified. - output_ids: - IDs of outputs, if post-processing is used - condition_ids: - IDs of conditions, if post-processing is used - - Returns - ------- - A :class:`pypesto.predict.AmiciPredictor` for the model, using - the outputs of the AMICI model and the timepoints from the PEtab data. 
- """ - # if the user didn't pass an objective function, we create it first - if objective is None: - objective = self.create_objective() - - # create a identifiers of preequilibration and simulation condition ids - # which can then be stored in the prediction result - edata_conditions = objective.amici_object_builder.petab_problem.get_simulation_conditions_from_measurement_df() - if PREEQUILIBRATION_CONDITION_ID not in list(edata_conditions.columns): - preeq_dummy = [""] * edata_conditions.shape[0] - edata_conditions[PREEQUILIBRATION_CONDITION_ID] = preeq_dummy - edata_conditions.drop_duplicates(inplace=True) - - if condition_ids is None: - condition_ids = [ - edata_conditions.loc[id, PREEQUILIBRATION_CONDITION_ID] - + CONDITION_SEP - + edata_conditions.loc[id, SIMULATION_CONDITION_ID] - for id in edata_conditions.index - ] - - # wrap around AmiciPredictor - predictor = AmiciPredictor( - amici_objective=objective, - amici_output_fields=amici_output_fields, - post_processor=post_processor, - post_processor_sensi=post_processor_sensi, - post_processor_time=post_processor_time, - max_chunk_size=max_chunk_size, - output_ids=output_ids, - condition_ids=condition_ids, - ) - - return predictor - def create_prior(self) -> NegLogParameterPriors | None: """ Create a prior from the parameter table. @@ -707,9 +289,46 @@ def create_startpoint_method(self, **kwargs) -> StartpointMethod: """ return PetabStartpoints(petab_problem=self.petab_problem, **kwargs) + def create_objective_creator( + self, + simulator_type: str = AMICI, + simulator: petab.Simulator | None = None, + ) -> ObjectiveCreator: + """Choose :class:`ObjectiveCreator` depending on the simulator type. + + Parameters + ---------- + simulator_type: + The type of simulator to use. Depending on this different kinds + of objectives will be created. Allowed types are 'amici', 'petab', + and 'roadrunner'. + simulator: + In case of a ``simulator_type == 'petab'``, the simulator object + has to be provided. Otherwise the argument is not used. + + """ + if simulator_type == AMICI: + return AmiciObjectiveCreator( + petab_problem=self.petab_problem, + output_folder=self.output_folder, + model_name=self.model_name, + hierarchical=self._hierarchical, + inner_options=self.inner_options, + non_quantitative_data_types=self._non_quantitative_data_types, + validate_petab=self.validate_petab, + ) + elif simulator_type == PETAB: + return PetabSimulatorObjectiveCreator( + petab_problem=self.petab_problem, simulator=simulator + ) + elif simulator_type == ROADRUNNER: + return RoadRunnerObjectiveCreator( + petab_problem=self.petab_problem, rr=self.roadrunner_instance + ) + def create_problem( self, - objective: AmiciObjective = None, + objective: ObjectiveBase = None, x_guesses: Iterable[float] | None = None, problem_kwargs: dict[str, Any] = None, startpoint_kwargs: dict[str, Any] = None, @@ -739,7 +358,11 @@ def create_problem( A :class:`pypesto.problem.Problem` for the objective. 
""" if objective is None: - objective = self.create_objective(**kwargs) + self.objective_constructor = self.create_objective_creator( + kwargs.pop("simulator_type", self.simulator_type), + kwargs.pop("simulator", self.simulator), + ) + objective = self.objective_constructor.create_objective(**kwargs) x_fixed_indices = self.petab_problem.x_fixed_indices x_fixed_vals = self.petab_problem.x_nominal_fixed_scaled @@ -809,39 +432,77 @@ def create_problem( return problem + def create_objective( + self, + model: amici.Model = None, + solver: amici.Solver = None, + edatas: Sequence[amici.ExpData] = None, + force_compile: bool = False, + verbose: bool = True, + **kwargs, + ) -> ObjectiveBase: + """See :meth:`AmiciObjectiveCreator.create_objective`.""" + warnings.warn( + "This function has been moved to `AmiciObjectiveCreator`.", + DeprecationWarning, + stacklevel=2, + ) + objective_constructor = self.create_objective_creator( + kwargs.pop("simulator_type", self.simulator_type), + kwargs.pop("simulator", self.simulator), + ) + return objective_constructor.create_objective( + model=model, + solver=solver, + edatas=edatas, + force_compile=force_compile, + verbose=verbose, + **kwargs, + ) + + def create_predictor( + self, + objective: AmiciObjective = None, + amici_output_fields: Sequence[str] = None, + post_processor: Callable | None = None, + post_processor_sensi: Callable | None = None, + post_processor_time: Callable | None = None, + max_chunk_size: int | None = None, + output_ids: Sequence[str] = None, + condition_ids: Sequence[str] = None, + ) -> AmiciPredictor: + """See :meth:`AmiciObjectiveCreator.create_predictor`.""" + if self.simulator_type != AMICI: + raise ValueError( + "Predictor can only be created for amici models and is " + "supposed to be created from the AmiciObjectiveCreator." + ) + warnings.warn( + "This function has been moved to `AmiciObjectiveCreator`.", + DeprecationWarning, + stacklevel=2, + ) + objective_constructor = self.create_objective_creator() + return objective_constructor.create_predictor( + objective=objective, + amici_output_fields=amici_output_fields, + post_processor=post_processor, + post_processor_sensi=post_processor_sensi, + post_processor_time=post_processor_time, + max_chunk_size=max_chunk_size, + output_ids=output_ids, + condition_ids=condition_ids, + ) + def rdatas_to_measurement_df( self, rdatas: Sequence[amici.ReturnData], model: amici.Model = None, verbose: bool = True, ) -> pd.DataFrame: - """ - Create a measurement dataframe in the petab format. - - Parameters - ---------- - rdatas: - A list of rdatas as produced by - ``pypesto.AmiciObjective.__call__(x, return_dict=True)['rdatas']``. - model: - The amici model. - verbose: - Passed to AMICI's model compilation. If True, the compilation - progress is printed. - - Returns - ------- - A dataframe built from the rdatas in the format as in - ``self.petab_problem.measurement_df``. - """ - # create model - if model is None: - model = self.create_model(verbose=verbose) - - measurement_df = self.petab_problem.measurement_df - - return amici.petab.simulations.rdatas_to_measurement_df( - rdatas, model, measurement_df + """See :meth:`AmiciObjectiveCreator.rdatas_to_measurement_df`.""" + raise NotImplementedError( + "This function has been moved to `AmiciObjectiveCreator`." ) def rdatas_to_simulation_df( @@ -855,8 +516,8 @@ def rdatas_to_simulation_df( Except a petab simulation dataframe is created, i.e. the measurement column label is adjusted. 
""" - return self.rdatas_to_measurement_df(rdatas, model).rename( - columns={petab.MEASUREMENT: petab.SIMULATION} + raise NotImplementedError( + "This function has been moved to `AmiciObjectiveCreator`." ) def prediction_to_petab_measurement_df( @@ -882,24 +543,9 @@ def prediction_to_petab_measurement_df( A dataframe built from the rdatas in the format as in ``self.petab_problem.measurement_df``. """ - - # create rdata-like dicts from the prediction result - @dataclass - class FakeRData: - ts: np.ndarray - y: np.ndarray - - rdatas = [ - FakeRData(ts=condition.timepoints, y=condition.output) - for condition in prediction.conditions - ] - - # add an AMICI model, if possible - model = None - if predictor is not None: - model = predictor.amici_objective.amici_model - - return self.rdatas_to_measurement_df(rdatas, model) + raise NotImplementedError( + "This function has been moved to `AmiciObjectiveCreator`." + ) def prediction_to_petab_simulation_df( self, @@ -912,9 +558,9 @@ def prediction_to_petab_simulation_df( Except a PEtab simulation dataframe is created, i.e. the measurement column label is adjusted. """ - return self.prediction_to_petab_measurement_df( - prediction, predictor - ).rename(columns={petab.MEASUREMENT: petab.SIMULATION}) + raise NotImplementedError( + "This function has been moved to `AmiciObjectiveCreator`." + ) def _find_output_folder_name( @@ -962,148 +608,3 @@ def _find_output_folder_name( def _find_model_name(output_folder: str) -> str: """Just re-use the last part of the output folder.""" return os.path.split(os.path.normpath(output_folder))[-1] - - -def get_petab_non_quantitative_data_types( - petab_problem: petab.Problem, -) -> set[str]: - """ - Get the data types from the PEtab problem. - - Parameters - ---------- - petab_problem: - The PEtab problem. - - Returns - ------- - data_types: - A list of the data types. 
- """ - non_quantitative_data_types = set() - caught_observables = set() - # For ordinal, censored and semiquantitative data, search - # for the corresponding data types in the measurement table - meas_df = petab_problem.measurement_df - if MEASUREMENT_TYPE in meas_df.columns: - petab_data_types = meas_df[MEASUREMENT_TYPE].unique() - for data_type in [ORDINAL, SEMIQUANTITATIVE] + CENSORING_TYPES: - if data_type in petab_data_types: - non_quantitative_data_types.add( - CENSORED if data_type in CENSORING_TYPES else data_type - ) - caught_observables.update( - set( - meas_df[meas_df[MEASUREMENT_TYPE] == data_type][ - OBSERVABLE_ID - ] - ) - ) - - # For relative data, search for parameters to estimate with - # a scaling/offset/sigma parameter type - if PARAMETER_TYPE in petab_problem.parameter_df.columns: - # get the df with non-nan parameter types - par_df = petab_problem.parameter_df[ - petab_problem.parameter_df[PARAMETER_TYPE].notna() - ] - for par_id, row in par_df.iterrows(): - if not row[ESTIMATE]: - continue - if row[PARAMETER_TYPE] in [ - InnerParameterType.SCALING, - InnerParameterType.OFFSET, - ]: - non_quantitative_data_types.add(RELATIVE) - - # For sigma parameters, we need to check if they belong - # to an observable with a non-quantitative data type - elif row[PARAMETER_TYPE] == InnerParameterType.SIGMA: - corresponding_observables = set( - meas_df[meas_df[NOISE_PARAMETERS] == par_id][OBSERVABLE_ID] - ) - if not (corresponding_observables & caught_observables): - non_quantitative_data_types.add(RELATIVE) - - # TODO this can be made much shorter if the relative measurements - # are also specified in the measurement table, but that would require - # changing the PEtab format of a lot of benchmark models. - - if len(non_quantitative_data_types) == 0: - return None - return non_quantitative_data_types - - -class PetabStartpoints(CheckedStartpoints): - """Startpoint method for PEtab problems. - - Samples optimization startpoints from the distributions defined in the - provided PEtab problem. The PEtab-problem is copied. - """ - - def __init__(self, petab_problem: petab.Problem, **kwargs): - super().__init__(**kwargs) - self._parameter_df = petab_problem.parameter_df.copy() - self._priors: list[tuple] | None = None - self._free_ids: list[str] | None = None - - def _setup( - self, - pypesto_problem: Problem, - ): - """Update priors if necessary. - - Check if ``problem.x_free_indices`` changed since last call, and if so, - get the corresponding priors from PEtab. - """ - current_free_ids = np.asarray(pypesto_problem.x_names)[ - pypesto_problem.x_free_indices - ] - - if ( - self._priors is not None - and len(current_free_ids) == len(self._free_ids) - and np.all(current_free_ids == self._free_ids) - ): - # no need to update - return - - # update priors - self._free_ids = current_free_ids - id_to_prior = dict( - zip( - self._parameter_df.index[self._parameter_df[ESTIMATE] == 1], - petab.parameters.get_priors_from_df( - self._parameter_df, mode=petab.INITIALIZATION - ), - ) - ) - - self._priors = list(map(id_to_prior.__getitem__, current_free_ids)) - - def __call__( - self, - n_starts: int, - problem: Problem, - ) -> np.ndarray: - """Call the startpoint method.""" - # Update the list of priors if needed - self._setup(pypesto_problem=problem) - - return super().__call__(n_starts, problem) - - def sample( - self, - n_starts: int, - lb: np.ndarray, - ub: np.ndarray, - ) -> np.ndarray: - """Actual startpoint sampling. 
- - Must only be called through `self.__call__` to ensure that the list of priors - matches the currently free parameters in the :class:`pypesto.Problem`. - """ - sampler = partial(petab.sample_from_prior, n_starts=n_starts) - startpoints = list(map(sampler, self._priors)) - - return np.array(startpoints).T diff --git a/pypesto/petab/objective_creator.py b/pypesto/petab/objective_creator.py new file mode 100644 index 000000000..72f98cf03 --- /dev/null +++ b/pypesto/petab/objective_creator.py @@ -0,0 +1,853 @@ +"""Contains the ObjectiveCreator class.""" + +from __future__ import annotations + +import logging +import numbers +import os +import re +import shutil +import sys +import warnings +from abc import ABC, abstractmethod +from collections.abc import Iterable, Sequence +from dataclasses import dataclass +from typing import ( + Any, + Callable, +) + +import numpy as np +import pandas as pd +import petab.v1 as petab +from petab.v1.C import ( + OBSERVABLE_FORMULA, + PREEQUILIBRATION_CONDITION_ID, + SIMULATION_CONDITION_ID, +) +from petab.v1.models import MODEL_TYPE_SBML +from petab.v1.models.sbml_model import SbmlModel +from petab.v1.parameter_mapping import ParMappingDictQuadruple +from petab.v1.simulate import Simulator + +from ..C import CENSORED, CONDITION_SEP, LIN, ORDINAL, SEMIQUANTITATIVE +from ..hierarchical.inner_calculator_collector import InnerCalculatorCollector +from ..objective import AmiciObjective, ObjectiveBase, PetabSimulatorObjective +from ..objective.amici import AmiciObjectBuilder +from ..objective.roadrunner import ( + ExpData, + RoadRunnerCalculator, + RoadRunnerObjective, +) +from ..predict import AmiciPredictor +from ..result import PredictionResult + +try: + import amici + import amici.petab + import amici.petab.conditions + import amici.petab.parameter_mapping + import amici.petab.simulations + from amici.petab.import_helpers import check_model +except ImportError: + amici = None +try: + import libsbml + import roadrunner +except ImportError: + roadrunner = None + libsbml = None + +logger = logging.getLogger(__name__) + + +class ObjectiveCreator(ABC): + """Abstract Creator for creating an objective function.""" + + @abstractmethod + def create_objective(self, **kwargs) -> ObjectiveBase: + """Create an objective function.""" + pass + + +class AmiciObjectiveCreator(ObjectiveCreator, AmiciObjectBuilder): + """ObjectiveCreator for creating an amici objective function.""" + + def __init__( + self, + petab_problem: petab.Problem, + hierarchical: bool = False, + non_quantitative_data_types: Iterable[str] | None = None, + inner_options: dict[str, Any] | None = None, + output_folder: str | None = None, + model_name: str | None = None, + validate_petab: bool = True, + ): + """ + Initialize the creator. + + Parameters + ---------- + petab_problem: + The PEtab problem. + hierarchical: + Whether to use hierarchical optimization. + non_quantitative_data_types: + The non-quantitative data types to consider. + inner_options: + Options for the inner optimization. + output_folder: + The output folder for the compiled model. + model_name: + The name of the model. + validate_petab: + Whether to check the PEtab problem for errors. 
+ """ + self.petab_problem = petab_problem + self._hierarchical = hierarchical + self._non_quantitative_data_types = non_quantitative_data_types + self.inner_options = inner_options + self.output_folder = output_folder + self.model_name = model_name + self.validate_petab = validate_petab + + def create_model( + self, + force_compile: bool = False, + verbose: bool = True, + **kwargs, + ) -> amici.Model: + """ + Import amici model. + + Parameters + ---------- + force_compile: + If False, the model is compiled only if the output folder does not + exist yet. If True, the output folder is deleted and the model + (re-)compiled in either case. + + .. warning:: + If `force_compile`, then an existing folder of that name will + be deleted. + verbose: + Passed to AMICI's model compilation. If True, the compilation + progress is printed. + kwargs: + Extra arguments passed to amici.SbmlImporter.sbml2amici + """ + # courtesy check whether target is folder + if os.path.exists(self.output_folder) and not os.path.isdir( + self.output_folder + ): + raise AssertionError( + f"Refusing to remove {self.output_folder} for model " + f"compilation: Not a folder." + ) + + # add module to path + if self.output_folder not in sys.path: + sys.path.insert(0, self.output_folder) + + # compile + if self._must_compile(force_compile): + logger.info( + f"Compiling amici model to folder " f"{self.output_folder}." + ) + if self.petab_problem.model.type_id == MODEL_TYPE_SBML: + self.compile_model( + validate=self.validate_petab, + verbose=verbose, + **kwargs, + ) + else: + self.compile_model(verbose=verbose, **kwargs) + else: + logger.debug( + f"Using existing amici model in folder " + f"{self.output_folder}." + ) + + return self._create_model() + + def _create_model(self) -> amici.Model: + """Load model module and return the model, no checks/compilation.""" + # load moduĺe + module = amici.import_model_module( + module_name=self.model_name, module_path=self.output_folder + ) + model = module.getModel() + check_model( + amici_model=model, + petab_problem=self.petab_problem, + ) + + return model + + def _must_compile(self, force_compile: bool): + """Check whether the model needs to be compiled first.""" + # asked by user + if force_compile: + return True + + # folder does not exist + if not os.path.exists(self.output_folder) or not os.listdir( + self.output_folder + ): + return True + + # try to import (in particular checks version) + try: + # importing will already raise an exception if version wrong + amici.import_model_module(self.model_name, self.output_folder) + except ModuleNotFoundError: + return True + except amici.AmiciVersionError as e: + logger.info( + "amici model will be re-imported due to version " + f"mismatch: {e}" + ) + return True + + # no need to (re-)compile + return False + + def compile_model(self, **kwargs): + """ + Compile the model. + + If the output folder exists already, it is first deleted. + + Parameters + ---------- + kwargs: + Extra arguments passed to :meth:`amici.sbml_import.SbmlImporter.sbml2amici` + or :func:`amici.pysb_import.pysb2amici`. 
+ """ + # delete output directory + if os.path.exists(self.output_folder): + shutil.rmtree(self.output_folder) + + amici.petab.import_petab_problem( + petab_problem=self.petab_problem, + model_name=self.model_name, + model_output_dir=self.output_folder, + **kwargs, + ) + + def create_solver( + self, + model: amici.Model = None, + verbose: bool = True, + ) -> amici.Solver: + """Return model solver.""" + # create model + if model is None: + model = self.create_model(verbose=verbose) + + solver = model.getSolver() + return solver + + def create_edatas( + self, + model: amici.Model = None, + simulation_conditions=None, + verbose: bool = True, + ) -> list[amici.ExpData]: + """Create list of :class:`amici.amici.ExpData` objects.""" + # create model + if model is None: + model = self.create_model(verbose=verbose) + + return amici.petab.conditions.create_edatas( + amici_model=model, + petab_problem=self.petab_problem, + simulation_conditions=simulation_conditions, + ) + + def create_objective( + self, + model: amici.Model = None, + solver: amici.Solver = None, + edatas: Sequence[amici.ExpData] = None, + force_compile: bool = False, + verbose: bool = True, + **kwargs, + ) -> AmiciObjective: + """Create a :class:`pypesto.objective.AmiciObjective`. + + Parameters + ---------- + model: + The AMICI model. + solver: + The AMICI solver. + edatas: + The experimental data in AMICI format. + force_compile: + Whether to force-compile the model if not passed. + verbose: + Passed to AMICI's model compilation. If True, the compilation + progress is printed. + **kwargs: + Additional arguments passed on to the objective. In case of ordinal + or semiquantitative measurements, ``inner_options`` can optionally + be passed here. If none are given, ``inner_options`` given to the + importer constructor (or inner defaults) will be chosen. + + Returns + ------- + A :class:`pypesto.objective.AmiciObjective` for the model and the data. + """ + simulation_conditions = petab.get_simulation_conditions( + self.petab_problem.measurement_df + ) + if model is None: + model = self.create_model( + force_compile=force_compile, verbose=verbose + ) + if solver is None: + solver = self.create_solver(model) + # create conditions and edatas from measurement data + if edatas is None: + edatas = self.create_edatas( + model=model, simulation_conditions=simulation_conditions + ) + parameter_mapping = ( + amici.petab.parameter_mapping.create_parameter_mapping( + petab_problem=self.petab_problem, + simulation_conditions=simulation_conditions, + scaled_parameters=True, + amici_model=model, + fill_fixed_parameters=False, + ) + ) + par_ids = self.petab_problem.x_ids + + # fill in dummy parameters (this is needed since some objective + # initialization e.g. 
checks for preeq parameters) + problem_parameters = dict( + zip(self.petab_problem.x_ids, self.petab_problem.x_nominal_scaled) + ) + amici.petab.conditions.fill_in_parameters( + edatas=edatas, + problem_parameters=problem_parameters, + scaled_parameters=True, + parameter_mapping=parameter_mapping, + amici_model=model, + ) + + calculator = None + amici_reporting = None + + if ( + self._non_quantitative_data_types is not None + and self._hierarchical + ): + inner_options = kwargs.pop("inner_options", None) + inner_options = ( + inner_options + if inner_options is not None + else self.inner_options + ) + calculator = InnerCalculatorCollector( + self._non_quantitative_data_types, + self.petab_problem, + model, + edatas, + inner_options, + ) + amici_reporting = amici.RDataReporting.full + + # FIXME: currently not supported with hierarchical + if "guess_steadystate" in kwargs and kwargs["guess_steadystate"]: + warnings.warn( + "`guess_steadystate` not supported with hierarchical " + "optimization. Disabling `guess_steadystate`.", + stacklevel=1, + ) + kwargs["guess_steadystate"] = False + inner_parameter_ids = calculator.get_inner_par_ids() + par_ids = [x for x in par_ids if x not in inner_parameter_ids] + + max_sensi_order = kwargs.get("max_sensi_order", None) + + if ( + self._non_quantitative_data_types is not None + and any( + data_type in self._non_quantitative_data_types + for data_type in [ORDINAL, CENSORED, SEMIQUANTITATIVE] + ) + and max_sensi_order is not None + and max_sensi_order > 1 + ): + raise ValueError( + "Ordinal, censored and semiquantitative data cannot be " + "used with second order sensitivities. Use a up to first order " + "method or disable ordinal, censored and semiquantitative " + ) + + # create objective + obj = AmiciObjective( + amici_model=model, + amici_solver=solver, + edatas=edatas, + x_ids=par_ids, + x_names=par_ids, + parameter_mapping=parameter_mapping, + amici_object_builder=self, + calculator=calculator, + amici_reporting=amici_reporting, + **kwargs, + ) + + return obj + + def create_predictor( + self, + objective: AmiciObjective = None, + amici_output_fields: Sequence[str] = None, + post_processor: Callable | None = None, + post_processor_sensi: Callable | None = None, + post_processor_time: Callable | None = None, + max_chunk_size: int | None = None, + output_ids: Sequence[str] = None, + condition_ids: Sequence[str] = None, + ) -> AmiciPredictor: + """Create a :class:`pypesto.predict.AmiciPredictor`. + + The `AmiciPredictor` facilitates generation of predictions from + parameter vectors. + + Parameters + ---------- + objective: + An objective object, which will be used to get model simulations + amici_output_fields: + keys that exist in the return data object from AMICI, which should + be available for the post-processors + post_processor: + A callable function which applies postprocessing to the simulation + results. Default are the observables of the AMICI model. + This method takes a list of ndarrays (as returned in the field + ['y'] of amici ReturnData objects) as input. + post_processor_sensi: + A callable function which applies postprocessing to the + sensitivities of the simulation results. Default are the + observable sensitivities of the AMICI model. + This method takes two lists of ndarrays (as returned in the + fields ['y'] and ['sy'] of amici ReturnData objects) as input. + post_processor_time: + A callable function which applies postprocessing to the timepoints + of the simulations. Default are the timepoints of the amici model. 
+ This method takes a list of ndarrays (as returned in the field + ['t'] of amici ReturnData objects) as input. + max_chunk_size: + In some cases, we don't want to compute all predictions at once + when calling the prediction function, as this might not fit into + the memory for large datasets and models. + Here, the user can specify a maximum number of conditions, which + should be simulated at a time. + Default is 0 meaning that all conditions will be simulated. + Other values are only applicable, if an output file is specified. + output_ids: + IDs of outputs, if post-processing is used + condition_ids: + IDs of conditions, if post-processing is used + + Returns + ------- + A :class:`pypesto.predict.AmiciPredictor` for the model, using + the outputs of the AMICI model and the timepoints from the PEtab data. + """ + # if the user didn't pass an objective function, we create it first + if objective is None: + objective = self.create_objective() + + # create a identifiers of preequilibration and simulation condition ids + # which can then be stored in the prediction result + edata_conditions = objective.amici_object_builder.petab_problem.get_simulation_conditions_from_measurement_df() + if PREEQUILIBRATION_CONDITION_ID not in list(edata_conditions.columns): + preeq_dummy = [""] * edata_conditions.shape[0] + edata_conditions[PREEQUILIBRATION_CONDITION_ID] = preeq_dummy + edata_conditions.drop_duplicates(inplace=True) + + if condition_ids is None: + condition_ids = [ + edata_conditions.loc[id, PREEQUILIBRATION_CONDITION_ID] + + CONDITION_SEP + + edata_conditions.loc[id, SIMULATION_CONDITION_ID] + for id in edata_conditions.index + ] + + # wrap around AmiciPredictor + predictor = AmiciPredictor( + amici_objective=objective, + amici_output_fields=amici_output_fields, + post_processor=post_processor, + post_processor_sensi=post_processor_sensi, + post_processor_time=post_processor_time, + max_chunk_size=max_chunk_size, + output_ids=output_ids, + condition_ids=condition_ids, + ) + + return predictor + + def rdatas_to_measurement_df( + self, + rdatas: Sequence[amici.ReturnData], + model: amici.Model = None, + verbose: bool = True, + ) -> pd.DataFrame: + """ + Create a measurement dataframe in the petab format. + + Parameters + ---------- + rdatas: + A list of rdatas as produced by + ``pypesto.AmiciObjective.__call__(x, return_dict=True)['rdatas']``. + model: + The amici model. + verbose: + Passed to AMICI's model compilation. If True, the compilation + progress is printed. + + Returns + ------- + A dataframe built from the rdatas in the format as in + ``self.petab_problem.measurement_df``. + """ + # create model + if model is None: + model = self.create_model(verbose=verbose) + + measurement_df = self.petab_problem.measurement_df + + return amici.petab.simulations.rdatas_to_measurement_df( + rdatas, model, measurement_df + ) + + def rdatas_to_simulation_df( + self, + rdatas: Sequence[amici.ReturnData], + model: amici.Model = None, + ) -> pd.DataFrame: + """ + See :meth:`rdatas_to_measurement_df`. + + Except a petab simulation dataframe is created, i.e. the measurement + column label is adjusted. + """ + return self.rdatas_to_measurement_df(rdatas, model).rename( + columns={petab.MEASUREMENT: petab.SIMULATION} + ) + + def prediction_to_petab_measurement_df( + self, + prediction: PredictionResult, + predictor: AmiciPredictor = None, + ) -> pd.DataFrame: + """ + Cast prediction into a dataframe. 
+ + If a PEtab problem is simulated without post-processing, then the + result can be cast into a PEtab measurement or simulation dataframe + + Parameters + ---------- + prediction: + A prediction result as produced by an :class:`pypesto.predict.AmiciPredictor`. + predictor: + The :class:`pypesto.predict.AmiciPredictor` instance. + + Returns + ------- + A dataframe built from the rdatas in the format as in + ``self.petab_problem.measurement_df``. + """ + + # create rdata-like dicts from the prediction result + @dataclass + class FakeRData: + ts: np.ndarray + y: np.ndarray + + rdatas = [ + FakeRData(ts=condition.timepoints, y=condition.output) + for condition in prediction.conditions + ] + + # add an AMICI model, if possible + model = None + if predictor is not None: + model = predictor.amici_objective.amici_model + + return self.rdatas_to_measurement_df(rdatas, model) + + def prediction_to_petab_simulation_df( + self, + prediction: PredictionResult, + predictor: AmiciPredictor = None, + ) -> pd.DataFrame: + """ + See :meth:`prediction_to_petab_measurement_df`. + + Except a PEtab simulation dataframe is created, i.e. the measurement + column label is adjusted. + """ + return self.prediction_to_petab_measurement_df( + prediction, predictor + ).rename(columns={petab.MEASUREMENT: petab.SIMULATION}) + + +class PetabSimulatorObjectiveCreator(ObjectiveCreator): + """ObjectiveCreator for creating an objective based on a PEtabSimulator.""" + + def __init__( + self, + petab_problem: petab.Problem, + simulator: Simulator, + ): + self.petab_problem = petab_problem + self.simulator = simulator + + def create_objective(self, **kwargs): + """Create a PEtabSimulatorObjective.""" + return PetabSimulatorObjective(self.simulator) + + +class RoadRunnerObjectiveCreator(ObjectiveCreator): + """ObjectiveCreator for creating an objective for a RoadRunner model.""" + + def __init__( + self, + petab_problem: petab.Problem, + rr: roadrunner.RoadRunner | None = None, + ): + self.petab_problem = petab_problem + if rr is None: + if roadrunner is None: + raise ImportError( + "The `roadrunner` package is required for this objective " + "function." + ) + rr = roadrunner.RoadRunner() + self.rr = rr + + def _check_noise_formulae( + self, + edatas: list[ExpData] | None = None, + parameter_mapping: list[ParMappingDictQuadruple] | None = None, + ): + """Check if the noise formulae are valid. + + Currently, only static values or singular parameters are supported. + Complex formulae are not supported. 
+ """ + # check that parameter mapping is available + if parameter_mapping is None: + parameter_mapping = self.create_parameter_mapping() + # check that edatas are available + if edatas is None: + edatas = self.create_edatas() + # save formulae that need to be changed + to_change = [] + # check that noise formulae are valid + for i_edata, (edata, par_map) in enumerate( + zip(edatas, parameter_mapping) + ): + for j_formula, noise_formula in enumerate(edata.noise_formulae): + # constant values are allowed + if isinstance(noise_formula, numbers.Number): + continue + # single parameters are allowed + if noise_formula in par_map[1].keys(): + continue + # extract the observable name via regex pattern + pattern = r"noiseParameter1_(.*?)($|\s)" + observable_name = re.search(pattern, noise_formula).group(1) + to_change.append((i_edata, j_formula, observable_name)) + # change formulae + formulae_changed = [] + for i_edata, j_formula, obs_name in to_change: + # assign new parameter, formula in RR and parameter into mapping + original_formula = edatas[i_edata].noise_formulae[j_formula] + edatas[i_edata].noise_formulae[ + j_formula + ] = f"noiseFormula_{obs_name}" + # different conditions will have the same noise formula + if (obs_name, original_formula) not in formulae_changed: + self.rr.addParameter(f"noiseFormula_{obs_name}", 0.0, False) + self.rr.addAssignmentRule( + f"noiseFormula_{obs_name}", + original_formula, + forceRegenerate=False, + ) + self.rr.regenerateModel() + formulae_changed.append((obs_name, original_formula)) + + def _write_observables_to_model(self): + """Write observables of petab problem to the model.""" + # add all observables as species + for obs_id in self.petab_problem.observable_df.index: + self.rr.addParameter(obs_id, 0.0, False) + # extract all parameters from observable formulas + parameters = petab.get_output_parameters( + self.petab_problem.observable_df, + self.petab_problem.model, + noise=True, + observables=True, + ) + # add all parameters to the model + for param_id in parameters: + self.rr.addParameter(param_id, 0.0, False) + formulae = self.petab_problem.observable_df[ + OBSERVABLE_FORMULA + ].to_dict() + + # add all observable formulas as assignment rules + for obs_id, formula in formulae.items(): + self.rr.addAssignmentRule(obs_id, formula, forceRegenerate=False) + + # regenerate model to apply changes + self.rr.regenerateModel() + + def create_edatas(self) -> list[ExpData]: + """Create a List of :class:`ExpData` objects from the PEtab problem.""" + # Create Dataframes per condition + return ExpData.from_petab_problem(self.petab_problem) + + def fill_model(self): + """Fill the RoadRunner model inplace from the PEtab problem. + + Parameters + ---------- + return_model: + Flag indicating if the model should be returned. + """ + if not isinstance(self.petab_problem.model, SbmlModel): + raise ValueError( + "The model is not an SBML model. Using " + "RoadRunner as simulator requires an SBML model." 
+ ) # TODO: add Pysb support + if self.petab_problem.model.sbml_document: + sbml_document = self.petab_problem.model.sbml_document + elif self.petab_problem.model.sbml_model: + sbml_document = ( + self.petab_problem.model.sbml_model.getSBMLDocument() + ) + else: + raise ValueError("No SBML model found.") + sbml_writer = libsbml.SBMLWriter() + sbml_string = sbml_writer.writeSBMLToString(sbml_document) + self.rr.load(sbml_string) + self._write_observables_to_model() + + def create_parameter_mapping(self): + """Create a parameter mapping from the PEtab problem.""" + simulation_conditions = ( + self.petab_problem.get_simulation_conditions_from_measurement_df() + ) + mapping = petab.get_optimization_to_simulation_parameter_mapping( + condition_df=self.petab_problem.condition_df, + measurement_df=self.petab_problem.measurement_df, + parameter_df=self.petab_problem.parameter_df, + observable_df=self.petab_problem.observable_df, + model=self.petab_problem.model, + ) + # check whether any species in the condition table are assigned + species = self.rr.model.getFloatingSpeciesIds() + # overrides in parameter table are handled already + overrides = [ + specie + for specie in species + if specie in self.petab_problem.condition_df.columns + ] + if not overrides: + return mapping + for (_, condition), mapping_per_condition in zip( + simulation_conditions.iterrows(), mapping + ): + for override in overrides: + preeq_id = condition.get(PREEQUILIBRATION_CONDITION_ID) + sim_id = condition.get(SIMULATION_CONDITION_ID) + if preeq_id: + parameter_id_or_value = ( + self.petab_problem.condition_df.loc[preeq_id, override] + ) + mapping_per_condition[0][override] = parameter_id_or_value + if isinstance(parameter_id_or_value, str): + mapping_per_condition[2][ + override + ] = self.petab_problem.parameter_df.loc[ + parameter_id_or_value, petab.PARAMETER_SCALE + ] + elif isinstance(parameter_id_or_value, numbers.Number): + mapping_per_condition[2][override] = LIN + else: + raise ValueError( + "The parameter value in the condition table " + "is not a number or a parameter ID." + ) + if sim_id: + parameter_id_or_value = ( + self.petab_problem.condition_df.loc[sim_id, override] + ) + mapping_per_condition[1][override] = parameter_id_or_value + if isinstance(parameter_id_or_value, str): + mapping_per_condition[3][ + override + ] = self.petab_problem.parameter_df.loc[ + parameter_id_or_value, petab.PARAMETER_SCALE + ] + elif isinstance(parameter_id_or_value, numbers.Number): + mapping_per_condition[3][override] = LIN + else: + raise ValueError( + "The parameter value in the condition table " + "is not a number or a parameter ID." + ) + return mapping + + def create_objective( + self, + rr: roadrunner.RoadRunner | None = None, + edatas: ExpData | None = None, + ) -> RoadRunnerObjective: + """Create a :class:`pypesto.objective.RoadRunnerObjective`. + + Parameters + ---------- + rr: + RoadRunner instance. + edatas: + ExpData object. 
+ """ + roadrunner_instance = rr + if roadrunner_instance is None: + roadrunner_instance = self.rr + self.fill_model() + if edatas is None: + edatas = self.create_edatas() + + parameter_mapping = self.create_parameter_mapping() + + # get x_names + x_names = self.petab_problem.get_x_ids() + + calculator = RoadRunnerCalculator() + + # run the check for noise formulae + self._check_noise_formulae(edatas, parameter_mapping) + + return RoadRunnerObjective( + rr=roadrunner_instance, + edatas=edatas, + parameter_mapping=parameter_mapping, + petab_problem=self.petab_problem, + calculator=calculator, + x_names=x_names, + x_ids=x_names, + ) diff --git a/pypesto/petab/util.py b/pypesto/petab/util.py new file mode 100644 index 000000000..81b3590c5 --- /dev/null +++ b/pypesto/petab/util.py @@ -0,0 +1,171 @@ +from functools import partial + +import numpy as np + +try: + import petab.v1 as petab + from petab.v1.C import ( + ESTIMATE, + NOISE_PARAMETERS, + OBSERVABLE_ID, + ) +except ImportError: + petab = None + +from ..C import ( + CENSORED, + CENSORING_TYPES, + MEASUREMENT_TYPE, + ORDINAL, + PARAMETER_TYPE, + RELATIVE, + SEMIQUANTITATIVE, + InnerParameterType, +) +from ..problem import Problem +from ..startpoint import CheckedStartpoints + + +def get_petab_non_quantitative_data_types( + petab_problem: petab.Problem, +) -> set[str]: + """ + Get the data types from the PEtab problem. + + Parameters + ---------- + petab_problem: + The PEtab problem. + + Returns + ------- + data_types: + A list of the data types. + """ + non_quantitative_data_types = set() + caught_observables = set() + # For ordinal, censored and semiquantitative data, search + # for the corresponding data types in the measurement table + meas_df = petab_problem.measurement_df + if MEASUREMENT_TYPE in meas_df.columns: + petab_data_types = meas_df[MEASUREMENT_TYPE].unique() + for data_type in [ORDINAL, SEMIQUANTITATIVE] + CENSORING_TYPES: + if data_type in petab_data_types: + non_quantitative_data_types.add( + CENSORED if data_type in CENSORING_TYPES else data_type + ) + caught_observables.update( + set( + meas_df[meas_df[MEASUREMENT_TYPE] == data_type][ + OBSERVABLE_ID + ] + ) + ) + + # For relative data, search for parameters to estimate with + # a scaling/offset/sigma parameter type + if PARAMETER_TYPE in petab_problem.parameter_df.columns: + # get the df with non-nan parameter types + par_df = petab_problem.parameter_df[ + petab_problem.parameter_df[PARAMETER_TYPE].notna() + ] + for par_id, row in par_df.iterrows(): + if not row[ESTIMATE]: + continue + if row[PARAMETER_TYPE] in [ + InnerParameterType.SCALING, + InnerParameterType.OFFSET, + ]: + non_quantitative_data_types.add(RELATIVE) + + # For sigma parameters, we need to check if they belong + # to an observable with a non-quantitative data type + elif row[PARAMETER_TYPE] == InnerParameterType.SIGMA: + corresponding_observables = set( + meas_df[meas_df[NOISE_PARAMETERS] == par_id][OBSERVABLE_ID] + ) + if not (corresponding_observables & caught_observables): + non_quantitative_data_types.add(RELATIVE) + + # TODO this can be made much shorter if the relative measurements + # are also specified in the measurement table, but that would require + # changing the PEtab format of a lot of benchmark models. + + if len(non_quantitative_data_types) == 0: + return None + return non_quantitative_data_types + + +class PetabStartpoints(CheckedStartpoints): + """Startpoint method for PEtab problems. + + Samples optimization startpoints from the distributions defined in the + provided PEtab problem. 
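Startpoints are drawn from the initialization priors (``mode=petab.INITIALIZATION``) of the parameters that are currently estimated.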
The PEtab-problem is copied. + """ + + def __init__(self, petab_problem: petab.Problem, **kwargs): + super().__init__(**kwargs) + self._parameter_df = petab_problem.parameter_df.copy() + self._priors: list[tuple] | None = None + self._free_ids: list[str] | None = None + + def _setup( + self, + pypesto_problem: Problem, + ): + """Update priors if necessary. + + Check if ``problem.x_free_indices`` changed since last call, and if so, + get the corresponding priors from PEtab. + """ + current_free_ids = np.asarray(pypesto_problem.x_names)[ + pypesto_problem.x_free_indices + ] + + if ( + self._priors is not None + and len(current_free_ids) == len(self._free_ids) + and np.all(current_free_ids == self._free_ids) + ): + # no need to update + return + + # update priors + self._free_ids = current_free_ids + id_to_prior = dict( + zip( + self._parameter_df.index[self._parameter_df[ESTIMATE] == 1], + petab.parameters.get_priors_from_df( + self._parameter_df, mode=petab.INITIALIZATION + ), + ) + ) + + self._priors = list(map(id_to_prior.__getitem__, current_free_ids)) + + def __call__( + self, + n_starts: int, + problem: Problem, + ) -> np.ndarray: + """Call the startpoint method.""" + # Update the list of priors if needed + self._setup(pypesto_problem=problem) + + return super().__call__(n_starts, problem) + + def sample( + self, + n_starts: int, + lb: np.ndarray, + ub: np.ndarray, + ) -> np.ndarray: + """Actual startpoint sampling. + + Must only be called through `self.__call__` to ensure that the list of priors + matches the currently free parameters in the :class:`pypesto.Problem`. + """ + sampler = partial(petab.sample_from_prior, n_starts=n_starts) + startpoints = list(map(sampler, self._priors)) + + return np.array(startpoints).T diff --git a/pypesto/select/misc.py b/pypesto/select/misc.py index f5af4bf2b..99fb5fae5 100644 --- a/pypesto/select/misc.py +++ b/pypesto/select/misc.py @@ -62,10 +62,11 @@ def model_to_pypesto_problem( hierarchical=hierarchical, ) if objective is None: - amici_model = importer.create_model( + factory = importer.create_objective_creator() + amici_model = factory.create_model( non_estimated_parameters_as_constants=False, ) - objective = importer.create_objective( + objective = factory.create_objective( model=amici_model, ) pypesto_problem = importer.create_problem( diff --git a/test/base/test_engine.py b/test/base/test_engine.py index f86fc000f..6db8e79c3 100644 --- a/test/base/test_engine.py +++ b/test/base/test_engine.py @@ -64,8 +64,7 @@ def _test_petab(engine): "Boehm_JProteomeRes2014.yaml", ) ) - objective = petab_importer.create_objective() - problem = petab_importer.create_problem(objective) + problem = petab_importer.create_problem() optimizer = pypesto.optimize.ScipyOptimizer(options={"maxiter": 10}) result = pypesto.optimize.minimize( problem=problem, @@ -86,7 +85,8 @@ def test_deepcopy_objective(): "Boehm_JProteomeRes2014.yaml", ) ) - objective = petab_importer.create_objective() + factory = petab_importer.create_objective_creator() + objective = factory.create_objective() objective.amici_solver.setSensitivityMethod( amici.SensitivityMethod_adjoint @@ -123,7 +123,8 @@ def test_pickle_objective(): "Boehm_JProteomeRes2014.yaml", ) ) - objective = petab_importer.create_objective() + factory = petab_importer.create_objective_creator() + objective = factory.create_objective() objective.amici_solver.setSensitivityMethod( amici.SensitivityMethod_adjoint diff --git a/test/base/test_roadrunner.py b/test/base/test_roadrunner.py index 42dd4ec68..50a26dd28 100644 --- 
a/test/base/test_roadrunner.py +++ b/test/base/test_roadrunner.py @@ -10,7 +10,8 @@ import pytest import pypesto -import pypesto.objective.roadrunner as objective_rr +import pypesto.petab +from pypesto.objective.roadrunner import simulation_to_measurement_df logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) @@ -60,19 +61,21 @@ def _execute_case_rr(case, model_type, version): # import petab problem yaml_file = case_dir / petabtests.problem_yaml_name(case) - importer = objective_rr.PetabImporterRR.from_yaml(yaml_file) + importer = pypesto.petab.PetabImporter.from_yaml( + yaml_file, simulator_type="roadrunner" + ) petab_problem = importer.petab_problem - obj = importer.create_objective() + obj = importer.create_problem().objective # the scaled parameters - problem_parameters = importer.petab_problem.x_nominal_scaled + problem_parameters = importer.petab_problem.x_nominal_free_scaled # simulate ret = obj(problem_parameters, sensi_orders=(0,), return_dict=True) # extract results llh = -ret["fval"] - simulation_df = objective_rr.simulation_to_measurement_df( + simulation_df = simulation_to_measurement_df( ret["simulation_results"], petab_problem.measurement_df ) @@ -117,7 +120,9 @@ def test_deepcopy(): os.path.join(models.MODELS_DIR, model_name, model_name + ".yaml") ) petab_problem.model_name = model_name - importer = objective_rr.PetabImporterRR(petab_problem) + importer = pypesto.petab.PetabImporter( + petab_problem, simulator_type="roadrunner" + ) problem_parameters = petab_problem.x_nominal_free_scaled problem = importer.create_problem() @@ -147,7 +152,9 @@ def test_multiprocessing(): os.path.join(models.MODELS_DIR, model_name, model_name + ".yaml") ) petab_problem.model_name = model_name - importer = objective_rr.PetabImporterRR(petab_problem) + importer = pypesto.petab.PetabImporter( + petab_problem, simulator_type="roadrunner" + ) problem = importer.create_problem() # start 30 times from the same point diff --git a/test/hierarchical/test_censored.py b/test/hierarchical/test_censored.py index bdb1ae6eb..d204a81b2 100644 --- a/test/hierarchical/test_censored.py +++ b/test/hierarchical/test_censored.py @@ -39,10 +39,7 @@ def test_optimization(): ) importer = pypesto.petab.PetabImporter(petab_problem, hierarchical=True) - importer.create_model() - - objective = importer.create_objective() - problem = importer.create_problem(objective) + problem = importer.create_problem() result = pypesto.optimize.minimize( problem=problem, n_starts=1, optimizer=optimizer @@ -63,8 +60,7 @@ def test_ordinal_calculator_and_objective(): petab_problem = petab.Problem.from_yaml(example_censored_yaml) importer = pypesto.petab.PetabImporter(petab_problem, hierarchical=True) - objective = importer.create_objective() - problem = importer.create_problem(objective) + problem = importer.create_problem() def calculate(problem, x_dct): return problem.objective.calculator( diff --git a/test/hierarchical/test_hierarchical.py b/test/hierarchical/test_hierarchical.py index 3c749cc60..293467289 100644 --- a/test/hierarchical/test_hierarchical.py +++ b/test/hierarchical/test_hierarchical.py @@ -58,8 +58,7 @@ def test_hierarchical_optimization_pipeline(): problems = {} for flag in flags: importer = PetabImporter(petab_problems[flag], hierarchical=flag) - objective = importer.create_objective() - problem = importer.create_problem(objective) + problem = importer.create_problem() problem.objective.amici_solver.setSensitivityMethod( amici.SensitivityMethod_adjoint ) diff --git 
a/test/hierarchical/test_ordinal.py b/test/hierarchical/test_ordinal.py index 79d775ecb..6df4e2efc 100644 --- a/test/hierarchical/test_ordinal.py +++ b/test/hierarchical/test_ordinal.py @@ -102,9 +102,10 @@ def _create_problem( ) -> pypesto.Problem: """Creates the ordinal pyPESTO problem with given options.""" importer = pypesto.petab.PetabImporter(petab_problem, hierarchical=True) - importer.create_model() + factory = importer.create_objective_creator() + factory.create_model() - objective = importer.create_objective( + objective = factory.create_objective( inner_options=option, ) problem = importer.create_problem(objective) @@ -127,7 +128,8 @@ def test_ordinal_calculator_and_objective(): importer = pypesto.petab.PetabImporter( petab_problem, hierarchical=True ) - objective = importer.create_objective( + factory = importer.create_objective_creator() + objective = factory.create_objective( inner_options=options, ) problem = importer.create_problem(objective) diff --git a/test/hierarchical/test_spline.py b/test/hierarchical/test_spline.py index 424d5fa3f..32d99c54a 100644 --- a/test/hierarchical/test_spline.py +++ b/test/hierarchical/test_spline.py @@ -95,9 +95,10 @@ def _create_problem( petab_problem, hierarchical=True, ) - importer.create_model() + factory = importer.create_objective_creator() + factory.create_model() - objective = importer.create_objective( + objective = factory.create_objective( inner_options=option, ) problem = importer.create_problem(objective) @@ -125,7 +126,8 @@ def test_spline_calculator_and_objective(): petab_problem, hierarchical=True, ) - objective = importer.create_objective( + factory = importer.create_objective_creator() + objective = factory.create_objective( inner_options=option, ) problem = importer.create_problem(objective) @@ -476,8 +478,7 @@ def test_save_and_load_spline_knots(): petab_problem, hierarchical=True, ) - objective = importer.create_objective() - problem = importer.create_problem(objective) + problem = importer.create_problem() optimizer = pypesto.optimize.ScipyOptimizer( method="L-BFGS-B", diff --git a/test/petab/test_amici_objective.py b/test/petab/test_amici_objective.py index 00c399f6e..274962fc1 100644 --- a/test/petab/test_amici_objective.py +++ b/test/petab/test_amici_objective.py @@ -57,7 +57,7 @@ def test_error_leastsquares_with_ssigma(): ) petab_problem.model_name = model_name importer = pypesto.petab.PetabImporter(petab_problem) - obj = importer.create_objective() + obj = importer.create_objective_creator().create_objective() problem = importer.create_problem( obj, startpoint_kwargs={"check_fval": True, "check_grad": True} ) diff --git a/test/petab/test_amici_predictor.py b/test/petab/test_amici_predictor.py index b610e2b97..2d23620a4 100644 --- a/test/petab/test_amici_predictor.py +++ b/test/petab/test_amici_predictor.py @@ -365,7 +365,8 @@ def test_petab_prediction(): petab_problem.model_name = f"{model_name}_petab" importer = pypesto.petab.PetabImporter(petab_problem) # create prediction via PEtab - predictor = importer.create_predictor() + factory = importer.create_objective_creator() + predictor = factory.create_predictor() # ===== run test for prediction =========================================== p = predictor( @@ -373,8 +374,8 @@ def test_petab_prediction(): ) check_outputs(p, out=(0, 1), n_cond=1, n_timepoints=10, n_obs=1, n_par=2) # check outputs for simulation and measurement dataframes - importer.prediction_to_petab_measurement_df(p, predictor) - importer.prediction_to_petab_simulation_df(p, predictor) + 
factory.prediction_to_petab_measurement_df(p, predictor) + factory.prediction_to_petab_simulation_df(p, predictor) # ===== run test for ensemble prediction ================================== # read a set of ensemble vectors from the csv diff --git a/test/petab/test_petabSimulator.py b/test/petab/test_petabSimulator.py new file mode 100644 index 000000000..65db1aa83 --- /dev/null +++ b/test/petab/test_petabSimulator.py @@ -0,0 +1,108 @@ +"""Run PEtab tests for PetabSimulatorObjective.""" + +import logging + +import basico.petab +import petab.v1 as petab +import petabtests +import pytest + +from pypesto.objective.petab import PetabSimulatorObjective + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +@pytest.mark.parametrize( + "case, model_type, version", + [ + (case, "sbml", "v1.0.0") + for case in petabtests.get_cases(format_="sbml", version="v1.0.0") + ], +) +def test_petab_case(case, model_type, version): + """Wrapper for _execute_case for handling test outcomes""" + try: + _execute_case(case, model_type, version) + except Exception as e: + if isinstance( + e, NotImplementedError + ) or "Timepoint-specific parameter overrides" in str(e): + logger.info( + f"Case {case} expectedly failed. Required functionality is " + f"not implemented: {e}" + ) + pytest.skip(str(e)) + else: + raise e + + +def _execute_case(case, model_type, version): + """Run a single PEtab test suite case""" + case = petabtests.test_id_str(case) + logger.info(f"Case {case}") + if case in ["0006", "0009", "0010", "0017", "0018", "0019"]: + pytest.skip("Basico does not support these functionalities.") + + # case folder + case_dir = petabtests.get_case_dir(case, model_type, version) + + # load solution + solution = petabtests.load_solution( + case, format=model_type, version=version + ) + gt_llh = solution[petabtests.LLH] + gt_simulation_dfs = solution[petabtests.SIMULATION_DFS] + tol_llh = solution[petabtests.TOL_LLH] + tol_simulations = solution[petabtests.TOL_SIMULATIONS] + + # import petab problem + yaml_file = case_dir / petabtests.problem_yaml_name(case) + + # import and create objective function + petab_problem = petab.Problem.from_yaml(yaml_file) + simulator = basico.petab.PetabSimulator(petab_problem) + obj = PetabSimulatorObjective(simulator) + + # the scaled parameters + problem_parameters = petab_problem.x_nominal_scaled + + # simulate + ret = obj(problem_parameters, sensi_orders=(0,), return_dict=True) + + # extract results + llh = -ret["fval"] + simulation_df = ret["simulations"] + + simulation_df = simulation_df.rename( + columns={petab.SIMULATION: petab.MEASUREMENT} + ) + petab.check_measurement_df(simulation_df, petab_problem.observable_df) + simulation_df = simulation_df.rename( + columns={petab.MEASUREMENT: petab.SIMULATION} + ) + simulation_df[petab.TIME] = simulation_df[petab.TIME].astype(int) + + # check if matches + llhs_match = petabtests.evaluate_llh(llh, gt_llh, tol_llh) + simulations_match = petabtests.evaluate_simulations( + [simulation_df], gt_simulation_dfs, tol_simulations + ) + + # log matches + logger.log( + logging.INFO if simulations_match else logging.ERROR, + f"LLH: simulated: {llh}, expected: {gt_llh}, match = {llhs_match}", + ) + logger.log( + logging.INFO if simulations_match else logging.ERROR, + f"Simulations: match = {simulations_match}", + ) + + if not all([llhs_match, simulations_match]): + logger.error(f"Case {version}/{model_type}/{case} failed.") + raise AssertionError( + f"Case {case}: Test results do not match expectations" + ) + + 
logger.info(f"Case {version}/{model_type}/{case} passed.") diff --git a/test/petab/test_petab_import.py b/test/petab/test_petab_import.py index 5b9f94a2e..aa4eb0067 100644 --- a/test/petab/test_petab_import.py +++ b/test/petab/test_petab_import.py @@ -51,7 +51,9 @@ def test_1_compile(self): self.petab_importers.append(importer) # check model - model = importer.create_model(force_compile=False) + model = importer.create_objective_creator().create_model( + force_compile=False + ) # observable ids model_obs_ids = list(model.getObservableIds()) @@ -62,12 +64,13 @@ def test_1_compile(self): def test_2_simulate(self): for petab_importer in self.petab_importers: - obj = petab_importer.create_objective() - edatas = petab_importer.create_edatas() + factory = petab_importer.create_objective_creator() + obj = factory.create_objective() + edatas = factory.create_edatas() self.obj_edatas.append((obj, edatas)) # run function - x_nominal = petab_importer.petab_problem.x_nominal_scaled + x_nominal = factory.petab_problem.x_nominal_scaled ret = obj(x_nominal) self.assertTrue(np.isfinite(ret)) @@ -114,12 +117,12 @@ def test_4_optimize(self): def test_check_gradients(self): """Test objective FD-gradient check function.""" # Check gradients of simple model (should always be a true positive) - model_name = "Bachmann_MSB2011" - petab_problem = pypesto.petab.PetabImporter.from_yaml( + model_name = "Boehm_JProteomeRes2014" + importer = pypesto.petab.PetabImporter.from_yaml( os.path.join(models.MODELS_DIR, model_name, model_name + ".yaml") ) - objective = petab_problem.create_objective() + objective = importer.create_problem().objective objective.amici_solver.setSensitivityMethod( amici.SensitivityMethod_forward ) @@ -127,7 +130,9 @@ def test_check_gradients(self): objective.amici_solver.setRelativeTolerance(1e-12) self.assertFalse( - petab_problem.check_gradients(multi_eps=[1e-3, 1e-4, 1e-5]) + objective.check_gradients_match_finite_differences( + multi_eps=[1e-3, 1e-4, 1e-5] + ) ) @@ -167,16 +172,16 @@ def test_max_sensi_order(): """Test that the AMICI objective created via PEtab exposes derivatives correctly.""" model_name = "Boehm_JProteomeRes2014" - problem = pypesto.petab.PetabImporter.from_yaml( + importer = pypesto.petab.PetabImporter.from_yaml( os.path.join(models.MODELS_DIR, model_name, model_name + ".yaml") ) # define test parameter - par = problem.petab_problem.x_nominal_scaled + par = importer.petab_problem.x_nominal_scaled npar = len(par) # auto-computed max_sensi_order and fim_for_hess - objective = problem.create_objective() + objective = importer.create_objective_creator().create_objective() hess = objective(par, sensi_orders=(2,)) assert hess.shape == (npar, npar) assert (hess != 0).any() @@ -190,18 +195,24 @@ def test_max_sensi_order(): ) # fix max_sensi_order to 1 - objective = problem.create_objective(max_sensi_order=1) + objective = importer.create_objective_creator().create_objective( + max_sensi_order=1 + ) objective(par, sensi_orders=(1,)) with pytest.raises(ValueError): objective(par, sensi_orders=(2,)) # do not use FIM - objective = problem.create_objective(fim_for_hess=False) + objective = importer.create_objective_creator().create_objective( + fim_for_hess=False + ) with pytest.raises(ValueError): objective(par, sensi_orders=(2,)) # only allow computing function values - objective = problem.create_objective(max_sensi_order=0) + objective = importer.create_objective_creator().create_objective( + max_sensi_order=0 + ) objective(par) with pytest.raises(ValueError): objective(par, 
sensi_orders=(1,)) diff --git a/test/petab/test_petab_suite.py b/test/petab/test_petab_suite.py index 87219c6d8..820adf686 100644 --- a/test/petab/test_petab_suite.py +++ b/test/petab/test_petab_suite.py @@ -82,11 +82,12 @@ def _execute_case(case, model_type, version): yaml_file, output_folder=output_folder ) petab_problem = importer.petab_problem - model = importer.create_model(generate_sensitivity_code=False) - obj = importer.create_objective(model=model) + factory = importer.create_objective_creator() + model = factory.create_model(generate_sensitivity_code=False) + obj = factory.create_objective(model=model) # the scaled parameters - problem_parameters = importer.petab_problem.x_nominal_scaled + problem_parameters = factory.petab_problem.x_nominal_scaled # simulate ret = obj(problem_parameters, sensi_orders=(0,), return_dict=True) diff --git a/tox.ini b/tox.ini index 027aa27e6..801f8ed08 100644 --- a/tox.ini +++ b/tox.ini @@ -79,10 +79,10 @@ extras = test,amici,petab,pyswarm,roadrunner deps = git+https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab.git@master\#subdirectory=src/python git+https://github.com/AMICI-dev/amici.git@develop\#egg=amici&subdirectory=python/sdist - commands = python3 -m pip install git+https://github.com/PEtab-dev/petab_test_suite@main python3 -m pip install git+https://github.com/FFroehlich/pysb@fix_pattern_matching + python3 -m pip install -U copasi-basico[petab] python3 -m pip install -U sympy pytest --cov=pypesto --cov-report=xml --cov-append \ test/petab From f92fc8bf234c38f7ca57895b0bd5b27d5b8ed0fb Mon Sep 17 00:00:00 2001 From: Paul Jonas Jost <70631928+PaulJonasJost@users.noreply.github.com> Date: Wed, 25 Sep 2024 12:40:47 +0200 Subject: [PATCH 19/37] Pypesto features in Readme.md updated (#1474) * Updated features. Replaced example links from github to readthedocs. * Updated Samplng feature. * Added paper figure to docs and feature * replaced pdf with png * Apply suggestions from code review Co-authored-by: Daniel Weindl * Highlighted keywords * Apply suggestions from code review Co-authored-by: Daniel Weindl Co-authored-by: Doresic <85789271+Doresic@users.noreply.github.com> * Update README.md Co-authored-by: Daniel Weindl --------- Co-authored-by: Daniel Weindl Co-authored-by: Doresic <85789271+Doresic@users.noreply.github.com> --- README.md | 58 +++++++++++++++++++++++------------- doc/gfx/concept_pypesto.png | Bin 0 -> 170061 bytes 2 files changed, 37 insertions(+), 21 deletions(-) create mode 100644 doc/gfx/concept_pypesto.png diff --git a/README.md b/README.md index c75c53c08..fc81f21a4 100644 --- a/README.md +++ b/README.md @@ -13,27 +13,43 @@ parameter estimation. ## Feature overview +![](https://raw.githubusercontent.com/ICB-DCM/pyPESTO/main/doc/gfx/concept_pypesto.png) +*Feature overview of pyPESTO. 
Figure taken from the [Bioinformatics publication](https://doi.org/10.1093/bioinformatics/btad711).* + pyPESTO features include: -* Multi-start local optimization -* Profile computation -* Result visualization -* Interface to [AMICI](https://github.com/AMICI-dev/AMICI/) for efficient - simulation and sensitivity analysis of ordinary differential equation (ODE) - models - ([example](https://github.com/ICB-DCM/pyPESTO/blob/main/doc/example/amici.ipynb)) -* Parameter estimation pipeline for systems biology problems specified in - [SBML](http://sbml.org/) and [PEtab](https://github.com/PEtab-dev/PEtab) - ([example](https://github.com/ICB-DCM/pyPESTO/blob/master/doc/example/petab_import.ipynb)) -* Parameter estimation with relative (scaled and offset) data as described in - [Schmiester et al. (2020)](https://doi.org/10.1093/bioinformatics/btz581). - ([example](https://github.com/ICB-DCM/pyPESTO/blob/master/doc/example/relative_data.ipynb)) -* Parameter estimation with ordinal data as described in - [Schmiester et al. (2020)](https://doi.org/10.1007/s00285-020-01522-w) and - [Schmiester et al. (2021)](https://doi.org/10.1093/bioinformatics/btab512). - ([example](https://github.com/ICB-DCM/pyPESTO/blob/master/doc/example/ordinal_data.ipynb)) -* Parameter estimation with censored data. ([example](https://github.com/ICB-DCM/pyPESTO/blob/master/doc/example/censored_data.ipynb)) -* Parameter estimation with nonlinear-monotone data. ([example](https://github.com/ICB-DCM/pyPESTO/blob/master/doc/example/semiquantitative_data.ipynb)) +* Parameter estimation interfacing **multiple optimization algorithms** including + multi-start local and global optimization. ([example](https://pypesto.readthedocs.io/en/latest/example/getting_started.html), + [overview of optimizers](https://pypesto.readthedocs.io/en/latest/api/pypesto.optimize.html)) +* Interface to **multiple simulators** including + * [AMICI](https://github.com/AMICI-dev/AMICI/) for efficient simulation and + sensitivity analysis of ordinary differential equation (ODE) models. ([example](https://pypesto.readthedocs.io/en/latest/example/amici.html)) + * [RoadRunner](https://libroadrunner.org/) for simulation of SBML models. ([example](https://pypesto.readthedocs.io/en/latest/example/roadrunner.html)) + * [Jax](https://jax.readthedocs.io/en/latest/quickstart.html) and + [Julia](https://julialang.org) for automatic differentiation. +* **Uncertainty quantification** using various methods: + * **Profile likelihoods**. + * **Sampling** using Markov chain Monte Carlo (MCMC), parallel tempering, and + interfacing other samplers including [emcee](https://emcee.readthedocs.io/en/stable/), + [pymc](https://www.pymc.io/welcome.html) and + [dynesty](https://dynesty.readthedocs.io/en/stable/). + ([example](https://pypesto.readthedocs.io/en/latest/example/sampler_study.html)) + * **Variational inference** +* **Complete** parameter estimation **pipeline** for systems biology problems specified in + [SBML](http://sbml.org/) and [PEtab](https://github.com/PEtab-dev/PEtab). + ([example](https://pypesto.readthedocs.io/en/latest/example/petab_import.html)) +* Parameter estimation pipelines for **different modes of data**: + * **Relative (scaled and offset) data** as described in + [Schmiester et al. (2020)](https://doi.org/10.1093/bioinformatics/btz581). + ([example](https://pypesto.readthedocs.io/en/latest/example/relative_data.html)) + * **Ordinal data** as described in + [Schmiester et al. (2020)](https://doi.org/10.1007/s00285-020-01522-w) and + [Schmiester et al. 
(2021)](https://doi.org/10.1093/bioinformatics/btab512). + ([example](https://pypesto.readthedocs.io/en/latest/example/ordinal_data.html)) + * **Censored data**. ([example](https://pypesto.readthedocs.io/en/latest/example/censored_data.html)) + * **Semiquantitative data** as described in [Doresic et al. (2024)](https://doi.org/10.1093/bioinformatics/btae210). ([example](https://pypesto.readthedocs.io/en/latest/example/semiquantitative_data.html)) +* **Model selection**. ([example](https://pypesto.readthedocs.io/en/latest/example/model_selection.html)) +* Various **visualization methods** to analyze parameter estimation results. ## Quick install @@ -75,10 +91,10 @@ When using pyPESTO in your project, please cite Bioinformatics, 2023, btad711, [doi:10.1093/bioinformatics/btad711](https://doi.org/10.1093/bioinformatics/btad711) When presenting work that employs pyPESTO, feel free to use one of the icons in -[doc/logo/](https://github.com/ICB-DCM/pyPESTO/tree/main/doc/logo): +[doc/logo/](doc/logo):

- <img ... alt="AMICI Logo"> + <img ... alt="pyPESTO Logo">

There is a list of [publications using pyPESTO](https://pypesto.readthedocs.io/en/latest/references.html). diff --git a/doc/gfx/concept_pypesto.png b/doc/gfx/concept_pypesto.png new file mode 100644 index 0000000000000000000000000000000000000000..c202a5b05554f72daa75fea3c1d3986fe27ceaa4 GIT binary patch literal 170061 [binary image data for doc/gfx/concept_pypesto.png omitted]
z0Dor{`hnSgLG2u1VTZX9XMXZy=*Yi2dFjmn4SdBfSb&7(%LA-o@)eU3rcS=Y5=SHu z7&ino@d?OTunJcXZKZn;1rg`#C99jUb|5)#2KmxYgZb(iNq`OWJR(~!4d^;oKlntjVp?4}V*?ad8TpNqs8;UDlF+^=Z(?9JK?(_=-f0uL>E6xFR& zxJ=1B%|xQJjZ3`RfWnmNC`y|FPASl#FCln1P zxu=dSVVFE;3zxyxyY=5-N+)pONYbj}IO}#}@qMHmFNo~%@V7wmcNOL=a0xTR}i>D(#(yYqE>ql2|7ub}H&xbCGOR{Pbu1IO(oW*Pupyu%yB<#d$+H65>zinnK&#|w z&gZFkEuQM_c`0KckW0ftgpSk3Tu6g!m{B5D1Kf0IRoa&K;7Hr7}4kswiA`7bRDv#FWz) z(YVy}$m4WBur-7|~PFD1zH#iwO||0fB&j1k7!A zNmTPAsco5(7}Q*CZ!wK=Vq72s^S(bnNYI}z?otPc+M>rNy)mqa3?eI)|*EH4x7 zOu~BxKL|0QE|bNc+d{Q0`J0n$i9>n{S+x+sKRDRZLF)5M zD9BD8yg96&S*h-f0OdI!B%eRFvH5?m?#5fYPvJA0ZpJgO?U9na>d0zgaIoFWr#G&^ zf4{T?ukZL6>B$E2m>-x5Ik@~}{mkCxi^ZQl%2TK0T(5X(<-%+ik->xm4$0Y)eD}AY_>MS=C+cOk{7ssij#?IR0>J4&Got5-I8LvkqG%#s}s58+s!Iuf5B>X z4&K)@>_nXND!r!9@-8P*l3qM8z&0+NY|$5K?%B3cubB|*5iR+y zx7)nE+CBR<-pfq(vL(x|W0$e#<6?a3-evgT&HLdmIcTC$hfm$J9KZeRUi|T$Ls9}! z17UeRhL#3GKKH;%TrO`w1^L8RH@4ygC1K?k$G89XHX69U+kqqVUafAf!@gE2WF=tE?JwY9d12yX>uEnR@k&%8uly9y+vrO|X-RX-We z_e-{G)-9YARBO;nDrY5~n=blnN=by3{Lpz@LPoMg&@xfmK~^t%vUG`&f5d|Bq1Yi< z1lapLiDn`2-y$9uVdXzt?2L5=0Q;raUag`Bqzvfj{EH9>L>mH@qeszk<_uJ%x=vVs z3u2Y@`(?GFZRaj@Q|<=SiWSh$oH?O~)kD9buF6VE;8{Swo55Rf4emVGSMuTuxcvNc zNF=g6FM|^8EnI|HvWC&km;uwWWzsJ0rY0$guqt_cg${%JD?SSZLWw}{wdbc$);ZpF z^-}W9XbQE?5c2M_Cpp`Oh!{Nr?9Xp8S4qC^HD?dfY{X7XU$qej-+xO=e4C!%XCFFJ z3#?j~2=x7L(Q^mc1)5t>arQ7~EeoE5^w$>W5GXyi9bIH8(?j_M5aK}-f9-~2y?=IR zjIH@2(AyntI9_=arA=4RMJL(S?WS2(U0{NlRZw=f9TumB{H^Vh4NRcjxN0uJfzrK1 zoZHBD!A%bpu~1Q+*@=1BFyl68h@2N85D*Al-w3eMyNVRoPVzZd6S>4BHxqMAjx%x2 z>hXdMQ@`4r%h!chw^>~%ud|?ll3{WizM_k!-E+hZ zdXQo(7mj#cc*%2agNYaRUguXhgNgZ0wz%o;l`xDWycy?UiJv)S`O!=SY9z(&%V*;P z*|nS?Kl52rGo|F9TlSunl7oK!!PWFP&HFCDkW|$h$q#)qsnwr*`vAW3$U3qn_5Uz! zC$+Vk$g%{(Kw_OpI{vZrg%jhlm-*B8ucTQYT1wd4j_n7}<5F2Y&4zFhc|K5kGO485 zx+OP#^4@4yT)2uYzkiKxcw_P8zkEruTp2(huq93`$rc}M;lb+i)G1j=dgFbZJ9rZ7 zAG=4Ygb7pjJGZy@C1I7+%d9Ss>$@Ff%b4nt3aNY&*$*{WHqZl(4r;wt@zA zVyQI>vNf=?61r0Sn;9yVIS-0F|NURVTwPB}BON~T<8MGs1C%`bfUg0z`C+1u6M`zq zwnsx{S{s@qJ0A^Q3nByp6OBN(-HwivC#7z<8ye8IcQ2Gg6uW9`(bd+MK)U_Vp@}EY zsh_U8I;gU;pdhk)EYWiQ98Uh9e6$xs+DHT`GZ3Gqe9$CpDAKc%y{K(8;Z`Wg(HiSdN&q(Op1 zRfj#h=69K`aaQJ2vKNZd)-qJL2lmGy&y2pnUzY2RoDW@lmd@WsEuTXa@}>IBDQva4_yn+9wz_(9yXFB<@{3KagVM zT80M#9=~fTe(~~7ys_g1zV*bds3D)Zlb5UT&fe45w07Z;{v;6*%wGDH^{o<#{?G>h z9)f>)|2VExHRA7|ybY{epG+q?s(KRSw;H_gXgpLvi38Nrj2Op3GO+(7DZ1K}vg@_W|e{TJTCtR?g4 zw=GU0M&n3bZcIy(tXsP2_s`mFK{pYmtZ+WGZI3jtHRHy)UnMVuk@ib<*BasIE%;lPF8J%iG} zz6J%|Bo@t>i8xxPS#%>}4MwOkGm*4@eMpq;<9ukMIe|b>2=tKPa_;R<;Kb%(x|y`XjJ>hsWd2Bv59QtQitqcSNt*6dvM75KvrsIm0pyVvkHUH z9jPM{e$FkApo9i#ZPpH)+WH(8ZTte=_l^Gf)9tXLE)(hRtT1n;fdeMa<)0R{iY#vEI2B}(!|S*VdsthDSm&2ct0a$W8g!s? 
z(HNPjCfq~BK6{_P^xjc??!i?ODZKB*wdA1W@h!)WBNws#z6U$G@WkDHQ?b1YMDYh!n)6kjL8@mt*gA-#%VRK|mk*pGRd&Z& zxtR3s%LCo3K!kr_^77D@k8p1P2_#X%$4`9g)0lR{tWo#6y{?(aZnYP2-b=GSatiZd zNHk&BYum8#^N*6^Lnqm{NP~4`m%|?BZ0F)~y0H7XH&9zzg$KU=#Q$gSIsoISuI;ha z+pc==mTbwr_lj*g4yFb|Zvj3bkV482DWn$y`3WJF0D;g$a{vb$+Zgw5Y|BNKZS~%J zvH!VmHPT92^|mX$YmcVAdF9URl=IFz7w!bk#oQ<@R7t58!-V-PK)W{k_CQs6CA_GY z-kJFZ2f!~RfNb(2ei*qTI3kptDc-z3ye#rQPEJZEzmfN9kpen((DFf6&+Fvyr+cyb z)-|1Ft+#6r42cIsp2aObj?a-XaF2?DXF`H1*JmarOguc$R8ax1$&-hqE`wP}-p=hf z_^V%_CNCEzb`C^ySp9{Ha(K=s(aN2&W@Ot3TzaYMNN7_Jc}dl#Jut#NAnesz%2mw0 z=^Hq;^kw)R zvzc@sC1AP(&gHks|W_shO_JFS^8G9U^P(j(C-k7JWCjxu|dvl0R zJH#Fkuv&m@PX@yb_G^_JP(V0AZUMkeUaccAvZ;wK>~KFHH-s{1r~plcO3%Db0q{ok zi_2>j*e$GdYZ($>1|JphTr|bogXbbwQB6QXnCjL{c9DA#^L0QS>_+-ZK=YC^izc~9 zk@%V2VP?mJ5ggUWKmz8mnbpZ^)h_aDWaOBShZr8$Mv9&98NBnasz&tTo3U#Bj*Ki1rP zBSNBD-?v0tTkctg2_D0&wb`i$1-D9Fk~XdVf5oxu7XEKh62ugR$s#S2Thb~F(Kgc%ONcM_U0ZLsg7&EU9W2oIJyUpu73$L zuD&0WF8&f?XRkuWp-+*=z=;B^+LMjpi1eUwQxG|2G2gYup{}|Dhu-@O5*D@vnuoK_ zqD9@`&WF0{j<`ufZFT?it+J7C9rNwyOK4qnD=~2zA8>*0Ad`^SsJ@VG*OyuU`j*F+-exUv5z}qUg?X`1n9-Yo{@KzhZw7*ZQ z9D)MG%jDq>0-UWwl0-U1Sm4464J6<@m?|0^DwjB`zB00>Hbu)KCw- zH4lTqF48aUt-QKUJ@-vkTI0fWuT%j{sQ}GDGQS^3fZtK9o*!=ln3tBC!p)?LRkQ69b6Gg$Lj^1`z%8n_Kbs zceW{@TqH^+#Dw6ME9UVWTV6cwShEm+e0?)sPys|&v9qD|`ZRp7U|Kvi^otY&T|oMg zV1@i0HSo6)PKgm~6B!&aYv#GJe?m;C>U1#H9dVwOnvU0h^Bi@fy>b25?&ufO+`9du zb?!=CYLR0xtcIHAqF)^p7seN9SFHTfRoL*-d$_>gY~;9T#V8Ms49D(|wqn!kA7R0j z%dqUaOO*7>d3J^wCX#w0*J7AjZyq6FY}0f5@j&|VGzFYXjbvH0^dsV<(7+DcdIqGe ztCV{+dp~%4mO!wPlM@tZF6oc$JcxJx@Gn`!(d`Fu<2Ub8%kt+2E}vrOi;tPoW&|h8 z$yk@`LNxQWX%7sw2O5fs&{SOo@5xh!y69e)Ct$uS12*@@@^|gT-XA@V`m!?k#Kj|d z)y1eHh<<^2`QLO4YIAZi*3%P#S6l%n&TZL9k*u-RHoa|hd!U|R?Vi`4Lfo8-5jAZo zl2%-g@k`gDIO7zG&K=|YBxAee4~vuBs1YqJ;DDGU1jbHeC$zg7#g&t?2dUfMBhkSE zb`DGBbfFdxi~s^iMPa6DOBLDYrjQuLIxoq~LQO>(qLQZb{mk+`)t6Il4h&VA=^_DB zoRbBUujO0q?EVcncij8X$=(xK zyP~5;dVGZCr6W7P)KOf~Z9Q`fEUb?(GVZO-r5U7^kqjfzc75q;p1_ z_P|hiK;)=uGt%MW9{^{vx<{gAZ`q`PbFauKEPw7rWIp~~#kP$2?&D~vBvX{&x$nY- za3=FyoA$tH^uYPX2AtmUAEY1HjEKo(wkFRdSe>jU@rN*~%kwf6m?r>Zb!mapUA8c- z`|mL)g5m-SdT{az2<3ZGI6=&)HUSpv$ehkPvV+rhRd)YN)j4PnSUgZskfjzH>uM1> zb+KB`NVZlDg&a2>5j<_hlHs`A4U^`zBFkAwgzPtGl{_*a1eC4JUNY5_9eg+T~ zrnN0LzUZAx(A3CaKbx)dXl4`FQI27xnCP90iC(<58O-zc0Yk0RIP!87~*qItaqga=_PL2{8`kzlV96xRAV*hu|)(Y>#x zoKSd71dcGRNWV;&o{Wn8Vl}x^O3W`jSV_1@s|7GOhXtZEy8x&596?6vIVJrvX+g)% zzv1zcE_Eb(pgunz=j-a=m6SYUn>q1;hfsXvbCjP>#j$7qfB@>6*QcL@3uS7WYSS(4 z?qYP`h;@u^nD#(B4+t23cJBr?c?E(aYKcee^(MeVt-?i|)BIC0arfE$?1grws6XavyVUJuG(e9KJus z5I{eU@z>vriu9u><~1$mADyIrTW&sPzE4_4rn@x6ZSI6po+5_4H zj_Uys(q>N%pjub-+&}ts$vWNcQF{AM8-++W!e9Yc_;HQx+vh$xasrx`6401m-1JXjGV4zRddNl0QnMX zrc1z3ce2{$-cZf}qXL`@qgmU~dq6(=U-;1_9EW><_9%loH^Mi_k8GoQvh^xZ&rX$sGg)L3g)0zV z(o0z-40^kcL1qPj-?w20ytyqVd7SLlradqy9uRP=HY<}dC?RlSP*6uOVahiYa+F3; zP>(?cS05iN{O$kY)BEqi>DOO{+mdC9D%Qx3gE20a5tJ7=x}+;>WWZFpPmEM{ephYR z`$ne+s)}+T)4BZ;G;s{P{DLVl5l&`!7@t`dmbid>ZeHd#4DsH2OZ2>JN()g_Uc_f) zAv>UR`8>^LERbv^(bC)ebb9T9-grQ+GqFZqhoU7e9XyQ$(sRxn=g2~TerJUDCOeyH z1pr^X_D;O_>=W4W{>!-Pf$vkhe-YAlexN{f0n(*&$(LIG=Fk|9x`=MpqL*)xo)epG z7H;Po2lWf6rlE$i2o`o{fBoR?=7XE(+y-Bdw(PuF(Q}bgmW_(KjzI;i%k*;dLX=;O z^(MWnJ)k`>3OpczLd85>TW@#50v4cGbj=O+v!pRJ%H5jN=253wESlP|%jYCw{kEe@ zf@5ZKGy^zJQ7m!+ghdX<=xLSmL^EBOZjLCww?f z!0s$cr$``A0lI?%yx=4H<9rTE+pPW6zMk#1&n2xt!Eez%H+*Rv(?q}L7geIHq7E69 zyortUYsov9QaJ*i7gIXNQNeR*=MAf8p@1DPM^dw~arX(_dDTKZ^)iElZrqPh1`id6 zdQa`(iAE(Yq}iDwn!k^0LqGpog+K{^VBTG&zYGNjT2|{)Fk^vCTJvC z;*5!qNvyuM4##(Yu5`x_e6kDEmd(LZvctXode?a%vKb9zATt`R%6BY*P+=O^R#dlu 
z;XXmWmTi^Q)FW(!6Yp_m69cxgQ%2HSn}F)2*@f!<DPhN{@2aeYUV;d21`J-qz}Y+OucSBpnaU<`xsH<#7yWIMn+%_Weh zD_P_&?j!@aQErmp;}}j(1OsKxPaefU7bf|6c2db)f{_YKy`iRx>s|l$+7zZ&v$r$M}7a zhFk{dEjr8i0tC?wM!FO%za`QAck?pwH|E1{4OfobkP(k#*|EgY?ELM;&RWSdZOKyu zR_P(gs1;2bgu-eYc%wN#FgF+hZ$#s~y+!#&V?o>^{P*~W?RT}T3*dg!tXtv2o8oYo ztCK4hM=$Di(Q$sUi1qCm;BNqTQph7K9wP`LJwM%>)J>cAz)1Fhk{)3LttC_pSrr4s zHrj8HH;Mf!E3L2q+tJ~H2=@2I?1?ezV`Rmg38LTEKdyv^kGZb?H&eO9?QA^nxVUx^CNk5@6w|zzehF;$i7?x}*|{M32}51#;)o`|@LdcB z`qaxC@$s${g2Z09{i+3c_Khug{{0O)q42?1~?UW>vKwgh+d=5j~*I{7UNpk7aHehEG* zI_ILH?nh}3(a09yypq7EAGOfi1MNMjpH4F{9`NxqBWdnbcDNjdm&u!wKjRS;AK4P{ zvwbNd@$&M|{-lD1?pU`8Ywy1aX67L#@-3oW@4*oX#`|MsNx6D=lyVF!%Ff5@PrpF0 zpa};*+lRY<{0Pj=azH^bVVJsTCQ=B_M<+3;;avL<-e|N-&xh6nMQ^=<%H2ERJ!c*Q zuDd}MUZ0nX{J;H$L5v#^eCIuIi?Mv~lzjLflyBYy&nZ(8_@!Ib_1RcljJ&`83H7KPLe3Qu=uq*Iq$*yUm& z1bz(=KA&X%whN#xW(~Bf4MX@x)sO8F;9P*~@K93= zsM)?6l2)W`gdJW%&G@D!ODCPkxA^;dD9H_Bor^xY0PFJIW|3^^2&9Jw`4A*8Qs8?4 zgN+*J$A|JY0-j4yQbz^P#cw8pk9V(KglAscjMqQjhkMp8###mkeUrgK1w_C1n#C>5 z>-K(@^-{8i|Nizir89rc${C$@M%X&mp0`Q-05NO5bJ6G)yOS{l+}g3AWxM<;BSZaU zaYUM2`xE9&M&it5>XC29=GQ(%7Imbr{-2wboJMzz7NFb3-4$hdMewISx#TCHsER#6 ziH~xE-?Co-bm>=@-?5tLcaaom;_sCkR9Pa?5)e)K7Rt4VzPW(%wY(1sc<(z6wCE-SU{RLMV=*#e%+b++)T6_~kft^)CO z$BJ^9{?pC_0z?+S`MTO#oSu#g?z~@xg`w=dcTjiogxa3-%?Pc%14c{e8)$(Zg<2!x=COq_2RKEBR6nwe`9@A&4W_Jso zj|S&7x3NAe3$5!Yriql9EoVNrT1w^#!hvSwohYgeQx9&IFB9F%fME#dYksZ zka)lX5(y)^EqH6_13R_5H;A4$I9juk0I?T?I2z3J3DE(#Fvdygq}Nwkf`PgckQdvtT(@BZ}())Ew-F~M@* zwr=amb9s1W{T>x`^$N-ZjTF|ov@xg9BC!_MPVZdy#S@@4t|eI?Z6-kI+)J0JK#wo~ z>@WD_l@GA;OILR526@Nv<>;8gvpFbZKx1>Lzk2WS^7DbP(nX>}K-BXD%H?fHy%VvM zOXdQoOS!^W7w}WI$$hhmEM#GvkKz76GK?kIq_*MpfUv*Ia|;#gJSZ;QCU7o+Lq&3@ zk(%aiWao~fTua)~R7h~x8J8@C8+D?kv*fw&J&l6QJg&Q8&E41I*sen@?~Qx^yA4w* zAtcvAJOkPWMg*fUD<6>w(eUzaeYS}=q*JDrqZJzK>(%n`dPtXUj~*~Ix{bQI5;%7a z3x^YRy$y5n;`buAHF^-#;C07~QSB0KbJ^}}_BE1XjH_eai^h6-vb|ybV#^0FTc*Hs z7v7JLKle8zEM0-_z;jn>tW%4r#cz5+dq8_Yd!W4sBminabTR{+rnKMHqjdp9LlZ0k zE3FR@S)BglE6;%tTlq3DWnj!i6rMX~U0lzXRlaS3-ku*k{3)wSa#bZHXlPKpWwc*@ z#%WZSmr$B-He(W5zQ+f;rsJkB;Ctjm?EmB)oZ7b)`!~L=rhu?$gmHUVToOWJ5-AmF zc}H}1b>}xl&t=L6y1s*%U!PD@kxw~SbOz}q=TJ*sb6-z?#0QQ;xH+OtU0tYgQ{U84 zYNbu09kx;2_urN%!E7Q5DwT(?2R@7738%b+eeUn8( z!C2RG?Sawif#&Zo^(|)Y0|g#&Lwk86i*`8~dIWR>DLo?WUop{U7TE}4q>EmBQY zOaIx04LegXB{2g21UW0IHJy@{gS4zd<)7-z1j4X89smgLa?WJp+_BTt(NBg4gNK$9 z!xc1T8@z(#ngwuzPk;wU1g2BzU0~xGnnsEFKyZbwmje#9Rr_BFGqdaX_zUe zVVY&pXbrjNJ`{R-DcP35o4>?>n!$W9gd_B!hfz+3waB&@@*Bao-9ZT{XKGQGBjAQF z+A79ww@JM{&^q#oFYGe`CyKRp#iIq5j^ z*+;5acP}3XoDYYOe-KGGUT~AR0_^aXxB*R!AK=7**OM1rLb*Z5lx3Ahwj#AX^mJ)W z3En^aHj1l@sX@*FRn2xNb00!XVYp`c4JfZI!;Z9VIGS@<0qYGCaN$BLxN98K)48Xs z7bb^Wh9>m%7@bagKzl%YpflMc=Vp-DkZ&u>`)7JPZ>+UOFvy9La{up|QB};@KVKb&Rx* zG8e7<8Zv+1*>n(pdgD_(aKkcu^Nx%0F&XIlj;G^e0_fH@d0aRyTRd3>7q#9#5|%6Y zyAZAOa0al{*0~1~XHLYp=}FW=M0nZT8^-IZxH>*pQDu0nf#X$wGhOhtDWkm2K~A z#5MPS5$>MUs%IyP0Ow-)8953#w=C!4@TT1ejSAyXTN5fcxE24>Z-(zoF21ezeP>@k zgg*FChkE-gTnw44&56Kq$h}{--Zbi#rE{U~+*!nm<~ch^^R{h;&+J)n4+tD}zR(RG zy&h;!dU8Zh)HCp?J(;`*2*^DcCZPOam~U7dvz2LdnFQ+;Glf`Uun8cYc4#LF6&{FA z9vViyJK5y|fXh_J0J?c+Pa-ERg=CAfC@suE?%5M+>g>!2gFPs6aITBa${fh;y#mig z4&^VOJ!7-{s`@INB^&*3+n!b6d3<03Zk>AMo>P=H@YWD zbsFsf?E&q977qwW$N$vkZUtv$rxBb}^vT;>fZ0RIvq&({GJ?3mI&W_R9+X$rE6sAz zRCguF6h-E?!A379n4U%8nMEU#!7+{UB%e|;(JMvIn_#tnGsxUN|DjlyHk2{gr!GxAxjcS-%d}AMv#B3N^>$j4_o%0Qu!J6k(atJT{gJA=|)+DIbSj(L0yOMe|i{y ze0>YaY1% zEW^t`{WG;WBgv7KLfyXq25@cirJf)JhGmz+#KmG+S z?c|hto3~D{J>V!FaG`96tEY(p&Mgvvj#6)6o+U;)p( zlLz2ME$xBSz&=kUhT?O_Ng_y8`q~cR;2u^|-q&g?iWQ4HD0Y%MD&zP;70`3SoaKWi z+0s?@+{i8$(B2xj7eKwLw21X$9Lm~iG&YblMfDJ2n+x#n$iTVyqF3Pg*3?a1`A@OS 
zD{9NJa>6B;!62T7txmcW?|=R_1YqxrtxmqIB<$-7SYy(9k>AEJR{-FcLV8bgLs+`_j5yN}8s9Tk{FOx(9(KELOMBC=3*X+Rqbp*e~ z+zEUe7Dqkvu=q$aeligeABCvo_%^_~q^sv~m)~|ZG6;sB*nbrJH|@gy&ATDjR(lg& z^SpCeXyP>!l^BDATlezArw$QZ_A)cTb6$y(X)&Z)wVzf~tynQ42n8>t^hH>}y!~^dI2F7zfrS;JVn@cH6Df=wr#G9$0$sPX?^sRmFKY zvi?Pw$snJ7-NOTxZ|?DZNZI%b;^tn0_<5HOSibgbeu>hW z76!0!hw>y-b8Zn3=^Q-m%%dq87xy|UBpD+JCC4wDK)$WAdMsw-5NL- z=5;3JO_&Vf3@B>IyBK*6^<090ig{Xr`$io^f<@|UV#k0xwcZUA;5;L{O!-2VD{5<$ z??moKWJzSICn$d;H5)UNqVdodS3nr(2U0STA%P6ZZWRkAdr|^v?m3c<&km*{GoQ>f zT5?<{186cR9beKy*tsx!QZ$}^c_Y?sJ&dp*GbS|4yr{$u`e*4Z22|mwgEuzp!(aZr z6*sP)&348(Xlr_2?ZT2u96MvtRJL`LAAwx!5gNAAjK*zoQ`p(mtVP|I_6rl;(8un} z@%kA1@PIKwz*>Sw&g-2|JdBJZr5#g z%x#laRz$ZvW&1&lpO%b>330gp$;VKX%|M|9&qdnj0y{w6DFbAfJOk>hh0#tqGppx> zv0u-%2L!-vs;pE&KS!%K8Y?PLdH66)vu9(hTR%pXHIEu64dB)lYk+R_x)RbFd0NA@zoZUPz ze(BW+W)RHEqFnd{hTD6URsH0-FP#)kjji9n15xp&MoZ_OXkibeHPu^2R5v!y2Qu&B z&a1p2Lsh}a#f_vFZ%X%NAUpLagSy8eBsyWZt2b)O?=g5@URMqQ#(QGLRUL0+iNWJ> z%beS>;lz5SmtN1Hqn(*(oQG1X<>I8vG;rRTlk|3*9>^`thOO&Q0^5q6%t_(s(YBYp zyNf$w8SvH^3T-oEz0qMk;7*`YWKwGC8asgJfBpBTWKg$`hzTb^JCOk%ljFk>P5BG~ z!Nml4ub1KJ1inSaL?&z2xG>7IDbrHd(4_W>go^>H%kjc)&&;n-`S?+)#Z2IRxXsWR zcWKsWx2}(X=VE4W3)pS!%Pp+Hfs=e#GjLr-9^+n+O)Mr}l1X&k=MV^9vwWJ8C=rR8 zqLLa!2Ki%Fay0xXiDOs|J0pBNJg|283=XX$qmt(-h@kq}%o5b{TuDGu>Ff}Y{V#8A z#Xr~W#3Nr?36XknbelIN4nFKikT$)teh>XwiYu2+Z5h4b=XtPKjtU*( zl1F}VB99^gfoG3BZ%jY_c!6-<~<0G2vLDP&!(JK4l)J!qP(b7jW`e) z7sI@LO-hzUztyuLYLXhG^pG?bSiaOP|T&FLs@ zC!m+*4Yqx5dR=?KaXlcK=f^(&7pAZIGUng%ZDb$Yg^WX=GA4*6Shl^jOeAI^CNDtL zw55DbyOS0D8bZf+dh9vPmL zZBfK7>fkiSnGfNfJ%*jfQr^IZh5JV0=GnL7c-}EiM-|9j7^M7Zm`s zqNctLXkEqO=mMOZs0TZf5+Mtx#Msm~mzEoVd3!I2WQhb9Ehb1TW+LDl8sJ4RqZJ4g z;5;M095n>kr7j^1&S?*3z<0Lp>h;Y5Jl$Md@8!AFPyXW@o5?!20P8-UZkRS9Qgt>2 zn7tH>Iw2|;b0@|jJ*!j!;L=XHw+oBh8aS7a$buqEr-eTObyKtUy8!9}oO_TBZ;TQU zS(B@lO~t#L58*j#s6Twu3Igq|Y%TvSiE=(GzIeGs$DYL*!Cg!8i?Hdn4^;(9@(SS}+8Rty zman?+1`D8mG!=U{Y=@VRH&$JLDeCw<^kpZffa9C~y&iRp!Lah?D|yX@qmqpE21=T_ zdUB){&+mU4EFyG@m`;L;^nbj|8PXt2(5BzQpd&YjI>jG^Yaw!V9ya?frwm6gJK z`9)|fE5msP20roP-{Bb=hPglanUdk^d+z#3?E%N~Kv~Wi?0x+yB(MA;BBv};Q(5j= z^ybwrrLG&dkmhNJT&IgIm!>^l@@SafOl{D!f7J&U|&2+HvKCf?NJ*0PyX{` zd|~-?%qKHl+sN~PFn?FvczM4XChX#vi3v!aJEdRw7zYcpUF14cFi~Tpp4*)V7Oo$jiuq6Q6m>Gbh5$+XEl`?M=*|yJ6ws})a6HM*oYFc*ag%Rxm0m%2h z`8xyybJ4FJRs-TyYVKD_q zQ^PwJp61q0>+<{zWFGzuxhM82>9c_@-%<0EZ$4!s@E$c7=S1G=MC#@|<#YU_Cg8KT zsrk?M_Zb&mt={MQNo4 zJP#+J9YBCr^v|;xBvZ7=O$?$bTHr1O#YNv-0*8vsNi{+6Dl)IDYc2Ock#z|qbK2kJ zp@z;|d#a91^9*X7i{y(i&Le_+T4Z3X(+$_Mo15j{B24f0Ci|qbK>+S52KW5ao1dz7 zU%6xoCdEgfq_hSVRThw3j&&hW?!yl`59;2!yE{`Sol&)|*!M>M^0Jgid7D!DS|nb= zLs~l%BnW8@{pv|y3FzM5u3I&en(`GmaylF9cO1i|3n#Tt;*jfJ4Aywh+Qry$C=Ks1 zkmws9@5A9!nOM7Gx{|SR$VN3+T4a4v)ALmaleIjOBe!X|>wfsty$tG@-EZ(b?eGcg zShpGJ$Iqx2@F;3rUwY?S1Vw7emEk_ew#s+meL{kUN&r~H1O%7on=3~l)RQS*U&GOx zWR73^)m!oTXZsO5X&jR0Ou>}Jv-rW)h`pceRKU3$f9BX}oI7zwb(|dDwjcNZ<}u2( zcq(ZaBY){sDao`j`1~S+TeS0~<7V5tpQy5@E}d;8*K_TGHXdk@z?OXGyZHIH**@A< z1U&bd(zEYkTfV8w$UsfXF}Tm2j|&7)a*iKE9ogq|p7<%uiJj}2yGKUa%1&?79vD#` z5T?0IZeC^tk?9>6n+)IZcxsDJRg=+J0Um40DK}XuBdIF*%&g?ZZ?xKKrRRNt+S{VP zEt5#Yj3wFAdUA1RfOYCjyL$Mr!X4EpI|4q&kCRvb&p5|FbrI}JtWQc!UE^I>Pf z8|}BhxdlA$sj7wj)qiMfRGy!%-npFEfm)KEh0}+3!W0mK#F>kSwj#PT^a`1s6FVO# z^N*vTQa*+|vK0mBU7d7kpMh^T0_l;yQAjN~)#p6y@tig^HQ+!_&-6*hjv0&D5td-9 zyUy;w;j9BaYSN5I2Azws4BOa~#okS$N3WZsXNAvf!&>hqx!vz}=1s-q*^z}|F!Oyp zOaw>UKIrVeQM!Wt_JA+JbCFh&N!Z~Y1a}SQxi0~OU_USTP;#Y!pl}rdW0_N7xYKe4 zU>DtT2^wk(BrdJ2MK0OZ@-y^f8UgI>P1j9W-va(uRMjb273=z$SZ4vVQ_^zrpKXT~ z<6NXumd~<$s0Gqr^<Y}qA8ER@_q?a?uY9Y%LKs}6l z@wUv_*-0_ZpJD*fk9VdZD#QX6$OOpE6Th#zkcDrSUe*E7tKv# 
z=S#=V1V?D(rrjs-@vf9MRkihdX_G*&bnk&4KHqI+BwE)W{o~&_w(Bq^Etrn$9=%OT z^6mY28(#X!pK$k2AK?pm$7iUmy!5uA^ML4(O90TNSFgtYExX`D*_H)YE>{)s=J_hF z&Lz;DkAUbfeoReNlTkYllVxOGGSByc&AZk6p~$gFCyZDVM^cp*Fragw9~OV%A`UOF zKy^hGX04c~7)}0BAua53k#CtWH5ox6K?tC>eMnr-Mn@WT(eu&c0g-HR@*!z}y7=1q z?t!um>oJz>^0A)YsFxt^dv+r6mfI1rQp+Xv-K)JjvS=M+l{Cj5ks|}Hyb5ESdj+BN zu0Wl-*B+>=DkH;ttC}P}h<9KFOd-(_eQ0k681?iGg13LDntGjiCkHgv)j*i#RSWYzF+J~!n>Fiv7%q-p-T4I^ zzir;#*mQCOP8FPB06j)gKi{O7ydEwdSQNh$<3p1A)r|PSM1tohFh6c#zw)uCgQW+v zrm=G%&bqDwo*PzEUxkut2HWX@iTsUW)mF>Kw$A2KH*MMjBi#ccQ4&f3JCjWC%z|>n zgql&-%se;ySVkQPYdSi_MS;SV1kfc2sIbii_!Yp<*?Ei#YU#;zh8}l&zloen4z;`m zfEPG9jQZ!k?SgK$KW4br1voePxG5HR89Nnx$(|RJ_KL1~VOcqW^UvvzM#b`8M(Gs8 z1S}VJd1oehi0?$MrkYyw0?;=#S?)DvmKhx2t$=blzJ#LzgpDr#im(FIZRKsItiYXD zEx;csA(N3?GNQn9X|DuK6&-Yur}@v;&#`6SDeO3$hDFm8u!NIwt=jU9J#I4s%Fdk} z%h4IqfdtH$5M_^59TjyFfb`X0{*3{Q&*S#*KZuCLn3i_OO&O2pfAD*3d2>Ck`tpq} z+x3Edc)*n%Xu>`Z3JbzbkKF^2Y%xq2<6Z$~e!!QZAT1LfWS`466-1OBlJdiodvfIX zSlspFhn4J0baEWl{rL^DmrhVGGZ8-2KF?3hKqIAL7GAbgWi3HNeS-X2KF(?@s}&Pn zH2b3{`{EO*J6Vi#pyzEpAj8QUN=x7#sqgn~J=43j^90K!P^jzF8S22aPd6cG-h52_ z@&mogNT=?X2da-9#f64eK-B-T%Qbl3FOLrH!E*npAgC>qwK=(Pq&dMyS8uaokh^&B zImwadEevyZh{&>Np;+ zjd`&P$UrZEu&TY>yb$aY(kF?L_Qi|~O2n2^8yQ@$TCv*&Zt-;WBtx&I7_M7l{=K@Od_;j1n#uc^_XZ5JK`0VBDWmZ%lN#KKUw zXY=>}L*#f#tTG2Qc2^ZNP!43_P%IVHx!UOQc{cAey!07IHd*3h5 z*twHpF)c9?pB+fWrhO-{Y0n94*?SUG6C*H(K=k1X4IK~#xHhdna z;&1zvU;LG_9wuD>wL9S6F0iKroLq3#3cUa4*Ko=0S2IAVV~%zhJ`WFm$)=D14o}af zmTMtgDW4_U=7yQM&zXUgEAop`T~dy6c9i5(kK9b~TY`dGo7v=pvaCWZxpD;-5h#~@ zYWYDXCLp~#JGwl~J}p5=6%bKSLS|i=N@EHQAbZcOmh~U)0b3rBpq6|gfQ$d=mTWP6 zBflYG!}iw7w{FGQfIy5T$X9eG4d)5&&Hmv}dX$&En~7PED(r3fI=%LQy?LNNG9OJ1 zbxKBMxC4sXn@_u{k8%GH@chU}FLsr*Py4C~q7O#CV@4QmSVxJW+L{`q9zQ{;RCm6w z-`XdC>x265fnI~>Mv-1_-UQ9tzK|RH`ZM=to)Yx&`JI1j)(0NLAau?cADo1_G4r+L zMt_{EkJTO+~(;LJ*mYfqvc$3>rd~et=?!SCK(pUDL|E>8weThsd(XB*9ue zs9`RkxNJA9ihAm~WSc7q7w54GIPZ$dZX9RK-I(u4H)-R_c}Z9_V;puLIjey5gC{c~ zlaH4>rcknGVtg19qNqhoUu)AI=*$D6jV*y9UEEx8!`JVGTPFb?6Q(DtEKVOff#i8p z)w1Y!?)u4{aJI;DQko(Ty(Nv_jq4`>f)4|MQ=F!2)>U4@E*%ntkd zvq3cHOEOO>9eMw#){)Kq$70I{aKZUz<4kAw4aPwQ%BhpjfKFtldvae_n#{s57de+Ig2M7GACZM^$*`0w&IsDHG zB({?#cUp}a%Cj6gl|hi(1y?PZPH7txBE!w>uxtfn+UyXrKM;H08^F5tPCnOgM0fnHtuTqPXEf8^XOrDTripPypz&3%+~`TSHCwo}eU zCh>bD_1|N|0uUBtMreSE*P_>u>!fEFlw!lK<7(uAak_E}ElgEoUC)Qd0~=m`ALWH5 zxcg_1bPAkH&0GndCe3DG`1dy?=q=mWJr$c)b4P^H_1irHY|GJT1|s znnL`Qv`QVHUxjH|5Iu5{XE98`Z;|B@;9N{PPRc34HGbX{BT2V&VXW3s*6ifoBUtzM zH&yzSy`SU3-+qfAduv(UW~390N)O2O(@;_j*N~7=+5Yy8R$AHgi2xwIv6|y40D}~Y zws!Wu-I)5=ciqkp7<>=J z%)Ds83n|Gut-y29p`U!oZ3CYFNXoDz`!upMGEq}qgP2LPFk!YuT5lw^(UIC>H{iKw zr~hlu3&^X;B`jk-;NRj-6`+(sO5Zv3Ca#}(6I>V=Qk(XG-FiS!L~VBCft-Req@2m& zl#8?3MM`c&DkrR6(f$_ZxJ+U$1dd(EhPHsgW0Vw$NQ?+j-o&}Jc@b2WprHc1cLf?- zA7f9;0=`P%&sB315l-M$nB`|POArylDDusKxpjRdpr=f|H37?u$@Vuub77eWQxD$L zLjy5A^`%^=6JkQt)Udh{}zu6GPja;{UFPb7&aq%FEzOo%66IL-Q;j+E#X5+6CT+>T_G~ zLwsSk3-DZIi7IyQYN@=l$prJ#Wi9J^L3==ZpgRvVe^24Bd?)|_KmbWZK~(ddMqAe& zNISR<#RUa$=V-2_*WTN8V!e0JJzzKBd1fgi6xWs81kSC!P*9nV(*>uH6gts*vtD*k z54etXMO~8xw6y;w^!x_zI#ze?*u(kUuqy>S4y93ArKY7i!pu#M3*{7s*oXiHo=bpF zVUmjsi-5@uoVPb&mwOWQ6}GxC)CD;2ilyHEczatH7X4OgY75A^gv@3y0@)I{Q#8oW zWtA#ccz~a0pS8usj}?{dl%Nj0EQ`6hIw8Q%i?Sn*+1X$(=g@`v)N=jGBql*wGxCZV zthO9^oJz~9+g$TQQSSv)XmfMJbo|R+3+YGoPPA3`Z1rGVVtWV}M9NP^CY{#O^*`n0fIcocjDEiYeJM@6o2shej54bw_P2W1wKbp*Ai_}DS<+~6#_jI;sYj>Ac?;l(b>`zHY zHKj>J8`=QO{Y)LtjW>Z~nGBPlmjVKEq^)A03kzKsv?Iw5pUOlVcR&2qV6*#&7r~=S${N`OY)98(Z>Mmnn$Ax#*cU@EItNFZoz)l1|OyQoQivKcb%7MeF<;YN88( zK9-$wqM7bVU3Jk)7yI(BzQJ^LaP!ckM+WDt+JCN$eN|9gLAP~q2r?uDcL?smVUPrO 
zLU2!T9boW50|9~)+=IIg?(P|!!QI_mF5mapeZBRct~yU$U8hd#UcJ{^ghV$Tm6*t* z98>T$FN3lwIhTfkCR;5v>VJ^ZC>tf08Ss8RA^k<+kRLa$T?|MA?Tezuwl?*DcyCvsLE7Xwh{k7U z>a7EaeE5;i?x#3^*p2ks^NH|lebUCspH?WgLr3~j=>$gp0F1UngZ<6n=<!JPU>FbrnZpnP~^A zqhq42yVoT@?~PL_+%FR}_VsJvr*+A*);XL*4}X<^#0NZa1ihNk3R3H%vCW-}Ce4if(Y!3nk8Lz|S7kO(V|B$wJ+Ze`Or zQ_Mim6a?$eTy=H0i%V21%qTj5RQw;#7u{kgsPSv(u{Eh`G$@F z+pA?)Z6c4o#%sT8@ljxO^#P}FMM|_l4jXh)j;JT#O&V$K+us=dPN+CfyLa_#$_e*@ zf=>r&eCN{_>Y3rrmZagZ??lL1#bn(V^2^%!)&G)1-TNUEJ{?E!q(u_h-kTT zzZ90#;!eM9*d&R9PhR-wB1$eo75>t3ii_er+G;D;kNi*XHR8+Bam|BDx;^i*Fcv5` z{eI7uZC9c6-^5ho1`*uK>l8i>hy2L4D?SDQK;Z7SV-EhlM)Q3$XT(DTvb$2|vQw#z zEF)sOL+zj9Pkc?ts5GnlwSCNepExXHiIuUgt&%WhG}0<6K;7=I>GLuj8b-#3I}xVE ziU*z%E^fixNc%z__3xMU5fODK4u1LHh%cs1qf_u}aE)E4P+HU8aG&N~amcKMIEG}p zF`2|)dLw(kSR0GUG^@HYIG&NDE#8+GPyM=3I>iVLbY^`W@(##=8XB5z{e=cU_gl|f z>WVJr;<^Z-M%5i_uK{FI$R)kZX-Da97WpyDs^U1ejP|PXhHo**`J>lZ+v9fE=)glaFAxmS5BG1 zjec69rY$N8zEkj~U%#(d;~;iO^sXZo2|Ki*DN3hISeb5tbO$@+uX#moad7Lf6jr8 zsbRw9NiC?ON4MYtm=noCIH|=_*WZ}PPSx_lvW^(r>mVUNj9K9id`z})s}-OQRA7$h zstu0f?w+!!&%og9HgBd4;2cICFCA{?Jtx(E3X{!(Y~v-Jinrn`Mr%3aw$yU+@tQ&y z?(zd|@j!n2CXqcAQ?FCG*;-l7qjvtOum53n`_p;QmV)2VLPZK-$H%fb>eeRsvmt1^ zb^~M|5jH-}JDKXO!Fy$CY1;*QY0Tme7F3*<3&(I6{258P+CBFsX4Ud<+{7bGV$3hH z@Hm6+mq@OMb*Vz;67><_xGtPzuC@IObCDK%T>yeHz;15x0qgJ)UCWK%YppZY_^qW` z_`FBFjMO902kJ?y>1I~#>Rtm*L%%sZI>agDKnjqangTc`#3Wq zR`wNE2?TipGy9wru%Le;NLB0X7G$0Btiw3BkSbbnx)kAj?i3C;*LgVi%)HZDT7N%s zW=i_0x znm)#ge~tokoNeqps;uu~q6}3#9c5BTfp-f@UUPOmePrONoF%^{mvh$ViPKI7Jj(MwkJ0?xh>bC01)?8q8#r?eA)&MZX+R(D-S4%JkDTnyNUZ2r8=Xrnp&&qb zyt3=E2|Mmb`+?!wlcDQ8*A^LGUZgs2!NtMJb72vqZ%E0~X=jW48rVw@*?z-6l+4Vm zNvxi2e>|}BoIWW8>`1s;8;E%0e)0)Q$yMFMEoI~QA%65_Dh@xpyf_@8mPJrQ)w3Ki zyVJX^3SlReM30^#kPF$pUkLn1Py?Vp5bb((859-_Skw+GNyMPYf8&0O+BHem!{hye zL;PuH_UmV#L*tN?HZ1DjWljpN>aE|)LMzXDixtj;YbgRW+`oCVncxqAY%Ar77QOmS zwKjDuQ3LdpE1VKZ(>dMD{+Pu!>+G2f2PeOBKwrr)TL_7mkGtjIk>$a0mwacN+W(}t zwhVTNsYo~cr^ai6Ty`XJ5w5E1{fN?-beZ@srrUwi1tj!hHDx@)`QJ)@23(`qXc)4` zJ9`!MujgQ!|H4fF4{Z9s-&pZzJn7&Ed_g`0`hMy#KHA zD!tReZl)Jhd={VsOHkpAY^Z(a{*E4?oK1}g^}4^AB$QQm+e2IV=Rv)2Uj;zfr@!(F5v(Nr5~ z^QGR<8%gD5p9y$T1MHN}<4S3{e}4 zb5L}wx0344tlJ~$QPE>#d{=uQ1Ea_OmD7iU%pN?k2YmMC^_$^6y+{5)FR!oTc;r}B8-f4j{8Ldb6j>GqrXY%v1{kO})>3Ev!k@iZ6G*VlhX zMifXqX>NQUXXW5}d>!AqgM#fegs)YucC*Ju>Gf%VZtoCn80kDsB<)XjehqjBm*V}; zqQSu=F0;3b`UYqv+VLk7weZ4&9jMz1@tBh#JrjLv@2a&%Iri2tjA(!g~X09Uqir8?h8!U<89?4I+=~ef?s|H zoUS#4oKqjK>X$pCeC`#1&*`A`Fu2jA-Mzo;*imW&@2ViRAV#VyXixQc8#~ou#qOr? 
z=-tnMqtpd?Ccgq;++Y{+YV~m zHy{hyNs$~mRE&V{K{C+Fs)sSyy07C=Nq#I5axrq}H= zTC~lszwk+aGiCbZ>U^uO_hAdOw|WvTK8@riLaB zgK*eQRaQHo401y5b+TY2^ZoPZ;B8DQ@HAn0C&1?sjJQuwC+&N{Oiny8Q(WA(1Qk}F zunyQ{A+JNSlKS<_{js@)q5Uc0pY6ta|C&vdH=0c_k@^U1=w~yMm$>!IT#K!c?IR4( z9u`|fEFVtB%HUr3c z`>Y9dy3OcO=ChMHK|o-pyzlFFzbb><60}+0$+tXrmGIrnZS>h1Dj<1h*7dtaP6?Ue z#WkH&_1`MjLM8C}qr&>dd>%gJpQct;T< z@fsS@EiAX9`F6J=r?V_z%6x3|3N|(8%qh43IODnyov7CN%Kme>4J(V1CM0r+D=lbW# zZ3vljG~8WK_d?gyJnN&##9hhHCD*IpFm&bfsxxDGelBQVGejJmp%?cx_oPojXYY>; zlhAfgy0V{|p3W+-Q?@HvtE9{M);ZVPh&?u8AZquCyeqzN*Rr&68?(5_#=e2Thz9AU zI9|(`60};Nl2220hdLH}&(FidXyoqh*)ceXA@xg2z;%;x{nm+c&I_8DWnfiQ@HN`V zkLucVP!tdq$=Y5##2ij6jGB>=5$zR4cAxfM*gRZ;4BjWKRspuMh~Z%(_Bl*H8j+vv zbX=cxyj9I-aQs4}UzQ{!A`<+^P3vypIpbDb2-Jf{cW8TdaUo^vMK;oAE~vkNe$xT+ zy#ha8!^O19TDIzhtA0%P;&KXzaS--LQgjY;JR^bHuY~NIFER*Dy#kwal`e86zHd!< zw77ks5uzsmh(0pDdQS-c>?CCfM3G-Tn@)WB4S&9xhz{7XW410}nfGlK;}Uy%j(SJ( zeu%Kmw&fwyt?Fde2YCGiqNNu?Ezv67_YO)Y`>J7(=XVjkic7ZaSV^bh?JeSSGhCof zuFJRo>-$y5r0-)oA`Iyf$>GJ|=KJ`g)G6-+4KG_fd;k&+!+r%!B-RiRV1(s50qYZL zBPl{CMy)#(y~xTt%_o?kmPi*$P)F!Nd3TdiIPiZ2-< zyMAS3g&1ifo3@{Y?-}MuI#;XM+^z}QERxvEu^TP{S-4`g@ygw<#G!~Y(Te+ee@H}V zSMP8Fwu6-_+*J#{B91Sh{VH=kJ^JqAE-+UZ0Vb8Qb>e$3(!R5#!Tlbu?|Wy9p!;Ul zgI>z?v);lk5187jP=sqs-xGC$`_nso;z75n0h}DAExqQ1+c1VRq+k5Fr7w@#Z~Jj8otSnSjRYRyEkI4xM;cD#(@;YprTRR)bj!~N!8ycR}_;1T;bJ%Socw#}f zCjBZMrsti3J%M};KQ-#Sb<>0)^b`#@>{8{kU(&3X&?vue@|tIptJ z595D<;Cts!wq!gu+s=#L%}21FkdQiotcdQbe1uE!s7~eK8|{UU&A6W#1Pl zMc*@QU-RGcGaeJe)G@{&UQ?VV%ftB z@LcU0_igN}YWM)xwJ~%XCM^(*ioxie@}>eP;JJ=g^pN>{qVxC|EY^$Hevh&Ia??Bu z1xHy=z}qjdz0wsYPJE6QhO~HjdD)ckSM{$&ZUG^ z4xXP`5{#1+8I+lD*$G$?yYqG5Csg@Du^;qXzk|R1hRy3LHHs)zdRl)eN;$WXPbG;; zyhy9yi12!`k=truXedz3KO*o9k1=m;hTfZRZcK-m*qY~cngYAJbcxgL-s7Bw4p=@u z@2wOjb|f14k}4z`O{oQ(;r*#}-*Z~@7)^ZbK9L-uP+eD7X!Pe9YiVgA8YaPaQXLh- zaB@A%qM1}am58@?E>3g621-)j^PGNshqlLTjoV@H zi!GQyOm{O?}{;z z@xT)PMXO6JzW0coZ(&Xqx{#fdz3_-Lee(N;tx8S~PneUfqV3$;&_r3J+)K_1L(P4Z z=Prp;^!uZqPt8@sds>0DU`pRy-J9BAicTcAdY?yzGY{}qp4RMRVJz6|e#B~%kbu!6 z(cO@;Zqt_;qh+{tXtMhYmVnnKI7x7xntJ(M*{VT9nj%fa4HeI$_4clg4z;G89ts9W z_Z`^K2$MWLJw~v4@+3Z(F=wLDF!6{|$8>`wJe|{&d3Y#p*J4$e9nRLx6wc1MM{8tj zJ~Gg+|JfQoP0xHH8wI`qDIU&LDz{cZwTo^-hO{06-p8!;k(5zy+Gh@mBGcUvt$$BX zP1h}^z8)J_mmlrTFgfvO8n%S4RdZt`kjqDqW8Pz^>ZDeBkLDJXI6ZlLm$^|VbZE;O z8y>;*LemrbNzRfU5RT(x%QXIyMI4KxfQ;=UDG^CpWI60XoK=xKS> zs|f4PVB`A}4YiPTcv#rI%-6RCCo+LquG9N7RbyE>jGBl}TAu%~GS}UEzg%cU_6@5a zoXd2aioL8cv8IXDn=MdgJhAvlG7T9$-lDlbw7TpI%-tP^y zFruik(6UzrpRQP_wKj*Lu~5CF*4WCh1NRL{LtYSJ9#*>pRXjeIoW(}XLCd|PTf$S> zb__yJBR!7BXy+6Y?fkr#fvUU=bznMCH+2n0E#2O7wJP9ri9vd3T(K;Ja(ccE9He?~ zDmN|aez)A}X5@WBFTX2UrDM=JUz&+B>YbI-*@QJ(H_bati%kLTHg($m_hYt`mZpN{ z*IRAV-xgClFV9ePZu?cw-V51OmEq~VRy}&fXg_Psl0!2do5k_p;NN#sfy!qhE0EeMbLVLhY6ff#s|8$otE@+U%|(qZ4rTa2YuSgs=Rgr0s$h+B}bYNEPd5=k_(wVNxi+OqXuCc8g z=w?;3z0L=pqAR(lhw45cJ^9f@Ny+)8@@ky8SyW`;mtIVk@UFA4@&kW~8gMEm_I9vk z3-X!ISXldpYid*7kzy_B)Y@-FJO_($r)jIH8f9Ags|=>}-OilgiLY?l;`Hm|iP*OU zY_exd!w{Hb=njkgOWO~-Ijp@xEXeH2dg^UcyiE}#*!}~(zIJgBzt{;ZtM$$jtE%Q# z=iE1oh5&O$si=49x_p-Z()9;5mh^8ZY4kz<{~W|fMmR_@w_(){l+#5kA@?|B){bIb zocA8DB6lHKNNkmFN%_c>GNFFrbs1uA3A(&ClKdcTc`nnzg!Deeq%Qc`3Ar?{{S^9D zj82{>8|M0=`|?@me079(IwM0AWgZ_}3hn^d&M`?mQ^1A5Y&2N`Eje-YE&z#St3yt2 z%bM4!Ckcaq9JjT}B^!U3pah4Roa3WVqc(FMO01lZ8NbdKldI6Y&_W6x+c*?Qdsc3< z;;F#Q@drX*S$-MDb{b(hPx-(FV`>NP}4fpzMgF?OFa_eTD2ZBR77nxrhl(JXE z8wF}G+x#1U>9W*ldehG%46^BCWLg!QLm6Rw6^noQ?bw%&8@>)l+Gz(r6ncoE&wqZz zveRyE6I9gn!SR;?zDG|0S~;Z>>$LZ{?M+4`l1E8HF&w-VQ{FltI@-4j*&7gW1y?IeA{Iw7ZD&~?W*-~i3HUorIy2FD zu`}{etW_+%5fo9?P>||IqPZ#u1TV90IiEfU?D_m&Y6Nd)TbUGSJ7W?58QkZPe6!pT 
zUGs*Cv(!R`+tI!hF;_FeF%3${F_B=>HBt1OYGZ#g6sl7#E~O|Euxp(2HMp#GH6w^BpszPjKC*vq(^cJRKCT+4A3_l`EYlk(h=GEI?!FJ1pmg**ut)A zx=-8UhQCP!Kcz3!#x`M7tlK?ww_4wKN%0sTYF{);7W!98EXYO-HV^ zE<$?HQ)*3%dfm|Hap(V3LHkNtT<-ksl0$bQ0{o}adF<#lD)hg4*e!G+kPJ{Q@Y~NR z;}0jUSw#HCiX!drNz~thh5gwoVMZg0o*t7E>pgF}AYjj$z%QB1T3TSl_vI%ogJdGe zW42hpBa4x+W8=DD8O`FoZwXDiM1+5BfSv+I@NB_zc^|p(dcth7>Ty8F29WEo(%Tp$ zGi7-~c#f=8u(BiKsKQ_he(60^*~Fj=CMEvlX#HAyq%1I%?QErbd?VFLU1>7+S29;d zWE~uk=b{m5y=BQqhj25q(UfHRC{nr8NBDLsIku3>NkW6U#Ylz&56iz*!kwr z=SliVt|**S)=aE957DxdmEAt6*k@rO2?93Wr3bq_9jGnB=b3Gb;oA6mm2RG6+F-1| zNrz(q!s|g88{=~P_Uq~-T&{@1T0RrwQ_yy;`pBuG^nOTgYdU|EmosPC zQsrQ29_1%f^!Xuk%ZtZEzr8tJkwTzjphaQJPPwX1F~%tzJ&jKnJpTe9~s%~KAhB0=<2f!_+0 z{w=p)#}^#NRmECPt;K8$c=j@EW-A|1CfwpWz3#7s&utwU+i-kQlrVy98oE&3OXr%S z#mLZY_S|*eSG&fyuN*3>&z8wX*u;C0Yd$GQM3c_A=P=<=mg%3_n2e7b; z*{W54ca0~0CC-u)nLnj{VU7ssbsO}osijt z)+%ABg%Fk`P2sHYj2q=Sg+;%N^+=0F4%z zsJ{pIzvf*JvukE3s}qOW6OWwFZ@H}d;9HTcZ06$<|}HWdonoD%ZpvME3WVy?YqOZ$mLjN^YPwq@LayH^R+NE5NGW@HB?%hv@2x2=ObUG? z&M9U_+iW`GX2tOZNDDV#nJJVeE0vMthiQqVBPq?6XFS@?4KDn<(R92T4!rxepgW`J zcw*1Px>Rgcm|1Na%<6pfF)oxe(SJsXf$hQJBtsM--={pT zhPqKVUBzexwau=Ia&8_s>}%R6DE`!7(Qd+ASRzgMIAu_R9l)XI19#do`Zve9(*C@${`&qh)+I%pG~O62*z|v1KZ;TyJ!TPe6&{Q<61j z%yWv2y`8gTmVBDZajA~RL_-y;%%L}&m?Xj8?c{5-sCUulT5Hz5cZmcQI>XD&@IvJU z*HVwiv~20$Uvlm*so{7G>dywV!fmo`Uh>b~(Ghpcgj`+h$Ef&RE0UrXvdkEbg%qR)>eb5RLM@H~BvyV6N=?ohJsxw} zQbnp6z{f+Z64=&!*+LvaoyZYr?DmYubKcpf?^l*CK1*TCc#-q&tt0cO;aNW)sJcPR z!==1z!7(4t2Vp*lUaMwueU7IIz-w}I*WCVKh3N~rHDKq zoZfw^Frtf7i#v8lC zFjs`#;}ORgW3_M}Z+@5bL+b6gr+pc~Bd zM+^nsE-+Eh!@fj<`p*E1o$IoAtz#tm>H3q!GT`X}ue}_a8;EzK{9u>#bvxoXNeJ)= zd`iNIoqj+8){O7HqJ%<1&bN#jrG|--hYQ~w|IH5pynffoZw;rk9=XioU7O1^`+XKd z+a}#A4);1Uf*U?De*MBQolhy^`6Vo|>@VA4xUkDMnbeP*!pTKBo>%r5{dOt5IOu%8 z?*cpatj&98ZE-agMw)i>p>&?pR@JDfr?+wT#UFrl;k z84W;LCS)9r+u&&7vP1D$x&zC5H45^-!!jS$liVtv%Ek-6j>Z%2@HCH{YGhIYVy(B> z7Oksu`if3dyJ9O>0Gl=s5-a$^972F-X97-R4tb6^=Z~3TiE)y$dHeAiqlqpUV;QK9 zIKKmwyuL~~wvVf?W^}MJeL4?JCXI7OH8sX{8@~U?N{*W`(`=i~>9z55i$mS-+hN^r1blw2Pa=D}L-L6SQSA zGRY6;KOJdkzV$!V|A3aUcFDF6uQFFQ33>c4jP|Of{ONQek%!X<%LZPAug6y>vKV z2B+f}_KSoGGdTQbsOhV&-g(up&zjT2qQ5G-dE+t;qWvM$5#8aoAD;SAKd+GX2p-Fj z)*s~$JbrppqFVG8LYSAUbO5hT1PjY2iqb;<7!@Q>(z5_o?V16qhzx*=fB9^>siQXj zU!;m0#@`c=8`{mgW&c3Aa6p5vC?Zn+ZGoZqVCGX9E-kjYt_ut5Od)g+FSmJ{yd$8I zl)X?$>&3Z=cqf5cm^hNpHeZ?ejEk>ZW=`IA17&!Q!{G7ajN;U-?xJCupPyF}cHa!J z()KBu*kT0Fo7OnZy|0-XK*;d-UURcWpmoD!!F$3^d zl3}Cj<1_~a1BAM0A+Sg91bKDl-rTQ5yqz}GJ73iOPg2j^1Ea?4O6N@-M+0y8rAel& z^@Ghv((jNwB5TO+XtRVN`Q0fqpfAc+?@PX7QFZfIEEX<zPcFlDKy(|f1+vIwM z8nFq?ZO_4w`@mRHvEHDHEXtKBu=j}bC*gqepK`=#Q68Oz5=0iV@3_~aA=R0JgO z(jp-gXA1130XEgfx7fx}5Sw!{N8PD1g>J~z)VSFmHez(o+{lj2R-Y1cJ~)!hTF`h= z;A+ukvJ%0~)miJi3$upW$DP1A@P}s>b44xMH7*(&AvaX^B+lja)H2Ay40F2M(df~! 
zLCS!~`W@uQ0w3=@0>-#9Gr!dr_6I;=3?bu+<#n2kv$DSc;H>>+{5YP4@hzmgyEu-i zOj`W!QLW8l$ftZclM&td7!;D+j~cJI&KHwpR*?}k`sknz(A*F&a@5`yrUi@BJo6kN zwOl5usB(fXOy{thPj;ePCQ>iZ1W7W3*D=!J6eYUj4HE|F$W2jvT^Q?Vce`vMB>OPw zVIbbi!%4>r{Fxcm@M?^QKV|s1)f(!tac^DZ@e2zm$BeEvpd?O!V%h>CIv4Y}$5Kib z)iVI^9-_XBc=xV%ZV)eBWL~JwM(XZuqS_{HOB!{s2@l!Bs51}I#kuuT1U}hnW7K&h z`J!3EpU(xd@w(WS3?>a;)4E^!Lt2AOxF`TEa|#+fA|-!IqFlELw>&l5c@Ov}__krC zY?`G>&J!+CQk=lno8#F+bn8P#eMYf(iwTdXJ~s1nvX6W)Uf@V-B{Y*SM<2J#Yr-R7 zXTQAcU>d@D?$`5Ph0$2(V|jmc z{Y!;|eoWemQWO6B{WLfv;v?S8`I`<4uY$LHIy!QpK_~TE?Auw(!;apayH=#iO+bxe zI$yke&YKD zsx>x8R7y`F)d6v9M2)s2r9eM6qFUzNKapGv*?;+51VS zv7@f+V=Pgy)M)HuVER{^!s3KaTl4zUOpjn)R%be!47*Sz2tY3Eq#d&pYyc>xa8=B< z`UrbRnQugK`ZwsQn_^__GRTlW#A?994~XXR)I0!|^;_+H6K0wx*9;nEzZNgezTWe_ zBIVU+ZTDR8^tcQudCUBDh-Y8+gyU;*4b5m&;|Y9C+uXQ9TPe+iw83H%u(Tl)HHJYS zy3XM6rP6cH>tg%W@>9jt9YiYJ=#1tUx;y-mA;P_cRC@NAOJgW88+O^tB$j+;jYz-G87eONleDeYl|_!El`O7Zqy2pon^nyL_tA7f8 zJIe}kVQ^52%Tay}TuU*toToNs#_Yu>Uh1OBWlF8yC_3$TV-CXo{OFvpL2MTD`hPK_nd*ahOUswj9&|y(0;Yw5-3VATcrCrh5FK6h!(v=Wc4N>*LMB` zQ{0ZA=~r=i(yMEYkD%uxF(*kYaxEqU-FG|p`m{RnpUggZ_*{2MRnw0i3BrR_QX*+B z5=IE@P+TM-OB?(0Atp*03$gY5!B_o9uG@DTr`>Kas|3qmQ$s@ZXw*9skPckB>9y(4 z0lPkD>v=S{)s_6s{sL9s^`|7Ut5dae6?>4+?~iH7kl1wZvfCvkeb^pZS)WN@ zaKphoCqsUexOvB-bMc`%(tbGz^=|afPV}6^T&;`}QL&lIDI0e}`p57Ie8Ra{B69q( z$>;kk($TYCyormeH}6+u`Es1Z7RHT5#X*|aluo>@M(T_OA>c5Zx*vj48JSNHEfSlJ zRkDk*f4}+$E$B2(&=ne0*jv{dCEAoqY!1XQN>%6fQPong?MI!gNWCTCd|h1PXl=~5 z1l&*B`k|b{qx`bp%qz8SJzb|{6_RH=ClbJUDng)8P8|RmuALy%;#-NkVd9Tp*mlt< zp`)||oL^5^H0?r&xPC6@CD4Tu)XnzoV!4k}I$uSDp&_enYqIH+`O10o94+%D4tfZ% zWgX5F8ts#czWHr5_iKabKQ;*!`2t>JR4qmq(eG`*&3pMjOXXycg}5uJM%R?En>UtO z62r`7OW9POTL?BARR*OFynJ`ASu(hEAgmT6f4*wu*(|V!3UI&(%}a2~@ax8^5n)7i zPZ5xiL#~yf{)}p`_Y54FqfFoekCZ8ftKj_lT9M6+6~n`SR}rA&SaYTPk-2I$;+p%Q%ha8)Y-t|7R!=E_sCQ7GI%C`m@R6N#emfk1l>$n4hHYz(?8aj}(BV24JJ{KcOr2Gq`rZB=r8^-hb9- zkTU4IJ389{D93Cd?w1)-7qooU#uac*H`-f3&FVBkX7-hSJFMdww_r{b6y z0RHQ73Xg#vvNuJK5M4BSHd(}qPc+U9GyY)y*p>xz?AYU$CR}PH419)nv+EA@i)G2x& z8!EkyCfI#2Yq@W7vV}R>PvNFW%mxlpq@aYfiCeZqULq-)4je2vk|wp|dWFz6sYltU z&dyNhyIv!QX#mM?v`TWfZglki-Yk^c_l+69$~lJ0Ne1hI$%Dkw6q9ACtYStSUThQz zcI?bg;Akd)JD0XgIF-0CDv{gvzj$~<(agp96Kcw>z1yPCYylEy6%&@{-tF8cJ=4}Q z_xIaw=*EcBvq&?DH$#^`}hhEx%G!oz8OQLQnbgED(nc+Ncy90qe^`qr!?4q=R5tA5)X} z$|DeDarbV$GvCL5?-N}{{7DCFfOynr7wdV#gM;LB(!*V!+}7@di5;DHq-?I0LCa;r zF62P%r-@2_&>12Mdd`Q{nB#N?3sr6h+#=0`WayayeXTt;O&e7qsogXXR^3(r{*N_Zq=5hz^xeJL6=xZ+^Y2xnA zLo?MdJ0x?WqdRlMI<>Pu$+Xc7;TaT+$I=9_*3`i@BYjjU$7U)lwCLjV>mw9!XT@w9 z-15Hf=@ZRRhasrgF}NwYMQ^!~@D}9sE0K%OY>AlJx!OF;MWP-&0vFmkKao~Wmm)=F zvPqx?s9;ZsM~e*pVag%7`nwSLpM?kH@8w~2U4zj_r2@y)5rhPp+GEm^+E+~_)zP3_ zK*>z3r{~lg)KR4BcFZj-+_Oy9b(2(tLFdf{TFIwk1I(SDs`|v}OZauO1C>L8{m4Vo z=VavkDbDaSu<3BA05DBKfLYgr)M#lH7^Bq_XJ@8o$ukzPX{QEnxCXFE`)i4B|t zM5`i=bn)TDptffxZ*uzt`L0X&mo)KMitqU4`ky57fk4{tc{^_;iNBF0lMiwFQ+rR{ zPCb9W4?*!WuEuZ8a8Z@O(dpfN5wB8nHqG4>*faT#&|hV=ciKA#ly%-E&n!A$)FCahHEPQ9*Nem}YCe z8K%qBQVC!>e}$}^MOua#I!@f$`)8ZXos;@#(Dxo<;4({P-}Iz_geIe{33kG#xwU6; zUWfyliu3`4GCl=#=3S2?d$G=2LuQ!gK#$Nko~TZLZ|))t&gLCxjK9J zI9Z)V0p>xH7!T+G-}!?j2ber4ul4AQiLqh{oLAX%0lH=M_!HrL$#U2E37`J<4r`0a zg#KNK!U?N1&#?0MCBv~O`z6uJvTkSK$@z_S`ekW2(_t&bFfXRNPt+j5QTL}@Gm9U@R%DWolU7K9a z32>z~osT&q^X#NQVL79&P$oDH$- zH)g+*U7YTzQ?B?)X=W#-eFg@k#RqU-pzo~TRxj^p2-m@obtmIf`SNxYCvf)XCvq-I+|gT;ESm`kD7vb;7e!JU!8l)VJB~U+p-j>=m9iN{r1K>s# z`p_o^mCQW;MQWt%Q3{nQGZrl{!xF@LzA$6L1a3wJI1()He&6J_e`EqH@J=NV(q?;S z&NI@88+J#v86#>y@{=BFl*}#M4O2VPoDj@>Kip~ET5y+>j>-PV3IIh-=MVOczL)h_XXv#U{i zY3DRf0~T9$@8sVbmyj;T`1W-w1S3cNx8=tMbVo3pBYE~L36!L;&7tXdiHXctW6no! 
z%bAjMM>KgK?6OiSW{<_W@hk8MBd2e!Wzb}0R5$C#uj#uKF);gZw!n)6)U!jvoXQvZ z;*jh|ChZ+nn*}NjB{^3gDYmqmxZX1bM7(8_5Nm)D1r_Y{!U!BVXO;f(p})n=XxTcUmX@LZitDYxozj7e!Hxdt+Qm>>v#xFu zNiN{9N+z%q58{c%4+QV$PN)PY3cp_Ox$buOY~<^@nIWR*yYy+X85Ljla{1fjF;7p6 zJ_gV?XCzIN?GCdLYuCN$N1Y>AywshQ7wRqOom*gEt)h&fb1|&q=K>B)=3bl@3pqPv z8uXg{nPC$?3bWA)!jXLcNcLu!FuOQPq^*xC-syZqZfG!8%39cCJMMlKGb&1iC7GVQ zKPKg zf1HEjRdh>P?f)2BOAho>MG@?Et6nx+Rz7}Kd^%l2ss^Euyp{p z>Ti2eXKlX#@CM%;29XP^yZbLvoFEF(m~xcby?lK|+?0e;Fv?aAjr}~oTxV&P%s3xy zE9JH0{IkmGgbGr*#vlK$763t%{jVDj&kWWNv8tg@Lda9#C^Mz8D#!Ih!J+tRlS|r= zGk#WI0)IK_93ZAN!=>e<^wSvE#9wc;t?AYT43~j2_vvS~k6bRc8-~*vyfE*{_tn=N zwowP)_h(C!rDhU?kK##qi}y%xo&zXl zCdJ*nmV*A(#9&C$dPLe0T`!pIHUtr;<5Y^6KWFZO$Q!mr_O=tyMq$*m&5vjiynK~cg#&bpZW8j;t$GRH7i7#X62ik z0T8zhlaBdP;l$FL13Na_Y8U8Mv_Pw!LiQF=#m6E}(&PNLYJ#^P6XZIR9E<|6-|t4r zmrfOModR&A_dT^$;Z(POtIuF&3I8B4XxE*fbK|oRn$t~YM`&qNNe*<(9oMhZu|M#p zy0cKq{G#Q-(|N+zc>>S$_d2d7Cau@SVF>O)Q#`qeNWo{)mNesrbdsf>=-hZrg(RI& z`N@fL73J6ZbBe7$d&$Rl@o0tj2RChrfDbpTe0)ZMZfhXS(d&K{ib?OdkVVcnl5~WMXYVZ?CyW;cETy!Pc+7lRYb0;FZVDh{ze08%@93c)TCWtpfFz$0_UgdwZSttOI3;F2Btef0l z*Xjgh!}517OM4Y;S1_^rZMcyz9+{9=QE(>H`Gla@!unIi32zare)3uK?B49aQZ!Hu z)|9D}w2nB8BGcqWw{Pd5fpxZar<|zNCwRlLY^nD+f}ZvD`VO&hM<7u@u?$+w=I2 zt05xr`}5|Nv9Sw})tWJkt7M9P&3eC8#IBH~Ubf_CJp<~x_$~Qu{H(0F%=hG2Ui8QR z5cZZ~aV|@{aF7HD9xS*^&_HkocXtRL+#xuFLm;@jySux)yTjn_?ry zs7_S$Kd_AxD#u*fOLP05hS`Ew>o{>}Nt`PoUYu_{I$iFJ-`dUvrNn>>QG*Y}S;X%^ z{fJ3*Dwf8RD+jwWNP_6O8fVKpI9GSW4k1PryRuidTq4HgocY>SEzAh1Y+vm`-}^<` zIjx_E>Z%lcCW%s^s#$cr7}HuERL0>wco}hW^lkT;2P?DehT{^y!)v&moWtag_w=ac<3zWR0l-k9iYV{$aTN+H2%H;i63>AyN_Rx)&T^#}MMX}Qw z>*Y-?k$<9!d32Y8&2 zU7a8McEoKnZqfp4D{yK}vbL>5V!bto?dC!_?Jk*JiM8!1yRHhtf@DfL<>b0Y(WAeI z?IP!zN>veZIDg0g21q?(Eh{(BIUeo*-lkJ-E_FRseTmn}e-wn9!DyVEM)?l=s~!sC z5t|TdKT&ulkjG}1EQ!hLqt-5N_GCnSshiJF_K_x#i^e>J4avoMQ>y=+oHYTc#sNxJ z)AkzLCssHRC$?z*;=JVM5RWq|oPJGZRWVmQcj>o-KR;?(h3;orf!lmF)w!R@-Dy2@ zDos)xMGCE$%BAwjDzsWchCx_4E{M3rCh&WsjH}^Aa~U(HQ$-c`a49he!}pV<1wy*U zMe`Yzmm;1c+YAGnkBG@~H|h>>kE_jzBPANyOs>+ZirJ#OBPm5Kd6B3FG9LMU8~*Fx zPdiLI@BIF749D=7Vez2@KT&4B@p$IZ#4{9rotfk+3@_t;oIs*D2LDpzt<9y^4%Q%F zkuq%h5JPfS(&!HK;=!&Il2a&GM8#sZB%sAx&>hd91B+HEu=(Kwf-XD5+smK+uW{d6 zron8*{H~Wroos;0e|I&XHoxH?VN(Is18`XS-ZI54}`kV&R>8Kvl8Q{BHv)C9UX z9Pn#sg0F1=e#2Ny2sI3oN#W!lo-dJ`*Cjj^yQ4em7qnpnl6#QT#OF=*N>;+iK(j<{ z(_3X*ueIjtyN2DX*m!fMAGq}G|@z4LN@d!5T&kNcfe)}oQAYX{iV!fcW3a;NwFFid#JYrtgU9sV&L)1Q4 zU2@9gC-s*rq&5V09X7>8o#|LR`%aR_jV?9Bo_TloEA%p907eBJpK+ko=ZYm^78Kp) zJHfZWnXyo7HGIvLN{X?n)PA^ab;USL^xOp*Hp8b zqF0~W{Ci)GDf$;i;3?qoX^K3}emXFUTt+C=Y^EYHU+29Z$wcnaT=DTnDZuuSIXG*x zO4N8298*EaBBNg6dE1*R(8w;fmMB%&_nHLoHT}bG@B;`mpWiLjbk>K2Suk-tMU>-|0SF!fzh-? 
z{m>1t1zEvMDp`w!I}`r_0h=E_aiNbv&-a`;^(x^ju&I|UBVa9A+|}8d!2r03-+X&n z{q>9Yfale1>LJ2cvn$ac^hl&rkie?TO@1gqXR#73PX==vEw)|J4A;OEbrfWU7z=)!9@wLkSEv?x*igZhL9&BTg;ZX#S44)wi_{Ar1FEme5Ok{f&00sfxsc zzcXY`G+fnww>tEh#Lzav%h}AakB_+=MlJ%^yLvrG?qUVrUoF%2YfyCjB0|vn28Ca+ zb&!kXPYc(gVJwrD*`k9Q2BTHYN>?EImd2T+9+t|g-6~b8)%g3-{vfxD4|dPs%-*EL zJPL&ij~L4#)yE}kvGC*Nj^~pso;`a%yUo_bF1N-368s~t>tl#Qg^7DYg{~40%V82h zMIRU-DeN>j4rfRT+Xa&<80LqSs3Z+RF*kLvqKDyu$`@B7L=Z~ZGSW?Bt)lyy;sZ?l zCdeX)_NlkK)5|WidV#2r_i7qOX(`*C$^CPkzJXlE^eLskeL&j;mric_ft~965$23_ zx@6aX&X}za&VKw?b#$|}9|AGO-evkT)3<+qH6_@4#Nl;WaP(LEICuXWk&ru$5H>P) z?-WdlQ~rD^Dh>SH$nvn(Rss!#8j^a>OqX ztBl(H$%9gBRbt*H6lnS~mpT(xUN}n=Ur0#8e}|*K6BwP4V}+78y|K{(+~UMpn71$Q z2Nr3SXS#F>0lq&XBo@4_Z!o+4^i;2N&U_U9@dCOyu4J^Fa&CGaC&!R|6C~jD#J~L} ziSUxt`s0hGX8rHJ5WJzD8>^+}RB{YSi#z>r#mvvWUdhJSwfF@l7LZwdl{sqT2OXt=F`o+0z${RxY zBj*VZV-@Q_d$cFAI)0Q+;R%Ezpy_|-aaYUoaj2WPI~zaaorWd)bEMLxs~qKv0&uY-vt^jSKEkHacuHs##idCzXDF>FH>9(y<~LVN zX{`z`KCAMT?a9dC@ZmW`968>(SESAhi^X$o?!+G}{M`PCs?9bYxGLo`r0klw*QJz* z*ivZ5;~RrK;nBmdP-dTmrPGRUyAMKs-x@UIyY8)TMuy9jFYkgGL!-h{iM(qqWvW$% z0~rH$5fx>)bBgi&E3aCb`wIutoI#g*Wupp2B3C74ee6|ZcQ~)VHr!kd1JTGbQfcB8 zr|Gp!7atK1{qMySuM;_Jp~<(%X=x>qa>dh9>W-I24W8&RetR)%Q6C;|@mVBhVeTGS5g(dt z&!FYJLEEKU&~=7IF@r$pHzk(G_h=40dV6u}os17Iw89k2T~Jf-b=z1dY=Vz9_QxIc zE|=F|8ZQh=iS#U`(P24I%E|cEnN16Z8XQkP2>;l`>j8}MGRIn~ZGCWn7m&HFe%(6qc`eex{RC(SH}%aonqhgT4LE64AG(5vF8Ti}#LB(01E{!zZ9TIx^Pf;FDhV4jB<&U--VPr>> zsJ+3M3VZ7DTVi{m#ydyWi24uUZDnM^8-Q#v`DLQ|j7Yt&p2X6k}d__Nal0dxCACbX0 zZSlxcr?^2pno_^(TZ{4J$VNk=v#3H6>2MGc5iB$nw2vopXWB*_@>n|eI|tq-7vY;2 zO6j9f_=ZnRDw1MBUAn2s-*esh?&YmFx&<7wQv=*Rf5*4A$p5wVS~{h?eC zS;aE%T57n=B3c{mo-li5BPIYFm8!rNgV0@#dMky??R}J*P-Nz@a-HTPSTmHHNAG>* zHjhf=>?|wI_Pmz$OmAe8HOgI+W}u+mb$JZ;bYeW527DD=boUkG9wC z12kLxLS(6$&#Pv(@@w>(?lr5MV?S|CC)sTkntB#g;-p@U2w|k5#U?T6YdFm9} z;cyvQ1j7}DG;|2=FNK;9!NaCfdjQ8j^EyBx8k8!cT+dP~=VAe5yQS+jVtme&b-NeZ z@^O#MTEpsGgVU2Yz8AAw9XCj5Oml`=Jkdk)`?%$|^Ca0{;SWB%4`*(6Ji_eVGUs6( z&)iB3O=bM`7>+IZEn%cYzO7}dpl#4VX(&e=z3@P-$qTdW)X&Q5I&OzckFgNLf3G>x zb)%dsIKr@#D!>(O+!VX%6-u*jR30r2M&WiEz(?V|&{EqF-Oq!Lfs}4Y7))fY z1C-3CRpzT{oLd|=@!SVG?H<_ycI)f2SDW#Xdye>-LZ6_aA^1B!LbMZ&mj*>h^Yfvg zKzn@jUgJ-DQ}QFPBwBw5Ik(*iB~D(%%ioc|-Pk%?4<-&sQ(l07#)t9_(%TP^&S4DU za55_!IXaG{9isi z1v7*yyUftn{3PRn=<||8F~r>Dl1T}N7Xa@F6I3+tSZ~2z5qqz9D`KO6%>ni+Y|H%} zzHr$36|oz{E?;ieUqeGPL1!qZQ5stXb#fN#`FPPmZ(jLU{vp04c+e6X7F)pm8H=AN zgXrDpE)sw;G2zb!1V9Vbk*}wIE370!DL_G4#C-E?vgUP`iB>tzPLS`4R6H(-p@ldq z6b&acXnNyyP!QA+;4{#0gA*GR0ZZ-~8lpYU@*-M7G7mfn6cZF!+A2ElUEd+^kC2{3 zkek3)m^390GO*;%-Ju=v8ijP)JaTg`vK0j=0)D|f4d21TWpI2ze#P=bLQNsyaV_=8 zr(abOooWyukicV5f5%N-Sogn4584sh7l3?B#}}ppo=J=F`pJ{*cGotC#rOry^}G&$ zB44^wZ)vBk+{3#$apJ7e?$7eDVl%Z4}M)Dxs zSrSdwzl8i}4gb=O|NMXzgAK5dj`PcgeHji zsT^xyW4z#QpewWF@U+7TH~{YrKXGI%`-i6=yhf|qhkH5^w*HuU=6EsRx+nF87bcp0 z>(}02oTkXB8ase%2!Mn1hJc8)ghMb^3zJ8IZTR!h4sIs}JAR_CL_}cuZqN=heuRX; zg&-b>x_yXVf`{}3K)(6`*dSl7cW{NX)*7B-RuZJ@D5g=_KD*}khv4v|9x!GLWtc>E zE7XW>P}$ypeT`CjyI#X`dE7+g-d&5{D?gYFp1)o{X!i}|D@9C&w8;`x(!LA)9!D;| zVsf3bHLB(mpgYDw4J$qQDa(Sk7)hsW3D4p(T0 zTIF@e0n}iHNT>ZZ{&clj5_~u(luqFcw%r}O8a}$a7Cs1N_Tgpq_5e!^5g||ul0_%PxrEE7K;qK;JG#v2sZ9H;D4h8lY z7MdW{yTc=I=l(r-U(chwgA3E4kE8H@dD?|)4;7oylt!KE|HshDZBLj8zw>4{GE4w$lL z+;vA|z6fLBdbNwl?e;Sr)K(ia{(#YMzi>lTYq8|z^B!JAt$HV8Kt4p7&EY!K5n1xTjyM`(2nED0%HX?2aZ(cIYqt1n*^{Z6F_qqJr$79aay^S4HOD z&=A9;rm5V2w$@G~@W>Ore-841w6?lc@$Y{GzlhW_-fpblv(DF#gV8^K|Dbc&FMJEH z7yQZ(S8g3KZ5D{|9UGiiIeUJ}qhNmNVz^3AYhN2f7!rWhfWrz-Pz`t5Q_TLco0e)0Sa*{mf+@bpVGBy-6D_gPnz(x3D{ ztspsM_R}qK%TX}gzbqp(0krP{EMQMPOzDqBMujA*-_Q!vY09Axz!%;Ue!_4q{J3y5(*FiO0KEwch!f^;1_s;eO(HgMb2B7y&nkq*4@U 
zaO)O6GE7BF2~~d4VUf=3nFqf&E`wmXSpBqxL)oh-9mt4_32DeX^4wCzmihoe&~nfPr5 z?~NZ!^1-01Sp&l!xeqm10MoQQ1Lg)|Tb%LQzWI&`6D zz8#F=H{Goh3kRYHMp24O5nHUJueEy0zG*g>UCJgh!})DIwbypqv6@U|pmQow-K;z( zpFKGL&XGz|@;kV9{@}T(QxHKsYeIov7Ej3Ip>)vw8&3ZMPgtvb%{IG#JWb)UC-93% zjR|KU(6j_V!#RJ+ARe2l{`X$#=-7*n>=ka>s^Dk{H$9nQpEJRYoCzE3zt>DKU5 z(<+NFO(%l!>HZ*^Q=~-}&}(;os4VfBm!~%ztzWb3Ca85`*=}VH_>^zC#Q~wRNB<&- z+U1{}1Ba%rgr+@T-$0iq5f?%t8e&o@YfUt^`g9kICZ2TjZ0(H$J;gp=d@w)mGXN>Y>2&V#sbSo`X=+ijj$b+VDA&JyIasg5D1 z_GOQ@A}#;2>(AdH1=61Os!1KdZqb|fBSg!Dh>sNf8{nIpT$lbc9ohkyjwXz>h+z`t z`Fa`4g4sUp^y*GJ{psPkmoV4!J-kF`g+U{hv$sDiYKXuKL&K#u%$G7iH#dz^*PkOM zbA`3+V-I0d1(kgE)JS9S7uPXOBm1#*F3Hj)7QLvaBn|``wJO7WMzlP+O|rp}jrnq| zod(2%&4>0;-S7NuWt3y`3l$qm>|=K;YSX6o5fInj;;@z+b~~9Dibmg2M$ZzxOxBvP zEmvFQMYAn9Y!!I+e0EQCYHh*Cc50Qc;f)Q{JCa2X5nC|fshQY{l#}P3FQy$4s5PeD z>YTvmgr&7gN#w(YT9{SGW?}p#=6oEPQGFctFHq-?BG!7T^SSmR_;nZfg_h3F$0vLQ0{lk(Uoc$zZ1Fp*5AFMXEznS0< z8XH#t@a8jU3PJUS<=znjC~U{0>2OCsb9DKj%sT8ZRbe;~uutH<-OSR^w%+~5aXJ*@ zx;h z7AJCZAc(7_dB14lf~4nJ*yU%+O4wXk-Kl+y&tav7_0}-6mm@hGHv{b8o0a!=q1Xxw z7=_0?zojgiYvIspvpP)Shv^vrwHAn%s_%AlxL%O394-Wwtu`4VbxYhzC7KLP5XXez z8$GJA8l^s*%!TsX2yY9DEiTlzAvNm&Yu@bR4 zcv58`&+G3y(-nfEOThJah7!qfICBMMoRgGeiNyhS7@g;5*se5OSBxz#FG)=QE>yIt zH^rE56l{M5R52hZ(`d{^_!PiLpZ&p6h@~2~K4X?@3u-u?WPQ2XRh(C$8?`y<1x;?T zVyz>q{*;->oXSvVu?@odtALUeyG%Z{?qna^^-y#N_1(VPPi`9l2R?gj<^*t(q2ETD zbb$ciQ6d6sZ@TCcRRQw=#go5$cdFdutTomhOL<*GAiQ8XfD(jDgLz{%AGfTO%8R~2)o)qeZ>J^Vv_6ASfpnil;~cB}d*CoRy{J7{-Yc%xf118whB9{Ss5V-iIMZ%ZsOjM6w51S}3X1pIe|4rZW`M~CBruJdw}&?NionJ?;0K&IF)>Oy|1 z){{|nry%L!Jn38ozmJJqtosw*a~6vdSAC&|m(A#nmGv7}1!V`ZAN@S1uv8lD5VLw#gzWhgaTrpF*^~;_FyNm-+PwVguj=R}s?o9-WU(_rhjy z@fG$HyvJOomuhFZjF>&vd}=0Qf5D90@9V=A44?5g2$G~6Ji-_B6Ww6`u`sw0x;d}M zS2UuO|B7mVz(5KI#a09;7ns=8y7$~faNrSY5+S`d+RFpI7*g1hcSqCs8CO}xSFKi+ zY%-S-&wiRjcmI0X3EtZUgwXik3*z^wW+aT!)ybY-qWo5mNDlk@L9|s~0Cvt@+ zviCb?k%@mHcI|vp9Zn;Se{U1!a=LhsRkU1SYF9hcpF;00k{kHC{>tqm!Z{MzK<)kf z*MT2=FhGTb2`5n0Zn-H^dGKZz{oS;6e?AyZ?J1UQvWW*VAu=9I9=w#lRe*Ts8Jl>` zs_x;`deMwbnDEQI8ivE-pvhtP>p+nOs8KRFven9jDlo4U+d1PpWw>EUo>UqTv5T>M^{pZb&oYNBe<*Sr2e z7DANlD?`Khmq7so7NSH6YgokO|3EUYSP*VKUfV3{a~2%1gf@4lAl<^a@N{wcVK+)*OqdZTn1t4sF`8gi* z1L;Xe!kEh{$niAV1$WqNwE}|WY))$w)e8tYl_Mt83q1AOrOnJS3=O$3)cVW#@vrZp zqFg@jF~;Eijk`d8g*?aW7Sa-#US66;dIFcw6$f1DkxR%+I)$E9luS1{&VD>xtR+Pz zyc@_?h&_y9)%vj1qKl6YsE8j+;VjHi#T-qS>>t<6nRrG;ezE^~!?fZpWp5{TZ{@^W}E7Wwq7G^W#wT%2HJVqyANd$B8v>9gxg2=*X2*vqj=NBXw6)Ar3E;0-&w(BX(3Rw&wVsELfLJ1RrV9W3FS!`23eXh5UlU8C|3=?M`cG}>%c>Yaqx=Xvdzj=~qI zKP-)>y=J~T47aPVvKqG?R2sJ{ZC!CUXrqC?Jknw8 z<7L5epa9kP$xU*&Zn9FuOZ)`rPjQTjMl8Bn4tTD0lIUak0~-4x%lct`qG<(3f%>BR z^vJr1WBs4#)a@nG8PWKuq0;8P7uVH1t^;T=s1-&Kx(ByJ;g*tR;LsBN5ebb#rQ8;f z@j1fudO}RYT=#CnyL~FZsa>MaRGK5-xZWI+*IG<@O_Z@(ewUnJlua7hUcKAshA~^r zYvth8ph*&PGISsA8wF5&7F-d8xm%XlWa#d?=qY5>?^7VyF`dr6tC{9g1%dr==)a>7 zJW)PZpY-PI@c6)5%twOx?wLw(;e3PZO77UFO5Z?PL^2L=L1I1nP_jQc%b47hP#~50 zhv*cTOX*0m${~XB@!JxcQ$@+o3~drYaN=CdmS&NNV&d7l3e(>ivuMt)t^sOP6lT|S zV6V2uX|+~#cQ&@SKAC;kYJqn&-hHHzd`mXJ45Gs~g#qmtj(e;XumNseYfQ5}5_0PK zpODOE`qe3BYUkRy9U|HP#*}Y~e=P3|x7B=-{8#jg2{B;FY^|jF;&{4Xbj8k9Z*Ca2 z^@s+&+YNIE$2l(2-4lp0>>Sb9=U%sodxtx}@qR9CM=*$ePYEEno{c-i}#He#Bjx3fAAXf-|~o0dB0tG~5qSBS|gw?5bzeF=3sYT{Wx;!h^Ealldg z_%GOqJ?vde!&^Uo54>t=Rxz9PYXk&mk}kNTlj7sv`Muw16|0y$9tTjua(6UN3x=yj zDxNwwDwExA1M&u(5zL)e-{-7$b&Tw9w;of!hu*g(Sw1W)YNSE?mS zv$Z=nmPhP(mN0df6>P`GN1;wqTMquVq{0DwjnziV$_=)wWN;?d?qq3yLw@eC&{j@@ zPhX?H|AEp6P`@|2!NpQ5jzPYUhy(KtDUzxdHY9N;x{Be>djzssT^T#Egp0@+_YxE9CavN9c1hJG+e#8Ij0G(8&9?lg=#TF@T1z^_O7*se5{b-TW{u9yh0W*WI;oYF0bWeT zi%hxF+SzU#63J8MtM+GnzIVXz%pJAi=^5DaO0U6CN%|@ 
z?lb22GR@qZ<@)KK&R+#lvt+q_fC_?mL!KoTQ>pAcnQTbqA}ahl*>v8?vS0pGU?lB2 zVULONQw0>KQ=X;k?bIB%YXLYESE@5X4MO1othT;+ITEn*SvO2S%{VS2Va*wW8(;jT zUf&)0wEA7u+Kv}ZpW57&5|=G&t9D}WKr`>*XMr6y*z67r$kUUK>xM?4|Yd$ zm766tY3a5GLsF3ssjpo-!$v?-jighMlDOj^xE-u-3*O+AlA{-yJlLlZos+y{vRgY% zz23gi4>oL*e%f_gkBIt)?DWzA5#Dtl~d~d3U ziE8ih9zGf1ME40;RcZI6QkyURG&Yk1q_LwF3yPsq?(^}zE7K(VpbPKwvN%nN9-FK4 zi9$juQ^1dDuUHjy0Yl5e5> z5symh2DdmY8v0&ul$+#>&ULxNP{1j~*WsE+Xf@V>tTJL~>mHidZG$)Wd(0Q#RrBF< z)5P2;?g3bKyHxU7BDrQPX36#Vf#xz~G=H0C9)S$i<~sD?9R_e}RuIi-oOKVCP69oM zkgE>p`rH9wrZ@iOYA?C9n;2p~PZ#QNzA~GGK;;}TL0TQcb$!Lp5c*UUODE|r61FLG z$PhFUX6XDjb-~GEoksyWX3>MrBv^{j0pm>*7Dt5g9e2jd}51DV9+Y} z@%EQ9TSj)7I{9+5|?n{5t>x+v0%Qm)o^PQg&Cs@4Pd{lg}Gt4}iFH_K=;DTYQTx@mka9>-J$QkgGsnJZzV*X=ba zG1k3w$%&O=Sj_ev=55OI$HypMgMkQeD&bSIp?<>&7@@m9UR0EbB*F00MN}F;26uL_ zKReZO>0Wx6Ob7|U;`NSyyo_*ZJaLTfs-kyJX|RENT(3i@qMI+%)*ZdxM(1&Z){uvk z`plU$fqiwls*&p#CIkQCZQ_m+)>A8UG{3A1MDMB}*LiOGJoqP33JpO>j6ycx^mw!- z**sljy!!G~a`uvt#blEf(K_|^cvMa}B`B~t6pB04%?8{QV%_t9AdV!2=Ih(sP33~t zx@?fT)PJZ}$x^Yj<+kw&vse-HM2#<%2dmou0d(5v3Socg0um`v+8&Z11>7IY^_5sIrM0?03`eFojse`8>RbqB zaxpA$qyBm7FwqnndHikS zJkEfwa~R>i5j9JXcgu?O_KomYIZ2CZ&0H1@KYihtiQ5?s_;ha5WcYH2l`9H9iS5di zz#}iBI;Z^Vaiag8!01{E2tzG%=+N&EG>^P>vRKb%!*C68H9W`j$>;O{VGo?e6k0g> zE=a)p;Paj#_yWNtm@UDWM7M%h_?9ySc4TGT58`-#?|^040%LrqO^Wq&)!*VZ(>eDX zftcl&^-enWw@Cftl#bwnB$wfA$g91fLO}cm5u?>Et#C@dM4S{bYk3H3ob33{5*%B` zJbmjwS-yGIdVpXUSlkb^a92l8uY*dDyQ{ttfr%`Y!B3wPo%%cZ<{S!k2ijYtu3#jN zD1sD)$$*N&ljbrf0zRe>+@~cm=8ok@p+YG|=g|rq9W&aB11@vk#xEGEQM;125Ql@w z+Qd^JGnP$P(B3{#(c3%z$$YIGQ=nWE)!h&PL=d?xX%OhX;EFtA$WP+7>{wnnU zeM)P@V9WV>8YAZSN7LF#)3n~PLhgiLY3#_?pK&W}3oi->S=d`@(K_#|kR>qH`4 zERo4DQ-iHcanc7!Wn8v`&T0SIY^LVDc^PE|7qSpE5Y+DEG!)Iin=6qTMl%J~GNmJe~1dBf*MnGF!>F{8}e+t=}Q&%qDV4 zES5}KfHyXBsh9+C9ju7MAzKw2eBj^E>VMdSe^em-j~nnnszm?uM+j61Iy%Uxd79}b z2e+P3Om0r;1SUo41mN>ND$Ytfaw@bQRH;O5Bl0Bnq}&x%!pR{F`(!5n$}2AOk}envlJLVjR~t4IV_ zN+lTzG#E?MZFkGHPH%@^Cygiq8_SOLkiWAH1Xsz=gZk!6A0v77` z;h%QKxy3k`{yk&|KXjY>i-2`HcSij<;Z#2x!$Eua*}-!B8+4+jq)wx3M!pFxqaOL$gE+_2&&i5b9zej~!<8jL{E>=}2Fc_p#k2Uj93NoPi zx1)qcfadXZmXs|^&(3Fi%~M6TnyfE>18Uer9H8oP%3qTmd=aUVx^9=#ZIvcPh5P`f zKEQ-rk=6beYXNpa_pc|(Lw8T}i~ z1F9=i62BGv#&!%Hky`)dh5u7M{>b`ZwcY+?{ez3!NeHJ{#1><;ah^%wl*0D?;C}g! 
zx2xOrcF(uGmKj1V9{%vMl&mK#q__0eqj%w1hgp=|-q(B|iDN;6XRTtY&=9C75D%3Z zK3-n-v3mVM3-3`;kTeyun!vy(2MHg}_Vl33q|NrIlsXWuu(v@ujS=?LzIYN* zZ!;b8GJe`sVHGx0#H1&L6}xP4b3?L4|L>3ak4f-h6Gfy&`NA*9lRn-mAC5vRA^Z)w@FQE)) z*owM`%2`6V%@!YhLuNuI`G{5W6YSIV(_g#4D5Kb19LR+IH?9eFh)L;koqxsE5@16b zS&t2Xe?aV(5_{VIxw7h_{4~13S4A&;(r2wAEF@9U1*r>$V->sdson zLd1P%85N^DSFfI)j{Ld(btLtmwanue!#m1{oOy#B>Y#i#!jUt*x#Ec63X91~%q4iEbY#jF)=2*YJ14(mcyCi!U z3h3E&Onz_|*}6XrN5!{&Uq62O%ZaaFp+#ZQUYz6yIxr0--HWkXo6k)XkhVXK<_>c$ znB4~v&_w8mCK|4PQ(hdwPw+md_Gktoo&E(uT7y|JieT8?-X6iH42GDV;0b?s!O-C zQ%`6c?ZsNAwnv*U78cBty2bM)VR4q~o3VlAhnm0Y%h@znHFYiZOQmJ=bG=LpO)Cn2 zKz;iLMpohaLpy#jo8e?whQr%6R_Bj(%#C5Pn9ZUa)`m{-lK^Xz6O#{Scq!eRWto<^7k};eysXh{)9LUIgt*`elj6HRYo3tGA`RB(}@4R8iCbNjg zrhWT4U_Q8`8=dVUjz3!d+y7QrPBrN}t*CIR`b+g+RE);vciqXAXmcL;<=pXw*9Fp zv`72HFJg|lk^2QElYQs;{$IqGUXUf*(SS!PrL%e1oa~~UCnw1@{MAdL7fRtCpBG%; zCCz>8h+cgnIakl98>(T;)9Xxe>U~J5HwvtKND@ILD5SG{9V+T3Ys(S;<1f5@6U6ZDw?vQC9^U(mAKJ2gO4WoWAfqID9Aoi?NfVJWi8!Ad z8+C69tE#U4=`kEGCLO>}-u_57J9QVS?PVQIpf@|)IxHG_k+GgY7H*$WWk(0;*KDFV zx#VCT{E;#|m5Ah?2EpYPTd7D>ZCT6%2AzjScJ8?Fn|!q+-H66BQkiB;$W8J5#)}gs zs*O8-^4p3pBSWZP>o2Idw{!S}eQhbvSe)Uh^x<9~=-qLwm_C-Ub3l!It3QwnLr|Efuuk!({UWtD+ zNC@0u2tjc<%a={-yFBykBO=*$!UqX9%uZq>=`#PqC4Fn0uewhB>ouaQvx7ueEFA=tc_S}yjpdBafu)&0)F=B$Db=e1J| z(bI`S_QunPs}?S6dgZo?lJE&rMTThVjpkB6YAi?wrU@fNT|%bJ;7$@PWMzIgmRiEG zP$fr#eLI0Dv_dpmC2&!ItXDX2V-2w-U}3=3WXiH*%b6pWbvGyBCv^G(#O>VJ*hmID zM%(~N=($L1zfZ0x0b8@fp>@yi57}AOT!VQC`ETJNm!$OPOw)={DE9Wnu$)7eh7?A- z6*YB!v8kjMg@mq7gx{Awzw-7M`QU37vkBeTP6r@>?`K>Zg+G=w;&FivAsNSIGxzZp zgz;`|+H2>QwRoL<@yi3IQ$A%Kp{-sH6mL73Voj-Q=55X(f$7zssFTa zB`7~gIBy&S_QP^Drn*TZ_(5v}CE4Q|a#Xqy^ ziNmZ!+IL8Hp>tmkV=+YTJ|YT<2$4h^1+Iu+r3_vr^Rqz%wZH3#+U2OZ+)U4kZ`x6e zhvNZK^5NQ4SZv1LC)Jzrd$yw!+=t?@3Ghw#ZRbQfVOl(!GyG!vuYP4+o+QGJ6m}bY zJwxNP6$p~q%=?Pl{UZ5%B#IsWNOWEf#qWWlm3ORe3D zbnRZDxQ$rIN55aqXM)#qj>?PjHT1g#U=7lRAaFBOK2o#MC7MDz-`J2 zPuGK=#jDV}aje^xr_i_>l#Gli0>wC=vlp5>e{81Jv`j=+tJW_) z5By4{^MdqWijE0fdM(=nt=A{VCqnP!kPwQ}e=;(j?sL* z`m1=f*GGbO=N;02C;0z2JP5W5{=OSvK}YMS2y-H(j4&20G@IhVwkvLKW^{81!GS@f z`)cP*LM~G1!wpQ;Xlv`48EKUiNe(85d=A&ag2I1&);jF*9l(A1Cx=iqD-&5PkB(=GbrSzn2}&S$?2HBRyBlYz2?VCJ2PLx!`OsPId!R=yQG= zW($alScE$~E=-X5oZmKfprbshy7%K4s3`=DSfp58*=RaH?$VO37g#WpkUroi?2>-f zlT)TeGdd6FwIZEbo?WBK{r>kaU7TRQ$5Jvn9MJun(w@OYrq$htIrDe2N*jXUN*POU z*80PDF_jGU6jfhTDm0I3?U14(a?Qr#t+YlqOSBIQ*Bd~e)C8Tajf~s#FX_qt!^b~N zFtdN~dKKDHj|`4u$otqt5Kk~?uMN|`nc-@ba#JbEu>b#;@>_IEiKP32 zo4Uo0Bys~d*UyzWr%^XOOrf0JNcYimh^b6Ho=g*8UwG(I1IwaNAXn}r`uvcJb>@b~ zPL?~CDbi5FL>a&A3+cp7aspE^lZH5?(WT;mt7U&|Cfbm26wbONJMhn5Yemut1tM%d z4O)mVT5h2BfMQ8s7F?EoHWrK%u8Vgd-6+SANKrDl-E*p;gAmeWH$t;EROm;X7Iqgh zGCaR4IHy<{D4Y!SES(4(UYat^We|MESRNCgf58%-I1|J5$ZF6~tqm<`C;GwN+7r>q zQEZAO>S+eYa9fxL*B`uz@=?kzXEn6eigJ5CmoJOYs=Hdp@1e;Z3LpP=u#O-MjyLcy zj;3_o1dg_hU?EvSL9URUKvdvb;ZZbKeXsRNxs7jHCk}KPKHkGJF743Vzaam;d9sK5 zF;7xX{$jaRi?7lN66IZw}YK%@-U_ zP7V>mf8E);Cp5F6QJhr2vnCuJO9_~%`83JcIB2RJ!$YNAs1^X)07#CMUYGKO`(5IGE9BNx zd_$Ea>GfWxoc)4yI~Ag*fASM2nb+s*K?QocmVv?@ZuDyJ$T)gTHK0g1%P9vr!GgK& z!1w0dD32}}*;hW}2T!5xqrSezs2n+wN|EN$hx%Od7PC9_T z;}2rFp6>Oz4u42vzk0%O8H`y?hB$I$>mUy{GAZw6W2uqa2s&YtW_PFKy!VPdHd(9M zYqMk&QXkN&k(u?o((?ZZtM%W^HUO> zmgC-gH03t4TCK}o!oH_G)lDP5nuqqlLr@%r<~soni4OgidYGk@-+q|hHD4+v(QLBP zQEy<@`@d&OG=ImS4+35t+>dOPnk_3^rEM<95o`iA%xP*HXy^%O4q#o#RF3FNO>4KC zvin~LNa#N|rC2!oEW?1G+-jP02CvAh9Enbf7mut#OuyP~qf3rs{Jk&Oce=RZ8QXIR7 zv>~sp4nHKh9FUR~#(Ax8_twt>akgR*s9Fat@ub^oJ}z*yRL9wE^qKZ=wEJh8-4lxx zT@kuGBWRZ|1lT18?8{h7?etd&@vL#Ss<3@QS6})mTIH$`IVqH^M31b&3_2L$Gqm;{ z_xi>sUXAVT&0{G1Ny>EqZ6lhyXP_cw`VOZZx9W+6J?c`R2~UY6H9q6pC&1pgq=Hx< 
zVS!@HD&x=Vir7dV)h!|A4)fI{GTp*|@4Zb6S{Je3YF8Hq_R4y(jPFGj80hc6ZFZ+xy*@@N}+p z-@QPD-hZ|}K?Vrb1d!(Kp1&wD1C*?y4zC{)HbYk5I9`uExHQ1?4lmz8&`3SmP_g{Z zO3i4S$LVTewSmd}vcUuP9)%jR%@QTicNL899;Ryi3o?R0dZ^1IH5q7!^PSUxtAe3C zn~G+8GAL?BP82_zjSm!sMgKGWT}KI;B1km_1#cgp^Oa&v!TD2~YqUkf$VT5C%F78+ zRJ92`@I_N{AH`;VYp_fw$Wab|q5sS&du|3neljVZnYE-~3k?OSDkPytVW6ntG1_x~ z=K{i+9uJjHqWM`5(bl}JQn(!^)-~1Df+7_N^4x5w^y|!C**yzr(p459X7GH6v*Yqwt$OA<{t2oh(L+ccFSK)(TN*nD8wxIdh znyvl}1u@;rX`9k@i#W~{KM0B{9uK<1o5xNdeZD^($k21)D=wce4dK|x5dA2CXH}zk zmCycbygof)=tE{_Olw)$!D8jyG&||2<#ke97DxOOLK`6!>q5FHsH0QiP6QZoS7(3j z#jX2AI`i0}RvQua+F0Q{62uU3Z8H*?HFC+Jmky^g|KXK7?I={wJE}(D86gx*)C(YJ zrw%id5CS~^y6Fqi$z=4Wz+8B{Z(nlvmaX<{K1HkpIkB6(lozw%+_u=hw&kza@V_c& z4#KW`-F5fIVq487St;GGeDU}P!kVN0kpRTmb?_p=BA?C{Q^U?%&Gn~_`I3E*nWJ18kyw3haAC-nEi{}qUHl%+PO%x|-W zYMb3MMuS#EYVD?Uw`a=|W@1?h>C(nZQ_;ktYq1#D^l!dzwIq{rJ~CuG**R=KmVOFy z)0ObKUGd^mJ?Of;A9IT>YrPy);gxE&(`VslJ753NwbaCI5(4Cjng)lgi!jOwE>!8j zPKbS}#TKbYO0Cfi;ge#UtJGDg#GgBglAkj=c*aYOjR_YDZ9Ws5c5h3)ZgSG%RrMnh z;N+B~lk~xdl(bx0RCg2{f7$!bl=UxLM1H-|s-qd2Y70pIe4}y}0aN`m2zz;C9l(aL zM$<_r0vyb?P2=ETQX1Yb#kbGwE_vLa&uvyhA|1LjKpOG5GumZk{fFx;wfi6)9++5Mx#$EBr4@RG=XPwQ@NIW?M&AI^u@Ug`RK{zWEoDYYAb7blu^UO@Gz5YP5job6bKvtpcpOv}A zD@c1nWu*FO87Q{5Wc*!gYv17$?KR8Igk(UxBk1czjz!7@`o7T(a|mM@bJbd~m{Q0! zRD&FWahWalIiwxfC7r%{@u;YOTml=TUc4n$DeM##U$37)yo8-tofIj>Wyfblcbtn1 zG?NenoDG_`zA7%IINh7(a3!7dVx#V>p;12S)F0}>t|x9w6lxccE1a*^6-%?tGMTTQ z)!R~#DP06O_j;W}&*1#8h5GF@+83h7fC>A6Kq!X<=cr6}E1?QW%fmgs(~;t?HUO$H1`3nZm_!|-Ap0Msjh|H)5nIDj=WEyTZzF$7HStSwL6ruzFr6$IbCo@ zIYb3?ZY+Bgy2XMZi5Q~k;;XdHbT*=b(Ab&cD}Uw#>KY*RV3usgi1l;}X^)Y|`z-Rn zA2HI`?Gb+*JNdpsHOe1S>X*}`bw5H^sm7DVR*?4nAty|%yPpitw2TQR2H^7d!uN@( zvf@|Z$&|t8u|a&Upd=>{s@O#!>|%|?s_DntmihidW|y1a8YX=gLNC>Q(4JU)1@NTn zf+rNL4rao8Y1A@pir(crX@`{jNE%A+f1!dQs1AcDS@`<}-$zKOH&JO2{Y%#C9(SF%O^C zpb1K3$h#WvX<^X3IKG|ETWj>hNZu^ey50S#I=9d6nAnwU7IV+0u_iL8Ov8lfw4U1a z*k_#tjDQQ%7*Q{o6%LHzy8Si5egZPX)s1*8IrqWuISE3E71*w?!rT{SrPFN!_-o#T zknH&LO)>W=oL=TEr~ry^!HRj!dvP%dHT&)*#TnzS&K!z-D^_viOPFhE1&Q4f-5&}3 z&mzdm?E0)w;vN8+s;e>8Up3{1Dl>%hfIy$}Prg_t_@}_LT@X!k{@hp%E&9O#$$0X; zJFhPP2INH0q{dt9mOiC8OPfvv>Qg%-;+qOfA>* zd49&aARaI4OpY0;5 zad0T98BY=2@5z3j84c+WYOS(|r1E{|go9utd5zi^O54<)2N-hDEAYK~>kKUei-qij z5^cJQ^e(=^2pIa2)lxQ0LF4mdsRj}orMzX`=q{GCL{_XGDLSMVzXaYbFDuJE{>bEF z>jB8Z$S)KnC^K*<8Ps7tN0KQZQRO zO?0Cxb~O2i!Bs~vr}HTzyX|^V%YiKHN#(PmM$@>b8`8`0*mIxTCiY6No*%d=E^}M| zL^~eL zL4-IEkjmWqU6P&Ts~QrKIEbCjAw>0x9ohVY(+tyOEGG!{vOOjo_(|5n=;DI5O^-2N zzCmy0;|BQ<1!d?rvXa8y4nr|c4-YG-s2LWV#mPy|VO;e6|* zUu-wOii}%!D&O!5pqR8lK4-!1gxel3-ggRB`n246evXpv6L!~_S{G*_=8-lGaX`bO zN3x|0LPtr`XAFuxjnkh^ZNy4+l|PbxI85w<_^-bu9(vOh0mCicSc$#FCDuC;at5<+ zSg-z&t#vslL2nuBI2eUFbyi>I&IWM0RckhghB?_Leu}Y3GWeIsLJGEbm(=*hiv%Ln#%}#=DNYw?=RJkRk~B>NLW0A zh?c}2rBsTIX6B9_3`d3@Zu`Xc29nEdLazX_f397q?tAjx2Glp*Z|DMZ0+r>@|0{H+ zvVfSrp_r}PR+dAyT4d{Re9N^`rZ(5ph>t!wL246Z$pK>rn;Ze~Rs&1VnMHa4JRYdc zms(n6kBR*y8ZTUr;)ZCvJ5uixwZ3WeA!wXVi}BAw3tgO+w7Qv3pH(a0Ff*I&25A^N zX-8@N9^8N?XX1Za+rkUqfKU0hk)PEcpY76aNAWy{U5hr_S*g3pz=x;|a639uX3Sk$ z?}VaZU74Z0Am(>31ANZ6NJCz(?u`O(I?EbYTZv6|0l=IvRv3Nm^}hMqVR`Y1=iGu~g>_-ugCN3rlF3J~;w`pm%JKJ?<` zbyhO&CXsVj(JY>=(}A&oIYVLoNayK>-ZCuIuN0yIfye2!U}E&6(~$`>E-tECEbbhT z**zH@Iwm?I4{jnnpPy9XKNdp|+S>Yz$$LTV54JZ>I1^Z@X9M1rpG^|;$ETs@N;K?) zZ8e@v#11@n)Q{QEpiI3L90)Ab+y1BV+2Qt5#M(R#PO%K-AYY;L*9_l$&pO8@Nl>D8 zWF!ZdX1wn6u4O#OL}XsOs-5{9d2N=oXl=24@(Nvaxk{2S?d*<5JIu%yE2`!*6Uky+ zh9LT|F-4&tKuY%zaHsX1_wtLOtzO^wE{`J6rYql>WGv<5KZV7oI1_9L1}At#YJao? 
zx5RDH4^6?9nA;I!(y6>wG)lcXiU&)WmPeKgou4D|cvQr0Qxgtl3kkx~7qV7+6X_Fj zRp!%u@}UvEa|rCRMIQs6$G$&;4uw#h9e<9wTHar)(iZ4Epyd?e!6wy<_a&KdNC>)w zApHu^7$#iOw>s%5(_#3UI#&xZNDqR#WoWyDT0%AdfQL#ZH66f0L z#hS36#1N<4-`#prKAv`ea{nuL+nez}tXR%YG!6zuy+tv3)*y=KCu_3$O; zH`c%sPiP@H#IA$Qp^kQ7KPN#7aO8J8A*relE3p3vA*~`c)_HkbjZTvPr>+L;H}*i7)|s zoOw=w=1?Bd;}PQ|E<F0g{0U{0V z?{F1Fn6sfOctAO~>E`goVu^Xj3EQJRQYyRt^94HE1W7E-YfnK)jb>Yy?clP(MS;MR zc94-In`)2?I{%ly)!|QskxIKX_i~m|n5SWOQsE2(5&VtNsQr8aBJtPXNiP?XfLisg z`|_w;$R@M^2XrcR_^j1m!3G%PWA_(Gp;Hq#J`Wb#L_o2=X6A>;K8*_-~uR0f%DYdrA1OTRLaou>T)-^S{P` z6rUh)o|yRU!~X#R`EMQmM@yicC;b2M$N%6GOnucmY=QVf%kbZ>#sB6|4%u_S{#FqG z-&-;Pm!=A&H{xHn{Cf%i|ICa3pd|d)4*p-egg=lU?@rNQAD@VpZVw8d-fui=_H9A` z-Z$SO@J=6ucT7|bGpb6gl7B;7ygng&zkhk=xPzH}{c@l|nmyt#-q-LCEQEmI^3>z< z!mP``C3@PM0#3X9-zq%v#J3J(wyNb1incA3OucD+Jk+N@?!oqUVQ@e@CvAG}DKoZj zeoe-*ORJFY?f|RZrK>u)?VXUCG&X?`x*L9m&?dH%T2KZ;5qV|Zvo5lFvqesq&$E;@ z;^vU3b*{ZW^akZKm!N;gbhiR1LQY38iKf@q(<#DMA_2*q-6W*ld&JN02T!LDHKt=w zmp<=kmNUdbt+-DF`doK+hvzSoHKwOf&8HtoGo-SCX4m}J{a4ES|F%PXVsII2URX$# z3%-~cTJoK9h$rswZzv8|Q)Wwzu0)<6&p6k~l&~b`iL7D{ySK(mEGBTCPmkZ~&?jMx zv0V8ROHWJ~0;Jmg|BlnIuzoqZo!t>pr`u!at6#b848%^429~}r;o2(EuJ7nJoO)D{j{alHg(?!|lP9L~OU2YJ1 zg8`63qq}i)q5~@8nl~v@TT+JtG^k*GzMrl0AT?&sN}CKlxTiU7JQ=v;qIGzN>o6QS( zn-|=hlf%;^={WrH`l_B5_;2tVH00*pdO@}cirqsO+8`or&EYo($SJ$%MZ8t$T^KaG9z8|Q!%G=B*t0^(I#*0Yw^T`8aQhegTtS5oVWs~?xINs;&O1bfT zKLovOe{z$zZ>Sni`BcXnfPkm8tK#XF&3d|4X&ik1_vg-gr*Z$O{GrTJR;NRedbW2q z<-|!AxVST>Hy9|Rl>bakKfb!YAEoh^p~qyGEp)iQf{N7%n@!@C&9-}7VRpVJ+12*(HqU{C{8rN0z1i3EzR*q>4f!D|}Jzf-W^Cc#(KYU;GJ`StMnl@exh zhKL_owM;8c>e#Sj^yI0k{~W77fnLH0uED^&Y|eL)id4M>;Ww$o1p@AVX$;vMQuBsV zy&R$f@3bHBC6b@R6w7i(upLYkinjl`dVFGE`kfWJQr=8#5ajcP6;V-#)U!xQM_YwN zpU{d17Q`BaWaCi`h-Is!dT;)DD?~sQ){K>nCNK$#gknlp3T0^#bqUnbd<;kyj;5@s zP){Rq>NA9@nLH;SL>R~+xxEr)&- zJe_KySU~M7g8S;PMqeNprv)!Z&e?fsRz)3M8YFX?zZ;?VUeP2+SdARLp3C#gvNC@A zjv`$wh0)NR{388Zer+;c&X1n2p-;%dzr9O|f&)f+Gd}1+=?#ZKS1Y0gtVzt$QMNV; zp@}6k6od)fBQdsoC$4G9N8-tak@AeQ7j!E3zoAx(Mz?4MZn(V?L**%1A=HdiIQNzV zT4hpS@FNxUyBG8yJ>dStUm2CnL(vehF1_DxF^U1_PyE=UW zJ}fN*v)$%WPw0Q-lSzToD{MO~FMjY-l{>~Z2z(9=UO5ELpBTg)JEn+dQL1?wyjKm$U?zH}^kcO`rh!hxt z{8w>oAPf-jhQR#TfcjT0g-C(!W283G7L)!}NZA|oiF2~=GvnV?(*GJ8{`Vs%ME)E4 z7u!Uf0v`zf?R@`1=KbgP+XBEi+4fI!>HSwN Date: Thu, 26 Sep 2024 11:30:43 +0200 Subject: [PATCH 20/37] Fix #1461: test for bridge sampling (#1473) * change test_bridge_sampling to laplace * increase flaky --------- Co-authored-by: Paul Jonas Jost <70631928+PaulJonasJost@users.noreply.github.com> --- test/sample/test_sample.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/test/sample/test_sample.py b/test/sample/test_sample.py index 252afc0fe..589b336a2 100644 --- a/test/sample/test_sample.py +++ b/test/sample/test_sample.py @@ -929,7 +929,7 @@ def test_harmonic_mean_log_evidence(): ) -@pytest.mark.flaky(reruns=2) +@pytest.mark.flaky(reruns=3) def test_bridge_sampling(): tol = 2 # define problem @@ -965,5 +965,7 @@ def test_bridge_sampling(): # compute the log evidence using harmonic mean bridge_log_evidence = sample.evidence.bridge_sampling_log_evidence(result) - harmonic_evidence = sample.evidence.harmonic_mean_log_evidence(result) - assert np.isclose(bridge_log_evidence, harmonic_evidence, atol=tol) + laplace = sample.evidence.laplace_approximation_log_evidence( + problem, result.optimize_result.x[0] + ) + assert np.isclose(bridge_log_evidence, laplace, atol=tol) From 61c912d56b4e435105da9e95dbdb31ae1303a807 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Thu, 26 Sep 2024 14:03:32 +0200 Subject: [PATCH 21/37] Use 
From 61c912d56b4e435105da9e95dbdb31ae1303a807 Mon Sep 17 00:00:00 2001
From: Daniel Weindl
Date: Thu, 26 Sep 2024 14:03:32 +0200
Subject: [PATCH 21/37] Use cloudpickle for serializing NegLogParameterPriors
 (#1467)

Cloudpickle is able to handle more complex objects than pickle.
See #1465

Closes #1465
---
 pypesto/objective/priors.py    |  9 +++++++++
 test/optimize/test_optimize.py | 16 ++++++++++++++++
 2 files changed, 25 insertions(+)

diff --git a/pypesto/objective/priors.py b/pypesto/objective/priors.py
index e460c1572..4ffcdaf6a 100644
--- a/pypesto/objective/priors.py
+++ b/pypesto/objective/priors.py
@@ -3,6 +3,7 @@
 from collections.abc import Sequence
 from typing import Callable, Union
 
+import cloudpickle
 import numpy as np
 
 from .. import C
@@ -67,6 +68,14 @@ def __init__(
         self.prior_list = prior_list
         super().__init__(x_names)
 
+    def __getstate__(self):
+        """Get state using cloudpickle."""
+        return cloudpickle.dumps(self.__dict__)
+
+    def __setstate__(self, state):
+        """Set state using cloudpickle."""
+        self.__dict__.update(cloudpickle.loads(state))
+
     def call_unprocessed(
         self,
         x: np.ndarray,
diff --git a/test/optimize/test_optimize.py b/test/optimize/test_optimize.py
index 3a9098662..160e5baa9 100644
--- a/test/optimize/test_optimize.py
+++ b/test/optimize/test_optimize.py
@@ -524,6 +524,22 @@ def test_ess_multiprocess(problem, request):
 
     from pypesto.optimize.ess import ESSOptimizer, FunctionEvaluatorMP, RefSet
 
+    # augment objective with parameter prior to check it's copyable
+    # https://github.com/ICB-DCM/pyPESTO/issues/1465
+    # https://github.com/ICB-DCM/pyPESTO/pull/1467
+    problem.objective = pypesto.objective.AggregatedObjective(
+        [
+            problem.objective,
+            pypesto.objective.NegLogParameterPriors(
+                [
+                    pypesto.objective.get_parameter_prior_dict(
+                        0, "uniform", [0, 1], "lin"
+                    )
+                ]
+            ),
+        ]
+    )
+
     ess = ESSOptimizer(
         max_iter=20,
         # also test passing a callable as local_optimizer
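
The practical consequence of the new `__getstate__`/`__setstate__` methods is that prior objectives survive the pickling performed by multiprocessing engines. A minimal sketch, reusing the prior construction from the test above:

    import pickle

    import numpy as np

    import pypesto.objective

    # same prior construction as in the test above
    priors = pypesto.objective.NegLogParameterPriors(
        [pypesto.objective.get_parameter_prior_dict(0, "uniform", [0, 1], "lin")]
    )

    # __getstate__/__setstate__ delegate to cloudpickle, so the plain pickle
    # round-trip used when objectives are shipped to worker processes works
    restored = pickle.loads(pickle.dumps(priors))
    fval = restored(np.array([0.5]))  # the restored prior remains callable
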
From 951f0db2921a4c8b3cc63edd7599384eb13278ff Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20H=C3=B6pfl?= <66028655+shoepfl@users.noreply.github.com>
Date: Mon, 30 Sep 2024 15:38:54 +0200
Subject: [PATCH 22/37] Added HPD calculation to ensemble (#1431)

* Added HPD calculation to ensemble

The ensemble from_sample method has now an additional rel_cutoff argument.
rel_cutoff allows to cut the posterior to the alpha % highest density (here
non-normalized posterior probability).

* Update ensemble.py

fixes print statement that was only used for debugging

* Update pypesto/ensemble/ensemble.py

Adds default value for burn_in

Co-authored-by: Paul Jonas Jost <70631928+PaulJonasJost@users.noreply.github.com>

* Update pypesto/ensemble/ensemble.py

Fixes wrong default value for the ci_level

Co-authored-by: Paul Jonas Jost <70631928+PaulJonasJost@users.noreply.github.com>

* added test for the hpd calculation and renamed rel_cutoff

* Forgot name change in test

* Integrated comments

---------

Co-authored-by: Paul Jonas Jost <70631928+PaulJonasJost@users.noreply.github.com>
Co-authored-by: PaulJonasJost
---
 pypesto/ensemble/ensemble.py | 88 ++++++++++++++++++++++++++++++++++++++++++
 test/base/test_ensemble.py   | 47 ++++++++++++++++++++++
 2 files changed, 135 insertions(+)

diff --git a/pypesto/ensemble/ensemble.py b/pypesto/ensemble/ensemble.py
index ecd3d7b85..5446e2103 100644
--- a/pypesto/ensemble/ensemble.py
+++ b/pypesto/ensemble/ensemble.py
@@ -555,6 +555,7 @@ def __init__(
     def from_sample(
         result: Result,
         remove_burn_in: bool = True,
+        ci_level: float = None,
         chain_slice: slice = None,
         x_names: Sequence[str] = None,
         lower_bound: np.ndarray = None,
@@ -571,6 +572,10 @@ def from_sample(
         remove_burn_in:
             Exclude parameter vectors from the ensemble if they are in the
             "burn-in".
+        ci_level:
+            A form of relative cutoff. Exclude parameter vectors, for which the
+            (non-normalized) posterior value is not within the `ci_level` best
+            values.
         chain_slice:
             Subset the chain with a slice. Any "burn-in" removal occurs first.
         x_names:
@@ -594,14 +599,23 @@ def from_sample(
             lower_bound = result.problem.lb
         if upper_bound is None:
             upper_bound = result.problem.ub
+        burn_in = 0
         if remove_burn_in:
             if result.sample_result.burn_in is None:
                 geweke_test(result)
             burn_in = result.sample_result.burn_in
             x_vectors = x_vectors[burn_in:]
+
+        # added cutoff
+        if ci_level is not None:
+            x_vectors = calculate_hpd(
+                result=result, burn_in=burn_in, ci_level=ci_level
+            )
+
         if chain_slice is not None:
             x_vectors = x_vectors[chain_slice]
         x_vectors = x_vectors.T
+
         return Ensemble(
             x_vectors=x_vectors,
             x_names=x_names,
@@ -1253,3 +1267,77 @@ def calculate_cutoff(
     range = chi2.ppf(q=percentile / 100, df=df)
 
     return fval_opt + range
+
+
+def calculate_hpd(
+    result: Result,
+    burn_in: int = 0,
+    ci_level: float = 0.95,
+):
+    """
+    Calculate Highest Posterior Density (HPD) samples.
+
+    The HPD is calculated for a user-defined credibility level (`ci_level`). The
+    HPD includes all parameter vectors with a (non-normalized) posterior
+    probability that is higher than the lowest `1-ci_level` %
+    posterior probability values.
+
+    Parameters
+    ----------
+    result:
+        The sampling result from which to create the ensemble.
+    burn_in:
+        Burn in index that is cut off before HPD is calculated.
+    ci_level:
+        Credibility level of the resulting HPD. 0.95 corresponds to the 95% CI.
+        Only values between 0 and 1 are allowed.
+
+    Returns
+    -------
+    The HPD parameter vectors.
+    """
+    if not 0 <= ci_level <= 1:
+        raise ValueError(
+            f"ci_level={ci_level} is not valid. Choose 0<=ci_level<=1."
+        )
+    # get names of chain parameters
+    param_names = result.problem.get_reduced_vector(result.problem.x_names)
+
+    # Get converged parameter samples as numpy arrays
+    chain = np.asarray(result.sample_result.trace_x[0, burn_in:, :])
+    neglogpost = result.sample_result.trace_neglogpost[0, burn_in:]
+    indices = np.arange(
+        burn_in, len(result.sample_result.trace_neglogpost[0, :])
+    )
+
+    # create df first, as we need to match neglogpost to the according parameter values
+    pd_params = pd.DataFrame(chain, columns=param_names)
+    pd_fval = pd.DataFrame(neglogpost, columns=["neglogPosterior"])
+    pd_iter = pd.DataFrame(indices, columns=["iteration"])
+
+    params_df = pd.concat(
+        [pd_params, pd_fval, pd_iter], axis=1, ignore_index=False
+    )
+
+    # get lower neglogpost bound for HPD
+    # sort neglogpost values of MCMC chain without burn in
+    neglogpost_sort = np.sort(neglogpost)
+
+    # Get converged chain length
+    chain_length = len(neglogpost)
+
+    # most negative ci percentage samples of the posterior are kept to get the according HPD
+    neglogpost_lower_bound = neglogpost_sort[int(chain_length * (ci_level))]
+
+    # cut posterior to hpd
+    hpd_params_df = params_df[
+        params_df["neglogPosterior"] <= neglogpost_lower_bound
+    ]
+
+    # convert df to ensemble vector
+    hpd_params_df_vals_only = hpd_params_df.drop(
+        columns=["iteration", "neglogPosterior"]
+    )
+    hpd_ensemble_vector = hpd_params_df_vals_only.to_numpy()
+
+    return hpd_ensemble_vector
diff --git a/test/base/test_ensemble.py b/test/base/test_ensemble.py
index 12089af53..9a2383448 100644
--- a/test/base/test_ensemble.py
+++ b/test/base/test_ensemble.py
@@ -6,6 +6,7 @@
 
 import pypesto
 import pypesto.optimize as optimize
+import pypesto.sample as sample
 from pypesto.C import AMICI_STATUS, AMICI_T, AMICI_Y, MEAN, WEIGHTED_SIGMA
 from pypesto.engine import MultiProcessEngine
 from pypesto.ensemble import (
@@ -224,3 +225,49 @@ def post_processor(amici_outputs, output_type, output_ids):
         progress_bar=False,
     )
     return ensemble_prediction
+
+
+def test_hpd_calculation():
+    """Test the calculation of Highest Posterior Density (HPD)."""
+    problem = create_petab_problem()
+
+    sampler = sample.AdaptiveMetropolisSampler(
+        options={"show_progress": False}
+    )
+
+    result = optimize.minimize(
+        problem=problem,
+        n_starts=3,
+        progress_bar=False,
+    )
+
+    result = sample.sample(
+        problem=problem,
+        sampler=sampler,
+        n_samples=100,
+        result=result,
+    )
+
+    # Manually set up sample (only for testing)
+    burn_in = 1
+    result.sample_result.burn_in = burn_in
+    result.sample_result.trace_neglogpost[0][1:] = np.random.permutation(
+        np.arange(len(result.sample_result.trace_neglogpost[0][1:]))
+    )
+
+    hpd_ensemble = Ensemble.from_sample(
+        result=result, remove_burn_in=True, ci_level=0.95
+    )
+
+    expected_length = (
+        int((result.sample_result.trace_x[0][burn_in:].shape[0]) * 0.95) + 1
+    )
+    # Check that the HPD parameters have the expected shape
+    assert hpd_ensemble.x_vectors.shape == (problem.dim, expected_length)
+    x_indices = np.where(result.sample_result.trace_neglogpost[0][1:] <= 95)[0]
+    assert np.all(
+        [
+            np.any(np.all(x[:, None] == hpd_ensemble.x_vectors, axis=0))
+            for x in result.sample_result.trace_x[0][burn_in:][x_indices]
+        ]
+    )
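
A minimal usage sketch of the new `ci_level` argument, assuming `result` holds a completed sampling run as in `test_hpd_calculation` above:

    from pypesto.ensemble import Ensemble

    # assumes `result` contains a sampling run (result.sample_result);
    # keep only the samples in the 95% highest (non-normalized) posterior set
    hpd_ensemble = Ensemble.from_sample(
        result=result,
        remove_burn_in=True,
        ci_level=0.95,
    )

    # one column per retained parameter vector
    print(hpd_ensemble.x_vectors.shape)  # (n_parameters, n_retained_vectors)
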
From b1f6ea8003c8bcdc45d7043aff5f853b27acf230 Mon Sep 17 00:00:00 2001
From: Daniel Weindl
Date: Wed, 2 Oct 2024 09:24:29 +0200
Subject: [PATCH 23/37] Update Python requirement in INSTALL.rst (#1479)

This wasn't updated in a while ...
---
 INSTALL.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/INSTALL.rst b/INSTALL.rst
index eb796aab4..e5b75dd29 100644
--- a/INSTALL.rst
+++ b/INSTALL.rst
@@ -5,7 +5,7 @@ Install and upgrade
 Requirements
 ------------
 
-This package requires Python 3.8 or later (see :ref:`Python support`).
+This package requires Python 3.10 or later (see :ref:`Python support`).
 It is continuously tested on Linux,
 and most parts should also work on other operating systems (MacOS, Windows).
 
From d24db86e212a09fc536627fbf99ac45583e8948d Mon Sep 17 00:00:00 2001
From: Daniel Weindl
Date: Wed, 2 Oct 2024 09:38:11 +0200
Subject: [PATCH 24/37] ESSOptimizer: Fix recombination bug (#1477)

This fixes a bug in the recombination step of the `ESSOptimizer`, also used
by `SacessOptimizer`.

This will in particular improve performance when using scatter search without
local optimizers.
---
 pypesto/optimize/ess/ess.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/pypesto/optimize/ess/ess.py b/pypesto/optimize/ess/ess.py
index 479c67c61..c53b56837 100644
--- a/pypesto/optimize/ess/ess.py
+++ b/pypesto/optimize/ess/ess.py
@@ -443,11 +443,15 @@ def _combine(self, i, j) -> np.array:
             raise ValueError("i == j")
 
         x = self.refset.x
-        d = x[j] - x[i]
-        alpha = np.sign(j - i)
+        d = (x[j] - x[i]) / 2
+        # i < j implies f(x_i) < f(x_j)
+        alpha = 1 if i < j else -1
+        # beta is a relative rank-based distance between the two parents
+        # 0 <= beta <= 1
         beta = (np.abs(j - i) - 1) / (self.refset.dim - 2)
+        # new hyper-rectangle, biased towards the better parent
        
 c1 = x[i] - d * (1 + alpha * beta)
-        c2 = x[i] - d * (1 - alpha * beta)
+        c2 = x[i] + d * (1 - alpha * beta)
 
         # this will not always yield admissible points -> clip to bounds
         ub, lb = self.evaluator.problem.ub, self.evaluator.problem.lb
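
A small numeric illustration of the corrected hyper-rectangle, following the `+` lines above; the concrete numbers here are made up:

    import numpy as np

    # combine refset members i and j, with f(x_i) < f(x_j)
    x_i, x_j = np.array([0.0, 0.0]), np.array([4.0, 2.0])
    i, j, refset_dim = 1, 5, 10

    d = (x_j - x_i) / 2
    alpha = 1 if i < j else -1                   # bias towards the better parent
    beta = (abs(j - i) - 1) / (refset_dim - 2)   # rank-based distance in [0, 1]

    c1 = x_i - d * (1 + alpha * beta)            # corner beyond the better parent
    c2 = x_i + d * (1 - alpha * beta)            # corner towards the worse parent
    print(c1, c2)  # offspring candidates are drawn from the box spanned by c1, c2
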
From b091d4605979d0cab640e1b0653b6ab406c6e6e6 Mon Sep 17 00:00:00 2001
From: Daniel Weindl
Date: Tue, 8 Oct 2024 06:43:25 +0200
Subject: [PATCH 25/37] ESSOptimizer: Fix bug in go-beyond (#1480)

Fixes some issues related to copies vs views, that stopped the go-beyond
search prematurely.
---
 pypesto/optimize/ess/ess.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pypesto/optimize/ess/ess.py b/pypesto/optimize/ess/ess.py
index c53b56837..a9cad7b36 100644
--- a/pypesto/optimize/ess/ess.py
+++ b/pypesto/optimize/ess/ess.py
@@ -560,7 +560,7 @@ def _do_local_search(
     def _maybe_update_global_best(self, x, fx):
         """Update the global best value if the provided value is better."""
         if fx < self.fx_best:
-            self.x_best = x[:]
+            self.x_best[:] = x
             self.fx_best = fx
             self.x_best_has_changed = True
             self.history.update(
@@ -583,9 +583,9 @@ def _go_beyond(self, x_best_children, fx_best_children):
                 continue
 
             # offspring is better than parent
-            x_parent = self.refset.x[i]
+            x_parent = self.refset.x[i].copy()
             fx_parent = self.refset.fx[i]
-            x_child = x_best_children[i]
+            x_child = x_best_children[i].copy()
             fx_child = fx_best_children[i]
             improvement = 1
             # Multiplier used in determining the hyper-rectangle from which to
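
The fix replaces view-creating assignments with explicit copies; a minimal illustration of the underlying NumPy behavior that led to the premature stop:

    import numpy as np

    # `a = b[:]` binds `a` to a VIEW of `b` (basic slicing of numpy arrays),
    # so later in-place changes to `b` silently change `a` as well;
    # `a[:] = b` (or `b.copy()`) stores an independent copy instead.
    refset_row = np.array([1.0, 2.0])

    best_as_view = refset_row[:]      # old behavior
    best_as_copy = refset_row.copy()  # new behavior

    refset_row[0] = 99.0              # the refset member is later overwritten
    print(best_as_view)               # [99.  2.] -- the stored "best" changed too
    print(best_as_copy)               # [ 1.  2.] -- preserved
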
From be60f09445d91178df6aed1474776542d694c709 Mon Sep 17 00:00:00 2001
From: Daniel Weindl
Date: Tue, 8 Oct 2024 10:10:57 +0200
Subject: [PATCH 26/37] Fix sphinx config (#1483)

Convert path-like object to str. Changed in sphinx>=8.

Fixes #1482.
---
 doc/conf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/conf.py b/doc/conf.py
index 27cd4cb7c..6e9e09c5f 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -165,7 +165,7 @@
 # Add notebooks prolog to Google Colab and nbviewer
 nbsphinx_prolog = r"""
-{% set docname = 'github/icb-dcm/pypesto/blob/main/doc/' + env.doc2path(env.docname, base=None) %}
+{% set docname = 'github/icb-dcm/pypesto/blob/main/doc/' + env.doc2path(env.docname, base=None)|string %}
 .. raw:: html

From c5a46d4a9cbac6c2d2eb41936cdb2961a68798bb Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Tue, 8 Oct 2024 13:22:26 +0200 Subject: [PATCH 27/37] GHA: trigger CI workflow on merge_group event (#1484) Trigger CI workflow on `merge_group` event. Required for #1478. Closes #1478. --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 58bf8b6cc..b274e2a61 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -8,6 +8,7 @@ on: - develop pull_request: workflow_dispatch: + merge_group: schedule: # run Monday and Thursday at 03:42 UTC - cron: '42 3 * * MON,THU' From b2cde8c22e9c2a0c67b368c40a0a295b83d78f3a Mon Sep 17 00:00:00 2001 From: Paul Jonas Jost <70631928+PaulJonasJost@users.noreply.github.com> Date: Tue, 8 Oct 2024 17:45:22 +0200 Subject: [PATCH 28/37] Updating the notebooks (#1470) * shortening many optimizations. Replacing Boehm with conversion reaction where possible. Thermodynamic more robust. * Correctly install pyswarms in notebooks1 * renamed "model_conversion_reaction" to "conversion_reaction" according to naming scheme in benchmark models as well * Revert "renamed "model_conversion_reaction" to "conversion_reaction" according to naming scheme in benchmark models as well" This reverts commit a1a54f771815ba2579259082dff59e045188dd12. * reverted changes and kept original structure. adjusted amici.ipynb * changed directory * changed directory II * Start out with boehm, for optimization etc change to conversion reaction * fixed custom_objective_function * for now: more starts with pyswarms. * changed julia notebooks ever so slightly. * Revert changes to julia notebook * Integrate comments * Correct PEtab logo now. * Update doc/example/getting_started.ipynb Co-authored-by: Dilan Pathirana <59329744+dilpath@users.noreply.github.com> * Update doc/example/getting_started.ipynb Co-authored-by: Dilan Pathirana <59329744+dilpath@users.noreply.github.com> * Adjusted amici logo * Removed pyswarms from waterfall visualization * Fixed a random seed for sampling notebook --------- Co-authored-by: Dilan Pathirana <59329744+dilpath@users.noreply.github.com> --- doc/example/amici.ipynb | 296 +++++++++++++-- doc/example/censored_data.ipynb | 6 +- doc/example/conversion_reaction.ipynb | 9 +- doc/example/custom_objective_function.ipynb | 162 ++++++++- doc/example/fixed_parameters.ipynb | 4 +- doc/example/getting_started.ipynb | 36 +- doc/example/history_usage.ipynb | 13 +- .../model_evidence_and_bayes_factors.ipynb | 339 ++++++++++-------- doc/example/model_selection.ipynb | 22 +- doc/example/ordinal_data.ipynb | 6 +- doc/example/petab_import.ipynb | 12 +- doc/example/prior_definition.ipynb | 4 +- doc/example/relative_data.ipynb | 2 +- doc/example/roadrunner.ipynb | 8 +- doc/example/sampler_study.ipynb | 9 +- doc/example/sampling_diagnostics.ipynb | 2 +- doc/example/semiquantitative_data.ipynb | 6 +- doc/example/store.ipynb | 96 ++++- doc/example/synthetic_data.ipynb | 6 +- tox.ini | 2 +- 20 files changed, 765 insertions(+), 275 deletions(-) diff --git a/doc/example/amici.ipynb b/doc/example/amici.ipynb index 3b0bcb96e..703fe4e95 100644 --- a/doc/example/amici.ipynb +++ b/doc/example/amici.ipynb @@ -4,6 +4,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -28,6 +31,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ 
-52,6 +58,7 @@ "import pypesto.store as store\n", "import pypesto.visualize as visualize\n", "import pypesto.visualize.model_fit as model_fit\n", + "import pypesto\n", "\n", "mpl.rcParams[\"figure.dpi\"] = 100\n", "mpl.rcParams[\"font.size\"] = 18\n", @@ -71,6 +78,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -83,6 +93,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -98,6 +111,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -113,6 +129,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -125,6 +144,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -140,6 +162,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -153,6 +178,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -170,6 +198,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -190,6 +221,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -205,6 +239,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -221,6 +258,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -236,6 +276,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -258,6 +301,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -273,6 +319,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -286,6 +335,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -299,6 +351,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -316,6 +371,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -331,6 +389,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -429,6 +490,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -470,6 +534,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -499,6 +566,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } 
@@ -515,6 +585,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -532,6 +605,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -554,6 +630,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -567,6 +646,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -586,6 +668,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -598,6 +683,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -610,6 +698,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -617,13 +708,13 @@ "source": [ "#### Background on PEtab\n", "\n", - "\"pyPESTO\n", + "\"PEtab\n", "\n", "pyPESTO supports the [PEtab](https://github.com/PEtab-dev/PEtab) standard. PEtab is a data format for specifying parameter estimation problems in systems biology.\n", "\n", "A PEtab problem consist of an [SBML](https://sbml.org) file, defining the model topology and a set of `.tsv` files, defining experimental conditions, observables, measurements and parameters (and their optimization bounds, scale, priors...). All files that make up a PEtab problem can be structured in a `.yaml` file. The `pypesto.Objective` coming from a PEtab problem corresponds to the negative-log-likelihood/negative-log-posterior distribution of the parameters.\n", "\n", - "For more details on PEtab, the interested reader is referred to [PEtab's format definition](https://petab.readthedocs.io/en/latest/documentation_data_format.html), for examples the reader is referred to the [PEtab benchmark collection](https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab). The Model from _[Böhm et al. JProteomRes 2014](https://pubs.acs.org/doi/abs/10.1021/pr5006923)_ is part of the benchmark collection and will be used as the running example throughout this notebook.\n" + "For more details on PEtab, the interested reader is referred to [PEtab's format definition](https://petab.readthedocs.io/en/latest/documentation_data_format.html), for examples the reader is referred to the [PEtab benchmark collection](https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab). 
For demonstration purposes, a simple model of conversion-reaction will be used as the running example throughout this notebook.\n" ] }, { @@ -631,6 +722,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -638,6 +732,7 @@ "outputs": [], "source": [ "%%capture\n", + "\n", "petab_yaml = f\"./{model_name}/{model_name}.yaml\"\n", "\n", "petab_problem = petab.Problem.from_yaml(petab_yaml)\n", @@ -650,6 +745,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -665,6 +763,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -680,6 +781,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -695,6 +799,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -709,6 +816,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -722,6 +832,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -729,7 +842,7 @@ "outputs": [], "source": [ "# call the objective function\n", - "print(f\"Objective value: {problem.objective(benchmark_parameters)}\")\n", + "print(f\"Objective value: {problem.objective(petab_problem.x_free_indices)}\")\n", "# change things in the model\n", "problem.objective.amici_model.requireSensitivitiesForAllParameters()\n", "# change solver settings\n", @@ -746,6 +859,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -758,6 +874,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -772,6 +891,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -789,6 +911,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -806,14 +931,17 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } }, "source": [ - "### Startpoint method\n", + "### History options\n", "\n", - "The startpoint method describes how you want to choose your startpoints, in case you do a multistart optimization. The default here is `uniform` meaning that each startpoint is a uniform sample from the allowed parameter space. The other two notable options are either `latin_hypercube` or a self defined function." + "In some cases, it is good to trace what the optimizer did in each step, i.e., the history. There is a multitude of options on what to report here, but the most important one is `trace_record` which turns the history function on and off." 
] }, { @@ -821,48 +949,44 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } }, "outputs": [], "source": [ - "startpoint_method = pypesto.startpoint.uniform" + "# save optimizer trace\n", + "history_options = pypesto.HistoryOptions(trace_record=True)" ] }, { "cell_type": "markdown", - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%% md\n" - } - }, + "metadata": {}, "source": [ - "### History options\n", + "### Startpoint method\n", "\n", - "In some cases, it is good to trace what the optimizer did in each step, i.e., the history. There is a multitude of options on what to report here, but the most important one is `trace_record` which turns the history function on and off." + "The startpoint method describes how you want to choose your startpoints, in case you do a multistart optimization. The default here is `uniform` meaning that each startpoint is a uniform sample from the allowed parameter space. The other two notable options are either `latin_hypercube` or a self-defined function. The startpoint method is an inherent attribute of the problem and can be set there." ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - }, + "metadata": {}, "outputs": [], "source": [ - "# save optimizer trace\n", - "history_options = pypesto.HistoryOptions(trace_record=True)" + "problem.startpoint_method = pypesto.startpoint.uniform" ] }, { "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -878,6 +1002,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -892,6 +1019,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -907,6 +1037,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -922,6 +1055,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -933,7 +1069,6 @@ " problem=problem,\n", " optimizer=optimizer,\n", " n_starts=n_starts,\n", - " startpoint_method=startpoint_method,\n", " engine=engine,\n", " options=opt_options,\n", ")" @@ -943,6 +1078,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -956,6 +1094,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -969,6 +1110,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -983,6 +1127,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -995,6 +1142,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -1010,6 +1160,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -1025,6 +1178,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + 
"outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -1042,6 +1198,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -1055,6 +1214,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -1069,6 +1231,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -1084,6 +1249,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -1097,6 +1265,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -1111,7 +1282,6 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false, "pycharm": { "name": "#%%\n" } @@ -1125,6 +1295,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -1140,6 +1313,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -1155,6 +1331,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -1168,18 +1347,24 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } }, "source": [ - "We definitely need to look further into it, and thus we turn to uncertainty quantification in the next section." + "However, these visualizations are only an indicator for possible uncertainties. In the next section we turn to proper uncertainty quantification." 
] }, { "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -1196,6 +1381,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -1213,6 +1401,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -1234,6 +1425,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -1247,6 +1441,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -1261,6 +1458,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -1276,6 +1476,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -1287,7 +1490,7 @@ "result = sample.sample(\n", " problem=problem,\n", " sampler=sampler,\n", - " n_samples=5000,\n", + " n_samples=1000,\n", " result=result,\n", ")" ] @@ -1296,6 +1499,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -1309,6 +1515,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -1324,6 +1533,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -1337,6 +1549,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -1351,6 +1566,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -1364,6 +1582,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -1389,6 +1610,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -1402,6 +1626,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -1419,6 +1646,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -1432,6 +1662,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -1445,6 +1678,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -1457,6 +1693,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -1470,6 +1709,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -1497,7 +1739,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.10" + "version": "3.12.3" } }, "nbformat": 4, diff --git 
a/doc/example/censored_data.ipynb b/doc/example/censored_data.ipynb index 28ff83c3b..c5ac649db 100644 --- a/doc/example/censored_data.ipynb +++ b/doc/example/censored_data.ipynb @@ -237,7 +237,7 @@ ], "metadata": { "kernelspec": { - "display_name": "dev_venv", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -251,7 +251,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.2" + "version": "3.12.3" }, "vscode": { "interpreter": { @@ -260,5 +260,5 @@ } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/doc/example/conversion_reaction.ipynb b/doc/example/conversion_reaction.ipynb index e31217c77..c909388de 100644 --- a/doc/example/conversion_reaction.ipynb +++ b/doc/example/conversion_reaction.ipynb @@ -144,6 +144,7 @@ }, "outputs": [], "source": [ + "%%time\n", "# create objective function from amici model\n", "# pesto.AmiciObjective is derived from pesto.Objective,\n", "# the general pesto objective function class\n", @@ -157,7 +158,7 @@ "\n", "# do the optimization\n", "result = optimize.minimize(\n", - " problem=problem, optimizer=optimizer, n_starts=100, filename=None\n", + " problem=problem, optimizer=optimizer, n_starts=10, filename=None\n", ")" ] }, @@ -221,7 +222,7 @@ " problem=problem,\n", " result=result,\n", " optimizer=optimizer,\n", - " profile_index=np.array([1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0]),\n", + " profile_index=np.array([0,1]),\n", " result_index=0,\n", " profile_options=profile_options,\n", " filename=None,\n", @@ -270,7 +271,7 @@ ")\n", "\n", "result = sample.sample(\n", - " problem, n_samples=10000, sampler=sampler, result=result, filename=None\n", + " problem, n_samples=1000, sampler=sampler, result=result, filename=None\n", ")" ] }, @@ -425,7 +426,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.7" + "version": "3.12.3" } }, "nbformat": 4, diff --git a/doc/example/custom_objective_function.ipynb b/doc/example/custom_objective_function.ipynb index 35c985884..f1276c5a4 100644 --- a/doc/example/custom_objective_function.ipynb +++ b/doc/example/custom_objective_function.ipynb @@ -4,6 +4,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -16,6 +19,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -30,6 +36,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -48,6 +57,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -63,6 +75,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -89,6 +104,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -101,6 +119,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -113,6 +134,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -127,6 +151,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + 
"outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -145,6 +172,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -159,6 +189,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -181,6 +214,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -194,6 +230,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -216,6 +255,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -229,6 +271,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -260,6 +305,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -281,6 +329,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -301,6 +352,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -320,6 +374,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -333,6 +390,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -349,6 +409,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -362,6 +425,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -383,6 +449,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -396,12 +465,16 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } }, "outputs": [], "source": [ + "%%time\n", "# run optimization of problem 1\n", "result1 = optimize.minimize(\n", " problem=problem1, optimizer=optimizer, n_starts=n_starts, engine=engine\n", @@ -424,6 +497,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -437,6 +513,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -461,6 +540,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -477,6 +559,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -490,6 +575,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -507,6 +595,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": 
"#%% md\n" } @@ -520,6 +611,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -537,6 +631,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -550,6 +647,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -572,6 +672,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -584,6 +687,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -599,6 +705,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -636,12 +745,16 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } }, "outputs": [], "source": [ + "%%time\n", "# compute profiles\n", "profile_options = profile.ProfileOptions(whole_path=True)\n", "\n", @@ -649,7 +762,7 @@ " problem=problem1,\n", " result=result1,\n", " optimizer=optimizer,\n", - " profile_index=np.array([0, 1, 3, 5]),\n", + " profile_index=np.array([0, 3]),\n", " result_index=0,\n", " profile_options=profile_options,\n", " filename=None,\n", @@ -660,7 +773,7 @@ " problem=problem1,\n", " result=result1,\n", " optimizer=optimizer,\n", - " profile_index=np.array([0, 1, 3, 5]),\n", + " profile_index=np.array([0, 3]),\n", " result_index=-1,\n", " profile_options=profile_options,\n", " filename=None,\n", @@ -669,7 +782,7 @@ " problem=problem4,\n", " result=result4,\n", " optimizer=optimizer,\n", - " profile_index=np.array([0, 1, 3, 5]),\n", + " profile_index=np.array([0, 3]),\n", " result_index=0,\n", " profile_options=profile_options,\n", " filename=None,\n", @@ -681,6 +794,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -690,7 +806,7 @@ "# specify the parameters, for which profiles should be computed\n", "visualize.profiles(\n", " result1,\n", - " profile_indices=[0, 1, 3, 5],\n", + " profile_indices=[0, 3],\n", " reference=[ref, ref2],\n", " profile_list_ids=[0, 1],\n", ");" @@ -701,6 +817,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -709,7 +828,7 @@ "source": [ "visualize.profiles(\n", " result4,\n", - " profile_indices=[0, 1, 3, 5],\n", + " profile_indices=[0, 3],\n", " reference=[ref4],\n", ");" ] @@ -718,6 +837,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -730,6 +852,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -742,6 +867,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -755,6 +883,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -774,6 +905,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": 
{ "name": "#%% md\n" } @@ -787,6 +921,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -795,7 +932,7 @@ "source": [ "axes = visualize.profiles(\n", " result1,\n", - " profile_indices=[0, 1, 3, 5],\n", + " profile_indices=[0, 3],\n", " profile_list_ids=[0, 2],\n", " ratio_min=0.01,\n", " colors=[(1, 0, 0, 1), (0, 0, 1, 1)],\n", @@ -811,6 +948,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -825,23 +965,23 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", - "version": 2 + "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", - "pygments_lexer": "ipython2", - "version": "2.7.6" + "pygments_lexer": "ipython3", + "version": "3.12.3" } }, "nbformat": 4, - "nbformat_minor": 0 + "nbformat_minor": 4 } diff --git a/doc/example/fixed_parameters.ipynb b/doc/example/fixed_parameters.ipynb index 6f8838d9a..6074a50b7 100644 --- a/doc/example/fixed_parameters.ipynb +++ b/doc/example/fixed_parameters.ipynb @@ -146,7 +146,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3.10.2 64-bit", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -160,7 +160,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.2" + "version": "3.12.3" }, "vscode": { "interpreter": { diff --git a/doc/example/getting_started.ipynb b/doc/example/getting_started.ipynb index 2f98e6d69..bf6a80e32 100644 --- a/doc/example/getting_started.ipynb +++ b/doc/example/getting_started.ipynb @@ -33,6 +33,7 @@ "import amici\n", "import matplotlib as mpl\n", "import numpy as np\n", + "import scipy as sp\n", "\n", "import pypesto.optimize as optimize\n", "import pypesto.petab\n", @@ -54,11 +55,11 @@ "source": [ "## 1. Objective Definition\n", "\n", - "PyPESTO allows the definition of a custom objectives, as well as offers support for objectives defined in the [PEtab](https://github.com/PEtab-dev/PEtab) format.\n", + "pyPESTO allows the definition of custom objectives and offers support for objectives defined in the [PEtab](https://github.com/PEtab-dev/PEtab) format.\n", "\n", "### Custom Objective Definition\n", "\n", - "You can define an objective via a python function. Also providing an analytical gradient (and potentially also a Hessian) improves the performance of Gradient/Hessian-based optimizers. When accessing parameter uncertainties via profile-likelihoods/sampling, pyPESTO interprets the objective function as the negative-log-likelihood/negative-log-posterior." + "You can define an objective via a python function. Also providing an analytical gradient (and potentially also a Hessian) improves the performance of Gradient/Hessian-based optimizers. When accessing parameter uncertainties via profile-likelihoods/sampling, pyPESTO interprets the objective function as the negative-log-likelihood/negative-log-posterior. A more in-depth construction of a custom objective function can be found in [a designated example notebook.](./custom_objective_function.ipynb)" ] }, { @@ -195,7 +196,7 @@ "\n", "#### Background on PEtab\n", "\n", - "\"pyPESTO\n", + "\"PEtab\n", "\n", "PyPESTO supports the [PEtab](https://github.com/PEtab-dev/PEtab) standard. 
PEtab is a data format for specifying parameter estimation problems in systems biology. \n", "\n", @@ -231,7 +232,7 @@ "source": [ "%%capture\n", "# directory of the PEtab problem\n", - "petab_yaml = \"./boehm_JProteomeRes2014/boehm_JProteomeRes2014.yaml\"\n", + "petab_yaml = \"./conversion_reaction/conversion_reaction.yaml\"\n", "\n", "importer = pypesto.petab.PetabImporter.from_yaml(petab_yaml)\n", "problem = importer.create_problem(verbose=False)" @@ -329,7 +330,7 @@ " * Gradient-free\n", "* [FIDES](https://github.com/fides-dev/fides/) (`optimize.FidesOptimizer()`)\n", " * Interior Trust Region optimizer \n", - "* [Particle Swarm](https://github.com/tisimst/pyswarm) (`optimize.PyswarmOptimizer()`)\n", + "* [Particle Swarm](https://github.com/ljvmiranda921/pyswarms) (`optimize.PyswarmsOptimizer()`)\n", " * Particle swarm algorithm\n", " * Gradient-free\n", "* [CMA-ES](https://pypi.org/project/cma-es/) (`optimize.CmaOptimizer()`)\n", @@ -351,7 +352,7 @@ "optimizer_scipy_powell = optimize.ScipyOptimizer(method=\"Powell\")\n", "\n", "optimizer_fides = optimize.FidesOptimizer(verbose=logging.ERROR)\n", - "optimizer_pyswarm = optimize.PyswarmOptimizer()" + "optimizer_pyswarm = optimize.PyswarmsOptimizer(par_popsize=10)" ] }, { @@ -384,14 +385,12 @@ "# Due to run time we already use parallelization.\n", "# This will be introduced in more detail later.\n", "engine = pypesto.engine.MultiProcessEngine()\n", - "history_options = pypesto.HistoryOptions(trace_record=True)\n", "\n", "# Scipy: L-BFGS-B\n", "result_lbfgsb = optimize.minimize(\n", " problem=problem,\n", " optimizer=optimizer_scipy_lbfgsb,\n", " engine=engine,\n", - " history_options=history_options,\n", " n_starts=n_starts,\n", ")\n", "\n", @@ -400,7 +399,6 @@ " problem=problem,\n", " optimizer=optimizer_scipy_powell,\n", " engine=engine,\n", - " history_options=history_options,\n", " n_starts=n_starts,\n", ")\n", "\n", @@ -409,7 +407,6 @@ " problem=problem,\n", " optimizer=optimizer_fides,\n", " engine=engine,\n", - " history_options=history_options,\n", " n_starts=n_starts,\n", ")\n", "\n", @@ -419,8 +416,7 @@ " problem=problem,\n", " optimizer=optimizer_pyswarm,\n", " engine=engine,\n", - " history_options=history_options,\n", - " n_starts=n_starts,\n", + " n_starts=1, # Global optimizers are usually run once. The number of particles (par_popsize) is usually the parameter that is adapted.\n", ")" ] }, @@ -435,9 +431,7 @@ "### Optimizer Convergence\n", "\n", "\n", - "A common visualization of optimizer convergence are waterfall plots. Waterfall plots show the (ordered) results of the individual optimization runs. As we see below, Dlib and pyswarm, which are not gradient-based, are not able to find the global optimum. \n", - "\n", - "Furthermore, we hope to obtain clearly visible plateaus, as they indicate optimizer convergence to local minima. " + "A common visualization of optimizer convergence are waterfall plots. Waterfall plots show the (ordered) results of the individual optimization runs. In general, we hope to obtain clearly visible plateaus, as they indicate optimizer convergence to local minima. 
" ] }, { @@ -453,10 +447,9 @@ "optimizer_results = [\n", " result_lbfgsb,\n", " result_powell,\n", - " result_fides,\n", - " result_pyswarm,\n", + " result_fides\n", "]\n", - "optimizer_names = [\"Scipy: L-BFGS-B\", \"Scipy: Powell\", \"Fides\", \"pyswarm\"]\n", + "optimizer_names = [\"Scipy: L-BFGS-B\", \"Scipy: Powell\", \"Fides\"]\n", "\n", "pypesto.visualize.waterfall(optimizer_results, legends=optimizer_names);" ] @@ -612,7 +605,7 @@ " problem=problem,\n", " result=result,\n", " optimizer=optimizer_scipy_lbfgsb,\n", - " profile_index=[0, 1, 2],\n", + " profile_index=[0, 1],\n", ")" ] }, @@ -910,6 +903,9 @@ "cell_type": "markdown", "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%% md\n" } @@ -941,7 +937,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.2" + "version": "3.12.3" } }, "nbformat": 4, diff --git a/doc/example/history_usage.ipynb b/doc/example/history_usage.ipynb index ae882fd77..21d661af3 100644 --- a/doc/example/history_usage.ipynb +++ b/doc/example/history_usage.ipynb @@ -376,12 +376,15 @@ }, { "cell_type": "markdown", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, "source": [ "We compare the function value trace of the loaded results with the original results to ensure consistency." - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", @@ -471,7 +474,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.2" + "version": "3.12.3" } }, "nbformat": 4, diff --git a/doc/example/model_evidence_and_bayes_factors.ipynb b/doc/example/model_evidence_and_bayes_factors.ipynb index e7d0b2af7..0cc7d188a 100644 --- a/doc/example/model_evidence_and_bayes_factors.ipynb +++ b/doc/example/model_evidence_and_bayes_factors.ipynb @@ -13,8 +13,9 @@ ] }, { - "metadata": {}, "cell_type": "markdown", + "id": "5c56f766bcf7ab48", + "metadata": {}, "source": [ "\n", "## Marginal Likelihood\n", @@ -26,12 +27,12 @@ "$$\n", "\n", "where $\\theta$ are the parameters of the model. This integral averages the likelihood over the prior distribution of the parameters, providing a measure of how well the model explains the data, considering all possible parameter values." - ], - "id": "5c56f766bcf7ab48" + ] }, { - "metadata": {}, "cell_type": "markdown", + "id": "6337b6a3", + "metadata": {}, "source": [ "## Bayes Factor\n", "\n", @@ -49,12 +50,12 @@ "- Substantial: $0.5 < \\log_{10}\\operatorname{BF}_{12} \\leq 1$\n", "- Strong: $1 < \\log_{10}\\operatorname{BF}_{12} \\leq 2$\n", "- Decisive: $2 < \\log_{10}\\operatorname{BF}_{12}$" - ], - "id": "6337b6a3" + ] }, { - "metadata": {}, "cell_type": "markdown", + "id": "a6b7640cff0280de", + "metadata": {}, "source": [ "## Example\n", "\n", @@ -68,13 +69,14 @@ "The free parameters are the means of both models. \n", "For this example, we assume that the standard deviation is known and fixed to the true value.\n", "As priors, we assume normal distributions." - ], - "id": "a6b7640cff0280de" + ] }, { "cell_type": "code", + "execution_count": null, "id": "6eb930b7", "metadata": {}, + "outputs": [], "source": [ "from functools import partial\n", "from typing import Union\n", @@ -90,14 +92,18 @@ " NegLogParameterPriors,\n", " Objective,\n", ")\n", - "from pypesto.problem import Problem" - ], - "outputs": [], - "execution_count": null + "from pypesto.problem import Problem\n", + "\n", + "# For testing purposes. 
Remove if not running the exact example.\n", + "np.random.seed(42)" + ] }, { - "metadata": {}, "cell_type": "code", + "execution_count": null, + "id": "ad47e3f67a7896d3", + "metadata": {}, + "outputs": [], "source": [ "# model hyperparameters\n", "N = 10\n", @@ -131,18 +137,18 @@ "plt.scatter(Y2_2, np.full(len(Y2_2), 0.1), label='Y2_2 samples', color='green')\n", "plt.legend()\n", "plt.show()" - ], - "id": "ad47e3f67a7896d3", - "outputs": [], - "execution_count": null + ] }, { + "cell_type": "code", + "execution_count": null, + "id": "2143410833d86594", "metadata": { "tags": [ "hide-input" ] }, - "cell_type": "code", + "outputs": [], "source": [ "# evidence\n", "def log_evidence_alt(data: np.ndarray, std: float):\n", @@ -186,18 +192,18 @@ "\n", "print(\"True log evidence, true model:\", true_log_evidence_true)\n", "print(\"True log evidence, alternative model:\", true_log_evidence_alt)" - ], - "id": "2143410833d86594", - "outputs": [], - "execution_count": null + ] }, { + "cell_type": "code", + "execution_count": null, + "id": "33659ff5eba21345", "metadata": { "tags": [ "hide-input" ] }, - "cell_type": "code", + "outputs": [], "source": [ "# define likelihood for each model, and build the objective functions for the pyPESTO problem\n", "def neg_log_likelihood(params: Union[np.ndarray, list], data: np.ndarray):\n", @@ -362,13 +368,19 @@ " x_fixed_vals=[sigma],\n", " x_priors_defs=prior_alt,\n", ")" - ], - "id": "33659ff5eba21345", - "outputs": [], - "execution_count": null + ] }, { "cell_type": "code", + "execution_count": null, + "id": "cf9af2fa37f3a0cf", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], "source": [ "# to make the code more readable, we define a dictionary with all models\n", "# from here on, we use the pyPESTO problem objects, so the code can be reused for any other problem\n", @@ -397,24 +409,24 @@ " m['neg_log_likelihood'] = lambda x: m['problem'].objective._objectives[0](\n", " m['problem'].get_full_vector(x=x, x_fixed_vals=m['problem'].x_fixed_vals)\n", " ) " - ], - "metadata": { - "collapsed": false - }, - "id": "cf9af2fa37f3a0cf", - "outputs": [], - "execution_count": null + ] }, { - "metadata": {}, "cell_type": "markdown", - "source": "## Methods for Computing Marginal Likelihoods", - "id": "e273503367e8bf4d" + "id": "e273503367e8bf4d", + "metadata": {}, + "source": [ + "## Methods for Computing Marginal Likelihoods" + ] }, { - "metadata": {}, "cell_type": "code", + "execution_count": null, + "id": "95ec6b53c9133332", + "metadata": {}, + "outputs": [], "source": [ + "%%time\n", "# run optimization for each model\n", "for m in models.values(): \n", " m['results'] = optimize.minimize(problem=m['problem'], n_starts=100)\n", @@ -424,14 +436,12 @@ " results=m['results'], reference={'x': m[\"true_params\"], 'fval': m['problem'].objective(m[\"true_params\"])})\n", " else:\n", " visualize.parameters(m['results'])" - ], - "id": "95ec6b53c9133332", - "outputs": [], - "execution_count": null + ] }, { - "metadata": {}, "cell_type": "markdown", + "id": "ffd895262133fe00", + "metadata": {}, "source": [ "### 1. Bayesian Information Criterion (BIC)\n", "\n", @@ -445,24 +455,24 @@ "\n", "\n", "BIC is easy to compute and converges to the marginal likelihood, but it may not capture the full complexity of model selection, especially for complex models or significant prior information as the prior is completely ignored." 
- ], - "id": "ffd895262133fe00" + ] }, { - "metadata": {}, "cell_type": "code", + "execution_count": null, + "id": "1b40d72091d00e9f", + "metadata": {}, + "outputs": [], "source": [ "for m in models.values(): \n", " m['BIC'] = len(m['problem'].x_free_indices) * np.log(n_obs) + 2 * m['neg_log_likelihood'](m['results'].optimize_result.x[0])\n", " print(m['name'], 'BIC marginal likelihood approximation:', -1/2*m['BIC'])" - ], - "id": "1b40d72091d00e9f", - "outputs": [], - "execution_count": null + ] }, { - "metadata": {}, "cell_type": "markdown", + "id": "67cb4a7bb781d42", + "metadata": {}, "source": [ "### 2. Laplace Approximation\n", "\n", @@ -476,12 +486,14 @@ "\n", "\n", "The Laplace approximation is accurate if the posterior is unimodal and roughly Gaussian." - ], - "id": "67cb4a7bb781d42" + ] }, { - "metadata": {}, "cell_type": "code", + "execution_count": null, + "id": "548513d76b8887dd", + "metadata": {}, + "outputs": [], "source": [ "%%time\n", "for m in models.values(): \n", @@ -492,26 +504,27 @@ " \n", " m['laplace_evidences'] = np.array(laplace_evidences)\n", " print(m['name'], f'laplace approximation: {m[\"laplace_evidences\"][0]}')" - ], - "id": "548513d76b8887dd", - "outputs": [], - "execution_count": null + ] }, { "cell_type": "markdown", + "id": "b5ac29500e0e678b", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, "source": [ "### 3. Sampling-Based Methods\n", "\n", "Sampling-based methods, such as Markov Chain Monte Carlo (MCMC) or nested sampling, do not make assumptions about the shape of the posterior and can provide more accurate estimates of the marginal likelihood. However, they can be computationally very intensive." - ], - "metadata": { - "collapsed": false - }, - "id": "b5ac29500e0e678b" + ] }, { - "metadata": {}, "cell_type": "markdown", + "id": "212297d07ef90600", + "metadata": {}, "source": [ "\n", "#### Arithmetic Mean Estimator\n", @@ -523,11 +536,19 @@ "$$\n", "\n", "The arithmetic mean estimator requires a large number of samples and is very inefficient. It approximates the marginal likelihood from below." - ], - "id": "212297d07ef90600" + ] }, { "cell_type": "code", + "execution_count": null, + "id": "ec2f000c836abad6", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], "source": [ "%%time\n", "for m in models.values(): \n", @@ -538,16 +559,17 @@ " m['arithmetic_log_evidence'] = logsumexp(log_likelihoods) - np.log(log_likelihoods.size)\n", " \n", " print(m['name'], f'arithmetic mean: {m[\"arithmetic_log_evidence\"]}')" - ], - "metadata": { - "collapsed": false - }, - "id": "ec2f000c836abad6", - "outputs": [], - "execution_count": null + ] }, { "cell_type": "markdown", + "id": "77ec3e1ec016d0d1", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, "source": [ "#### Harmonic Mean\n", "\n", @@ -566,15 +588,14 @@ "A reliable sampling method is bridge sampling (see [\"A Tutorial on Bridge Sampling\" by Gronau et al. (2017)](https://api.semanticscholar.org/CorpusID:5447695) for a nice introduction). It uses samples from a proposal and the posterior to estimate the marginal likelihood. The proposal distribution should be chosen to have a high overlap with the posterior (we construct it from half of the posterior samples by fitting a Gaussian distribution with the same mean and covariance). This method is more stable than the harmonic mean estimator. 
However, its accuracy may depend on the choice of the proposal distribution.\n", "\n", "A different approach, the learnt harmonic mean estimator, was proposed by [McEwen et al. (2021)](https://api.semanticscholar.org/CorpusID:244709474). The estimator solves the large variance problem by interpreting the harmonic mean estimator as importance sampling and introducing a new target distribution, which is learned from the posterior samples. The method can be applied just using samples from the posterior and is implemented in the software package accompanying the paper.\n" - ], - "metadata": { - "collapsed": false - }, - "id": "77ec3e1ec016d0d1" + ] }, { - "metadata": {}, "cell_type": "code", + "execution_count": null, + "id": "ba4cc742f71fad4", + "metadata": {}, + "outputs": [], "source": [ "%%time\n", "for m in models.values(): \n", @@ -586,14 +607,14 @@ " # compute harmonic mean\n", " m['harmonic_log_evidence'] = sample.evidence.harmonic_mean_log_evidence(results)\n", " print(m['name'], f'harmonic mean: {m[\"harmonic_log_evidence\"]}') " - ], - "id": "ba4cc742f71fad4", - "outputs": [], - "execution_count": null + ] }, { - "metadata": {}, "cell_type": "code", + "execution_count": null, + "id": "a7272997b60de2e2", + "metadata": {}, + "outputs": [], "source": [ "%%time\n", "for m in models.values(): \n", @@ -612,14 +633,14 @@ " neg_log_likelihood_fun=m['neg_log_likelihood']\n", " )\n", " print(m['name'], f'stabilized harmonic mean: {m[\"harmonic_stabilized_log_evidence\"]}')" - ], - "id": "a7272997b60de2e2", - "outputs": [], - "execution_count": null + ] }, { - "metadata": {}, "cell_type": "code", + "execution_count": null, + "id": "ce38f1a4975cd72a", + "metadata": {}, + "outputs": [], "source": [ "%%time\n", "for m in models.values(): \n", @@ -630,27 +651,36 @@ " )\n", " m['bridge_log_evidence'] = sample.evidence.bridge_sampling_log_evidence(results)\n", " print(m['name'], f'bridge sampling: {m[\"bridge_log_evidence\"]}')" - ], - "id": "ce38f1a4975cd72a", - "outputs": [], - "execution_count": null + ] }, { "cell_type": "markdown", + "id": "443bf17c8ae27a15", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, "source": [ "#### Nested Sampling\n", "\n", "Nested sampling is specifically designed for estimating marginal likelihoods. The static nested sampler is optimized for evidence computation and provides accurate estimates but may give less accurate posterior samples unless dynamic nested sampling is used. \n", "\n", "Dynamic nested sampling can improve the accuracy of posterior samples. The package [dynesty](https://dynesty.readthedocs.io/en/stable/) offers a lot of hyperparameters to tune accuracy and efficiency of computing samples from the posterior vs. estimating the marginal likelihood." 
- ], - "metadata": { - "collapsed": false - }, - "id": "443bf17c8ae27a15" + ] }, { "cell_type": "code", + "execution_count": null, + "id": "c0236f455dfc64d5", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], "source": [ "%%time\n", "for m in models.values():\n", @@ -681,16 +711,17 @@ " # extract log evidence\n", " m[f'nested_log_evidence'] = nested_sampler.sampler.results.logz[-1]\n", " print(m['name'], f'nested sampling: {m[\"nested_log_evidence\"]}')" - ], - "metadata": { - "collapsed": false - }, - "id": "c0236f455dfc64d5", - "outputs": [], - "execution_count": null + ] }, { "cell_type": "markdown", + "id": "dcb16e2efcf4bf0d", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, "source": [ "#### Thermodynamic Integration and Steppingstone Sampling\n", "\n", @@ -704,15 +735,14 @@ "\n", "These methods can be more accurate for complex posteriors but are computationally intensive. Thermodynamic integration (TI) relies on integrating the integral over the temperature $t$, while steppingstone sampling approximates the integral with a sum over a finite number of temperatures using an importance sampling estimator. Accuracy can be improved by using more temperatures. \n", "Errors in the estimator might come from the MCMC sampler in both cases and from numerical integration when applying TI. Steppingstone sampling can be a biased estimator for a small number of temperatures [(Annis et al., 2019)](https://doi.org/10.1016/j.jmp.2019.01.005).\n" - ], - "metadata": { - "collapsed": false - }, - "id": "dcb16e2efcf4bf0d" + ] }, { - "metadata": {}, "cell_type": "code", + "execution_count": null, + "id": "13059e00c982d98d", + "metadata": {}, + "outputs": [], "source": [ "%%time\n", "for m in models.values():\n", @@ -730,34 +760,37 @@ " result=m['results']\n", " ) \n", " # compute log evidence via thermodynamic integration\n", - " m['thermodynamic_log_evidence'] = sample.evidence.parallel_tempering_log_evidence(result_ti)\n", + " m['thermodynamic_log_evidence'] = sample.evidence.parallel_tempering_log_evidence(result_ti, use_all_chains=False)\n", " print(m['name'], f'thermodynamic integration: {m[\"thermodynamic_log_evidence\"]}')\n", " \n", " # compute log evidence via steppingstone sampling\n", - " m['steppingstone_log_evidence'] = sample.evidence.parallel_tempering_log_evidence(result_ti, method='steppingstone')\n", + " m['steppingstone_log_evidence'] = sample.evidence.parallel_tempering_log_evidence(result_ti, method='steppingstone', use_all_chains=False)\n", " print(m['name'], f'steppingstone sampling: {m[\"steppingstone_log_evidence\"]}')" - ], - "id": "13059e00c982d98d", - "outputs": [], - "execution_count": null + ] }, { "cell_type": "markdown", + "id": "90fd0f80a9d94b7d", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, "source": [ "#### Variational Inference\n", "\n", "Variational inference approximates the posterior with a simpler distribution and can be faster than sampling methods for large problems. The marginal likelihood can be estimated using similar approaches as before, but the accuracy is limited by the choice of the variational family.\n", "\n", "Variational inference optimization is based on the Evidence Lower Bound (ELBO), providing an additional check for the estimator." 
- ], - "metadata": { - "collapsed": false - }, - "id": "90fd0f80a9d94b7d" + ] }, { - "metadata": {}, "cell_type": "code", + "execution_count": null, + "id": "c616b8a566478d0d", + "metadata": {}, + "outputs": [], "source": [ "%%time\n", "for m in models.values():\n", @@ -794,20 +827,22 @@ " \n", " # evidence cannot be smaller than the lower bound\n", " m[\"vi_harmonic_log_evidences\"] = max(m[\"vi_harmonic_log_evidences\"], vi_lower_bound)" - ], - "id": "c616b8a566478d0d", - "outputs": [], - "execution_count": null + ] }, { - "metadata": {}, "cell_type": "markdown", - "source": "## Comparison", - "id": "5e6c53b1a6414210" + "id": "5e6c53b1a6414210", + "metadata": {}, + "source": [ + "## Comparison" + ] }, { - "metadata": {}, "cell_type": "code", + "execution_count": null, + "id": "fbb5a071645523d4", + "metadata": {}, + "outputs": [], "source": [ "labels = [\n", " '-1/2 BIC',\n", @@ -836,14 +871,14 @@ "]\n", "\n", "true_bf = models['mixture_model1']['true_log_evidence'] - models['mixture_model2']['true_log_evidence']" - ], - "id": "fbb5a071645523d4", - "outputs": [], - "execution_count": null + ] }, { - "metadata": {}, "cell_type": "code", + "execution_count": null, + "id": "30fea0ed78548d6b", + "metadata": {}, + "outputs": [], "source": [ "fig, ax = plt.subplots(2, 1, tight_layout=True, sharex=True, figsize=(6, 6))\n", "colors = ['blue', 'orange']\n", @@ -876,13 +911,19 @@ "ax[1].set_xticks(ticks=np.arange(len(labels)), labels=labels, rotation=60)\n", "fig.legend(ncols=1, loc='center right', bbox_to_anchor=(1.5, 0.7))\n", "plt.show()" - ], - "id": "30fea0ed78548d6b", - "outputs": [], - "execution_count": null + ] }, { "cell_type": "code", + "execution_count": null, + "id": "5d6590690b5c7a30", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], "source": [ "fig, ax = plt.subplots(1, 1, tight_layout=True, figsize=(6, 5))\n", "ax.axhline(true_bf, linestyle='-', color='r', label='True Bayes Factor')\n", @@ -901,23 +942,17 @@ "plt.xticks(ticks=np.arange(len(bayes_factors)), labels=labels, rotation=60)\n", "fig.legend(ncols=1, loc='center right', bbox_to_anchor=(1.5, 0.7))\n", "plt.show()" - ], - "metadata": { - "collapsed": false - }, - "id": "5d6590690b5c7a30", - "outputs": [], - "execution_count": null + ] }, { - "metadata": {}, "cell_type": "markdown", + "id": "6cbfd915823d6989", + "metadata": {}, "source": [ "We recommend using either bridge sampling, nested sampling or one of the methods using power posteriors depending on the computational resources available. \n", "\n", "Bayes factors and marginal likelihoods are powerful tools for Bayesian model comparison. While there are various methods to compute marginal likelihoods, each has its strengths and weaknesses. Choosing the appropriate method depends on the specific context, the complexity of the models, and the computational resources available." 
- ], - "id": "6cbfd915823d6989" + ] } ], "metadata": { @@ -936,7 +971,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.16" + "version": "3.12.3" } }, "nbformat": 4, diff --git a/doc/example/model_selection.ipynb b/doc/example/model_selection.ipynb index 72ae22a8d..f31338dc7 100644 --- a/doc/example/model_selection.ipynb +++ b/doc/example/model_selection.ipynb @@ -158,9 +158,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "scrolled": false - }, + "metadata": {}, "outputs": [], "source": [ "import logging\n", @@ -188,9 +186,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "scrolled": false - }, + "metadata": {}, "outputs": [], "source": [ "# Reduce notebook runtime\n", @@ -263,9 +259,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "scrolled": false - }, + "metadata": {}, "outputs": [], "source": [ "pvs.plot_calibrated_models_digraph(\n", @@ -485,9 +479,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "scrolled": false - }, + "metadata": {}, "outputs": [], "source": [ "petab_select_problem.model_space.reset_exclusions()\n", @@ -555,7 +547,7 @@ "metadata": { "celltoolbar": "Raw Cell Format", "kernelspec": { - "display_name": "Python 3.10.2 64-bit", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -569,7 +561,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.2" + "version": "3.12.3" }, "vscode": { "interpreter": { @@ -578,5 +570,5 @@ } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/doc/example/ordinal_data.ipynb b/doc/example/ordinal_data.ipynb index 2b1528f61..17dbbaa2a 100644 --- a/doc/example/ordinal_data.ipynb +++ b/doc/example/ordinal_data.ipynb @@ -917,7 +917,7 @@ ], "metadata": { "kernelspec": { - "display_name": "dev_venv", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -931,7 +931,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.10" + "version": "3.12.3" }, "vscode": { "interpreter": { @@ -940,5 +940,5 @@ } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/doc/example/petab_import.ipynb b/doc/example/petab_import.ipynb index 69040e595..e516fa008 100644 --- a/doc/example/petab_import.ipynb +++ b/doc/example/petab_import.ipynb @@ -327,7 +327,10 @@ { "cell_type": "markdown", "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "source": [ "### Dealing with function evaluations at the initial point\n", @@ -339,7 +342,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -438,7 +444,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.10" + "version": "3.12.3" }, "toc": { "base_numbering": 1, diff --git a/doc/example/prior_definition.ipynb b/doc/example/prior_definition.ipynb index 9ab018a14..60edde7f2 100644 --- a/doc/example/prior_definition.ipynb +++ b/doc/example/prior_definition.ipynb @@ -206,7 +206,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3.10.2 64-bit", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -220,7 +220,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": 
"3.10.2" + "version": "3.12.3" }, "vscode": { "interpreter": { diff --git a/doc/example/relative_data.ipynb b/doc/example/relative_data.ipynb index 62ba09c05..677b7ff75 100644 --- a/doc/example/relative_data.ipynb +++ b/doc/example/relative_data.ipynb @@ -467,7 +467,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.2" + "version": "3.12.3" }, "toc": { "base_numbering": 1, diff --git a/doc/example/roadrunner.ipynb b/doc/example/roadrunner.ipynb index 10566c2ac..a553feceb 100644 --- a/doc/example/roadrunner.ipynb +++ b/doc/example/roadrunner.ipynb @@ -62,10 +62,7 @@ "\n", "\n", "# name of the model that will also be the name of the python module\n", - "model_name = \"boehm_JProteomeRes2014\"\n", - "\n", - "# output directory\n", - "model_output_dir = \"tmp/\" + model_name" + "model_name = \"conversion_reaction\"" ] }, { @@ -223,6 +220,7 @@ }, "outputs": [], "source": [ + "%%time\n", "result = optimize.minimize(\n", " problem=problem,\n", " optimizer=optimizer,\n", @@ -380,7 +378,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.2" + "version": "3.12.3" } }, "nbformat": 4, diff --git a/doc/example/sampler_study.ipynb b/doc/example/sampler_study.ipynb index b466b028d..f5e0ccf95 100644 --- a/doc/example/sampler_study.ipynb +++ b/doc/example/sampler_study.ipynb @@ -301,7 +301,7 @@ "source": [ "sampler = sample.MetropolisSampler({\"std\": 0.5})\n", "result = sample.sample(\n", - " problem, 1e4, sampler, x0=np.array([0.5]), filename=None\n", + " problem, 1e3, sampler, x0=np.array([0.5]), filename=None\n", ")" ] }, @@ -331,7 +331,7 @@ "source": [ "sampler = sample.MetropolisSampler({\"std\": 1})\n", "result = sample.sample(\n", - " problem, 1e4, sampler, x0=np.array([0.5]), filename=None\n", + " problem, 1e3, sampler, x0=np.array([0.5]), filename=None\n", ")" ] }, @@ -620,8 +620,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The internal `dynesty` sampler can be saved and restored, for post-sampling analysis. For example, pyPESTO stores resampled MCMC-like samples from the `dynesty` sampler by default. The following code shows how to save and load the internal dynesty sampler, to facilitate post-sampling analysis of both the resampled and original chains. N.B.: when working across different computers, you might prefer to work with the raw sample results via `pypesto.sample.dynesty.save_raw_results` and `load_raw_results`.", - "\n", + "The internal `dynesty` sampler can be saved and restored, for post-sampling analysis. For example, pyPESTO stores resampled MCMC-like samples from the `dynesty` sampler by default. The following code shows how to save and load the internal dynesty sampler, to facilitate post-sampling analysis of both the resampled and original chains. N.B.: when working across different computers, you might prefer to work with the raw sample results via `pypesto.sample.dynesty.save_raw_results` and `load_raw_results`.\n", "First, we save the internal sampler." 
] }, @@ -772,7 +771,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.10" + "version": "3.12.3" } }, "nbformat": 4, diff --git a/doc/example/sampling_diagnostics.ipynb b/doc/example/sampling_diagnostics.ipynb index 21827fa74..70db98959 100644 --- a/doc/example/sampling_diagnostics.ipynb +++ b/doc/example/sampling_diagnostics.ipynb @@ -680,7 +680,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.2" + "version": "3.12.3" } }, "nbformat": 4, diff --git a/doc/example/semiquantitative_data.ipynb b/doc/example/semiquantitative_data.ipynb index 721de8f20..bd277ca3f 100644 --- a/doc/example/semiquantitative_data.ipynb +++ b/doc/example/semiquantitative_data.ipynb @@ -457,7 +457,7 @@ ], "metadata": { "kernelspec": { - "display_name": "dev_venv", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -471,7 +471,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.2" + "version": "3.12.3" }, "vscode": { "interpreter": { @@ -480,5 +480,5 @@ } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/doc/example/store.ipynb b/doc/example/store.ipynb index 58f9902af..18df12962 100644 --- a/doc/example/store.ipynb +++ b/doc/example/store.ipynb @@ -25,6 +25,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -51,6 +54,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" }, @@ -99,6 +105,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" }, @@ -108,7 +117,7 @@ "source": [ "%%capture\n", "# directory of the PEtab problem\n", - "petab_yaml = \"./boehm_JProteomeRes2014/boehm_JProteomeRes2014.yaml\"\n", + "petab_yaml = \"./conversion_reaction/conversion_reaction.yaml\"\n", "\n", "importer = pypesto.petab.PetabImporter.from_yaml(petab_yaml)\n", "problem = importer.create_problem(verbose=False)" @@ -144,6 +153,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" }, @@ -159,7 +171,7 @@ ")\n", "\n", "# set number of starts\n", - "n_starts = 15 # usually a larger number >=100 is used\n", + "n_starts = 10 # usually a larger number >=100 is used\n", "\n", "# Optimization\n", "result = pypesto.optimize.minimize(\n", @@ -172,6 +184,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" }, @@ -198,6 +213,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" }, @@ -212,7 +230,7 @@ " problem=problem,\n", " result=result,\n", " optimizer=optimizer,\n", - " profile_index=np.array([1, 1, 1, 0, 0, 0, 0, 0, 1]),\n", + " profile_index=np.array([0, 1]),\n", ")" ] }, @@ -232,6 +250,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" }, @@ -246,7 +267,7 @@ "result = sample.sample(\n", " problem=problem,\n", " sampler=sampler,\n", - " n_samples=5000, # rather low\n", + " n_samples=1000, # rather low\n", " result=result,\n", " filename=None,\n", ")" @@ -270,6 +291,9 @@ "execution_count": null, "metadata": { "collapsed": false, + 
"jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" }, @@ -308,6 +332,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" }, @@ -337,6 +364,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" }, @@ -354,6 +384,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" }, @@ -406,6 +439,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" }, @@ -423,6 +459,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" }, @@ -451,6 +490,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" }, @@ -467,6 +509,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" }, @@ -494,6 +539,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" }, @@ -510,6 +558,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" }, @@ -581,6 +632,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" }, @@ -588,6 +642,7 @@ }, "outputs": [], "source": [ + "%%time\n", "# record the history\n", "history_options = pypesto.HistoryOptions(trace_record=True)\n", "\n", @@ -617,6 +672,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" }, @@ -660,12 +718,16 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } }, "outputs": [], "source": [ + "%%time\n", "# create temporary file\n", "with tempfile.NamedTemporaryFile(suffix=\"_{id}.csv\") as fn_csv:\n", " # record the history and store to CSV\n", @@ -699,6 +761,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -747,12 +812,16 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } }, "outputs": [], "source": [ + "%%time\n", "# create temporary file\n", "f_hdf5 = tempfile.NamedTemporaryFile(suffix=\".hdf5\", delete=False)\n", "fn_hdf5 = f_hdf5.name\n", @@ -777,6 +846,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" } @@ -808,6 +880,9 @@ "execution_count": null, "metadata": { "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, "pycharm": { "name": "#%%\n" }, @@ -827,14 +902,17 @@ { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, "outputs": [], "source": [ "# close the temporary file\n", "f_hdf5.close()" - ], - "metadata": { - "collapsed": false - } + ] } ], "metadata": { @@ -853,7 +931,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.10" + "version": 
"3.12.3" } }, "nbformat": 4, diff --git a/doc/example/synthetic_data.ipynb b/doc/example/synthetic_data.ipynb index 75041530c..e5a37cf4c 100644 --- a/doc/example/synthetic_data.ipynb +++ b/doc/example/synthetic_data.ipynb @@ -150,7 +150,7 @@ "outputs": [], "source": [ "pypesto_result_original = pypesto.optimize.minimize(\n", - " pypesto_problem_original, n_starts=20\n", + " pypesto_problem_original, n_starts=10 # For demonstration purposes only n=10 starts are used. Usually n >= 100\n", ")" ] }, @@ -270,7 +270,7 @@ ")\n", "pypesto_problem_synthetic = pypesto_importer_synthetic.create_problem()\n", "pypesto_result_synthetic = pypesto.optimize.minimize(\n", - " pypesto_problem_synthetic, n_starts=20\n", + " pypesto_problem_synthetic, n_starts=10\n", ")" ] }, @@ -324,7 +324,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.10" + "version": "3.12.3" } }, "nbformat": 4, diff --git a/tox.ini b/tox.ini index 801f8ed08..59d940c9b 100644 --- a/tox.ini +++ b/tox.ini @@ -129,7 +129,7 @@ description = [testenv:notebooks1] allowlist_externals = bash -extras = example,amici,petab,pyswarm,pymc3,cma,nlopt,fides,roadrunner +extras = example,amici,petab,pyswarms,pymc3,cma,nlopt,fides,roadrunner commands = bash test/run_notebook.sh 1 description = From 2eee0812fb0e65d73bb71b64870e9265bc052897 Mon Sep 17 00:00:00 2001 From: Paul Jonas Jost <70631928+PaulJonasJost@users.noreply.github.com> Date: Thu, 10 Oct 2024 11:07:54 +0200 Subject: [PATCH 29/37] Changes to codeowners discussed at retreat (#1485) * Changes to codeowners discussed at retreat * Removed codeowner of visualization --- .github/CODEOWNERS | 21 ++++++++++----------- doc/authors.rst | 1 + 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 94a514f6b..7d10fd310 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,7 +1,7 @@ # see https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners # default owners = active maintainers -* @Doresic @PaulJonasJost @m-philipps +* @Doresic @PaulJonasJost @vwiela # Examples /doc/example/censored_data.ipynb @Doresic @@ -28,29 +28,28 @@ /pypesto/hierarchical/semiquantitative/ @Doresic /pypesto/history/ @PaulJonasJost /pypesto/objective/ @PaulJonasJost +/pypesto/objective/julia @vwiela /pypesto/objective/amici/ @dweindl @FFroehlich /pypesto/objective/jax/ @FFroehlich /pypesto/objective/aesara/ @FFroehlich /pypesto/optimize/ @PaulJonasJost /pypesto/petab/ @dweindl @FFroehlich /pypesto/predict/ @dilpath -/pypesto/problem/ @PaulJonasJost @m-philipps -/pypesto/profile/ @PaulJonasJost +/pypesto/problem/ @PaulJonasJost @vwiela +/pypesto/profile/ @PaulJonasJost @Doresic /pypesto/result/ @PaulJonasJost -/pypesto/sample/ @dilpath @vwiela +/pypesto/sample/ @dilpath @arrjon /pypesto/select/ @dilpath /pypesto/startpoint/ @PaulJonasJost /pypesto/store/ @PaulJonasJost -/pypesto/visualize/ @stephanmg @m-philipps # Tests -/test/base/ @PaulJonasJost @m-philipps +/test/base/ @PaulJonasJost @vwiela /test/doc/ @PaulJonasJost -/test/hierarchical/ @dweindl @doresic -/test/julia/ @PaulJonasJost +/test/hierarchical/ @dweindl @Doresic +/test/julia/ @PaulJonasJost @vwiela /test/optimize/ @PaulJonasJost /test/petab/ @dweindl @FFroehlich -/test/profile/ @PaulJonasJost -/test/sample/ @dilpath +/test/profile/ @PaulJonasJost @Doresic +/test/sample/ @dilpath @arrjon /test/select/ @dilpath -/test/visualize/ @stephanmg @m-philipps diff --git a/doc/authors.rst 
b/doc/authors.rst index f08b783e4..7025f54f3 100644 --- a/doc/authors.rst +++ b/doc/authors.rst @@ -24,3 +24,4 @@ with major contributions by (status 2024): - Erika Dudkin - Domagoj Doresic - Jonas Arruda +- Maren Philipps From dba0e59df57fbe39990378bf9d1f91a888af7d9a Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Thu, 17 Oct 2024 14:47:55 +0200 Subject: [PATCH 30/37] SacessOptimizer fixes (#1476) * Make SacessOptimizer conform more to the original saCeSS - default settings were shifted by one worker. * More informative debugging output * Add `RefSet.__repr__` * Test with `SacessOptions` * Fix x trace in history (`history.update` does not copy x by itself); fval trace was correct --- pypesto/optimize/ess/ess.py | 2 +- pypesto/optimize/ess/refset.py | 8 ++++++++ pypesto/optimize/ess/sacess.py | 24 +++++++++++------------- test/optimize/test_optimize.py | 18 ++++++++++++++++-- 4 files changed, 36 insertions(+), 16 deletions(-) diff --git a/pypesto/optimize/ess/ess.py b/pypesto/optimize/ess/ess.py index a9cad7b36..ca8ffa2f6 100644 --- a/pypesto/optimize/ess/ess.py +++ b/pypesto/optimize/ess/ess.py @@ -564,7 +564,7 @@ def _maybe_update_global_best(self, x, fx): self.fx_best = fx self.x_best_has_changed = True self.history.update( - self.x_best, + self.x_best.copy(), (0,), pypesto.C.MODE_FUN, {pypesto.C.FVAL: self.fx_best}, diff --git a/pypesto/optimize/ess/refset.py b/pypesto/optimize/ess/refset.py index 74f5c8f6b..0e3cff403 100644 --- a/pypesto/optimize/ess/refset.py +++ b/pypesto/optimize/ess/refset.py @@ -67,6 +67,14 @@ def __init__( self.n_stuck = np.zeros(shape=[dim]) self.attributes: dict[Any, np.array] = {} + def __repr__(self): + fx = ( + f", fx=[{np.min(self.fx)} ... {np.max(self.fx)}]" + if self.fx is not None and len(self.fx) >= 2 + else "" + ) + return f"RefSet(dim={self.dim}{fx})" + def sort(self): """Sort RefSet by quality.""" order = np.argsort(self.fx) diff --git a/pypesto/optimize/ess/sacess.py b/pypesto/optimize/ess/sacess.py index 775d5458f..c54ef0ad6 100644 --- a/pypesto/optimize/ess/sacess.py +++ b/pypesto/optimize/ess/sacess.py @@ -195,7 +195,7 @@ def minimize( start_time = time.time() logger.debug( f"Running {self.__class__.__name__} with {self.num_workers} " - f"workers: {self.ess_init_args}" + f"workers: {self.ess_init_args} and {self.options}." ) ess_init_args = self.ess_init_args or get_default_ess_options( num_workers=self.num_workers, dim=problem.dim @@ -563,7 +563,8 @@ def run( self._manager._logger = self._logger self._logger.debug( - f"#{self._worker_idx} starting " f"({self._ess_kwargs})." + f"#{self._worker_idx} starting " + f"({self._ess_kwargs}, {self._options})." ) evaluator = create_function_evaluator( @@ -694,6 +695,13 @@ def _maybe_adapt(self, problem: Problem): f"Updated settings on worker {self._worker_idx} to " f"{self._ess_kwargs}" ) + else: + self._logger.debug( + f"Worker {self._worker_idx} not adapting. " + f"Received: {self._n_received_solutions} <= {self._options.adaptation_sent_coeff * self._n_sent_solutions + self._options.adaptation_sent_offset}, " + f"Sent: {self._n_sent_solutions}, " + f"neval: {self._neval} <= {problem.dim * self._options.adaptation_min_evals}." 
+ ) def maybe_update_best(self, x: np.array, fx: float): """Maybe update the best known solution and send it to the manager.""" @@ -840,13 +848,6 @@ def dim_refset(x): return max(min_dimrefset, ceil((1 + sqrt(4 * dim * x)) / 2)) settings = [ - # settings for first worker - { - "dim_refset": dim_refset(10), - "balance": 0.5, - "local_n2": 10, - }, - # for the remaining workers, cycle through these settings # 1 { "dim_refset": dim_refset(1), @@ -998,10 +999,7 @@ def dim_refset(x): elif local_optimizer is not False: cur_settings["local_optimizer"] = local_optimizer - return [ - settings[0], - *(itertools.islice(itertools.cycle(settings[1:]), num_workers - 1)), - ] + return list(itertools.islice(itertools.cycle(settings), num_workers)) class SacessFidesFactory: diff --git a/test/optimize/test_optimize.py b/test/optimize/test_optimize.py index 160e5baa9..48ebdea55 100644 --- a/test/optimize/test_optimize.py +++ b/test/optimize/test_optimize.py @@ -20,8 +20,11 @@ import pypesto.optimize as optimize from pypesto.optimize.ess import ( ESSOptimizer, + FunctionEvaluatorMP, + RefSet, SacessFidesFactory, SacessOptimizer, + SacessOptions, get_default_ess_options, ) from pypesto.optimize.util import ( @@ -490,6 +493,11 @@ def test_ess(problem, local_optimizer, ess_type, request): sacess_loglevel=logging.DEBUG, ess_loglevel=logging.WARNING, ess_init_args=ess_init_args, + options=SacessOptions( + adaptation_min_evals=500, + adaptation_sent_offset=10, + adaptation_sent_coeff=5, + ), ) else: raise ValueError(f"Unsupported ESS type {ess_type}.") @@ -522,8 +530,6 @@ def test_ess_multiprocess(problem, request): from fides.constants import Options as FidesOptions - from pypesto.optimize.ess import ESSOptimizer, FunctionEvaluatorMP, RefSet - # augment objective with parameter prior to check it's copyable # https://github.com/ICB-DCM/pyPESTO/issues/1465 # https://github.com/ICB-DCM/pyPESTO/pull/1467 @@ -563,6 +569,14 @@ def test_ess_multiprocess(problem, request): print("ESS result: ", res.summary()) +def test_ess_refset_repr(): + assert RefSet(10, None).__repr__() == "RefSet(dim=10)" + assert ( + RefSet(10, None, x=np.zeros(10), fx=np.arange(10)).__repr__() + == "RefSet(dim=10, fx=[0 ... 9])" + ) + + def test_scipy_integrated_grad(): integrated = True obj = rosen_for_sensi(max_sensi_order=2, integrated=integrated)["obj"] From 8c35ccffcb6d7fc40c911909613a13a305c84307 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Thu, 17 Oct 2024 15:25:15 +0200 Subject: [PATCH 31/37] GHA: Fix pysb installation failures (#1490) Branch was merged to master and deleted. 
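The x-trace fix in the SacessOptimizer patch above (passing `self.x_best.copy()` to `history.update`) addresses a general NumPy aliasing pitfall: storing a reference to an array that is later modified in place silently changes the stored value as well. A minimal, self-contained illustration of that pitfall, using plain NumPy rather than the pyPESTO history API:

    import numpy as np

    trace = []                     # stands in for a history of visited points
    x_best = np.array([1.0, 2.0])

    trace.append(x_best)           # stores a reference, not a snapshot
    x_best[:] = [3.0, 4.0]         # later in-place update of the same array
    print(trace[0])                # [3. 4.] -- the "recorded" point changed too

    trace.append(x_best.copy())    # a copy freezes the value at recording time
    x_best[:] = [5.0, 6.0]
    print(trace[1])                # still [3. 4.], unaffected by later updates

This also explains why the fval trace was unaffected: a plain float is immutable, so a stored objective value cannot alias later updates.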
Co-authored-by: Paul Jonas Jost <70631928+PaulJonasJost@users.noreply.github.com> --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 59d940c9b..6ad9f8719 100644 --- a/tox.ini +++ b/tox.ini @@ -81,7 +81,7 @@ deps = git+https://github.com/AMICI-dev/amici.git@develop\#egg=amici&subdirectory=python/sdist commands = python3 -m pip install git+https://github.com/PEtab-dev/petab_test_suite@main - python3 -m pip install git+https://github.com/FFroehlich/pysb@fix_pattern_matching + python3 -m pip install git+https://github.com/pysb/pysb@master python3 -m pip install -U copasi-basico[petab] python3 -m pip install -U sympy pytest --cov=pypesto --cov-report=xml --cov-append \ From 58c7b5fcdcc54690f80f8d4e3e407e39420560b9 Mon Sep 17 00:00:00 2001 From: Paul Jonas Jost <70631928+PaulJonasJost@users.noreply.github.com> Date: Fri, 18 Oct 2024 11:06:07 +0200 Subject: [PATCH 32/37] Clean up sampling tests (#1486) * Moved dyntesty mcmc test into pipeline * Merged two very similar tests * Remove unused sample result * Sized down sample size to a fifth (still getting through in 80% of test) and marked as flakey. * Moved harmonic mean to other evidence calculations * Added util file for problem definition and constants * Fixed Small typo and limited the number of chains for pymc * Removed "test_groundtruth_separated_modes", as it is not really a test as for of an exhibition, which is covered in the jupyter notebook "sampler_study" * Adapted import in variational to new changes * Fixed error in multi_startpoint tests due to increased number of chains --- test/sample/test_sample.py | 471 ++++++--------------------- test/sample/util.py | 145 +++++++++ test/variational/test_variational.py | 32 +- 3 files changed, 256 insertions(+), 392 deletions(-) create mode 100644 test/sample/util.py diff --git a/test/sample/test_sample.py b/test/sample/test_sample.py index 589b336a2..0371faf1c 100644 --- a/test/sample/test_sample.py +++ b/test/sample/test_sample.py @@ -1,12 +1,10 @@ """Tests for `pypesto.sample` methods.""" -import os import numpy as np import pytest -import scipy.optimize as so from scipy.integrate import quad -from scipy.stats import ks_2samp, kstest, multivariate_normal, norm, uniform +from scipy.stats import ks_2samp, kstest, norm import pypesto import pypesto.optimize as optimize @@ -18,148 +16,27 @@ Objective, ) - -def gaussian_llh(x): - return float(norm.logpdf(x).item()) - - -def gaussian_nllh_grad(x): - mu, sigma = 0, 1 - return np.array([((x - mu) / (sigma**2))]) - - -def gaussian_nllh_hess(x): - sigma = 1 - return np.array([(1 / (sigma**2))]) - - -def gaussian_problem(): - def nllh(x): - return -gaussian_llh(x) - - objective = pypesto.Objective(fun=nllh) - problem = pypesto.Problem(objective=objective, lb=[-10], ub=[10]) - return problem - - -def gaussian_mixture_llh(x): - return np.log( - 0.3 * multivariate_normal.pdf(x, mean=-1.5, cov=0.1) - + 0.7 * multivariate_normal.pdf(x, mean=2.5, cov=0.2) - ) - - -def gaussian_mixture_problem(): - """Problem based on a mixture of gaussians.""" - - def nllh(x): - return -gaussian_mixture_llh(x) - - objective = pypesto.Objective(fun=nllh) - problem = pypesto.Problem( - objective=objective, lb=[-10], ub=[10], x_names=["x"] - ) - return problem - - -def gaussian_mixture_separated_modes_llh(x): - return np.log( - 0.5 * multivariate_normal.pdf(x, mean=-1.0, cov=0.7) - + 0.5 * multivariate_normal.pdf(x, mean=100.0, cov=0.8) - ) - - -def gaussian_mixture_separated_modes_problem(): - """Problem based on a mixture of 
gaussians with far/separated modes.""" - - def nllh(x): - return -gaussian_mixture_separated_modes_llh(x) - - objective = pypesto.Objective(fun=nllh) - problem = pypesto.Problem( - objective=objective, lb=[-100], ub=[200], x_names=["x"] - ) - return problem - - -def rosenbrock_problem(): - """Problem based on rosenbrock objective. - - Features - -------- - * 3-dim - * has fixed parameters - * has gradient - """ - objective = pypesto.Objective(fun=so.rosen, grad=so.rosen_der) - - dim_full = 2 - lb = -5 * np.ones((dim_full, 1)) - ub = 5 * np.ones((dim_full, 1)) - - problem = pypesto.Problem( - objective=objective, - lb=lb, - ub=ub, - x_fixed_indices=[1], - x_fixed_vals=[2], - ) - return problem - - -def create_petab_problem(): - import petab.v1 as petab - - import pypesto.petab - - current_path = os.path.dirname(os.path.realpath(__file__)) - dir_path = os.path.abspath( - os.path.join(current_path, "..", "..", "doc", "example") - ) - # import to petab - petab_problem = petab.Problem.from_yaml( - dir_path + "/conversion_reaction/conversion_reaction.yaml" - ) - # import to pypesto - importer = pypesto.petab.PetabImporter(petab_problem) - # create problem - problem = importer.create_problem() - - return problem - - -def sample_petab_problem(): - # create problem - problem = create_petab_problem() - - sampler = sample.AdaptiveMetropolisSampler( - options={ - "show_progress": False, - }, - ) - result = sample.sample( - problem, - n_samples=1000, - sampler=sampler, - x0=np.array([3, -4]), - ) - return result - - -def prior(x): - return multivariate_normal.pdf(x, mean=-1.0, cov=0.7) - - -def likelihood(x): - return uniform.pdf(x, loc=-10.0, scale=20.0)[0] - - -def negative_log_posterior(x): - return -np.log(likelihood(x)) - np.log(prior(x)) - - -def negative_log_prior(x): - return -np.log(prior(x)) +from .util import ( + LB_GAUSSIAN, + N_CHAINS, + N_SAMPLE_FEW, + N_SAMPLE_MANY, + N_SAMPLE_SOME, + N_STARTS_FEW, + N_STARTS_SOME, + STATISTIC_TOL, + UB_GAUSSIAN, + X_NAMES, + create_petab_problem, + gaussian_llh, + gaussian_mixture_problem, + gaussian_nllh_grad, + gaussian_nllh_hess, + gaussian_problem, + negative_log_posterior, + negative_log_prior, + rosenbrock_problem, +) @pytest.fixture( @@ -200,16 +77,19 @@ def sampler(request): options={ "show_progress": False, }, - n_chains=5, + n_chains=N_CHAINS, ) elif request.param == "Pymc": from pypesto.sample.pymc import PymcSampler - return PymcSampler(tune=5, progressbar=False) + return PymcSampler(tune=5, progressbar=False, chains=N_CHAINS) elif request.param == "Emcee": return sample.EmceeSampler(nwalkers=10) elif request.param == "Dynesty": - return sample.DynestySampler(objective_type="negloglike") + return sample.DynestySampler( + objective_type=OBJECTIVE_NEGLOGLIKE, + run_args={"maxiter": N_SAMPLE_FEW}, + ) @pytest.fixture(params=["gaussian", "gaussian_mixture", "rosenbrock"]) @@ -228,7 +108,7 @@ def test_pipeline(sampler, problem): optimizer = optimize.ScipyOptimizer(options={"maxiter": 10}) result = optimize.minimize( problem=problem, - n_starts=3, + n_starts=N_STARTS_FEW, optimizer=optimizer, progress_bar=False, ) @@ -237,11 +117,17 @@ def test_pipeline(sampler, problem): result = sample.sample( problem=problem, sampler=sampler, - n_samples=100, + n_samples=N_SAMPLE_FEW, result=result, ) - # remove warnings in test/sample/test_sample. 
- # Warning here: pypesto/visualize/sampling.py:1104 + # test dynesty mcmc samples + if isinstance(sampler, sample.DynestySampler): + trace_original = sampler.get_original_samples().trace_neglogpost + trace_mcmc = result.sample_result.trace_neglogpost + # Nested sampling function values are monotonically increasing + assert (np.diff(trace_original) <= 0).all() + # MCMC samples are not + assert not (np.diff(trace_mcmc) <= 0).all() # geweke test sample.geweke_test(result=result) @@ -255,7 +141,7 @@ def test_ground_truth(): options={ "show_progress": False, }, - n_chains=5, + n_chains=N_CHAINS, ) problem = gaussian_problem() @@ -263,11 +149,12 @@ def test_ground_truth(): result = optimize.minimize( problem, progress_bar=False, + n_starts=N_STARTS_SOME, ) result = sample.sample( problem, - n_samples=5000, + n_samples=N_SAMPLE_MANY, result=result, sampler=sampler, ) @@ -278,135 +165,34 @@ def test_ground_truth(): # test against different distributions statistic, pval = kstest(samples, "norm") - print(statistic, pval) - assert statistic < 0.1 + assert statistic < STATISTIC_TOL statistic, pval = kstest(samples, "uniform") - print(statistic, pval) - assert statistic > 0.1 - - -def test_ground_truth_separated_modes(): - """Test whether we actually retrieve correct distributions.""" - # use best self-implemented sampler, which has a chance to correctly - # sample from the distribution - - # First use parallel tempering with 3 chains - sampler = sample.AdaptiveParallelTemperingSampler( - internal_sampler=sample.AdaptiveMetropolisSampler(), - options={ - "show_progress": False, - }, - n_chains=3, - ) - - problem = gaussian_mixture_separated_modes_problem() - - result = sample.sample( - problem, - n_samples=1e4, - sampler=sampler, - x0=np.array([0.0]), - ) - - # get samples of first chain - samples = result.sample_result.trace_x[0, :, 0] - - # generate bimodal ground-truth samples - # "first" mode centered at -1 - rvs1 = norm.rvs(size=5000, loc=-1.0, scale=np.sqrt(0.7)) - # "second" mode centered at 100 - rvs2 = norm.rvs(size=5001, loc=100.0, scale=np.sqrt(0.8)) - - # test for distribution similarity - statistic, pval = ks_2samp(np.concatenate([rvs1, rvs2]), samples) - - # only parallel tempering finds both modes - print(statistic, pval) - assert statistic < 0.2 - - # sample using adaptive metropolis (single-chain) - # initiated around the "first" mode of the distribution - sampler = sample.AdaptiveMetropolisSampler( - options={ - "show_progress": False, - }, - ) - result = sample.sample( - problem, - n_samples=1e4, - sampler=sampler, - x0=np.array([-2.0]), - ) - - # get samples of first chain - samples = result.sample_result.trace_x[0, :, 0] - - # test for distribution similarity - statistic, pval = ks_2samp(np.concatenate([rvs1, rvs2]), samples) - - # single-chain adaptive metropolis does not find both modes - print(statistic, pval) - assert statistic > 0.1 - - # actually centered at the "first" mode - statistic, pval = ks_2samp(rvs1, samples) - - print(statistic, pval) - assert statistic < 0.1 - - # sample using adaptive metropolis (single-chain) - # initiated around the "second" mode of the distribution - sampler = sample.AdaptiveMetropolisSampler( - options={ - "show_progress": False, - }, - ) - result = sample.sample( - problem, - n_samples=1e4, - sampler=sampler, - x0=np.array([120.0]), - ) - - # get samples of first chain - samples = result.sample_result.trace_x[0, :, 0] - - # test for distribution similarity - statistic, pval = ks_2samp(np.concatenate([rvs1, rvs2]), samples) - - # single-chain 
adaptive metropolis does not find both modes - print(statistic, pval) - assert statistic > 0.1 - - # actually centered at the "second" mode - statistic, pval = ks_2samp(rvs2, samples) - - print(statistic, pval) - assert statistic < 0.1 + assert statistic > STATISTIC_TOL def test_multiple_startpoints(): problem = gaussian_problem() - x0s = [np.array([0]), np.array([1])] + x0s = [np.array([0]), np.array([1]), np.array([2])] sampler = sample.ParallelTemperingSampler( internal_sampler=sample.MetropolisSampler(), options={ "show_progress": False, }, - n_chains=2, + n_chains=N_CHAINS, ) result = sample.sample( problem, - n_samples=10, + n_samples=N_SAMPLE_FEW, x0=x0s, sampler=sampler, ) - assert result.sample_result.trace_neglogpost.shape[0] == 2 + assert result.sample_result.trace_neglogpost.shape[0] == N_CHAINS assert [ result.sample_result.trace_x[0][0], result.sample_result.trace_x[1][0], + result.sample_result.trace_x[2][0], ] == x0s @@ -421,23 +207,20 @@ def test_regularize_covariance(): assert np.all(np.linalg.eigvals(reg) >= 0) -def test_geweke_test_switch(): - """Check geweke test returns expected burn in index.""" - warm_up = np.zeros((100, 2)) - converged = np.ones((901, 2)) - chain = np.concatenate((warm_up, converged), axis=0) - burn_in = sample.diagnostics.burn_in_by_sequential_geweke(chain=chain) - assert burn_in == 100 - - -def test_geweke_test_switch_short(): - """Check geweke test returns expected burn in index - for small sample numbers.""" - warm_up = np.zeros((25, 2)) - converged = np.ones((75, 2)) +@pytest.mark.parametrize( + "non_converged_size, converged_size", + [ + (100, 901), # "Larger" sample numbers + (25, 75), # Small sample numbers + ], +) +def test_geweke_test_switch(non_converged_size, converged_size): + """Check geweke test returns expected burn in index for different chain sizes.""" + warm_up = np.zeros((non_converged_size, 2)) + converged = np.ones((converged_size, 2)) chain = np.concatenate((warm_up, converged), axis=0) burn_in = sample.diagnostics.burn_in_by_sequential_geweke(chain=chain) - assert burn_in == 25 + assert burn_in == non_converged_size def test_geweke_test_unconverged(): @@ -453,7 +236,7 @@ def test_geweke_test_unconverged(): # optimization result = optimize.minimize( problem=problem, - n_starts=3, + n_starts=N_STARTS_FEW, progress_bar=False, ) @@ -461,7 +244,7 @@ def test_geweke_test_unconverged(): result = sample.sample( problem, sampler=sampler, - n_samples=100, + n_samples=N_SAMPLE_FEW, result=result, ) @@ -482,7 +265,7 @@ def test_autocorrelation_pipeline(): # optimization result = optimize.minimize( problem=problem, - n_starts=3, + n_starts=N_STARTS_FEW, progress_bar=False, ) @@ -490,7 +273,7 @@ def test_autocorrelation_pipeline(): result = sample.sample( problem=problem, sampler=sampler, - n_samples=1000, + n_samples=N_SAMPLE_SOME, result=result, ) @@ -532,7 +315,7 @@ def test_autocorrelation_short_chain(): # optimization result = optimize.minimize( problem=problem, - n_starts=3, + n_starts=N_STARTS_FEW, progress_bar=False, ) @@ -540,7 +323,7 @@ def test_autocorrelation_short_chain(): result = sample.sample( problem, sampler=sampler, - n_samples=10, + n_samples=N_SAMPLE_FEW, result=result, ) @@ -622,7 +405,7 @@ def test_empty_prior(): result = sample.sample( test_problem, - n_samples=50, + n_samples=N_SAMPLE_FEW, sampler=sampler, x0=np.array([0.0]), ) @@ -650,9 +433,9 @@ def test_prior(): test_problem = pypesto.Problem( objective=posterior_fun, x_priors_defs=prior_object, - lb=-10, - ub=10, - x_names=["x"], + lb=LB_GAUSSIAN, + 
ub=UB_GAUSSIAN, + x_names=X_NAMES, ) sampler = sample.AdaptiveMetropolisSampler( @@ -663,7 +446,7 @@ def test_prior(): result = sample.sample( test_problem, - n_samples=1e4, + n_samples=N_SAMPLE_MANY, sampler=sampler, x0=np.array([0.0]), ) @@ -682,9 +465,7 @@ def test_prior(): # check sample distribution agreement with the ground-truth statistic, pval = ks_2samp(rvs, samples) - print(statistic, pval) - - assert statistic < 0.1 + assert statistic < STATISTIC_TOL def test_samples_cis(): @@ -705,7 +486,7 @@ def test_samples_cis(): # optimization result = optimize.minimize( problem=problem, - n_starts=3, + n_starts=N_STARTS_FEW, progress_bar=False, ) @@ -713,7 +494,7 @@ def test_samples_cis(): result = sample.sample( problem=problem, sampler=sampler, - n_samples=1000, + n_samples=N_SAMPLE_SOME, result=result, ) @@ -745,26 +526,6 @@ def test_samples_cis(): assert lb.shape == ub.shape -def test_dynesty_mcmc_samples(): - problem = gaussian_problem() - sampler = sample.DynestySampler(objective_type=OBJECTIVE_NEGLOGLIKE) - - result = sample.sample( - problem=problem, - sampler=sampler, - n_samples=None, - filename=None, - ) - - original_sample_result = sampler.get_original_samples() - mcmc_sample_result = result.sample_result - - # Nested sampling function values are monotonically increasing - assert (np.diff(original_sample_result.trace_neglogpost) <= 0).all() - # MCMC samples are not - assert not (np.diff(mcmc_sample_result.trace_neglogpost) <= 0).all() - - def test_dynesty_posterior(): # define negative log posterior posterior_fun = pypesto.Objective(fun=negative_log_posterior) @@ -779,21 +540,21 @@ def test_dynesty_posterior(): test_problem = pypesto.Problem( objective=posterior_fun, x_priors_defs=prior_object, - lb=-10, - ub=10, - x_names=["x"], + lb=LB_GAUSSIAN, + ub=UB_GAUSSIAN, + x_names=X_NAMES, ) # define sampler sampler = sample.DynestySampler( - objective_type=OBJECTIVE_NEGLOGPOST + objective_type=OBJECTIVE_NEGLOGPOST, + run_args={"maxiter": N_SAMPLE_FEW}, ) # default result = sample.sample( problem=test_problem, sampler=sampler, n_samples=None, - filename=None, ) original_sample_result = sampler.get_original_samples() @@ -826,7 +587,7 @@ def test_thermodynamic_integration(): result = sample.sample( problem, - n_samples=2000, + n_samples=2 * N_SAMPLE_SOME, result=result, sampler=sampler, ) @@ -849,6 +610,16 @@ def test_thermodynamic_integration(): ) ) + # harmonic mean log evidence + harmonic_evidence = sample.evidence.harmonic_mean_log_evidence(result) + # compute the log evidence using stabilized harmonic mean + prior_samples = np.random.uniform(problem.lb, problem.ub, size=100) + harmonic_stabilized_evidence = sample.evidence.harmonic_mean_log_evidence( + result=result, + prior_samples=prior_samples, + neg_log_likelihood_fun=problem.objective, + ) + # compute evidence evidence = quad( lambda x: 1 @@ -865,6 +636,10 @@ def test_thermodynamic_integration(): assert np.isclose( log_evidence_steppingstone, np.log(evidence[0]), atol=tol ) + assert np.isclose(harmonic_evidence, np.log(evidence[0]), atol=tol) + assert np.isclose( + harmonic_stabilized_evidence, np.log(evidence[0]), atol=tol + ) def test_laplace_approximation_log_evidence(): @@ -876,7 +651,7 @@ def test_laplace_approximation_log_evidence(): # hess result = optimize.minimize( problem=problem, - n_starts=10, + n_starts=N_STARTS_SOME, progress_bar=False, ) log_evidence = sample.evidence.laplace_approximation_log_evidence( @@ -885,50 +660,6 @@ def test_laplace_approximation_log_evidence(): assert np.isclose(log_evidence, 
log_evidence_true, atol=0.1) -@pytest.mark.flaky(reruns=2) -def test_harmonic_mean_log_evidence(): - tol = 2 - # define problem - problem = gaussian_problem() - - # run optimization and MCMC - result = optimize.minimize( - problem, - progress_bar=False, - n_starts=10, - ) - result = sample.sample( - problem, - n_samples=2000, - result=result, - ) - - # compute the log evidence using harmonic mean - harmonic_evidence = sample.evidence.harmonic_mean_log_evidence(result) - # compute the log evidence using stabilized harmonic mean - prior_samples = np.random.uniform(problem.lb, problem.ub, size=100) - harmonic_stabilized_evidence = sample.evidence.harmonic_mean_log_evidence( - result=result, - prior_samples=prior_samples, - neg_log_likelihood_fun=problem.objective, - ) - - # compute real evidence - evidence = quad( - lambda x: 1 - / (problem.ub[0] - problem.lb[0]) - * np.exp(gaussian_llh(x)), - a=problem.lb[0], - b=problem.ub[0], - ) - - # compare to known value - assert np.isclose(harmonic_evidence, np.log(evidence[0]), atol=tol) - assert np.isclose( - harmonic_stabilized_evidence, np.log(evidence[0]), atol=tol - ) - - @pytest.mark.flaky(reruns=3) def test_bridge_sampling(): tol = 2 @@ -950,16 +681,18 @@ def test_bridge_sampling(): ) problem = pypesto.Problem( objective=AggregatedObjective([objective, prior_true]), - lb=[-10], - ub=[10], - x_names=["x"], + lb=LB_GAUSSIAN, + ub=UB_GAUSSIAN, + x_names=X_NAMES, ) # run optimization and MCMC - result = optimize.minimize(problem, progress_bar=False, n_starts=10) + result = optimize.minimize( + problem, progress_bar=False, n_starts=N_STARTS_SOME + ) result = sample.sample( problem, - n_samples=1000, + n_samples=N_SAMPLE_SOME, result=result, ) diff --git a/test/sample/util.py b/test/sample/util.py new file mode 100644 index 000000000..63521eb82 --- /dev/null +++ b/test/sample/util.py @@ -0,0 +1,145 @@ +"""Utility functions and constants for tests. 
Mainly problem definitions.""" + + +import numpy as np +import scipy.optimize as so +from scipy.stats import multivariate_normal, norm, uniform + +import pypesto + +# Constants for Gaussian problems or Uniform with Gaussian prior +MU = 0 # Gaussian mean +SIGMA = 1 # Gaussian standard deviation +LB_GAUSSIAN = [-10] # Lower bound for Gaussian problem +UB_GAUSSIAN = [10] # Upper bound for Gaussian problem +LB_GAUSSIAN_MODES = [-100] # Lower bound for Gaussian modes problem +UB_GAUSSIAN_MODES = [200] # Upper bound for Gaussian modes problem +X_NAMES = ["x"] # Parameter names +MIXTURE_WEIGHTS = [0.3, 0.7] # Weights for Gaussian mixture model +MIXTURE_MEANS = [-1.5, 2.5] # Means for Gaussian mixture model +MIXTURE_COVS = [0.1, 0.2] # Covariances for Gaussian mixture model + +# Constants for general testing +N_STARTS_FEW = 5 # Number of starts for tests that dont require convergence +N_STARTS_SOME = 10 # Number of starts for tests that converge reliably +N_SAMPLE_FEW = 100 # Number of samples for tests that dont require convergence +N_SAMPLE_SOME = 1000 # Number of samples for tests that converge reliably +N_SAMPLE_MANY = 5000 # Number of samples for tests that require convergence +STATISTIC_TOL = 0.2 # Tolerance when comparing distributions +N_CHAINS = 3 # Number of chains for ParallelTempering + + +def gaussian_llh(x): + """Log-likelihood for Gaussian.""" + return float(norm.logpdf(x, loc=MU, scale=SIGMA).item()) + + +def gaussian_nllh_grad(x): + """Negative log-likelihood gradient for Gaussian.""" + return np.array([((x - MU) / (SIGMA**2))]) + + +def gaussian_nllh_hess(x): + """Negative log-likelihood Hessian for Gaussian.""" + return np.array([(1 / (SIGMA**2))]) + + +def gaussian_problem(): + """Defines a simple Gaussian problem.""" + + def nllh(x): + return -gaussian_llh(x) + + objective = pypesto.Objective(fun=nllh) + problem = pypesto.Problem( + objective=objective, lb=LB_GAUSSIAN, ub=UB_GAUSSIAN + ) + return problem + + +def gaussian_mixture_llh(x): + """Log-likelihood for Gaussian mixture model.""" + return np.log( + MIXTURE_WEIGHTS[0] + * multivariate_normal.pdf( + x, mean=MIXTURE_MEANS[0], cov=MIXTURE_COVS[0] + ) + + MIXTURE_WEIGHTS[1] + * multivariate_normal.pdf( + x, mean=MIXTURE_MEANS[1], cov=MIXTURE_COVS[1] + ) + ) + + +def gaussian_mixture_problem(): + """Problem based on a mixture of Gaussians.""" + + def nllh(x): + return -gaussian_mixture_llh(x) + + objective = pypesto.Objective(fun=nllh) + problem = pypesto.Problem( + objective=objective, lb=LB_GAUSSIAN, ub=UB_GAUSSIAN, x_names=X_NAMES + ) + return problem + + +def rosenbrock_problem(): + """Problem based on Rosenbrock objective.""" + objective = pypesto.Objective(fun=so.rosen, grad=so.rosen_der) + + dim_full = 2 + lb = -5 * np.ones((dim_full, 1)) + ub = 5 * np.ones((dim_full, 1)) + + problem = pypesto.Problem( + objective=objective, + lb=lb, + ub=ub, + x_fixed_indices=[1], + x_fixed_vals=[2], + ) + return problem + + +def create_petab_problem(): + """Creates a petab problem.""" + import os + + import petab.v1 as petab + + import pypesto.petab + + current_path = os.path.dirname(os.path.realpath(__file__)) + dir_path = os.path.abspath( + os.path.join(current_path, "..", "..", "doc", "example") + ) + + petab_problem = petab.Problem.from_yaml( + dir_path + "/conversion_reaction/conversion_reaction.yaml" + ) + + importer = pypesto.petab.PetabImporter(petab_problem) + problem = importer.create_problem() + + return problem + + +def prior(x): + """Calculates the prior.""" + return multivariate_normal.pdf(x, mean=-1.0, cov=0.7) + + 
+def likelihood(x): + """Calculates the likelihood.""" + return uniform.pdf(x, loc=-10.0, scale=20.0)[0] + + +def negative_log_posterior(x): + """Calculates the negative log posterior.""" + return -np.log(likelihood(x)) - np.log(prior(x)) + + +def negative_log_prior(x): + """Calculates the negative log prior.""" + return -np.log(prior(x)) diff --git a/test/variational/test_variational.py b/test/variational/test_variational.py index c3b829bf3..f8e66c3a4 100644 --- a/test/variational/test_variational.py +++ b/test/variational/test_variational.py @@ -1,29 +1,15 @@ """Tests for `pypesto.sample` methods.""" -import pytest from scipy.stats import kstest import pypesto.optimize as optimize from pypesto.variational import variational_fit -from ..sample.test_sample import ( - gaussian_mixture_problem, - gaussian_problem, - rosenbrock_problem, -) +from ..sample.test_sample import problem # noqa: F401, fixture from sampling +from ..sample.util import STATISTIC_TOL, gaussian_problem -@pytest.fixture(params=["gaussian", "gaussian_mixture", "rosenbrock"]) -def problem(request): - if request.param == "gaussian": - return gaussian_problem() - if request.param == "gaussian_mixture": - return gaussian_mixture_problem() - elif request.param == "rosenbrock": - return rosenbrock_problem() - - -def test_pipeline(problem): +def test_pipeline(problem): # noqa: F811 """Check that a typical pipeline runs through.""" # optimization optimizer = optimize.ScipyOptimizer(options={"maxiter": 10}) @@ -35,7 +21,7 @@ def test_pipeline(problem): ) # sample - result = variational_fit( + variational_fit( problem=problem, n_iterations=100, n_samples=10, @@ -45,15 +31,15 @@ def test_pipeline(problem): def test_ground_truth(): """Test whether we actually retrieve correct distributions.""" - problem = gaussian_problem() + problem_gaussian = gaussian_problem() result = optimize.minimize( - problem, + problem_gaussian, progress_bar=False, ) result = variational_fit( - problem, + problem_gaussian, n_iterations=10000, n_samples=5000, result=result, @@ -65,8 +51,8 @@ def test_ground_truth(): # test against different distributions statistic, pval = kstest(samples, "norm") print(statistic, pval) - assert statistic < 0.1 + assert statistic < STATISTIC_TOL statistic, pval = kstest(samples, "uniform") print(statistic, pval) - assert statistic > 0.1 + assert statistic > STATISTIC_TOL From bff4bdfb04c0f6c66f7172750ee40281a2ed7017 Mon Sep 17 00:00:00 2001 From: Maren Philipps <55318391+m-philipps@users.noreply.github.com> Date: Tue, 22 Oct 2024 08:26:44 +0200 Subject: [PATCH 33/37] update preprint to article (#1498) --- doc/using_pypesto.bib | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/doc/using_pypesto.bib b/doc/using_pypesto.bib index 890cedc88..c4c85924d 100644 --- a/doc/using_pypesto.bib +++ b/doc/using_pypesto.bib @@ -293,16 +293,18 @@ @Misc{LakrisenkoPat2024 primaryclass = {q-bio.QM}, } -@Misc{PhilippsKoe2024, +@Article{PhilippsKoe2024, author = {Maren Philipps and Antonia Körner and Jakob Vanhoefer and Dilan Pathirana and Jan Hasenauer}, title = {Non-Negative Universal Differential Equations With Applications in Systems Biology}, year = {2024}, - archiveprefix = {arXiv}, - creationdate = {2024-06-28T08:40:06}, - eprint = {2406.14246}, - modificationdate = {2024-06-28T08:40:06}, - primaryclass = {q-bio.QM}, - url = {https://arxiv.org/abs/2406.14246}, + journal = {IFAC-PapersOnLine}, + volume = {58}, + number = {23}, + pages = {25-30}, + issn = {2405-8963}, + doi = 
{https://doi.org/10.1016/j.ifacol.2024.10.005}, + url = {https://www.sciencedirect.com/science/article/pii/S2405896324017518}, + abstract = {Universal differential equations (UDEs) leverage the respective advantages of mechanistic models and artificial neural networks and combine them into one dynamic model. However, these hybrid models can suffer from unrealistic solutions, such as negative values for biochemical quantities. We present non-negative UDE (nUDEs), a constrained UDE variant that guarantees non-negative values. Furthermore, we explore regularisation techniques to improve generalisation and interpretability of UDEs.} } @Article{SchmiesterBra2024, From ab21eea999b3a981c92c0feb59fab485719041a4 Mon Sep 17 00:00:00 2001 From: Doresic <85789271+Doresic@users.noreply.github.com> Date: Wed, 23 Oct 2024 12:31:19 +0200 Subject: [PATCH 34/37] Profile: improve profiling code (#1447) * Initial working update Implement two targets: higher and lower. Choose which one to go to depending on first guess. Different calculation of next_obj_target TODO: change 1.5 to magic factor * Add TODOs * Introduce adaptive max and min steps Implemented adaptive max and min steps in profiling. If the optimization during profiling fails (results in inf value), the algorithm will first try to iteratively decrease `max_step_size` to be closer to the last point that had a successful optimization. If that doesn't work (if we reduce max_step_size below min_step_size), then max_step_size is set back to the default and we try to increase min_step_size to "jump over" the problematic area. Resampling random points and start from those is only the last resort and will be done if these two do not work. The idea is that we want to stay as close as we can to the last profiling point. TODO: Put the adaptive reduction/increase of max_step_size/min_step_size into options * Fix flags for exiting the "trust area" * Bugfixes, Robustness, Logging, Better plotting - BUGFIX: absolute value in objective targets at last_delta_fval - BUGFIX: extrapolation explosions if we start at boundary - Feature: Trust region on extrapolation - Added y ticks back into the plot, sometimes the range is completely different. - Added points to the plotting of profiles (in case of one result and one profile list id) - Added color change to plotting of profiles (in case of one result and one profile list id) - LOGGING: added logging.INFO with informations of steps made and successful optimizations. * Fix default value for color_path * Fix color value issues -- failing tests * Add tuple to isinstance list * No color_path plotting if color requested * Fix if statements fixed_method * We're making more steps then before Test were testing whether the profiling method was making a lot of steps. Now we're making a lot more steps due to higher robustness. 
* Change default magic and max values * Change default method, remove TODOs * Update quality colors * Fix failing test * Fix test and docstring * Rewrite some too long if statements * Some more if statements cleanup * Change color if no * Correct y-axis in obj.fun plotting * Paul review changes * More Paul review changes * Fix if-while infinite loop bug * Correct comment variable name Co-authored-by: Maren Philipps <55318391+m-philipps@users.noreply.github.com> * Change i_color to color_i * Change docstring of color in lowlevel * Expand colors docstring * Fix color format checking and update docstring --------- Co-authored-by: Paul Jonas Jost <70631928+PaulJonasJost@users.noreply.github.com> Co-authored-by: Maren Philipps <55318391+m-philipps@users.noreply.github.com> --- pypesto/profile/options.py | 16 +- pypesto/profile/profile.py | 10 +- pypesto/profile/profile_next_guess.py | 193 +++++++++++++++++++----- pypesto/profile/util.py | 1 + pypesto/profile/walk_along_profile.py | 204 ++++++++++++++++++++------ pypesto/result/profile.py | 20 +++ pypesto/visualize/clust_color.py | 23 +-- pypesto/visualize/profiles.py | 137 ++++++++++++++--- test/profile/test_profile.py | 7 +- 9 files changed, 490 insertions(+), 121 deletions(-) diff --git a/pypesto/profile/options.py b/pypesto/profile/options.py index f2c9dc42a..bcc3b805e 100644 --- a/pypesto/profile/options.py +++ b/pypesto/profile/options.py @@ -32,9 +32,9 @@ class ProfileOptions(dict): reg_order: Maximum degree of regression polynomial used in regression based adaptive profile points proposal. - magic_factor_obj_value: - There is this magic factor in the old profiling code which slows down - profiling at small ratios (must be >= 0 and < 1). + adaptive_target_scaling_factor: + The scaling factor of the next_obj_target in next guess generation. + Larger values result in larger next_guess step size (must be > 1). whole_path: Whether to profile the whole bounds or only till we get below the ratio. 
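To make the `adaptive_target_scaling_factor` docstring above concrete, here is a small worked sketch of how `adaptive_step` derives the upper and lower objective targets for the next profile point. The formulas mirror the ones introduced in `profile_next_guess.py` further below; the numeric values are purely illustrative and not taken from the patch:

    import numpy as np

    fval_last = 12.0          # objective value at the last profile point
    last_delta_fval = 0.4     # objective change between the last two points
    delta_obj_value = 3.0     # distance of the last point to the global optimum
    delta_ratio_max = 0.1     # ProfileOptions.delta_ratio_max
    scaling = 1.5             # ProfileOptions.adaptive_target_scaling_factor (new default)

    high_target = -np.log(1.0 - delta_ratio_max) + scaling * abs(last_delta_fval) + fval_last
    low_target = np.log(1.0 - delta_ratio_max) - scaling * abs(last_delta_fval) + fval_last

    # both targets are kept within +/- 0.5 * delta_obj_value of the last value
    if delta_obj_value != 0:
        high_target = min(high_target, fval_last + 0.5 * delta_obj_value)
        low_target = max(low_target, fval_last - 0.5 * delta_obj_value)

    print(low_target, high_target)   # roughly 11.29 and 12.71 for these numbers

A larger scaling factor widens the band between the two targets, so larger steps are accepted: the line search grows the step while the proposed objective stays inside the band and shrinks it once the proposal overshoots either target.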
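The commit message for this patch also describes how `walk_along_profile.py` now reacts when the re-optimization at a proposed profile point fails (returns a non-finite objective value): the maximal step size is first halved repeatedly to stay closer to the last successful point, and only when that would drop below the minimal step size is the minimal step size inflated to jump over the problematic region. A compressed sketch of that control flow; `optimize_at` is a hypothetical helper standing in for the guess-and-optimize step, not a function from the patch:

    def next_profile_point(optimize_at, options):
        """Return the first successful optimizer result, or None if all retries fail."""
        # 1) shrink the maximal step size towards the last successful point
        max_factor = 1.0
        while options.max_step_size * max_factor >= options.min_step_size:
            result = optimize_at(min_step_increase_factor=1.0,
                                 max_step_reduce_factor=max_factor)
            if result is not None:        # finite objective value -> accept
                return result
            max_factor *= 0.5

        # 2) otherwise enlarge the minimal step size to jump over the problematic region
        min_factor = 1.25
        while options.min_step_size * min_factor <= options.max_step_size:
            result = optimize_at(min_step_increase_factor=min_factor,
                                 max_step_reduce_factor=1.0)
            if result is not None:
                return result
            min_factor *= 1.25

        # 3) both ladders exhausted; the patch logs a warning when either bound is violated
        return None

In the actual implementation each outcome is also colour-coded in the new `color_path` (black for an unmodified step, red for a reduced maximal step, blue for an increased minimal step), which is what the updated profile plotting uses to flag adjusted points.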
@@ -44,13 +44,13 @@ def __init__( self, default_step_size: float = 0.01, min_step_size: float = 0.001, - max_step_size: float = 1.0, + max_step_size: float = 0.1, step_size_factor: float = 1.25, delta_ratio_max: float = 0.1, ratio_min: float = 0.145, reg_points: int = 10, reg_order: int = 4, - magic_factor_obj_value: float = 0.5, + adaptive_target_scaling_factor: float = 1.5, whole_path: bool = False, ): super().__init__() @@ -63,7 +63,7 @@ def __init__( self.delta_ratio_max = delta_ratio_max self.reg_points = reg_points self.reg_order = reg_order - self.magic_factor_obj_value = magic_factor_obj_value + self.adaptive_target_scaling_factor = adaptive_target_scaling_factor self.whole_path = whole_path self.validate() @@ -112,5 +112,5 @@ def validate(self): if self.default_step_size < self.min_step_size: raise ValueError("default_step_size must be >= min_step_size.") - if self.magic_factor_obj_value < 0 or self.magic_factor_obj_value >= 1: - raise ValueError("magic_factor_obj_value must be >= 0 and < 1.") + if self.adaptive_target_scaling_factor < 1: + raise ValueError("adaptive_target_scaling_factor must be > 1.") diff --git a/pypesto/profile/profile.py b/pypesto/profile/profile.py index e4e124964..2df0b4f99 100644 --- a/pypesto/profile/profile.py +++ b/pypesto/profile/profile.py @@ -24,7 +24,7 @@ def parameter_profile( profile_index: Iterable[int] = None, profile_list: int = None, result_index: int = 0, - next_guess_method: Union[Callable, str] = "adaptive_step_regression", + next_guess_method: Union[Callable, str] = "adaptive_step_order_1", profile_options: ProfileOptions = None, progress_bar: bool = None, filename: Union[str, Callable, None] = None, @@ -93,7 +93,9 @@ def parameter_profile( profile_options = ProfileOptions.create_instance(profile_options) profile_options.validate() - # create a function handle that will be called later to get the next point + # Create a function handle that will be called later to get the next point. + # This function will be used to generate the initial points of optimization + # steps in profiling in `walk_along_profile.py` if isinstance(next_guess_method, str): def create_next_guess( @@ -104,6 +106,8 @@ def create_next_guess( current_profile_, problem_, global_opt_, + min_step_increase_factor_, + max_step_reduce_factor_, ): return next_guess( x, @@ -114,6 +118,8 @@ def create_next_guess( current_profile_, problem_, global_opt_, + min_step_increase_factor_, + max_step_reduce_factor_, ) elif callable(next_guess_method): diff --git a/pypesto/profile/profile_next_guess.py b/pypesto/profile/profile_next_guess.py index fd523e062..dd05b6b8b 100644 --- a/pypesto/profile/profile_next_guess.py +++ b/pypesto/profile/profile_next_guess.py @@ -1,3 +1,4 @@ +import logging from typing import Callable, Literal import numpy as np @@ -6,6 +7,8 @@ from ..result import ProfilerResult from .options import ProfileOptions +logger = logging.getLogger(__name__) + __all__ = ["next_guess", "fixed_step", "adaptive_step"] @@ -23,6 +26,8 @@ def next_guess( current_profile: ProfilerResult, problem: Problem, global_opt: float, + min_step_increase_factor: float = 1.0, + max_step_reduce_factor: float = 1.0, ) -> np.ndarray: """ Create the next initial guess for the optimizer. @@ -53,17 +58,22 @@ def next_guess( The problem to be solved. global_opt: Log-posterior value of the global optimum. + min_step_increase_factor: + Factor to increase the minimal step size bound. Used only in + :func:`adaptive_step`. + max_step_reduce_factor: + Factor to reduce the maximal step size bound. 
Used only in + :func:`adaptive_step`. Returns ------- The next initial guess as base for the next profile point. """ if update_type == "fixed_step": - return fixed_step( + next_initial_guess = fixed_step( x, par_index, par_direction, profile_options, problem ) - - if update_type == "adaptive_step_order_0": + elif update_type == "adaptive_step_order_0": order = 0 elif update_type == "adaptive_step_order_1": order = 1 @@ -73,18 +83,28 @@ def next_guess( raise ValueError( f"Unsupported `update_type` {update_type} for `next_guess`." ) + if update_type != "fixed_step": + next_initial_guess = adaptive_step( + x, + par_index, + par_direction, + profile_options, + current_profile, + problem, + global_opt, + order, + min_step_increase_factor, + max_step_reduce_factor, + ) - return adaptive_step( - x, - par_index, - par_direction, - profile_options, - current_profile, - problem, - global_opt, - order, + logger.info( + f"Next guess for {problem.x_names[par_index]} in direction " + f"{par_direction} is {next_initial_guess[par_index]:.4f}. Step size: " + f"{next_initial_guess[par_index] - x[par_index]:.4f}." ) + return next_initial_guess + def fixed_step( x: np.ndarray, @@ -138,6 +158,8 @@ def adaptive_step( problem: Problem, global_opt: float, order: int = 1, + min_step_increase_factor: float = 1.0, + max_step_reduce_factor: float = 1.0, ) -> np.ndarray: """Group of more complex methods for point proposal. @@ -168,6 +190,10 @@ def adaptive_step( * ``1``: the last two points are used to extrapolate all parameters * ``np.nan``: indicates that a more complex regression should be used as determined by :attr:`pypesto.profile.ProfileOptions.reg_order`. + min_step_increase_factor: + Factor to increase the minimal step size bound. + max_step_reduce_factor: + Factor to reduce the maximal step size bound. 
Returns @@ -177,9 +203,9 @@ def adaptive_step( # restrict step proposal to minimum and maximum step size def clip_to_minmax(step_size_proposal): - return np.clip( - step_size_proposal, options.min_step_size, options.max_step_size - ) + min_step_size = options.min_step_size * min_step_increase_factor + max_step_size = options.max_step_size * max_step_reduce_factor + return np.clip(step_size_proposal, min_step_size, max_step_size) # restrict step proposal to bounds def clip_to_bounds(step_proposal): @@ -193,6 +219,7 @@ def clip_to_bounds(step_proposal): delta_x_dir, reg_par, delta_obj_value, + last_delta_fval, ) = handle_profile_history( x, par_index, @@ -206,15 +233,18 @@ def clip_to_bounds(step_proposal): # check whether we must make a minimum step anyway, since we're close to # the next bound - min_delta_x = x[par_index] + par_direction * options.min_step_size + min_delta_x = ( + x[par_index] + + par_direction * options.min_step_size * min_step_increase_factor + ) if par_direction == -1 and (min_delta_x < problem.lb_full[par_index]): - step_length = problem.lb_full[par_index] - x[par_index] - return x + step_length * delta_x_dir + step_length = abs(problem.lb_full[par_index] - x[par_index]) + return clip_to_bounds(x + step_length * delta_x_dir) if par_direction == 1 and (min_delta_x > problem.ub_full[par_index]): - step_length = problem.ub_full[par_index] - x[par_index] - return x + step_length * delta_x_dir + step_length = abs(problem.ub_full[par_index] - x[par_index]) + return clip_to_bounds(x + step_length * delta_x_dir) # parameter extrapolation function n_profile_points = len(current_profile.fval_path) @@ -241,28 +271,58 @@ def par_extrapol(step_length): x[par_index] + step_length * par_direction ) ) + # Define a trust region for the step size in all directions + # to avoid overshooting + x_step = np.clip( + x_step, x - options.max_step_size, x + options.max_step_size + ) + return clip_to_bounds(x_step) else: # if not, we do simple extrapolation def par_extrapol(step_length): - x_step = x + step_length * delta_x_dir - return clip_to_bounds(x_step) + # Define a trust region for the step size in all directions + # to avoid overshooting + step_in_x = np.clip( + step_length * delta_x_dir, + -options.max_step_size, + options.max_step_size, + ) + x_stepped = x + step_in_x + return clip_to_bounds(x_stepped) # compute proposal next_x = par_extrapol(step_size_guess) # next start point has to be searched # compute the next objective value which we aim for - next_obj_target = ( + high_next_obj_target = ( -np.log(1.0 - options.delta_ratio_max) - + options.magic_factor_obj_value * delta_obj_value + + options.adaptive_target_scaling_factor * abs(last_delta_fval) + + current_profile.fval_path[-1] + ) + low_next_obj_target = ( + +np.log(1.0 - options.delta_ratio_max) + - options.adaptive_target_scaling_factor * abs(last_delta_fval) + current_profile.fval_path[-1] ) + # Clip both by 0.5 * delta_obj_value to avoid overshooting + if delta_obj_value != 0: + high_next_obj_target = min( + high_next_obj_target, + current_profile.fval_path[-1] + 0.5 * delta_obj_value, + ) + low_next_obj_target = max( + low_next_obj_target, + current_profile.fval_path[-1] - 0.5 * delta_obj_value, + ) + # compute objective at the guessed point problem.fix_parameters(par_index, next_x[par_index]) next_obj = problem.objective(problem.get_reduced_vector(next_x)) + current_obj = current_profile.fval_path[-1] # iterate until good step size is found return do_line_search( @@ -270,12 +330,16 @@ def par_extrapol(step_length): 
step_size_guess, par_extrapol, next_obj, - next_obj_target, + current_obj, + high_next_obj_target, + low_next_obj_target, clip_to_minmax, clip_to_bounds, par_index, problem, options, + min_step_increase_factor, + max_step_reduce_factor, ) @@ -304,6 +368,8 @@ def handle_profile_history( The regression polynomial for profile extrapolation. delta_obj_value: The difference of the objective function value between the last point and `global_opt`. + last_delta_fval: + The difference of the objective function value between the last two points. """ n_profile_points = len(current_profile.fval_path) @@ -313,32 +379,53 @@ def handle_profile_history( reg_par = None # Is this the first step along this profile? If so, try a simple step - if n_profile_points == 1: + # Do the same if the last two points are too close to avoid division by small numbers + if n_profile_points == 1 or np.isclose( + current_profile.x_path[par_index, -1], + current_profile.x_path[par_index, -2], + ): # try to use the default step size step_size_guess = options.default_step_size delta_obj_value = 0.0 + last_delta_fval = 0.0 else: # try to reuse the previous step size - step_size_guess = np.abs( + last_delta_x_par_index = np.abs( current_profile.x_path[par_index, -1] - current_profile.x_path[par_index, -2] ) + # Bound the step size by default values + step_size_guess = min( + last_delta_x_par_index, options.default_step_size + ) + # Step size cannot be smaller than the minimum step size + step_size_guess = max(step_size_guess, options.min_step_size) + delta_obj_value = current_profile.fval_path[-1] - global_opt + last_delta_fval = ( + current_profile.fval_path[-1] - current_profile.fval_path[-2] + ) if order == 1 or (np.isnan(order) and n_profile_points < 3): # set the update direction (extrapolate with order 1) last_delta_x = ( current_profile.x_path[:, -1] - current_profile.x_path[:, -2] ) - delta_x_dir = last_delta_x / step_size_guess + delta_x_dir = last_delta_x / last_delta_x_par_index elif np.isnan(order): # compute the regression polynomial for parameter extrapolation reg_par = get_reg_polynomial( par_index, current_profile, problem, options ) - return step_size_guess, delta_x_dir, reg_par, delta_obj_value + return ( + step_size_guess, + delta_x_dir, + reg_par, + delta_obj_value, + last_delta_fval, + ) def get_reg_polynomial( @@ -395,12 +482,16 @@ def do_line_search( step_size_guess: float, par_extrapol: Callable, next_obj: float, - next_obj_target: float, + current_obj: float, + high_next_obj_target: float, + low_next_obj_target: float, clip_to_minmax: Callable, clip_to_bounds: Callable, par_index: int, problem: Problem, options: ProfileOptions, + min_step_increase_factor: float, + max_step_reduce_factor: float, ) -> np.ndarray: """Perform the line search. @@ -429,14 +520,29 @@ def do_line_search( The parameter estimation problem. options: Profile likelihood options. + min_step_increase_factor: + Factor to increase the minimal step size bound. + max_step_reduce_factor: + Factor to reduce the maximal step size bound. Returns ------- Parameter vector that is expected to yield the objective function value closest to `next_obj_target`. """ - # Was the initial step too big or too small? 
- direction = "decrease" if next_obj_target < next_obj else "increase" + decreasing_to_low_target = False + decreasing_to_high_target = False + + # Determine the direction of the step + if next_obj > low_next_obj_target and next_obj < high_next_obj_target: + direction = "increase" + elif next_obj <= low_next_obj_target: + direction = "decrease" + decreasing_to_low_target = True + elif next_obj >= high_next_obj_target: + direction = "decrease" + decreasing_to_high_target = True + if direction == "increase": adapt_factor = options.step_size_factor else: @@ -452,12 +558,14 @@ def do_line_search( # Check if we hit the bounds if ( direction == "decrease" - and step_size_guess == options.min_step_size + and step_size_guess + == options.min_step_size * min_step_increase_factor ): return next_x if ( direction == "increase" - and step_size_guess == options.max_step_size + and step_size_guess + == options.max_step_size * max_step_reduce_factor ): return next_x @@ -467,11 +575,22 @@ def do_line_search( next_obj = problem.objective(problem.get_reduced_vector(next_x)) # check for root crossing and compute correct step size in case - if (direction == "decrease" and next_obj_target >= next_obj) or ( - direction == "increase" and next_obj_target <= next_obj + if (direction == "increase" and next_obj > high_next_obj_target) or ( + direction == "decrease" + and next_obj < high_next_obj_target + and decreasing_to_high_target + ): + return next_x_interpolate( + next_obj, last_obj, next_x, last_x, high_next_obj_target + ) + + if (direction == "increase" and next_obj < low_next_obj_target) or ( + direction == "decrease" + and next_obj > low_next_obj_target + and decreasing_to_low_target ): return next_x_interpolate( - next_obj, last_obj, next_x, last_x, next_obj_target + next_obj, last_obj, next_x, last_x, low_next_obj_target ) diff --git a/pypesto/profile/util.py b/pypesto/profile/util.py index 6a87403f8..3ea7a0d00 100644 --- a/pypesto/profile/util.py +++ b/pypesto/profile/util.py @@ -189,6 +189,7 @@ def fill_profile_list( gradnorm_path=np.array([gradnorm]), exitflag_path=np.array([optimizer_result["exitflag"]]), time_path=np.array([0.0]), + color_path=np.array([[1, 0, 0, 1]]), time_total=0.0, n_fval=0, n_grad=0, diff --git a/pypesto/profile/walk_along_profile.py b/pypesto/profile/walk_along_profile.py index c4f610001..0478c0dc0 100644 --- a/pypesto/profile/walk_along_profile.py +++ b/pypesto/profile/walk_along_profile.py @@ -63,6 +63,7 @@ def walk_along_profile( while True: # get current position on the profile path x_now = current_profile.x_path[:, -1] + color_now = current_profile.color_path[-1] # check if the next profile point needs to be computed # ... check bounds @@ -78,26 +79,164 @@ def walk_along_profile( ): break - # compute the new start point for optimization - x_next = create_next_guess( - x_now, - i_par, - par_direction, - options, - current_profile, - problem, - global_opt, - ) + optimization_successful = False + max_step_reduce_factor = 1.0 + + while not optimization_successful: + # Check max_step_size is not reduced below min_step_size + if ( + options.max_step_size * max_step_reduce_factor + < options.min_step_size + ): + logger.warning( + "Max step size reduced below min step size. " + "Setting a lower min step size can help avoid this issue." 
+ ) + break + + # compute the new start point for optimization + x_next = create_next_guess( + x_now, + i_par, + par_direction, + options, + current_profile, + problem, + global_opt, + 1.0, + max_step_reduce_factor, + ) + + # fix current profiling parameter to current value and set start point + problem.fix_parameters(i_par, x_next[i_par]) + startpoint = x_next[problem.x_free_indices] + + if startpoint.size > 0: + optimizer_result = optimizer.minimize( + problem=problem, + x0=startpoint, + id=str(0), + optimize_options=OptimizeOptions( + allow_failed_starts=False + ), + ) + + if np.isfinite(optimizer_result.fval): + optimization_successful = True + if max_step_reduce_factor == 1.0: + # The color of the point is set to black if no changes were made + color_next = np.array([0, 0, 0, 1]) + else: + # The color of the point is set to red if the max_step_size was reduced + color_next = np.array([1, 0, 0, 1]) + else: + max_step_reduce_factor *= 0.5 + logger.warning( + f"Optimization at {problem.x_names[i_par]}={x_next[i_par]} failed. " + f"Reducing max_step_size to {options.max_step_size * max_step_reduce_factor}." + ) + else: + # if too many parameters are fixed, there is nothing to do ... + fval = problem.objective(np.array([])) + optimizer_result = OptimizerResult( + id="0", + x=np.array([]), + fval=fval, + n_fval=0, + n_grad=0, + n_res=0, + n_hess=0, + n_sres=0, + x0=np.array([]), + fval0=fval, + time=0, + ) + optimizer_result.update_to_full(problem=problem) + optimization_successful = True + color_next = np.concatenate((color_now[:3], [0.3])) + + if not optimization_successful: + # Cannot optimize successfully by reducing max_step_size + # Let's try to optimize by increasing min_step_size + logger.warning( + f"Failing to optimize at {problem.x_names[i_par]}={x_next[i_par]} after reducing max_step_size." + f"Trying to increase min_step_size." + ) + min_step_increase_factor = 1.25 + while not optimization_successful: + # Check min_step_size is not increased above max_step_size + if ( + options.min_step_size * min_step_increase_factor + > options.max_step_size + ): + logger.warning( + "Min step size increased above max step size. " + "Setting a higher max step size can help avoid this issue." + ) + break - # fix current profiling parameter to current value and set start point - problem.fix_parameters(i_par, x_next[i_par]) - startpoint = x_next[problem.x_free_indices] + # compute the new start point for optimization + x_next = create_next_guess( + x_now, + i_par, + par_direction, + options, + current_profile, + problem, + global_opt, + min_step_increase_factor, + 1.0, + ) + + # fix current profiling parameter to current value and set start point + problem.fix_parameters(i_par, x_next[i_par]) + startpoint = x_next[problem.x_free_indices] + + optimizer_result = optimizer.minimize( + problem=problem, + x0=startpoint, + id=str(0), + optimize_options=OptimizeOptions(allow_failed_starts=False), + ) + + if np.isfinite(optimizer_result.fval): + optimization_successful = True + # The color of the point is set to blue if the min_step_size was increased + color_next = np.array([0, 0, 1, 1]) + else: + min_step_increase_factor *= 1.25 + logger.warning( + f"Optimization at {problem.x_names[i_par]}={x_next[i_par]} failed. " + f"Increasing min_step_size to {options.min_step_size * min_step_increase_factor}." 
+ ) + + if not optimization_successful: + # Cannot optimize successfully by reducing max_step_size or increasing min_step_size + # sample a new starting point for another attempt for max_tries times + logger.warning( + f"Failing to optimize at {problem.x_names[i_par]}={x_next[i_par]} after reducing max_step_size." + f"Trying to sample {max_tries} new starting points." + ) + + x_next = create_next_guess( + x_now, + i_par, + par_direction, + options, + current_profile, + problem, + global_opt, + 1.0, + 1.0, + ) + + problem.fix_parameters(i_par, x_next[i_par]) - # run optimization - if startpoint.size > 0: - # number of optimization attempts for the given value of i_par in case - # no finite solution is found for i_optimize_attempt in range(max_tries): + startpoint = problem.startpoint_method( + n_starts=1, problem=problem + )[0] + optimizer_result = optimizer.minimize( problem=problem, x0=startpoint, @@ -107,40 +246,22 @@ def walk_along_profile( ), ) if np.isfinite(optimizer_result.fval): + # The color of the point is set to green if the parameter was resampled + color_next = np.array([0, 1, 0, 1]) break logger.warning( f"Optimization at {problem.x_names[i_par]}={x_next[i_par]} failed." ) - # sample a new starting point for another attempt - # might be preferable to stay close to the previous point, at least initially, - # but for now, we just sample from anywhere within the parameter bounds - # alternatively, run multi-start optimization - startpoint = problem.startpoint_method( - n_starts=1, problem=problem - )[0] else: raise RuntimeError( f"Computing profile point failed. Could not find a finite solution after {max_tries} attempts." ) - else: - # if too many parameters are fixed, there is nothing to do ... - fval = problem.objective(np.array([])) - optimizer_result = OptimizerResult( - id="0", - x=np.array([]), - fval=fval, - n_fval=0, - n_grad=0, - n_res=0, - n_hess=0, - n_sres=0, - x0=np.array([]), - fval0=fval, - time=0, - ) - optimizer_result.update_to_full(problem=problem) + logger.info( + f"Optimization successful for {problem.x_names[i_par]}={x_next[i_par]:.4f}. " + f"Start fval {problem.objective(x_next[problem.x_free_indices]):.6f}, end fval {optimizer_result.fval:.6f}." + ) if optimizer_result[GRAD] is not None: gradnorm = np.linalg.norm( optimizer_result[GRAD][problem.x_free_indices] @@ -154,6 +275,7 @@ def walk_along_profile( ratio=np.exp(global_opt - optimizer_result.fval), gradnorm=gradnorm, time=optimizer_result.time, + color=color_next, exitflag=optimizer_result.exitflag, n_fval=optimizer_result.n_fval, n_grad=optimizer_result.n_grad, diff --git a/pypesto/result/profile.py b/pypesto/result/profile.py index af14e5f1a..5f8ce8405 100644 --- a/pypesto/result/profile.py +++ b/pypesto/result/profile.py @@ -38,6 +38,13 @@ class ProfilerResult(dict): Number of gradient evaluations. n_hess: Number of Hessian evaluations. + color_path: + The color of the profile path. Signifies types of steps made. + Red indicates a step for which min_step_size was reduced, blue + indicates a step for which max_step_size was increased, and green + indicates a step for which the profiler had to resample the parameter + vector due to optimization failure of the previous two. Black + indicates a step for which none of the above was necessary. message: Textual comment on the profile result. 
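
Because each accepted profile point now carries an RGBA code recording which fallback (if any) was needed, the path can be post-processed into a quick quality summary. The following tally is a sketch of such post-processing, not part of this patch; it assumes the codes documented above: black for a plain step, red for a reduced max_step_size, blue for an increased min_step_size, green for a resampled start point (any other code, e.g. the reduced-alpha entry used when all parameters are fixed, is counted as "other").

import numpy as np

STEP_TYPES = {
    (0.0, 0.0, 0.0, 1.0): "plain step",
    (1.0, 0.0, 0.0, 1.0): "max_step_size reduced",
    (0.0, 0.0, 1.0, 1.0): "min_step_size increased",
    (0.0, 1.0, 0.0, 1.0): "start point resampled",
}

def summarize_color_path(color_path: np.ndarray) -> dict:
    """Count step types in a ProfilerResult.color_path (n_points x 4 RGBA)."""
    counts = {}
    for rgba in np.atleast_2d(color_path):
        label = STEP_TYPES.get(tuple(np.round(rgba, 3)), "other")
        counts[label] = counts.get(label, 0) + 1
    return counts

# e.g. a path with one plain step and one resampled step
print(summarize_color_path(np.array([[0, 0, 0, 1], [0, 1, 0, 1]])))
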
@@ -55,6 +62,7 @@ def __init__( gradnorm_path: np.ndarray = None, exitflag_path: np.ndarray = None, time_path: np.ndarray = None, + color_path: np.ndarray = None, time_total: float = 0.0, n_fval: int = 0, n_grad: int = 0, @@ -86,6 +94,13 @@ def __init__( else: self.time_path = time_path.copy() + if color_path is None: + self.color_path = np.full( + (x_path.shape[1], 4), np.array([1, 0, 0, 0.3]) + ) + else: + self.color_path = color_path.copy() + if ( not self.x_path.shape[1] == len(self.fval_path) @@ -122,6 +137,7 @@ def append_profile_point( ratio: float, gradnorm: float = np.nan, time: float = np.nan, + color: np.ndarray = np.nan, exitflag: float = np.nan, n_fval: int = 0, n_grad: int = 0, @@ -143,6 +159,8 @@ def append_profile_point( The gradient norm at `x`. time: The computation time to find `x`. + color: + The color of the profile path. Signifies types of steps made. exitflag: The exitflag of the optimizer (useful if an optimization was performed to find `x`). @@ -159,6 +177,7 @@ def append_profile_point( self.gradnorm_path = np.hstack((self.gradnorm_path, gradnorm)) self.exitflag_path = np.hstack((self.exitflag_path, exitflag)) self.time_path = np.hstack((self.time_path, time)) + self.color_path = np.vstack((self.color_path, color)) # increment the time and f_eval counters self.time_total += time @@ -180,6 +199,7 @@ def flip_profile(self) -> None: self.gradnorm_path = np.flip(self.gradnorm_path) self.exitflag_path = np.flip(self.exitflag_path) self.time_path = np.flip(self.time_path) + self.color_path = np.flip(self.color_path, axis=0) class ProfileResult: diff --git a/pypesto/visualize/clust_color.py b/pypesto/visualize/clust_color.py index bf0cf02c9..bbd39c41c 100644 --- a/pypesto/visualize/clust_color.py +++ b/pypesto/visualize/clust_color.py @@ -142,7 +142,11 @@ def assign_colors( colors = colors[0] return np.array([colors] * n_vals) - if colors.shape[1] == 4 and n_vals == colors.shape[0]: + if ( + len(colors.shape) > 1 + and colors.shape[1] == 4 + and n_vals == colors.shape[0] + ): return colors if colors.shape[0] == 4: @@ -195,13 +199,10 @@ def assign_colors_for_list( real_indices = np.arange(int(colors.shape[0] / 2)) return colors[real_indices] - # if the user specified color lies does not match the number of results - if len(colors) != num_entries: - raise ( - "Incorrect color input. Colors must be specified either as " - "list of [r, g, b, alpha] with length equal to function " - "values Number of function (here: " + str(num_entries) + "), " - "or as one single [r, g, b, alpha] color." - ) - - return colors + # Pass the colors through assign_colors to check correct format of RGBA + return assign_colors( + vals=np.array(list(range(num_entries))), + colors=colors, + balance_alpha=False, + highlight_global=False, + ) diff --git a/pypesto/visualize/profiles.py b/pypesto/visualize/profiles.py index f4ecb6443..bf333a165 100644 --- a/pypesto/visualize/profiles.py +++ b/pypesto/visualize/profiles.py @@ -24,6 +24,8 @@ def profiles( profile_list_ids: Union[int, Sequence[int]] = 0, ratio_min: float = 0.0, show_bounds: bool = False, + plot_objective_values: bool = False, + quality_colors: bool = False, ) -> plt.Axes: """ Plot classical 1D profile plot. @@ -45,7 +47,9 @@ def profiles( List of reference points for optimization results, containing at least a function value fval. colors: - List of colors, or single color. + List of colors, or single color. If multiple colors are passed, their + number needs to corresponds to either the number of results or the + number of profile_list_ids. 
Cannot be provided if quality_colors is set to True. legends: Labels for line plots, one label per result object. x_labels: @@ -56,12 +60,30 @@ def profiles( Minimum ratio below which to cut off. show_bounds: Whether to show, and extend the plot to, the lower and upper bounds. + plot_objective_values: + Whether to plot the objective function values instead of the likelihood + ratio values. + quality_colors: + If set to True, the profiles are colored according to types of steps the + profiler took. This gives additional information about the profile quality. + Red indicates a step for which min_step_size was reduced, blue indicates a step for which + max_step_size was increased, and green indicates a step for which the profiler + had to resample the parameter vector due to optimization failure of the previous two. + Black indicates a step for which none of the above was necessary. This option is only + available if there is only one result and one profile_list_id (one profile per plot). Returns ------- ax: The plot axes. """ + + if colors is not None and quality_colors: + raise ValueError( + "Cannot visualize the profiles with `quality_colors` of profiler_result.color_path " + " and `colors` provided at the same time. Please provide only one of them." + ) + # parse input results, profile_list_ids, colors, legends = process_result_list_profiles( results, profile_list_ids, colors, legends @@ -75,11 +97,12 @@ def profiles( # loop over results for i_result, result in enumerate(results): for i_profile_list, profile_list_id in enumerate(profile_list_ids): - fvals = handle_inputs( + fvals, color_paths = handle_inputs( result, profile_indices=profile_indices, profile_list=profile_list_id, ratio_min=ratio_min, + plot_objective_values=plot_objective_values, ) # add x_labels for parameters @@ -98,17 +121,30 @@ def profiles( # multiple results per axes object color_ind = i_result + # If quality_colors is set to True, we use the colors provided + # by profiler_result.color_path. This will be done only if there is + # only one result and one profile_list_id (basically one profile per plot). + if ( + len(results) == 1 + and len(profile_list_ids) == 1 + and quality_colors + ): + color = color_paths + else: + color = colors[color_ind] + # call lowlevel routine ax = profiles_lowlevel( fvals=fvals, ax=ax, size=size, - color=colors[color_ind], + color=color, legend_text=legends[color_ind], x_labels=x_labels, show_bounds=show_bounds, lb_full=result.problem.lb_full, ub_full=result.problem.ub_full, + plot_objective_values=plot_objective_values, ) # parse and apply plotting options @@ -132,6 +168,7 @@ def profiles_lowlevel( show_bounds: bool = False, lb_full: Sequence[float] = None, ub_full: Sequence[float] = None, + plot_objective_values: bool = False, ) -> list[plt.Axes]: """ Lowlevel routine for profile plotting. @@ -147,8 +184,9 @@ def profiles_lowlevel( size: Figure size (width, height) in inches. Is only applied when no ax object is specified. - color: RGBA, optional - Color for profiles in plot. + color: RGBA, list[np.ndarray[RGBA]], optional + Color for profiles in plot. In case of quality_colors=True, this is a list of + np.ndarray[RGBA] for each profile -- one color per profile point for each profile. legend_text: Label for line plots. show_bounds: @@ -157,6 +195,9 @@ def profiles_lowlevel( Lower bound. ub_full: Upper bound. + plot_objective_values: + Whether to plot the objective function values instead of the likelihood + ratio values. 
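
Taken together, the two new keyword arguments slot into the usual plotting call. A usage sketch, assuming `result` is a `pypesto.Result` that already contains computed profiles; note that `quality_colors=True` requires a single result and a single `profile_list_id` and cannot be combined with `colors`:

import pypesto.visualize as visualize

# color each profile point by the kind of step the profiler had to take
ax = visualize.profiles(result, quality_colors=True)

# plot raw objective function values instead of the log-posterior ratio
ax = visualize.profiles(result, plot_objective_values=True)
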
Returns ------- @@ -215,6 +256,12 @@ def profiles_lowlevel( # if we have empty profiles and more axes than profiles: skip if n_plots != n_fvals and fval is None: continue + # If we use colors from profiler_result.color_path, + # we need to take the color path of each profile + if isinstance(color, list) and isinstance(color[i_plot], np.ndarray): + color_i = color[i_plot] + else: + color_i = color # handle legend if i_plot == 0: @@ -235,7 +282,7 @@ def profiles_lowlevel( fval, ax[counter], size=size, - color=color, + color=color_i, legend_text=tmp_legend, show_bounds=show_bounds, lb=lb, @@ -249,13 +296,10 @@ def profiles_lowlevel( ax[counter].set_xlabel(x_labels[counter]) if counter % columns == 0: - ax[counter].set_ylabel("Log-posterior ratio") - else: - # fix pyPESTO/pyPESTO/pypesto/visualize/profiles.py:228: - # UserWarning: FixedFormatter should only be used - # together with FixedLocator. Fix from matplotlib #18848. - ax[counter].set_yticks(ax[counter].get_yticks()) - ax[counter].set_yticklabels(["" for _ in ax[counter].get_yticks()]) + if plot_objective_values: + ax[counter].set_ylabel("Objective function value") + else: + ax[counter].set_ylabel("Log-posterior ratio") # increase counter and cleanup legend counter += 1 @@ -302,9 +346,17 @@ def profile_lowlevel( """ # parse input fvals = np.asarray(fvals) - # get colors - color = assign_colors([1.0], color) + if ( + color is None + or isinstance(color, list) + or isinstance(color, tuple) + or (isinstance(color, np.ndarray) and not len(color.shape) == 2) + ): + color = assign_colors([1.0], color) + single_color = True + else: + single_color = False # axes if ax is None: @@ -317,7 +369,37 @@ def profile_lowlevel( # plot if fvals.size != 0: ax.xaxis.set_major_locator(MaxNLocator(integer=True)) - ax.plot(fvals[0, :], fvals[1, :], color=color[0], label=legend_text) + xs = fvals[0, :] + ratios = fvals[1, :] + + # If we use colors from profiler_result.color_path, + # we need to make a mapping from profile points to their colors + if not single_color: + # Create a mapping from (x, ratio) to color + point_to_color = dict(zip(zip(xs, ratios), color)) + else: + point_to_color = None + + # Plot each profile point individually to allow for different colors + for i in range(1, len(xs)): + point_color = ( + color + if single_color + else tuple(point_to_color[(xs[i], ratios[i])]) + ) + ax.plot( + [xs[i - 1], xs[i]], + [ratios[i - 1], ratios[i]], + color=color if single_color else (0, 0, 0, 1), + linestyle="-", + ) + if not single_color and point_color != (0, 0, 0, 1): + ax.plot(xs[i], ratios[i], color=point_color, marker="o") + else: + ax.plot(xs[i], ratios[i], color=point_color, marker=".") + + # Plot legend text + ax.plot([], [], color=color[0], label=legend_text) if legend_text is not None: ax.legend() @@ -366,6 +448,7 @@ def handle_inputs( profile_indices: Sequence[int], profile_list: int, ratio_min: float, + plot_objective_values: bool, ) -> list[np.array]: """ Retrieve the values of the profiles to be plotted. @@ -381,6 +464,8 @@ def handle_inputs( ratio_min: Exclude values where profile likelihood ratio is smaller than ratio_min. 
+ plot_objective_values: + Whether to plot the objective function values instead of the likelihood Returns ------- @@ -388,6 +473,7 @@ def handle_inputs( """ # extract ratio values from result fvals = [] + colors = [] for i_par in range(0, len(result.profile_result.list[profile_list])): if ( i_par in profile_indices @@ -399,18 +485,31 @@ def handle_inputs( ratios = result.profile_result.list[profile_list][ i_par ].ratio_path[:] + colors_for_par = result.profile_result.list[profile_list][ + i_par + ].color_path # constrain indices = np.where(ratios > ratio_min) xs = xs[indices] ratios = ratios[indices] - - fvals_for_par = np.array([xs, ratios]) + colors_for_par = colors_for_par[indices] + + if plot_objective_values: + obj_vals = result.profile_result.list[profile_list][ + i_par + ].fval_path + obj_vals = obj_vals[indices] + fvals_for_par = np.array([xs, obj_vals]) + else: + fvals_for_par = np.array([xs, ratios]) else: fvals_for_par = None + colors_for_par = None fvals.append(fvals_for_par) + colors.append(colors_for_par) - return fvals + return fvals, colors def process_result_list_profiles( diff --git a/test/profile/test_profile.py b/test/profile/test_profile.py index f422fe8bf..8b2e3cbaa 100644 --- a/test/profile/test_profile.py +++ b/test/profile/test_profile.py @@ -68,7 +68,7 @@ def test_default_profiling(self): steps = result.profile_result.list[i_run][0]["ratio_path"].size if method == "adaptive_step_regression": self.assertTrue( - steps < 20, + steps < 100, "Profiling with regression based " "proposal needed too many steps.", ) @@ -79,7 +79,7 @@ def test_default_profiling(self): ) elif method == "adaptive_step_order_1": self.assertTrue( - steps < 25, + steps < 100, "Profiling with 1st order based " "proposal needed too many steps.", ) @@ -90,7 +90,7 @@ def test_default_profiling(self): ) elif method == "adaptive_step_order_0": self.assertTrue( - steps < 100, + steps < 300, "Profiling with 0th order based " "proposal needed too many steps.", ) @@ -479,6 +479,7 @@ def test_gh1165(lb, ub): progress_bar=False, profile_options=profile.ProfileOptions( min_step_size=0.1, + max_step_size=1.0, delta_ratio_max=0.05, default_step_size=0.5, ratio_min=0.01, From 47ae5d56c2f99ad88cc342cc1cef8d2ec528b30d Mon Sep 17 00:00:00 2001 From: Paul Jonas Jost <70631928+PaulJonasJost@users.noreply.github.com> Date: Wed, 23 Oct 2024 13:50:00 +0200 Subject: [PATCH 35/37] Updated PEtab.jl to work with version 3.x (#1489) * Updated Julia version and PEtab.jl version to latest non breaking version * Updated strings and functions. Changed to version 3.x. 
Slightly adjusted tolerances in a test * removed specified version from petab.jl * remove caching as a test --- .github/workflows/ci.yml | 7 ++----- pypesto/objective/julia/petabJl.py | 16 ++++++++-------- pypesto/objective/julia/petab_jl_importer.py | 2 +- test/julia/test_pyjulia.py | 6 +++++- 4 files changed, 16 insertions(+), 15 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b274e2a61..4db56ab55 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -199,17 +199,14 @@ jobs: - name: Install julia uses: julia-actions/setup-julia@v2 with: - version: 1.9 - - - name: Cache Julia - uses: julia-actions/cache@v2 + version: 1.11 - name: Install dependencies run: .github/workflows/install_deps.sh - name: Install PEtabJL dependencies run: > - julia -e 'using Pkg; Pkg.add(Pkg.PackageSpec(;name="PEtab", version="2.5.0")); + julia -e 'using Pkg; Pkg.add("PEtab"); Pkg.add("OrdinaryDiffEq"); Pkg.add("Sundials")' - name: Run tests diff --git a/pypesto/objective/julia/petabJl.py b/pypesto/objective/julia/petabJl.py index 84de76999..f9ddefb22 100644 --- a/pypesto/objective/julia/petabJl.py +++ b/pypesto/objective/julia/petabJl.py @@ -62,10 +62,10 @@ def __init__( self.petab_jl_problem = petab_jl_problem # get functions - fun = self.petab_jl_problem.compute_cost - grad = self.petab_jl_problem.compute_gradient - hess = self.petab_jl_problem.compute_hessian - x_names = np.asarray(self.petab_jl_problem.θ_names) + fun = self.petab_jl_problem.nllh + grad = self.petab_jl_problem.grad + hess = self.petab_jl_problem.hess + x_names = np.asarray(self.petab_jl_problem.xnames) # call the super super super constructor super(JuliaObjective, self).__init__( @@ -105,10 +105,10 @@ def __setstate__(self, state): self.petab_jl_problem = petab_jl_problem # get functions - fun = self.petab_jl_problem.compute_cost - grad = self.petab_jl_problem.compute_gradient - hess = self.petab_jl_problem.compute_hessian - x_names = np.asarray(self.petab_jl_problem.θ_names) + fun = self.petab_jl_problem.nllh + grad = self.petab_jl_problem.grad + hess = self.petab_jl_problem.hess + x_names = np.asarray(self.petab_jl_problem.xnames) # call the super super constructor super(JuliaObjective, self).__init__(fun, grad, hess, x_names) diff --git a/pypesto/objective/julia/petab_jl_importer.py b/pypesto/objective/julia/petab_jl_importer.py index 3d76900b5..b5b0b444e 100644 --- a/pypesto/objective/julia/petab_jl_importer.py +++ b/pypesto/objective/julia/petab_jl_importer.py @@ -309,7 +309,7 @@ def _write_julia_file( f"found at {link_to_options}\n" f"petabProblem = PEtabODEProblem(\n\t" f"petabModel,\n\t" - f"ode_solver=ODESolver({odeSolvOpt_str}),\n\t" + f"odesolver=ODESolver({odeSolvOpt_str}),\n\t" f"gradient_method=:{options['gradient_method']},\n\t" f"hessian_method=:{options['hessian_method']},\n\t" f"sparse_jacobian={options['sparse_jacobian']},\n\t" diff --git a/test/julia/test_pyjulia.py b/test/julia/test_pyjulia.py index dc0abcdef..8325a44e8 100644 --- a/test/julia/test_pyjulia.py +++ b/test/julia/test_pyjulia.py @@ -59,7 +59,11 @@ def test_pyjulia_pipeline(): # check with analytical value p_opt = obj.get("p_opt") - assert np.allclose(result.optimize_result[0].x, p_opt) # noqa: S101 + assert np.allclose( + result.optimize_result[0].x, + p_opt, + atol=1e-6, # noqa: S101 + ) def test_petabJL_interface(): From a187c81459bf769e5da51e2fff213ea4358356e1 Mon Sep 17 00:00:00 2001 From: Paul Jonas Jost <70631928+PaulJonasJost@users.noreply.github.com> Date: Wed, 30 Oct 2024 19:00:16 +0100 Subject: 
[PATCH 36/37] Added a deprecated version of create_model (#1502) --- pypesto/petab/importer.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/pypesto/petab/importer.py b/pypesto/petab/importer.py index 49b8eb083..5a44b9698 100644 --- a/pypesto/petab/importer.py +++ b/pypesto/petab/importer.py @@ -432,6 +432,28 @@ def create_problem( return problem + def create_model( + self, + force_compile: bool = False, + verbose: bool = True, + **kwargs, + ) -> amici.Model: + """See :meth:`AmiciObjectiveCreator.create_model`.""" + warnings.warn( + "This function has been moved to `AmiciObjectiveCreator`.", + DeprecationWarning, + stacklevel=2, + ) + objective_constructor = self.create_objective_creator( + kwargs.pop("simulator_type", self.simulator_type), + kwargs.pop("simulator", self.simulator), + ) + return objective_constructor.create_model( + force_compile=force_compile, + verbose=verbose, + **kwargs, + ) + def create_objective( self, model: amici.Model = None, From a82e63722989e50c4bf4eaab6aa8b81f17698ef1 Mon Sep 17 00:00:00 2001 From: Paul Jonas Jost <70631928+PaulJonasJost@users.noreply.github.com> Date: Wed, 30 Oct 2024 19:00:59 +0100 Subject: [PATCH 37/37] prepare release v0.5.4 (#1469) * Updated changelog and readthedocs * Fixed typo * Updated Readme * Updated changelog * Updated changelog * Updated Changelog --- CHANGELOG.rst | 29 +++++++++++++++++++++++++++++ pypesto/version.py | 2 +- 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e96fb50cc..c56d4b29e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,6 +6,35 @@ Release notes .......... +0.5.4 (2024-10-19) +------------------- + +- **Breaking Changes** + - Remove Aesara support (#1453, #1455) +- General + - CI improvements (#1436, #1437, #1438, #1439, #1440, #1443, #1473, #1484, #1486, #1490, #1485) + - Update references/documentation (#1404, #1456, #1474, #1479, #1483, #1470, #1498) +- Profile + - Improve Profiling Code (#1447) +- Visualize + - allow log and/or linear scale for visualization (#1435) + - More informative error message for start indices. (#1472) +- Optimization + - SacessOptimizer: Fix acceptance threshold for objective improvement (#1457) + - SacessOptimizer: expose more hyperparameters + minor fixes (#1459, #1476) + - SacessOptimizer, ESSOptimizer: Bound-normalize parameters for proximity check (#1462) + - ESSOptimizer: Fix bug in recombination and go-beyond (#1477, #1480) +- Objective + - FD-objective correctly working with fixed parameters (#1446) + - Petab Importer reforge (#1442, #1502) + - Use cloudpickle for serializing NegLogParameterPriors (#1467) + - Update PEtab.jl integration to match version 3.X (#1489) +- Sampling + - Bayes Factor Tutorial (#1444) +- Ensemble + - Added HPD calculation to ensemble (#1431) + + 0.5.3 (2024-08-01) ------------------- diff --git a/pypesto/version.py b/pypesto/version.py index 43a1e95ba..6b27eeebf 100644 --- a/pypesto/version.py +++ b/pypesto/version.py @@ -1 +1 @@ -__version__ = "0.5.3" +__version__ = "0.5.4"
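
The PEtab.jl update in this series (#1489 above) is essentially a set of attribute renames on the Julia problem object, plus the `ode_solver` → `odesolver` keyword change. For downstream code that still has to run against both PEtab.jl 2.x and 3.x, a small compatibility shim along these lines is conceivable; this is a sketch under that assumption, not part of the patch, which simply switches to the new names:

from types import SimpleNamespace

# PEtab.jl 2.x -> 3.x attribute renames applied in pypesto/objective/julia/petabJl.py
PETAB_JL_RENAMES = {
    "compute_cost": "nllh",
    "compute_gradient": "grad",
    "compute_hessian": "hess",
    "θ_names": "xnames",
}

def get_petab_jl_attribute(petab_jl_problem, name_2x: str):
    """Fetch an attribute by its PEtab.jl 2.x name, falling back to the 3.x name."""
    if hasattr(petab_jl_problem, name_2x):
        return getattr(petab_jl_problem, name_2x)
    return getattr(petab_jl_problem, PETAB_JL_RENAMES[name_2x])

# e.g. a 3.x-style problem object only exposes `nllh`
problem_3x = SimpleNamespace(nllh=lambda x: 0.0)
cost = get_petab_jl_attribute(problem_3x, "compute_cost")
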