diff --git a/INSTALL.rst b/INSTALL.rst index e5b75dd29..07b4eead2 100644 --- a/INSTALL.rst +++ b/INSTALL.rst @@ -53,6 +53,12 @@ and then install from the local repository via:: cd pypesto pip3 install . +Install and run pyPESTO docker +------------------------------ + +.. include:: containers.rst + + Upgrade ------- diff --git a/doc/containers.rst b/doc/containers.rst new file mode 100644 index 000000000..5a865be1e --- /dev/null +++ b/doc/containers.rst @@ -0,0 +1,46 @@ +.. _containers: + +Containers +========== + +We provide a pyPESTO container image in the OCI format through `docker.io/stephanmg/pypesto` (https://hub.docker.com/r/stephanmg/pypesto). + +Development versions are pushed as tag ``latest``. + +The image can also be built using the provided `Dockerfile `_. + +If you manually build your container and need to convert it to Apptainer, first pull the image from docker.io by: +`docker pull docker.io/stephanmg/pypesto:latest` and then export your docker image to `.tar` by `docker save -o pypesto.tar pypesto:latest`. + +Then execute for generating a `.sif` file: +``apptainer build pypesto.sif docker-archive://pypesto.tar`` + +Likewise for singularity: +``singularity build pypesto.sif docker-archive://pypesto.tar`` + + +Docker +------ + +To download / pull image: `docker pull docker.io/stephanmg/pypesto`. + +To run: `docker run --rm pypesto python3 -c 'import pypesto; print(f"PyPESTO version: {pypesto.__version__}")'`. + + +Apptainer +--------- + +Same principle as for Docker, but replace with `apptainer` commands: + +To download / pull image: `apptainer pull pypesto.sif docker://docker.io/stephanmg/pypesto:latest`. + +To run: `apptainer exec --contain --cleanenv pypesto.sif python3 -c 'import pypesto; print(f"PyPESTO version: {pypesto.__version__}")'` + +Note that we need to request a clean and contained environment with `--contain --cleanenv` for apptainer, otherwise some paths like +`/usr/`, `/lib`, `/home`, etc. 
will be inherited and lead to execution on the host system (e.g. pip packages and other dependencies +will be searched from the host). + +Singularity +----------- + +Same, but replace `apptainer` with `singularity`. diff --git a/pypesto/objective/amici/amici.py b/pypesto/objective/amici/amici.py index 3af3f7023..8edcf13fa 100644 --- a/pypesto/objective/amici/amici.py +++ b/pypesto/objective/amici/amici.py @@ -178,8 +178,12 @@ def __init__( parameter_mapping = create_identity_parameter_mapping( amici_model, len(edatas) ) + # parameter mapping where IDs of the currently fixed parameters + # have been replaced by their respective values + # (relevant for setting ``plist`` in ExpData later on) self.parameter_mapping = parameter_mapping - + # parameter mapping independent of fixed parameters + self._parameter_mapping_full = copy.deepcopy(parameter_mapping) # If supported, enable `guess_steadystate` by default. If not # supported, disable by default. If requested but unsupported, raise. if ( @@ -654,3 +658,38 @@ def check_gradients_match_finite_differences( return super().check_gradients_match_finite_differences( *args, x=x, x_free=x_free, **kwargs ) + + def update_from_problem( + self, + dim_full: int, + x_free_indices: Sequence[int], + x_fixed_indices: Sequence[int], + x_fixed_vals: Sequence[float], + ): + """Handle fixed parameters.""" + super().update_from_problem( + dim_full=dim_full, + x_free_indices=x_free_indices, + x_fixed_indices=x_fixed_indices, + x_fixed_vals=x_fixed_vals, + ) + + # To make amici aware of fixed parameters, and thus, avoid computing + # unnecessary sensitivities, we need to update the parameter mapping + # and replace the IDs of all fixed parameters by their respective + # values. 
+ self.parameter_mapping = copy.deepcopy(self._parameter_mapping_full) + if not len(x_fixed_indices): + return + + id_to_val = { + self.x_ids[x_idx]: x_val + for x_idx, x_val in zip(x_fixed_indices, x_fixed_vals) + } + for condition_mapping in self.parameter_mapping: + for ( + model_par, + mapped_to_par, + ) in condition_mapping.map_sim_var.items(): + if (val := id_to_val.get(mapped_to_par)) is not None: + condition_mapping.map_sim_var[model_par] = val diff --git a/test/petab/test_petab_import.py b/test/petab/test_petab_import.py index aa4eb0067..1ce935880 100644 --- a/test/petab/test_petab_import.py +++ b/test/petab/test_petab_import.py @@ -5,6 +5,7 @@ import logging import os import unittest +from itertools import chain import amici import benchmark_models_petab as models @@ -18,8 +19,6 @@ import pypesto.petab from pypesto.petab import PetabImporter -from .test_sbml_conversion import ATOL, RTOL - # In CI, bionetgen is installed here BNGPATH = os.path.abspath( os.path.join(os.path.dirname(__file__), "..", "..", "BioNetGen-2.8.5") @@ -119,7 +118,7 @@ def test_check_gradients(self): # Check gradients of simple model (should always be a true positive) model_name = "Boehm_JProteomeRes2014" importer = pypesto.petab.PetabImporter.from_yaml( - os.path.join(models.MODELS_DIR, model_name, model_name + ".yaml") + models.get_problem_yaml_path(model_name) ) objective = importer.create_problem().objective @@ -137,35 +136,76 @@ def test_check_gradients(self): def test_plist_mapping(): - """Test that the AMICI objective created via PEtab correctly maps - gradient entries when some parameters are not estimated (realized via - edata.plist).""" - model_name = "Boehm_JProteomeRes2014" - petab_problem = pypesto.petab.PetabImporter.from_yaml( + """Test that the AMICI objective created via PEtab correctly uses + ExpData.plist. 
+ + That means, ensure that + 1) it only computes gradient entries that are really required, and + 2) correctly maps model-gradient entries to the objective gradient + 3) with or without pypesto-fixed parameters. + + In Bruno_JExpBot2016, different parameters are estimated in different + conditions. + """ + model_name = "Bruno_JExpBot2016" + petab_importer = pypesto.petab.PetabImporter.from_yaml( os.path.join(models.MODELS_DIR, model_name, model_name + ".yaml") ) - - # define test parameter - par = np.asarray(petab_problem.petab_problem.x_nominal_scaled) - - problem = petab_problem.create_problem() + objective_creator = petab_importer.create_objective_creator() + problem = petab_importer.create_problem( + objective_creator.create_objective() + ) objective = problem.objective - # check that x_names are correctly subsetted - assert objective.x_names == [ - problem.x_names[ix] for ix in problem.x_free_indices - ] objective.amici_solver.setSensitivityMethod( - amici.SensitivityMethod_forward + amici.SensitivityMethod.forward ) - objective.amici_solver.setAbsoluteTolerance(1e-10) - objective.amici_solver.setRelativeTolerance(1e-12) + objective.amici_solver.setAbsoluteTolerance(1e-16) + objective.amici_solver.setRelativeTolerance(1e-15) + + # slightly perturb the parameters to avoid vanishing gradients + par = np.asarray(petab_importer.petab_problem.x_nominal_free_scaled) * 1.01 + + # call once to make sure ExpDatas are populated + fx1, grad1 = objective(par, sensi_orders=(0, 1)) + + plists1 = {edata.plist for edata in objective.edatas} + assert len(plists1) == 6, plists1 df = objective.check_grad_multi_eps( - par[problem.x_free_indices], multi_eps=[1e-3, 1e-4, 1e-5] + par[problem.x_free_indices], multi_eps=[1e-5, 1e-6, 1e-7, 1e-8] ) print("relative errors gradient: ", df.rel_err.values) print("absolute errors gradient: ", df.abs_err.values) - assert np.all((df.rel_err.values < RTOL) | (df.abs_err.values < ATOL)) + assert np.all((df.rel_err.values < 1e-8) | 
(df.abs_err.values < 1e-10)) + + # do the same after fixing some parameters + # we fix them to the previous values, so we can compare the results + fixed_ids = ["init_b10_1", "init_bcry_1"] + # the corresponding amici parameters (they are only ever mapped to the + # respective parameters in fixed_ids, and thus, should not occur in any + # `plist` later on) + fixed_model_par_ids = ["init_b10", "init_bcry"] + fixed_model_par_idxs = [ + objective.amici_model.getParameterIds().index(id) + for id in fixed_model_par_ids + ] + fixed_idxs = [problem.x_names.index(id) for id in fixed_ids] + problem.fix_parameters(fixed_idxs, par[fixed_idxs]) + assert objective is problem.objective + assert problem.x_fixed_indices == fixed_idxs + fx2, grad2 = objective(par[problem.x_free_indices], sensi_orders=(0, 1)) + assert np.isclose(fx1, fx2, rtol=1e-10, atol=1e-14) + assert np.allclose( + grad1[problem.x_free_indices], grad2, rtol=1e-10, atol=1e-14 + ) + plists2 = {edata.plist for edata in objective.edatas} + # the fixed parameters should have been in plist1, but not in plist2 + assert ( + set(fixed_model_par_idxs) - set(chain.from_iterable(plists1)) == set() + ) + assert ( + set(fixed_model_par_idxs) & set(chain.from_iterable(plists2)) == set() + ) def test_max_sensi_order():