diff --git a/src/ert/run_models/base_run_model.py b/src/ert/run_models/base_run_model.py index 09ca0b9557a..e31b939c55f 100644 --- a/src/ert/run_models/base_run_model.py +++ b/src/ert/run_models/base_run_model.py @@ -14,7 +14,7 @@ from contextlib import contextmanager from pathlib import Path from queue import SimpleQueue -from typing import TYPE_CHECKING, cast +from typing import TYPE_CHECKING, Any, cast import numpy as np @@ -31,8 +31,11 @@ AnalysisDataEvent, AnalysisErrorEvent, ) -from ert.config import ErtConfig, HookRuntime, QueueSystem +from ert.config import HookRuntime, QueueSystem from ert.config.analysis_module import BaseSettings +from ert.config.forward_model_step import ForwardModelStep +from ert.config.model_config import ModelConfig +from ert.config.workflow import Workflow from ert.enkf_main import _seed_sequence, create_run_path from ert.ensemble_evaluator import Ensemble as EEEnsemble from ert.ensemble_evaluator import ( @@ -58,6 +61,7 @@ from ert.mode_definitions import MODULE_MODE from ert.runpaths import Runpaths from ert.storage import Ensemble, Storage +from ert.substitutions import Substitutions from ert.trace import tracer from ert.workflow_runner import WorkflowRunner @@ -131,10 +135,18 @@ def captured_logs( class BaseRunModel(ABC): def __init__( self, - config: ErtConfig, storage: Storage, + runpath_file: Path, + user_config_file: Path, + env_vars: dict[str, str], + env_pr_fm_step: dict[str, dict[str, Any]], + model_config: ModelConfig, queue_config: QueueConfig, + forward_model_steps: list[ForwardModelStep], status_queue: SimpleQueue[StatusEvents], + substitutions: Substitutions, + templates: list[tuple[str, str]], + hooked_workflows: defaultdict[HookRuntime, list[Workflow]], active_realizations: list[bool], total_iterations: int = 1, start_iteration: int = 0, @@ -147,27 +159,35 @@ def __init__( the forward model and passing events back through the supplied queue. 
""" self._total_iterations = total_iterations - config.analysis_config.num_iterations = total_iterations - self.start_time: int | None = None self.stop_time: int | None = None self._queue_config: QueueConfig = queue_config self._initial_realizations_mask: list[bool] = copy.copy(active_realizations) self._completed_realizations_mask: list[bool] = [] self.support_restart: bool = True - self.ert_config = config self._storage = storage self._context_env: dict[str, str] = {} self.random_seed: int = _seed_sequence(random_seed) self.rng = np.random.default_rng(self.random_seed) - self.substitutions = config.substitutions + self._substitutions: Substitutions = substitutions + self._model_config: ModelConfig = model_config + self._runpath_file: Path = runpath_file + self._forward_model_steps: list[ForwardModelStep] = forward_model_steps + self._user_config_file: Path = user_config_file + self._templates: list[tuple[str, str]] = templates + self._hooked_workflows: defaultdict[HookRuntime, list[Workflow]] = ( + hooked_workflows + ) + + self._env_vars: dict[str, str] = env_vars + self._env_pr_fm_step: dict[str, dict[str, Any]] = env_pr_fm_step self.run_paths = Runpaths( - jobname_format=config.model_config.jobname_format_string, - runpath_format=config.model_config.runpath_format_string, - filename=str(config.runpath_file), - substitutions=self.substitutions, - eclbase=config.model_config.eclbase_format_string, + jobname_format=self._model_config.jobname_format_string, + runpath_format=self._model_config.runpath_format_string, + filename=str(self._runpath_file), + substitutions=self._substitutions, + eclbase=self._model_config.eclbase_format_string, ) self._iter_snapshot: dict[int, EnsembleSnapshot] = {} self._status_queue = status_queue @@ -603,12 +623,12 @@ def _build_ensemble( Realization( active=run_arg.active, iens=run_arg.iens, - fm_steps=self.ert_config.forward_model_steps, + fm_steps=self._forward_model_steps, max_runtime=self._queue_config.max_runtime, run_arg=run_arg, num_cpu=self._queue_config.preferred_num_cpu, - job_script=self.ert_config.queue_config.job_script, - realization_memory=self.ert_config.queue_config.realization_memory, + job_script=self._queue_config.job_script, + realization_memory=self._queue_config.realization_memory, ) ) return EEEnsemble( @@ -676,7 +696,7 @@ def run_workflows( storage: Storage | None = None, ensemble: Ensemble | None = None, ) -> None: - for workflow in self.ert_config.hooked_workflows[runtime]: + for workflow in self._hooked_workflows[runtime]: WorkflowRunner(workflow, storage, ensemble).run_blocking() def _evaluate_and_postprocess( @@ -688,13 +708,13 @@ def _evaluate_and_postprocess( create_run_path( run_args=run_args, ensemble=ensemble, - user_config_file=self.ert_config.user_config_file, - env_vars=self.ert_config.env_vars, - env_pr_fm_step=self.ert_config.env_pr_fm_step, - forward_model_steps=self.ert_config.forward_model_steps, - substitutions=self.ert_config.substitutions, - templates=self.ert_config.ert_templates, - model_config=self.ert_config.model_config, + user_config_file=str(self._user_config_file), + env_vars=self._env_vars, + env_pr_fm_step=self._env_pr_fm_step, + forward_model_steps=self._forward_model_steps, + substitutions=self._substitutions, + templates=self._templates, + model_config=self._model_config, runpaths=self.run_paths, context_env=self._context_env, ) @@ -735,10 +755,18 @@ def __init__( self, analysis_settings: BaseSettings, update_settings: UpdateSettings, - config: ErtConfig, storage: Storage, + runpath_file: Path, + 
user_config_file: Path, + env_vars: dict[str, str], + env_pr_fm_step: dict[str, dict[str, Any]], + model_config: ModelConfig, queue_config: QueueConfig, + forward_model_steps: list[ForwardModelStep], status_queue: SimpleQueue[StatusEvents], + substitutions: Substitutions, + templates: list[tuple[str, str]], + hooked_workflows: defaultdict[HookRuntime, list[Workflow]], active_realizations: list[bool], total_iterations: int, start_iteration: int, @@ -749,10 +777,18 @@ def __init__( self._update_settings: UpdateSettings = update_settings super().__init__( - config, storage, + runpath_file, + user_config_file, + env_vars, + env_pr_fm_step, + model_config, queue_config, + forward_model_steps, status_queue, + substitutions, + templates, + hooked_workflows, active_realizations=active_realizations, total_iterations=total_iterations, start_iteration=start_iteration, diff --git a/src/ert/run_models/ensemble_experiment.py b/src/ert/run_models/ensemble_experiment.py index 4df1c60c661..a7aac4d0563 100644 --- a/src/ert/run_models/ensemble_experiment.py +++ b/src/ert/run_models/ensemble_experiment.py @@ -1,6 +1,7 @@ from __future__ import annotations import logging +from pathlib import Path from queue import SimpleQueue from typing import TYPE_CHECKING @@ -46,11 +47,24 @@ def __init__( self.experiment: Experiment | None = None self.ensemble: Ensemble | None = None + self._design_matrix = config.analysis_config.design_matrix + self._observations = config.observations + self._parameter_configuration = config.ensemble_config.parameter_configuration + self._response_configuration = config.ensemble_config.response_configuration + super().__init__( - config, storage, + config.runpath_file, + Path(config.user_config_file), + config.env_vars, + config.env_pr_fm_step, + config.model_config, queue_config, + config.forward_model_steps, status_queue, + config.substitutions, + config.ert_templates, + config.hooked_workflows, total_iterations=1, active_realizations=active_realizations, random_seed=random_seed, @@ -67,8 +81,8 @@ def run_experiment( self.restart = restart # If design matrix is present, we try to merge design matrix parameters # to the experiment parameters and set new active realizations - parameters_config = self.ert_config.ensemble_config.parameter_configuration - design_matrix = self.ert_config.analysis_config.design_matrix + parameters_config = self._parameter_configuration + design_matrix = self._design_matrix design_matrix_group = None if design_matrix is not None: try: @@ -87,8 +101,8 @@ def run_experiment( if design_matrix_group is not None else parameters_config ), - observations=self.ert_config.observations, - responses=self.ert_config.ensemble_config.response_configuration, + observations=self._observations, + responses=self._response_configuration, ) self.ensemble = self._storage.create_ensemble( self.experiment, diff --git a/src/ert/run_models/ensemble_smoother.py b/src/ert/run_models/ensemble_smoother.py index 620a93af0d0..e11021cdaea 100644 --- a/src/ert/run_models/ensemble_smoother.py +++ b/src/ert/run_models/ensemble_smoother.py @@ -1,6 +1,7 @@ from __future__ import annotations import logging +from pathlib import Path from queue import SimpleQueue from typing import TYPE_CHECKING @@ -42,10 +43,18 @@ def __init__( super().__init__( es_settings, update_settings, - config, storage, + config.runpath_file, + Path(config.user_config_file), + config.env_vars, + config.env_pr_fm_step, + config.model_config, queue_config, + config.forward_model_steps, status_queue, + config.substitutions, + 
config.ert_templates, + config.hooked_workflows, active_realizations=active_realizations, start_iteration=0, total_iterations=2, @@ -57,6 +66,10 @@ def __init__( self.support_restart = False + self._parameter_configuration = config.ensemble_config.parameter_configuration + self._observations = config.observations + self._response_configuration = config.ensemble_config.response_configuration + @tracer.start_as_current_span(f"{__name__}.run_experiment") def run_experiment( self, evaluator_server_config: EvaluatorServerConfig, restart: bool = False @@ -66,9 +79,9 @@ def run_experiment( self.run_workflows(HookRuntime.PRE_EXPERIMENT) ensemble_format = self.target_ensemble_format experiment = self._storage.create_experiment( - parameters=self.ert_config.ensemble_config.parameter_configuration, - observations=self.ert_config.observations, - responses=self.ert_config.ensemble_config.response_configuration, + parameters=self._parameter_configuration, + observations=self._observations, + responses=self._response_configuration, name=self.experiment_name, ) diff --git a/src/ert/run_models/evaluate_ensemble.py b/src/ert/run_models/evaluate_ensemble.py index ddc194190d5..7d23295fb78 100644 --- a/src/ert/run_models/evaluate_ensemble.py +++ b/src/ert/run_models/evaluate_ensemble.py @@ -1,6 +1,7 @@ from __future__ import annotations import logging +from pathlib import Path from typing import TYPE_CHECKING from uuid import UUID @@ -47,11 +48,20 @@ def __init__( self.ensemble = storage.get_ensemble(UUID(ensemble_id)) except KeyError as err: raise ValueError(f"No ensemble: {ensemble_id}") from err + super().__init__( - config, storage, + config.runpath_file, + Path(config.user_config_file), + config.env_vars, + config.env_pr_fm_step, + config.model_config, queue_config, + config.forward_model_steps, status_queue, + config.substitutions, + config.ert_templates, + config.hooked_workflows, start_iteration=self.ensemble.iteration, total_iterations=1, active_realizations=active_realizations, diff --git a/src/ert/run_models/everest_run_model.py b/src/ert/run_models/everest_run_model.py index 3c8c74e632b..f05ac95ff8c 100644 --- a/src/ert/run_models/everest_run_model.py +++ b/src/ert/run_models/everest_run_model.py @@ -137,14 +137,26 @@ def __init__( storage = open_storage(config.ens_path, mode="w") status_queue: queue.SimpleQueue[StatusEvents] = queue.SimpleQueue() + super().__init__( - config, storage, + config.runpath_file, + Path(config.user_config_file), + config.env_vars, + config.env_pr_fm_step, + config.model_config, config.queue_config, + config.forward_model_steps, status_queue, + config.substitutions, + config.ert_templates, + config.hooked_workflows, active_realizations=[], # Set dynamically in run_forward_model() ) self.support_restart = False + self._parameter_configuration = config.ensemble_config.parameter_configuration + self._parameter_configs = config.ensemble_config.parameter_configs + self._response_configuration = config.ensemble_config.response_configuration @classmethod def create( @@ -187,8 +199,8 @@ def run_experiment( self._eval_server_cfg = evaluator_server_config self._experiment = self._storage.create_experiment( name=f"EnOpt@{datetime.datetime.now().strftime('%Y-%m-%d@%H:%M:%S')}", - parameters=self.ert_config.ensemble_config.parameter_configuration, - responses=self.ert_config.ensemble_config.response_configuration, + parameters=self._parameter_configuration, + responses=self._response_configuration, ) # Initialize the ropt optimizer: @@ -494,7 +506,7 @@ def _check_suffix( raise 
KeyError(err_msg) for control_name, control in controls.items(): - ext_config = self.ert_config.ensemble_config.parameter_configs[control_name] + ext_config = self._parameter_configs[control_name] if isinstance(ext_config, ExtParamConfig): if len(ext_config) != len(control.keys()): raise KeyError( @@ -515,7 +527,7 @@ def _get_run_args( evaluator_context: EvaluatorContext, batch_data: dict[int, Any], ) -> list[RunArg]: - substitutions = self.ert_config.substitutions + substitutions = self._substitutions substitutions[""] = ensemble.name self.active_realizations = [True] * len(batch_data) for sim_id, control_idx in enumerate(batch_data.keys()): @@ -525,11 +537,11 @@ def _get_run_args( ] ) run_paths = Runpaths( - jobname_format=self.ert_config.model_config.jobname_format_string, - runpath_format=self.ert_config.model_config.runpath_format_string, - filename=str(self.ert_config.runpath_file), + jobname_format=self._model_config.jobname_format_string, + runpath_format=self._model_config.runpath_format_string, + filename=str(self._runpath_file), substitutions=substitutions, - eclbase=self.ert_config.model_config.eclbase_format_string, + eclbase=self._model_config.eclbase_format_string, ) return create_run_arguments( run_paths, diff --git a/src/ert/run_models/iterated_ensemble_smoother.py b/src/ert/run_models/iterated_ensemble_smoother.py index ca628c1dd04..e89c7935533 100644 --- a/src/ert/run_models/iterated_ensemble_smoother.py +++ b/src/ert/run_models/iterated_ensemble_smoother.py @@ -2,6 +2,7 @@ import functools import logging +from pathlib import Path from queue import SimpleQueue from typing import TYPE_CHECKING @@ -62,11 +63,21 @@ def __init__( self.target_ensemble_format = target_ensemble self.experiment_name = experiment_name + config.analysis_config.num_iterations = number_of_iterations + super().__init__( - config, storage, + config.runpath_file, + Path(config.user_config_file), + config.env_vars, + config.env_pr_fm_step, + config.model_config, queue_config, + config.forward_model_steps, status_queue, + config.substitutions, + config.ert_templates, + config.hooked_workflows, active_realizations=active_realizations, total_iterations=number_of_iterations, random_seed=random_seed, @@ -78,6 +89,11 @@ def __init__( self.sies_smoother = None self.num_retries_per_iter = num_retries_per_iter + self._design_matrix = config.analysis_config.design_matrix + self._observations = config.observations + self._parameter_configuration = config.ensemble_config.parameter_configuration + self._response_configuration = config.ensemble_config.response_configuration + @property def sies_iteration(self) -> int: """Returns the SIES iteration number, starting at 1.""" @@ -126,9 +142,9 @@ def run_experiment( self.run_workflows(HookRuntime.PRE_EXPERIMENT) target_ensemble_format = self.target_ensemble_format experiment = self._storage.create_experiment( - parameters=self.ert_config.ensemble_config.parameter_configuration, - observations=self.ert_config.observations, - responses=self.ert_config.ensemble_config.response_configuration, + parameters=self._parameter_configuration, + observations=self._observations, + responses=self._response_configuration, name=self.experiment_name, ) prior = self._storage.create_ensemble( diff --git a/src/ert/run_models/manual_update.py b/src/ert/run_models/manual_update.py index 8fbf54c3e79..f0af885542e 100644 --- a/src/ert/run_models/manual_update.py +++ b/src/ert/run_models/manual_update.py @@ -1,6 +1,7 @@ from __future__ import annotations import logging +from pathlib import Path 
from queue import SimpleQueue from typing import TYPE_CHECKING from uuid import UUID @@ -38,20 +39,27 @@ def __init__( status_queue: SimpleQueue[StatusEvents], ): try: - prior_id = UUID(ensemble_id) - prior = storage.get_ensemble(prior_id) + prior = storage.get_ensemble(UUID(ensemble_id)) except (KeyError, ValueError) as err: raise ErtRunError( - f"Prior ensemble with ID: {prior_id} does not exists" + f"Prior ensemble with ID: {UUID(ensemble_id)} does not exists" ) from err super().__init__( es_settings, update_settings, - config, storage, + config.runpath_file, + Path(config.user_config_file), + config.env_vars, + config.env_pr_fm_step, + config.model_config, queue_config, + config.forward_model_steps, status_queue, + config.substitutions, + config.ert_templates, + config.hooked_workflows, active_realizations=active_realizations, total_iterations=1, start_iteration=prior.iteration, diff --git a/src/ert/run_models/multiple_data_assimilation.py b/src/ert/run_models/multiple_data_assimilation.py index 377c720a2ad..9d7a43ad802 100644 --- a/src/ert/run_models/multiple_data_assimilation.py +++ b/src/ert/run_models/multiple_data_assimilation.py @@ -1,6 +1,7 @@ from __future__ import annotations import logging +from pathlib import Path from queue import SimpleQueue from typing import TYPE_CHECKING from uuid import UUID @@ -70,16 +71,27 @@ def __init__( super().__init__( es_settings, update_settings, - config, storage, + config.runpath_file, + Path(config.user_config_file), + config.env_vars, + config.env_pr_fm_step, + config.model_config, queue_config, + config.forward_model_steps, status_queue, + config.substitutions, + config.ert_templates, + config.hooked_workflows, active_realizations=active_realizations, total_iterations=total_iterations, start_iteration=start_iteration, random_seed=random_seed, minimum_required_realizations=minimum_required_realizations, ) + self._observations = config.observations + self._parameter_configuration = config.ensemble_config.parameter_configuration + self._response_configuration = config.ensemble_config.response_configuration @tracer.start_as_current_span(f"{__name__}.run_experiment") def run_experiment( @@ -109,9 +121,9 @@ def run_experiment( self.run_workflows(HookRuntime.PRE_EXPERIMENT) sim_args = {"weights": self._relative_weights} experiment = self._storage.create_experiment( - parameters=self.ert_config.ensemble_config.parameter_configuration, - observations=self.ert_config.observations, - responses=self.ert_config.ensemble_config.response_configuration, + parameters=self._parameter_configuration, + observations=self._observations, + responses=self._response_configuration, simulation_arguments=sim_args, name=self.experiment_name, ) diff --git a/src/everest/detached/jobs/everserver.py b/src/everest/detached/jobs/everserver.py index 33f5a96c8b8..8050aa549a2 100755 --- a/src/everest/detached/jobs/everserver.py +++ b/src/everest/detached/jobs/everserver.py @@ -305,7 +305,7 @@ def main(): simulation_callback=partial(_sim_monitor, shared_data=shared_data), optimization_callback=partial(_opt_monitor, shared_data=shared_data), ) - if run_model.ert_config.queue_config.queue_system == QueueSystem.LOCAL: + if run_model._queue_config.queue_system == QueueSystem.LOCAL: evaluator_server_config = EvaluatorServerConfig() else: evaluator_server_config = EvaluatorServerConfig( diff --git a/tests/ert/unit_tests/run_models/test_base_run_model.py b/tests/ert/unit_tests/run_models/test_base_run_model.py index e07e7268904..806c1ca07e6 100644 --- 
a/tests/ert/unit_tests/run_models/test_base_run_model.py +++ b/tests/ert/unit_tests/run_models/test_base_run_model.py @@ -18,16 +18,30 @@ def patch_abstractmethods(monkeypatch): monkeypatch.setattr(BaseRunModel, "__abstractmethods__", set()) -def test_base_run_model_supports_restart(minimum_case): - brm = BaseRunModel(minimum_case, None, None, minimum_case.queue_config, [True]) - assert brm.support_restart - - class MockJob: def __init__(self, status): self.status = status +def test_base_run_model_supports_restart(minimum_case): + brm = BaseRunModel( + storage=minimum_case, + runpath_file=MagicMock(), + user_config_file=MagicMock(), + env_vars=MagicMock(), + env_pr_fm_step=MagicMock(), + model_config=MagicMock(), + queue_config=minimum_case.queue_config, + forward_model_steps=MagicMock(), + status_queue=MagicMock(), + substitutions=MagicMock(), + templates=MagicMock(), + hooked_workflows=MagicMock(), + active_realizations=[True], + ) + assert brm.support_restart + + @pytest.mark.parametrize( "initials", [ @@ -40,7 +54,21 @@ def __init__(self, status): ], ) def test_active_realizations(initials): - brm = BaseRunModel(MagicMock(), None, None, None, initials) + brm = BaseRunModel( + storage=MagicMock(), + runpath_file=MagicMock(), + user_config_file=MagicMock(), + env_vars=MagicMock(), + env_pr_fm_step=MagicMock(), + model_config=MagicMock(), + queue_config=MagicMock(), + forward_model_steps=MagicMock(), + status_queue=MagicMock(), + substitutions=MagicMock(), + templates=MagicMock(), + hooked_workflows=MagicMock(), + active_realizations=initials, + ) brm._initial_realizations_mask = initials assert brm.ensemble_size == len(initials) @@ -59,7 +87,21 @@ def test_active_realizations(initials): ], ) def test_failed_realizations(initials, completed, any_failed, failures): - brm = BaseRunModel(MagicMock(), None, None, None, initials) + brm = BaseRunModel( + storage=MagicMock(), + runpath_file=MagicMock(), + user_config_file=MagicMock(), + env_vars=MagicMock(), + env_pr_fm_step=MagicMock(), + model_config=MagicMock(), + queue_config=MagicMock(), + forward_model_steps=MagicMock(), + status_queue=MagicMock(), + substitutions=MagicMock(), + templates=MagicMock(), + hooked_workflows=MagicMock(), + active_realizations=initials, + ) brm._initial_realizations_mask = initials brm._completed_realizations_mask = completed @@ -89,15 +131,19 @@ def test_check_if_runpath_exists( ): model_config = ModelConfig(runpath_format_string=run_path) subs_list = Substitutions() - config = MagicMock() - config.model_config = model_config - config.substitutions = subs_list - brm = BaseRunModel( - config, - None, - None, - None, + storage=MagicMock(), + runpath_file=MagicMock(), + user_config_file=MagicMock(), + env_vars=MagicMock(), + env_pr_fm_step=MagicMock(), + model_config=model_config, + queue_config=MagicMock(), + forward_model_steps=MagicMock(), + status_queue=MagicMock(), + substitutions=subs_list, + templates=MagicMock(), + hooked_workflows=MagicMock(), active_realizations=active_realizations_mask, start_iteration=start_iteration, total_iterations=number_of_iterations, @@ -121,17 +167,22 @@ def test_get_number_of_existing_runpaths( run_path = "out/realization-%d/iter-%d" model_config = ModelConfig(runpath_format_string=run_path) subs_list = Substitutions() - config = MagicMock() - config.model_config = model_config - config.substitutions = subs_list - brm = BaseRunModel( - config=config, storage=MagicMock(), + runpath_file=MagicMock(), + user_config_file=MagicMock(), + env_vars=MagicMock(), + 
env_pr_fm_step=MagicMock(), + model_config=model_config, queue_config=MagicMock(), + forward_model_steps=MagicMock(), status_queue=MagicMock(), + substitutions=subs_list, + templates=MagicMock(), + hooked_workflows=MagicMock(), active_realizations=active_realizations_mask, ) + assert brm.get_number_of_existing_runpaths() == expected_number @@ -162,13 +213,23 @@ def test_delete_run_path(run_path_format, active_realizations): os.makedirs(share_path) model_config = ModelConfig(runpath_format_string=run_path_format) subs_list = Substitutions({"": "0", "": "Case_Name"}) - config = MagicMock() - config.model_config = model_config - config.substitutions = subs_list brm = BaseRunModel( - config, MagicMock(), MagicMock(), MagicMock(), active_realizations + storage=MagicMock(), + runpath_file=MagicMock(), + user_config_file=MagicMock(), + env_vars=MagicMock(), + env_pr_fm_step=MagicMock(), + model_config=model_config, + queue_config=MagicMock(), + forward_model_steps=MagicMock(), + status_queue=MagicMock(), + substitutions=subs_list, + templates=MagicMock(), + hooked_workflows=MagicMock(), + active_realizations=active_realizations, ) + brm.rm_run_path() assert not any(path.exists() for path in expected_removed) assert all(path.parent.exists() for path in expected_removed) @@ -182,12 +243,21 @@ def test_num_cpu_is_propagated_from_config_to_ensemble(run_args): # Set up a BaseRunModel object from the config above: brm = BaseRunModel( - config=config, storage=MagicMock(spec=Storage), + runpath_file=MagicMock(), + user_config_file=MagicMock(), + env_vars=MagicMock(), + env_pr_fm_step=MagicMock(), + model_config=MagicMock(), queue_config=config.queue_config, + forward_model_steps=MagicMock(), status_queue=MagicMock(spec=SimpleQueue), + substitutions=config.substitutions, + templates=MagicMock(), + hooked_workflows=MagicMock(), active_realizations=[True], ) + run_args = run_args(config, MagicMock()) # Instead of running the BaseRunModel, we only test its implementation detail which is to @@ -226,13 +296,23 @@ def test_get_current_status( config = ErtConfig.from_file_contents("NUM_REALIZATIONS 3") initial_active_realizations = [True] * 3 new_active_realizations = [True] * 3 + brm = BaseRunModel( - config=config, storage=MagicMock(spec=Storage), + runpath_file=MagicMock(), + user_config_file=MagicMock(), + env_vars=MagicMock(), + env_pr_fm_step=MagicMock(), + model_config=MagicMock(), queue_config=config.queue_config, + forward_model_steps=MagicMock(), status_queue=MagicMock(spec=SimpleQueue), + substitutions=config.substitutions, + templates=MagicMock(), + hooked_workflows=MagicMock(), active_realizations=initial_active_realizations, ) + snapshot_dict_reals = {} for index, realization_status in real_status_dict.items(): snapshot_dict_reals[index] = {"status": realization_status} @@ -305,12 +385,21 @@ def test_get_current_status_when_rerun( """Active realizations gets changed when we choose to rerun, and the result from the previous run should be included in the current_status.""" config = ErtConfig.from_file_contents("NUM_REALIZATIONS 3") brm = BaseRunModel( - config=config, storage=MagicMock(spec=Storage), + runpath_file=MagicMock(), + user_config_file=MagicMock(), + env_vars=MagicMock(), + env_pr_fm_step=MagicMock(), + model_config=MagicMock(), queue_config=config.queue_config, + forward_model_steps=MagicMock(), status_queue=MagicMock(spec=SimpleQueue), + substitutions=config.substitutions, + templates=MagicMock(), + hooked_workflows=MagicMock(), active_realizations=initial_active_realizations, ) + 
brm.restart = True snapshot_dict_reals = {} for index, realization_status in real_status_dict.items(): @@ -328,13 +417,23 @@ def test_get_current_status_for_new_iteration_when_realization_failed_in_previou # Realization 0,1, and 3 failed in the previous iteration new_active_realizations = [False, False, True, False, True] config = ErtConfig.from_file_contents("NUM_REALIZATIONS 5") + brm = BaseRunModel( - config=config, storage=MagicMock(spec=Storage), + runpath_file=MagicMock(), + user_config_file=MagicMock(), + env_vars=MagicMock(), + env_pr_fm_step=MagicMock(), + model_config=MagicMock(), queue_config=config.queue_config, + forward_model_steps=MagicMock(), status_queue=MagicMock(spec=SimpleQueue), + substitutions=config.substitutions, + templates=MagicMock(), + hooked_workflows=MagicMock(), active_realizations=initial_active_realizations, ) + snapshot_dict_reals = { "2": {"status": "Running"}, "4": {"status": "Finished"}, @@ -371,13 +470,23 @@ def test_get_number_of_active_realizations_varies_when_rerun_or_new_iteration( When running a new iteration based on the result of the previous iteration, we only include the successful realizations.""" initial_active_realizations = [True] * 5 config = ErtConfig.from_file_contents("NUM_REALIZATIONS 5") + brm = BaseRunModel( - config=config, storage=MagicMock(spec=Storage), + runpath_file=MagicMock(), + user_config_file=MagicMock(), + env_vars=MagicMock(), + env_pr_fm_step=MagicMock(), + model_config=MagicMock(), queue_config=config.queue_config, + forward_model_steps=MagicMock(), status_queue=MagicMock(spec=SimpleQueue), + substitutions=config.substitutions, + templates=MagicMock(), + hooked_workflows=MagicMock(), active_realizations=initial_active_realizations, ) + brm.active_realizations = new_active_realizations brm.restart = was_rerun assert brm.get_number_of_active_realizations() == expected_result diff --git a/tests/ert/unit_tests/run_models/test_model_factory.py b/tests/ert/unit_tests/run_models/test_model_factory.py index 49c64a25fe7..ba2f3f403bc 100644 --- a/tests/ert/unit_tests/run_models/test_model_factory.py +++ b/tests/ert/unit_tests/run_models/test_model_factory.py @@ -64,7 +64,7 @@ def test_setup_single_test_run(poly_case, storage): ) assert isinstance(model, SingleTestRun) assert model._storage == storage - assert model.ert_config == poly_case + # assert model.ert_config == poly_case def test_setup_single_test_run_with_ensemble(poly_case, storage): @@ -81,7 +81,7 @@ def test_setup_single_test_run_with_ensemble(poly_case, storage): ) assert isinstance(model, SingleTestRun) assert model._storage == storage - assert model.ert_config == poly_case + # assert model.ert_config == poly_case def test_setup_ensemble_experiment(poly_case, storage): diff --git a/tests/everest/conftest.py b/tests/everest/conftest.py index 11a31c207b3..41ef76c3e75 100644 --- a/tests/everest/conftest.py +++ b/tests/everest/conftest.py @@ -149,7 +149,7 @@ def evaluator_server_config_generator(): def create_evaluator_server_config(run_model): return EvaluatorServerConfig( custom_port_range=range(49152, 51819) - if run_model.ert_config.queue_config.queue_system == QueueSystem.LOCAL + if run_model._queue_config.queue_system == QueueSystem.LOCAL else None ) diff --git a/tests/everest/test_simulator_cache.py b/tests/everest/test_simulator_cache.py index b9533c92cd2..14765b49b14 100644 --- a/tests/everest/test_simulator_cache.py +++ b/tests/everest/test_simulator_cache.py @@ -24,7 +24,7 @@ def new_call(*args): evaluator_server_config = EvaluatorServerConfig( 
custom_port_range=range(49152, 51819) - if run_model.ert_config.queue_config.queue_system == QueueSystem.LOCAL + if run_model._queue_config.queue_system == QueueSystem.LOCAL else None )
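
Illustrative sketch (not part of the patch): after this change BaseRunModel no longer receives the whole ErtConfig; each run model unpacks the individual fields and forwards them explicitly, as the ensemble_experiment.py and ensemble_smoother.py hunks above show. The hypothetical helper below, base_run_model_kwargs, is an assumption for illustration only; it just collects those fields from an ErtConfig instance named config, using the attribute names that appear in this diff.

    from pathlib import Path
    from typing import Any

    def base_run_model_kwargs(config) -> dict[str, Any]:
        # Hypothetical helper (not in the patch): gather the ErtConfig fields
        # that BaseRunModel now takes explicitly, named as in the hunks above.
        return {
            "runpath_file": config.runpath_file,
            "user_config_file": Path(config.user_config_file),
            "env_vars": config.env_vars,
            "env_pr_fm_step": config.env_pr_fm_step,
            "model_config": config.model_config,
            "forward_model_steps": config.forward_model_steps,
            "substitutions": config.substitutions,
            "templates": config.ert_templates,
            "hooked_workflows": config.hooked_workflows,
        }

    # Inside a BaseRunModel subclass __init__ this could be used roughly as:
    #     super().__init__(
    #         storage,
    #         queue_config=queue_config,
    #         status_queue=status_queue,
    #         active_realizations=active_realizations,
    #         **base_run_model_kwargs(config),
    #     )

Code that previously reached through run_model.ert_config reads the corresponding private attribute instead, e.g. run_model._queue_config.queue_system in everserver.py and the Everest test fixtures above.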