diff --git a/dnastack/cli/workbench/runs_commands.py b/dnastack/cli/workbench/runs_commands.py index 26022cb..21b66c7 100644 --- a/dnastack/cli/workbench/runs_commands.py +++ b/dnastack/cli/workbench/runs_commands.py @@ -7,7 +7,7 @@ from dnastack.cli.workbench.utils import get_ewes_client from dnastack.client.workbench.ewes.models import ExtendedRunListOptions, ExtendedRunRequest, BatchRunRequest, \ - MinimalExtendedRunWithOutputs, MinimalExtendedRunWithInputs, TaskListOptions, State + MinimalExtendedRunWithOutputs, MinimalExtendedRunWithInputs, TaskListOptions, State, ExecutionEngineListOptions from dnastack.client.workbench.ewes.models import LogType from dnastack.cli.helpers.command.decorator import command from dnastack.cli.helpers.command.spec import ArgumentSpec @@ -438,8 +438,9 @@ def write_logs(iterable: Iterable[bytes], writer): name='default_workflow_engine_parameters', arg_names=['--engine-params'], help='Set the global engine parameters for all runs that are to be submitted. ' - 'Engine params can be specified as a KV pair, inlined JSON, or as a json file preceded by the "@"' - 'symbol.', + 'Engine params can be specified as inlined JSON, json file preceded by the "@" symbol, ' + 'KV pair, parameter preset ID, or as a list of KV pairs and preset IDs separated by commas ' + '(e.g. my-preset-id,key=value).', as_option=True, default=None, required=False @@ -478,8 +479,8 @@ def write_logs(iterable: Iterable[bytes], writer): ), ArgumentSpec( name='overrides', - help='Additional arguments to set input values for all runs. The override values can be any JSON-like value' - 'such as inline JSON, command separated key value pairs or' + help='Additional arguments to set input values for all runs. 
The override values can be any ' + 'JSON-like value such as inline JSON, comma separated key value pairs or ' 'a json file referenced preceded by the "@" symbol.', as_option=False, default=None, @@ -506,6 +507,15 @@ def submit_batch(context: Optional[str], ewes_client = get_ewes_client(context_name=context, endpoint_id=endpoint_id, namespace=namespace) + def get_default_engine_id(): + list_options = ExecutionEngineListOptions() + engines = ewes_client.list_engines(list_options) + for engine in engines: + if engine.default: + return engine.id + raise ValueError("No default engine found. Please specify an engine id in " + "the workflow engine parameters list using ENGINE_ID_KEY=....") + if default_workflow_engine_parameters: possible_literal_or_file = default_workflow_engine_parameters.return_parsed_literal_or_file() if possible_literal_or_file: @@ -513,17 +523,20 @@ def submit_batch(context: Optional[str], else: [param_ids_list, kv_pairs_list] = default_workflow_engine_parameters.separate_strings_and_kvps() param_presets = dict() - for param_id in param_ids_list: - try: - param_preset = ewes_client.get_engine_param_preset(engine_id, param_id) - param_presets.update(param_preset.preset_values) - except Exception as e: - raise ValueError(f"Unable to find engine parameter preset with id {param_id}. " - f"You may only specify parameter preset ids and key value pairs in a list " - f"together. To include file content or JSON literals, it must be specified " - f"as a key-value pair. For more information please refer to " - f"https://docs.omics.ai/products/command-line-interface/" - f"working-with-json-data. {e}") + if param_ids_list: + if not engine_id: + engine_id = get_default_engine_id() + for param_id in param_ids_list: + try: + param_preset = ewes_client.get_engine_param_preset(engine_id, param_id) + param_presets.update(param_preset.preset_values) + except Exception as e: + raise ValueError(f"Unable to find engine parameter preset with id {param_id}. 
" + f"You may only specify parameter preset ids and key value pairs in a list " + f"together. To include file content or JSON literals, it must be specified " + f"as a key-value pair. For more information please refer to " + f"https://docs.omics.ai/products/command-line-interface/" + f"working-with-json-data. {e}") if kv_pairs_list: # if there are key value pairs left try: diff --git a/tests/cli/test_workbench.py b/tests/cli/test_workbench.py index cdd67df..c890f9e 100644 --- a/tests/cli/test_workbench.py +++ b/tests/cli/test_workbench.py @@ -10,7 +10,8 @@ from dnastack.alpha.client.workflow.models import Workflow, WorkflowVersion from dnastack.client.workbench.ewes.models import ExtendedRunStatus, ExtendedRun, BatchActionResult, BatchRunResponse, \ - MinimalExtendedRunWithInputs, MinimalExtendedRun, MinimalExtendedRunWithOutputs, ExecutionEngine, EngineParamPreset + MinimalExtendedRunWithInputs, MinimalExtendedRun, MinimalExtendedRunWithOutputs, ExecutionEngine, EngineParamPreset, \ + BatchRunRequest from .base import WorkbenchCliTestCase @@ -353,6 +354,70 @@ def test_submit_batch_with_multiple_params(): test_submit_batch_with_multiple_params() + def test_submit_batch_with_engine_key_value_param(): + submitted_batch = BatchRunResponse(**self.simple_invoke( + 'workbench', 'runs', 'submit', + '--url', hello_world_workflow_url, + '--engine-params', 'key=value' + )) + self.assertEqual(len(submitted_batch.runs), 1, 'Expected exactly one run to be submitted.') + described_runs = [ExtendedRun(**described_run) for described_run in self.simple_invoke( + 'workbench', 'runs', 'describe', + submitted_batch.runs[0].run_id + )] + self.assertEqual(len(described_runs), 1, f'Expected exactly one run. Found {described_runs}') + self.assertEqual(described_runs[0].request.workflow_engine_parameters, {'key': 'value'}, + f'Expected workflow engine params to be exactly the same. 
' + f'Found {described_runs[0].request.workflow_engine_parameters}') + + test_submit_batch_with_engine_key_value_param() + + def test_submit_batch_with_engine_preset_param(): + submitted_batch = BatchRunResponse(**self.simple_invoke( + 'workbench', 'runs', 'submit', + '--url', hello_world_workflow_url, + '--engine-params', self.engine_params.id, + )) + self.assertEqual(len(submitted_batch.runs), 1, 'Expected exactly one run to be submitted.') + described_runs = [ExtendedRun(**described_run) for described_run in self.simple_invoke( + 'workbench', 'runs', 'describe', + submitted_batch.runs[0].run_id + )] + + self.assertEqual(len(described_runs), 1, f'Expected exactly one run. Found {described_runs}') + processed_engine_params = {'engine_id': self.execution_engine.id} + processed_engine_params.update(self.engine_params.preset_values) + self.assertEqual(described_runs[0].request.workflow_engine_parameters, processed_engine_params, + f'Expected workflow engine params to be exactly the same. ' + f'Found {described_runs[0].request.workflow_engine_parameters}') + + test_submit_batch_with_engine_preset_param() + + def test_submit_batch_with_mixed_engine_preset_param_types(): + submitted_batch = BatchRunResponse(**self.simple_invoke( + 'workbench', 'runs', 'submit', + '--url', hello_world_workflow_url, + '--engine-params', f'goodbye=moon,{self.engine_params.id},hello=world', + )) + self.assertEqual(len(submitted_batch.runs), 1, 'Expected exactly one run to be submitted.') + described_runs = [ExtendedRun(**described_run) for described_run in self.simple_invoke( + 'workbench', 'runs', 'describe', + submitted_batch.runs[0].run_id + )] + + self.assertEqual(len(described_runs), 1, f'Expected exactly one run. 
Found {described_runs}') + processed_engine_params = { + 'engine_id': self.execution_engine.id, + 'goodbye': 'moon', + 'hello': 'world', + **self.engine_params.preset_values + } + self.assertEqual(described_runs[0].request.workflow_engine_parameters, processed_engine_params, + f'Expected workflow engine params to be exactly the same. ' + f'Found {described_runs[0].request.workflow_engine_parameters}') + + test_submit_batch_with_mixed_engine_preset_param_types() + def test_workflows_list(self): result = [Workflow(**workflow) for workflow in self.simple_invoke( 'workbench', 'workflows', 'list' diff --git a/tests/exam_helper_for_workbench.py b/tests/exam_helper_for_workbench.py index 9296714..b3dfc1d 100644 --- a/tests/exam_helper_for_workbench.py +++ b/tests/exam_helper_for_workbench.py @@ -93,7 +93,7 @@ class BaseWorkbenchTestCase(WithTestUserTestCase): ).json()))) namespace: str = None hello_world_workflow: Workflow = None - engine_param = { + engine_params = { "id": "presetId", "name": "presetName", "preset_values": { @@ -215,7 +215,7 @@ def _create_execution_engine(cls, session: HttpSession) -> ExecutionEngine: def _add_execution_engine_parameter(cls, session: HttpSession, engine_id: str) -> EngineParamPreset: response = session.post(urljoin(cls.workbench_base_url, f'/services/ewes-service/{cls.namespace}/engines/{engine_id}/param-presets'), - json=cls.engine_param) + json=cls.engine_params) return EngineParamPreset(**response.json()) @classmethod