From 1555aa74ea5711f8b1593a136201b584565035bc Mon Sep 17 00:00:00 2001 From: jetaba Date: Mon, 10 Jun 2024 13:33:12 -0500 Subject: [PATCH] clean up --- bclaw_runner/tests/test_qc_check.py | 19 +-- cloudformation/bc_core.yaml | 1 + lambda/src/compiler/pkg/batch_resources.py | 2 - lambda/tests/compiler/test_batch_resources.py | 57 ++++----- lambda/tests/compiler/test_qc_resources.py | 87 ------------- lambda/tests/qc_checker/test_qc_checker.py | 117 ------------------ 6 files changed, 32 insertions(+), 251 deletions(-) delete mode 100644 lambda/tests/compiler/test_qc_resources.py delete mode 100644 lambda/tests/qc_checker/test_qc_checker.py diff --git a/bclaw_runner/tests/test_qc_check.py b/bclaw_runner/tests/test_qc_check.py index b4d6674..5d360b1 100644 --- a/bclaw_runner/tests/test_qc_check.py +++ b/bclaw_runner/tests/test_qc_check.py @@ -95,14 +95,15 @@ def test_run_all_qc_checks(fake1_cond, fake2_cond, expect, mock_qc_data_files): assert result == expect -@pytest.mark.parametrize("fake1_cond, fake2_cond, expect_qc_fail", [ - (None, None, False), # no checks - (["a>1"], ["x<99"], False), # all pass - (["a>1", "b==2"], ["y<98"], True), # one fail - (["b==1"], ["x==99", "y==98"], True), # multi fail - (["a==1", "b==2"], ["x==99", "y==98"], True), # all fail +@pytest.mark.parametrize("fake1_cond, fake2_cond, expect", [ + (None, None, []), # no checks + (["a>1"], ["x<99"], []), # all pass + (["a>1", "b==2"], ["y<98"], ["fake1: b==2"]), # one fail + (["b==1"], ["x==99", "y==98"], ["fake2: x==99", "fake2: y==98"]), # multi fail + (["a==1", "b==2"], ["x==99", "y==98"], + ["fake1: a==1", "fake1: b==2", "fake2: x==99", "fake2: y==98"]), # all fail ]) -def test_do_checks(fake1_cond, fake2_cond, expect_qc_fail, mock_qc_data_files, mocker): +def test_do_checks(fake1_cond, fake2_cond, expect, mock_qc_data_files, mocker): mock_abort_execution = mocker.patch("bclaw_runner.src.runner.qc_check.abort_execution") if fake1_cond is None: @@ -119,9 +120,9 @@ def test_do_checks(fake1_cond, fake2_cond, expect_qc_fail, mock_qc_data_files, m }, ] - if expect_qc_fail: - # todo: should probably check the contents of qcf.failures + if expect: with pytest.raises(QCFailure) as qcf: do_checks(spec) + assert qcf.value.failures == expect else: do_checks(spec) diff --git a/cloudformation/bc_core.yaml b/cloudformation/bc_core.yaml index 2873b43..d314e6b 100644 --- a/cloudformation/bc_core.yaml +++ b/cloudformation/bc_core.yaml @@ -546,6 +546,7 @@ Resources: FilterPattern: '{$.function = "gather.*"}' LogGroupName: !Ref GatherLambdaLogGroup + # todo: remove this lambda + associated resources QCCheckerLambda: Type: AWS::Serverless::Function DeletionPolicy: "Retain" diff --git a/lambda/src/compiler/pkg/batch_resources.py b/lambda/src/compiler/pkg/batch_resources.py index f948590..e14e94e 100644 --- a/lambda/src/compiler/pkg/batch_resources.py +++ b/lambda/src/compiler/pkg/batch_resources.py @@ -173,9 +173,7 @@ def job_definition_rc(step: Step, shell_opt: str) -> Generator[Resource, None, str]: logical_name = make_logical_name(f"{step.name}.job.defx") - # todo: test this if isinstance(step.spec["commands"], str): - # todo: split on newlines? 
command_list = [step.spec["commands"]] else: command_list = step.spec["commands"] diff --git a/lambda/tests/compiler/test_batch_resources.py b/lambda/tests/compiler/test_batch_resources.py index d040036..e87f1bc 100644 --- a/lambda/tests/compiler/test_batch_resources.py +++ b/lambda/tests/compiler/test_batch_resources.py @@ -320,6 +320,27 @@ def helper(): assert resource.spec == expected_rc_spec +def test_job_definition_rc_single_line_command(sample_batch_step, compiler_env): + command_str = textwrap.dedent("""\ + commands: | + echo one > ${outfile1} + echo two > ${outfile2} + echo three > ${outfile3} + """) + command_spec = yaml.safe_load(command_str) + nu_sample_batch_step = sample_batch_step.copy() + nu_sample_batch_step.update(**command_spec) + step = Step("test_step", nu_sample_batch_step, "next_step") + + def helper(): + _ = yield from job_definition_rc(step, "arn:task:role", "sh") + + for resource in helper(): + result_spec = json.loads(resource.spec["Properties"]["spec"]) + command = result_spec["parameters"]["command"] + assert command == '["echo one > ${outfile1}\\necho two > ${outfile2}\\necho three > ${outfile3}\\n"]' + + @pytest.mark.parametrize("spec, expect", [ ({}, "none"), ({"skip_if_output_exists": True}, "output"), @@ -453,42 +474,6 @@ def helper(): assert job_def_spec["containerProperties"]["jobRoleArn"] == expected_job_role_arn -@pytest.mark.skip(reason="new qc_check") -def test_handle_batch_with_qc(sample_batch_step, compiler_env): - step = Step("step_name", sample_batch_step, "next_step_name") - - step.spec["qc_check"] = { - "qc_result_file": "qc_file.json", - "stop_early_if": "test_expression", - "email_subject": "test subject", - "notification": [ - "test_one@case.com", - "test_two@case.com", - ], - } - - def helper(): - states = yield from handle_batch(step, {"wf": "params", "versioned": "true"}, False) - assert len(states) == 2 - assert all(isinstance(s, State) for s in states) - - assert states[0].name == "step_name" - assert states[0].spec["Resource"] == "arn:aws:states:::batch:submitJob.sync" - assert states[0].spec["Parameters"]["JobDefinition"] == "${StepNameJobDefx}" - assert states[0].spec["Next"] == "step_name.qc_checker" - - assert states[1].name == "step_name.qc_checker" - assert states[1].spec["Next"] == "next_step_name" - - resource_gen = helper() - resource_dict = dict(resource_gen) - - expected_keys = ["StepNameJobDefx"] - assert list(resource_dict.keys()) == expected_keys - - assert resource_dict["StepNameJobDefx"]["Type"] == "Custom::BatchJobDefinition" - - def test_handle_batch_auto_inputs(sample_batch_step, compiler_env): step = Step("step_name", sample_batch_step, "next_step") step.spec["inputs"] = None diff --git a/lambda/tests/compiler/test_qc_resources.py b/lambda/tests/compiler/test_qc_resources.py deleted file mode 100644 index 8322874..0000000 --- a/lambda/tests/compiler/test_qc_resources.py +++ /dev/null @@ -1,87 +0,0 @@ -import pytest - -# from ...src.compiler.pkg.qc_resources import qc_checker_step, handle_qc_check -from ...src.compiler.pkg.util import Step, State - -# todo: remove module -pytest.skip(reason="to be removed", allow_module_level=True) - - -@pytest.mark.parametrize("next_step_name, next_or_end", [ - ("next_step", {"Next": "next_step"}), - ("", {"End": True}), -]) -def test_qc_checker_step(next_step_name, next_or_end, compiler_env): - qc_spec = { - "qc_result_file": "qc_file.json", - "stop_early_if": "test_expression" - } - batch_step = Step("test_step", {"qc_check": qc_spec}, next_step_name) - - result = 
qc_checker_step(batch_step) - expected = { - "Type": "Task", - "Resource": "qc_checker_lambda_arn", - "Parameters": { - "repo.$": "$.repo.uri", - "qc_result_file": "qc_file.json", - "qc_expression": "test_expression", - "execution_id.$": "$$.Execution.Id", - "logging": { - "branch.$": "$.index", - "job_file_bucket.$": "$.job_file.bucket", - "job_file_key.$": "$.job_file.key", - "job_file_version.$": "$.job_file.version", - "sfn_execution_id.$": "$$.Execution.Name", - "step_name": "test_step", - "workflow_name": "${WorkflowName}", - }, - }, - "Retry": [ - { - "ErrorEquals": ["QCFailed"], - "IntervalSeconds": 3600, - "MaxAttempts": 1, - }, - { - "ErrorEquals": ["States.ALL"], - "IntervalSeconds": 3, - "MaxAttempts": 3, - "BackoffRate": 1.5 - } - ], - "ResultPath": None, - "OutputPath": "$", - **next_or_end - } - - assert result == expected - - -def test_handle_qc_check(compiler_env): - batch_spec = { - "qc_check": { - "qc_result_file": "qc_file.json", - "stop_early_if": "test_expression", - # email_subject and notification fields are no longer used, retaining them - # here to test for backward compatibility - "email_subject": "test subject", - "notification": [ - "test1@case.com", - "test2@case.com", - ], - }, - } - - batch_step = Step("step_name", batch_spec, "next_step") - - result = handle_qc_check(batch_step) - - expected_state_name = "step_name.qc_checker" - - assert isinstance(result, State) - assert result.name == expected_state_name - assert result.spec["Resource"] == "qc_checker_lambda_arn" - assert result.spec["Parameters"]["qc_result_file"] == batch_spec["qc_check"]["qc_result_file"] - assert result.spec["Parameters"]["qc_expression"] == batch_spec["qc_check"]["stop_early_if"] - assert result.spec["Next"] == "next_step" diff --git a/lambda/tests/qc_checker/test_qc_checker.py b/lambda/tests/qc_checker/test_qc_checker.py deleted file mode 100644 index 2f66994..0000000 --- a/lambda/tests/qc_checker/test_qc_checker.py +++ /dev/null @@ -1,117 +0,0 @@ -import logging - -import boto3 -import moto -import pytest - -from ...src.qc_checker.qc_checker import lambda_handler, QCFailed - - -# todo: remove module -pytest.skip(reason="to be removed", allow_module_level=True) - -logging.basicConfig(level=logging.INFO) - - -@pytest.fixture(scope="function") -def mock_repo_bucket(): - with moto.mock_aws(): - bucket_name = "repo-bucket" - s3 = boto3.resource("s3", region_name="us-east-1") - bucket = s3.create_bucket(Bucket=bucket_name) - yield bucket - - -@pytest.fixture(scope="function") -def mock_state_machine(): - with moto.mock_aws(): - iam = boto3.resource("iam", region_name="us-east-1") - - role = iam.create_role( - RoleName="fakeRole", - AssumeRolePolicyDocument="{}" - ) - - # with moto.mock_aws(): - sfn = boto3.client("stepfunctions", region_name="us-east-1") - - state_machine = sfn.create_state_machine( - name="fakeStateMachine", - definition="{}", - roleArn=role.arn - ) - - yield state_machine["stateMachineArn"] - - -def test_lambda_handler(caplog, monkeypatch, mock_repo_bucket, mock_state_machine): - monkeypatch.setenv("AWS_DEFAULT_REGION", "us-east-1") - - # start step functions execution, get execution id - sfn = boto3.client("stepfunctions") - sfn_execution = sfn.start_execution( - stateMachineArn=mock_state_machine, - name="kryzyzniak", - input='{"in": "put"}', - ) - - # add qc file to repo bucket - mock_repo_bucket.put_object( - Body=b'{"x": 0}', - Key="repo/qc_file.json" - ) - - event = { - "repo": f"s3://{mock_repo_bucket.name}/repo", - "qc_result_file": "qc_file.json", - 
"qc_expression": "x >= 1", - "execution_id": sfn_execution["executionArn"], - "logging": { - "job_file_key": "job/file", - "step_name": "step.name", - }, - } - - _ = lambda_handler(event, {}) - - execution_desc = sfn.describe_execution(executionArn=sfn_execution["executionArn"]) - assert execution_desc["status"] == "RUNNING" - assert "passed QC check" in caplog.text - - -def test_lambda_handler_fail(caplog, monkeypatch, mock_repo_bucket, mock_state_machine): - monkeypatch.setenv("AWS_DEFAULT_REGION", "us-east-1") - - # start step functions execution, get execution id - sfn = boto3.client("stepfunctions") - sfn_execution = sfn.start_execution( - stateMachineArn=mock_state_machine, - name="kryzyzniak", - input='{"in": "put"}', - ) - - # add qc file to repo bucket - mock_repo_bucket.put_object( - Body=b'{"x": 3}', - Key="repo/qc_file.json" - ) - - event = { - "repo": f"s3://{mock_repo_bucket.name}/repo", - "qc_result_file": "qc_file.json", - "qc_expression": "x >= 1", - "execution_id": sfn_execution["executionArn"], - "logging": { - "job_file_key": "job/file", - "step_name": "step.name", - }, - } - - with pytest.raises(QCFailed, match=event["qc_expression"]): - lambda_handler(event, {}) - - execution_desc = sfn.describe_execution(executionArn=sfn_execution["executionArn"]) - - # moto quirk: aborted mock stepfunctions executions end up in SUCCEEDED state - assert execution_desc["status"] != "RUNNING" - assert "failed QC check" in caplog.text