diff --git a/README.md b/README.md
index 8686d94..12d3a7d 100644
--- a/README.md
+++ b/README.md
@@ -86,7 +86,7 @@ jobs:
         with:
           submodules: true
       - name: wdl-ci
-        uses: dnastack/wdl-ci@0.1.4
+        uses: dnastack/wdl-ci@v0.1.5
         with:
           wallet-url: ${{ secrets.WALLET_URL }}
           wallet-client-id: ${{ secrets.WALLET_CLIENT_ID }}
@@ -114,7 +114,7 @@ jobs:
           repository: ${{ github.event.pull_request.head.repo.full_name }}
           ref: ${{ github.event.pull_request.head.ref }}
       - name: wdl-ci
-        uses: dnastack/wdl-ci@0.1.4
+        uses: dnastack/wdl-ci@v0.1.5
         with:
           wallet-url: ${{ secrets.WALLET_URL }}
           wallet-client-id: ${{ secrets.WALLET_CLIENT_ID }}
@@ -144,7 +144,7 @@ jobs:
           repository: ${{ github.event.pull_request.head.repo.full_name }}
           ref: ${{ github.event.pull_request.head.ref }}
       - name: wdl-ci
-        uses: dnastack/wdl-ci@0.1.4
+        uses: dnastack/wdl-ci@v0.1.5
         with:
           wallet-url: ${{ secrets.WALLET_URL }}
           wallet-client-id: ${{ secrets.WALLET_CLIENT_ID }}
@@ -337,18 +337,12 @@ Test params can be used to avoid repeating paths and values for test inputs and
 }
 ```
 
-
-# Custom workflow linters
-
-Custom linters may be added to [src/wdlci/linters/custom_linters.py](src/wdlci/linters/custom_linters.py).
-
-
 # WDL-based tests
 
 Tests are defined in the [src/wdlci/wdl_tests](src/wdlci/wdl_tests) directory.
 
 - Tests are written in WDL
-- Test files must be named `${test_name}.wdl`
+- Test files must be named `<test_name>.wdl`
 - In the test WDL file, before the task declaration, add a short description of what the purpose of this test is and what input type is expected, e.g.:
 
 ```wdl
@@ -358,17 +352,21 @@ version 1.0
 
 # Input type: String
 task compare_string {
+  input {
+    String current_run_output
+    String validated_output
+  }
   ...
 ```
 
-- Each test file should contain a single test task, named `${test_name}`
+- Each test file should contain a single test task, named `<test_name>`
 - Tests must specify two inputs:
-  - `${input_type} current_run_output`
-  - `${input_type} validated_output`
+  - `<input_type> current_run_output`
+  - `<input_type> validated_output`
 - The `current_run_output` and `validated_output` will be passed to the test task automatically; it is up to the test writer to implement the desired comparison
 
-Tests can be selected and applied to input sets by including the `${test_name}` as part of the `workflows.${workflow}.tasks.${task}.tests.[],test_tasks` array. For example, to run the `compare` test, which compares various output types, the `test_tasks` section should be set to `["compare"]`. Additional test tasks may be added for the same input set by adding test names to the `test_tasks` array for that set of inputs.
+Tests can be selected and applied to task outputs by including the `<test_name>` as part of the `test_tasks` array in [the output tests block](#output_tests). For example, to run the `compare_string` test, the `test_tasks` section for the output to be tested should be set to `["compare_string"]`.
 
 
 ## Array comparison
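For orientation, a complete test file following the conventions documented above might look like the sketch below. Only the file/task naming and the `current_run_output`/`validated_output` inputs are prescribed by the README; the comparison logic and runtime image here are illustrative assumptions, not taken from this diff.

```wdl
version 1.0

# Compare two strings for exact equality
# Input type: String

task compare_string {
  input {
    String current_run_output
    String validated_output
  }

  command <<<
    # Fail the task (and therefore the test) when the strings differ
    if [[ "~{current_run_output}" != "~{validated_output}" ]]; then
      echo "Strings do not match: [~{current_run_output}] != [~{validated_output}]"
      exit 1
    fi
  >>>

  output {
  }

  runtime {
    docker: "ubuntu:20.04"  # assumed image; any image providing bash works
  }
}
```

In this sketch, a non-zero exit status is how the comparison reports a mismatch: the test run fails, and the failure is surfaced by the monitor step.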
diff --git a/action.yml b/action.yml
index 59ac360..f630556 100644
--- a/action.yml
+++ b/action.yml
@@ -50,19 +50,19 @@ runs:
           echo "WDL_CI_CUSTOM_TEST_WDL_DIR"=${{ inputs.wdl_ci_custom_test_wdl_dir }} >> $GITHUB_ENV
         fi
     - name: lint
-      uses: docker://dnastack/wdl-ci:latest
+      uses: docker://dnastack/wdl-ci:v0.1.5
       with:
         args: lint
     - name: detect-changes
-      uses: docker://dnastack/wdl-ci:latest
+      uses: docker://dnastack/wdl-ci:v0.1.5
       with:
         args: detect-changes
     - name: submit
-      uses: docker://dnastack/wdl-ci:latest
+      uses: docker://dnastack/wdl-ci:v0.1.5
       with:
         args: submit
    - name: monitor
-      uses: docker://dnastack/wdl-ci:latest
+      uses: docker://dnastack/wdl-ci:v0.1.5
       with:
         args: monitor --update-digests
         # If a test fails, still update task digests for any tests that succeeded
@@ -76,6 +76,6 @@ runs:
         default_author: github_actions
     - name: cleanup
       if: always()
-      uses: docker://dnastack/wdl-ci:latest
+      uses: docker://dnastack/wdl-ci:v0.1.5
       with:
         args: cleanup
diff --git a/pyproject.toml b/pyproject.toml
index ae8d9c9..f952aed 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,7 +1,7 @@
 [project]
 name = "wdl-testing-cli"
 description = "DNAstack WDL testing CLI"
-version = "0.1.4"
+version = "0.1.5"
 authors = [
     { name = "DNAstack", email = "devs@dnastack.com" }
 ]
diff --git a/src/wdlci/cli/monitor.py b/src/wdlci/cli/monitor.py
index 27d955f..17e5da3 100644
--- a/src/wdlci/cli/monitor.py
+++ b/src/wdlci/cli/monitor.py
@@ -118,6 +118,7 @@ def monitor_handler(kwargs):
                     f"run with id '{workflow_run.wes_run_id}' failed execution with a state of {workflow_run.wes_state}"
                 )
                 task_status[workflow_path][task]["succeeded"] = False
+                task_status[workflow_path][task]["workflow_run"] = workflow_run
             elif (
                 workflow_run.status == SubmissionStateWorkflowRun.STATUS_FINISH_SUCCESS
                 and workflow_run.validation_status
@@ -128,12 +129,14 @@ def monitor_handler(kwargs):
                     f"run with id '{workflow_run.wes_run_id}' failed output validation. message: {workflow_run.validation_message}"
                 )
                 task_status[workflow_path][task]["succeeded"] = False
+                task_status[workflow_path][task]["workflow_run"] = workflow_run
             else:
                 fail_n += 1
                 print(
                     f"run with id '{workflow_run.wes_run_id}' failed with an unspecified error"
                 )
                 task_status[workflow_path][task]["succeeded"] = False
+                task_status[workflow_path][task]["workflow_run"] = workflow_run
 
     submission_state_encoded = jsonpickle.encode(submission_state)
     open(SUBMISSION_JSON, "w").write(submission_state_encoded)
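The three inserted `workflow_run` lines above all serve one purpose: every branch that marks a task as failed now also stashes the `WorkflowRun` object next to the `succeeded` flag, so the summary loop later in `monitor_handler` (next hunk) can hand it to `ewes_client.get_failed_task_logs`. A minimal, self-contained sketch of that bookkeeping pattern, using a stand-in `WorkflowRun` class rather than wdlci's real one:

```python
from dataclasses import dataclass


# Stand-in for wdlci's WorkflowRun; only the field used below is modeled.
@dataclass
class WorkflowRun:
    wes_run_id: str


task_status = {"workflows/sample/workflow.wdl": {"my_task": {"succeeded": True}}}


def record_failure(workflow_path, task, workflow_run):
    entry = task_status[workflow_path][task]
    entry["succeeded"] = False
    # The new line in each failure branch: keep the run object itself so the
    # post-polling summary can look up its WES run ID and fetch its logs.
    entry["workflow_run"] = workflow_run


record_failure(
    "workflows/sample/workflow.wdl", "my_task", WorkflowRun(wes_run_id="run-123")
)

# Later, after polling: walk the failed tasks and fetch their logs.
for workflow_path, tasks in task_status.items():
    for task_name, status in tasks.items():
        if not status["succeeded"]:
            # monitor.py calls ewes_client.get_failed_task_logs(...) here
            print(f"would fetch logs for failed run {status['workflow_run'].wes_run_id}")
```

Without the stored object, the summary loop would know only that a task failed, not which WES run to query for stderr.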
@@ -160,6 +163,9 @@ def monitor_handler(kwargs):
                 print(
                     f"At least one test failed for [{workflow_path} - {task_name}]."
                 )
+                ewes_client.get_failed_task_logs(
+                    tasks[task_name]["workflow_run"]
+                )
 
     if config_updated:
         config.write()
diff --git a/src/wdlci/workbench/ewes_client.py b/src/wdlci/workbench/ewes_client.py
index 1d4ca1f..0bdfe0d 100644
--- a/src/wdlci/workbench/ewes_client.py
+++ b/src/wdlci/workbench/ewes_client.py
@@ -29,7 +29,9 @@ def submit_workflow_run(self, workflow_run):
 
         headers = {"Authorization": "Bearer " + self.ewes_auth.access_token}
 
-        output_test_task_params = {k: workflow_run._outputs[k]['value'] for k in workflow_run._outputs.keys()}
+        output_test_task_params = {
+            k: workflow_run._outputs[k]["value"] for k in workflow_run._outputs.keys()
+        }
 
         form_data = {
             "workflow_url": f"{env.workbench_workflow_service_url}/{env.workbench_namespace}/workflows/{workflow_run._workflow_id}/versions/v1_0_0/descriptor",
@@ -71,5 +73,37 @@ def poll_workflow_run_status_and_update(self, workflow_run):
         elif wes_state in set(["COMPLETE"]):
             workflow_run.finish_success()
 
+    def _print_stderr(self, stderr_url, headers, task_name):
+        terminal_format_bold = "\033[1m"
+        terminal_format_end = "\033[0m"
+        print(
+            f"{terminal_format_bold}═════ stderr ═════════════════════════════════════════ [{task_name}]{terminal_format_end}"
+        )
+        print()
+        response = requests.get(stderr_url, headers=headers)
+        print(response.content.decode("ascii"))
+        print(
+            f"{terminal_format_bold}═══════════════════════════════════════════════════════{terminal_format_end}"
+        )
+        print()
+
+    def get_failed_task_logs(self, workflow_run):
+        env = Config.instance().env
+        base_url, namespace = env.workbench_ewes_url, env.workbench_namespace
+        url = (
+            f"{base_url}/{namespace}/ga4gh/wes/v1/runs/{workflow_run.wes_run_id}/tasks"
+        )
+        headers = {"Authorization": "Bearer " + self.ewes_auth.access_token}
+
+        response = requests.get(url, headers=headers)
+        if response.status_code == 200:
+            for task in response.json()["tasks"]:
+                if task["state"] == "EXECUTOR_ERROR":
+                    task_name = task["pretty_name"].split(".")[1]
+                    print(
+                        f"EXECUTOR_ERROR for [{workflow_run._workflow_key} - {task_name}]."
+                    )
+                    self._print_stderr(task["stderr"], headers, task_name)
+
     def __get_url(self):
         return Config.instance().workbench_ewes_url
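Together, the new `get_failed_task_logs` and `_print_stderr` methods walk the WES task listing for a failed run and print stderr for every task in the `EXECUTOR_ERROR` state. The rough standalone sketch below shows the same flow outside the client class; the response field names (`tasks`, `state`, `pretty_name`, `stderr`) mirror the diff, while the function name, parameters, and `errors="replace"` decoding are assumptions for illustration.

```python
import requests


def print_failed_task_stderr(base_url, namespace, run_id, token):
    """Fetch a run's task list and dump stderr for executor errors."""
    headers = {"Authorization": "Bearer " + token}
    tasks_url = f"{base_url}/{namespace}/ga4gh/wes/v1/runs/{run_id}/tasks"

    response = requests.get(tasks_url, headers=headers)
    if response.status_code != 200:
        return

    for task in response.json()["tasks"]:
        if task["state"] == "EXECUTOR_ERROR":
            # "pretty_name" looks like "<workflow>.<task>"; keep the task part
            task_name = task["pretty_name"].split(".")[1]
            print(f"===== stderr [{task_name}] =====")
            # "stderr" holds a URL to the log blob; fetch it with the same auth.
            log = requests.get(task["stderr"], headers=headers)
            # errors="replace" is a defensive choice for this sketch; the diff
            # itself decodes with plain .decode("ascii").
            print(log.content.decode("ascii", errors="replace"))
```

Keeping the formatting in a separate `_print_stderr` helper, as the diff does, leaves room to reuse it for other log streams (e.g. stdout) without touching the retrieval logic.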