From c8a7d27f020cfe94a09043194e09396ac1ca0195 Mon Sep 17 00:00:00 2001
From: Thomas Mendelin <36770664+thomend@users.noreply.github.com>
Date: Wed, 18 Dec 2024 13:43:57 +0100
Subject: [PATCH] Only output failing models and violated rules by default in
 HumanReadableFormatter (#77)

Fixes #71

It changes the default behavior of the HumanReadableFormatter: when running
`dbt-score lint`, it now only shows failing models and/or rules that were
violated. To have all output printed (exactly the way it was before this PR),
the user now has to pass `--show all` to `dbt-score lint`. It also takes the
`fail_any_item_under` option into account.

Furthermore, I had to adjust some of the tests since the default behavior of
the HumanReadableFormatter has changed - let me know if I need to expand
these. The docs also reflect this change by slightly tweaking the first
example in `index.md`.

In its current form, both rule violations and failing models are filtered
through the same argument. This is based on @matthieucan's comment on
https://github.com/PicnicSupermarket/dbt-score/issues/71#issuecomment-2383106560

---------

Co-authored-by: Jochem van Dooren
Co-authored-by: Matthieu Caneill
---
 CHANGELOG.md                                  |  2 +
 docs/index.md                                 |  5 +-
 src/dbt_score/cli.py                          | 17 ++++-
 src/dbt_score/config.py                       |  2 +
 .../formatters/human_readable_formatter.py    | 52 ++++++++------
 .../test_human_readable_formatter.py          | 68 +++++++++++++++++--
 tests/test_cli.py                             |  3 +-
 7 files changed, 120 insertions(+), 29 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c04cf73..4acb659 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,8 @@ and this project adheres to
 
 ## [Unreleased]
 
 - Documenting support for python 3.13. (#86)
+- Only show failing rules by default in `HumanReadableFormatter`. Also added a
+  `--show` parameter in the CLI to change this behavior. (#77)
 
 ## [0.8.0] - 2024-11-12
diff --git a/docs/index.md b/docs/index.md
index c100708..1b45586 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -12,7 +12,7 @@ encourage) good practices.
 ## Example
 
 ```
-> dbt-score lint
+> dbt-score lint --show all
 🥇 M: customers (score: 10.0)
     OK   dbt_score.rules.generic.has_description
     OK   dbt_score.rules.generic.has_owner
@@ -22,7 +22,8 @@ Score: 10.0 🥇
 
 In this example, the model `customers` scores the maximum value of `10.0` as it
 passes all the rules. It also is awarded a golden medal because of the perfect
-score.
+score. By default, a model or source that passes all rules, like this one, is
+not shown at all; pass the `--show all` option to print every item and rule.
 
 ## Philosophy
 
diff --git a/src/dbt_score/cli.py b/src/dbt_score/cli.py
index 9585d95..2e7e6b9 100644
--- a/src/dbt_score/cli.py
+++ b/src/dbt_score/cli.py
@@ -94,8 +94,20 @@ def cli() -> None:
     is_flag=False,
     default=None,
 )
+@click.option(
+    "--show",
+    help="Type of output which should be shown "
+    "when using `plain` as `--format`. "
+    "`all` shows all items and all rules. "
+    "`failing-items` shows failing rules of failing items. "
+    "`failing-rules` shows failing rules of all items. "
+    "Default is `failing-rules`.",
+    type=click.Choice(["all", "failing-items", "failing-rules"]),
+    is_flag=False,
+    default="failing-rules",
+)
 @click.pass_context
-def lint(
+def lint(  # noqa: PLR0913, C901
     ctx: click.Context,
     format: Literal["plain", "manifest", "ascii"],
     select: tuple[str],
@@ -105,6 +117,7 @@ def lint(
     run_dbt_parse: bool,
     fail_project_under: float,
     fail_any_item_under: float,
+    show: Literal["all", "failing-items", "failing-rules"],
 ) -> None:
     """Lint dbt metadata."""
     manifest_provided = (
@@ -124,6 +137,8 @@ def lint(
         config.overload({"fail_project_under": fail_project_under})
     if fail_any_item_under:
         config.overload({"fail_any_item_under": fail_any_item_under})
+    if show:
+        config.overload({"show": show})
 
     try:
         if run_dbt_parse:
diff --git a/src/dbt_score/config.py b/src/dbt_score/config.py
index a3e0b2a..0d7aa99 100644
--- a/src/dbt_score/config.py
+++ b/src/dbt_score/config.py
@@ -57,6 +57,7 @@ class Config:
         "inject_cwd_in_python_path",
         "fail_project_under",
         "fail_any_item_under",
+        "show",
     ]
     _rules_section: Final[str] = "rules"
     _badges_section: Final[str] = "badges"
@@ -71,6 +72,7 @@ def __init__(self) -> None:
         self.badge_config: BadgeConfig = BadgeConfig()
         self.fail_project_under: float = 5.0
         self.fail_any_item_under: float = 5.0
+        self.show: str = "failing-rules"
 
     def set_option(self, option: str, value: Any) -> None:
         """Set an option in the config."""
diff --git a/src/dbt_score/formatters/human_readable_formatter.py b/src/dbt_score/formatters/human_readable_formatter.py
index ed2db7a..86978fa 100644
--- a/src/dbt_score/formatters/human_readable_formatter.py
+++ b/src/dbt_score/formatters/human_readable_formatter.py
@@ -42,28 +42,40 @@ def evaluable_evaluated(
         self, evaluable: Evaluable, results: EvaluableResultsType, score: Score
     ) -> None:
         """Callback when an evaluable item has been evaluated."""
-        if score.value < self._config.fail_any_item_under:
+        if evaluable_failed := score.value < self._config.fail_any_item_under:
             self._failed_evaluables.append((evaluable, score))
 
+        if (
+            evaluable_failed
+            or self._config.show == "all"
+            or (
+                self._config.show not in ["failing-items"]
+                and any(result is not None for result in results.values())
+            )
+        ):
+            resource_type = type(evaluable).__name__
+            name_formatted = f"{resource_type[0]}: {self.pretty_name(evaluable)}"
+            header = (
+                f"{score.badge} "
+                f"{self.bold(name_formatted)} (score: {score.rounded_value!s})"
+            )
-        resource_type = type(evaluable).__name__
-        name_formatted = f"{resource_type[0]}: {self.pretty_name(evaluable)}"
-        header = (
-            f"{score.badge} "
-            f"{self.bold(name_formatted)} (score: {score.rounded_value!s})"
-        )
-
-        print(header)
-        for rule, result in results.items():
-            if result is None:
-                print(f"{self.indent}{self.label_ok} {rule.source()}")
-            elif isinstance(result, RuleViolation):
-                print(
-                    f"{self.indent}{self.label_warning} "
-                    f"({rule.severity.name.lower()}) {rule.source()}: {result.message}"
-                )
-            else:
-                print(f"{self.indent}{self.label_error} {rule.source()}: {result!s}")
-        print()
+            print(header)
+            for rule, result in results.items():
+                if result is None:
+                    if self._config.show in ["all"]:
+                        print(f"{self.indent}{self.label_ok} {rule.source()}")
+                elif isinstance(result, RuleViolation):
+                    print(
+                        f"{self.indent}{self.label_warning} "
+                        f"({rule.severity.name.lower()}) {rule.source()}: "
+                        f"{result.message}"
+                    )
+                else:
+                    print(
+                        f"{self.indent}{self.label_error} {rule.source()}: "
+                        f"{result!s}"
+                    )
+            print()
 
     def project_evaluated(self, score: Score) -> None:
         """Callback when a project has been evaluated."""
diff --git a/tests/formatters/test_human_readable_formatter.py b/tests/formatters/test_human_readable_formatter.py
index 0f1f90e..7e14723 100644
--- a/tests/formatters/test_human_readable_formatter.py
+++ b/tests/formatters/test_human_readable_formatter.py
@@ -1,13 +1,14 @@
 """Unit tests for the human readable formatter."""
 
 from textwrap import dedent
+import pytest
 from dbt_score.evaluation import EvaluableResultsType
 from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter
 from dbt_score.rule import RuleViolation
 from dbt_score.scoring import Score
 
 
-def test_human_readable_formatter_model(
+def test_human_readable_formatter_model_with_defaults(
     capsys,
     default_config,
     manifest_loader,
@@ -29,7 +30,6 @@
     stdout = capsys.readouterr().out
     expected = """\
 🥇 \x1B[1mM: model1\x1B[0m (score: 10.0)
-    \x1B[1;32mOK  \x1B[0m tests.conftest.rule_severity_low
     \x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes
     \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error
 
@@ -37,6 +37,60 @@
     assert stdout == dedent(expected)
 
 
+@pytest.mark.parametrize(
+    "show,expected",
+    [
+        (
+            "all",
+            """\
+🥇 \x1B[1mM: model1\x1B[0m (score: 10.0)
+    \x1B[1;32mOK  \x1B[0m tests.conftest.rule_severity_low
+    \x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes
+    \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error
+
+""",
+        ),
+        (
+            "failing-rules",
+            """\
+🥇 \x1B[1mM: model1\x1B[0m (score: 10.0)
+    \x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes
+    \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error
+
+""",
+        ),
+        (
+            "failing-items",
+            "",
+        ),
+    ],
+)
+def test_human_readable_formatter_model_show_parameter(
+    capsys,
+    default_config,
+    manifest_loader,
+    model1,
+    rule_severity_low,
+    rule_severity_medium,
+    rule_severity_critical,
+    show,
+    expected,
+):
+    """Ensure the formatter has the correct output after model evaluation."""
+    default_config.overload({"show": show})
+    formatter = HumanReadableFormatter(
+        manifest_loader=manifest_loader, config=default_config
+    )
+    results: EvaluableResultsType = {
+        rule_severity_low: None,
+        rule_severity_medium: Exception("Oh noes"),
+        rule_severity_critical: RuleViolation("Error"),
+    }
+    formatter.evaluable_evaluated(model1, results, Score(10.0, "🥇"))
+    stdout = capsys.readouterr().out
+    assert stdout == dedent(expected)
+
+
 def test_human_readable_formatter_project(capsys, default_config, manifest_loader):
     """Ensure the formatter has the correct output after project evaluation."""
     formatter = HumanReadableFormatter(
@@ -57,6 +111,7 @@ def test_human_readable_formatter_near_perfect_model_score(
     rule_severity_critical,
 ):
     """Ensure the formatter has the correct output after model evaluation."""
+    default_config.overload({"show": "all"})
     formatter = HumanReadableFormatter(
         manifest_loader=manifest_loader, config=default_config
     )
@@ -126,14 +181,19 @@ def test_human_readable_formatter_low_evaluable_score(
     assert stdout == dedent(expected)
 
 
-def test_human_readable_formatter_low_project_score(
+def test_human_readable_formatter_low_project_score_high_model_score(
     capsys,
     default_config,
     manifest_loader,
    model1,
     rule_severity_critical,
 ):
-    """Ensure the formatter has the correct output when the projet has a low score."""
+    """Ensure the formatter has the correct output when the project has a low score.
+
+    If the model itself has a high score, the `show` option must be set to
+    `all` to make it visible.
+    """
+    default_config.overload({"show": "all"})
     formatter = HumanReadableFormatter(
         manifest_loader=manifest_loader, config=default_config
     )
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 91d2bae..7597ed0 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -21,7 +21,7 @@ def test_lint_existing_manifest(manifest_path):
     """Test lint with an existing manifest."""
     with patch("dbt_score.cli.Config._load_toml_file"):
         runner = CliRunner()
-        result = runner.invoke(lint, ["--manifest", manifest_path])
+        result = runner.invoke(lint, ["--manifest", manifest_path, "--show", "all"])
 
         assert "model1" in result.output
         assert "model2" in result.output
@@ -112,7 +112,6 @@ def test_fail_any_model_under(manifest_path):
         result = runner.invoke(
             lint, ["--manifest", manifest_path, "--fail-any-item-under", "10.0"]
         )
 
-        assert "model1" in result.output
         assert "model2" in result.output
         assert "Error: evaluable score too low, fail_any_item_under" in result.stdout
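
For reference, a short configuration sketch: the same `show` value that the new `--show` CLI option sets per invocation can also be persisted in the project configuration, since `"show"` is added to the recognized config options above. The `[tool.dbt-score]` section name in `pyproject.toml` is assumed here to be the project's usual settings location.

```toml
# pyproject.toml -- assumed section for dbt-score settings.
# Persists the new option so every `dbt-score lint` run prints all items
# and all rules, matching the output behavior from before this change.
[tool.dbt-score]
show = "all"
```

The per-run equivalent is `dbt-score lint --show all`, as used in the adjusted tests above.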