From ce39962f8ad09048b5fab41e7cefc9ecd54b3150 Mon Sep 17 00:00:00 2001 From: Thomas Mendelin <36770664+thomend@users.noreply.github.com> Date: Sun, 29 Sep 2024 15:14:03 +0000 Subject: [PATCH 01/18] only_show_failing argument --- src/dbt_score/cli.py | 10 ++++++++++ src/dbt_score/config.py | 1 + src/dbt_score/evaluation.py | 10 +++++++--- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/src/dbt_score/cli.py b/src/dbt_score/cli.py index 2c26df6..2ce5797 100644 --- a/src/dbt_score/cli.py +++ b/src/dbt_score/cli.py @@ -93,6 +93,13 @@ def cli() -> None: is_flag=False, default=None, ) +@click.option( + "--only_show_failing", + help="If set to True, only show failing models", + type=bool, + is_flag=True, + default=False, +) @click.pass_context def lint( ctx: click.Context, @@ -104,6 +111,7 @@ def lint( run_dbt_parse: bool, fail_project_under: float, fail_any_model_under: float, + only_show_failing: bool, ) -> None: """Lint dbt models metadata.""" manifest_provided = ( @@ -123,6 +131,8 @@ def lint( config.overload({"fail_project_under": fail_project_under}) if fail_any_model_under: config.overload({"fail_any_model_under": fail_any_model_under}) + if only_show_failing: + config.overload({"only_show_failing": only_show_failing}) try: if run_dbt_parse: diff --git a/src/dbt_score/config.py b/src/dbt_score/config.py index 4a4ddf5..e6172e2 100644 --- a/src/dbt_score/config.py +++ b/src/dbt_score/config.py @@ -71,6 +71,7 @@ def __init__(self) -> None: self.badge_config: BadgeConfig = BadgeConfig() self.fail_project_under: float = 5.0 self.fail_any_model_under: float = 5.0 + self.only_show_failing: bool = False def set_option(self, option: str, value: Any) -> None: """Set an option in the config.""" diff --git a/src/dbt_score/evaluation.py b/src/dbt_score/evaluation.py index c583d06..0a9d7ff 100644 --- a/src/dbt_score/evaluation.py +++ b/src/dbt_score/evaluation.py @@ -64,9 +64,13 @@ def evaluate(self) -> None: self.results[model][rule.__class__] = e self.scores[model] = self._scorer.score_model(self.results[model]) - self._formatter.model_evaluated( - model, self.results[model], self.scores[model] - ) + if ( + self.scores[model].value < self._scorer._config.fail_any_model_under + or not self._scorer._config.only_show_failing + ): + self._formatter.model_evaluated( + model, self.results[model], self.scores[model] + ) # Compute score for project self.project_score = self._scorer.score_aggregate_models( From 16f80794f6b689f5daf9d2cc0b3befb33802b70f Mon Sep 17 00:00:00 2001 From: Thomas Mendelin <36770664+thomend@users.noreply.github.com> Date: Tue, 1 Oct 2024 19:23:48 +0000 Subject: [PATCH 02/18] added flag and changed default behavior to only show rules/models which fail --- src/dbt_score/cli.py | 15 ++--- src/dbt_score/config.py | 3 +- src/dbt_score/evaluation.py | 10 +--- .../formatters/human_readable_formatter.py | 36 +++++++---- .../test_human_readable_formatter.py | 59 ++++++++++++++----- tests/test_cli.py | 4 +- 6 files changed, 82 insertions(+), 45 deletions(-) diff --git a/src/dbt_score/cli.py b/src/dbt_score/cli.py index 2ce5797..eb9252e 100644 --- a/src/dbt_score/cli.py +++ b/src/dbt_score/cli.py @@ -94,14 +94,16 @@ def cli() -> None: default=None, ) @click.option( - "--only_show_failing", - help="If set to True, only show failing models", + "--show_all", + help="If set to True,show all models and all rules in output" + "when using `plain` as `--format`." 
+ "Default behavior is to only show failing models and violated rules", type=bool, is_flag=True, default=False, ) @click.pass_context -def lint( +def lint( # noqa: PLR0913, C901 ctx: click.Context, format: Literal["plain", "manifest", "ascii"], select: tuple[str], @@ -111,7 +113,7 @@ def lint( run_dbt_parse: bool, fail_project_under: float, fail_any_model_under: float, - only_show_failing: bool, + show_all: bool, ) -> None: """Lint dbt models metadata.""" manifest_provided = ( @@ -131,9 +133,8 @@ def lint( config.overload({"fail_project_under": fail_project_under}) if fail_any_model_under: config.overload({"fail_any_model_under": fail_any_model_under}) - if only_show_failing: - config.overload({"only_show_failing": only_show_failing}) - + if show_all: + config.overload({"show_all": show_all}) try: if run_dbt_parse: dbt_parse() diff --git a/src/dbt_score/config.py b/src/dbt_score/config.py index e6172e2..66335de 100644 --- a/src/dbt_score/config.py +++ b/src/dbt_score/config.py @@ -57,6 +57,7 @@ class Config: "inject_cwd_in_python_path", "fail_project_under", "fail_any_model_under", + "show_all", ] _rules_section: Final[str] = "rules" _badges_section: Final[str] = "badges" @@ -71,7 +72,7 @@ def __init__(self) -> None: self.badge_config: BadgeConfig = BadgeConfig() self.fail_project_under: float = 5.0 self.fail_any_model_under: float = 5.0 - self.only_show_failing: bool = False + self.show_all: bool = False def set_option(self, option: str, value: Any) -> None: """Set an option in the config.""" diff --git a/src/dbt_score/evaluation.py b/src/dbt_score/evaluation.py index 0a9d7ff..c583d06 100644 --- a/src/dbt_score/evaluation.py +++ b/src/dbt_score/evaluation.py @@ -64,13 +64,9 @@ def evaluate(self) -> None: self.results[model][rule.__class__] = e self.scores[model] = self._scorer.score_model(self.results[model]) - if ( - self.scores[model].value < self._scorer._config.fail_any_model_under - or not self._scorer._config.only_show_failing - ): - self._formatter.model_evaluated( - model, self.results[model], self.scores[model] - ) + self._formatter.model_evaluated( + model, self.results[model], self.scores[model] + ) # Compute score for project self.project_score = self._scorer.score_aggregate_models( diff --git a/src/dbt_score/formatters/human_readable_formatter.py b/src/dbt_score/formatters/human_readable_formatter.py index ba49a53..b1318b8 100644 --- a/src/dbt_score/formatters/human_readable_formatter.py +++ b/src/dbt_score/formatters/human_readable_formatter.py @@ -33,18 +33,30 @@ def model_evaluated( """Callback when a model has been evaluated.""" if score.value < self._config.fail_any_model_under: self._failed_models.append((model, score)) - print(f"{score.badge} {self.bold(model.name)} (score: {score.rounded_value!s})") - for rule, result in results.items(): - if result is None: - print(f"{self.indent}{self.label_ok} {rule.source()}") - elif isinstance(result, RuleViolation): - print( - f"{self.indent}{self.label_warning} " - f"({rule.severity.name.lower()}) {rule.source()}: {result.message}" - ) - else: - print(f"{self.indent}{self.label_error} {rule.source()}: {result!s}") - print() + if ( + score.value < self._config.fail_any_model_under + or score.value < self._config.fail_project_under + or self._config.show_all + ): + print( + f"{score.badge} {self.bold(model.name)} " + f"(score: {score.rounded_value!s})" + ) + for rule, result in results.items(): + if result is None: + if self._config.show_all: + print(f"{self.indent}{self.label_ok} {rule.source()}") + elif isinstance(result, 
RuleViolation): + print( + f"{self.indent}{self.label_warning} " + f"({rule.severity.name.lower()}) " + f"{rule.source()}: {result.message}" + ) + else: + print( + f"{self.indent}{self.label_error} {rule.source()}: {result!s}" + ) + print() def project_evaluated(self, score: Score) -> None: """Callback when a project has been evaluated.""" diff --git a/tests/formatters/test_human_readable_formatter.py b/tests/formatters/test_human_readable_formatter.py index 6a3438a..7649553 100644 --- a/tests/formatters/test_human_readable_formatter.py +++ b/tests/formatters/test_human_readable_formatter.py @@ -26,12 +26,36 @@ def test_human_readable_formatter_model( } formatter.model_evaluated(model1, results, Score(10.0, "🥇")) stdout = capsys.readouterr().out + assert stdout == "" + + +def test_human_readable_formatter_model_show_all( + capsys, + default_config, + manifest_loader, + model1, + rule_severity_low, + rule_severity_medium, + rule_severity_critical, +): + """Ensure the formatter has the correct output after model evaluation.""" + default_config.overload({"show_all": True}) + formatter = HumanReadableFormatter( + manifest_loader=manifest_loader, config=default_config + ) + results: ModelResultsType = { + rule_severity_low: None, + rule_severity_medium: Exception("Oh noes"), + rule_severity_critical: RuleViolation("Error"), + } + formatter.model_evaluated(model1, results, Score(10.0, "🥇")) + stdout = capsys.readouterr().out assert ( stdout - == """🥇 \x1B[1mmodel1\x1B[0m (score: 10.0) - \x1B[1;32mOK \x1B[0m tests.conftest.rule_severity_low - \x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes - \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error + == """🥇 \x1b[1mmodel1\x1b[0m (score: 10.0) + \x1b[1;32mOK \x1b[0m tests.conftest.rule_severity_low + \x1b[1;31mERR \x1b[0m tests.conftest.rule_severity_medium: Oh noes + \x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error """ ) @@ -44,7 +68,7 @@ def test_human_readable_formatter_project(capsys, default_config, manifest_loade ) formatter.project_evaluated(Score(10.0, "🥇")) stdout = capsys.readouterr().out - assert stdout == "Project score: \x1B[1m10.0\x1B[0m 🥇\n" + assert stdout == "Project score: \x1b[1m10.0\x1b[0m 🥇\n" def test_human_readable_formatter_near_perfect_model_score( @@ -57,6 +81,7 @@ def test_human_readable_formatter_near_perfect_model_score( rule_severity_critical, ): """Ensure the formatter has the correct output after model evaluation.""" + default_config.overload({"show_all": True}) formatter = HumanReadableFormatter( manifest_loader=manifest_loader, config=default_config ) @@ -69,10 +94,10 @@ def test_human_readable_formatter_near_perfect_model_score( stdout = capsys.readouterr().out assert ( stdout - == """🥈 \x1B[1mmodel1\x1B[0m (score: 9.9) - \x1B[1;32mOK \x1B[0m tests.conftest.rule_severity_low - \x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes - \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error + == """🥈 \x1b[1mmodel1\x1b[0m (score: 9.9) + \x1b[1;32mOK \x1b[0m tests.conftest.rule_severity_low + \x1b[1;31mERR \x1b[0m tests.conftest.rule_severity_medium: Oh noes + \x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error """ ) @@ -87,7 +112,7 @@ def test_human_readable_formatter_near_perfect_project_score( ) formatter.project_evaluated(Score(9.99, "🥈")) stdout = capsys.readouterr().out - assert stdout == "Project score: \x1B[1m9.9\x1B[0m 🥈\n" + assert stdout == "Project score: \x1b[1m9.9\x1b[0m 🥈\n" 
def test_human_readable_formatter_low_model_score( @@ -107,13 +132,14 @@ def test_human_readable_formatter_low_model_score( formatter.model_evaluated(model1, results, Score(0.0, "🚧")) formatter.project_evaluated(Score(0.0, "🚧")) stdout = capsys.readouterr().out + print(stdout) print() assert ( stdout - == """🚧 \x1B[1mmodel1\x1B[0m (score: 0.0) - \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error + == """🚧 \x1b[1mmodel1\x1b[0m (score: 0.0) + \x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error -Project score: \x1B[1m0.0\x1B[0m 🚧 +Project score: \x1b[1m0.0\x1b[0m 🚧 Error: model score too low, fail_any_model_under = 5.0 Model model1 scored 0.0 @@ -129,6 +155,7 @@ def test_human_readable_formatter_low_project_score( rule_severity_critical, ): """Ensure the formatter has the correct output when the projet has a low score.""" + default_config.overload({"show_all": True}) formatter = HumanReadableFormatter( manifest_loader=manifest_loader, config=default_config ) @@ -141,10 +168,10 @@ def test_human_readable_formatter_low_project_score( print() assert ( stdout - == """🥇 \x1B[1mmodel1\x1B[0m (score: 10.0) - \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error + == """🥇 \x1b[1mmodel1\x1b[0m (score: 10.0) + \x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error -Project score: \x1B[1m0.0\x1B[0m 🚧 +Project score: \x1b[1m0.0\x1b[0m 🚧 Error: project score too low, fail_project_under = 5.0 """ diff --git a/tests/test_cli.py b/tests/test_cli.py index 29a75ce..ecc4561 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -21,7 +21,7 @@ def test_lint_existing_manifest(manifest_path): """Test lint with an existing manifest.""" with patch("dbt_score.cli.Config._load_toml_file"): runner = CliRunner() - result = runner.invoke(lint, ["--manifest", manifest_path]) + result = runner.invoke(lint, ["--manifest", manifest_path, "--show_all"]) assert "model1" in result.output assert "model2" in result.output @@ -93,7 +93,7 @@ def test_fail_any_model_under(manifest_path): result = runner.invoke( lint, ["--manifest", manifest_path, "--fail_any_model_under", "10.0"] ) - + print(result.output) assert "model1" in result.output assert "model2" in result.output assert "Error: model score too low, fail_any_model_under" in result.stdout From 321a8463422f39ccbe7daf3415c54a282f4ce3f8 Mon Sep 17 00:00:00 2001 From: Thomas Mendelin <36770664+thomend@users.noreply.github.com> Date: Wed, 2 Oct 2024 05:55:31 +0000 Subject: [PATCH 03/18] changed HR formater to print Violations of all failing rules --- .../formatters/human_readable_formatter.py | 2 +- .../formatters/test_human_readable_formatter.py | 17 ++++++++++++++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/src/dbt_score/formatters/human_readable_formatter.py b/src/dbt_score/formatters/human_readable_formatter.py index b1318b8..8cd10d1 100644 --- a/src/dbt_score/formatters/human_readable_formatter.py +++ b/src/dbt_score/formatters/human_readable_formatter.py @@ -35,7 +35,7 @@ def model_evaluated( self._failed_models.append((model, score)) if ( score.value < self._config.fail_any_model_under - or score.value < self._config.fail_project_under + or any(isinstance(result, RuleViolation) for result in results.values()) or self._config.show_all ): print( diff --git a/tests/formatters/test_human_readable_formatter.py b/tests/formatters/test_human_readable_formatter.py index 7649553..43cb728 100644 --- a/tests/formatters/test_human_readable_formatter.py +++ 
b/tests/formatters/test_human_readable_formatter.py @@ -26,7 +26,14 @@ def test_human_readable_formatter_model( } formatter.model_evaluated(model1, results, Score(10.0, "🥇")) stdout = capsys.readouterr().out - assert stdout == "" + assert ( + stdout + == """🥇 \x1b[1mmodel1\x1b[0m (score: 10.0) + \x1b[1;31mERR \x1b[0m tests.conftest.rule_severity_medium: Oh noes + \x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error + +""" + ) def test_human_readable_formatter_model_show_all( @@ -147,14 +154,18 @@ def test_human_readable_formatter_low_model_score( ) -def test_human_readable_formatter_low_project_score( +def test_human_readable_formatter_low_project_score_high_model_score( capsys, default_config, manifest_loader, model1, rule_severity_critical, ): - """Ensure the formatter has the correct output when the projet has a low score.""" + """Ensure the formatter has the correct output when the projet has a low score. + + If model itself has a high project score then we need to pass `show_all` flag + to make it visible. + """ default_config.overload({"show_all": True}) formatter = HumanReadableFormatter( manifest_loader=manifest_loader, config=default_config From 1cd404d040d1bf05ba0fc301f643dcf8e0b40921 Mon Sep 17 00:00:00 2001 From: Thomas Mendelin <36770664+thomend@users.noreply.github.com> Date: Wed, 2 Oct 2024 05:59:36 +0000 Subject: [PATCH 04/18] removed redudant print statement --- tests/test_cli.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/test_cli.py b/tests/test_cli.py index ecc4561..b1b8bd7 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -93,7 +93,6 @@ def test_fail_any_model_under(manifest_path): result = runner.invoke( lint, ["--manifest", manifest_path, "--fail_any_model_under", "10.0"] ) - print(result.output) assert "model1" in result.output assert "model2" in result.output assert "Error: model score too low, fail_any_model_under" in result.stdout From 7fd5b7466651f9d5d33086e8c51eee3b7ff972c2 Mon Sep 17 00:00:00 2001 From: Thomas Mendelin <36770664+thomend@users.noreply.github.com> Date: Wed, 2 Oct 2024 06:06:46 +0000 Subject: [PATCH 05/18] changed doc example to showcase new default behavior --- docs/index.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/index.md b/docs/index.md index 0793a49..b6318dd 100644 --- a/docs/index.md +++ b/docs/index.md @@ -11,7 +11,7 @@ encourage) good practices. ## Example ``` -> dbt-score lint +> dbt-score lint --show_all 🥇 customers (score: 10.0) OK dbt_score.rules.generic.has_description OK dbt_score.rules.generic.has_owner: Model lacks an owner. @@ -21,7 +21,8 @@ Score: 10.0 🥇 In this example, the model `customers` scores the maximum value of `10.0` as it passes all the rules. It also is awarded a golden medal because of the perfect -score. +score. By default a passing model without rule violations will only be shown if +we pass the `---show_all` flag. ## Philosophy From 94a1c69706a10be96c74503d7a51872d567e357e Mon Sep 17 00:00:00 2001 From: Thomas Mendelin <36770664+thomend@users.noreply.github.com> Date: Fri, 4 Oct 2024 10:19:26 +0000 Subject: [PATCH 06/18] renamed CLI argument from show_all to show-all --- docs/index.md | 2 +- src/dbt_score/cli.py | 2 +- tests/test_cli.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/index.md b/docs/index.md index b6318dd..94dd0ce 100644 --- a/docs/index.md +++ b/docs/index.md @@ -11,7 +11,7 @@ encourage) good practices. 
## Example ``` -> dbt-score lint --show_all +> dbt-score lint --show-all 🥇 customers (score: 10.0) OK dbt_score.rules.generic.has_description OK dbt_score.rules.generic.has_owner: Model lacks an owner. diff --git a/src/dbt_score/cli.py b/src/dbt_score/cli.py index eb9252e..df46ea6 100644 --- a/src/dbt_score/cli.py +++ b/src/dbt_score/cli.py @@ -94,7 +94,7 @@ def cli() -> None: default=None, ) @click.option( - "--show_all", + "--show-all", help="If set to True,show all models and all rules in output" "when using `plain` as `--format`." "Default behavior is to only show failing models and violated rules", diff --git a/tests/test_cli.py b/tests/test_cli.py index b1b8bd7..8568359 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -21,7 +21,7 @@ def test_lint_existing_manifest(manifest_path): """Test lint with an existing manifest.""" with patch("dbt_score.cli.Config._load_toml_file"): runner = CliRunner() - result = runner.invoke(lint, ["--manifest", manifest_path, "--show_all"]) + result = runner.invoke(lint, ["--manifest", manifest_path, "--show-all"]) assert "model1" in result.output assert "model2" in result.output From b752a416e1e026392333abbccf3df0ba5e5e1b07 Mon Sep 17 00:00:00 2001 From: Thomas Mendelin <36770664+thomend@users.noreply.github.com> Date: Mon, 7 Oct 2024 21:04:04 +0200 Subject: [PATCH 07/18] Update docs/index.md Co-authored-by: Jochem van Dooren --- docs/index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/index.md b/docs/index.md index 94dd0ce..a7cf9cf 100644 --- a/docs/index.md +++ b/docs/index.md @@ -21,8 +21,8 @@ Score: 10.0 🥇 In this example, the model `customers` scores the maximum value of `10.0` as it passes all the rules. It also is awarded a golden medal because of the perfect -score. By default a passing model without rule violations will only be shown if -we pass the `---show_all` flag. +score. By default a passing model with or without rule violations will not be shown, +unless we pass the `--show-all` flag. ## Philosophy From 48abfc1729ab5347c751cc63e6233248a5cbc427 Mon Sep 17 00:00:00 2001 From: Thomas Mendelin <36770664+thomend@users.noreply.github.com> Date: Mon, 7 Oct 2024 21:04:44 +0200 Subject: [PATCH 08/18] Update src/dbt_score/cli.py Co-authored-by: Jochem van Dooren --- src/dbt_score/cli.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/dbt_score/cli.py b/src/dbt_score/cli.py index df46ea6..2dc4f39 100644 --- a/src/dbt_score/cli.py +++ b/src/dbt_score/cli.py @@ -95,9 +95,9 @@ def cli() -> None: ) @click.option( "--show-all", - help="If set to True,show all models and all rules in output" - "when using `plain` as `--format`." - "Default behavior is to only show failing models and violated rules", + help="If set to True, show all models and all rules in output " + "when using `plain` as `--format`. 
" + "Default behavior is to only show failing models and violated rules.", type=bool, is_flag=True, default=False, From 512b04fae9f2f14f9b9870f4c4efc7b969454253 Mon Sep 17 00:00:00 2001 From: Thomas Mendelin <36770664+thomend@users.noreply.github.com> Date: Tue, 3 Dec 2024 16:21:58 +0000 Subject: [PATCH 09/18] changed --show from flag to option --- src/dbt_score/cli.py | 21 +++++++++++-------- src/dbt_score/config.py | 4 ++-- .../formatters/human_readable_formatter.py | 11 +++++----- .../test_human_readable_formatter.py | 4 ++-- tests/test_cli.py | 2 +- 5 files changed, 22 insertions(+), 20 deletions(-) diff --git a/src/dbt_score/cli.py b/src/dbt_score/cli.py index 2dc4f39..7098aa5 100644 --- a/src/dbt_score/cli.py +++ b/src/dbt_score/cli.py @@ -94,13 +94,16 @@ def cli() -> None: default=None, ) @click.option( - "--show-all", - help="If set to True, show all models and all rules in output " + "--show", + help="Type of output which should be shown" "when using `plain` as `--format`. " - "Default behavior is to only show failing models and violated rules.", - type=bool, - is_flag=True, - default=False, + "`all` shows all models and all rules" + "`failing-models` shows failing rules of failing models" + "`failing-rules` shows failing rules of all models" + "Default behavior is to show violated rules of all models.", + type=click.Choice(["all", "failing-models", "failing-rules"]), + is_flag=False, + default="failing-rules", ) @click.pass_context def lint( # noqa: PLR0913, C901 @@ -113,7 +116,7 @@ def lint( # noqa: PLR0913, C901 run_dbt_parse: bool, fail_project_under: float, fail_any_model_under: float, - show_all: bool, + show: Literal["all", "failing-models", "failing-rules"], ) -> None: """Lint dbt models metadata.""" manifest_provided = ( @@ -133,8 +136,8 @@ def lint( # noqa: PLR0913, C901 config.overload({"fail_project_under": fail_project_under}) if fail_any_model_under: config.overload({"fail_any_model_under": fail_any_model_under}) - if show_all: - config.overload({"show_all": show_all}) + if show: + config.overload({"show": show}) try: if run_dbt_parse: dbt_parse() diff --git a/src/dbt_score/config.py b/src/dbt_score/config.py index 66335de..4a59779 100644 --- a/src/dbt_score/config.py +++ b/src/dbt_score/config.py @@ -57,7 +57,7 @@ class Config: "inject_cwd_in_python_path", "fail_project_under", "fail_any_model_under", - "show_all", + "show", ] _rules_section: Final[str] = "rules" _badges_section: Final[str] = "badges" @@ -72,7 +72,7 @@ def __init__(self) -> None: self.badge_config: BadgeConfig = BadgeConfig() self.fail_project_under: float = 5.0 self.fail_any_model_under: float = 5.0 - self.show_all: bool = False + self.show: str = "failing-rules" def set_option(self, option: str, value: Any) -> None: """Set an option in the config.""" diff --git a/src/dbt_score/formatters/human_readable_formatter.py b/src/dbt_score/formatters/human_readable_formatter.py index 8cd10d1..555f116 100644 --- a/src/dbt_score/formatters/human_readable_formatter.py +++ b/src/dbt_score/formatters/human_readable_formatter.py @@ -33,18 +33,17 @@ def model_evaluated( """Callback when a model has been evaluated.""" if score.value < self._config.fail_any_model_under: self._failed_models.append((model, score)) - if ( - score.value < self._config.fail_any_model_under - or any(isinstance(result, RuleViolation) for result in results.values()) - or self._config.show_all - ): + if score.value < self._config.fail_any_model_under or self._config.show in [ + "all", + "failing-rules", + ]: print( f"{score.badge} 
{self.bold(model.name)} " f"(score: {score.rounded_value!s})" ) for rule, result in results.items(): if result is None: - if self._config.show_all: + if self._config.show in ["all", "failing-models"]: print(f"{self.indent}{self.label_ok} {rule.source()}") elif isinstance(result, RuleViolation): print( diff --git a/tests/formatters/test_human_readable_formatter.py b/tests/formatters/test_human_readable_formatter.py index 43cb728..6dbfb8f 100644 --- a/tests/formatters/test_human_readable_formatter.py +++ b/tests/formatters/test_human_readable_formatter.py @@ -46,7 +46,7 @@ def test_human_readable_formatter_model_show_all( rule_severity_critical, ): """Ensure the formatter has the correct output after model evaluation.""" - default_config.overload({"show_all": True}) + default_config.overload({"show": "all"}) formatter = HumanReadableFormatter( manifest_loader=manifest_loader, config=default_config ) @@ -88,7 +88,7 @@ def test_human_readable_formatter_near_perfect_model_score( rule_severity_critical, ): """Ensure the formatter has the correct output after model evaluation.""" - default_config.overload({"show_all": True}) + default_config.overload({"show": "all"}) formatter = HumanReadableFormatter( manifest_loader=manifest_loader, config=default_config ) diff --git a/tests/test_cli.py b/tests/test_cli.py index 8568359..9571c17 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -21,7 +21,7 @@ def test_lint_existing_manifest(manifest_path): """Test lint with an existing manifest.""" with patch("dbt_score.cli.Config._load_toml_file"): runner = CliRunner() - result = runner.invoke(lint, ["--manifest", manifest_path, "--show-all"]) + result = runner.invoke(lint, ["--manifest", manifest_path, "--show", "all"]) assert "model1" in result.output assert "model2" in result.output From 0f8bfd8898855260937496098e7664050b517523 Mon Sep 17 00:00:00 2001 From: Thomas Mendelin <36770664+thomend@users.noreply.github.com> Date: Wed, 11 Dec 2024 06:44:58 +0000 Subject: [PATCH 10/18] replaced instancers of model with item --- src/dbt_score/cli.py | 12 ++++++------ src/dbt_score/formatters/human_readable_formatter.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/dbt_score/cli.py b/src/dbt_score/cli.py index 0a388e5..55534fe 100644 --- a/src/dbt_score/cli.py +++ b/src/dbt_score/cli.py @@ -98,11 +98,11 @@ def cli() -> None: "--show", help="Type of output which should be shown" "when using `plain` as `--format`. 
" - "`all` shows all models and all rules" - "`failing-models` shows failing rules of failing models" - "`failing-rules` shows failing rules of all models" - "Default behavior is to show violated rules of all models.", - type=click.Choice(["all", "failing-models", "failing-rules"]), + "`all` shows all items and all rules" + "`failing-items` shows failing rules of failing items" + "`failing-rules` shows failing rules of all items" + "Default behavior is to show violated rules of all items.", + type=click.Choice(["all", "failing-items", "failing-rules"]), is_flag=False, default="failing-rules", ) @@ -117,7 +117,7 @@ def lint( # noqa: PLR0913, C901 run_dbt_parse: bool, fail_project_under: float, fail_any_item_under: float, - show: Literal["all", "failing-models", "failing-rules"], + show: Literal["all", "failing-items", "failing-rules"], ) -> None: """Lint dbt metadata.""" manifest_provided = ( diff --git a/src/dbt_score/formatters/human_readable_formatter.py b/src/dbt_score/formatters/human_readable_formatter.py index 7f01972..0614ba7 100644 --- a/src/dbt_score/formatters/human_readable_formatter.py +++ b/src/dbt_score/formatters/human_readable_formatter.py @@ -58,7 +58,7 @@ def evaluable_evaluated( print(header) for rule, result in results.items(): if result is None: - if self._config.show in ["all", "failing-models"]: + if self._config.show in ["all", "failing-items"]: print(f"{self.indent}{self.label_ok} {rule.source()}") elif isinstance(result, RuleViolation): print( From 0b1dc0ba44d1d8414f353b36a44b02849d15ae8c Mon Sep 17 00:00:00 2001 From: Thomas Mendelin <36770664+thomend@users.noreply.github.com> Date: Wed, 11 Dec 2024 06:57:45 +0000 Subject: [PATCH 11/18] improved docstring --- src/dbt_score/cli.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/dbt_score/cli.py b/src/dbt_score/cli.py index 55534fe..2e7e6b9 100644 --- a/src/dbt_score/cli.py +++ b/src/dbt_score/cli.py @@ -96,12 +96,12 @@ def cli() -> None: ) @click.option( "--show", - help="Type of output which should be shown" + help="Type of output which should be shown " "when using `plain` as `--format`. " - "`all` shows all items and all rules" - "`failing-items` shows failing rules of failing items" - "`failing-rules` shows failing rules of all items" - "Default behavior is to show violated rules of all items.", + "`all` shows all items and all rules. " + "`failing-items` shows failing rules of failing items. " + "`failing-rules` shows failing rules of all items. 
" + "Default is --failing-rules.", type=click.Choice(["all", "failing-items", "failing-rules"]), is_flag=False, default="failing-rules", From 3928d5649dbc44fae66d0b7f80f41242f1268e34 Mon Sep 17 00:00:00 2001 From: Thomas Mendelin <36770664+thomend@users.noreply.github.com> Date: Wed, 11 Dec 2024 07:11:46 +0000 Subject: [PATCH 12/18] fixed if condition for printing output from evaluable --- src/dbt_score/formatters/human_readable_formatter.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/dbt_score/formatters/human_readable_formatter.py b/src/dbt_score/formatters/human_readable_formatter.py index 0614ba7..eca83af 100644 --- a/src/dbt_score/formatters/human_readable_formatter.py +++ b/src/dbt_score/formatters/human_readable_formatter.py @@ -42,12 +42,13 @@ def evaluable_evaluated( self, evaluable: Evaluable, results: EvaluableResultsType, score: Score ) -> None: """Callback when an evaluable item has been evaluated.""" - if score.value < self._config.fail_any_item_under: + if evaluable_failed := score.value < self._config.fail_any_item_under: self._failed_evaluables.append((evaluable, score)) - if score.value < self._config.fail_any_item_under or self._config.show in [ - "all", - "failing-rules", - ]: + if ( + evaluable_failed + or self._config.show == "all" + or any(result is not None for result in results.values()) + ): resource_type = type(evaluable).__name__ name_formatted = f"{resource_type[0]}: {self.pretty_name(evaluable)}" header = ( From 667908f122078a4677ff48f85026f97aeac4a286 Mon Sep 17 00:00:00 2001 From: Thomas Mendelin <36770664+thomend@users.noreply.github.com> Date: Wed, 11 Dec 2024 07:47:54 +0000 Subject: [PATCH 13/18] fixed output for --failing-items mode --- src/dbt_score/formatters/human_readable_formatter.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/dbt_score/formatters/human_readable_formatter.py b/src/dbt_score/formatters/human_readable_formatter.py index eca83af..86978fa 100644 --- a/src/dbt_score/formatters/human_readable_formatter.py +++ b/src/dbt_score/formatters/human_readable_formatter.py @@ -47,7 +47,10 @@ def evaluable_evaluated( if ( evaluable_failed or self._config.show == "all" - or any(result is not None for result in results.values()) + or ( + self._config.show not in ["failing-items"] + and any(result is not None for result in results.values()) + ) ): resource_type = type(evaluable).__name__ name_formatted = f"{resource_type[0]}: {self.pretty_name(evaluable)}" @@ -59,7 +62,7 @@ def evaluable_evaluated( print(header) for rule, result in results.items(): if result is None: - if self._config.show in ["all", "failing-items"]: + if self._config.show in ["all"]: print(f"{self.indent}{self.label_ok} {rule.source()}") elif isinstance(result, RuleViolation): print( From 583cc6b5c1c9d581c769ee8c9e012d6afd95d0b6 Mon Sep 17 00:00:00 2001 From: Thomas Mendelin <36770664+thomend@users.noreply.github.com> Date: Wed, 11 Dec 2024 18:35:55 +0100 Subject: [PATCH 14/18] Update docs/index.md Co-authored-by: Jochem van Dooren --- docs/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.md b/docs/index.md index 1a46f1d..7b4baed 100644 --- a/docs/index.md +++ b/docs/index.md @@ -22,7 +22,7 @@ Score: 10.0 🥇 In this example, the model `customers` scores the maximum value of `10.0` as it passes all the rules. It also is awarded a golden medal because of the perfect -score. By default a passing model with or without rule violations will not be +score. 
By default a passing model or source with or without rule violations will not be shown, unless we pass the `--show-all` flag. ## Philosophy From ff241deeab94c1e5073734047bdcdc021520fd00 Mon Sep 17 00:00:00 2001 From: Thomas Mendelin <36770664+thomend@users.noreply.github.com> Date: Thu, 12 Dec 2024 07:02:42 +0000 Subject: [PATCH 15/18] added tests for show parameter --- .../test_human_readable_formatter.py | 48 ++++++++++++++----- 1 file changed, 36 insertions(+), 12 deletions(-) diff --git a/tests/formatters/test_human_readable_formatter.py b/tests/formatters/test_human_readable_formatter.py index e313dbb..7e14723 100644 --- a/tests/formatters/test_human_readable_formatter.py +++ b/tests/formatters/test_human_readable_formatter.py @@ -1,13 +1,14 @@ """Unit tests for the human readable formatter.""" from textwrap import dedent +import pytest from dbt_score.evaluation import EvaluableResultsType from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter from dbt_score.rule import RuleViolation from dbt_score.scoring import Score -def test_human_readable_formatter_model( +def test_human_readable_formatter_model_with_defaults( capsys, default_config, manifest_loader, @@ -36,7 +37,35 @@ def test_human_readable_formatter_model( assert stdout == dedent(expected) -def test_human_readable_formatter_model_show_all( +@pytest.mark.parametrize( + "show,expected", + [ + ( + "all", + """\ + 🥇 \x1B[1mM: model1\x1B[0m (score: 10.0) + \x1B[1;32mOK \x1B[0m tests.conftest.rule_severity_low + \x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes + \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error + + """, + ), + ( + "failing-rules", + """\ + 🥇 \x1B[1mM: model1\x1B[0m (score: 10.0) + \x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes + \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error + + """, + ), + ( + "failing-items", + "", + ), + ], +) +def test_human_readable_formatter_model_show_parameter( capsys, default_config, manifest_loader, @@ -44,9 +73,11 @@ def test_human_readable_formatter_model_show_all( rule_severity_low, rule_severity_medium, rule_severity_critical, + show, + expected, ): """Ensure the formatter has the correct output after model evaluation.""" - default_config.overload({"show": "all"}) + default_config.overload({"show": show}) formatter = HumanReadableFormatter( manifest_loader=manifest_loader, config=default_config ) @@ -57,13 +88,6 @@ def test_human_readable_formatter_model_show_all( } formatter.evaluable_evaluated(model1, results, Score(10.0, "🥇")) stdout = capsys.readouterr().out - expected = """\ - 🥇 \x1B[1mM: model1\x1B[0m (score: 10.0) - \x1B[1;32mOK \x1B[0m tests.conftest.rule_severity_low - \x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes - \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error - - """ assert stdout == dedent(expected) @@ -74,7 +98,7 @@ def test_human_readable_formatter_project(capsys, default_config, manifest_loade ) formatter.project_evaluated(Score(10.0, "🥇")) stdout = capsys.readouterr().out - assert stdout == "Project score: \x1b[1m10.0\x1b[0m 🥇\n" + assert stdout == "Project score: \x1B[1m10.0\x1B[0m 🥇\n" def test_human_readable_formatter_near_perfect_model_score( @@ -118,7 +142,7 @@ def test_human_readable_formatter_near_perfect_project_score( ) formatter.project_evaluated(Score(9.99, "🥈")) stdout = capsys.readouterr().out - assert stdout == "Project score: \x1b[1m9.9\x1b[0m 🥈\n" + assert stdout == "Project score: 
\x1B[1m9.9\x1B[0m 🥈\n" def test_human_readable_formatter_low_evaluable_score( From 3c9df1d9b0bc3c755af286409e1244b26593d941 Mon Sep 17 00:00:00 2001 From: Thomas Mendelin <36770664+thomend@users.noreply.github.com> Date: Thu, 12 Dec 2024 07:18:37 +0000 Subject: [PATCH 16/18] added entry in changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c04cf73..c1169aa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,8 @@ and this project adheres to ## [Unreleased] - Documenting support for python 3.13. (#86) +- Only show failing rules per default in `HumanReadableFormatter`. Also added + `show` parameter in the CLI to change this behavior. (#77) ## [0.8.0] - 2024-11-12 From ce49628897ba21bf361698e76980a3ffbc448f3d Mon Sep 17 00:00:00 2001 From: Thomas Mendelin <36770664+thomend@users.noreply.github.com> Date: Tue, 17 Dec 2024 16:52:29 +0100 Subject: [PATCH 17/18] Update CHANGELOG.md Co-authored-by: Matthieu Caneill --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c1169aa..4acb659 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ and this project adheres to - Documenting support for python 3.13. (#86) - Only show failing rules per default in `HumanReadableFormatter`. Also added - `show` parameter in the CLI to change this behavior. (#77) + `--show` parameter in the CLI to change this behavior. (#77) ## [0.8.0] - 2024-11-12 From 825c48b2950d821628f03631dc215d5ea2689385 Mon Sep 17 00:00:00 2001 From: Thomas Mendelin <36770664+thomend@users.noreply.github.com> Date: Tue, 17 Dec 2024 15:58:06 +0000 Subject: [PATCH 18/18] run prettier to fix issue in docs --- docs/index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/index.md b/docs/index.md index 7b4baed..1b45586 100644 --- a/docs/index.md +++ b/docs/index.md @@ -22,8 +22,8 @@ Score: 10.0 🥇 In this example, the model `customers` scores the maximum value of `10.0` as it passes all the rules. It also is awarded a golden medal because of the perfect -score. By default a passing model or source with or without rule violations will not be -shown, unless we pass the `--show-all` flag. +score. By default a passing model or source with or without rule violations will +not be shown, unless we pass the `--show-all` flag. ## Philosophy