From 512b04fae9f2f14f9b9870f4c4efc7b969454253 Mon Sep 17 00:00:00 2001
From: Thomas Mendelin <36770664+thomend@users.noreply.github.com>
Date: Tue, 3 Dec 2024 16:21:58 +0000
Subject: [PATCH] changed --show from flag to option

---
 src/dbt_score/cli.py                            | 21 +++++++++++--------
 src/dbt_score/config.py                         |  4 ++--
 .../formatters/human_readable_formatter.py      | 11 +++++-----
 .../test_human_readable_formatter.py            |  4 ++--
 tests/test_cli.py                               |  2 +-
 5 files changed, 22 insertions(+), 20 deletions(-)

diff --git a/src/dbt_score/cli.py b/src/dbt_score/cli.py
index 2dc4f39..7098aa5 100644
--- a/src/dbt_score/cli.py
+++ b/src/dbt_score/cli.py
@@ -94,13 +94,16 @@ def cli() -> None:
     default=None,
 )
 @click.option(
-    "--show-all",
-    help="If set to True, show all models and all rules in output "
+    "--show",
+    help="Type of output which should be shown "
     "when using `plain` as `--format`. "
-    "Default behavior is to only show failing models and violated rules.",
-    type=bool,
-    is_flag=True,
-    default=False,
+    "`all` shows all models and all rules. "
+    "`failing-models` shows failing rules of failing models. "
+    "`failing-rules` shows failing rules of all models. "
+    "Default behavior is to show violated rules of all models.",
+    type=click.Choice(["all", "failing-models", "failing-rules"]),
+    is_flag=False,
+    default="failing-rules",
 )
 @click.pass_context
 def lint(  # noqa: PLR0913, C901
@@ -113,7 +116,7 @@ def lint(  # noqa: PLR0913, C901
     run_dbt_parse: bool,
     fail_project_under: float,
     fail_any_model_under: float,
-    show_all: bool,
+    show: Literal["all", "failing-models", "failing-rules"],
 ) -> None:
     """Lint dbt models metadata."""
     manifest_provided = (
@@ -133,8 +136,8 @@ def lint(  # noqa: PLR0913, C901
         config.overload({"fail_project_under": fail_project_under})
     if fail_any_model_under:
         config.overload({"fail_any_model_under": fail_any_model_under})
-    if show_all:
-        config.overload({"show_all": show_all})
+    if show:
+        config.overload({"show": show})
     try:
         if run_dbt_parse:
             dbt_parse()
diff --git a/src/dbt_score/config.py b/src/dbt_score/config.py
index 66335de..4a59779 100644
--- a/src/dbt_score/config.py
+++ b/src/dbt_score/config.py
@@ -57,7 +57,7 @@ class Config:
         "inject_cwd_in_python_path",
         "fail_project_under",
         "fail_any_model_under",
-        "show_all",
+        "show",
     ]
     _rules_section: Final[str] = "rules"
     _badges_section: Final[str] = "badges"
@@ -72,7 +72,7 @@ def __init__(self) -> None:
         self.badge_config: BadgeConfig = BadgeConfig()
         self.fail_project_under: float = 5.0
         self.fail_any_model_under: float = 5.0
-        self.show_all: bool = False
+        self.show: str = "failing-rules"
 
     def set_option(self, option: str, value: Any) -> None:
         """Set an option in the config."""
diff --git a/src/dbt_score/formatters/human_readable_formatter.py b/src/dbt_score/formatters/human_readable_formatter.py
index 8cd10d1..555f116 100644
--- a/src/dbt_score/formatters/human_readable_formatter.py
+++ b/src/dbt_score/formatters/human_readable_formatter.py
@@ -33,18 +33,17 @@ def model_evaluated(
         """Callback when a model has been evaluated."""
         if score.value < self._config.fail_any_model_under:
             self._failed_models.append((model, score))
-        if (
-            score.value < self._config.fail_any_model_under
-            or any(isinstance(result, RuleViolation) for result in results.values())
-            or self._config.show_all
-        ):
+        if score.value < self._config.fail_any_model_under or self._config.show in [
+            "all",
+            "failing-rules",
+        ]:
             print(
                 f"{score.badge} {self.bold(model.name)} "
                 f"(score: {score.rounded_value!s})"
             )
             for rule, result in results.items():
                 if result is None:
-                    if self._config.show_all:
+                    if self._config.show in ["all", "failing-models"]:
                         print(f"{self.indent}{self.label_ok} {rule.source()}")
                 elif isinstance(result, RuleViolation):
                     print(
diff --git a/tests/formatters/test_human_readable_formatter.py b/tests/formatters/test_human_readable_formatter.py
index 43cb728..6dbfb8f 100644
--- a/tests/formatters/test_human_readable_formatter.py
+++ b/tests/formatters/test_human_readable_formatter.py
@@ -46,7 +46,7 @@ def test_human_readable_formatter_model_show_all(
     rule_severity_critical,
 ):
     """Ensure the formatter has the correct output after model evaluation."""
-    default_config.overload({"show_all": True})
+    default_config.overload({"show": "all"})
     formatter = HumanReadableFormatter(
         manifest_loader=manifest_loader, config=default_config
     )
@@ -88,7 +88,7 @@ def test_human_readable_formatter_near_perfect_model_score(
     rule_severity_critical,
 ):
     """Ensure the formatter has the correct output after model evaluation."""
-    default_config.overload({"show_all": True})
+    default_config.overload({"show": "all"})
     formatter = HumanReadableFormatter(
         manifest_loader=manifest_loader, config=default_config
     )
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 8568359..9571c17 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -21,7 +21,7 @@ def test_lint_existing_manifest(manifest_path):
     """Test lint with an existing manifest."""
     with patch("dbt_score.cli.Config._load_toml_file"):
         runner = CliRunner()
-        result = runner.invoke(lint, ["--manifest", manifest_path, "--show-all"])
+        result = runner.invoke(lint, ["--manifest", manifest_path, "--show", "all"])
         assert "model1" in result.output
         assert "model2" in result.output
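
A minimal usage sketch of the new option, mirroring the updated test in tests/test_cli.py. It assumes a dbt manifest already exists; the path "target/manifest.json" is a placeholder, and Config._load_toml_file is patched only to skip reading a local pyproject.toml, as the test does.

    from unittest.mock import patch

    from click.testing import CliRunner

    from dbt_score.cli import lint

    # "--show" accepts one of: "all", "failing-models", "failing-rules" (the default).
    with patch("dbt_score.cli.Config._load_toml_file"):
        runner = CliRunner()
        result = runner.invoke(
            lint, ["--manifest", "target/manifest.json", "--show", "all"]
        )
    print(result.output)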