From 2ba139a3a8b38438301a22d8a05a82ef07a39b36 Mon Sep 17 00:00:00 2001 From: "Rob.Sanderson" Date: Sat, 15 Jun 2024 21:10:57 +0100 Subject: [PATCH 1/7] feat: add fail_project_under and fail_any_model_under options --- docs/configuration.md | 8 ++++++- pyproject.toml | 2 +- src/dbt_score/cli.py | 37 +++++++++++++++++++++++++++++++- src/dbt_score/config.py | 4 ++++ src/dbt_score/evaluation.py | 4 ++++ src/dbt_score/lint.py | 4 +++- tests/resources/pyproject.toml | 2 ++ tests/test_cli.py | 39 ++++++++++++++++++++++++++++++++-- tests/test_config.py | 1 + tests/test_evaluation.py | 29 +++++++++++++++++++------ 10 files changed, 118 insertions(+), 12 deletions(-) diff --git a/docs/configuration.md b/docs/configuration.md index 545be55..404b01d 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -17,6 +17,8 @@ below: rule_namespaces = ["dbt_score.rules", "dbt_score_rules", "custom_rules"] disabled_rules = ["dbt_score.rules.generic.columns_have_description"] inject_cwd_in_python_path = true +fail_project_under = 7.5 +fail_any_model_under = 8.0 [tool.dbt-score.badges] first.threshold = 10.0 @@ -47,6 +49,10 @@ The following options can be set in the `pyproject.toml` file: this setting, that the default rules are in `dbt_score.rules` and are disabled if not included here. - `disabled_rules`: A list of rules to disable. +- `fail_project_under`: If the project score is below this value the command + will fail with return code 1 +- `fail_any_model_under`: If ay model scores below this value the command will + fail with return code 1 #### Badges configuration @@ -94,5 +100,5 @@ Many configuration options can also be set via the command line. To understand how to configure `dbt-score` from the command line: ```bash -dbt score lint --help +dbt-score lint --help ``` diff --git a/pyproject.toml b/pyproject.toml index 788f499..02d9252 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -96,7 +96,7 @@ max-complexity = 10 convention = "google" [tool.ruff.lint.pylint] -max-args = 8 +max-args = 9 [tool.ruff.lint.per-file-ignores] "tests/**/*.py" = [ diff --git a/src/dbt_score/cli.py b/src/dbt_score/cli.py index 4a3a017..4bb8caf 100644 --- a/src/dbt_score/cli.py +++ b/src/dbt_score/cli.py @@ -78,6 +78,20 @@ def cli() -> None: is_flag=True, default=False, ) +@click.option( + "--fail_project_under", + help="Fail if the project score is under this value", + type=float, + is_flag=False, + default=None, +) +@click.option( + "--fail_any_model_under", + help="Fail if any model is under this value", + type=float, + is_flag=False, + default=None, +) @click.pass_context def lint( ctx: click.Context, @@ -87,6 +101,8 @@ def lint( disabled_rule: list[str], manifest: Path, run_dbt_parse: bool, + fail_project_under: float, + fail_any_model_under: float, ) -> None: """Lint dbt models metadata.""" manifest_provided = ( @@ -102,14 +118,33 @@ def lint( config.overload({"rule_namespaces": namespace}) if disabled_rule: config.overload({"disabled_rules": disabled_rule}) + if fail_project_under: + config.overload({"fail_project_under": fail_project_under}) + if fail_any_model_under: + config.overload({"fail_any_model_under": fail_any_model_under}) if run_dbt_parse: dbt_parse() try: - lint_dbt_project( + evaluation = lint_dbt_project( manifest_path=manifest, config=config, format=format, select=select ) + + if evaluation.min_model_score < config.fail_any_model_under: + logger.error( + f"Individual model score {round(evaluation.min_model_score,1)} is less " + f"than `fail_any_model_under` setting of 
{config.fail_any_model_under}" + ) + ctx.exit(1) + + if evaluation.project_score.value < config.fail_project_under: + logger.error( + f"Project score {round(evaluation.project_score.value,1)} is less " + f"than `fail_project_under` setting of {config.fail_project_under}" + ) + ctx.exit(1) + except FileNotFoundError: logger.error( "dbt's manifest.json could not be found. If you're in a dbt project, be " diff --git a/src/dbt_score/config.py b/src/dbt_score/config.py index e9eb118..63aa99a 100644 --- a/src/dbt_score/config.py +++ b/src/dbt_score/config.py @@ -55,6 +55,8 @@ class Config: "rule_namespaces", "disabled_rules", "inject_cwd_in_python_path", + "fail_project_under", + "fail_any_model_under", ] _rules_section: Final[str] = "rules" _badges_section: Final[str] = "badges" @@ -67,6 +69,8 @@ def __init__(self) -> None: self.rules_config: dict[str, RuleConfig] = {} self.config_file: Path | None = None self.badge_config: BadgeConfig = BadgeConfig() + self.fail_project_under: float = 0.0 + self.fail_any_model_under: float = 0.0 def set_option(self, option: str, value: Any) -> None: """Set an option in the config.""" diff --git a/src/dbt_score/evaluation.py b/src/dbt_score/evaluation.py index 41b5e0f..603f559 100644 --- a/src/dbt_score/evaluation.py +++ b/src/dbt_score/evaluation.py @@ -49,6 +49,9 @@ def __init__( # The aggregated project score self.project_score: Score + # The minimum model score, starts at the maximum of 10 + self.min_model_score: float = 10 + def evaluate(self) -> None: """Evaluate all rules.""" rules = self._rule_registry.rules.values() @@ -67,6 +70,7 @@ def evaluate(self) -> None: self._formatter.model_evaluated( model, self.results[model], self.scores[model] ) + self.min_model_score = min(self.min_model_score, self.scores[model].value) # Compute score for project self.project_score = self._scorer.score_aggregate_models( diff --git a/src/dbt_score/lint.py b/src/dbt_score/lint.py index 28ff0c0..e650232 100644 --- a/src/dbt_score/lint.py +++ b/src/dbt_score/lint.py @@ -18,7 +18,7 @@ def lint_dbt_project( config: Config, format: Literal["plain", "manifest", "ascii"], select: Iterable[str] | None = None, -) -> None: +) -> Evaluation: """Lint dbt manifest.""" if not manifest_path.exists(): raise FileNotFoundError(f"Manifest not found at {manifest_path}.") @@ -44,3 +44,5 @@ def lint_dbt_project( scorer=scorer, ) evaluation.evaluate() + + return evaluation diff --git a/tests/resources/pyproject.toml b/tests/resources/pyproject.toml index b317d90..eb598f3 100644 --- a/tests/resources/pyproject.toml +++ b/tests/resources/pyproject.toml @@ -1,6 +1,8 @@ [tool.dbt-score] rule_namespaces = ["foo", "tests"] disabled_rules = ["foo.foo", "tests.bar"] +fail_project_under = 7.5 +fail_any_model_under = 6.9 [tool.dbt-score.badges] wip.icon = "🏗️" diff --git a/tests/test_cli.py b/tests/test_cli.py index cb50f19..d598cb5 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -1,5 +1,5 @@ """Test the CLI.""" - +import logging from unittest.mock import patch from click.testing import CliRunner @@ -20,7 +20,13 @@ def test_lint_existing_manifest(manifest_path): """Test lint with an existing manifest.""" with patch("dbt_score.cli.Config._load_toml_file"): runner = CliRunner() - result = runner.invoke(lint, ["--manifest", manifest_path]) + result = runner.invoke( + lint, + [ + "--manifest", + manifest_path, + ], + ) assert "model1" in result.output assert "model2" in result.output @@ -45,3 +51,32 @@ def test_lint_non_existing_manifest(caplog): assert result.exit_code == 2 assert "dbt's 
manifest.json could not be found" in caplog.text + + +def test_fail_project_under(manifest_path): + """Test `fail_project_under`.""" + with patch("dbt_score.cli.Config._load_toml_file"): + runner = CliRunner() + result = runner.invoke( + lint, ["--manifest", manifest_path, "--fail_project_under", "10.0"] + ) + + assert "model1" in result.output + assert "model2" in result.output + assert result.exit_code == 1 + + +def test_fail_any_model_under(manifest_path, caplog): + """Test `fail_any_model_under`.""" + caplog.set_level(logging.ERROR) + + with patch("dbt_score.cli.Config._load_toml_file"): + runner = CliRunner() + result = runner.invoke( + lint, ["--manifest", manifest_path, "--fail_any_model_under", "10.0"] + ) + + assert "model1" in result.output + assert "model2" in result.output + assert "Individual model score" in caplog.text + assert result.exit_code == 1 diff --git a/tests/test_config.py b/tests/test_config.py index d70ad73..8638cb9 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -25,6 +25,7 @@ def test_load_valid_toml_file(valid_config_path): assert config.badge_config.third.icon == "3️⃣" assert config.badge_config.second.icon == "2️⃣" assert config.badge_config.first.icon == "1️⃣" + assert config.fail_project_under == 7.5 def test_load_invalid_toml_file(caplog, invalid_config_path): diff --git a/tests/test_evaluation.py b/tests/test_evaluation.py index ae5cac1..a7e7a4e 100644 --- a/tests/test_evaluation.py +++ b/tests/test_evaluation.py @@ -7,6 +7,7 @@ from dbt_score.models import ManifestLoader from dbt_score.rule import RuleViolation from dbt_score.rule_registry import RuleRegistry +from dbt_score.scoring import Score def test_evaluation_low_medium_high( @@ -28,6 +29,9 @@ def test_evaluation_low_medium_high( rule_registry._add_rule(rule_severity_high) rule_registry._add_rule(rule_error) + # Ensure we get a valid Score object from the Mock + mock_scorer.score_model.return_value = Score(10, "🥇") + evaluation = Evaluation( rule_registry=rule_registry, manifest_loader=manifest_loader, @@ -66,12 +70,17 @@ def test_evaluation_critical( rule_registry._add_rule(rule_severity_low) rule_registry._add_rule(rule_severity_critical) + mock_formatter = Mock() + mock_scorer = Mock() + mock_scorer.score_model.return_value = Score(10, "🥇") + evaluation = Evaluation( rule_registry=rule_registry, manifest_loader=manifest_loader, - formatter=Mock(), - scorer=Mock(), + formatter=mock_formatter, + scorer=mock_scorer, ) + evaluation.evaluate() model2 = manifest_loader.models[1] @@ -85,11 +94,15 @@ def test_evaluation_no_rule(manifest_path, default_config): rule_registry = RuleRegistry(default_config) + mock_formatter = Mock() + mock_scorer = Mock() + mock_scorer.score_model.return_value = Score(10, "🥇") + evaluation = Evaluation( rule_registry=rule_registry, manifest_loader=manifest_loader, - formatter=Mock(), - scorer=Mock(), + formatter=mock_formatter, + scorer=mock_scorer, ) evaluation.evaluate() @@ -148,11 +161,15 @@ def test_evaluation_rule_with_config( rule_registry = RuleRegistry(config) rule_registry._add_rule(rule_with_config) + mock_formatter = Mock() + mock_scorer = Mock() + mock_scorer.score_model.return_value = Score(10, "🥇") + evaluation = Evaluation( rule_registry=rule_registry, manifest_loader=manifest_loader, - formatter=Mock(), - scorer=Mock(), + formatter=mock_formatter, + scorer=mock_scorer, ) evaluation.evaluate() From 79026e69be5deb9bb3f1825ca2ecceb17a98de92 Mon Sep 17 00:00:00 2001 From: "Rob.Sanderson" Date: Mon, 17 Jun 2024 10:42:53 +0100 Subject: [PATCH 2/7] 
chore: address pull request comments --- src/dbt_score/evaluation.py | 2 +- tests/test_cli.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/dbt_score/evaluation.py b/src/dbt_score/evaluation.py index 603f559..a5d59d2 100644 --- a/src/dbt_score/evaluation.py +++ b/src/dbt_score/evaluation.py @@ -50,7 +50,7 @@ def __init__( self.project_score: Score # The minimum model score, starts at the maximum of 10 - self.min_model_score: float = 10 + self.min_model_score: float = 10.0 def evaluate(self) -> None: """Evaluate all rules.""" diff --git a/tests/test_cli.py b/tests/test_cli.py index d598cb5..04d7ec4 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -1,4 +1,5 @@ """Test the CLI.""" + import logging from unittest.mock import patch From f1631c00c753d44caf7c34f502ea7acb3cc13530 Mon Sep 17 00:00:00 2001 From: "Rob.Sanderson" Date: Tue, 18 Jun 2024 08:44:22 +0100 Subject: [PATCH 3/7] chore: amend error messaging and display failed model list --- pyproject.toml | 2 +- src/dbt_score/cli.py | 20 ++++++++++--------- src/dbt_score/config.py | 4 ++-- src/dbt_score/evaluation.py | 4 ---- .../formatters/human_readable_formatter.py | 1 + .../test_human_readable_formatter.py | 2 +- tests/test_cli.py | 8 +++----- 7 files changed, 19 insertions(+), 22 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 02d9252..4fe444f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -90,7 +90,7 @@ extend-select = [ ] [tool.ruff.lint.mccabe] -max-complexity = 10 +max-complexity = 11 [tool.ruff.lint.pydocstyle] convention = "google" diff --git a/src/dbt_score/cli.py b/src/dbt_score/cli.py index 05c2d8a..184dabe 100644 --- a/src/dbt_score/cli.py +++ b/src/dbt_score/cli.py @@ -131,18 +131,20 @@ def lint( manifest_path=manifest, config=config, format=format, select=select ) - if evaluation.min_model_score < config.fail_any_model_under: - logger.error( - f"Individual model score {round(evaluation.min_model_score,1)} is less " - f"than `fail_any_model_under` setting of {config.fail_any_model_under}" - ) + failed_models = { + model.unique_id: score.value + for model, score in evaluation.scores.items() + if (score.value < config.fail_any_model_under) + } + + if failed_models: + print(f"Error: fail_any_model_under = {config.fail_any_model_under}") + for model, value in failed_models.items(): + print(f"Model {model} scored {round(value,1)}") ctx.exit(1) if evaluation.project_score.value < config.fail_project_under: - logger.error( - f"Project score {round(evaluation.project_score.value,1)} is less " - f"than `fail_project_under` setting of {config.fail_project_under}" - ) + print(f"Error: fail_project_under = {evaluation.project_score.value}") ctx.exit(1) except FileNotFoundError: diff --git a/src/dbt_score/config.py b/src/dbt_score/config.py index 63aa99a..4a4ddf5 100644 --- a/src/dbt_score/config.py +++ b/src/dbt_score/config.py @@ -69,8 +69,8 @@ def __init__(self) -> None: self.rules_config: dict[str, RuleConfig] = {} self.config_file: Path | None = None self.badge_config: BadgeConfig = BadgeConfig() - self.fail_project_under: float = 0.0 - self.fail_any_model_under: float = 0.0 + self.fail_project_under: float = 5.0 + self.fail_any_model_under: float = 5.0 def set_option(self, option: str, value: Any) -> None: """Set an option in the config.""" diff --git a/src/dbt_score/evaluation.py b/src/dbt_score/evaluation.py index a5d59d2..41b5e0f 100644 --- a/src/dbt_score/evaluation.py +++ b/src/dbt_score/evaluation.py @@ -49,9 +49,6 @@ def __init__( # The aggregated project score 
        self.project_score: Score
 
-        # The minimum model score, starts at the maximum of 10
-        self.min_model_score: float = 10.0
-
     def evaluate(self) -> None:
         """Evaluate all rules."""
         rules = self._rule_registry.rules.values()
@@ -70,7 +67,6 @@ def evaluate(self) -> None:
             self._formatter.model_evaluated(
                 model, self.results[model], self.scores[model]
             )
-            self.min_model_score = min(self.min_model_score, self.scores[model].value)
 
         # Compute score for project
         self.project_score = self._scorer.score_aggregate_models(
diff --git a/src/dbt_score/formatters/human_readable_formatter.py b/src/dbt_score/formatters/human_readable_formatter.py
index 594b0ba..e1d6dba 100644
--- a/src/dbt_score/formatters/human_readable_formatter.py
+++ b/src/dbt_score/formatters/human_readable_formatter.py
@@ -43,3 +43,4 @@ def model_evaluated(
     def project_evaluated(self, score: Score) -> None:
         """Callback when a project has been evaluated."""
         print(f"Project score: {self.bold(str(round(score.value, 1)))} {score.badge}")
+        print()
diff --git a/tests/formatters/test_human_readable_formatter.py b/tests/formatters/test_human_readable_formatter.py
index f86ee7c..1eaf3a4 100644
--- a/tests/formatters/test_human_readable_formatter.py
+++ b/tests/formatters/test_human_readable_formatter.py
@@ -40,4 +40,4 @@ def test_human_readable_formatter_project(capsys, manifest_loader):
     formatter = HumanReadableFormatter(manifest_loader=manifest_loader)
     formatter.project_evaluated(Score(10.0, "🥇"))
     stdout = capsys.readouterr().out
-    assert stdout == "Project score: \x1B[1m10.0\x1B[0m 🥇\n"
+    assert stdout == "Project score: \x1B[1m10.0\x1B[0m 🥇\n\n"
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 04d7ec4..f9b83c1 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -1,6 +1,5 @@
 """Test the CLI."""
 
-import logging
 from unittest.mock import patch
 
 from click.testing import CliRunner
@@ -64,13 +63,12 @@ def test_fail_project_under(manifest_path):
     assert "model1" in result.output
     assert "model2" in result.output
+    assert "Error: fail_project_under" in result.stdout
     assert result.exit_code == 1
 
 
-def test_fail_any_model_under(manifest_path, caplog):
+def test_fail_any_model_under(manifest_path):
     """Test `fail_any_model_under`."""
-    caplog.set_level(logging.ERROR)
-
     with patch("dbt_score.cli.Config._load_toml_file"):
         runner = CliRunner()
         result = runner.invoke(
             lint, ["--manifest", manifest_path, "--fail_any_model_under", "10.0"]
         )
 
     assert "model1" in result.output
     assert "model2" in result.output
-    assert "Individual model score" in caplog.text
+    assert "Error: fail_any_model_under" in result.stdout
     assert result.exit_code == 1

From 5ee77972e0d2b20098acf713c52ecabcab947c4d Mon Sep 17 00:00:00 2001
From: "Rob.Sanderson"
Date: Wed, 19 Jun 2024 13:00:54 +0100
Subject: [PATCH 4/7] chore: addressed minor pr comments

---
 CHANGELOG.md          | 7 +++++++
 docs/configuration.md | 4 ++--
 src/dbt_score/cli.py  | 4 ++--
 3 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1ab3397..9f47610 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,13 @@ and this project adheres to
 
 ## [Unreleased]
 
+### Added
+
+- Add `fail_project_under` configuration
+- Add `fail_any_model_under` configuration
+- **Breaking:** the default values of `5.0` for `fail_project_under` and
+  `fail_any_model_under` will cause the command to exit with return code 1
+
 ## [0.2.1] - 2023-06-17
 
 - Lint the current dbt project only, not including the imported models. 
diff --git a/docs/configuration.md b/docs/configuration.md index 404b01d..1d3870b 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -50,9 +50,9 @@ The following options can be set in the `pyproject.toml` file: if not included here. - `disabled_rules`: A list of rules to disable. - `fail_project_under`: If the project score is below this value the command - will fail with return code 1 + will fail with return code 1. - `fail_any_model_under`: If ay model scores below this value the command will - fail with return code 1 + fail with return code 1. #### Badges configuration diff --git a/src/dbt_score/cli.py b/src/dbt_score/cli.py index 184dabe..7afe478 100644 --- a/src/dbt_score/cli.py +++ b/src/dbt_score/cli.py @@ -80,14 +80,14 @@ def cli() -> None: ) @click.option( "--fail_project_under", - help="Fail if the project score is under this value", + help="Fail if the project score is under this value.", type=float, is_flag=False, default=None, ) @click.option( "--fail_any_model_under", - help="Fail if any model is under this value", + help="Fail if any model is under this value.", type=float, is_flag=False, default=None, From 62234b68df9d5f037e382a48fbcab505625ebf83 Mon Sep 17 00:00:00 2001 From: Matthieu Caneill Date: Thu, 20 Jun 2024 12:51:25 +0200 Subject: [PATCH 5/7] Move model/project failure output to formatter --- src/dbt_score/cli.py | 22 ++---- src/dbt_score/formatters/__init__.py | 4 +- .../formatters/human_readable_formatter.py | 26 ++++++- src/dbt_score/lint.py | 2 +- tests/formatters/test_ascii_formatter.py | 7 +- .../test_human_readable_formatter.py | 74 ++++++++++++++++++- tests/formatters/test_manifest_formatter.py | 10 ++- tests/test_cli.py | 12 +-- tests/test_config.py | 1 + 9 files changed, 121 insertions(+), 37 deletions(-) diff --git a/src/dbt_score/cli.py b/src/dbt_score/cli.py index 7afe478..9694b5e 100644 --- a/src/dbt_score/cli.py +++ b/src/dbt_score/cli.py @@ -131,22 +131,6 @@ def lint( manifest_path=manifest, config=config, format=format, select=select ) - failed_models = { - model.unique_id: score.value - for model, score in evaluation.scores.items() - if (score.value < config.fail_any_model_under) - } - - if failed_models: - print(f"Error: fail_any_model_under = {config.fail_any_model_under}") - for model, value in failed_models.items(): - print(f"Model {model} scored {round(value,1)}") - ctx.exit(1) - - if evaluation.project_score.value < config.fail_project_under: - print(f"Error: fail_project_under = {evaluation.project_score.value}") - ctx.exit(1) - except FileNotFoundError: logger.error( "dbt's manifest.json could not be found. 
If you're in a dbt project, be " @@ -154,6 +138,12 @@ def lint( ) ctx.exit(2) + if ( + any(x.value < config.fail_any_model_under for x in evaluation.scores.values()) + or evaluation.project_score.value < config.fail_project_under + ): + ctx.exit(1) + @cli.command(name="list") @click.option( diff --git a/src/dbt_score/formatters/__init__.py b/src/dbt_score/formatters/__init__.py index 09466e1..ff37429 100644 --- a/src/dbt_score/formatters/__init__.py +++ b/src/dbt_score/formatters/__init__.py @@ -5,6 +5,7 @@ import typing from abc import ABC, abstractmethod +from dbt_score.config import Config from dbt_score.scoring import Score if typing.TYPE_CHECKING: @@ -15,9 +16,10 @@ class Formatter(ABC): """Abstract class to define a formatter.""" - def __init__(self, manifest_loader: ManifestLoader): + def __init__(self, manifest_loader: ManifestLoader, config: Config): """Instantiate a formatter.""" self._manifest_loader = manifest_loader + self._config = config @abstractmethod def model_evaluated( diff --git a/src/dbt_score/formatters/human_readable_formatter.py b/src/dbt_score/formatters/human_readable_formatter.py index e1d6dba..328a01e 100644 --- a/src/dbt_score/formatters/human_readable_formatter.py +++ b/src/dbt_score/formatters/human_readable_formatter.py @@ -1,6 +1,8 @@ """Human readable formatter.""" +from typing import Any + from dbt_score.evaluation import ModelResultsType from dbt_score.formatters import Formatter from dbt_score.models import Model @@ -16,6 +18,11 @@ class HumanReadableFormatter(Formatter): label_warning = "\033[1;33mWARN\033[0m" label_error = "\033[1;31mERR \033[0m" + def __init__(self, *args: Any, **kwargs: Any): + """Instantiate formatter.""" + super().__init__(*args, **kwargs) + self._failed_models: list[tuple[Model, Score]] = [] + @staticmethod def bold(text: str) -> str: """Return text in bold.""" @@ -25,6 +32,8 @@ def model_evaluated( self, model: Model, results: ModelResultsType, score: Score ) -> None: """Callback when a model has been evaluated.""" + if score.value < self._config.fail_any_model_under: + self._failed_models.append((model, score)) print( f"{score.badge} {self.bold(model.name)} (score: {round(score.value, 1)!s})" ) @@ -43,4 +52,19 @@ def model_evaluated( def project_evaluated(self, score: Score) -> None: """Callback when a project has been evaluated.""" print(f"Project score: {self.bold(str(round(score.value, 1)))} {score.badge}") - print() + + if len(self._failed_models) > 0: + print() + print( + f"Error: model score too low, fail_any_model_under = " + f"{self._config.fail_any_model_under}" + ) + for model, score in self._failed_models: + print(f"Model {model.name} scored {round(score.value, 1)}") + + elif score.value < self._config.fail_project_under: + print() + print( + f"Error: project score too low, fail_project_under = " + f"{self._config.fail_project_under}" + ) diff --git a/src/dbt_score/lint.py b/src/dbt_score/lint.py index e650232..5b6d95f 100644 --- a/src/dbt_score/lint.py +++ b/src/dbt_score/lint.py @@ -33,7 +33,7 @@ def lint_dbt_project( "manifest": ManifestFormatter, "ascii": ASCIIFormatter, } - formatter = formatters[format](manifest_loader=manifest_loader) + formatter = formatters[format](manifest_loader=manifest_loader, config=config) scorer = Scorer(config) diff --git a/tests/formatters/test_ascii_formatter.py b/tests/formatters/test_ascii_formatter.py index a53c9ca..f76cabb 100644 --- a/tests/formatters/test_ascii_formatter.py +++ b/tests/formatters/test_ascii_formatter.py @@ -9,6 +9,7 @@ def test_ascii_formatter_model( capsys, 
+ default_config, manifest_loader, model1, rule_severity_low, @@ -16,7 +17,7 @@ def test_ascii_formatter_model( rule_severity_critical, ): """Ensure the formatter doesn't write anything after model evaluation.""" - formatter = ASCIIFormatter(manifest_loader=manifest_loader) + formatter = ASCIIFormatter(manifest_loader=manifest_loader, config=default_config) results: dict[Type[Rule], RuleViolation | Exception | None] = { rule_severity_low: None, rule_severity_medium: Exception("Oh noes"), @@ -27,9 +28,9 @@ def test_ascii_formatter_model( assert stdout == "" -def test_ascii_formatter_project(capsys, manifest_loader): +def test_ascii_formatter_project(capsys, default_config, manifest_loader): """Ensure the formatter has the correct output after project evaluation.""" - formatter = ASCIIFormatter(manifest_loader=manifest_loader) + formatter = ASCIIFormatter(manifest_loader=manifest_loader, config=default_config) formatter.project_evaluated(Score(10.0, "🥇")) stdout_gold = capsys.readouterr().out diff --git a/tests/formatters/test_human_readable_formatter.py b/tests/formatters/test_human_readable_formatter.py index 1eaf3a4..295e804 100644 --- a/tests/formatters/test_human_readable_formatter.py +++ b/tests/formatters/test_human_readable_formatter.py @@ -9,6 +9,7 @@ def test_human_readable_formatter_model( capsys, + default_config, manifest_loader, model1, rule_severity_low, @@ -16,7 +17,9 @@ def test_human_readable_formatter_model( rule_severity_critical, ): """Ensure the formatter has the correct output after model evaluation.""" - formatter = HumanReadableFormatter(manifest_loader=manifest_loader) + formatter = HumanReadableFormatter( + manifest_loader=manifest_loader, config=default_config + ) results: dict[Type[Rule], RuleViolation | Exception | None] = { rule_severity_low: None, rule_severity_medium: Exception("Oh noes"), @@ -35,9 +38,72 @@ def test_human_readable_formatter_model( ) -def test_human_readable_formatter_project(capsys, manifest_loader): +def test_human_readable_formatter_project(capsys, default_config, manifest_loader): """Ensure the formatter has the correct output after project evaluation.""" - formatter = HumanReadableFormatter(manifest_loader=manifest_loader) + formatter = HumanReadableFormatter( + manifest_loader=manifest_loader, config=default_config + ) formatter.project_evaluated(Score(10.0, "🥇")) stdout = capsys.readouterr().out - assert stdout == "Project score: \x1B[1m10.0\x1B[0m 🥇\n\n" + assert stdout == "Project score: \x1B[1m10.0\x1B[0m 🥇\n" + + +def test_human_readable_formatter_low_model_score( + capsys, + default_config, + manifest_loader, + model1, + rule_severity_critical, +): + """Ensure the formatter has the correct output when a model has a low score.""" + formatter = HumanReadableFormatter( + manifest_loader=manifest_loader, config=default_config + ) + results: dict[Type[Rule], RuleViolation | Exception | None] = { + rule_severity_critical: RuleViolation("Error"), + } + formatter.model_evaluated(model1, results, Score(0.0, "🚧")) + formatter.project_evaluated(Score(0.0, "🚧")) + stdout = capsys.readouterr().out + print() + assert ( + stdout + == """🚧 \x1B[1mmodel1\x1B[0m (score: 0.0) + \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error + +Project score: \x1B[1m0.0\x1B[0m 🚧 + +Error: model score too low, fail_any_model_under = 5.0 +Model model1 scored 0.0 +""" + ) + + +def test_human_readable_formatter_low_project_score( + capsys, + default_config, + manifest_loader, + model1, + rule_severity_critical, +): + """Ensure the formatter 
has the correct output when the projet has a low score.""" + formatter = HumanReadableFormatter( + manifest_loader=manifest_loader, config=default_config + ) + results: dict[Type[Rule], RuleViolation | Exception | None] = { + rule_severity_critical: RuleViolation("Error"), + } + formatter.model_evaluated(model1, results, Score(10.0, "🥇")) + formatter.project_evaluated(Score(0.0, "🚧")) + stdout = capsys.readouterr().out + print() + assert ( + stdout + == """🥇 \x1B[1mmodel1\x1B[0m (score: 10.0) + \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error + +Project score: \x1B[1m0.0\x1B[0m 🚧 + +Error: project score too low, fail_project_under = 5.0 +""" + ) diff --git a/tests/formatters/test_manifest_formatter.py b/tests/formatters/test_manifest_formatter.py index fa2bd0f..56c7daa 100644 --- a/tests/formatters/test_manifest_formatter.py +++ b/tests/formatters/test_manifest_formatter.py @@ -10,6 +10,7 @@ def test_manifest_formatter_model( capsys, + default_config, manifest_loader, model1, rule_severity_low, @@ -17,7 +18,9 @@ def test_manifest_formatter_model( rule_severity_critical, ): """Ensure the formatter has the correct output after model evaluation.""" - formatter = ManifestFormatter(manifest_loader=manifest_loader) + formatter = ManifestFormatter( + manifest_loader=manifest_loader, config=default_config + ) results = { rule_severity_low: None, rule_severity_medium: Exception("Oh noes"), @@ -30,6 +33,7 @@ def test_manifest_formatter_model( def test_manifest_formatter_project( capsys, + default_config, manifest_loader, model1, model2, @@ -38,7 +42,9 @@ def test_manifest_formatter_project( rule_severity_critical, ): """Ensure the formatter has the correct output after project evaluation.""" - formatter = ManifestFormatter(manifest_loader=manifest_loader) + formatter = ManifestFormatter( + manifest_loader=manifest_loader, config=default_config + ) result1: dict[Type[Rule], RuleViolation | Exception | None] = { rule_severity_low: None, rule_severity_medium: Exception("Oh noes"), diff --git a/tests/test_cli.py b/tests/test_cli.py index f9b83c1..68caa40 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -20,13 +20,7 @@ def test_lint_existing_manifest(manifest_path): """Test lint with an existing manifest.""" with patch("dbt_score.cli.Config._load_toml_file"): runner = CliRunner() - result = runner.invoke( - lint, - [ - "--manifest", - manifest_path, - ], - ) + result = runner.invoke(lint, ["--manifest", manifest_path]) assert "model1" in result.output assert "model2" in result.output @@ -63,7 +57,7 @@ def test_fail_project_under(manifest_path): assert "model1" in result.output assert "model2" in result.output - assert "Error: fail_project_under" in result.stdout + assert "Error: project score too low, fail_project_under" in result.stdout assert result.exit_code == 1 @@ -77,5 +71,5 @@ def test_fail_any_model_under(manifest_path): assert "model1" in result.output assert "model2" in result.output - assert "Error: fail_any_model_under" in result.stdout + assert "Error: model score too low, fail_any_model_under" in result.stdout assert result.exit_code == 1 diff --git a/tests/test_config.py b/tests/test_config.py index 8638cb9..14c8392 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -26,6 +26,7 @@ def test_load_valid_toml_file(valid_config_path): assert config.badge_config.second.icon == "2️⃣" assert config.badge_config.first.icon == "1️⃣" assert config.fail_project_under == 7.5 + assert config.fail_any_model_under == 6.9 def 
test_load_invalid_toml_file(caplog, invalid_config_path): From 63a928c9d4713bb90bd687ad16a0d3b9f56322a2 Mon Sep 17 00:00:00 2001 From: Matthieu Caneill Date: Thu, 20 Jun 2024 14:35:41 +0200 Subject: [PATCH 6/7] Update docs --- docs/configuration.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/configuration.md b/docs/configuration.md index 1d3870b..fa4815a 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -49,10 +49,10 @@ The following options can be set in the `pyproject.toml` file: this setting, that the default rules are in `dbt_score.rules` and are disabled if not included here. - `disabled_rules`: A list of rules to disable. -- `fail_project_under`: If the project score is below this value the command - will fail with return code 1. -- `fail_any_model_under`: If ay model scores below this value the command will - fail with return code 1. +- `fail_project_under` (default: `5.0`): If the project score is below this + value the command will fail with return code 1. +- `fail_any_model_under` (default: `5.0`): If ay model scores below this value + the command will fail with return code 1. #### Badges configuration From 1d603a26b617763a7e142eb968b31dab29d8493f Mon Sep 17 00:00:00 2001 From: Matthieu Caneill Date: Thu, 20 Jun 2024 14:39:35 +0200 Subject: [PATCH 7/7] typo --- docs/configuration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/configuration.md b/docs/configuration.md index fa4815a..bbc051b 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -51,7 +51,7 @@ The following options can be set in the `pyproject.toml` file: - `disabled_rules`: A list of rules to disable. - `fail_project_under` (default: `5.0`): If the project score is below this value the command will fail with return code 1. -- `fail_any_model_under` (default: `5.0`): If ay model scores below this value +- `fail_any_model_under` (default: `5.0`): If any model scores below this value the command will fail with return code 1. #### Badges configuration
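
Taken together, the patches above make both thresholds configurable in `pyproject.toml` or on the command line, with CLI values overloading the file-based configuration. As an illustrative sketch only (the threshold values below are arbitrary, not part of the patches), a project could pin both checks like this:

```toml
[tool.dbt-score]
# Exit with return code 1 if the aggregated project score drops below 8.0
# (the patches default this threshold to 5.0).
fail_project_under = 8.0
# Exit with return code 1 if any individual model scores below 6.0
# (also defaulted to 5.0 by the patches).
fail_any_model_under = 6.0
```

The same thresholds can be set ad hoc with `dbt-score lint --fail_project_under 8.0 --fail_any_model_under 6.0`, which takes precedence over the values in `pyproject.toml`.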