diff --git a/src/dbt_score/cli.py b/src/dbt_score/cli.py
index 7afe478..9694b5e 100644
--- a/src/dbt_score/cli.py
+++ b/src/dbt_score/cli.py
@@ -131,22 +131,6 @@ def lint(
             manifest_path=manifest, config=config, format=format, select=select
         )
 
-        failed_models = {
-            model.unique_id: score.value
-            for model, score in evaluation.scores.items()
-            if (score.value < config.fail_any_model_under)
-        }
-
-        if failed_models:
-            print(f"Error: fail_any_model_under = {config.fail_any_model_under}")
-            for model, value in failed_models.items():
-                print(f"Model {model} scored {round(value,1)}")
-            ctx.exit(1)
-
-        if evaluation.project_score.value < config.fail_project_under:
-            print(f"Error: fail_project_under = {evaluation.project_score.value}")
-            ctx.exit(1)
-
     except FileNotFoundError:
         logger.error(
             "dbt's manifest.json could not be found. If you're in a dbt project, be "
@@ -154,6 +138,12 @@ def lint(
         )
         ctx.exit(2)
 
+    if (
+        any(x.value < config.fail_any_model_under for x in evaluation.scores.values())
+        or evaluation.project_score.value < config.fail_project_under
+    ):
+        ctx.exit(1)
+
 
 @cli.command(name="list")
 @click.option(
diff --git a/src/dbt_score/formatters/__init__.py b/src/dbt_score/formatters/__init__.py
index 09466e1..ff37429 100644
--- a/src/dbt_score/formatters/__init__.py
+++ b/src/dbt_score/formatters/__init__.py
@@ -5,6 +5,7 @@
 import typing
 from abc import ABC, abstractmethod
 
+from dbt_score.config import Config
 from dbt_score.scoring import Score
 
 if typing.TYPE_CHECKING:
@@ -15,9 +16,10 @@
 class Formatter(ABC):
     """Abstract class to define a formatter."""
 
-    def __init__(self, manifest_loader: ManifestLoader):
+    def __init__(self, manifest_loader: ManifestLoader, config: Config):
         """Instantiate a formatter."""
         self._manifest_loader = manifest_loader
+        self._config = config
 
     @abstractmethod
     def model_evaluated(
diff --git a/src/dbt_score/formatters/human_readable_formatter.py b/src/dbt_score/formatters/human_readable_formatter.py
index e1d6dba..328a01e 100644
--- a/src/dbt_score/formatters/human_readable_formatter.py
+++ b/src/dbt_score/formatters/human_readable_formatter.py
@@ -1,6 +1,8 @@
 """Human readable formatter."""
 
+from typing import Any
+
 from dbt_score.evaluation import ModelResultsType
 from dbt_score.formatters import Formatter
 from dbt_score.models import Model
@@ -16,6 +18,11 @@ class HumanReadableFormatter(Formatter):
     label_warning = "\033[1;33mWARN\033[0m"
     label_error = "\033[1;31mERR \033[0m"
 
+    def __init__(self, *args: Any, **kwargs: Any):
+        """Instantiate formatter."""
+        super().__init__(*args, **kwargs)
+        self._failed_models: list[tuple[Model, Score]] = []
+
     @staticmethod
     def bold(text: str) -> str:
         """Return text in bold."""
@@ -25,6 +32,8 @@ def model_evaluated(
         self, model: Model, results: ModelResultsType, score: Score
     ) -> None:
         """Callback when a model has been evaluated."""
+        if score.value < self._config.fail_any_model_under:
+            self._failed_models.append((model, score))
         print(
             f"{score.badge} {self.bold(model.name)} (score: {round(score.value, 1)!s})"
         )
@@ -43,4 +52,19 @@ def model_evaluated(
     def project_evaluated(self, score: Score) -> None:
         """Callback when a project has been evaluated."""
         print(f"Project score: {self.bold(str(round(score.value, 1)))} {score.badge}")
-        print()
+
+        if len(self._failed_models) > 0:
+            print()
+            print(
+                f"Error: model score too low, fail_any_model_under = "
+                f"{self._config.fail_any_model_under}"
+            )
+            for model, score in self._failed_models:
+                print(f"Model {model.name} scored {round(score.value, 1)}")
+
+        elif score.value < self._config.fail_project_under:
+            print()
+            print(
+                f"Error: project score too low, fail_project_under = "
+                f"{self._config.fail_project_under}"
+            )
diff --git a/src/dbt_score/lint.py b/src/dbt_score/lint.py
index e650232..5b6d95f 100644
--- a/src/dbt_score/lint.py
+++ b/src/dbt_score/lint.py
@@ -33,7 +33,7 @@ def lint_dbt_project(
         "manifest": ManifestFormatter,
         "ascii": ASCIIFormatter,
     }
-    formatter = formatters[format](manifest_loader=manifest_loader)
+    formatter = formatters[format](manifest_loader=manifest_loader, config=config)
 
     scorer = Scorer(config)
 
diff --git a/tests/formatters/test_ascii_formatter.py b/tests/formatters/test_ascii_formatter.py
index a53c9ca..f76cabb 100644
--- a/tests/formatters/test_ascii_formatter.py
+++ b/tests/formatters/test_ascii_formatter.py
@@ -9,6 +9,7 @@
 
 def test_ascii_formatter_model(
     capsys,
+    default_config,
     manifest_loader,
     model1,
     rule_severity_low,
@@ -16,7 +17,7 @@ def test_ascii_formatter_model(
     rule_severity_critical,
 ):
     """Ensure the formatter doesn't write anything after model evaluation."""
-    formatter = ASCIIFormatter(manifest_loader=manifest_loader)
+    formatter = ASCIIFormatter(manifest_loader=manifest_loader, config=default_config)
     results: dict[Type[Rule], RuleViolation | Exception | None] = {
         rule_severity_low: None,
         rule_severity_medium: Exception("Oh noes"),
@@ -27,9 +28,9 @@ def test_ascii_formatter_model(
     assert stdout == ""
 
 
-def test_ascii_formatter_project(capsys, manifest_loader):
+def test_ascii_formatter_project(capsys, default_config, manifest_loader):
     """Ensure the formatter has the correct output after project evaluation."""
-    formatter = ASCIIFormatter(manifest_loader=manifest_loader)
+    formatter = ASCIIFormatter(manifest_loader=manifest_loader, config=default_config)
     formatter.project_evaluated(Score(10.0, "🥇"))
     stdout_gold = capsys.readouterr().out
diff --git a/tests/formatters/test_human_readable_formatter.py b/tests/formatters/test_human_readable_formatter.py
index 1eaf3a4..295e804 100644
--- a/tests/formatters/test_human_readable_formatter.py
+++ b/tests/formatters/test_human_readable_formatter.py
@@ -9,6 +9,7 @@
 
 def test_human_readable_formatter_model(
     capsys,
+    default_config,
     manifest_loader,
     model1,
     rule_severity_low,
@@ -16,7 +17,9 @@ def test_human_readable_formatter_model(
     rule_severity_critical,
 ):
     """Ensure the formatter has the correct output after model evaluation."""
-    formatter = HumanReadableFormatter(manifest_loader=manifest_loader)
+    formatter = HumanReadableFormatter(
+        manifest_loader=manifest_loader, config=default_config
+    )
     results: dict[Type[Rule], RuleViolation | Exception | None] = {
         rule_severity_low: None,
         rule_severity_medium: Exception("Oh noes"),
@@ -35,9 +38,72 @@ def test_human_readable_formatter_model(
     )
 
 
-def test_human_readable_formatter_project(capsys, manifest_loader):
+def test_human_readable_formatter_project(capsys, default_config, manifest_loader):
     """Ensure the formatter has the correct output after project evaluation."""
-    formatter = HumanReadableFormatter(manifest_loader=manifest_loader)
+    formatter = HumanReadableFormatter(
+        manifest_loader=manifest_loader, config=default_config
+    )
     formatter.project_evaluated(Score(10.0, "🥇"))
     stdout = capsys.readouterr().out
-    assert stdout == "Project score: \x1B[1m10.0\x1B[0m 🥇\n\n"
+    assert stdout == "Project score: \x1B[1m10.0\x1B[0m 🥇\n"
+
+
+def test_human_readable_formatter_low_model_score(
+    capsys,
+    default_config,
+    manifest_loader,
+    model1,
+    rule_severity_critical,
+):
+    """Ensure the formatter has the correct output when a model has a low score."""
+    formatter = HumanReadableFormatter(
+        manifest_loader=manifest_loader, config=default_config
+    )
+    results: dict[Type[Rule], RuleViolation | Exception | None] = {
+        rule_severity_critical: RuleViolation("Error"),
+    }
+    formatter.model_evaluated(model1, results, Score(0.0, "🚧"))
+    formatter.project_evaluated(Score(0.0, "🚧"))
+    stdout = capsys.readouterr().out
+    print()
+    assert (
+        stdout
+        == """🚧 \x1B[1mmodel1\x1B[0m (score: 0.0)
+    \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error
+
+Project score: \x1B[1m0.0\x1B[0m 🚧
+
+Error: model score too low, fail_any_model_under = 5.0
+Model model1 scored 0.0
+"""
+    )
+
+
+def test_human_readable_formatter_low_project_score(
+    capsys,
+    default_config,
+    manifest_loader,
+    model1,
+    rule_severity_critical,
+):
+    """Ensure the formatter has the correct output when the project has a low score."""
+    formatter = HumanReadableFormatter(
+        manifest_loader=manifest_loader, config=default_config
+    )
+    results: dict[Type[Rule], RuleViolation | Exception | None] = {
+        rule_severity_critical: RuleViolation("Error"),
+    }
+    formatter.model_evaluated(model1, results, Score(10.0, "🥇"))
+    formatter.project_evaluated(Score(0.0, "🚧"))
+    stdout = capsys.readouterr().out
+    print()
+    assert (
+        stdout
+        == """🥇 \x1B[1mmodel1\x1B[0m (score: 10.0)
+    \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error
+
+Project score: \x1B[1m0.0\x1B[0m 🚧
+
+Error: project score too low, fail_project_under = 5.0
+"""
+    )
diff --git a/tests/formatters/test_manifest_formatter.py b/tests/formatters/test_manifest_formatter.py
index fa2bd0f..56c7daa 100644
--- a/tests/formatters/test_manifest_formatter.py
+++ b/tests/formatters/test_manifest_formatter.py
@@ -10,6 +10,7 @@
 
 def test_manifest_formatter_model(
     capsys,
+    default_config,
     manifest_loader,
     model1,
     rule_severity_low,
@@ -17,7 +18,9 @@ def test_manifest_formatter_model(
     rule_severity_critical,
 ):
     """Ensure the formatter has the correct output after model evaluation."""
-    formatter = ManifestFormatter(manifest_loader=manifest_loader)
+    formatter = ManifestFormatter(
+        manifest_loader=manifest_loader, config=default_config
+    )
     results = {
         rule_severity_low: None,
         rule_severity_medium: Exception("Oh noes"),
@@ -30,6 +33,7 @@ def test_manifest_formatter_model(
 
 def test_manifest_formatter_project(
     capsys,
+    default_config,
     manifest_loader,
     model1,
     model2,
@@ -38,7 +42,9 @@ def test_manifest_formatter_project(
     rule_severity_critical,
 ):
     """Ensure the formatter has the correct output after project evaluation."""
-    formatter = ManifestFormatter(manifest_loader=manifest_loader)
+    formatter = ManifestFormatter(
+        manifest_loader=manifest_loader, config=default_config
+    )
     result1: dict[Type[Rule], RuleViolation | Exception | None] = {
         rule_severity_low: None,
         rule_severity_medium: Exception("Oh noes"),
diff --git a/tests/test_cli.py b/tests/test_cli.py
index f9b83c1..68caa40 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -20,13 +20,7 @@
 def test_lint_existing_manifest(manifest_path):
     """Test lint with an existing manifest."""
     with patch("dbt_score.cli.Config._load_toml_file"):
         runner = CliRunner()
-        result = runner.invoke(
-            lint,
-            [
-                "--manifest",
-                manifest_path,
-            ],
-        )
+        result = runner.invoke(lint, ["--manifest", manifest_path])
         assert "model1" in result.output
         assert "model2" in result.output
@@ -63,7 +57,7 @@ def test_fail_project_under(manifest_path):
 
         assert "model1" in result.output
         assert "model2" in result.output
-        assert "Error: fail_project_under" in result.stdout
+        assert "Error: project score too low, fail_project_under" in result.stdout
         assert result.exit_code == 1
 
 
@@ -77,5 +71,5 @@ def test_fail_any_model_under(manifest_path):
 
         assert "model1" in result.output
         assert "model2" in result.output
-        assert "Error: fail_any_model_under" in result.stdout
+        assert "Error: model score too low, fail_any_model_under" in result.stdout
         assert result.exit_code == 1
diff --git a/tests/test_config.py b/tests/test_config.py
index 8638cb9..14c8392 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -26,6 +26,7 @@ def test_load_valid_toml_file(valid_config_path):
     assert config.badge_config.second.icon == "2️⃣"
     assert config.badge_config.first.icon == "1️⃣"
     assert config.fail_project_under == 7.5
+    assert config.fail_any_model_under == 6.9
 
 
 def test_load_invalid_toml_file(caplog, invalid_config_path):