Move model/project failure output to formatter
matthieucan committed Jun 20, 2024
1 parent 5ee7797 commit 62234b6
Showing 9 changed files with 121 additions and 37 deletions.
22 changes: 6 additions & 16 deletions src/dbt_score/cli.py
@@ -131,29 +131,19 @@ def lint(
manifest_path=manifest, config=config, format=format, select=select
)

failed_models = {
model.unique_id: score.value
for model, score in evaluation.scores.items()
if (score.value < config.fail_any_model_under)
}

if failed_models:
print(f"Error: fail_any_model_under = {config.fail_any_model_under}")
for model, value in failed_models.items():
print(f"Model {model} scored {round(value,1)}")
ctx.exit(1)

if evaluation.project_score.value < config.fail_project_under:
print(f"Error: fail_project_under = {evaluation.project_score.value}")
ctx.exit(1)

except FileNotFoundError:
logger.error(
"dbt's manifest.json could not be found. If you're in a dbt project, be "
"sure to run 'dbt parse' first, or use the option '--run-dbt-parse'."
)
ctx.exit(2)

if (
any(x.value < config.fail_any_model_under for x in evaluation.scores.values())
or evaluation.project_score.value < config.fail_project_under
):
ctx.exit(1)


@cli.command(name="list")
@click.option(
4 changes: 3 additions & 1 deletion src/dbt_score/formatters/__init__.py
@@ -5,6 +5,7 @@
import typing
from abc import ABC, abstractmethod

from dbt_score.config import Config
from dbt_score.scoring import Score

if typing.TYPE_CHECKING:
@@ -15,9 +16,10 @@
class Formatter(ABC):
"""Abstract class to define a formatter."""

def __init__(self, manifest_loader: ManifestLoader):
def __init__(self, manifest_loader: ManifestLoader, config: Config):
"""Instantiate a formatter."""
self._manifest_loader = manifest_loader
self._config = config

@abstractmethod
def model_evaluated(
26 changes: 25 additions & 1 deletion src/dbt_score/formatters/human_readable_formatter.py
@@ -1,6 +1,8 @@
"""Human readable formatter."""


from typing import Any

from dbt_score.evaluation import ModelResultsType
from dbt_score.formatters import Formatter
from dbt_score.models import Model
@@ -16,6 +18,11 @@ class HumanReadableFormatter(Formatter):
label_warning = "\033[1;33mWARN\033[0m"
label_error = "\033[1;31mERR \033[0m"

def __init__(self, *args: Any, **kwargs: Any):
"""Instantiate formatter."""
super().__init__(*args, **kwargs)
self._failed_models: list[tuple[Model, Score]] = []

@staticmethod
def bold(text: str) -> str:
"""Return text in bold."""
@@ -25,6 +32,8 @@ def model_evaluated(
self, model: Model, results: ModelResultsType, score: Score
) -> None:
"""Callback when a model has been evaluated."""
if score.value < self._config.fail_any_model_under:
self._failed_models.append((model, score))
print(
f"{score.badge} {self.bold(model.name)} (score: {round(score.value, 1)!s})"
)
@@ -43,4 +52,19 @@ def model_evaluated(
def project_evaluated(self, score: Score) -> None:
"""Callback when a project has been evaluated."""
print(f"Project score: {self.bold(str(round(score.value, 1)))} {score.badge}")
print()

if len(self._failed_models) > 0:
print()
print(
f"Error: model score too low, fail_any_model_under = "
f"{self._config.fail_any_model_under}"
)
for model, score in self._failed_models:
print(f"Model {model.name} scored {round(score.value, 1)}")

elif score.value < self._config.fail_project_under:
print()
print(
f"Error: project score too low, fail_project_under = "
f"{self._config.fail_project_under}"
)
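
Note: with this change the Config object is injected into every Formatter, so subclasses can read the failure thresholds themselves instead of the CLI printing failures. The class below is a hypothetical sketch illustrating that contract; it is not part of this commit, and only Formatter, Model, Score, ModelResultsType, and the fail_*_under settings are taken from the diff above.

from dbt_score.evaluation import ModelResultsType
from dbt_score.formatters import Formatter
from dbt_score.models import Model
from dbt_score.scoring import Score


class ThresholdAwareFormatter(Formatter):
    """Hypothetical formatter that flags low scores using the injected config."""

    def model_evaluated(
        self, model: Model, results: ModelResultsType, score: Score
    ) -> None:
        # The base class stores the config, so thresholds are available here.
        flag = (
            " (below threshold)"
            if score.value < self._config.fail_any_model_under
            else ""
        )
        print(f"{model.name}: {round(score.value, 1)}{flag}")

    def project_evaluated(self, score: Score) -> None:
        if score.value < self._config.fail_project_under:
            print(
                f"Project score {round(score.value, 1)} is below "
                f"fail_project_under = {self._config.fail_project_under}"
            )
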
2 changes: 1 addition & 1 deletion src/dbt_score/lint.py
@@ -33,7 +33,7 @@ def lint_dbt_project(
"manifest": ManifestFormatter,
"ascii": ASCIIFormatter,
}
formatter = formatters[format](manifest_loader=manifest_loader)
formatter = formatters[format](manifest_loader=manifest_loader, config=config)

scorer = Scorer(config)

7 changes: 4 additions & 3 deletions tests/formatters/test_ascii_formatter.py
@@ -9,14 +9,15 @@

def test_ascii_formatter_model(
capsys,
default_config,
manifest_loader,
model1,
rule_severity_low,
rule_severity_medium,
rule_severity_critical,
):
"""Ensure the formatter doesn't write anything after model evaluation."""
formatter = ASCIIFormatter(manifest_loader=manifest_loader)
formatter = ASCIIFormatter(manifest_loader=manifest_loader, config=default_config)
results: dict[Type[Rule], RuleViolation | Exception | None] = {
rule_severity_low: None,
rule_severity_medium: Exception("Oh noes"),
@@ -27,9 +28,9 @@ def test_ascii_formatter_model(
assert stdout == ""


def test_ascii_formatter_project(capsys, manifest_loader):
def test_ascii_formatter_project(capsys, default_config, manifest_loader):
"""Ensure the formatter has the correct output after project evaluation."""
formatter = ASCIIFormatter(manifest_loader=manifest_loader)
formatter = ASCIIFormatter(manifest_loader=manifest_loader, config=default_config)

formatter.project_evaluated(Score(10.0, "🥇"))
stdout_gold = capsys.readouterr().out
74 changes: 70 additions & 4 deletions tests/formatters/test_human_readable_formatter.py
@@ -9,14 +9,17 @@

def test_human_readable_formatter_model(
capsys,
default_config,
manifest_loader,
model1,
rule_severity_low,
rule_severity_medium,
rule_severity_critical,
):
"""Ensure the formatter has the correct output after model evaluation."""
formatter = HumanReadableFormatter(manifest_loader=manifest_loader)
formatter = HumanReadableFormatter(
manifest_loader=manifest_loader, config=default_config
)
results: dict[Type[Rule], RuleViolation | Exception | None] = {
rule_severity_low: None,
rule_severity_medium: Exception("Oh noes"),
@@ -35,9 +38,72 @@
)


def test_human_readable_formatter_project(capsys, manifest_loader):
def test_human_readable_formatter_project(capsys, default_config, manifest_loader):
"""Ensure the formatter has the correct output after project evaluation."""
formatter = HumanReadableFormatter(manifest_loader=manifest_loader)
formatter = HumanReadableFormatter(
manifest_loader=manifest_loader, config=default_config
)
formatter.project_evaluated(Score(10.0, "🥇"))
stdout = capsys.readouterr().out
assert stdout == "Project score: \x1B[1m10.0\x1B[0m 🥇\n\n"
assert stdout == "Project score: \x1B[1m10.0\x1B[0m 🥇\n"


def test_human_readable_formatter_low_model_score(
capsys,
default_config,
manifest_loader,
model1,
rule_severity_critical,
):
"""Ensure the formatter has the correct output when a model has a low score."""
formatter = HumanReadableFormatter(
manifest_loader=manifest_loader, config=default_config
)
results: dict[Type[Rule], RuleViolation | Exception | None] = {
rule_severity_critical: RuleViolation("Error"),
}
formatter.model_evaluated(model1, results, Score(0.0, "🚧"))
formatter.project_evaluated(Score(0.0, "🚧"))
stdout = capsys.readouterr().out
print()
assert (
stdout
== """🚧 \x1B[1mmodel1\x1B[0m (score: 0.0)
\x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error
Project score: \x1B[1m0.0\x1B[0m 🚧
Error: model score too low, fail_any_model_under = 5.0
Model model1 scored 0.0
"""
)


def test_human_readable_formatter_low_project_score(
capsys,
default_config,
manifest_loader,
model1,
rule_severity_critical,
):
"""Ensure the formatter has the correct output when the projet has a low score."""
formatter = HumanReadableFormatter(
manifest_loader=manifest_loader, config=default_config
)
results: dict[Type[Rule], RuleViolation | Exception | None] = {
rule_severity_critical: RuleViolation("Error"),
}
formatter.model_evaluated(model1, results, Score(10.0, "🥇"))
formatter.project_evaluated(Score(0.0, "🚧"))
stdout = capsys.readouterr().out
print()
assert (
stdout
== """🥇 \x1B[1mmodel1\x1B[0m (score: 10.0)
\x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error
Project score: \x1B[1m0.0\x1B[0m 🚧
Error: project score too low, fail_project_under = 5.0
"""
)
10 changes: 8 additions & 2 deletions tests/formatters/test_manifest_formatter.py
@@ -10,14 +10,17 @@

def test_manifest_formatter_model(
capsys,
default_config,
manifest_loader,
model1,
rule_severity_low,
rule_severity_medium,
rule_severity_critical,
):
"""Ensure the formatter has the correct output after model evaluation."""
formatter = ManifestFormatter(manifest_loader=manifest_loader)
formatter = ManifestFormatter(
manifest_loader=manifest_loader, config=default_config
)
results = {
rule_severity_low: None,
rule_severity_medium: Exception("Oh noes"),
@@ -30,6 +33,7 @@ def test_manifest_formatter_model(

def test_manifest_formatter_project(
capsys,
default_config,
manifest_loader,
model1,
model2,
Expand All @@ -38,7 +42,9 @@ def test_manifest_formatter_project(
rule_severity_critical,
):
"""Ensure the formatter has the correct output after project evaluation."""
formatter = ManifestFormatter(manifest_loader=manifest_loader)
formatter = ManifestFormatter(
manifest_loader=manifest_loader, config=default_config
)
result1: dict[Type[Rule], RuleViolation | Exception | None] = {
rule_severity_low: None,
rule_severity_medium: Exception("Oh noes"),
12 changes: 3 additions & 9 deletions tests/test_cli.py
@@ -20,13 +20,7 @@ def test_lint_existing_manifest(manifest_path):
"""Test lint with an existing manifest."""
with patch("dbt_score.cli.Config._load_toml_file"):
runner = CliRunner()
result = runner.invoke(
lint,
[
"--manifest",
manifest_path,
],
)
result = runner.invoke(lint, ["--manifest", manifest_path])

assert "model1" in result.output
assert "model2" in result.output
@@ -63,7 +57,7 @@ def test_fail_project_under(manifest_path):

assert "model1" in result.output
assert "model2" in result.output
assert "Error: fail_project_under" in result.stdout
assert "Error: project score too low, fail_project_under" in result.stdout
assert result.exit_code == 1


@@ -77,5 +71,5 @@ def test_fail_any_model_under(manifest_path):

assert "model1" in result.output
assert "model2" in result.output
assert "Error: fail_any_model_under" in result.stdout
assert "Error: model score too low, fail_any_model_under" in result.stdout
assert result.exit_code == 1
1 change: 1 addition & 0 deletions tests/test_config.py
@@ -26,6 +26,7 @@ def test_load_valid_toml_file(valid_config_path):
assert config.badge_config.second.icon == "2️⃣"
assert config.badge_config.first.icon == "1️⃣"
assert config.fail_project_under == 7.5
assert config.fail_any_model_under == 6.9


def test_load_invalid_toml_file(caplog, invalid_config_path):
