Skip to content

Commit

Permalink
Add formatter for manifest.json containing scores
Browse files Browse the repository at this point in the history
  • Loading branch information
matthieucan committed May 24, 2024
1 parent 6011dac commit 08901aa
Show file tree
Hide file tree
Showing 8 changed files with 134 additions and 12 deletions.
13 changes: 11 additions & 2 deletions src/dbt_score/cli.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
"""CLI interface."""

from pathlib import Path
from typing import Final
from typing import Final, Literal

import click
from click.core import ParameterSource
Expand Down Expand Up @@ -32,6 +32,14 @@ def cli() -> None:


@cli.command()
@click.option(
"--format",
"-f",
help="Output format. Plain is suitable for terminals, markdown for rich "
"documentation.",
type=click.Choice(["plain", "manifest"]),
default="plain",
)
@click.option(
"--select",
"-s",
Expand Down Expand Up @@ -68,6 +76,7 @@ def cli() -> None:
default=False,
)
def lint(
format: Literal["plain", "manifest"],
select: tuple[str],
namespace: list[str],
disabled_rule: list[str],
Expand All @@ -92,7 +101,7 @@ def lint(
if run_dbt_parse:
dbt_parse()

lint_dbt_project(manifest, config)
lint_dbt_project(manifest_path=manifest, config=config, format=format)


@cli.command(name="list")
Expand Down
6 changes: 5 additions & 1 deletion src/dbt_score/formatters/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,16 @@

if typing.TYPE_CHECKING:
from dbt_score.evaluation import ModelResultsType
from dbt_score.models import Model
from dbt_score.models import ManifestLoader, Model


class Formatter(ABC):
"""Abstract class to define a formatter."""

def __init__(self, manifest_loader: ManifestLoader):
"""Instantiate a formatter."""
self._manifest_loader = manifest_loader

@abstractmethod
def model_evaluated(
self, model: Model, results: ModelResultsType, score: float
Expand Down
32 changes: 32 additions & 0 deletions src/dbt_score/formatters/manifest_formatter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
"""Formatter for a manifest.json."""


import copy
import json
from typing import Any

from dbt_score.evaluation import ModelResultsType
from dbt_score.formatters import Formatter
from dbt_score.models import Model


class ManifestFormatter(Formatter):
    """Formatter that emits a copy of manifest.json enriched with scores.

    Model scores are collected as each model is evaluated; once the whole
    project has been evaluated, the raw manifest is printed to stdout as
    JSON with each evaluated node's ``meta.score`` set to its rounded score.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Instantiate a manifest formatter."""
        # unique_id -> score, filled in by model_evaluated callbacks.
        self._model_scores: dict[str, float] = {}
        super().__init__(*args, **kwargs)

    def model_evaluated(
        self, model: Model, results: ModelResultsType, score: float
    ) -> None:
        """Callback when a model has been evaluated.

        Only records the score; nothing is printed until project_evaluated.
        """
        self._model_scores[model.unique_id] = score

    def project_evaluated(self, score: float) -> None:
        """Callback when a project has been evaluated.

        Prints the full manifest with per-model scores injected, as JSON.
        """
        # deepcopy: a shallow copy would share the nested node dicts with
        # the loader, so writing scores below would mutate raw_manifest.
        manifest = copy.deepcopy(self._manifest_loader.raw_manifest)
        for model_id, model_score in self._model_scores.items():
            node = manifest["nodes"][model_id]
            # Create "meta" on demand: not every node is guaranteed to
            # carry a meta mapping in the manifest.
            node.setdefault("meta", {})["score"] = round(model_score, 1)
        print(json.dumps(manifest, indent=2))
9 changes: 7 additions & 2 deletions src/dbt_score/lint.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,20 @@
"""Lint dbt models metadata."""

from pathlib import Path
from typing import Literal

from dbt_score.config import Config
from dbt_score.evaluation import Evaluation
from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter
from dbt_score.formatters.manifest_formatter import ManifestFormatter
from dbt_score.models import ManifestLoader
from dbt_score.rule_registry import RuleRegistry
from dbt_score.scoring import Scorer


def lint_dbt_project(manifest_path: Path, config: Config) -> None:
def lint_dbt_project(
manifest_path: Path, config: Config, format: Literal["plain", "manifest"]
) -> None:
"""Lint dbt manifest."""
if not manifest_path.exists():
raise FileNotFoundError(f"Manifest not found at {manifest_path}.")
Expand All @@ -20,7 +24,8 @@ def lint_dbt_project(manifest_path: Path, config: Config) -> None:

manifest_loader = ManifestLoader(manifest_path)

formatter = HumanReadableFormatter()
formatters = {"plain": HumanReadableFormatter, "manifest": ManifestFormatter}
formatter = formatters[format](manifest_loader=manifest_loader)

scorer = Scorer()

Expand Down
7 changes: 7 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@

from dbt_score import Model, Rule, RuleViolation, Severity, rule
from dbt_score.config import Config
from dbt_score.models import ManifestLoader
from pytest import fixture

# Configuration
Expand Down Expand Up @@ -50,6 +51,12 @@ def raw_manifest(manifest_path) -> Any:
return json.loads(manifest_path.read_text(encoding="utf-8"))


@fixture
def manifest_loader(manifest_path) -> ManifestLoader:
    """Provide a ManifestLoader already populated from the test manifest."""
    loader = ManifestLoader(file_path=manifest_path)
    return loader


# Models


Expand Down
19 changes: 13 additions & 6 deletions tests/formatters/test_human_readable_formatter.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,22 @@
"""Unit tests for the human readable formatter."""

from typing import Type

from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter
from dbt_score.rule import RuleViolation
from dbt_score.rule import Rule, RuleViolation


def test_human_readable_formatter_model(
capsys, model1, rule_severity_low, rule_severity_medium, rule_severity_critical
capsys,
manifest_loader,
model1,
rule_severity_low,
rule_severity_medium,
rule_severity_critical,
):
"""Ensure the formatter has the correct output after model evaluation."""
formatter = HumanReadableFormatter()
results = {
formatter = HumanReadableFormatter(manifest_loader=manifest_loader)
results: dict[Type[Rule], RuleViolation | Exception | None] = {
rule_severity_low: None,
rule_severity_medium: Exception("Oh noes"),
rule_severity_critical: RuleViolation("Error"),
Expand All @@ -28,9 +35,9 @@ def test_human_readable_formatter_model(
)


def test_human_readable_formatter_project(capsys):
def test_human_readable_formatter_project(capsys, manifest_loader):
"""Ensure the formatter has the correct output after project evaluation."""
formatter = HumanReadableFormatter()
formatter = HumanReadableFormatter(manifest_loader=manifest_loader)
formatter.project_evaluated(10.0)
stdout = capsys.readouterr().out
assert stdout == "Project score: \x1B[1m10.0\x1B[0m\n"
58 changes: 58 additions & 0 deletions tests/formatters/test_manifest_formatter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
"""Unit tests for the manifest formatter."""

import json
from typing import Type

from dbt_score.formatters.manifest_formatter import ManifestFormatter
from dbt_score.rule import Rule, RuleViolation


def test_manifest_formatter_model(
    capsys,
    manifest_loader,
    model1,
    rule_severity_low,
    rule_severity_medium,
    rule_severity_critical,
):
    """Ensure the formatter has the correct output after model evaluation."""
    formatter = ManifestFormatter(manifest_loader=manifest_loader)
    # Annotated like the sibling tests so mypy can type-check the mapping.
    results: dict[Type[Rule], RuleViolation | Exception | None] = {
        rule_severity_low: None,
        rule_severity_medium: Exception("Oh noes"),
        rule_severity_critical: RuleViolation("Error"),
    }
    formatter.model_evaluated(model1, results, 10.0)
    stdout = capsys.readouterr().out
    # Model evaluation only records the score; output happens only when
    # project_evaluated is called, so nothing must be printed here.
    assert stdout == ""


def test_manifest_formatter_project(  # noqa: PLR0913
    capsys,
    manifest_loader,
    model1,
    model2,
    rule_severity_low,
    rule_severity_medium,
    rule_severity_critical,
):
    """Ensure the formatter has the correct output after project evaluation.

    Note: renamed from test_human_readable_formatter_project — the original
    name was a copy-paste leftover; this test exercises ManifestFormatter.
    """
    formatter = ManifestFormatter(manifest_loader=manifest_loader)
    result1: dict[Type[Rule], RuleViolation | Exception | None] = {
        rule_severity_low: None,
        rule_severity_medium: Exception("Oh noes"),
        rule_severity_critical: RuleViolation("Error"),
    }
    result2: dict[Type[Rule], RuleViolation | Exception | None] = {
        rule_severity_low: None,
        rule_severity_medium: None,
        rule_severity_critical: None,
    }

    formatter.model_evaluated(model1, result1, 5.0)
    formatter.model_evaluated(model2, result2, 10.0)
    formatter.project_evaluated(7.5)
    stdout = capsys.readouterr().out
    # The printed output must be the full manifest JSON with each model's
    # score stored under its node's "meta" mapping.
    new_manifest = json.loads(stdout)
    assert new_manifest["nodes"]["model.package.model1"]["meta"]["score"] == 5.0
    assert new_manifest["nodes"]["model.package.model2"]["meta"]["score"] == 10.0
2 changes: 1 addition & 1 deletion tests/test_lint.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,6 @@ def test_lint_dbt_project(mock_evaluation, manifest_path):
# Instance of classes are the same Mocks
mock_evaluation.return_value = mock_evaluation

lint_dbt_project(manifest_path, Config())
lint_dbt_project(manifest_path=manifest_path, config=Config(), format="plain")

mock_evaluation.evaluate.assert_called_once()

0 comments on commit 08901aa

Please sign in to comment.