diff --git a/src/dbt_score/cli.py b/src/dbt_score/cli.py
index 8e649aa..ff382e9 100644
--- a/src/dbt_score/cli.py
+++ b/src/dbt_score/cli.py
@@ -1,7 +1,7 @@
 """CLI interface."""
 
 from pathlib import Path
-from typing import Final
+from typing import Final, Literal
 
 import click
 from click.core import ParameterSource
@@ -32,6 +32,14 @@ def cli() -> None:
 
 
 @cli.command()
+@click.option(
+    "--format",
+    "-f",
+    help="Output format. Plain is suitable for terminals, manifest emits the "
+    "manifest.json enriched with scores.",
+    type=click.Choice(["plain", "manifest"]),
+    default="plain",
+)
 @click.option(
     "--select",
     "-s",
@@ -68,6 +76,7 @@ def cli() -> None:
     default=False,
 )
 def lint(
+    format: Literal["plain", "manifest"],
     select: tuple[str],
     namespace: list[str],
     disabled_rule: list[str],
@@ -92,7 +101,7 @@ def lint(
     if run_dbt_parse:
         dbt_parse()
 
-    lint_dbt_project(manifest, config)
+    lint_dbt_project(manifest_path=manifest, config=config, format=format)
 
 
 @cli.command(name="list")
diff --git a/src/dbt_score/formatters/__init__.py b/src/dbt_score/formatters/__init__.py
index cb79e1b..1734b33 100644
--- a/src/dbt_score/formatters/__init__.py
+++ b/src/dbt_score/formatters/__init__.py
@@ -7,12 +7,16 @@
 if typing.TYPE_CHECKING:
     from dbt_score.evaluation import ModelResultsType
 
-from dbt_score.models import Model
+from dbt_score.models import ManifestLoader, Model
 
 
 class Formatter(ABC):
     """Abstract class to define a formatter."""
 
+    def __init__(self, manifest_loader: ManifestLoader):
+        """Instantiate a formatter."""
+        self._manifest_loader = manifest_loader
+
     @abstractmethod
     def model_evaluated(
         self, model: Model, results: ModelResultsType, score: float
diff --git a/src/dbt_score/formatters/manifest_formatter.py b/src/dbt_score/formatters/manifest_formatter.py
new file mode 100644
index 0000000..acb445a
--- /dev/null
+++ b/src/dbt_score/formatters/manifest_formatter.py
@@ -0,0 +1,32 @@
+"""Formatter for a manifest.json."""
+
+
+import copy
+import json
+from typing import Any
+
+from dbt_score.evaluation import ModelResultsType
+from dbt_score.formatters import Formatter
+from dbt_score.models import Model
+
+
+class ManifestFormatter(Formatter):
+    """Formatter to generate manifest.json with score metadata."""
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        """Instantiate a manifest formatter."""
+        self._model_scores: dict[str, float] = {}
+        super().__init__(*args, **kwargs)
+
+    def model_evaluated(
+        self, model: Model, results: ModelResultsType, score: float
+    ) -> None:
+        """Callback when a model has been evaluated."""
+        self._model_scores[model.unique_id] = score
+
+    def project_evaluated(self, score: float) -> None:
+        """Callback when a project has been evaluated."""
+        manifest = copy.copy(self._manifest_loader.raw_manifest)
+        for model_id, score in self._model_scores.items():
+            manifest["nodes"][model_id]["meta"]["score"] = round(score, 1)
+        print(json.dumps(manifest, indent=2))
diff --git a/src/dbt_score/lint.py b/src/dbt_score/lint.py
index 5fe1a40..9908fbe 100644
--- a/src/dbt_score/lint.py
+++ b/src/dbt_score/lint.py
@@ -1,16 +1,20 @@
 """Lint dbt models metadata."""
 
 from pathlib import Path
+from typing import Literal
 
 from dbt_score.config import Config
 from dbt_score.evaluation import Evaluation
 from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter
+from dbt_score.formatters.manifest_formatter import ManifestFormatter
 from dbt_score.models import ManifestLoader
 from dbt_score.rule_registry import RuleRegistry
 from dbt_score.scoring import Scorer
 
 
-def lint_dbt_project(manifest_path: Path, config: Config) -> None:
+def lint_dbt_project(
+    manifest_path: Path, config: Config, format: Literal["plain", "manifest"]
+) -> None:
     """Lint dbt manifest."""
     if not manifest_path.exists():
         raise FileNotFoundError(f"Manifest not found at {manifest_path}.")
@@ -20,7 +24,8 @@ def lint_dbt_project(manifest_path: Path, config: Config) -> None:
 
     manifest_loader = ManifestLoader(manifest_path)
 
-    formatter = HumanReadableFormatter()
+    formatters = {"plain": HumanReadableFormatter, "manifest": ManifestFormatter}
+    formatter = formatters[format](manifest_loader=manifest_loader)
 
     scorer = Scorer()
 
diff --git a/tests/conftest.py b/tests/conftest.py
index 980cb14..e949327 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -6,6 +6,7 @@
 
 from dbt_score import Model, Rule, RuleViolation, Severity, rule
 from dbt_score.config import Config
+from dbt_score.models import ManifestLoader
 from pytest import fixture
 
 # Configuration
@@ -50,6 +51,12 @@ def raw_manifest(manifest_path) -> Any:
     return json.loads(manifest_path.read_text(encoding="utf-8"))
 
 
+@fixture
+def manifest_loader(manifest_path) -> ManifestLoader:
+    """Return an instantiated and loaded manifest loader."""
+    return ManifestLoader(file_path=manifest_path)
+
+
 # Models
 
 
diff --git a/tests/formatters/test_human_readable_formatter.py b/tests/formatters/test_human_readable_formatter.py
index ae5e3cf..0cd0460 100644
--- a/tests/formatters/test_human_readable_formatter.py
+++ b/tests/formatters/test_human_readable_formatter.py
@@ -1,15 +1,22 @@
 """Unit tests for the human readable formatter."""
 
+from typing import Type
+
 from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter
-from dbt_score.rule import RuleViolation
+from dbt_score.rule import Rule, RuleViolation
 
 
 def test_human_readable_formatter_model(
-    capsys, model1, rule_severity_low, rule_severity_medium, rule_severity_critical
+    capsys,
+    manifest_loader,
+    model1,
+    rule_severity_low,
+    rule_severity_medium,
+    rule_severity_critical,
 ):
     """Ensure the formatter has the correct output after model evaluation."""
-    formatter = HumanReadableFormatter()
-    results = {
+    formatter = HumanReadableFormatter(manifest_loader=manifest_loader)
+    results: dict[Type[Rule], RuleViolation | Exception | None] = {
         rule_severity_low: None,
         rule_severity_medium: Exception("Oh noes"),
         rule_severity_critical: RuleViolation("Error"),
     }
@@ -28,9 +35,9 @@
     )
 
 
-def test_human_readable_formatter_project(capsys):
+def test_human_readable_formatter_project(capsys, manifest_loader):
     """Ensure the formatter has the correct output after project evaluation."""
-    formatter = HumanReadableFormatter()
+    formatter = HumanReadableFormatter(manifest_loader=manifest_loader)
     formatter.project_evaluated(10.0)
     stdout = capsys.readouterr().out
     assert stdout == "Project score: \x1B[1m10.0\x1B[0m\n"
diff --git a/tests/formatters/test_manifest_formatter.py b/tests/formatters/test_manifest_formatter.py
new file mode 100644
index 0000000..98ca453
--- /dev/null
+++ b/tests/formatters/test_manifest_formatter.py
@@ -0,0 +1,58 @@
+"""Unit tests for the manifest formatter."""
+
+import json
+from typing import Type
+
+from dbt_score.formatters.manifest_formatter import ManifestFormatter
+from dbt_score.rule import Rule, RuleViolation
+
+
+def test_manifest_formatter_model(
+    capsys,
+    manifest_loader,
+    model1,
+    rule_severity_low,
+    rule_severity_medium,
+    rule_severity_critical,
+):
+    """Ensure the formatter has the correct output after model evaluation."""
evaluation.""" + formatter = ManifestFormatter(manifest_loader=manifest_loader) + results = { + rule_severity_low: None, + rule_severity_medium: Exception("Oh noes"), + rule_severity_critical: RuleViolation("Error"), + } + formatter.model_evaluated(model1, results, 10.0) + stdout = capsys.readouterr().out + assert stdout == "" + + +def test_human_readable_formatter_project( # noqa: PLR0913 + capsys, + manifest_loader, + model1, + model2, + rule_severity_low, + rule_severity_medium, + rule_severity_critical, +): + """Ensure the formatter has the correct output after project evaluation.""" + formatter = ManifestFormatter(manifest_loader=manifest_loader) + result1: dict[Type[Rule], RuleViolation | Exception | None] = { + rule_severity_low: None, + rule_severity_medium: Exception("Oh noes"), + rule_severity_critical: RuleViolation("Error"), + } + result2: dict[Type[Rule], RuleViolation | Exception | None] = { + rule_severity_low: None, + rule_severity_medium: None, + rule_severity_critical: None, + } + + formatter.model_evaluated(model1, result1, 5.0) + formatter.model_evaluated(model2, result2, 10.0) + formatter.project_evaluated(7.5) + stdout = capsys.readouterr().out + new_manifest = json.loads(stdout) + assert new_manifest["nodes"]["model.package.model1"]["meta"]["score"] == 5.0 + assert new_manifest["nodes"]["model.package.model2"]["meta"]["score"] == 10.0 diff --git a/tests/test_lint.py b/tests/test_lint.py index 41243e2..4605392 100644 --- a/tests/test_lint.py +++ b/tests/test_lint.py @@ -13,6 +13,6 @@ def test_lint_dbt_project(mock_evaluation, manifest_path): # Instance of classes are the same Mocks mock_evaluation.return_value = mock_evaluation - lint_dbt_project(manifest_path, Config()) + lint_dbt_project(manifest_path=manifest_path, config=Config(), format="plain") mock_evaluation.evaluate.assert_called_once()