Add JSON formatter for machine-readable output (#68)
Being able to save `dbt-score`'s results and consume them from other
tooling opens up new possibilities. To that end, add a new formatter
that prints a JSON document with `dbt-score`'s results to `stdout`.

Example:
```shell
$ dbt_score lint -f json
{
  "models": {
    "model1": {
      "score": 5.0,
      "badge": "🥈",
      "pass": true,
      "results": {
        "dbt_score.rules.generic.columns_have_description": {
          "result": "OK",
          "severity": "medium",
          "message": null
        },
        "dbt_score.rules.generic.has_description": {
          "result": "WARN",
          "severity": "medium",
          "message": "Model lacks a description."
        }
      }
    }
  },
  "project": {
    "score": 5.0,
    "badge": "🥈",
    "pass": true
  }
}
```
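
Because the document is written to `stdout`, other tooling can capture and
parse it directly. Below is a minimal sketch of such a consumer, assuming
`dbt_score` is installed and a dbt manifest is available; the score threshold
and the exit-code handling are illustrative, not part of this change:

```python
# Hypothetical downstream consumer of the JSON report (illustrative only):
# run dbt-score, parse the report, and gate on the project score.
import json
import subprocess
import sys

result = subprocess.run(
    ["dbt_score", "lint", "-f", "json"],
    capture_output=True,
    text=True,
)
report = json.loads(result.stdout)

project = report["project"]
print(f"Project score: {project['score']} {project['badge']}")

if project["score"] < 7.5:  # assumed threshold, pick your own
    failing = [
        name for name, model in report["models"].items() if not model["pass"]
    ]
    print(f"Models that did not pass: {failing}")
    sys.exit(1)
```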

---------

Co-authored-by: Jochem van Dooren <[email protected]>
matthieucan and jochemvandooren authored Aug 8, 2024
1 parent 1259fc2 commit 7660e8a
Showing 6 changed files with 179 additions and 3 deletions.
3 changes: 2 additions & 1 deletion CHANGELOG.md
@@ -9,7 +9,8 @@ and this project adheres to
 ## [Unreleased]
 
 - Add null check before calling `project_evaluated` in the `evaluate` method to
-  prevent errors when no models are found. See PR #64.
+  prevent errors when no models are found. (#64)
+- Add JSON formatter for machine-readable output. (#68)
 
 ## [0.3.0] - 2024-06-20

3 changes: 3 additions & 0 deletions docs/reference/formatters/json_formatter.md
@@ -0,0 +1,3 @@
# JSON formatter

::: dbt_score.formatters.json_formatter
4 changes: 2 additions & 2 deletions src/dbt_score/cli.py
@@ -39,8 +39,8 @@ def cli() -> None:
     "--format",
     "-f",
     help="Output format. Plain is suitable for terminals, manifest for rich "
-    "documentation.",
-    type=click.Choice(["plain", "manifest", "ascii"]),
+    "documentation, json for machine-readable output.",
+    type=click.Choice(["plain", "manifest", "ascii", "json"]),
     default="plain",
 )
 @click.option(
106 changes: 106 additions & 0 deletions src/dbt_score/formatters/json_formatter.py
@@ -0,0 +1,106 @@
"""JSON formatter.
Shape of the JSON output:
{
"models": {
"model_foo": {
"score": 5.0,
"badge": "🥈",
"pass": true,
"results": {
"rule1": {
"result": "OK",
"severity": null
"message": null
},
"rule2": {
"result": "WARN",
"severity": "medium",
"message": "Model lacks a description."
}
]
},
"model_bar": {
"score": 0.0,
"badge": "🥉",
"pass": false,
"results": {
"rule1": {
"result": "ERR",
"message": "Exception message"
}
}
}
},
"project": {
"score": 2.5,
"badge": "🥉",
"pass": false
}
}
"""


import json
from typing import Any

from dbt_score.evaluation import ModelResultsType
from dbt_score.formatters import Formatter
from dbt_score.models import Model
from dbt_score.rule import RuleViolation
from dbt_score.scoring import Score


class JSONFormatter(Formatter):
    """Formatter for JSON output."""

    def __init__(self, *args: Any, **kwargs: Any):
        """Instantiate formatter."""
        super().__init__(*args, **kwargs)
        self._model_results: dict[str, dict[str, Any]] = {}
        self._project_results: dict[str, Any]

    def model_evaluated(
        self, model: Model, results: ModelResultsType, score: Score
    ) -> None:
        """Callback when a model has been evaluated."""
        self._model_results[model.name] = {
            "score": score.value,
            "badge": score.badge,
            "pass": score.value >= self._config.fail_any_model_under,
            "results": {},
        }
        for rule, result in results.items():
            severity = rule.severity.name.lower()
            if result is None:
                self._model_results[model.name]["results"][rule.source()] = {
                    "result": "OK",
                    "severity": severity,
                    "message": None,
                }
            elif isinstance(result, RuleViolation):
                self._model_results[model.name]["results"][rule.source()] = {
                    "result": "WARN",
                    "severity": severity,
                    "message": result.message,
                }
            else:
                self._model_results[model.name]["results"][rule.source()] = {
                    "result": "ERR",
                    "severity": severity,
                    "message": str(result),
                }

    def project_evaluated(self, score: Score) -> None:
        """Callback when a project has been evaluated."""
        self._project_results = {
            "score": score.value,
            "badge": score.badge,
            "pass": score.value >= self._config.fail_project_under,
        }
        document = {
            "models": self._model_results,
            "project": self._project_results,
        }
        print(json.dumps(document, indent=2, ensure_ascii=False))
2 changes: 2 additions & 0 deletions src/dbt_score/lint.py
@@ -7,6 +7,7 @@
 from dbt_score.evaluation import Evaluation
 from dbt_score.formatters.ascii_formatter import ASCIIFormatter
 from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter
+from dbt_score.formatters.json_formatter import JSONFormatter
 from dbt_score.formatters.manifest_formatter import ManifestFormatter
 from dbt_score.models import ManifestLoader
 from dbt_score.rule_registry import RuleRegistry
@@ -32,6 +33,7 @@ def lint_dbt_project(
         "plain": HumanReadableFormatter,
         "manifest": ManifestFormatter,
         "ascii": ASCIIFormatter,
+        "json": JSONFormatter,
     }
     formatter = formatters[format](manifest_loader=manifest_loader, config=config)

64 changes: 64 additions & 0 deletions tests/formatters/test_json_formatter.py
@@ -0,0 +1,64 @@
"""Unit tests for the JSON formatter."""

from typing import Type

from dbt_score.formatters.json_formatter import JSONFormatter
from dbt_score.rule import Rule, RuleViolation
from dbt_score.scoring import Score


def test_json_formatter(
capsys,
default_config,
manifest_loader,
model1,
rule_severity_low,
rule_severity_medium,
rule_severity_critical,
):
"""Ensure the formatter has the correct output after model evaluation."""
formatter = JSONFormatter(manifest_loader=manifest_loader, config=default_config)
results: dict[Type[Rule], RuleViolation | Exception | None] = {
rule_severity_low: None,
rule_severity_medium: Exception("Oh noes"),
rule_severity_critical: RuleViolation("Error"),
}
formatter.model_evaluated(model1, results, Score(10.0, "🥇"))
formatter.project_evaluated(Score(10.0, "🥇"))
stdout = capsys.readouterr().out
print()
assert (
stdout
== """{
"models": {
"model1": {
"score": 10.0,
"badge": "🥇",
"pass": true,
"results": {
"tests.conftest.rule_severity_low": {
"result": "OK",
"severity": "low",
"message": null
},
"tests.conftest.rule_severity_medium": {
"result": "ERR",
"severity": "medium",
"message": "Oh noes"
},
"tests.conftest.rule_severity_critical": {
"result": "WARN",
"severity": "critical",
"message": "Error"
}
}
}
},
"project": {
"score": 10.0,
"badge": "🥇",
"pass": true
}
}
"""
)
