Add JSON formatter for machine-readable output #68

Merged 7 commits on Aug 8, 2024

Changes from 6 commits
3 changes: 3 additions & 0 deletions docs/reference/formatters/json_formatter.md
@@ -0,0 +1,3 @@
# JSON formatter

::: dbt_score.formatters.json_formatter
4 changes: 2 additions & 2 deletions src/dbt_score/cli.py
@@ -39,8 +39,8 @@ def cli() -> None:
     "--format",
     "-f",
     help="Output format. Plain is suitable for terminals, manifest for rich "
-    "documentation.",
-    type=click.Choice(["plain", "manifest", "ascii"]),
+    "documentation, json for machine-readable output.",
+    type=click.Choice(["plain", "manifest", "ascii", "json"]),
     default="plain",
 )
 @click.option(
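With the new choice registered, JSON output is selected with --format json. A minimal sketch of exercising the option through Click's test runner, assuming dbt_score.cli exposes a lint command that carries this option (the command itself is not shown in this diff):

from click.testing import CliRunner

from dbt_score.cli import lint  # assumed entry point; not part of this diff

runner = CliRunner()
result = runner.invoke(lint, ["--format", "json"])
print(result.output)  # the JSON document emitted by JSONFormatter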
106 changes: 106 additions & 0 deletions src/dbt_score/formatters/json_formatter.py
@@ -0,0 +1,106 @@
"""JSON formatter.

Shape of the JSON output:

{
  "models": {
    "model_foo": {
      "score": 5.0,
      "badge": "🥈",
      "pass": true,
      "results": {
        "rule1": {
          "result": "OK",
          "severity": null,
          "message": null
        },
        "rule2": {
          "result": "WARN",
          "severity": "medium",
          "message": "Model lacks a description."
        }
      }
    },
    "model_bar": {
      "score": 0.0,
      "badge": "🥉",
      "pass": false,
      "results": {
        "rule1": {
          "result": "ERR",
          "message": "Exception message"
        }
      }
    }
  },
  "project": {
    "score": 2.5,
    "badge": "🥉",
    "pass": false
  }
}
"""


import json
from typing import Any

from dbt_score.evaluation import ModelResultsType
from dbt_score.formatters import Formatter
from dbt_score.models import Model
from dbt_score.rule import RuleViolation
from dbt_score.scoring import Score


class JSONFormatter(Formatter):
    """Formatter for JSON output."""

    def __init__(self, *args: Any, **kwargs: Any):
        """Instantiate formatter."""
        super().__init__(*args, **kwargs)
        self._model_results: dict[str, dict[str, Any]] = {}
        self._project_results: dict[str, Any]

    def model_evaluated(
        self, model: Model, results: ModelResultsType, score: Score
    ) -> None:
        """Callback when a model has been evaluated."""
        self._model_results[model.name] = {
            "score": score.value,
            "badge": score.badge,
            "pass": score.value >= self._config.fail_any_model_under,
            "results": {},
        }
        for rule, result in results.items():
            severity = rule.severity.name.lower()
            if result is None:
                self._model_results[model.name]["results"][rule.source()] = {
                    "result": "OK",

[Review comment]
Contributor: Maybe we can make this an Enum or something, so it's clear what the possible result codes are?

Contributor (Author): That's a good idea, but given it's also used in the HumanReadableFormatter, this would require a bit of refactoring to have it proper. I suggest to keep it simple for now :)

                    "severity": severity,
                    "message": None,
                }
            elif isinstance(result, RuleViolation):
                self._model_results[model.name]["results"][rule.source()] = {
                    "result": "WARN",
                    "severity": severity,
                    "message": result.message,
                }
            else:
                self._model_results[model.name]["results"][rule.source()] = {
                    "result": "ERR",
                    "severity": severity,
                    "message": str(result),
                }

    def project_evaluated(self, score: Score) -> None:
        """Callback when a project has been evaluated."""
        self._project_results = {
            "score": score.value,
            "badge": score.badge,
            "pass": score.value >= self._config.fail_project_under,
        }
        document = {
            "models": self._model_results,
            "project": self._project_results,
        }
        print(json.dumps(document, indent=2, ensure_ascii=False))
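Following up on the Enum suggestion in the review thread above, a possible sketch (not part of this PR) that keeps the string values this formatter emits:

from enum import Enum


class ResultCode(str, Enum):
    """Possible result codes emitted by the formatters (hypothetical refactoring)."""

    OK = "OK"  # rule passed
    WARN = "WARN"  # rule violated
    ERR = "ERR"  # evaluating the rule raised an exception

The members would then replace the bare "OK"/"WARN"/"ERR" literals, e.g. "result": ResultCode.OK.value.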
2 changes: 2 additions & 0 deletions src/dbt_score/lint.py
@@ -7,6 +7,7 @@
 from dbt_score.evaluation import Evaluation
 from dbt_score.formatters.ascii_formatter import ASCIIFormatter
 from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter
+from dbt_score.formatters.json_formatter import JSONFormatter
 from dbt_score.formatters.manifest_formatter import ManifestFormatter
 from dbt_score.models import ManifestLoader
 from dbt_score.rule_registry import RuleRegistry
@@ -32,6 +33,7 @@ def lint_dbt_project(
         "plain": HumanReadableFormatter,
         "manifest": ManifestFormatter,
         "ascii": ASCIIFormatter,
+        "json": JSONFormatter,
     }
     formatter = formatters[format](manifest_loader=manifest_loader, config=config)

64 changes: 64 additions & 0 deletions tests/formatters/test_json_formatter.py
@@ -0,0 +1,64 @@
"""Unit tests for the JSON formatter."""

from typing import Type

from dbt_score.formatters.json_formatter import JSONFormatter
from dbt_score.rule import Rule, RuleViolation
from dbt_score.scoring import Score


def test_json_formatter(
    capsys,
    default_config,
    manifest_loader,
    model1,
    rule_severity_low,
    rule_severity_medium,
    rule_severity_critical,
):
    """Ensure the formatter has the correct output after model evaluation."""
    formatter = JSONFormatter(manifest_loader=manifest_loader, config=default_config)
    results: dict[Type[Rule], RuleViolation | Exception | None] = {
        rule_severity_low: None,
        rule_severity_medium: Exception("Oh noes"),
        rule_severity_critical: RuleViolation("Error"),
    }
    formatter.model_evaluated(model1, results, Score(10.0, "🥇"))
    formatter.project_evaluated(Score(10.0, "🥇"))
    stdout = capsys.readouterr().out
    print()
    assert (
        stdout
        == """{
  "models": {
    "model1": {
      "score": 10.0,
      "badge": "🥇",
      "pass": true,
      "results": {
        "tests.conftest.rule_severity_low": {
          "result": "OK",
          "severity": "low",
          "message": null
        },
        "tests.conftest.rule_severity_medium": {
          "result": "ERR",
          "severity": "medium",
          "message": "Oh noes"
        },
        "tests.conftest.rule_severity_critical": {
          "result": "WARN",
          "severity": "critical",
          "message": "Error"

[Review comment on lines +37 to +52]
jochemvandooren (Aug 8, 2024): Interesting pass considering the rules.result 😁 Not a problem though!

        }
      }
    }
  },
  "project": {
    "score": 10.0,
    "badge": "🥇",
    "pass": true
  }
}
"""
    )
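For downstream tooling, the printed document can be parsed directly. A minimal sketch of a hypothetical consumer, assuming the CLI's stdout (the document asserted above) has been captured into a string:

import json


def failing_models(raw_output: str) -> list[str]:
    """Return the names of models whose "pass" flag is false in the captured JSON."""
    document = json.loads(raw_output)
    return [name for name, data in document["models"].items() if not data["pass"]]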