Add formatter for manifest.json containing scores
1 parent 6011dac · commit 08901aa
Showing 8 changed files with 134 additions and 12 deletions. Only the new formatter and its unit tests are reproduced below.
dbt_score/formatters/manifest_formatter.py (new file)
@@ -0,0 +1,32 @@
"""Formatter for a manifest.json.""" | ||
|
||
|
||
import copy | ||
import json | ||
from typing import Any | ||
|
||
from dbt_score.evaluation import ModelResultsType | ||
from dbt_score.formatters import Formatter | ||
from dbt_score.models import Model | ||
|
||
|
||
class ManifestFormatter(Formatter): | ||
"""Formatter to generate manifest.json with score metadata.""" | ||
|
||
def __init__(self, *args: Any, **kwargs: Any) -> None: | ||
"""Instantiate a manifest formatter.""" | ||
self._model_scores: dict[str, float] = {} | ||
super().__init__(*args, **kwargs) | ||
|
||
def model_evaluated( | ||
self, model: Model, results: ModelResultsType, score: float | ||
) -> None: | ||
"""Callback when a model has been evaluated.""" | ||
self._model_scores[model.unique_id] = score | ||
|
||
def project_evaluated(self, score: float) -> None: | ||
"""Callback when a project has been evaluated.""" | ||
manifest = copy.copy(self._manifest_loader.raw_manifest) | ||
for model_id, score in self._model_scores.items(): | ||
manifest["nodes"][model_id]["meta"]["score"] = round(score, 1) | ||
print(json.dumps(manifest, indent=2)) |
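To make the score-injection step concrete, here is a minimal, self-contained sketch of what project_evaluated does. The one-node manifest and the FakeLoader stand-in are invented for illustration and are not part of this commit:

"""Standalone sketch of the score injection (toy data, hypothetical names)."""

import copy
import json


class FakeLoader:
    """Hypothetical stand-in for dbt-score's manifest loader."""

    raw_manifest = {
        "nodes": {
            "model.package.model1": {"name": "model1", "meta": {}},
        }
    }


model_scores = {"model.package.model1": 7.33}  # unique_id -> score

# Same steps as project_evaluated: copy the manifest, write each model's
# rounded score into its node's "meta" block, dump the result as JSON.
manifest = copy.deepcopy(FakeLoader.raw_manifest)
for model_id, model_score in model_scores.items():
    manifest["nodes"][model_id]["meta"]["score"] = round(model_score, 1)

print(json.dumps(manifest, indent=2))  # "meta" now contains {"score": 7.3}

The output is the full manifest with one extra meta key per scored model, so downstream tools that already parse manifest.json can pick the scores up unchanged.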
test_manifest_formatter.py (new file)
@@ -0,0 +1,58 @@
"""Unit tests for the manifest formatter.""" | ||
|
||
import json | ||
from typing import Type | ||
|
||
from dbt_score.formatters.manifest_formatter import ManifestFormatter | ||
from dbt_score.rule import Rule, RuleViolation | ||
|
||
|
||
def test_manifest_formatter_model( | ||
capsys, | ||
manifest_loader, | ||
model1, | ||
rule_severity_low, | ||
rule_severity_medium, | ||
rule_severity_critical, | ||
): | ||
"""Ensure the formatter has the correct output after model evaluation.""" | ||
formatter = ManifestFormatter(manifest_loader=manifest_loader) | ||
results = { | ||
rule_severity_low: None, | ||
rule_severity_medium: Exception("Oh noes"), | ||
rule_severity_critical: RuleViolation("Error"), | ||
} | ||
formatter.model_evaluated(model1, results, 10.0) | ||
stdout = capsys.readouterr().out | ||
assert stdout == "" | ||
|
||
|
||
def test_human_readable_formatter_project( # noqa: PLR0913 | ||
capsys, | ||
manifest_loader, | ||
model1, | ||
model2, | ||
rule_severity_low, | ||
rule_severity_medium, | ||
rule_severity_critical, | ||
): | ||
"""Ensure the formatter has the correct output after project evaluation.""" | ||
formatter = ManifestFormatter(manifest_loader=manifest_loader) | ||
result1: dict[Type[Rule], RuleViolation | Exception | None] = { | ||
rule_severity_low: None, | ||
rule_severity_medium: Exception("Oh noes"), | ||
rule_severity_critical: RuleViolation("Error"), | ||
} | ||
result2: dict[Type[Rule], RuleViolation | Exception | None] = { | ||
rule_severity_low: None, | ||
rule_severity_medium: None, | ||
rule_severity_critical: None, | ||
} | ||
|
||
formatter.model_evaluated(model1, result1, 5.0) | ||
formatter.model_evaluated(model2, result2, 10.0) | ||
formatter.project_evaluated(7.5) | ||
stdout = capsys.readouterr().out | ||
new_manifest = json.loads(stdout) | ||
assert new_manifest["nodes"]["model.package.model1"]["meta"]["score"] == 5.0 | ||
assert new_manifest["nodes"]["model.package.model2"]["meta"]["score"] == 10.0 |
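These tests rely on shared pytest fixtures (manifest_loader, model1, model2, and the rule_severity_* rules) defined elsewhere in the suite. The assertions pin down one requirement: the loader's raw_manifest must contain nodes keyed model.package.model1 and model.package.model2, each with a meta dict for the formatter to write into. A rough, hypothetical sketch of that manifest fixture:

"""Hypothetical conftest.py fragment; ids and fields are invented.

Only the manifest shape matters to these tests: two model nodes, each
with a "meta" dict for the formatter to write its score into.
"""

import pytest


@pytest.fixture
def raw_manifest() -> dict:
    """Toy manifest with the two model nodes the tests assert on."""
    return {
        "nodes": {
            "model.package.model1": {"name": "model1", "meta": {}},
            "model.package.model2": {"name": "model2", "meta": {}},
        }
    }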