Commit: WIP Initial version of medals
jochemvandooren committed May 27, 2024
1 parent f348d4e commit 83f8610
Showing 6 changed files with 53 additions and 16 deletions.
15 changes: 15 additions & 0 deletions src/dbt_score/config.py
@@ -79,6 +79,21 @@ def load(self) -> None:
         config_file = self.get_config_file(Path.cwd())
         if config_file:
             self._load_toml_file(str(config_file))
+        self.validate()
+
+    def validate(self) -> None:
+        """Validate the config."""
+        if self.bronze_medal_threshold >= self.silver_medal_threshold:
+            raise ValueError(
+                "bronze_medal_threshold must be lower than silver_medal_threshold"
+            )
+        if self.silver_medal_threshold >= self.gold_medal_threshold:
+            raise ValueError(
+                "silver_medal_threshold must be lower than gold_medal_threshold"
+            )
+
+
+
 
     def overload(self, values: dict[str, Any]) -> None:
         """Overload config with additional values."""
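The new validate() call makes misordered thresholds fail fast when the config is loaded. A minimal sketch of that behaviour, assuming Config() can be constructed directly and the three threshold attributes are plain settable floats (neither detail is shown in this diff):

    from dbt_score.config import Config

    config = Config()
    # Hypothetical values: bronze above silver violates the required ordering.
    config.bronze_medal_threshold = 8.0
    config.silver_medal_threshold = 7.0
    config.gold_medal_threshold = 9.0

    try:
        config.validate()
    except ValueError as exc:
        # bronze_medal_threshold must be lower than silver_medal_threshold
        print(exc)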
16 changes: 9 additions & 7 deletions src/dbt_score/evaluation.py
@@ -21,11 +21,11 @@ class Evaluation:
     """Evaluate a set of rules on a set of nodes."""
 
     def __init__(
-            self,
-            rule_registry: RuleRegistry,
-            manifest_loader: ManifestLoader,
-            formatter: Formatter,
-            scorer: Scorer,
+        self,
+        rule_registry: RuleRegistry,
+        manifest_loader: ManifestLoader,
+        formatter: Formatter,
+        scorer: Scorer,
     ) -> None:
         """Create an Evaluation object.
@@ -64,12 +64,14 @@ def evaluate(self) -> None:
                 self.results[model][rule.__class__] = result
 
             self.scores[model] = self._scorer.score_model(self.results[model])
+            model_medal = self._scorer.award_medal(self.scores[model])
             self._formatter.model_evaluated(
-                model, self.results[model], self.scores[model]
+                model, self.results[model], self.scores[model], model_medal
             )
 
         # Compute score for project
         self.project_score = self._scorer.score_aggregate_models(
             list(self.scores.values())
         )
-        self._formatter.project_evaluated(self.project_score)
+        project_medal = self._scorer.award_medal(self.project_score)
+        self._formatter.project_evaluated(self.project_score, project_medal)
4 changes: 2 additions & 2 deletions src/dbt_score/formatters/__init__.py
@@ -19,12 +19,12 @@ def __init__(self, manifest_loader: ManifestLoader):
 
     @abstractmethod
     def model_evaluated(
-        self, model: Model, results: ModelResultsType, score: float
+        self, model: Model, results: ModelResultsType, score: float, medal: str
     ) -> None:
         """Callback when a model has been evaluated."""
         raise NotImplementedError
 
     @abstractmethod
-    def project_evaluated(self, score: float) -> None:
+    def project_evaluated(self, score: float, medal: str) -> None:
         """Callback when a project has been evaluated."""
         raise NotImplementedError
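Any formatter now has to accept the medal argument in both callbacks. A minimal sketch of a conforming subclass; OneLineFormatter is hypothetical (not part of the commit), and the dbt_score.models import path for Model is an assumption:

    from typing import TYPE_CHECKING

    from dbt_score.formatters import Formatter

    if TYPE_CHECKING:
        from dbt_score.evaluation import ModelResultsType
        from dbt_score.models import Model  # assumed module path

    class OneLineFormatter(Formatter):
        """Hypothetical formatter: one line per model, medal included."""

        def model_evaluated(
            self, model: "Model", results: "ModelResultsType", score: float, medal: str
        ) -> None:
            # rstrip() drops the trailing space when no medal was awarded.
            print(f"{model.name}: {round(score, 1)} {medal}".rstrip())

        def project_evaluated(self, score: float, medal: str) -> None:
            print(f"project: {round(score, 1)} {medal}".rstrip())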
8 changes: 4 additions & 4 deletions src/dbt_score/formatters/human_readable_formatter.py
@@ -21,7 +21,7 @@ def bold(text: str) -> str:
         return f"\033[1m{text}\033[0m"
 
     def model_evaluated(
-        self, model: Model, results: ModelResultsType, score: float
+        self, model: Model, results: ModelResultsType, score: float, medal: str
     ) -> None:
         """Callback when a model has been evaluated."""
         print(f"Model {self.bold(model.name)}")
@@ -35,9 +35,9 @@ def model_evaluated(
                 )
             else:
                 print(f"{self.indent}{self.label_error} {rule.source()}: {result!s}")
-        print(f"Score: {self.bold(str(round(score, 1)))}")
+        print(f"Score: {self.bold(str(round(score, 1)))} {medal}")
         print()
 
-    def project_evaluated(self, score: float) -> None:
+    def project_evaluated(self, score: float, medal: str) -> None:
         """Callback when a project has been evaluated."""
-        print(f"Project score: {self.bold(str(round(score, 1)))}")
+        print(f"Project score: {self.bold(str(round(score, 1)))} {medal}")
2 changes: 1 addition & 1 deletion src/dbt_score/lint.py
@@ -27,7 +27,7 @@ def lint_dbt_project(
     formatters = {"plain": HumanReadableFormatter, "manifest": ManifestFormatter}
     formatter = formatters[format](manifest_loader=manifest_loader)
 
-    scorer = Scorer()
+    scorer = Scorer(config)
 
     evaluation = Evaluation(
         rule_registry=rule_registry,
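The hunk implies lint_dbt_project already has a Config in scope (how it gets there is not shown here), so the change amounts to threading it through to the scorer. A sketch of that wiring, assuming the config is built and loaded as in config.py above:

    from dbt_score.config import Config
    from dbt_score.scoring import Scorer

    config = Config()
    config.load()            # reads the config file and now also validates thresholds
    scorer = Scorer(config)  # the thresholds travel with the scorer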
24 changes: 22 additions & 2 deletions src/dbt_score/scoring.py
@@ -4,6 +4,8 @@
 
 import typing
 
+from dbt_score.config import Config
+
 if typing.TYPE_CHECKING:
     from dbt_score.evaluation import ModelResultsType
     from dbt_score.rule import RuleViolation, Severity
@@ -22,14 +24,20 @@ class Scorer:
     min_score = 0.0
     max_score = 10.0
 
+    def __init__(self, config: Config) -> None:
+        """Create a Scorer object."""
+        self.bronze_medal_threshold = config.bronze_medal_threshold
+        self.silver_medal_threshold = config.silver_medal_threshold
+        self.gold_medal_threshold = config.gold_medal_threshold
+
     def score_model(self, model_results: ModelResultsType) -> float:
         """Compute the score of a given model."""
         if len(model_results) == 0:
             # No rule? No problem
             return self.max_score
         if any(
-                rule.severity == Severity.CRITICAL and isinstance(result, RuleViolation)
-                for rule, result in model_results.items()
+            rule.severity == Severity.CRITICAL and isinstance(result, RuleViolation)
+            for rule, result in model_results.items()
         ):
             # If there's a CRITICAL violation, the score is 0
             return self.min_score
@@ -53,3 +61,15 @@ def score_aggregate_models(self, scores: list[float]) -> float:
         if len(scores) == 0:
             return self.max_score
         return sum(scores) / len(scores)
+
+    def award_medal(self, score: float) -> str:
+        """Award a medal based on a score."""
+        rounded_score = round(score, 1)
+        if rounded_score >= self.gold_medal_threshold:
+            return "🥇"
+        elif rounded_score >= self.silver_medal_threshold:
+            return "🥈"
+        elif rounded_score >= self.bronze_medal_threshold:
+            return "🥉"
+        else:
+            return ""
