Always compute score between 0 and 10 (#15)
Using an internal representation of the score on a 0-1 scale and a displayed
representation on a 0-10 scale is confusing.
Simplify by using a 0-10 score everywhere.
matthieucan authored May 2, 2024
1 parent fffb83e commit cac2dda
Showing 4 changed files with 24 additions and 25 deletions.
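
To see the confusion this removes: before, a model's score lived on a 0-1 scale internally and was multiplied by 10 only at display time, so the same quality had two numeric faces. A minimal sketch with hypothetical values (not code from the repository):

    # Before: two representations of one quality.
    internal = 2 / 3            # what Scorer returned (0-1 scale)
    displayed = internal * 10   # what the formatter printed (0-10 scale)

    # After: a single 0-10 representation everywhere.
    score = 2 / 3 * 10          # the scorer itself now returns ~6.67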
4 changes: 2 additions & 2 deletions src/dbt_score/formatters/human_readable_formatter.py
@@ -35,9 +35,9 @@ def model_evaluated(
                 )
         else:
             print(f"{self.indent}{self.label_error} {rule.source()}: {result!s}")
-        print(f"Score: {self.bold(str(round(score * 10, 1)))}")
+        print(f"Score: {self.bold(str(round(score, 1)))}")
         print()

     def project_evaluated(self, score: float) -> None:
         """Callback when a project has been evaluated."""
-        print(f"Project score: {self.bold(str(round(score * 10, 1)))}")
+        print(f"Project score: {self.bold(str(round(score, 1)))}")
4 changes: 2 additions & 2 deletions src/dbt_score/scoring.py
@@ -20,7 +20,7 @@ class Scorer:
     score_cardinality = 3

     min_score = 0.0
-    max_score = 1.0
+    max_score = 10.0

     def score_model(self, model_results: ModelResultsType) -> float:
         """Compute the score of a given model."""
@@ -43,7 +43,7 @@ def score_model(self, model_results: ModelResultsType) -> float:
                 else self.score_cardinality  # 3/3
                 for rule, result in model_results.items()
             ]
-        ) / (self.score_cardinality * len(model_results))
+        ) / (self.score_cardinality * len(model_results)) * self.max_score

     def score_aggregate_models(self, scores: list[float]) -> float:
         """Compute the score of a list of models."""
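
For context, here is a self-contained sketch of the new computation. The branches hidden by the fold are reconstructed from the test assertions below, not copied from the repository: a passing (or errored) rule earns the full 3/3 points, while a violation earns severity-dependent points.

    SCORE_CARDINALITY = 3
    MAX_SCORE = 10.0
    # Points awarded to a *violated* rule, by severity (inferred from the tests).
    VIOLATION_POINTS = {"low": 2, "medium": 1, "high": 0, "critical": 0}

    def score_model(results: list[tuple[str, bool]]) -> float:
        """Score a model from (severity, violated) pairs, one per rule."""
        if not results:
            return MAX_SCORE  # no rules evaluated: perfect score
        points = sum(
            VIOLATION_POINTS[severity] if violated else SCORE_CARDINALITY
            for severity, violated in results
        )
        return points / (SCORE_CARDINALITY * len(results)) * MAX_SCORE

    # One violated low-severity rule: 2 / 3 * 10 ~= 6.67, matching the tests.
    assert round(score_model([("low", True)]), 2) == 6.67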
5 changes: 2 additions & 3 deletions tests/formatters/test_human_readable_formatter.py
@@ -1,6 +1,5 @@
"""Unit tests for the human readable formatter."""


from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter
from dbt_score.rule import RuleViolation

@@ -23,7 +22,7 @@ def test_human_readable_formatter_model(
 \x1B[1;32mOK \x1B[0m tests.conftest.rule_severity_low
 \x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes
 \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error
-Score: \x1B[1m100.0\x1B[0m
+Score: \x1B[1m10.0\x1B[0m
 """
 )
@@ -34,4 +33,4 @@ def test_human_readable_formatter_project(capsys):
     formatter = HumanReadableFormatter()
     formatter.project_evaluated(10.0)
     stdout = capsys.readouterr().out
-    assert stdout == "Project score: \x1B[1m100.0\x1B[0m\n"
+    assert stdout == "Project score: \x1B[1m10.0\x1B[0m\n"
36 changes: 18 additions & 18 deletions tests/test_scoring.py
@@ -8,44 +8,44 @@
 def test_scorer_model_no_results():
     """Test scorer with a model without any result."""
     scorer = Scorer()
-    assert scorer.score_model({}) == 1.0
+    assert scorer.score_model({}) == 10.0


 def test_scorer_model_severity_low(rule_severity_low):
     """Test scorer with a model and one low severity rule."""
     scorer = Scorer()
-    assert scorer.score_model({rule_severity_low: None}) == 1.0
-    assert scorer.score_model({rule_severity_low: Exception()}) == 1.0
+    assert scorer.score_model({rule_severity_low: None}) == 10.0
+    assert scorer.score_model({rule_severity_low: Exception()}) == 10.0
     assert (
         round(scorer.score_model({rule_severity_low: RuleViolation("error")}), 2)
-        == 0.67
+        == 6.67
     )


 def test_scorer_model_severity_medium(rule_severity_medium):
     """Test scorer with a model and one medium severity rule."""
     scorer = Scorer()
-    assert scorer.score_model({rule_severity_medium: None}) == 1.0
-    assert scorer.score_model({rule_severity_medium: Exception()}) == 1.0
+    assert scorer.score_model({rule_severity_medium: None}) == 10.0
+    assert scorer.score_model({rule_severity_medium: Exception()}) == 10.0
     assert (
         round(scorer.score_model({rule_severity_medium: RuleViolation("error")}), 2)
-        == 0.33
+        == 3.33
     )


 def test_scorer_model_severity_high(rule_severity_high):
     """Test scorer with a model and one high severity rule."""
     scorer = Scorer()
-    assert scorer.score_model({rule_severity_high: None}) == 1.0
-    assert scorer.score_model({rule_severity_high: Exception()}) == 1.0
+    assert scorer.score_model({rule_severity_high: None}) == 10.0
+    assert scorer.score_model({rule_severity_high: Exception()}) == 10.0
     assert scorer.score_model({rule_severity_high: RuleViolation("error")}) == 0.0


 def test_scorer_model_severity_critical(rule_severity_critical):
     """Test scorer with a model and one critical severity rule."""
     scorer = Scorer()
-    assert scorer.score_model({rule_severity_critical: None}) == 1.0
-    assert scorer.score_model({rule_severity_critical: Exception()}) == 1.0
+    assert scorer.score_model({rule_severity_critical: None}) == 10.0
+    assert scorer.score_model({rule_severity_critical: Exception()}) == 10.0
     assert scorer.score_model({rule_severity_critical: RuleViolation("error")}) == 0.0
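
These single-rule expectations follow directly from the 0-10 scaling: one rule contributes its points out of 3, times 10. Assuming the per-severity violation points inferred above:

    assert round(2 / 3 * 10, 2) == 6.67   # low-severity violation
    assert round(1 / 3 * 10, 2) == 3.33   # medium-severity violation
    assert 0 / 3 * 10 == 0.0              # high or critical violation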


@@ -78,7 +78,7 @@ def test_scorer_model_multiple_rules(
             ),
             2,
         )
-        == 0.67
+        == 6.67
     )

     assert (
@@ -92,7 +92,7 @@ def test_scorer_model_multiple_rules(
             ),
             2,
         )
-        == 0.78
+        == 7.78
     )

     assert (
@@ -106,31 +106,31 @@ def test_scorer_model_multiple_rules(
             ),
             2,
         )
-        == 0.89
+        == 8.89
     )
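
The three multi-rule values fit the same formula with three rules and one violation at a time, out of 9 available points. The collapsed arguments are an inference from the values, assuming one low, one medium, and one high severity rule:

    assert round(6 / 9 * 10, 2) == 6.67   # high violated:   3 + 3 + 0 = 6
    assert round(7 / 9 * 10, 2) == 7.78   # medium violated: 3 + 1 + 3 = 7
    assert round(8 / 9 * 10, 2) == 8.89   # low violated:    2 + 3 + 3 = 8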


 def test_scorer_aggregate_empty():
     """Test scorer aggregation with no results."""
     scorer = Scorer()
-    assert scorer.score_aggregate_models([]) == 1.0
+    assert scorer.score_aggregate_models([]) == 10.0


 def test_scorer_aggregate_with_0():
     """Test scorer aggregation with one result that is 0.0."""
     scorer = Scorer()
-    assert scorer.score_aggregate_models([1.0, 0.5, 0.0]) == 0.0
+    assert scorer.score_aggregate_models([1.0, 5.0, 0.0]) == 0.0


 def test_scorer_aggregate_single():
     """Test scorer aggregation with a single results."""
     scorer = Scorer()
-    assert scorer.score_aggregate_models([0.42]) == 0.42
+    assert scorer.score_aggregate_models([4.2]) == 4.2


 def test_scorer_aggregate_multiple():
     """Test scorer aggregation with multiple results."""
     scorer = Scorer()
     assert scorer.score_aggregate_models([1.0, 1.0, 1.0]) == 1.0
     assert scorer.score_aggregate_models([0.0, 0.0, 0.0]) == 0.0
-    assert scorer.score_aggregate_models([0.1, 0.74, 0.42]) == 0.42
+    assert scorer.score_aggregate_models([1.0, 7.4, 4.2]) == 4.2
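
Taken together, these assertions pin the aggregation down: an empty list scores a perfect 10.0, any single 0.0 drags the aggregate to 0.0, and otherwise the result is the plain mean, e.g. (1.0 + 7.4 + 4.2) / 3 = 4.2. A sketch consistent with that behaviour (an inference from the tests, not the repository's code):

    def score_aggregate_models(scores: list[float]) -> float:
        """Aggregate model scores on the 0-10 scale."""
        if not scores:
            return 10.0   # no models: perfect score
        if 0.0 in scores:
            return 0.0    # any hard failure zeroes the aggregate
        return sum(scores) / len(scores)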
