From 00522f3a6fafdcb690e3026b53a0635bb5d4f52b Mon Sep 17 00:00:00 2001
From: Matthieu Caneill
Date: Tue, 30 Apr 2024 15:34:54 +0200
Subject: [PATCH 1/2] Always compute score between 0 and 10

---
 .../formatters/human_readable_formatter.py |  4 +--
 src/dbt_score/scoring.py                   |  6 ++--
 .../test_human_readable_formatter.py       |  5 ++-
 tests/test_scoring.py                      | 36 +++++++++----------
 4 files changed, 25 insertions(+), 26 deletions(-)

diff --git a/src/dbt_score/formatters/human_readable_formatter.py b/src/dbt_score/formatters/human_readable_formatter.py
index 2867b62..dccf086 100644
--- a/src/dbt_score/formatters/human_readable_formatter.py
+++ b/src/dbt_score/formatters/human_readable_formatter.py
@@ -35,9 +35,9 @@ def model_evaluated(
             )
         else:
             print(f"{self.indent}{self.label_error} {rule.source()}: {result!s}")
-        print(f"Score: {self.bold(str(round(score * 10, 1)))}")
+        print(f"Score: {self.bold(str(round(score, 1)))}")
         print()
 
     def project_evaluated(self, score: float) -> None:
         """Callback when a project has been evaluated."""
-        print(f"Project score: {self.bold(str(round(score * 10, 1)))}")
+        print(f"Project score: {self.bold(str(round(score, 1)))}")
diff --git a/src/dbt_score/scoring.py b/src/dbt_score/scoring.py
index 34a83f0..da72218 100644
--- a/src/dbt_score/scoring.py
+++ b/src/dbt_score/scoring.py
@@ -20,7 +20,7 @@ class Scorer:
     score_cardinality = 3
 
     min_score = 0.0
-    max_score = 1.0
+    max_score = 10.0
 
     def score_model(self, model_results: ModelResultsType) -> float:
         """Compute the score of a given model."""
@@ -43,7 +43,7 @@ def score_model(self, model_results: ModelResultsType) -> float:
                 else self.score_cardinality  # 3/3
                 for rule, result in model_results.items()
             ]
-        ) / (self.score_cardinality * len(model_results))
+        ) / (self.score_cardinality * len(model_results)) * self.max_score
 
     def score_aggregate_models(self, scores: list[float]) -> float:
         """Compute the score of a list of models."""
@@ -52,4 +52,4 @@ def score_aggregate_models(self, scores: list[float]) -> float:
             return self.min_score
         if len(scores) == 0:
             return self.max_score
-        return sum(scores) / len(scores)
+        return sum(scores) / len(scores) * self.max_score
diff --git a/tests/formatters/test_human_readable_formatter.py b/tests/formatters/test_human_readable_formatter.py
index 80ceb0b..ae5e3cf 100644
--- a/tests/formatters/test_human_readable_formatter.py
+++ b/tests/formatters/test_human_readable_formatter.py
@@ -1,6 +1,5 @@
 """Unit tests for the human readable formatter."""
 
-
 from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter
 from dbt_score.rule import RuleViolation
 
@@ -23,7 +22,7 @@ def test_human_readable_formatter_model(
 \x1B[1;32mOK  \x1B[0m tests.conftest.rule_severity_low
 \x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes
 \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error
-Score: \x1B[1m100.0\x1B[0m
+Score: \x1B[1m10.0\x1B[0m
 """
     )
 
@@ -34,4 +33,4 @@ def test_human_readable_formatter_project(capsys):
     formatter = HumanReadableFormatter()
     formatter.project_evaluated(10.0)
     stdout = capsys.readouterr().out
-    assert stdout == "Project score: \x1B[1m100.0\x1B[0m\n"
+    assert stdout == "Project score: \x1B[1m10.0\x1B[0m\n"
diff --git a/tests/test_scoring.py b/tests/test_scoring.py
index 112eb40..2c75845 100644
--- a/tests/test_scoring.py
+++ b/tests/test_scoring.py
@@ -8,44 +8,44 @@ def test_scorer_model_no_results():
     """Test scorer with a model without any result."""
     scorer = Scorer()
-    assert scorer.score_model({}) == 1.0
+    assert scorer.score_model({}) == 10.0
 
 
 def test_scorer_model_severity_low(rule_severity_low):
     """Test scorer with a model and one low severity rule."""
     scorer = Scorer()
-    assert scorer.score_model({rule_severity_low: None}) == 1.0
-    assert scorer.score_model({rule_severity_low: Exception()}) == 1.0
+    assert scorer.score_model({rule_severity_low: None}) == 10.0
+    assert scorer.score_model({rule_severity_low: Exception()}) == 10.0
     assert (
         round(scorer.score_model({rule_severity_low: RuleViolation("error")}), 2)
-        == 0.67
+        == 6.67
     )
 
 
 def test_scorer_model_severity_medium(rule_severity_medium):
     """Test scorer with a model and one medium severity rule."""
     scorer = Scorer()
-    assert scorer.score_model({rule_severity_medium: None}) == 1.0
-    assert scorer.score_model({rule_severity_medium: Exception()}) == 1.0
+    assert scorer.score_model({rule_severity_medium: None}) == 10.0
+    assert scorer.score_model({rule_severity_medium: Exception()}) == 10.0
     assert (
         round(scorer.score_model({rule_severity_medium: RuleViolation("error")}), 2)
-        == 0.33
+        == 3.33
     )
 
 
 def test_scorer_model_severity_high(rule_severity_high):
     """Test scorer with a model and one high severity rule."""
     scorer = Scorer()
-    assert scorer.score_model({rule_severity_high: None}) == 1.0
-    assert scorer.score_model({rule_severity_high: Exception()}) == 1.0
+    assert scorer.score_model({rule_severity_high: None}) == 10.0
+    assert scorer.score_model({rule_severity_high: Exception()}) == 10.0
     assert scorer.score_model({rule_severity_high: RuleViolation("error")}) == 0.0
 
 
 def test_scorer_model_severity_critical(rule_severity_critical):
     """Test scorer with a model and one critical severity rule."""
     scorer = Scorer()
-    assert scorer.score_model({rule_severity_critical: None}) == 1.0
-    assert scorer.score_model({rule_severity_critical: Exception()}) == 1.0
+    assert scorer.score_model({rule_severity_critical: None}) == 10.0
+    assert scorer.score_model({rule_severity_critical: Exception()}) == 10.0
     assert scorer.score_model({rule_severity_critical: RuleViolation("error")}) == 0.0
 
 
@@ -78,7 +78,7 @@ def test_scorer_model_multiple_rules(
             ),
             2,
         )
-        == 0.67
+        == 6.67
     )
 
     assert (
@@ -92,7 +92,7 @@ def test_scorer_model_multiple_rules(
             ),
             2,
         )
-        == 0.78
+        == 7.78
     )
 
     assert (
@@ -106,14 +106,14 @@ def test_scorer_model_multiple_rules(
             ),
             2,
         )
-        == 0.89
+        == 8.89
     )
 
 
 def test_scorer_aggregate_empty():
     """Test scorer aggregation with no results."""
     scorer = Scorer()
-    assert scorer.score_aggregate_models([]) == 1.0
+    assert scorer.score_aggregate_models([]) == 10.0
 
 
 def test_scorer_aggregate_with_0():
@@ -125,12 +125,12 @@ def test_scorer_aggregate_with_0():
 def test_scorer_aggregate_single():
     """Test scorer aggregation with a single results."""
     scorer = Scorer()
-    assert scorer.score_aggregate_models([0.42]) == 0.42
+    assert scorer.score_aggregate_models([0.42]) == 4.2
 
 
 def test_scorer_aggregate_multiple():
     """Test scorer aggregation with multiple results."""
     scorer = Scorer()
-    assert scorer.score_aggregate_models([1.0, 1.0, 1.0]) == 1.0
+    assert scorer.score_aggregate_models([1.0, 1.0, 1.0]) == 10.0
     assert scorer.score_aggregate_models([0.0, 0.0, 0.0]) == 0.0
-    assert scorer.score_aggregate_models([0.1, 0.74, 0.42]) == 0.42
+    assert scorer.score_aggregate_models([0.1, 0.74, 0.42]) == 4.2
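
Note on the model-score arithmetic in the patch above (an illustrative sketch, not code from the patch set): score_model sums a per-rule credit out of score_cardinality and scales the ratio by max_score, so results now land on a 0-10 scale. The single-rule tests imply a credit of 3/3 for a pass, 2/3 for a low-severity violation, 1/3 for medium and 0/3 for high.

    # Standalone sketch of the patched score_model arithmetic; the constants
    # mirror Scorer.score_cardinality and Scorer.max_score from the diff above,
    # and the credit values are inferred from the updated tests.
    SCORE_CARDINALITY = 3
    MAX_SCORE = 10.0

    def sketch_model_score(credits: list[int]) -> float:
        # credits holds one value per evaluated rule: 3 = pass,
        # 2 / 1 / 0 = low / medium / high severity violation.
        return sum(credits) / (SCORE_CARDINALITY * len(credits)) * MAX_SCORE

    print(round(sketch_model_score([2]), 2))        # 6.67, one low-severity violation
    print(round(sketch_model_score([3, 3, 1]), 2))  # 7.78, two passes plus one medium violation

At this point score_aggregate_models still multiplies the already 0-10 model scores by max_score a second time; the follow-up patch below removes that extra factor.
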
From 4f27640196f997e5d0a8903ee4bf124f50e639f0 Mon Sep 17 00:00:00 2001
From: Matthieu Caneill
Date: Thu, 2 May 2024 11:48:03 +0200
Subject: [PATCH 2/2] Fix aggregate scores

---
 src/dbt_score/scoring.py | 2 +-
 tests/test_scoring.py    | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/dbt_score/scoring.py b/src/dbt_score/scoring.py
index da72218..cb8dbe9 100644
--- a/src/dbt_score/scoring.py
+++ b/src/dbt_score/scoring.py
@@ -52,4 +52,4 @@ def score_aggregate_models(self, scores: list[float]) -> float:
             return self.min_score
         if len(scores) == 0:
             return self.max_score
-        return sum(scores) / len(scores) * self.max_score
+        return sum(scores) / len(scores)
diff --git a/tests/test_scoring.py b/tests/test_scoring.py
index 2c75845..cb50eaa 100644
--- a/tests/test_scoring.py
+++ b/tests/test_scoring.py
@@ -119,18 +119,18 @@ def test_scorer_aggregate_empty():
 def test_scorer_aggregate_with_0():
     """Test scorer aggregation with one result that is 0.0."""
     scorer = Scorer()
-    assert scorer.score_aggregate_models([1.0, 0.5, 0.0]) == 0.0
+    assert scorer.score_aggregate_models([1.0, 5.0, 0.0]) == 0.0
 
 
 def test_scorer_aggregate_single():
     """Test scorer aggregation with a single results."""
     scorer = Scorer()
-    assert scorer.score_aggregate_models([0.42]) == 4.2
+    assert scorer.score_aggregate_models([4.2]) == 4.2
 
 
 def test_scorer_aggregate_multiple():
     """Test scorer aggregation with multiple results."""
     scorer = Scorer()
-    assert scorer.score_aggregate_models([1.0, 1.0, 1.0]) == 10.0
+    assert scorer.score_aggregate_models([1.0, 1.0, 1.0]) == 1.0
     assert scorer.score_aggregate_models([0.0, 0.0, 0.0]) == 0.0
-    assert scorer.score_aggregate_models([0.1, 0.74, 0.42]) == 4.2
+    assert scorer.score_aggregate_models([1.0, 7.4, 4.2]) == 4.2
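
With both patches applied, project aggregation stays on the same 0-10 scale: score_aggregate_models takes the plain mean of the model scores, with the two short-circuits visible in the context lines above (a 0.0 model score pins the result to min_score, an empty list yields max_score). A small illustrative sketch of that behaviour, not the dbt_score API itself:

    # Mirrors the aggregate rule exercised by the updated tests;
    # 0.0 and 10.0 stand in for Scorer.min_score and Scorer.max_score.
    def sketch_aggregate(scores: list[float]) -> float:
        if 0.0 in scores:       # any model scored 0.0 pins the project score
            return 0.0
        if len(scores) == 0:    # no models evaluated
            return 10.0
        return sum(scores) / len(scores)

    print(sketch_aggregate([1.0, 7.4, 4.2]))  # 4.2, the mean asserted by the last test
    print(sketch_aggregate([1.0, 5.0, 0.0]))  # 0.0
    print(sketch_aggregate([]))               # 10.0
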