diff --git a/src/dbt_score/formatters/human_readable_formatter.py b/src/dbt_score/formatters/human_readable_formatter.py
index 2867b62..dccf086 100644
--- a/src/dbt_score/formatters/human_readable_formatter.py
+++ b/src/dbt_score/formatters/human_readable_formatter.py
@@ -35,9 +35,9 @@ def model_evaluated(
                 )
             else:
                 print(f"{self.indent}{self.label_error} {rule.source()}: {result!s}")
-        print(f"Score: {self.bold(str(round(score * 10, 1)))}")
+        print(f"Score: {self.bold(str(round(score, 1)))}")
         print()

     def project_evaluated(self, score: float) -> None:
         """Callback when a project has been evaluated."""
-        print(f"Project score: {self.bold(str(round(score * 10, 1)))}")
+        print(f"Project score: {self.bold(str(round(score, 1)))}")
diff --git a/src/dbt_score/scoring.py b/src/dbt_score/scoring.py
index 34a83f0..cb8dbe9 100644
--- a/src/dbt_score/scoring.py
+++ b/src/dbt_score/scoring.py
@@ -20,7 +20,7 @@ class Scorer:
     score_cardinality = 3

     min_score = 0.0
-    max_score = 1.0
+    max_score = 10.0

     def score_model(self, model_results: ModelResultsType) -> float:
         """Compute the score of a given model."""
@@ -43,7 +43,7 @@ def score_model(self, model_results: ModelResultsType) -> float:
                 else self.score_cardinality  # 3/3
                 for rule, result in model_results.items()
             ]
-        ) / (self.score_cardinality * len(model_results))
+        ) / (self.score_cardinality * len(model_results)) * self.max_score

     def score_aggregate_models(self, scores: list[float]) -> float:
         """Compute the score of a list of models."""
diff --git a/tests/formatters/test_human_readable_formatter.py b/tests/formatters/test_human_readable_formatter.py
index 80ceb0b..ae5e3cf 100644
--- a/tests/formatters/test_human_readable_formatter.py
+++ b/tests/formatters/test_human_readable_formatter.py
@@ -1,6 +1,5 @@
 """Unit tests for the human readable formatter."""

-
 from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter
 from dbt_score.rule import RuleViolation

@@ -23,7 +22,7 @@ def test_human_readable_formatter_model(
 \x1B[1;32mOK  \x1B[0m tests.conftest.rule_severity_low
 \x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes
 \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error
-Score: \x1B[1m100.0\x1B[0m
+Score: \x1B[1m10.0\x1B[0m

 """
     )
@@ -34,4 +33,4 @@ def test_human_readable_formatter_project(capsys):
     formatter = HumanReadableFormatter()
     formatter.project_evaluated(10.0)
     stdout = capsys.readouterr().out
-    assert stdout == "Project score: \x1B[1m100.0\x1B[0m\n"
+    assert stdout == "Project score: \x1B[1m10.0\x1B[0m\n"
diff --git a/tests/test_scoring.py b/tests/test_scoring.py
index 112eb40..cb50eaa 100644
--- a/tests/test_scoring.py
+++ b/tests/test_scoring.py
@@ -8,44 +8,44 @@ def test_scorer_model_no_results():
     """Test scorer with a model without any result."""
     scorer = Scorer()
-    assert scorer.score_model({}) == 1.0
+    assert scorer.score_model({}) == 10.0


 def test_scorer_model_severity_low(rule_severity_low):
     """Test scorer with a model and one low severity rule."""
     scorer = Scorer()
-    assert scorer.score_model({rule_severity_low: None}) == 1.0
-    assert scorer.score_model({rule_severity_low: Exception()}) == 1.0
+    assert scorer.score_model({rule_severity_low: None}) == 10.0
+    assert scorer.score_model({rule_severity_low: Exception()}) == 10.0
     assert (
         round(scorer.score_model({rule_severity_low: RuleViolation("error")}), 2)
-        == 0.67
+        == 6.67
     )


 def test_scorer_model_severity_medium(rule_severity_medium):
     """Test scorer with a model and one medium severity rule."""
     scorer = Scorer()
-    assert scorer.score_model({rule_severity_medium: None}) == 1.0
-    assert scorer.score_model({rule_severity_medium: Exception()}) == 1.0
+    assert scorer.score_model({rule_severity_medium: None}) == 10.0
+    assert scorer.score_model({rule_severity_medium: Exception()}) == 10.0
     assert (
         round(scorer.score_model({rule_severity_medium: RuleViolation("error")}), 2)
-        == 0.33
+        == 3.33
     )


 def test_scorer_model_severity_high(rule_severity_high):
     """Test scorer with a model and one high severity rule."""
     scorer = Scorer()
-    assert scorer.score_model({rule_severity_high: None}) == 1.0
-    assert scorer.score_model({rule_severity_high: Exception()}) == 1.0
+    assert scorer.score_model({rule_severity_high: None}) == 10.0
+    assert scorer.score_model({rule_severity_high: Exception()}) == 10.0
     assert scorer.score_model({rule_severity_high: RuleViolation("error")}) == 0.0


 def test_scorer_model_severity_critical(rule_severity_critical):
     """Test scorer with a model and one critical severity rule."""
     scorer = Scorer()
-    assert scorer.score_model({rule_severity_critical: None}) == 1.0
-    assert scorer.score_model({rule_severity_critical: Exception()}) == 1.0
+    assert scorer.score_model({rule_severity_critical: None}) == 10.0
+    assert scorer.score_model({rule_severity_critical: Exception()}) == 10.0
     assert scorer.score_model({rule_severity_critical: RuleViolation("error")}) == 0.0


@@ -78,7 +78,7 @@ def test_scorer_model_multiple_rules(
             ),
             2,
         )
-        == 0.67
+        == 6.67
     )

     assert (
@@ -92,7 +92,7 @@ def test_scorer_model_multiple_rules(
             ),
             2,
         )
-        == 0.78
+        == 7.78
     )

     assert (
@@ -106,26 +106,26 @@ def test_scorer_model_multiple_rules(
             ),
             2,
         )
-        == 0.89
+        == 8.89
     )


 def test_scorer_aggregate_empty():
     """Test scorer aggregation with no results."""
     scorer = Scorer()
-    assert scorer.score_aggregate_models([]) == 1.0
+    assert scorer.score_aggregate_models([]) == 10.0


 def test_scorer_aggregate_with_0():
     """Test scorer aggregation with one result that is 0.0."""
     scorer = Scorer()
-    assert scorer.score_aggregate_models([1.0, 0.5, 0.0]) == 0.0
+    assert scorer.score_aggregate_models([1.0, 5.0, 0.0]) == 0.0


 def test_scorer_aggregate_single():
     """Test scorer aggregation with a single results."""
     scorer = Scorer()
-    assert scorer.score_aggregate_models([0.42]) == 0.42
+    assert scorer.score_aggregate_models([4.2]) == 4.2


 def test_scorer_aggregate_multiple():
@@ -133,4 +133,4 @@ def test_scorer_aggregate_multiple():
     scorer = Scorer()
     assert scorer.score_aggregate_models([1.0, 1.0, 1.0]) == 1.0
     assert scorer.score_aggregate_models([0.0, 0.0, 0.0]) == 0.0
-    assert scorer.score_aggregate_models([0.1, 0.74, 0.42]) == 0.42
+    assert scorer.score_aggregate_models([1.0, 7.4, 4.2]) == 4.2
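Reviewer note: below is a minimal, self-contained sketch of the scoring arithmetic this patch moves to. It is not the dbt_score implementation; the Severity enum, RuleViolation class and score_model function here are illustrative stand-ins (the real logic lives in Scorer.score_model above). Each rule contributes 0, 1, 2 or 3 points out of score_cardinality = 3, and the fraction is now scaled by max_score = 10.0 instead of 1.0, which is why the formatter no longer multiplies the score by 10.

# Hypothetical, self-contained sketch of the patched scoring math. The names
# Severity, RuleViolation and score_model are stand-ins, not the dbt_score classes.
from enum import Enum


class Severity(Enum):
    LOW = 1
    MEDIUM = 2
    HIGH = 3
    CRITICAL = 4


class RuleViolation(Exception):
    """Stand-in for a rule that evaluated to a violation."""


SCORE_CARDINALITY = 3  # mirrors Scorer.score_cardinality
MAX_SCORE = 10.0       # mirrors Scorer.max_score (was 1.0 before this patch)


def score_model(model_results: dict) -> float:
    """Score a model on a 0-10 scale, following the patched formula."""
    if not model_results:
        return MAX_SCORE  # no rules evaluated -> perfect score
    points = 0
    for severity, result in model_results.items():
        if not isinstance(result, RuleViolation):
            points += SCORE_CARDINALITY  # rule passed or errored: 3/3
        elif severity is Severity.LOW:
            points += 2                  # low severity violation: 2/3
        elif severity is Severity.MEDIUM:
            points += 1                  # medium severity violation: 1/3
        else:
            points += 0                  # high/critical violation: 0/3 in this sketch
    return points / (SCORE_CARDINALITY * len(model_results)) * MAX_SCORE


# Reproduces the single-rule expectations from tests/test_scoring.py:
assert score_model({}) == 10.0
assert score_model({Severity.LOW: None}) == 10.0
assert round(score_model({Severity.LOW: RuleViolation("error")}), 2) == 6.67
assert round(score_model({Severity.MEDIUM: RuleViolation("error")}), 2) == 3.33
assert score_model({Severity.CRITICAL: RuleViolation("error")}) == 0.0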