diff --git a/CHANGELOG.md b/CHANGELOG.md index 6384542..64aa500 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,3 +17,4 @@ and this project adheres to - Linting and scoring functionality for dbt models. - Configuration through `pyproject.toml`. - Default rules in `dbt_score.rules.generic`. +- Badges for project and model evaluation. diff --git a/docs/configuration.md b/docs/configuration.md index 7d00b0d..e7fb6f6 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -17,6 +17,15 @@ below: rule_namespaces = ["dbt_score.rules", "dbt_score_rules", "custom_rules"] disabled_rules = ["dbt_score.rules.generic.columns_have_description"] +[tool.dbt-score.badges] +first.threshold = 10.0 +first.icon = "🥇" +second.threshold = 8.0 +second.icon = "🥈" +third.threshold = 6.0 +third.icon = "🥉" +wip.icon = "🏗️" + [tool.dbt-score.rules."dbt_score.rules.generic.sql_has_reasonable_number_of_lines"] severity = 1 max_lines = 300 @@ -38,6 +47,27 @@ The following options can be set in the `pyproject.toml` file: if not included here. - `disabled_rules`: A list of rules to disable. +#### Badges configuration + +```toml +[tool.dbt-score.badges] +``` + +Four badges can be configured: `first`, `second`, `third` and `wip`. Each badge +can be configured with the following option: + +- `icon`: The icon to use for the badge. A string that will be displayed in the + output, e.g. `🥇`. + +All badges except `wip` can be configured with the following option: + +- `threshold`: The threshold for the badge. A decimal number between `0.0` and + `10.0` that will be used to compare to the score. The threshold is the minimum + score required for a model to be rewarded with a certain badge. + +The default values can be found in the +[BadgeConfig](/reference/config/#dbt_score.config.BadgeConfig). + #### Rule configuration ```toml diff --git a/docs/index.md b/docs/index.md index da15bd3..043a536 100644 --- a/docs/index.md +++ b/docs/index.md @@ -12,15 +12,16 @@ encourage) good practices. ```shell $ dbt-score lint -Model customers +🥇 customers (score: 10.0) OK dbt_score.rules.generic.has_description - WARN (medium) dbt_score.rules.generic.has_owner: Model lacks an owner. + OK dbt_score.rules.generic.has_owner OK dbt_score.rules.generic.sql_has_reasonable_number_of_lines -Score: 6.7 +Project score: 10.0 🥇 ``` -In this example, `dbt-score` reports a warning: the model `customers` does not -declare an owner. Hence, it doesn't score the maximum value of `10`. +In this example, the model `customers` scores the maximum value of `10.0` as it +passes all the rules. It is also awarded a gold medal because of its perfect +score. 
## Philosophy diff --git a/src/dbt_score/config.py b/src/dbt_score/config.py index a7d1fc5..f15e2a9 100644 --- a/src/dbt_score/config.py +++ b/src/dbt_score/config.py @@ -2,6 +2,7 @@ import logging import tomllib +from dataclasses import dataclass, field, replace from pathlib import Path from typing import Any, Final @@ -12,12 +13,45 @@ DEFAULT_CONFIG_FILE = "pyproject.toml" +@dataclass +class Badge: + """Badge object.""" + + icon: str + threshold: float + + +@dataclass +class BadgeConfig: + """Configuration for badges.""" + + third: Badge = field(default_factory=lambda: Badge("🥉", 6.0)) + second: Badge = field(default_factory=lambda: Badge("🥈", 8.0)) + first: Badge = field(default_factory=lambda: Badge("🥇", 10.0)) + wip: Badge = field(default_factory=lambda: Badge("🚧", 0.0)) + + def validate(self) -> None: + """Validate the badge configuration.""" + if not (self.first.threshold > self.second.threshold > self.third.threshold): + raise ValueError("Invalid badge thresholds.") + if self.first.threshold > 10.0: # noqa: PLR2004 [magic-value-comparison] + raise ValueError("first threshold must be 10.0 or lower.") + if self.third.threshold < 0.0: + raise ValueError("third threshold must be 0.0 or higher.") + if self.wip.threshold != 0.0: + raise AttributeError("wip badge cannot have a threshold configuration.") + + class Config: """Configuration for dbt-score.""" _main_section: Final[str] = "tool.dbt-score" - _options: Final[list[str]] = ["rule_namespaces", "disabled_rules"] - _rules_section: Final[str] = f"{_main_section}.rules" + _options: Final[list[str]] = [ + "rule_namespaces", + "disabled_rules", + ] + _rules_section: Final[str] = "rules" + _badges_section: Final[str] = "badges" def __init__(self) -> None: """Initialize the Config object.""" @@ -25,6 +59,7 @@ def __init__(self) -> None: self.disabled_rules: list[str] = [] self.rules_config: dict[str, RuleConfig] = {} self.config_file: Path | None = None + self.badge_config: BadgeConfig = BadgeConfig() def set_option(self, option: str, value: Any) -> None: """Set an option in the config.""" @@ -37,7 +72,8 @@ def _load_toml_file(self, file: str) -> None: tools = toml_data.get("tool", {}) dbt_score_config = tools.get("dbt-score", {}) - rules_config = dbt_score_config.pop("rules", {}) + rules_config = dbt_score_config.pop(self._rules_section, {}) + badge_config = dbt_score_config.pop(self._badges_section, {}) # Main configuration for option, value in dbt_score_config.items(): @@ -50,6 +86,25 @@ def _load_toml_file(self, file: str) -> None: f"Option {option} in {self._main_section} not supported." ) + # Badge configuration + for name, config in badge_config.items(): + try: + default_config = getattr(self.badge_config, name) + updated_config = replace(default_config, **config) + setattr(self.badge_config, name, updated_config) + except AttributeError as e: + options = list(BadgeConfig.__annotations__.keys()) + raise AttributeError(f"Config only accepts badges: {options}.") from e + except TypeError as e: + options = list(Badge.__annotations__.keys()) + if name == "wip": + options.remove("threshold") + raise AttributeError( + f"Badge {name}: config only accepts {options}." 
+ ) from e + + self.badge_config.validate() + # Rule configuration self.rules_config = { name: RuleConfig.from_dict(config) for name, config in rules_config.items() diff --git a/src/dbt_score/evaluation.py b/src/dbt_score/evaluation.py index 284f1db..41b5e0f 100644 --- a/src/dbt_score/evaluation.py +++ b/src/dbt_score/evaluation.py @@ -8,7 +8,7 @@ from dbt_score.models import ManifestLoader, Model from dbt_score.rule import Rule, RuleViolation from dbt_score.rule_registry import RuleRegistry -from dbt_score.scoring import Scorer +from dbt_score.scoring import Score, Scorer # The results of a given model are stored in a dictionary, mapping rules to either: # - None if there was no issue @@ -44,10 +44,10 @@ def __init__( self.results: dict[Model, ModelResultsType] = {} # For each model, its computed score - self.scores: dict[Model, float] = {} + self.scores: dict[Model, Score] = {} # The aggregated project score - self.project_score: float + self.project_score: Score def evaluate(self) -> None: """Evaluate all rules.""" diff --git a/src/dbt_score/formatters/__init__.py b/src/dbt_score/formatters/__init__.py index 1734b33..09466e1 100644 --- a/src/dbt_score/formatters/__init__.py +++ b/src/dbt_score/formatters/__init__.py @@ -5,6 +5,8 @@ import typing from abc import ABC, abstractmethod +from dbt_score.scoring import Score + if typing.TYPE_CHECKING: from dbt_score.evaluation import ModelResultsType from dbt_score.models import ManifestLoader, Model @@ -19,12 +21,12 @@ def __init__(self, manifest_loader: ManifestLoader): @abstractmethod def model_evaluated( - self, model: Model, results: ModelResultsType, score: float + self, model: Model, results: ModelResultsType, score: Score ) -> None: """Callback when a model has been evaluated.""" raise NotImplementedError @abstractmethod - def project_evaluated(self, score: float) -> None: + def project_evaluated(self, score: Score) -> None: """Callback when a project has been evaluated.""" raise NotImplementedError diff --git a/src/dbt_score/formatters/human_readable_formatter.py b/src/dbt_score/formatters/human_readable_formatter.py index dccf086..594b0ba 100644 --- a/src/dbt_score/formatters/human_readable_formatter.py +++ b/src/dbt_score/formatters/human_readable_formatter.py @@ -5,6 +5,7 @@ from dbt_score.formatters import Formatter from dbt_score.models import Model from dbt_score.rule import RuleViolation +from dbt_score.scoring import Score class HumanReadableFormatter(Formatter): @@ -21,10 +22,12 @@ def bold(text: str) -> str: return f"\033[1m{text}\033[0m" def model_evaluated( - self, model: Model, results: ModelResultsType, score: float + self, model: Model, results: ModelResultsType, score: Score ) -> None: """Callback when a model has been evaluated.""" - print(f"Model {self.bold(model.name)}") + print( + f"{score.badge} {self.bold(model.name)} (score: {round(score.value, 1)!s})" + ) for rule, result in results.items(): if result is None: print(f"{self.indent}{self.label_ok} {rule.source()}") @@ -35,9 +38,8 @@ def model_evaluated( ) else: print(f"{self.indent}{self.label_error} {rule.source()}: {result!s}") - print(f"Score: {self.bold(str(round(score, 1)))}") print() - def project_evaluated(self, score: float) -> None: + def project_evaluated(self, score: Score) -> None: """Callback when a project has been evaluated.""" - print(f"Project score: {self.bold(str(round(score, 1)))}") + print(f"Project score: {self.bold(str(round(score.value, 1)))} {score.badge}") diff --git a/src/dbt_score/formatters/manifest_formatter.py 
b/src/dbt_score/formatters/manifest_formatter.py index e6d3c45..a1914cb 100644 --- a/src/dbt_score/formatters/manifest_formatter.py +++ b/src/dbt_score/formatters/manifest_formatter.py @@ -7,6 +7,7 @@ from dbt_score.evaluation import ModelResultsType from dbt_score.formatters import Formatter from dbt_score.models import Model +from dbt_score.scoring import Score class ManifestFormatter(Formatter): @@ -14,18 +15,19 @@ class ManifestFormatter(Formatter): def __init__(self, *args: Any, **kwargs: Any) -> None: """Instantiate a manifest formatter.""" - self._model_scores: dict[str, float] = {} + self._model_scores: dict[str, Score] = {} super().__init__(*args, **kwargs) def model_evaluated( - self, model: Model, results: ModelResultsType, score: float + self, model: Model, results: ModelResultsType, score: Score ) -> None: """Callback when a model has been evaluated.""" self._model_scores[model.unique_id] = score - def project_evaluated(self, score: float) -> None: + def project_evaluated(self, score: Score) -> None: """Callback when a project has been evaluated.""" manifest = copy.copy(self._manifest_loader.raw_manifest) - for model_id, score in self._model_scores.items(): - manifest["nodes"][model_id]["meta"]["score"] = round(score, 1) + for model_id, model_score in self._model_scores.items(): + manifest["nodes"][model_id]["meta"]["score"] = model_score.value + manifest["nodes"][model_id]["meta"]["badge"] = model_score.badge print(json.dumps(manifest, indent=2)) diff --git a/src/dbt_score/lint.py b/src/dbt_score/lint.py index ff6ca5a..78e6c04 100644 --- a/src/dbt_score/lint.py +++ b/src/dbt_score/lint.py @@ -30,7 +30,7 @@ def lint_dbt_project( formatters = {"plain": HumanReadableFormatter, "manifest": ManifestFormatter} formatter = formatters[format](manifest_loader=manifest_loader) - scorer = Scorer() + scorer = Scorer(config) evaluation = Evaluation( rule_registry=rule_registry, diff --git a/src/dbt_score/scoring.py b/src/dbt_score/scoring.py index 12fa2f1..abf9363 100644 --- a/src/dbt_score/scoring.py +++ b/src/dbt_score/scoring.py @@ -3,12 +3,23 @@ from __future__ import annotations import typing +from dataclasses import dataclass + +from dbt_score.config import Config if typing.TYPE_CHECKING: from dbt_score.evaluation import ModelResultsType from dbt_score.rule import RuleViolation, Severity +@dataclass +class Score: + """Class representing a score.""" + + value: float + badge: str + + class Scorer: """Logic for computing scores.""" @@ -22,20 +33,24 @@ class Scorer: min_score = 0.0 max_score = 10.0 - def score_model(self, model_results: ModelResultsType) -> float: + def __init__(self, config: Config) -> None: + """Create a Scorer object.""" + self._config = config + + def score_model(self, model_results: ModelResultsType) -> Score: """Compute the score of a given model.""" if len(model_results) == 0: # No rule? 
No problem - return self.max_score - if any( + score = self.max_score + elif any( rule.severity == Severity.CRITICAL and isinstance(result, RuleViolation) for rule, result in model_results.items() ): # If there's a CRITICAL violation, the score is 0 - return self.min_score + score = self.min_score else: # Otherwise, the score is the weighted average (by severity) of the results - return ( + score = ( sum( [ # The more severe the violation, the more points are lost @@ -49,11 +64,28 @@ def score_model(self, model_results: ModelResultsType) -> float: * self.max_score ) - def score_aggregate_models(self, scores: list[float]) -> float: + return Score(score, self._badge(score)) + + def score_aggregate_models(self, scores: list[Score]) -> Score: """Compute the score of a list of models.""" - if 0.0 in scores: + actual_scores = [s.value for s in scores] + if 0.0 in actual_scores: # Any model with a CRITICAL violation makes the project score 0 - return self.min_score - if len(scores) == 0: - return self.max_score - return sum(scores) / len(scores) + score = Score(self.min_score, self._badge(self.min_score)) + elif len(actual_scores) == 0: + score = Score(self.max_score, self._badge(self.max_score)) + else: + average_score = sum(actual_scores) / len(actual_scores) + score = Score(average_score, self._badge(average_score)) + return score + + def _badge(self, score: float) -> str: + """Compute the badge of a given score.""" + if score >= self._config.badge_config.first.threshold: + return self._config.badge_config.first.icon + elif score >= self._config.badge_config.second.threshold: + return self._config.badge_config.second.icon + elif score >= self._config.badge_config.third.threshold: + return self._config.badge_config.third.icon + else: + return self._config.badge_config.wip.icon diff --git a/tests/formatters/test_human_readable_formatter.py b/tests/formatters/test_human_readable_formatter.py index 0cd0460..f86ee7c 100644 --- a/tests/formatters/test_human_readable_formatter.py +++ b/tests/formatters/test_human_readable_formatter.py @@ -4,6 +4,7 @@ from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter from dbt_score.rule import Rule, RuleViolation +from dbt_score.scoring import Score def test_human_readable_formatter_model( @@ -21,15 +22,14 @@ def test_human_readable_formatter_model( rule_severity_medium: Exception("Oh noes"), rule_severity_critical: RuleViolation("Error"), } - formatter.model_evaluated(model1, results, 10.0) + formatter.model_evaluated(model1, results, Score(10.0, "🥇")) stdout = capsys.readouterr().out assert ( stdout - == """Model \x1B[1mmodel1\x1B[0m + == """🥇 \x1B[1mmodel1\x1B[0m (score: 10.0) \x1B[1;32mOK \x1B[0m tests.conftest.rule_severity_low \x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error -Score: \x1B[1m10.0\x1B[0m """ ) @@ -38,6 +38,6 @@ def test_human_readable_formatter_model( def test_human_readable_formatter_project(capsys, manifest_loader): """Ensure the formatter has the correct output after project evaluation.""" formatter = HumanReadableFormatter(manifest_loader=manifest_loader) - formatter.project_evaluated(10.0) + formatter.project_evaluated(Score(10.0, "🥇")) stdout = capsys.readouterr().out - assert stdout == "Project score: \x1B[1m10.0\x1B[0m\n" + assert stdout == "Project score: \x1B[1m10.0\x1B[0m 🥇\n" diff --git a/tests/formatters/test_manifest_formatter.py b/tests/formatters/test_manifest_formatter.py index 7403e5f..fa2bd0f 100644 --- 
a/tests/formatters/test_manifest_formatter.py +++ b/tests/formatters/test_manifest_formatter.py @@ -5,6 +5,7 @@ from dbt_score.formatters.manifest_formatter import ManifestFormatter from dbt_score.rule import Rule, RuleViolation +from dbt_score.scoring import Score def test_manifest_formatter_model( @@ -22,7 +23,7 @@ def test_manifest_formatter_model( rule_severity_medium: Exception("Oh noes"), rule_severity_critical: RuleViolation("Error"), } - formatter.model_evaluated(model1, results, 10.0) + formatter.model_evaluated(model1, results, Score(10.0, "🥇")) stdout = capsys.readouterr().out assert stdout == "" @@ -49,10 +50,12 @@ def test_manifest_formatter_project( rule_severity_critical: None, } - formatter.model_evaluated(model1, result1, 5.0) - formatter.model_evaluated(model2, result2, 10.0) - formatter.project_evaluated(7.5) + formatter.model_evaluated(model1, result1, Score(5.0, "🚧")) + formatter.model_evaluated(model2, result2, Score(10.0, "🥇")) + formatter.project_evaluated(Score(7.5, "🥉")) stdout = capsys.readouterr().out new_manifest = json.loads(stdout) assert new_manifest["nodes"]["model.package.model1"]["meta"]["score"] == 5.0 + assert new_manifest["nodes"]["model.package.model1"]["meta"]["badge"] == "🚧" assert new_manifest["nodes"]["model.package.model2"]["meta"]["score"] == 10.0 + assert new_manifest["nodes"]["model.package.model2"]["meta"]["badge"] == "🥇" diff --git a/tests/resources/pyproject.toml b/tests/resources/pyproject.toml index ee8d495..b317d90 100644 --- a/tests/resources/pyproject.toml +++ b/tests/resources/pyproject.toml @@ -2,6 +2,18 @@ rule_namespaces = ["foo", "tests"] disabled_rules = ["foo.foo", "tests.bar"] +[tool.dbt-score.badges] +wip.icon = "🏗️" +third.threshold = 6.5 +third.icon = "3️⃣" + +[tool.dbt-score.badges.second] +threshold = 7.5 +icon = "2️⃣" + +[tool.dbt-score.badges.first] +threshold = 9.5 +icon = "1️⃣" [tool.dbt-score.rules."foo.bar"] severity=4 diff --git a/tests/test_config.py b/tests/test_config.py index e85730e..d70ad73 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1,8 +1,9 @@ """Tests for the module config_parser.""" from pathlib import Path +from unittest.mock import patch import pytest -from dbt_score.config import Config +from dbt_score.config import BadgeConfig, Config from dbt_score.rule import RuleConfig, Severity @@ -17,6 +18,13 @@ def test_load_valid_toml_file(valid_config_path): config.rules_config["tests.rules.example.rule_test_example"].severity == Severity.CRITICAL ) + assert config.badge_config.third.threshold == 6.5 + assert config.badge_config.second.threshold == 7.5 + assert config.badge_config.first.threshold == 9.5 + assert config.badge_config.wip.icon == "🏗️" + assert config.badge_config.third.icon == "3️⃣" + assert config.badge_config.second.icon == "2️⃣" + assert config.badge_config.first.icon == "1️⃣" def test_load_invalid_toml_file(caplog, invalid_config_path): @@ -37,6 +45,59 @@ def test_invalid_rule_config(rule_severity_low): rule_severity_low(config) +@patch("dbt_score.config.open") +def test_load_invalid_badge_config(mock_open): + """Test that an invalid badge config raises an exception.""" + config = Config() + + with patch("dbt_score.config.tomllib.load") as mock_load: + mock_load.return_value = { + "tool": {"dbt-score": {"badges": {"foo": {"threshold": 1.0}}}} + } + with pytest.raises(AttributeError, match="Config only accepts badges:"): + config._load_toml_file("foo") + + mock_load.return_value = { + "tool": {"dbt-score": {"badges": {"first": {"foo": "bar"}}}} + } + with 
pytest.raises(AttributeError, match="Badge first: config only accepts"): + config._load_toml_file("foo") + + +def test_invalid_badge_thresholds(): + """Test that invalid badge thresholds raise an exception.""" + badge_config = BadgeConfig() + badge_config.third.threshold = 9.0 + badge_config.second.threshold = 8.0 + badge_config.first.threshold = 10.0 + with pytest.raises(ValueError, match="Invalid badge thresholds."): + badge_config.validate() + + badge_config = BadgeConfig() + badge_config.third.threshold = 8.0 + badge_config.second.threshold = 9.5 + badge_config.first.threshold = 9.5 + with pytest.raises(ValueError, match="Invalid badge thresholds."): + badge_config.validate() + + badge_config = BadgeConfig() + badge_config.third.threshold = -1 + with pytest.raises(ValueError, match="third threshold must be 0.0 or higher."): + badge_config.validate() + + badge_config = BadgeConfig() + badge_config.first.threshold = 11.0 + with pytest.raises(ValueError, match="first threshold must be 10.0 or lower."): + badge_config.validate() + + badge_config = BadgeConfig() + badge_config.wip.threshold = 1.0 + with pytest.raises( + AttributeError, match="wip badge cannot have a threshold configuration." + ): + badge_config.validate() + + def test_valid_rule_config(valid_config_path, rule_with_config): """Test that a valid rule config can be loaded.""" config = RuleConfig(severity=Severity(4), config={"model_name": "baz"}) diff --git a/tests/test_scoring.py b/tests/test_scoring.py index cb50eaa..e47a493 100644 --- a/tests/test_scoring.py +++ b/tests/test_scoring.py @@ -2,71 +2,76 @@ from dbt_score.rule import RuleViolation -from dbt_score.scoring import Scorer +from dbt_score.scoring import Score, Scorer -def test_scorer_model_no_results(): +def test_scorer_model_no_results(default_config): """Test scorer with a model without any result.""" - scorer = Scorer() - assert scorer.score_model({}) == 10.0 + scorer = Scorer(config=default_config) + assert scorer.score_model({}).value == 10.0 -def test_scorer_model_severity_low(rule_severity_low): +def test_scorer_model_severity_low(default_config, rule_severity_low): """Test scorer with a model and one low severity rule.""" - scorer = Scorer() - assert scorer.score_model({rule_severity_low: None}) == 10.0 - assert scorer.score_model({rule_severity_low: Exception()}) == 10.0 + scorer = Scorer(config=default_config) + assert scorer.score_model({rule_severity_low: None}).value == 10.0 + assert scorer.score_model({rule_severity_low: Exception()}).value == 10.0 assert ( - round(scorer.score_model({rule_severity_low: RuleViolation("error")}), 2) + round(scorer.score_model({rule_severity_low: RuleViolation("error")}).value, 2) == 6.67 ) -def test_scorer_model_severity_medium(rule_severity_medium): +def test_scorer_model_severity_medium(default_config, rule_severity_medium): """Test scorer with a model and one medium severity rule.""" - scorer = Scorer() - assert scorer.score_model({rule_severity_medium: None}) == 10.0 - assert scorer.score_model({rule_severity_medium: Exception()}) == 10.0 + scorer = Scorer(config=default_config) + assert scorer.score_model({rule_severity_medium: None}).value == 10.0 + assert scorer.score_model({rule_severity_medium: Exception()}).value == 10.0 assert ( - round(scorer.score_model({rule_severity_medium: RuleViolation("error")}), 2) + round( + scorer.score_model({rule_severity_medium: RuleViolation("error")}).value, 2 + ) == 3.33 ) -def test_scorer_model_severity_high(rule_severity_high): +def 
test_scorer_model_severity_high(default_config, rule_severity_high): """Test scorer with a model and one high severity rule.""" - scorer = Scorer() - assert scorer.score_model({rule_severity_high: None}) == 10.0 - assert scorer.score_model({rule_severity_high: Exception()}) == 10.0 - assert scorer.score_model({rule_severity_high: RuleViolation("error")}) == 0.0 + scorer = Scorer(config=default_config) + assert scorer.score_model({rule_severity_high: None}).value == 10.0 + assert scorer.score_model({rule_severity_high: Exception()}).value == 10.0 + assert scorer.score_model({rule_severity_high: RuleViolation("error")}).value == 0.0 -def test_scorer_model_severity_critical(rule_severity_critical): +def test_scorer_model_severity_critical(default_config, rule_severity_critical): """Test scorer with a model and one critical severity rule.""" - scorer = Scorer() - assert scorer.score_model({rule_severity_critical: None}) == 10.0 - assert scorer.score_model({rule_severity_critical: Exception()}) == 10.0 - assert scorer.score_model({rule_severity_critical: RuleViolation("error")}) == 0.0 + scorer = Scorer(config=default_config) + assert scorer.score_model({rule_severity_critical: None}).value == 10.0 + assert scorer.score_model({rule_severity_critical: Exception()}).value == 10.0 + assert ( + scorer.score_model({rule_severity_critical: RuleViolation("error")}).value + == 0.0 + ) def test_scorer_model_severity_critical_overwrites( - rule_severity_low, rule_severity_critical + default_config, rule_severity_low, rule_severity_critical ): """Test scorer with a model and multiple rules including one critical.""" - scorer = Scorer() + scorer = Scorer(config=default_config) assert ( scorer.score_model( {rule_severity_low: None, rule_severity_critical: RuleViolation("error")} - ) + ).value == 0.0 ) def test_scorer_model_multiple_rules( - rule_severity_low, rule_severity_medium, rule_severity_high + default_config, rule_severity_low, rule_severity_medium, rule_severity_high ): """Test scorer with a model and multiple rules.""" - scorer = Scorer() + scorer = Scorer(config=default_config) assert ( round( scorer.score_model( @@ -75,7 +80,7 @@ def test_scorer_model_multiple_rules( rule_severity_medium: Exception(), rule_severity_high: RuleViolation("error"), } - ), + ).value, 2, ) == 6.67 @@ -89,7 +94,7 @@ def test_scorer_model_multiple_rules( rule_severity_medium: RuleViolation("error"), rule_severity_high: None, } - ), + ).value, 2, ) == 7.78 @@ -103,34 +108,59 @@ def test_scorer_model_multiple_rules( rule_severity_medium: Exception(), rule_severity_high: None, } - ), + ).value, 2, ) == 8.89 ) -def test_scorer_aggregate_empty(): +def test_scorer_aggregate_empty(default_config): """Test scorer aggregation with no results.""" - scorer = Scorer() - assert scorer.score_aggregate_models([]) == 10.0 + scorer = Scorer(config=default_config) + assert scorer.score_aggregate_models([]).value == 10.0 -def test_scorer_aggregate_with_0(): +def test_scorer_aggregate_with_0(default_config): """Test scorer aggregation with one result that is 0.0.""" - scorer = Scorer() - assert scorer.score_aggregate_models([1.0, 5.0, 0.0]) == 0.0 + scorer = Scorer(config=default_config) + scores = [Score(1.0, ""), Score(5.0, ""), Score(0.0, "")] + assert scorer.score_aggregate_models(scores).value == 0.0 -def test_scorer_aggregate_single(): +def test_scorer_aggregate_single(default_config): """Test scorer aggregation with a single results.""" - scorer = Scorer() - assert scorer.score_aggregate_models([4.2]) == 4.2 + scorer = 
Scorer(config=default_config) + assert scorer.score_aggregate_models([Score(4.2, "")]).value == 4.2 -def test_scorer_aggregate_multiple(): +def test_scorer_aggregate_multiple(default_config): """Test scorer aggregation with multiple results.""" - scorer = Scorer() - assert scorer.score_aggregate_models([1.0, 1.0, 1.0]) == 1.0 - assert scorer.score_aggregate_models([0.0, 0.0, 0.0]) == 0.0 - assert scorer.score_aggregate_models([1.0, 7.4, 4.2]) == 4.2 + scorer = Scorer(config=default_config) + assert ( + scorer.score_aggregate_models( + [Score(1.0, ""), Score(1.0, ""), Score(1.0, "")] + ).value + == 1.0 + ) + assert ( + scorer.score_aggregate_models( + [Score(1.0, ""), Score(7.4, ""), Score(4.2, "")] + ).value + == 4.2 + ) + assert ( + scorer.score_aggregate_models( + [Score(0.0, ""), Score(0.0, ""), Score(0.0, "")] + ).value + == 0.0 + ) + + +def test_scorer_badge(default_config): + """Test scorer awarding a badge.""" + scorer = Scorer(config=default_config) + assert scorer._badge(10.0) == scorer._config.badge_config.first.icon + assert scorer._badge(8.0) == scorer._config.badge_config.second.icon + assert scorer._badge(7.0) == scorer._config.badge_config.third.icon + assert scorer._badge(1.0) == scorer._config.badge_config.wip.icon
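
A minimal usage sketch (not part of the diff) of the new Score/badge plumbing introduced above, assuming only what this change ships: the default BadgeConfig thresholds (first >= 10.0, second >= 8.0, third >= 6.0) and the Scorer now taking a Config:

# Sketch: how score_model / score_aggregate_models derive badges with a default Config.
from dbt_score.config import Config
from dbt_score.scoring import Score, Scorer

scorer = Scorer(config=Config())  # Config() starts from the default BadgeConfig

# No rules evaluated -> maximum score, which meets the `first` threshold.
print(scorer.score_model({}))  # Score(value=10.0, badge='🥇')

# Aggregation averages the model scores and re-derives the badge for the project.
print(scorer.score_aggregate_models([Score(9.0, "🥈"), Score(5.0, "🚧")]))
# Score(value=7.0, badge='🥉'), since 7.0 clears the `third` threshold of 6.0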