feat: add fail_project_under and fail_any_model_under options #58

Merged · 9 commits · Jun 20, 2024
8 changes: 7 additions & 1 deletion docs/configuration.md
@@ -17,6 +17,8 @@ below:
rule_namespaces = ["dbt_score.rules", "dbt_score_rules", "custom_rules"]
disabled_rules = ["dbt_score.rules.generic.columns_have_description"]
inject_cwd_in_python_path = true
fail_project_under = 7.5
fail_any_model_under = 8.0

[tool.dbt-score.badges]
first.threshold = 10.0
@@ -47,6 +49,10 @@ The following options can be set in the `pyproject.toml` file:
this setting, that the default rules are in `dbt_score.rules` and are disabled
if not included here.
- `disabled_rules`: A list of rules to disable.
- `fail_project_under`: If the project score is below this value, the command
  will fail with return code 1.
- `fail_any_model_under`: If any model scores below this value, the command
  will fail with return code 1.

#### Badges configuration

@@ -94,5 +100,5 @@ Many configuration options can also be set via the command line. To understand
how to configure `dbt-score` from the command line:

```bash
dbt score lint --help
dbt-score lint --help
```
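The two options documented above are also exposed as CLI flags (see the `cli.py` changes below). As a rough sketch of how a CI step might gate on them, assuming `dbt-score` is installed and a manifest has already been generated with `dbt parse` (the path and threshold values are illustrative):

```python
import subprocess
import sys

# Run the linter with both thresholds; dbt-score exits with code 1 when a
# threshold is not met, and that exit code is what CI should propagate.
result = subprocess.run(
    [
        "dbt-score", "lint",
        "--manifest", "target/manifest.json",   # illustrative manifest path
        "--fail_project_under", "7.5",
        "--fail_any_model_under", "6.9",
    ]
)
sys.exit(result.returncode)
```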
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -96,7 +96,7 @@ max-complexity = 10
convention = "google"

[tool.ruff.lint.pylint]
max-args = 8
max-args = 9

[tool.ruff.lint.per-file-ignores]
"tests/**/*.py" = [
37 changes: 36 additions & 1 deletion src/dbt_score/cli.py
@@ -78,6 +78,20 @@ def cli() -> None:
is_flag=True,
default=False,
)
@click.option(
"--fail_project_under",
help="Fail if the project score is under this value",
type=float,
is_flag=False,
default=None,
)
@click.option(
"--fail_any_model_under",
help="Fail if any model is under this value",
type=float,
is_flag=False,
default=None,
)
@click.pass_context
def lint(
ctx: click.Context,
@@ -87,6 +101,8 @@ def lint(
disabled_rule: list[str],
manifest: Path,
run_dbt_parse: bool,
fail_project_under: float,
fail_any_model_under: float,
) -> None:
"""Lint dbt models metadata."""
manifest_provided = (
@@ -102,14 +118,33 @@ def lint(
config.overload({"rule_namespaces": namespace})
if disabled_rule:
config.overload({"disabled_rules": disabled_rule})
if fail_project_under:
config.overload({"fail_project_under": fail_project_under})
if fail_any_model_under:
config.overload({"fail_any_model_under": fail_any_model_under})

if run_dbt_parse:
dbt_parse()

try:
lint_dbt_project(
evaluation = lint_dbt_project(
manifest_path=manifest, config=config, format=format, select=select
)

if evaluation.min_model_score < config.fail_any_model_under:
logger.error(
f"Individual model score {round(evaluation.min_model_score,1)} is less "
f"than `fail_any_model_under` setting of {config.fail_any_model_under}"
)
ctx.exit(1)

if evaluation.project_score.value < config.fail_project_under:
logger.error(
f"Project score {round(evaluation.project_score.value,1)} is less "
f"than `fail_project_under` setting of {config.fail_project_under}"
)
ctx.exit(1)

except FileNotFoundError:
logger.error(
"dbt's manifest.json could not be found. If you're in a dbt project, be "
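Condensed, the gating logic added here boils down to two comparisons against the configured thresholds. A standalone sketch with hypothetical values, not a real `Evaluation`:

```python
def should_fail(project_score: float, min_model_score: float,
                fail_project_under: float, fail_any_model_under: float) -> bool:
    """Mirror the exit-code decision: True means the command exits with code 1."""
    # Any single model below the per-model threshold fails the run.
    if min_model_score < fail_any_model_under:
        return True
    # Otherwise the aggregated project score is checked against its threshold.
    if project_score < fail_project_under:
        return True
    return False


# With both thresholds at their 0.0 defaults, neither branch can trigger,
# so runs without configuration keep exiting 0 as before.
assert should_fail(7.2, 6.5, fail_project_under=7.5, fail_any_model_under=6.0) is True
assert should_fail(9.0, 8.0, fail_project_under=0.0, fail_any_model_under=0.0) is False
```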
4 changes: 4 additions & 0 deletions src/dbt_score/config.py
@@ -55,6 +55,8 @@ class Config:
"rule_namespaces",
"disabled_rules",
"inject_cwd_in_python_path",
"fail_project_under",
"fail_any_model_under",
]
_rules_section: Final[str] = "rules"
_badges_section: Final[str] = "badges"
@@ -67,6 +69,8 @@ def __init__(self) -> None:
self.rules_config: dict[str, RuleConfig] = {}
self.config_file: Path | None = None
self.badge_config: BadgeConfig = BadgeConfig()
self.fail_project_under: float = 0.0
self.fail_any_model_under: float = 0.0

def set_option(self, option: str, value: Any) -> None:
"""Set an option in the config."""
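For reference, a minimal sketch of how the CLI flags flow into these new attributes via `overload`, assuming the `Config` API shown in this diff:

```python
from dbt_score.config import Config

config = Config()
# The new attributes default to 0.0, which effectively disables both checks.
assert config.fail_project_under == 0.0 and config.fail_any_model_under == 0.0

# The CLI passes explicit flags through Config.overload, overriding the
# defaults (and any value loaded from pyproject.toml).
config.overload({"fail_project_under": 7.5, "fail_any_model_under": 6.9})
assert config.fail_project_under == 7.5
```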
4 changes: 4 additions & 0 deletions src/dbt_score/evaluation.py
@@ -49,6 +49,9 @@ def __init__(
# The aggregated project score
self.project_score: Score

# The minimum model score, starts at the maximum of 10
self.min_model_score: float = 10

def evaluate(self) -> None:
"""Evaluate all rules."""
rules = self._rule_registry.rules.values()
@@ -67,6 +70,7 @@ def evaluate(self) -> None:
self._formatter.model_evaluated(
model, self.results[model], self.scores[model]
)
self.min_model_score = min(self.min_model_score, self.scores[model].value)

# Compute score for project
self.project_score = self._scorer.score_aggregate_models(
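The new `min_model_score` bookkeeping is just a running minimum over the per-model scores, seeded at the scale maximum of 10. A tiny illustration with made-up scores:

```python
model_scores = [8.4, 9.1, 6.9]   # illustrative per-model score values

min_model_score = 10.0           # seeded at the maximum of the 0-10 scale
for value in model_scores:
    min_model_score = min(min_model_score, value)

assert min_model_score == 6.9    # the value cli.py compares against fail_any_model_under
```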
4 changes: 3 additions & 1 deletion src/dbt_score/lint.py
@@ -18,7 +18,7 @@ def lint_dbt_project(
config: Config,
format: Literal["plain", "manifest", "ascii"],
select: Iterable[str] | None = None,
) -> None:
) -> Evaluation:
"""Lint dbt manifest."""
if not manifest_path.exists():
raise FileNotFoundError(f"Manifest not found at {manifest_path}.")
@@ -44,3 +44,5 @@
scorer=scorer,
)
evaluation.evaluate()

return evaluation
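Since `lint_dbt_project` now returns the `Evaluation`, the scores can also be inspected programmatically. A minimal sketch, assuming a manifest generated by `dbt parse` (the path is illustrative):

```python
from pathlib import Path

from dbt_score.config import Config
from dbt_score.lint import lint_dbt_project

config = Config()
evaluation = lint_dbt_project(
    manifest_path=Path("target/manifest.json"),  # illustrative path
    config=config,
    format="plain",
)

# The same values the CLI compares against the fail_* thresholds.
print(evaluation.project_score.value, evaluation.min_model_score)
```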
2 changes: 2 additions & 0 deletions tests/resources/pyproject.toml
@@ -1,6 +1,8 @@
[tool.dbt-score]
rule_namespaces = ["foo", "tests"]
disabled_rules = ["foo.foo", "tests.bar"]
fail_project_under = 7.5
fail_any_model_under = 6.9

[tool.dbt-score.badges]
wip.icon = "🏗️"
39 changes: 37 additions & 2 deletions tests/test_cli.py
@@ -1,5 +1,5 @@
"""Test the CLI."""

import logging
from unittest.mock import patch

from click.testing import CliRunner
@@ -20,7 +20,13 @@ def test_lint_existing_manifest(manifest_path):
"""Test lint with an existing manifest."""
with patch("dbt_score.cli.Config._load_toml_file"):
runner = CliRunner()
result = runner.invoke(lint, ["--manifest", manifest_path])
result = runner.invoke(
lint,
[
"--manifest",
manifest_path,
],
)

assert "model1" in result.output
assert "model2" in result.output
@@ -45,3 +51,32 @@ def test_lint_non_existing_manifest(caplog):

assert result.exit_code == 2
assert "dbt's manifest.json could not be found" in caplog.text


def test_fail_project_under(manifest_path):
"""Test `fail_project_under`."""
with patch("dbt_score.cli.Config._load_toml_file"):
runner = CliRunner()
result = runner.invoke(
lint, ["--manifest", manifest_path, "--fail_project_under", "10.0"]
)

assert "model1" in result.output
assert "model2" in result.output
assert result.exit_code == 1


def test_fail_any_model_under(manifest_path, caplog):
"""Test `fail_any_model_under`."""
caplog.set_level(logging.ERROR)

with patch("dbt_score.cli.Config._load_toml_file"):
runner = CliRunner()
result = runner.invoke(
lint, ["--manifest", manifest_path, "--fail_any_model_under", "10.0"]
)

assert "model1" in result.output
assert "model2" in result.output
assert "Individual model score" in caplog.text
assert result.exit_code == 1
1 change: 1 addition & 0 deletions tests/test_config.py
@@ -25,6 +25,7 @@ def test_load_valid_toml_file(valid_config_path):
assert config.badge_config.third.icon == "3️⃣"
assert config.badge_config.second.icon == "2️⃣"
assert config.badge_config.first.icon == "1️⃣"
assert config.fail_project_under == 7.5


def test_load_invalid_toml_file(caplog, invalid_config_path):
29 changes: 23 additions & 6 deletions tests/test_evaluation.py
@@ -7,6 +7,7 @@
from dbt_score.models import ManifestLoader
from dbt_score.rule import RuleViolation
from dbt_score.rule_registry import RuleRegistry
from dbt_score.scoring import Score


def test_evaluation_low_medium_high(
@@ -28,6 +29,9 @@ def test_evaluation_low_medium_high(
rule_registry._add_rule(rule_severity_high)
rule_registry._add_rule(rule_error)

# Ensure we get a valid Score object from the Mock
mock_scorer.score_model.return_value = Score(10, "🥇")

evaluation = Evaluation(
rule_registry=rule_registry,
manifest_loader=manifest_loader,
@@ -66,12 +70,17 @@ def test_evaluation_critical(
rule_registry._add_rule(rule_severity_low)
rule_registry._add_rule(rule_severity_critical)

mock_formatter = Mock()
mock_scorer = Mock()
mock_scorer.score_model.return_value = Score(10, "🥇")

evaluation = Evaluation(
rule_registry=rule_registry,
manifest_loader=manifest_loader,
formatter=Mock(),
scorer=Mock(),
formatter=mock_formatter,
scorer=mock_scorer,
)

evaluation.evaluate()

model2 = manifest_loader.models[1]
@@ -85,11 +94,15 @@ def test_evaluation_no_rule(manifest_path, default_config):

rule_registry = RuleRegistry(default_config)

mock_formatter = Mock()
mock_scorer = Mock()
mock_scorer.score_model.return_value = Score(10, "🥇")

evaluation = Evaluation(
rule_registry=rule_registry,
manifest_loader=manifest_loader,
formatter=Mock(),
scorer=Mock(),
formatter=mock_formatter,
scorer=mock_scorer,
)
evaluation.evaluate()

@@ -148,11 +161,15 @@ def test_evaluation_rule_with_config(
rule_registry = RuleRegistry(config)
rule_registry._add_rule(rule_with_config)

mock_formatter = Mock()
mock_scorer = Mock()
mock_scorer.score_model.return_value = Score(10, "🥇")

evaluation = Evaluation(
rule_registry=rule_registry,
manifest_loader=manifest_loader,
formatter=Mock(),
scorer=Mock(),
formatter=mock_formatter,
scorer=mock_scorer,
)
evaluation.evaluate()

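The reason the mocked scorers now return a concrete `Score` is that `Evaluation.evaluate()` feeds each model's score value into `min()`, and a bare `Mock` attribute would not support that comparison. A small illustration, mirroring the test setup:

```python
from unittest.mock import Mock

from dbt_score.scoring import Score

mock_scorer = Mock()
mock_scorer.score_model.return_value = Score(10, "🥇")

# The running minimum in Evaluation.evaluate() needs a real numeric value.
score = mock_scorer.score_model()
assert min(10.0, score.value) == 10.0
```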