From c5ed67fe2c504cd54e3e6d80c4e75c5bed4c63f4 Mon Sep 17 00:00:00 2001
From: Matthieu Caneill
Date: Thu, 2 Jan 2025 18:12:32 +0100
Subject: [PATCH] Create debug mode for writing rules (#91)

Executing `dbt-score lint` with the `--debug`/`-d` option starts a debugger
when an error occurs while evaluating a rule against a node.
---
 CHANGELOG.md                |  2 ++
 docs/create_rules.md        | 19 +++++++++++++++++++
 src/dbt_score/cli.py        | 13 ++++++++++++-
 src/dbt_score/config.py     |  2 ++
 src/dbt_score/evaluation.py |  9 +++++++++
 src/dbt_score/lint.py       |  1 +
 tests/test_evaluation.py    | 11 ++++++++++-
 7 files changed, 55 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7bb3ff1..699109c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,8 @@ and this project adheres to
 
 ## [Unreleased]
 
+- Add a debug mode to help write new rules. (#91)
+
 ## [0.9.0] - 2024-12-19
 
 - Documenting support for python 3.13. (#86)
diff --git a/docs/create_rules.md b/docs/create_rules.md
index dc6c6df..736e4e3 100644
--- a/docs/create_rules.md
+++ b/docs/create_rules.md
@@ -170,3 +170,22 @@ def models_in_x_follow_naming_standard(model: Model) -> RuleViolation | None:
     if some_regex_fails(model.name):
         return RuleViolation("Invalid model name.")
 ```
+
+### Debugging rules
+
+When writing new rules, or investigating failing ones, you can use the debug
+mode, which automatically drops you into a debugger whenever an exception is
+raised during rule evaluation.
+
+Run dbt-score with the debugger:
+
+```shell
+dbt-score lint --debug
+# --debug and -d are equivalent
+```
+
+The debugger is the standard `pdb`; see
+[its available commands](https://docs.python.org/3/library/pdb.html#debugger-commands).
+
+Naturally, you're free to use your debugger of choice; this option exists to
+enable quick debugging in any environment.
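To try the debug mode end to end, it helps to have a rule that is guaranteed to blow up. Below is a minimal, hypothetical sketch (not part of this patch): `always_breaks` is a made-up rule name, and the `from dbt_score import ...` line follows the rule API shown in the `create_rules.md` context above. With such a rule discoverable in your project, `dbt-score lint --debug` should print the traceback and drop into `pdb` at the `raise`.

```python
# Hypothetical rule that always fails, useful only for exercising --debug.
from dbt_score import Model, RuleViolation, rule


@rule
def always_breaks(model: Model) -> RuleViolation | None:
    """A deliberately broken rule: evaluating it raises an exception."""
    raise ValueError(f"boom while evaluating {model.name}")
```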
diff --git a/src/dbt_score/cli.py b/src/dbt_score/cli.py
index 2e7e6b9..01421af 100644
--- a/src/dbt_score/cli.py
+++ b/src/dbt_score/cli.py
@@ -106,8 +106,16 @@ def cli() -> None:
     is_flag=False,
     default="failing-rules",
 )
+@click.option(
+    "--debug",
+    "-d",
+    help="Jump into a debugger when a rule fails to evaluate.",
+    type=bool,
+    is_flag=True,
+    default=False,
+)
 @click.pass_context
-def lint(  # noqa: PLR0913, C901
+def lint(  # noqa: PLR0912, PLR0913, C901
     ctx: click.Context,
     format: Literal["plain", "manifest", "ascii"],
     select: tuple[str],
@@ -118,6 +126,7 @@ def lint(  # noqa: PLR0913, C901
     fail_project_under: float,
     fail_any_item_under: float,
     show: Literal["all", "failing-items", "failing-rules"],
+    debug: bool,
 ) -> None:
     """Lint dbt metadata."""
     manifest_provided = (
@@ -139,6 +148,8 @@ def lint(  # noqa: PLR0913, C901
         config.overload({"fail_any_item_under": fail_any_item_under})
     if show:
         config.overload({"show": show})
+    if debug:
+        config.overload({"debug": debug})
 
     try:
         if run_dbt_parse:
diff --git a/src/dbt_score/config.py b/src/dbt_score/config.py
index 0d7aa99..9afaf7b 100644
--- a/src/dbt_score/config.py
+++ b/src/dbt_score/config.py
@@ -58,6 +58,7 @@ class Config:
         "fail_project_under",
         "fail_any_item_under",
         "show",
+        "debug",
     ]
     _rules_section: Final[str] = "rules"
     _badges_section: Final[str] = "badges"
@@ -73,6 +74,7 @@ def __init__(self) -> None:
         self.fail_project_under: float = 5.0
         self.fail_any_item_under: float = 5.0
         self.show: str = "failing-rules"
+        self.debug: bool = False
 
     def set_option(self, option: str, value: Any) -> None:
         """Set an option in the config."""
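For context on the CLI change above: `--debug`/`-d` is a plain click boolean flag, so passing it yields `debug=True`, which is then copied onto the `Config` object via `config.overload`. A self-contained sketch of that flag behaviour follows; it is a toy command, not dbt-score's actual CLI.

```python
# Toy click command illustrating the boolean flag added to `lint` above.
import click


@click.command()
@click.option(
    "--debug",
    "-d",
    is_flag=True,
    default=False,
    help="Jump into a debugger when a rule fails to evaluate.",
)
def lint(debug: bool) -> None:
    """Print whether debug mode is enabled."""
    click.echo(f"debug={debug}")


if __name__ == "__main__":
    lint()
```

Running this toy script with `-d` prints `debug=True`; without the flag it prints `debug=False`.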
""" self._rule_registry = rule_registry self._manifest_loader = manifest_loader self._formatter = formatter self._scorer = scorer + self._config = config # For each evaluable, its results self.results: dict[Evaluable, EvaluableResultsType] = {} @@ -67,6 +73,9 @@ def evaluate(self) -> None: result = rule.evaluate(evaluable, **rule.config) self.results[evaluable][rule.__class__] = result except Exception as e: + if self._config.debug: + traceback.print_exc() + pdb.post_mortem() self.results[evaluable][rule.__class__] = e self.scores[evaluable] = self._scorer.score_evaluable( diff --git a/src/dbt_score/lint.py b/src/dbt_score/lint.py index 9a4dfcb..8071d17 100644 --- a/src/dbt_score/lint.py +++ b/src/dbt_score/lint.py @@ -44,6 +44,7 @@ def lint_dbt_project( manifest_loader=manifest_loader, formatter=formatter, scorer=scorer, + config=config, ) evaluation.evaluate() diff --git a/tests/test_evaluation.py b/tests/test_evaluation.py index 73573f4..f32fa8d 100644 --- a/tests/test_evaluation.py +++ b/tests/test_evaluation.py @@ -37,6 +37,7 @@ def test_evaluation_low_medium_high( manifest_loader=manifest_loader, formatter=mock_formatter, scorer=mock_scorer, + config=default_config, ) evaluation.evaluate() @@ -79,6 +80,7 @@ def test_evaluation_critical( manifest_loader=manifest_loader, formatter=mock_formatter, scorer=mock_scorer, + config=default_config, ) evaluation.evaluate() @@ -103,6 +105,7 @@ def test_evaluation_no_rule(manifest_path, default_config): manifest_loader=manifest_loader, formatter=mock_formatter, scorer=mock_scorer, + config=default_config, ) evaluation.evaluate() @@ -122,6 +125,7 @@ def test_evaluation_no_model(manifest_empty_path, rule_severity_low, default_con manifest_loader=manifest_loader, formatter=Mock(), scorer=Mock(), + config=default_config, ) evaluation.evaluate() @@ -140,6 +144,7 @@ def test_evaluation_no_model_no_rule(manifest_empty_path, default_config): manifest_loader=manifest_loader, formatter=Mock(), scorer=Mock(), + config=default_config, ) evaluation.evaluate() @@ -148,7 +153,7 @@ def test_evaluation_no_model_no_rule(manifest_empty_path, default_config): def test_evaluation_rule_with_config( - manifest_path, rule_with_config, valid_config_path + manifest_path, rule_with_config, valid_config_path, default_config ): """Test rule evaluation with parameters.""" manifest_loader = ManifestLoader(manifest_path) @@ -170,6 +175,7 @@ def test_evaluation_rule_with_config( manifest_loader=manifest_loader, formatter=mock_formatter, scorer=mock_scorer, + config=default_config, ) evaluation.evaluate() @@ -201,6 +207,7 @@ def test_evaluation_with_filter( manifest_loader=manifest_loader, formatter=mock_formatter, scorer=mock_scorer, + config=default_config, ) evaluation.evaluate() @@ -241,6 +248,7 @@ def test_evaluation_with_class_filter( manifest_loader=manifest_loader, formatter=mock_formatter, scorer=mock_scorer, + config=default_config, ) evaluation.evaluate() @@ -280,6 +288,7 @@ def test_evaluation_with_models_and_sources( manifest_loader=manifest_loader, formatter=mock_formatter, scorer=mock_scorer, + config=default_config, ) evaluation.evaluate()