Create debug mode for writing rules (#91)
Executing `dbt-score lint` with the option `--debug`/`-d` will start a
debugger in case of an error while evaluating a rule against a node.
matthieucan authored Jan 2, 2025
1 parent 4fd13a6 commit c5ed67f
Showing 7 changed files with 55 additions and 2 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -8,6 +8,8 @@ and this project adheres to

## [Unreleased]

- Add debug mode to help with writing new rules. (#91)

## [0.9.0] - 2024-12-19

- Documenting support for python 3.13. (#86)
19 changes: 19 additions & 0 deletions docs/create_rules.md
@@ -170,3 +170,22 @@ def models_in_x_follow_naming_standard(model: Model) -> RuleViolation | None:
    if some_regex_fails(model.name):
        return RuleViolation("Invalid model name.")
```

### Debugging rules

When writing new rules, or investigating failing ones, you can use the debug
mode, which automatically drops you into a debugger when an exception occurs.

Run dbt-score with the debugger:

```shell
dbt-score lint --debug
# --debug and -d are equivalent
```

The debugger is the standard `pdb`; see
[its available commands](https://docs.python.org/3/library/pdb.html#debugger-commands).

Naturally, you're free to use your debugger of choice; this option exists to
enable quick debugging in any environment.
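
To see the debugger in action, here is a sketch of a rule containing a bug.
The rule name and the `meta["owner"]` lookup are made up for illustration,
assuming the `@rule` decorator and imports from the rule-writing API shown
earlier in this document:

```python
from dbt_score import Model, RuleViolation, rule


@rule
def model_has_owner(model: Model) -> RuleViolation | None:
    """Hypothetical rule with a bug: "owner" may be missing from meta."""
    if model.meta["owner"] is None:  # raises KeyError when "owner" is absent
        return RuleViolation("Model has no owner.")
```

Running `dbt-score lint --debug` against a model whose `meta` lacks an
`owner` key prints the traceback and drops you into `pdb` at the failing
frame.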
13 changes: 12 additions & 1 deletion src/dbt_score/cli.py
@@ -106,8 +106,16 @@ def cli() -> None:
is_flag=False,
default="failing-rules",
)
@click.option(
"--debug",
"-d",
help="Jump in a debugger in case of rule failure to evaluate.",
type=bool,
is_flag=True,
default=False,
)
@click.pass_context
def lint( # noqa: PLR0913, C901
def lint( # noqa: PLR0912, PLR0913, C901
ctx: click.Context,
format: Literal["plain", "manifest", "ascii"],
select: tuple[str],
@@ -118,6 +126,7 @@ def lint( # noqa: PLR0913, C901
fail_project_under: float,
fail_any_item_under: float,
show: Literal["all", "failing-items", "failing-rules"],
debug: bool,
) -> None:
"""Lint dbt metadata."""
manifest_provided = (
@@ -139,6 +148,8 @@ def lint( # noqa: PLR0913, C901
config.overload({"fail_any_item_under": fail_any_item_under})
if show:
config.overload({"show": show})
if debug:
config.overload({"debug": debug})

try:
if run_dbt_parse:
2 changes: 2 additions & 0 deletions src/dbt_score/config.py
@@ -58,6 +58,7 @@ class Config:
"fail_project_under",
"fail_any_item_under",
"show",
"debug",
]
_rules_section: Final[str] = "rules"
_badges_section: Final[str] = "badges"
@@ -73,6 +74,7 @@ def __init__(self) -> None:
self.fail_project_under: float = 5.0
self.fail_any_item_under: float = 5.0
self.show: str = "failing-rules"
self.debug: bool = False

def set_option(self, option: str, value: Any) -> None:
"""Set an option in the config."""
9 changes: 9 additions & 0 deletions src/dbt_score/evaluation.py
@@ -2,9 +2,12 @@

from __future__ import annotations

import pdb
import traceback
from itertools import chain
from typing import Type, cast

from dbt_score.config import Config
from dbt_score.formatters import Formatter
from dbt_score.models import Evaluable, ManifestLoader
from dbt_score.rule import Rule, RuleViolation
@@ -27,6 +30,7 @@ def __init__(
manifest_loader: ManifestLoader,
formatter: Formatter,
scorer: Scorer,
config: Config,
) -> None:
"""Create an Evaluation object.
@@ -35,11 +39,13 @@
manifest_loader: A manifest loader to access dbt metadata.
formatter: A formatter to display results.
scorer: A scorer to compute scores.
config: A configuration.
"""
self._rule_registry = rule_registry
self._manifest_loader = manifest_loader
self._formatter = formatter
self._scorer = scorer
self._config = config

# For each evaluable, its results
self.results: dict[Evaluable, EvaluableResultsType] = {}
@@ -67,6 +73,9 @@ def evaluate(self) -> None:
result = rule.evaluate(evaluable, **rule.config)
self.results[evaluable][rule.__class__] = result
except Exception as e:
if self._config.debug:
traceback.print_exc()
pdb.post_mortem()
self.results[evaluable][rule.__class__] = e

self.scores[evaluable] = self._scorer.score_evaluable(
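
The `try`/`except` change above uses the standard-library post-mortem
pattern. Here is a minimal, self-contained sketch of the same mechanism, with
a hypothetical `evaluate_safely` helper standing in for the rule-evaluation
loop:

```python
import pdb
import traceback


def evaluate_safely(fn, *args, debug: bool = False):
    """Call fn; on an exception, optionally open a post-mortem debugger."""
    try:
        return fn(*args)
    except Exception as e:
        if debug:
            traceback.print_exc()  # show the full stack trace first
            pdb.post_mortem()      # inspect the frames at the point of failure
        return e  # as in the diff, the exception becomes the stored result
```

`pdb.post_mortem()` without arguments inspects the exception currently being
handled, which is why it must be called inside the `except` block.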
1 change: 1 addition & 0 deletions src/dbt_score/lint.py
@@ -44,6 +44,7 @@ def lint_dbt_project(
manifest_loader=manifest_loader,
formatter=formatter,
scorer=scorer,
config=config,
)
evaluation.evaluate()

11 changes: 10 additions & 1 deletion tests/test_evaluation.py
@@ -37,6 +37,7 @@ def test_evaluation_low_medium_high(
manifest_loader=manifest_loader,
formatter=mock_formatter,
scorer=mock_scorer,
config=default_config,
)
evaluation.evaluate()

@@ -79,6 +80,7 @@ def test_evaluation_critical(
manifest_loader=manifest_loader,
formatter=mock_formatter,
scorer=mock_scorer,
config=default_config,
)

evaluation.evaluate()
@@ -103,6 +105,7 @@ def test_evaluation_no_rule(manifest_path, default_config):
manifest_loader=manifest_loader,
formatter=mock_formatter,
scorer=mock_scorer,
config=default_config,
)
evaluation.evaluate()

@@ -122,6 +125,7 @@ def test_evaluation_no_model(manifest_empty_path, rule_severity_low, default_con
manifest_loader=manifest_loader,
formatter=Mock(),
scorer=Mock(),
config=default_config,
)
evaluation.evaluate()

@@ -140,6 +144,7 @@ def test_evaluation_no_model_no_rule(manifest_empty_path, default_config):
manifest_loader=manifest_loader,
formatter=Mock(),
scorer=Mock(),
config=default_config,
)
evaluation.evaluate()

@@ -148,7 +153,7 @@ def test_evaluation_no_model_no_rule(manifest_empty_path, default_config):


def test_evaluation_rule_with_config(
manifest_path, rule_with_config, valid_config_path
manifest_path, rule_with_config, valid_config_path, default_config
):
"""Test rule evaluation with parameters."""
manifest_loader = ManifestLoader(manifest_path)
@@ -170,6 +175,7 @@ def test_evaluation_rule_with_config(
manifest_loader=manifest_loader,
formatter=mock_formatter,
scorer=mock_scorer,
config=default_config,
)
evaluation.evaluate()

@@ -201,6 +207,7 @@ def test_evaluation_with_filter(
manifest_loader=manifest_loader,
formatter=mock_formatter,
scorer=mock_scorer,
config=default_config,
)
evaluation.evaluate()

@@ -241,6 +248,7 @@ def test_evaluation_with_class_filter(
manifest_loader=manifest_loader,
formatter=mock_formatter,
scorer=mock_scorer,
config=default_config,
)
evaluation.evaluate()

@@ -280,6 +288,7 @@ def test_evaluation_with_models_and_sources(
manifest_loader=manifest_loader,
formatter=mock_formatter,
scorer=mock_scorer,
config=default_config,
)
evaluation.evaluate()

