Only output failing models and violated rules per default in HumanReadableFormatter (PicnicSupermarket#77)

Fixes PicnicSupermarket#71 

It changes the default behavior of the HumanReadableFormatter:
when running `dbt-score lint`, it now only shows failing models
and/or rules that were violated.
To have all output printed (exactly the way it was before this PR), the
user now has to pass `--show all` to `dbt-score lint`.
It also takes the `fail_any_item_under` option into account.
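
For illustration, reusing the `customers` model from the docs example (which
passes every rule), the difference roughly looks like this; the indentation and
labels below are approximated from the existing plain formatter output:

```
> dbt-score lint
Score: 10.0 🥇

> dbt-score lint --show all
🥇 M: customers (score: 10.0)
    OK dbt_score.rules.generic.has_description
    OK dbt_score.rules.generic.has_owner
Score: 10.0 🥇
```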

Furthermore, I had to adjust some of the tests since the default behavior of
the HumanReadableFormatter has changed; let me know if I need
to expand this.

The docs also reflect this change by slightly tweaking the first example
in `index.md`.

In its current form, both rule violations and failing models are
filtered through the same argument. This is based on @matthieucan's
comment on
PicnicSupermarket#71 (comment)

---------

Co-authored-by: Jochem van Dooren <[email protected]>
Co-authored-by: Matthieu Caneill <[email protected]>
3 people authored Dec 18, 2024
1 parent 9f6aa67 commit c8a7d27
Showing 7 changed files with 120 additions and 29 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -9,6 +9,8 @@ and this project adheres to
## [Unreleased]

- Documenting support for python 3.13. (#86)
- Only show failing rules per default in `HumanReadableFormatter`. Also added
`--show` parameter in the CLI to change this behavior. (#77)

## [0.8.0] - 2024-11-12

5 changes: 3 additions & 2 deletions docs/index.md
@@ -12,7 +12,7 @@ encourage) good practices.
## Example

```
> dbt-score lint
> dbt-score lint --show all
🥇 M: customers (score: 10.0)
OK dbt_score.rules.generic.has_description
OK dbt_score.rules.generic.has_owner
@@ -22,7 +22,8 @@ Score: 10.0 🥇

In this example, the model `customers` scores the maximum value of `10.0` as it
passes all the rules. It also is awarded a golden medal because of the perfect
score.
score. By default a passing model or source with or without rule violations will
not be shown, unless we pass the `--show-all` flag.

## Philosophy

17 changes: 16 additions & 1 deletion src/dbt_score/cli.py
@@ -94,8 +94,20 @@ def cli() -> None:
is_flag=False,
default=None,
)
@click.option(
"--show",
help="Type of output which should be shown "
"when using `plain` as `--format`. "
"`all` shows all items and all rules. "
"`failing-items` shows failing rules of failing items. "
"`failing-rules` shows failing rules of all items. "
"Default is --failing-rules.",
type=click.Choice(["all", "failing-items", "failing-rules"]),
is_flag=False,
default="failing-rules",
)
@click.pass_context
def lint(
def lint( # noqa: PLR0913, C901
ctx: click.Context,
format: Literal["plain", "manifest", "ascii"],
select: tuple[str],
@@ -105,6 +117,7 @@ def lint(
run_dbt_parse: bool,
fail_project_under: float,
fail_any_item_under: float,
show: Literal["all", "failing-items", "failing-rules"],
) -> None:
"""Lint dbt metadata."""
manifest_provided = (
@@ -124,6 +137,8 @@ def lint(
config.overload({"fail_project_under": fail_project_under})
if fail_any_item_under:
config.overload({"fail_any_item_under": fail_any_item_under})
if show:
config.overload({"show": show})

try:
if run_dbt_parse:
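For context, here is a minimal standalone sketch of the click pattern used above (a validated string choice whose value is then folded into the configuration); it is illustrative only, not dbt-score's actual module:

```python
# Simplified sketch; dbt-score's real `lint` command has many more options
# and writes the value into its Config object rather than a plain dict.
import click


@click.command()
@click.option(
    "--show",
    type=click.Choice(["all", "failing-items", "failing-rules"]),
    default="failing-rules",
    help="How much of the plain output to print.",
)
def lint(show: str) -> None:
    """Pretend lint command: click has already validated `show` here."""
    config = {"show": show}  # dbt-score calls config.overload({"show": show})
    click.echo(f"show mode: {config['show']}")


if __name__ == "__main__":
    lint()
```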
2 changes: 2 additions & 0 deletions src/dbt_score/config.py
@@ -57,6 +57,7 @@ class Config:
"inject_cwd_in_python_path",
"fail_project_under",
"fail_any_item_under",
"show",
]
_rules_section: Final[str] = "rules"
_badges_section: Final[str] = "badges"
@@ -71,6 +72,7 @@ def __init__(self) -> None:
self.badge_config: BadgeConfig = BadgeConfig()
self.fail_project_under: float = 5.0
self.fail_any_item_under: float = 5.0
self.show: str = "failing-rules"

def set_option(self, option: str, value: Any) -> None:
"""Set an option in the config."""
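A short usage sketch of how the new `show` setting cascades, based on the calls visible in this diff (the exact `Config` internals are assumed):

```python
# Sketch, assuming dbt-score is installed; mirrors how cli.py and the tests
# in this PR use the new option.
from dbt_score.config import Config

config = Config()
print(config.show)            # "failing-rules", the new default

# cli.py layers the CLI value on top of any file-based configuration:
config.overload({"show": "all"})
print(config.show)            # "all"
```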
52 changes: 32 additions & 20 deletions src/dbt_score/formatters/human_readable_formatter.py
@@ -42,28 +42,40 @@ def evaluable_evaluated(
self, evaluable: Evaluable, results: EvaluableResultsType, score: Score
) -> None:
"""Callback when an evaluable item has been evaluated."""
if score.value < self._config.fail_any_item_under:
if evaluable_failed := score.value < self._config.fail_any_item_under:
self._failed_evaluables.append((evaluable, score))
if (
evaluable_failed
or self._config.show == "all"
or (
self._config.show not in ["failing-items"]
and any(result is not None for result in results.values())
)
):
resource_type = type(evaluable).__name__
name_formatted = f"{resource_type[0]}: {self.pretty_name(evaluable)}"
header = (
f"{score.badge} "
f"{self.bold(name_formatted)} (score: {score.rounded_value!s})"
)

resource_type = type(evaluable).__name__
name_formatted = f"{resource_type[0]}: {self.pretty_name(evaluable)}"
header = (
f"{score.badge} "
f"{self.bold(name_formatted)} (score: {score.rounded_value!s})"
)

print(header)
for rule, result in results.items():
if result is None:
print(f"{self.indent}{self.label_ok} {rule.source()}")
elif isinstance(result, RuleViolation):
print(
f"{self.indent}{self.label_warning} "
f"({rule.severity.name.lower()}) {rule.source()}: {result.message}"
)
else:
print(f"{self.indent}{self.label_error} {rule.source()}: {result!s}")
print()
print(header)
for rule, result in results.items():
if result is None:
if self._config.show in ["all"]:
print(f"{self.indent}{self.label_ok} {rule.source()}")
elif isinstance(result, RuleViolation):
print(
f"{self.indent}{self.label_warning} "
f"({rule.severity.name.lower()}) {rule.source()}: "
f"{result.message}"
)
else:
print(
f"{self.indent}{self.label_error} {rule.source()}: "
f"{result!s}"
)
print()

def project_evaluated(self, score: Score) -> None:
"""Callback when a project has been evaluated."""
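To make the gating above easier to follow, here is an equivalent standalone sketch of the decision logic; the function and parameter names are illustrative, not dbt-score API:

```python
from typing import Literal

Show = Literal["all", "failing-items", "failing-rules"]


def print_item(show: Show, item_failed: bool, has_non_ok_results: bool) -> bool:
    """An item (header plus rule lines) is printed when it failed, when
    everything is requested, or when `failing-rules` is active and at least
    one of its rules did not pass."""
    if item_failed or show == "all":
        return True
    return show != "failing-items" and has_non_ok_results


def print_ok_rule(show: Show) -> bool:
    """OK lines for passing rules only appear with `--show all`."""
    return show == "all"


# Default ("failing-rules"): a passing item with a violated rule is still
# listed, but only its failing rules are printed.
assert print_item("failing-rules", item_failed=False, has_non_ok_results=True)
assert not print_ok_rule("failing-rules")
# "failing-items": a passing item is hidden entirely, violations or not.
assert not print_item("failing-items", item_failed=False, has_non_ok_results=True)
```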
68 changes: 64 additions & 4 deletions tests/formatters/test_human_readable_formatter.py
@@ -1,13 +1,14 @@
"""Unit tests for the human readable formatter."""
from textwrap import dedent

import pytest
from dbt_score.evaluation import EvaluableResultsType
from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter
from dbt_score.rule import RuleViolation
from dbt_score.scoring import Score


def test_human_readable_formatter_model(
def test_human_readable_formatter_model_with_defaults(
capsys,
default_config,
manifest_loader,
@@ -29,14 +30,67 @@ def test_human_readable_formatter_model(
stdout = capsys.readouterr().out
expected = """\
🥇 \x1B[1mM: model1\x1B[0m (score: 10.0)
\x1B[1;32mOK \x1B[0m tests.conftest.rule_severity_low
\x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes
\x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error
"""
assert stdout == dedent(expected)


@pytest.mark.parametrize(
"show,expected",
[
(
"all",
"""\
🥇 \x1B[1mM: model1\x1B[0m (score: 10.0)
\x1B[1;32mOK \x1B[0m tests.conftest.rule_severity_low
\x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes
\x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error
""",
),
(
"failing-rules",
"""\
🥇 \x1B[1mM: model1\x1B[0m (score: 10.0)
\x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes
\x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error
""",
),
(
"failing-items",
"",
),
],
)
def test_human_readable_formatter_model_show_parameter(
capsys,
default_config,
manifest_loader,
model1,
rule_severity_low,
rule_severity_medium,
rule_severity_critical,
show,
expected,
):
"""Ensure the formatter has the correct output after model evaluation."""
default_config.overload({"show": show})
formatter = HumanReadableFormatter(
manifest_loader=manifest_loader, config=default_config
)
results: EvaluableResultsType = {
rule_severity_low: None,
rule_severity_medium: Exception("Oh noes"),
rule_severity_critical: RuleViolation("Error"),
}
formatter.evaluable_evaluated(model1, results, Score(10.0, "🥇"))
stdout = capsys.readouterr().out
assert stdout == dedent(expected)


def test_human_readable_formatter_project(capsys, default_config, manifest_loader):
"""Ensure the formatter has the correct output after project evaluation."""
formatter = HumanReadableFormatter(
@@ -57,6 +111,7 @@ def test_human_readable_formatter_near_perfect_model_score(
rule_severity_critical,
):
"""Ensure the formatter has the correct output after model evaluation."""
default_config.overload({"show": "all"})
formatter = HumanReadableFormatter(
manifest_loader=manifest_loader, config=default_config
)
@@ -126,14 +181,19 @@ def test_human_readable_formatter_low_evaluable_score(
assert stdout == dedent(expected)


def test_human_readable_formatter_low_project_score(
def test_human_readable_formatter_low_project_score_high_model_score(
capsys,
default_config,
manifest_loader,
model1,
rule_severity_critical,
):
"""Ensure the formatter has the correct output when the projet has a low score."""
"""Ensure the formatter has the correct output when the projet has a low score.
If model itself has a high project score then we need to pass `show_all` flag
to make it visible.
"""
default_config.overload({"show_all": True})
formatter = HumanReadableFormatter(
manifest_loader=manifest_loader, config=default_config
)
3 changes: 1 addition & 2 deletions tests/test_cli.py
@@ -21,7 +21,7 @@ def test_lint_existing_manifest(manifest_path):
"""Test lint with an existing manifest."""
with patch("dbt_score.cli.Config._load_toml_file"):
runner = CliRunner()
result = runner.invoke(lint, ["--manifest", manifest_path])
result = runner.invoke(lint, ["--manifest", manifest_path, "--show", "all"])

assert "model1" in result.output
assert "model2" in result.output
@@ -112,7 +112,6 @@ def test_fail_any_model_under(manifest_path):
result = runner.invoke(
lint, ["--manifest", manifest_path, "--fail-any-item-under", "10.0"]
)

assert "model1" in result.output
assert "model2" in result.output
assert "Error: evaluable score too low, fail_any_item_under" in result.stdout
