Only output failing models and violated rules by default in HumanReadableFormatter #77

Merged
Changes from 10 commits
5 changes: 3 additions & 2 deletions docs/index.md
@@ -12,7 +12,7 @@ encourage) good practices.
## Example

```
> dbt-score lint
> dbt-score lint --show all
🥇 M: customers (score: 10.0)
OK dbt_score.rules.generic.has_description
OK dbt_score.rules.generic.has_owner
@@ -22,7 +22,8 @@ Score: 10.0 🥇

In this example, the model `customers` scores the maximum value of `10.0` as it
passes all the rules. It is also awarded a golden medal because of the perfect
score. By default, a passing model, with or without rule violations, will not be
shown unless we pass the `--show all` option.

## Philosophy

17 changes: 16 additions & 1 deletion src/dbt_score/cli.py
@@ -94,8 +94,20 @@ def cli() -> None:
is_flag=False,
default=None,
)
@click.option(
"--show",
help="Type of output which should be shown"
"when using `plain` as `--format`. "
"`all` shows all models and all rules"
"`failing-models` shows failing rules of failing models"
thomend marked this conversation as resolved.
Show resolved Hide resolved
"`failing-rules` shows failing rules of all models"
"Default behavior is to show violated rules of all models.",
thomend marked this conversation as resolved.
Show resolved Hide resolved
type=click.Choice(["all", "failing-models", "failing-rules"]),
is_flag=False,
default="failing-rules",
)
@click.pass_context
def lint(
def lint( # noqa: PLR0913, C901
ctx: click.Context,
format: Literal["plain", "manifest", "ascii"],
select: tuple[str],
@@ -105,6 +117,7 @@ def lint(
run_dbt_parse: bool,
fail_project_under: float,
fail_any_item_under: float,
show: Literal["all", "failing-models", "failing-rules"],
) -> None:
"""Lint dbt metadata."""
manifest_provided = (
@@ -124,6 +137,8 @@ def lint(
config.overload({"fail_project_under": fail_project_under})
if fail_any_item_under:
config.overload({"fail_any_item_under": fail_any_item_under})
if show:
config.overload({"show": show})

try:
if run_dbt_parse:
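
For orientation, here is a minimal sketch of how the new `--show` option can be exercised end to end with Click's test runner, mirroring the pattern already used in `tests/test_cli.py`. The `dbt_score.cli` import and the `manifest.json` path are assumptions for the sketch, not part of this PR.

```python
from unittest.mock import patch

from click.testing import CliRunner

from dbt_score.cli import lint  # assumed import, mirroring tests/test_cli.py

# Patching Config._load_toml_file keeps the run independent of a local
# pyproject.toml, just like the existing CLI tests do.
with patch("dbt_score.cli.Config._load_toml_file"):
    runner = CliRunner()
    # "manifest.json" is a placeholder path, not a file from this repository.
    result = runner.invoke(
        lint, ["--manifest", "manifest.json", "--show", "failing-models"]
    )
    print(result.output)
```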
2 changes: 2 additions & 0 deletions src/dbt_score/config.py
@@ -57,6 +57,7 @@ class Config:
"inject_cwd_in_python_path",
"fail_project_under",
"fail_any_item_under",
"show",
]
_rules_section: Final[str] = "rules"
_badges_section: Final[str] = "badges"
@@ -71,6 +72,7 @@ def __init__(self) -> None:
self.badge_config: BadgeConfig = BadgeConfig()
self.fail_project_under: float = 5.0
self.fail_any_item_under: float = 5.0
self.show: str = "failing-rules"

def set_option(self, option: str, value: Any) -> None:
"""Set an option in the config."""
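
As a quick illustration of how the new option travels through the configuration layer, a small sketch; it assumes `Config` is importable from `dbt_score.config` and that `overload` accepts a plain dict, as the CLI change above suggests.

```python
from dbt_score.config import Config  # module path taken from src/dbt_score/config.py

config = Config()
print(config.show)  # "failing-rules" -- the default introduced in this PR

# The CLI performs the equivalent of this call when `--show all` is passed:
config.overload({"show": "all"})
print(config.show)  # "all"
```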
46 changes: 27 additions & 19 deletions src/dbt_score/formatters/human_readable_formatter.py
@@ -44,26 +44,34 @@ def evaluable_evaluated(
"""Callback when an evaluable item has been evaluated."""
if score.value < self._config.fail_any_item_under:
self._failed_evaluables.append((evaluable, score))
if score.value < self._config.fail_any_item_under or self._config.show in [
"all",
"failing-rules",
]:
resource_type = type(evaluable).__name__
name_formatted = f"{resource_type[0]}: {self.pretty_name(evaluable)}"
header = (
f"{score.badge} "
f"{self.bold(name_formatted)} (score: {score.rounded_value!s})"
)

resource_type = type(evaluable).__name__
name_formatted = f"{resource_type[0]}: {self.pretty_name(evaluable)}"
header = (
f"{score.badge} "
f"{self.bold(name_formatted)} (score: {score.rounded_value!s})"
)

print(header)
for rule, result in results.items():
if result is None:
print(f"{self.indent}{self.label_ok} {rule.source()}")
elif isinstance(result, RuleViolation):
print(
f"{self.indent}{self.label_warning} "
f"({rule.severity.name.lower()}) {rule.source()}: {result.message}"
)
else:
print(f"{self.indent}{self.label_error} {rule.source()}: {result!s}")
print()
print(header)
for rule, result in results.items():
if result is None:
if self._config.show in ["all", "failing-models"]:
print(f"{self.indent}{self.label_ok} {rule.source()}")
elif isinstance(result, RuleViolation):
print(
f"{self.indent}{self.label_warning} "
f"({rule.severity.name.lower()}) {rule.source()}: "
f"{result.message}"
)
else:
print(
f"{self.indent}{self.label_error} {rule.source()}: "
f"{result!s}"
)
print()

def project_evaluated(self, score: Score) -> None:
"""Callback when a project has been evaluated."""
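
To make the new branching easier to follow, here is a distilled, standalone sketch of the decision logic the diff introduces. The helper names are invented for illustration; the real logic lives inline in `HumanReadableFormatter.evaluable_evaluated` and also handles severities, labels, and formatting.

```python
from typing import Literal

Show = Literal["all", "failing-models", "failing-rules"]


def should_print_header(score: float, fail_any_item_under: float, show: Show) -> bool:
    """An evaluable is printed if it fails, or if `show` includes passing items."""
    return score < fail_any_item_under or show in ("all", "failing-rules")


def should_print_passing_rule(show: Show) -> bool:
    """OK lines are only printed for `all` and `failing-models`."""
    return show in ("all", "failing-models")


# With the defaults from this PR, every model is shown but its OK rules are hidden.
assert should_print_header(10.0, 5.0, "failing-rules") is True
assert should_print_passing_rule("failing-rules") is False
```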
44 changes: 40 additions & 4 deletions tests/formatters/test_human_readable_formatter.py
@@ -28,6 +28,36 @@ def test_human_readable_formatter_model(
formatter.evaluable_evaluated(model1, results, Score(10.0, "🥇"))
stdout = capsys.readouterr().out
expected = """\
🥇 \x1B[1mM: model1\x1B[0m (score: 10.0)
\x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes
\x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error

"""
assert stdout == dedent(expected)


def test_human_readable_formatter_model_show_all(
Contributor:
Can we have a test for failing-models as well?

Contributor Author:
I added a parametrized test to test for the different options in the show parameter - hope this works :)

capsys,
default_config,
manifest_loader,
model1,
rule_severity_low,
rule_severity_medium,
rule_severity_critical,
):
"""Ensure the formatter has the correct output after model evaluation."""
default_config.overload({"show": "all"})
formatter = HumanReadableFormatter(
manifest_loader=manifest_loader, config=default_config
)
results: EvaluableResultsType = {
rule_severity_low: None,
rule_severity_medium: Exception("Oh noes"),
rule_severity_critical: RuleViolation("Error"),
}
formatter.evaluable_evaluated(model1, results, Score(10.0, "🥇"))
stdout = capsys.readouterr().out
expected = """\
🥇 \x1B[1mM: model1\x1B[0m (score: 10.0)
\x1B[1;32mOK \x1B[0m tests.conftest.rule_severity_low
\x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes
@@ -44,7 +74,7 @@ def test_human_readable_formatter_project(capsys, default_config, manifest_loade
)
formatter.project_evaluated(Score(10.0, "🥇"))
stdout = capsys.readouterr().out
assert stdout == "Project score: \x1B[1m10.0\x1B[0m 🥇\n"
assert stdout == "Project score: \x1b[1m10.0\x1b[0m 🥇\n"
Contributor:
Why did all the B's turn into b? 😁

Contributor:
Hexadecimal is not case sensitive, but indeed strange to see those changed 🤔

Contributor Author (@thomend, Oct 7, 2024):
Sorry for that! I am using the ruff VS Code extension, and the autoformat on save did that: Hex codes and Unicode sequences. I don't know why the linter in the pre-commit hook didn't pick it up and revert it, though. The ruff version of the VS Code extension is 0.6.6, I believe (which is newer than the one running in the pre-commit hook).
Let me know if you want it reverted.
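
For context on the exchange above: Python treats the hex digits of `\x` escapes case-insensitively, so the rewritten literals are byte-for-byte identical.

```python
# The case of the hex digits is irrelevant to the resulting string, which is
# why the assertions still pass after ruff's rewrite.
assert "\x1B[1m10.0\x1B[0m" == "\x1b[1m10.0\x1b[0m"
```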



def test_human_readable_formatter_near_perfect_model_score(
@@ -57,6 +87,7 @@ def test_human_readable_formatter_near_perfect_model_score(
rule_severity_critical,
):
"""Ensure the formatter has the correct output after model evaluation."""
default_config.overload({"show": "all"})
formatter = HumanReadableFormatter(
manifest_loader=manifest_loader, config=default_config
)
@@ -87,7 +118,7 @@ def test_human_readable_formatter_near_perfect_project_score(
)
formatter.project_evaluated(Score(9.99, "🥈"))
stdout = capsys.readouterr().out
assert stdout == "Project score: \x1B[1m9.9\x1B[0m 🥈\n"
assert stdout == "Project score: \x1b[1m9.9\x1b[0m 🥈\n"


def test_human_readable_formatter_low_evaluable_score(
@@ -126,14 +157,19 @@ def test_human_readable_formatter_low_evaluable_score(
assert stdout == dedent(expected)


def test_human_readable_formatter_low_project_score(
def test_human_readable_formatter_low_project_score_high_model_score(
capsys,
default_config,
manifest_loader,
model1,
rule_severity_critical,
):
"""Ensure the formatter has the correct output when the projet has a low score."""
"""Ensure the formatter has the correct output when the projet has a low score.

If model itself has a high project score then we need to pass `show_all` flag
to make it visible.
"""
default_config.overload({"show_all": True})
formatter = HumanReadableFormatter(
manifest_loader=manifest_loader, config=default_config
)
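
Picking up the reviewer exchange above about also covering `failing-models`, a parametrized test over the three `show` values could look roughly like the following sketch. Fixture names mirror the existing tests; the `Score` and `RuleViolation` import paths and the assumption that `default_config` keeps `fail_any_item_under` at 5.0 are mine, not taken from the final PR.

```python
import pytest

from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter

# The next two import paths are assumptions for this sketch; the real test
# module already has the equivalent names in scope.
from dbt_score.rule import RuleViolation
from dbt_score.scoring import Score


@pytest.mark.parametrize(
    "show,expect_header,expect_ok_line",
    [
        ("all", True, True),  # model and all of its rules are printed
        ("failing-rules", True, False),  # model printed, OK rules hidden (default)
        ("failing-models", False, False),  # a passing model is hidden entirely
    ],
)
def test_show_option_controls_output(
    capsys,
    default_config,
    manifest_loader,
    model1,
    rule_severity_low,
    rule_severity_critical,
    show,
    expect_header,
    expect_ok_line,
):
    """Sketch: what each `show` value reveals for a passing model (score 10.0)."""
    default_config.overload({"show": show})
    formatter = HumanReadableFormatter(
        manifest_loader=manifest_loader, config=default_config
    )
    results = {
        rule_severity_low: None,  # passing rule -> OK line
        rule_severity_critical: RuleViolation("Error"),  # violated rule -> WARN line
    }
    # Score 10.0 is above the assumed fail_any_item_under default of 5.0, so
    # the model counts as passing and is hidden entirely for "failing-models".
    formatter.evaluable_evaluated(model1, results, Score(10.0, "🥇"))
    stdout = capsys.readouterr().out
    assert ("model1" in stdout) is expect_header
    assert ("rule_severity_low" in stdout) is expect_ok_line
```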
3 changes: 1 addition & 2 deletions tests/test_cli.py
@@ -21,7 +21,7 @@ def test_lint_existing_manifest(manifest_path):
"""Test lint with an existing manifest."""
with patch("dbt_score.cli.Config._load_toml_file"):
runner = CliRunner()
result = runner.invoke(lint, ["--manifest", manifest_path])
result = runner.invoke(lint, ["--manifest", manifest_path, "--show", "all"])

assert "model1" in result.output
assert "model2" in result.output
@@ -112,7 +112,6 @@ def test_fail_any_model_under(manifest_path):
result = runner.invoke(
lint, ["--manifest", manifest_path, "--fail-any-item-under", "10.0"]
)

assert "model1" in result.output
assert "model2" in result.output
assert "Error: evaluable score too low, fail_any_item_under" in result.stdout