Revert "Improve validations for derived metrics" #284

Merged · 2 commits · Jun 5, 2024
7 changes: 0 additions & 7 deletions .changes/unreleased/Fixes-20240524-092249.yaml

This file was deleted.

2 changes: 1 addition & 1 deletion .github/CODEOWNERS
```diff
@@ -13,4 +13,4 @@


 # Parsing, validations, and protocols
-/dbt_semantic_interfaces/** @dbt-labs/metricflow-admins @QMalcolm @graciegoheen @dbt-labs/semantic-layer
+/dbt_semantic_interfaces/** @dbt-labs/metricflow-admins @QMalcolm @graciegoheen
```
61 changes: 8 additions & 53 deletions dbt_semantic_interfaces/validations/metrics.py
```diff
@@ -87,9 +87,8 @@ def _validate_alias_collision(metric: Metric) -> List[ValidationIssue]:
             file_context=FileContext.from_metadata(metadata=metric.metadata),
             metric=MetricModelReference(metric_name=metric.name),
         )
-        input_metrics = metric.type_params.metrics or []
-        used_names = {input_metric.name for input_metric in input_metrics}
-        for input_metric in input_metrics:
+        used_names = {input_metric.name for input_metric in metric.input_metrics}
+        for input_metric in metric.input_metrics:
            if input_metric.alias:
                issues += UniqueAndValidNameRule.check_valid_name(input_metric.alias, metric_context)
                if input_metric.alias in used_names:
```
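The alias check that survives the revert guards against an input metric's alias shadowing another input's name. A minimal, runnable sketch of the scenario it catches; the `InputMetric` dataclass and the metric names are illustrative stand-ins for `PydanticMetricInput`, not code from this repository:

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class InputMetric:
    """Stand-in for PydanticMetricInput; only the fields the check reads."""

    name: str
    alias: Optional[str] = None


inputs = [
    InputMetric(name="orders"),
    InputMetric(name="revenue", alias="orders"),  # alias collides with "orders"
]

used_names = {m.name for m in inputs}
for m in inputs:
    if m.alias and m.alias in used_names:
        # Mirrors the "is already being used. Please choose another alias" error.
        print(f"alias '{m.alias}' is already being used")
```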
```diff
@@ -110,24 +109,15 @@ def _validate_input_metrics_exist(semantic_manifest: SemanticManifest) -> List[V

         all_metrics = {m.name for m in semantic_manifest.metrics}
         for metric in semantic_manifest.metrics:
-            metric_context = MetricContext(
-                file_context=FileContext.from_metadata(metadata=metric.metadata),
-                metric=MetricModelReference(metric_name=metric.name),
-            )
             if metric.type == MetricType.DERIVED:
-                if not metric.type_params.metrics:
-                    issues.append(
-                        ValidationError(
-                            context=metric_context,
-                            message=f"No input metrics found for derived metric '{metric.name}'. "
-                            "Please add metrics to type_params.metrics.",
-                        )
-                    )
-                for input_metric in metric.type_params.metrics or []:
+                for input_metric in metric.input_metrics:
                     if input_metric.name not in all_metrics:
                         issues.append(
                             ValidationError(
-                                context=metric_context,
+                                context=MetricContext(
+                                    file_context=FileContext.from_metadata(metadata=metric.metadata),
+                                    metric=MetricModelReference(metric_name=metric.name),
+                                ),
                                 message=f"For metric: {metric.name}, input metric: '{input_metric.name}' does not "
                                 "exist as a configured metric in the model.",
                             )
```
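Note that the restored loop iterates `metric.input_metrics` and no longer emits the reverted "No input metrics found for derived metric" error; what remains is a pure existence check of each declared input against the manifest's metric names. A runnable sketch of that check with hypothetical names:

```python
# Metric names configured in the manifest (hypothetical).
all_metrics = {"orders", "revenue"}

# Inputs declared by a derived metric (hypothetical); "profit" is undefined.
derived_inputs = ["revenue", "profit"]

for name in derived_inputs:
    if name not in all_metrics:
        # Mirrors the "does not exist as a configured metric" error; flags 'profit'.
        print(f"input metric: '{name}' does not exist as a configured metric in the model")
```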
```diff
@@ -139,7 +129,7 @@ def _validate_input_metrics_exist(semantic_manifest: SemanticManifest) -> List[V
     def _validate_time_offset_params(metric: Metric) -> List[ValidationIssue]:
         issues: List[ValidationIssue] = []

-        for input_metric in metric.type_params.metrics or []:
+        for input_metric in metric.input_metrics or []:
             if input_metric.offset_window and input_metric.offset_to_grain:
                 issues.append(
                     ValidationError(
```
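The offset validation itself is unchanged apart from its input source: an input metric may set `offset_window` or `offset_to_grain`, but not both. A self-contained sketch of the rejected shape, again with a stand-in dataclass and illustrative field values:

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class InputMetric:
    """Stand-in for PydanticMetricInput; offset fields only."""

    name: str
    offset_window: Optional[str] = None  # e.g. "7 days"
    offset_to_grain: Optional[str] = None  # e.g. "month"


bad = InputMetric(name="orders", offset_window="7 days", offset_to_grain="month")
if bad.offset_window and bad.offset_to_grain:
    # Mirrors the "Both offset_window and offset_to_grain set" error.
    print(f"Both offset_window and offset_to_grain set for '{bad.name}'; choose one")
```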
```diff
@@ -154,40 +144,6 @@ def _validate_time_offset_params(metric: Metric) -> List[ValidationIssue]:

         return issues

-    @staticmethod
-    @validate_safely(whats_being_done="checking that the expr field uses the input metrics")
-    def _validate_expr(metric: Metric) -> List[ValidationIssue]:
-        issues: List[ValidationIssue] = []
-
-        if metric.type == MetricType.DERIVED:
-            if not metric.type_params.expr:
-                issues.append(
-                    ValidationError(
-                        context=MetricContext(
-                            file_context=FileContext.from_metadata(metadata=metric.metadata),
-                            metric=MetricModelReference(metric_name=metric.name),
-                        ),
-                        message=f"No expr set for derived metric {metric.name}. "
-                        "Please add an expr that references all input metrics.",
-                    )
-                )
-            else:
-                for input_metric in metric.type_params.metrics or []:
-                    name = input_metric.alias or input_metric.name
-                    if name not in metric.type_params.expr:
-                        issues.append(
-                            ValidationError(
-                                context=MetricContext(
-                                    file_context=FileContext.from_metadata(metadata=metric.metadata),
-                                    metric=MetricModelReference(metric_name=metric.name),
-                                ),
-                                message=f"Input metric '{name}' is not used in expr: '{metric.type_params.expr}' for "
-                                f"derived metric '{metric.name}'. Please update the expr or remove the input metric.",
-                            )
-                        )
-
-        return issues
-
     @staticmethod
     @validate_safely(
         whats_being_done="running model validation ensuring derived metrics properties are configured properly"
```
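For context on what is being reverted here: `_validate_expr` required every derived metric to define an `expr`, and then checked via a plain substring test that each input metric's alias-or-name appeared in it. A self-contained sketch of that substring logic, with illustrative names:

```python
expr = "orders - cancellations"
# (name, alias) pairs for a derived metric's inputs; "revenue" is aliased to "rev".
inputs = [("orders", None), ("cancellations", None), ("revenue", "rev")]

for name, alias in inputs:
    used_name = alias or name
    if used_name not in expr:
        # Mirrors the removed "is not used in expr" error; flags 'rev'.
        print(f"Input metric '{used_name}' is not used in expr: '{expr}'")
```

Being substring-based, the removed check could also be satisfied spuriously by overlapping names (for instance, an alias `rev` would be "found" inside an expr mentioning `revenue`), which is worth keeping in mind if this validation returns in a later PR.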
```diff
@@ -199,7 +155,6 @@ def validate_manifest(semantic_manifest: SemanticManifestT) -> Sequence[Validati
         for metric in semantic_manifest.metrics or []:
             issues += DerivedMetricRule._validate_alias_collision(metric=metric)
             issues += DerivedMetricRule._validate_time_offset_params(metric=metric)
-            issues += DerivedMetricRule._validate_expr(metric=metric)
         return issues
```


2 changes: 1 addition & 1 deletion pyproject.toml
```diff
@@ -1,6 +1,6 @@
 [project]
 name = "dbt-semantic-interfaces"
-version = "0.6.0dev0"
+version = "0.6.0dev1"
 description = 'The shared semantic layer definitions that dbt-core and MetricFlow use'
 readme = "README.md"
 requires-python = ">=3.8"
```
30 changes: 5 additions & 25 deletions tests/validations/test_metrics.py
```diff
@@ -266,21 +266,6 @@ def test_derived_metric() -> None: # noqa: D
                 expr="notexist * 2", metrics=[PydanticMetricInput(name="notexist")]
             ),
         ),
-        metric_with_guaranteed_meta(
-            name="no_expr",
-            type=MetricType.DERIVED,
-            type_params=PydanticMetricTypeParams(metrics=[PydanticMetricInput(name="random_metric")]),
-        ),
-        metric_with_guaranteed_meta(
-            name="input_metric_not_in_expr",
-            type=MetricType.DERIVED,
-            type_params=PydanticMetricTypeParams(expr="x", metrics=[PydanticMetricInput(name="random_metric")]),
-        ),
-        metric_with_guaranteed_meta(
-            name="no_input_metrics",
-            type=MetricType.DERIVED,
-            type_params=PydanticMetricTypeParams(expr="x"),
-        ),
         metric_with_guaranteed_meta(
             name="has_valid_time_window_params",
             type=MetricType.DERIVED,
```
```diff
@@ -315,17 +300,12 @@ def test_derived_metric() -> None: # noqa: D
         )
     )
     build_issues = validation_results.errors
-    assert len(build_issues) == 6
-    expected_substrings = [
-        "is already being used. Please choose another alias",
-        "does not exist as a configured metric in the model",
-        "Both offset_window and offset_to_grain set",
-        "is not used in expr",
-        "No input metrics found for derived metric",
-        "No expr set for derived metric",
-    ]
+    assert len(build_issues) == 3
+    expected_substr1 = "is already being used. Please choose another alias"
+    expected_substr2 = "does not exist as a configured metric in the model"
+    expected_substr3 = "Both offset_window and offset_to_grain set"
     missing_error_strings = set()
-    for expected_str in expected_substrings:
+    for expected_str in [expected_substr1, expected_substr2, expected_substr3]:
         if not any(actual_str.as_readable_str().find(expected_str) != -1 for actual_str in build_issues):
             missing_error_strings.add(expected_str)
     assert len(missing_error_strings) == 0, (
```