diff --git a/.changes/unreleased/Fixes-20240524-092249.yaml b/.changes/unreleased/Fixes-20240524-092249.yaml
deleted file mode 100644
index 1a4b90ba..00000000
--- a/.changes/unreleased/Fixes-20240524-092249.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-kind: Fixes
-body: Improve validations for derived metrics. Improve error messaging if input metrics
-  are not set. Validate that expr is set and references all input metrics.
-time: 2024-05-24T09:22:49.247603-07:00
-custom:
-  Author: courtneyholcomb
-  Issue: "270"
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index d35ea0e3..af7c38ea 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -13,4 +13,4 @@
 
 
 # Parsing, validations, and protocols
-/dbt_semantic_interfaces/** @dbt-labs/metricflow-admins @QMalcolm @graciegoheen @dbt-labs/semantic-layer
+/dbt_semantic_interfaces/** @dbt-labs/metricflow-admins @QMalcolm @graciegoheen
diff --git a/dbt_semantic_interfaces/validations/metrics.py b/dbt_semantic_interfaces/validations/metrics.py
index 5221c7af..4533a1df 100644
--- a/dbt_semantic_interfaces/validations/metrics.py
+++ b/dbt_semantic_interfaces/validations/metrics.py
@@ -87,9 +87,8 @@ def _validate_alias_collision(metric: Metric) -> List[ValidationIssue]:
                 file_context=FileContext.from_metadata(metadata=metric.metadata),
                 metric=MetricModelReference(metric_name=metric.name),
             )
-            input_metrics = metric.type_params.metrics or []
-            used_names = {input_metric.name for input_metric in input_metrics}
-            for input_metric in input_metrics:
+            used_names = {input_metric.name for input_metric in metric.input_metrics}
+            for input_metric in metric.input_metrics:
                 if input_metric.alias:
                     issues += UniqueAndValidNameRule.check_valid_name(input_metric.alias, metric_context)
                     if input_metric.alias in used_names:
@@ -110,24 +109,15 @@ def _validate_input_metrics_exist(semantic_manifest: SemanticManifest) -> List[V
         all_metrics = {m.name for m in semantic_manifest.metrics}
         for metric in semantic_manifest.metrics:
-            metric_context = MetricContext(
-                file_context=FileContext.from_metadata(metadata=metric.metadata),
-                metric=MetricModelReference(metric_name=metric.name),
-            )
             if metric.type == MetricType.DERIVED:
-                if not metric.type_params.metrics:
-                    issues.append(
-                        ValidationError(
-                            context=metric_context,
-                            message=f"No input metrics found for derived metric '{metric.name}'. "
" - "Please add metrics to type_params.metrics.", - ) - ) - for input_metric in metric.type_params.metrics or []: + for input_metric in metric.input_metrics: if input_metric.name not in all_metrics: issues.append( ValidationError( - context=metric_context, + context=MetricContext( + file_context=FileContext.from_metadata(metadata=metric.metadata), + metric=MetricModelReference(metric_name=metric.name), + ), message=f"For metric: {metric.name}, input metric: '{input_metric.name}' does not " "exist as a configured metric in the model.", ) @@ -139,7 +129,7 @@ def _validate_input_metrics_exist(semantic_manifest: SemanticManifest) -> List[V def _validate_time_offset_params(metric: Metric) -> List[ValidationIssue]: issues: List[ValidationIssue] = [] - for input_metric in metric.type_params.metrics or []: + for input_metric in metric.input_metrics or []: if input_metric.offset_window and input_metric.offset_to_grain: issues.append( ValidationError( @@ -154,40 +144,6 @@ def _validate_time_offset_params(metric: Metric) -> List[ValidationIssue]: return issues - @staticmethod - @validate_safely(whats_being_done="checking that the expr field uses the input metrics") - def _validate_expr(metric: Metric) -> List[ValidationIssue]: - issues: List[ValidationIssue] = [] - - if metric.type == MetricType.DERIVED: - if not metric.type_params.expr: - issues.append( - ValidationError( - context=MetricContext( - file_context=FileContext.from_metadata(metadata=metric.metadata), - metric=MetricModelReference(metric_name=metric.name), - ), - message=f"No expr set for derived metric {metric.name}. " - "Please add an expr that references all input metrics.", - ) - ) - else: - for input_metric in metric.type_params.metrics or []: - name = input_metric.alias or input_metric.name - if name not in metric.type_params.expr: - issues.append( - ValidationError( - context=MetricContext( - file_context=FileContext.from_metadata(metadata=metric.metadata), - metric=MetricModelReference(metric_name=metric.name), - ), - message=f"Input metric '{name}' is not used in expr: '{metric.type_params.expr}' for " - f"derived metric '{metric.name}'. 
Please update the expr or remove the input metric.", - ) - ) - - return issues - @staticmethod @validate_safely( whats_being_done="running model validation ensuring derived metrics properties are configured properly" @@ -199,7 +155,6 @@ def validate_manifest(semantic_manifest: SemanticManifestT) -> Sequence[Validati for metric in semantic_manifest.metrics or []: issues += DerivedMetricRule._validate_alias_collision(metric=metric) issues += DerivedMetricRule._validate_time_offset_params(metric=metric) - issues += DerivedMetricRule._validate_expr(metric=metric) return issues diff --git a/pyproject.toml b/pyproject.toml index 04d57c9f..10afefa1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "dbt-semantic-interfaces" -version = "0.6.0dev0" +version = "0.6.0dev1" description = 'The shared semantic layer definitions that dbt-core and MetricFlow use' readme = "README.md" requires-python = ">=3.8" diff --git a/tests/validations/test_metrics.py b/tests/validations/test_metrics.py index 7a1d320d..da6bbf70 100644 --- a/tests/validations/test_metrics.py +++ b/tests/validations/test_metrics.py @@ -266,21 +266,6 @@ def test_derived_metric() -> None: # noqa: D expr="notexist * 2", metrics=[PydanticMetricInput(name="notexist")] ), ), - metric_with_guaranteed_meta( - name="no_expr", - type=MetricType.DERIVED, - type_params=PydanticMetricTypeParams(metrics=[PydanticMetricInput(name="random_metric")]), - ), - metric_with_guaranteed_meta( - name="input_metric_not_in_expr", - type=MetricType.DERIVED, - type_params=PydanticMetricTypeParams(expr="x", metrics=[PydanticMetricInput(name="random_metric")]), - ), - metric_with_guaranteed_meta( - name="no_input_metrics", - type=MetricType.DERIVED, - type_params=PydanticMetricTypeParams(expr="x"), - ), metric_with_guaranteed_meta( name="has_valid_time_window_params", type=MetricType.DERIVED, @@ -315,17 +300,12 @@ def test_derived_metric() -> None: # noqa: D ) ) build_issues = validation_results.errors - assert len(build_issues) == 6 - expected_substrings = [ - "is already being used. Please choose another alias", - "does not exist as a configured metric in the model", - "Both offset_window and offset_to_grain set", - "is not used in expr", - "No input metrics found for derived metric", - "No expr set for derived metric", - ] + assert len(build_issues) == 3 + expected_substr1 = "is already being used. Please choose another alias" + expected_substr2 = "does not exist as a configured metric in the model" + expected_substr3 = "Both offset_window and offset_to_grain set" missing_error_strings = set() - for expected_str in expected_substrings: + for expected_str in [expected_substr1, expected_substr2, expected_substr3]: if not any(actual_str.as_readable_str().find(expected_str) != -1 for actual_str in build_issues): missing_error_strings.add(expected_str) assert len(missing_error_strings) == 0, (