From 13281f40d552266cae6401c3dbdadedae9dc58e3 Mon Sep 17 00:00:00 2001 From: jvoravong <47871238+jvoravong@users.noreply.github.com> Date: Mon, 9 Sep 2024 17:44:09 -0600 Subject: [PATCH] Add support for writing out new expected results files via env var (#1438) * Add support for writing out new expected results files via the UPDATE_EXPECTED_RESULTS env var --- .github/workflows/functional_test_v2.yaml | 11 +++++- functional_tests/README.md | 1 + functional_tests/common.go | 14 +++++++ functional_tests/functional_test.go | 41 ++++++++++++------- functional_tests/histogram_test.go | 48 +++++++++++------------ 5 files changed, 75 insertions(+), 40 deletions(-) diff --git a/.github/workflows/functional_test_v2.yaml b/.github/workflows/functional_test_v2.yaml index f1bbe64186..e29791b43c 100644 --- a/.github/workflows/functional_test_v2.yaml +++ b/.github/workflows/functional_test_v2.yaml @@ -4,6 +4,12 @@ on: pull_request: push: branches: [ main ] + workflow_dispatch: + inputs: + UPDATE_EXPECTED_RESULTS: + description: 'Set this to true to update expected results and collect updated test output as a Github workflow artifact.' + required: false + default: false env: # Make sure to exit early if cache segment download times out after 2 minutes. 
@@ -65,11 +71,12 @@ jobs: - name: run functional tests env: K8S_VERSION: ${{ matrix.k8s-version }} + UPDATE_EXPECTED_RESULTS: ${{ github.event.inputs.UPDATE_EXPECTED_RESULTS || 'false' }} run: | cd functional_tests - TEARDOWN_BEFORE_SETUP=true go test -v -tags ${{ matrix.test-job }} + TEARDOWN_BEFORE_SETUP=true UPDATE_EXPECTED_RESULTS=${{ env.UPDATE_EXPECTED_RESULTS }} go test -v -tags ${{ matrix.test-job }} - name: 'Upload test results' - if: failure() + if: failure() || github.event.inputs.UPDATE_EXPECTED_RESULTS == 'true' uses: actions/upload-artifact@v4 with: name: functional_tests-${{ matrix.test-job }}-${{ matrix.k8s-version }} diff --git a/functional_tests/README.md b/functional_tests/README.md index adbac60776..e22b62c81f 100644 --- a/functional_tests/README.md +++ b/functional_tests/README.md @@ -26,3 +26,4 @@ When running tests you can use the following env vars to help with local develop - `SKIP_TEARDOWN`: skip deleting the chart and apps as part of cleanup. Useful to keep around for local development. - `SKIP_TESTS`: skip running tests, just set up and tear down the cluster. - `TEARDOWN_BEFORE_SETUP`: delete all the deployments made by these tests before setting up. 
+- `UPDATE_EXPECTED_RESULTS`: run golden.WriteMetrics() methods to generate new golden files for expected test results diff --git a/functional_tests/common.go b/functional_tests/common.go index 99299b450d..d53260c495 100644 --- a/functional_tests/common.go +++ b/functional_tests/common.go @@ -5,8 +5,12 @@ package functional_tests import ( "context" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden" "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "os" + "path/filepath" "runtime" "testing" "time" @@ -66,3 +70,13 @@ func waitForMetrics(t *testing.T, entriesNum int, mc *consumertest.MetricsSink) "failed to receive %d entries, received %d metrics in %d minutes", entriesNum, len(mc.AllMetrics()), timeoutMinutes) } + +func writeNewExpectedTracesResult(t *testing.T, file string, trace *ptrace.Traces) { + require.NoError(t, os.MkdirAll("results", 0755)) + require.NoError(t, golden.WriteTraces(t, filepath.Join("results", filepath.Base(file)), *trace)) +} + +func writeNewExpectedMetricsResult(t *testing.T, file string, metric *pmetric.Metrics) { + require.NoError(t, os.MkdirAll("results", 0755)) + require.NoError(t, golden.WriteMetrics(t, filepath.Join("results", filepath.Base(file)), *metric)) +} diff --git a/functional_tests/functional_test.go b/functional_tests/functional_test.go index 37069fc3d0..6ed3c26f12 100644 --- a/functional_tests/functional_test.go +++ b/functional_tests/functional_test.go @@ -611,7 +611,9 @@ func testNodeJSTraces(t *testing.T) { ptracetest.IgnoreResourceSpansOrder(), ptracetest.IgnoreScopeSpansOrder(), ) - + if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" { + writeNewExpectedTracesResult(t, expectedTracesFile, selectedTrace) + } require.NoError(t, err) } @@ -639,7 +641,6 @@ func testJavaTraces(t *testing.T) { } return selectedTrace != nil }, 3*time.Minute, 5*time.Second) - require.NotNil(t, selectedTrace) 
maskScopeVersion(*selectedTrace) @@ -674,7 +675,9 @@ func testJavaTraces(t *testing.T) { ptracetest.IgnoreResourceSpansOrder(), ptracetest.IgnoreScopeSpansOrder(), ) - + if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" { + writeNewExpectedTracesResult(t, expectedTracesFile, selectedTrace) + } require.NoError(t, err) } @@ -697,12 +700,10 @@ func testDotNetTraces(t *testing.T) { selectedTrace = &trace break } } } return selectedTrace != nil }, 3*time.Minute, 5*time.Second) - require.NotNil(t, selectedTrace) maskScopeVersion(*selectedTrace) @@ -737,7 +740,9 @@ func testDotNetTraces(t *testing.T) { ptracetest.IgnoreResourceSpansOrder(), ptracetest.IgnoreScopeSpansOrder(), ) - + if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" { + writeNewExpectedTracesResult(t, expectedTracesFile, selectedTrace) + } require.NoError(t, err) } @@ -794,7 +799,7 @@ func testK8sClusterReceiverMetrics(t *testing.T) { replaceWithStar := func(string) string { return "*" } - var selected *pmetric.Metrics + var selectedMetrics *pmetric.Metrics for h := len(metricsConsumer.AllMetrics()) - 1; h >= 0; h-- { m := metricsConsumer.AllMetrics()[h] foundCorrectSet := false @@ -814,16 +819,15 @@ func testK8sClusterReceiverMetrics(t *testing.T) { continue } if m.ResourceMetrics().Len() == expectedMetrics.ResourceMetrics().Len() && m.MetricCount() == expectedMetrics.MetricCount() { - selected = &m + selectedMetrics = &m break } } - - require.NotNil(t, selected) + require.NotNil(t, selectedMetrics) metricNames := []string{"k8s.node.condition_ready", "k8s.namespace.phase", "k8s.pod.phase", "k8s.replicaset.desired", "k8s.replicaset.available", "k8s.daemonset.ready_nodes", "k8s.daemonset.misscheduled_nodes", "k8s.daemonset.desired_scheduled_nodes", "k8s.daemonset.current_scheduled_nodes", "k8s.container.ready", "k8s.container.memory_request", "k8s.container.memory_limit", "k8s.container.cpu_request", "k8s.container.cpu_limit", 
"k8s.deployment.desired", "k8s.deployment.available", "k8s.container.restarts", "k8s.container.cpu_request", "k8s.container.memory_request", "k8s.container.memory_limit"} - err = pmetrictest.CompareMetrics(expectedMetrics, *selected, + err = pmetrictest.CompareMetrics(expectedMetrics, *selectedMetrics, pmetrictest.IgnoreTimestamp(), pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreMetricAttributeValue("container.id", metricNames...), @@ -859,7 +863,9 @@ func testK8sClusterReceiverMetrics(t *testing.T) { pmetrictest.IgnoreMetricDataPointsOrder(), pmetrictest.IgnoreSubsequentDataPoints("k8s.container.ready", "k8s.container.restarts"), ) - + if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" { + writeNewExpectedMetricsResult(t, expectedMetricsFile, selectedMetrics) + } require.NoError(t, err) } @@ -1109,7 +1115,8 @@ func testAgentMetrics(t *testing.T) { ) assert.NoError(t, err) - expectedInternalMetrics, err := golden.ReadMetrics(filepath.Join(testDir, expectedValuesDir, "expected_internal_metrics.yaml")) + expectedInternalMetricsFile := filepath.Join(testDir, expectedValuesDir, "expected_internal_metrics.yaml") + expectedInternalMetrics, err := golden.ReadMetrics(expectedInternalMetricsFile) require.NoError(t, err) replaceWithStar := func(string) string { return "*" } @@ -1162,9 +1169,13 @@ func testAgentMetrics(t *testing.T) { pmetrictest.IgnoreScopeMetricsOrder(), pmetrictest.IgnoreMetricDataPointsOrder(), ) + if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" { + writeNewExpectedMetricsResult(t, expectedInternalMetricsFile, selectedInternalMetrics) + } assert.NoError(t, err) - expectedKubeletStatsMetrics, err := golden.ReadMetrics(filepath.Join(testDir, expectedValuesDir, "expected_kubeletstats_metrics.yaml")) + expectedKubeletStatsMetricsFile := filepath.Join(testDir, expectedValuesDir, "expected_kubeletstats_metrics.yaml") + expectedKubeletStatsMetrics, err := golden.ReadMetrics(expectedKubeletStatsMetricsFile) require.NoError(t, 
err) selectedKubeletstatsMetrics := selectMetricSet(expectedKubeletStatsMetrics, "container.memory.usage", agentMetricsConsumer, false) if selectedKubeletstatsMetrics == nil { @@ -1172,6 +1183,7 @@ func testAgentMetrics(t *testing.T) { return } require.NotNil(t, selectedKubeletstatsMetrics) + err = pmetrictest.CompareMetrics(expectedKubeletStatsMetrics, *selectedKubeletstatsMetrics, pmetrictest.IgnoreTimestamp(), pmetrictest.IgnoreStartTimestamp(), @@ -1211,7 +1223,8 @@ func testAgentMetrics(t *testing.T) { pmetrictest.IgnoreScopeMetricsOrder(), pmetrictest.IgnoreMetricDataPointsOrder(), ) - if err != nil { + if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" { + writeNewExpectedMetricsResult(t, expectedKubeletStatsMetricsFile, selectedKubeletstatsMetrics) t.Skipf("we have trouble identifying exact payloads right now: %v", err) } else { assert.NoError(t, err) diff --git a/functional_tests/histogram_test.go b/functional_tests/histogram_test.go index 0aa731f886..5f5ebdfe89 100644 --- a/functional_tests/histogram_test.go +++ b/functional_tests/histogram_test.go @@ -163,22 +163,28 @@ func testHistogramMetrics(t *testing.T) { otlpMetricsSink := setupOnce(t) waitForMetrics(t, 5, otlpMetricsSink) - expectedKubeSchedulerMetrics, err := golden.ReadMetrics(filepath.Join(testDir, "scheduler_metrics.yaml")) + expectedKubeSchedulerMetricsFile := filepath.Join(testDir, "scheduler_metrics.yaml") + expectedKubeSchedulerMetrics, err := golden.ReadMetrics(expectedKubeSchedulerMetricsFile) require.NoError(t, err) - expectedKubeProxyMetrics, err := golden.ReadMetrics(filepath.Join(testDir, "proxy_metrics.yaml")) + expectedKubeProxyMetricsFile := filepath.Join(testDir, "proxy_metrics.yaml") + expectedKubeProxyMetrics, err := golden.ReadMetrics(expectedKubeProxyMetricsFile) require.NoError(t, err) - expectedApiMetrics, err := golden.ReadMetrics(filepath.Join(testDir, "api_metrics.yaml")) + expectedApiMetricsFile := filepath.Join(testDir, "api_metrics.yaml") + 
expectedApiMetrics, err := golden.ReadMetrics(expectedApiMetricsFile) require.NoError(t, err) - expectedControllerManagerMetrics, err := golden.ReadMetrics(filepath.Join(testDir, "controller_manager_metrics.yaml")) + expectedControllerManagerMetricsFile := filepath.Join(testDir, "controller_manager_metrics.yaml") + expectedControllerManagerMetrics, err := golden.ReadMetrics(expectedControllerManagerMetricsFile) require.NoError(t, err) - expectedCoreDNSMetrics, err := golden.ReadMetrics(filepath.Join(testDir, "coredns_metrics.yaml")) + expectedCoreDNSMetricsFile := filepath.Join(testDir, "coredns_metrics.yaml") + expectedCoreDNSMetrics, err := golden.ReadMetrics(expectedCoreDNSMetricsFile) require.NoError(t, err) - expectedEtcdMetrics, err := golden.ReadMetrics(filepath.Join(testDir, "etcd_metrics.yaml")) + expectedEtcdMetricsFile := filepath.Join(testDir, "etcd_metrics.yaml") + expectedEtcdMetrics, err := golden.ReadMetrics(expectedEtcdMetricsFile) require.NoError(t, err) var corednsMetrics *pmetric.Metrics @@ -258,9 +264,8 @@ func testHistogramMetrics(t *testing.T) { pmetrictest.IgnoreSubsequentDataPoints("coredns_proxy_request_duration_seconds"), ) assert.NoError(t, err) - if err != nil { - require.NoError(t, os.MkdirAll("results", 0755)) - require.NoError(t, golden.WriteMetrics(t, filepath.Join("results", "coredns_metrics.yaml"), *corednsMetrics)) + if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" { + writeNewExpectedMetricsResult(t, expectedCoreDNSMetricsFile, corednsMetrics) } err = pmetrictest.CompareMetrics(expectedKubeSchedulerMetrics, *schedulerMetrics, @@ -287,9 +292,8 @@ func testHistogramMetrics(t *testing.T) { pmetrictest.IgnoreMetricDataPointsOrder(), ) assert.NoError(t, err) - if err != nil { - require.NoError(t, os.MkdirAll("results", 0755)) - require.NoError(t, golden.WriteMetrics(t, filepath.Join("results", "scheduler_metrics.yaml"), *schedulerMetrics)) + if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" { + 
writeNewExpectedMetricsResult(t, expectedKubeSchedulerMetricsFile, schedulerMetrics) } err = pmetrictest.CompareMetrics(expectedKubeProxyMetrics, *kubeProxyMetrics, @@ -316,9 +320,8 @@ func testHistogramMetrics(t *testing.T) { pmetrictest.IgnoreMetricDataPointsOrder(), ) assert.NoError(t, err) - if err != nil { - require.NoError(t, os.MkdirAll("results", 0755)) - require.NoError(t, golden.WriteMetrics(t, filepath.Join("results", "proxy_metrics.yaml"), *kubeProxyMetrics)) + if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" { + writeNewExpectedMetricsResult(t, expectedKubeProxyMetricsFile, kubeProxyMetrics) } err = pmetrictest.CompareMetrics(expectedApiMetrics, *apiMetrics, @@ -337,9 +340,8 @@ func testHistogramMetrics(t *testing.T) { pmetrictest.IgnoreMetricDataPointsOrder(), ) assert.NoError(t, err) - if err != nil { - require.NoError(t, os.MkdirAll("results", 0755)) - require.NoError(t, golden.WriteMetrics(t, filepath.Join("results", "api_metrics.yaml"), *apiMetrics)) + if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" { + writeNewExpectedMetricsResult(t, expectedApiMetricsFile, apiMetrics) } err = pmetrictest.CompareMetrics(expectedControllerManagerMetrics, *controllerManagerMetrics, @@ -366,9 +368,8 @@ func testHistogramMetrics(t *testing.T) { pmetrictest.IgnoreMetricDataPointsOrder(), ) assert.NoError(t, err) - if err != nil { - require.NoError(t, os.MkdirAll("results", 0755)) - require.NoError(t, golden.WriteMetrics(t, filepath.Join("results", "controller_manager_metrics.yaml"), *controllerManagerMetrics)) + if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" { + writeNewExpectedMetricsResult(t, expectedControllerManagerMetricsFile, controllerManagerMetrics) } err = pmetrictest.CompareMetrics(expectedEtcdMetrics, *etcdMetrics, @@ -397,8 +398,7 @@ func testHistogramMetrics(t *testing.T) { pmetrictest.IgnoreMetricDataPointsOrder(), ) assert.NoError(t, err) - if err != nil { - require.NoError(t, os.MkdirAll("results", 
0755)) - require.NoError(t, golden.WriteMetrics(t, filepath.Join("results", "etcd_metrics.yaml"), *etcdMetrics)) + if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" { + writeNewExpectedMetricsResult(t, expectedEtcdMetricsFile, etcdMetrics) } }