Skip to content

Commit

Permalink
Add support for writing out new expected results files via env var (#…
Browse files Browse the repository at this point in the history
…1438)

* Add support for writing out new expected results files via the UPDATE_EXPECTED_RESULTS env var
  • Loading branch information
jvoravong authored Sep 9, 2024
1 parent b7b3246 commit 13281f4
Show file tree
Hide file tree
Showing 5 changed files with 75 additions and 40 deletions.
11 changes: 9 additions & 2 deletions .github/workflows/functional_test_v2.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,12 @@ on:
pull_request:
push:
branches: [ main ]
workflow_dispatch:
inputs:
UPDATE_EXPECTED_RESULTS:
description: 'Set this to true to update expected results and collect the updated test output as a GitHub workflow artifact.'
required: false
default: false

env:
# Make sure to exit early if cache segment download times out after 2 minutes.
Expand Down Expand Up @@ -65,11 +71,12 @@ jobs:
- name: run functional tests
env:
K8S_VERSION: ${{ matrix.k8s-version }}
UPDATE_EXPECTED_RESULTS: ${{ github.event.inputs.UPDATE_EXPECTED_RESULTS || 'false' }}
run: |
cd functional_tests
TEARDOWN_BEFORE_SETUP=true go test -v -tags ${{ matrix.test-job }}
TEARDOWN_BEFORE_SETUP=true UPDATE_EXPECTED_RESULTS=${{ env.UPDATE_EXPECTED_RESULTS }} go test -v -tags ${{ matrix.test-job }}
- name: 'Upload test results'
if: failure()
if: failure() && env.UPDATE_EXPECTED_RESULTS == 'true'
uses: actions/upload-artifact@v4
with:
name: functional_tests-${{ matrix.test-job }}-${{ matrix.k8s-version }}
Expand Down
1 change: 1 addition & 0 deletions functional_tests/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -26,3 +26,4 @@ When running tests you can use the following env vars to help with local develop
- `SKIP_TEARDOWN`: skip deleting the chart and apps as part of cleanup. Useful to keep around for local development.
- `SKIP_TESTS`: skip running tests, just set up and tear down the cluster.
- `TEARDOWN_BEFORE_SETUP`: delete all the deployments made by these tests before setting up.
- `UPDATE_EXPECTED_RESULTS`: when a comparison fails, write the actual metrics/traces to a local `results` directory (via `golden.WriteMetrics`/`golden.WriteTraces`) to generate new golden files for the expected test results
14 changes: 14 additions & 0 deletions functional_tests/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,12 @@ package functional_tests

import (
"context"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden"
"go.opentelemetry.io/collector/consumer/consumertest"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
"os"
"path/filepath"
"runtime"
"testing"
"time"
Expand Down Expand Up @@ -66,3 +70,13 @@ func waitForMetrics(t *testing.T, entriesNum int, mc *consumertest.MetricsSink)
"failed to receive %d entries, received %d metrics in %d minutes", entriesNum,
len(mc.AllMetrics()), timeoutMinutes)
}

// writeNewExpectedTracesResult writes trace into the local "results"
// directory, reusing the base name of file, so the updated golden data can
// be collected (e.g. as a CI artifact) and used as the new expected result.
func writeNewExpectedTracesResult(t *testing.T, file string, trace *ptrace.Traces) {
	const resultsDir = "results"
	require.NoError(t, os.MkdirAll(resultsDir, 0755))
	outputPath := filepath.Join(resultsDir, filepath.Base(file))
	require.NoError(t, golden.WriteTraces(t, outputPath, *trace))
}

// writeNewExpectedMetricsResult writes metric into the local "results"
// directory, reusing the base name of file, so the updated golden data can
// be collected (e.g. as a CI artifact) and used as the new expected result.
func writeNewExpectedMetricsResult(t *testing.T, file string, metric *pmetric.Metrics) {
	const resultsDir = "results"
	require.NoError(t, os.MkdirAll(resultsDir, 0755))
	outputPath := filepath.Join(resultsDir, filepath.Base(file))
	require.NoError(t, golden.WriteMetrics(t, outputPath, *metric))
}
41 changes: 27 additions & 14 deletions functional_tests/functional_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -611,7 +611,9 @@ func testNodeJSTraces(t *testing.T) {
ptracetest.IgnoreResourceSpansOrder(),
ptracetest.IgnoreScopeSpansOrder(),
)

if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" {
writeNewExpectedTracesResult(t, expectedTracesFile, selectedTrace)
}
require.NoError(t, err)
}

Expand Down Expand Up @@ -639,7 +641,6 @@ func testJavaTraces(t *testing.T) {
}
return selectedTrace != nil
}, 3*time.Minute, 5*time.Second)

require.NotNil(t, selectedTrace)

maskScopeVersion(*selectedTrace)
Expand Down Expand Up @@ -674,7 +675,9 @@ func testJavaTraces(t *testing.T) {
ptracetest.IgnoreResourceSpansOrder(),
ptracetest.IgnoreScopeSpansOrder(),
)

if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" {
writeNewExpectedTracesResult(t, expectedTracesFile, selectedTrace)
}
require.NoError(t, err)
}

Expand All @@ -697,12 +700,12 @@ func testDotNetTraces(t *testing.T) {
selectedTrace = &trace
break
}
selectedTrace = &trace
break
}
}
return selectedTrace != nil
}, 3*time.Minute, 5*time.Second)

require.NotNil(t, selectedTrace)

maskScopeVersion(*selectedTrace)
Expand Down Expand Up @@ -737,7 +740,9 @@ func testDotNetTraces(t *testing.T) {
ptracetest.IgnoreResourceSpansOrder(),
ptracetest.IgnoreScopeSpansOrder(),
)

if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" {
writeNewExpectedTracesResult(t, expectedTracesFile, selectedTrace)
}
require.NoError(t, err)
}

Expand Down Expand Up @@ -794,7 +799,7 @@ func testK8sClusterReceiverMetrics(t *testing.T) {

replaceWithStar := func(string) string { return "*" }

var selected *pmetric.Metrics
var selectedMetrics *pmetric.Metrics
for h := len(metricsConsumer.AllMetrics()) - 1; h >= 0; h-- {
m := metricsConsumer.AllMetrics()[h]
foundCorrectSet := false
Expand All @@ -814,16 +819,15 @@ func testK8sClusterReceiverMetrics(t *testing.T) {
continue
}
if m.ResourceMetrics().Len() == expectedMetrics.ResourceMetrics().Len() && m.MetricCount() == expectedMetrics.MetricCount() {
selected = &m
selectedMetrics = &m
break
}
}

require.NotNil(t, selected)
require.NotNil(t, selectedMetrics)

metricNames := []string{"k8s.node.condition_ready", "k8s.namespace.phase", "k8s.pod.phase", "k8s.replicaset.desired", "k8s.replicaset.available", "k8s.daemonset.ready_nodes", "k8s.daemonset.misscheduled_nodes", "k8s.daemonset.desired_scheduled_nodes", "k8s.daemonset.current_scheduled_nodes", "k8s.container.ready", "k8s.container.memory_request", "k8s.container.memory_limit", "k8s.container.cpu_request", "k8s.container.cpu_limit", "k8s.deployment.desired", "k8s.deployment.available", "k8s.container.restarts", "k8s.container.cpu_request", "k8s.container.memory_request", "k8s.container.memory_limit"}

err = pmetrictest.CompareMetrics(expectedMetrics, *selected,
err = pmetrictest.CompareMetrics(expectedMetrics, *selectedMetrics,
pmetrictest.IgnoreTimestamp(),
pmetrictest.IgnoreStartTimestamp(),
pmetrictest.IgnoreMetricAttributeValue("container.id", metricNames...),
Expand Down Expand Up @@ -859,7 +863,9 @@ func testK8sClusterReceiverMetrics(t *testing.T) {
pmetrictest.IgnoreMetricDataPointsOrder(),
pmetrictest.IgnoreSubsequentDataPoints("k8s.container.ready", "k8s.container.restarts"),
)

if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" {
writeNewExpectedMetricsResult(t, expectedMetricsFile, selectedMetrics)
}
require.NoError(t, err)
}

Expand Down Expand Up @@ -1109,7 +1115,8 @@ func testAgentMetrics(t *testing.T) {
)
assert.NoError(t, err)

expectedInternalMetrics, err := golden.ReadMetrics(filepath.Join(testDir, expectedValuesDir, "expected_internal_metrics.yaml"))
expectedInternalMetricsFile := filepath.Join(testDir, expectedValuesDir, "expected_internal_metrics.yaml")
expectedInternalMetrics, err := golden.ReadMetrics(expectedInternalMetricsFile)
require.NoError(t, err)

replaceWithStar := func(string) string { return "*" }
Expand Down Expand Up @@ -1162,16 +1169,21 @@ func testAgentMetrics(t *testing.T) {
pmetrictest.IgnoreScopeMetricsOrder(),
pmetrictest.IgnoreMetricDataPointsOrder(),
)
if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" {
writeNewExpectedMetricsResult(t, expectedInternalMetricsFile, selectedInternalMetrics)
}
assert.NoError(t, err)

expectedKubeletStatsMetrics, err := golden.ReadMetrics(filepath.Join(testDir, expectedValuesDir, "expected_kubeletstats_metrics.yaml"))
expectedKubeletStatsMetricsFile := filepath.Join(testDir, expectedValuesDir, "expected_kubeletstats_metrics.yaml")
expectedKubeletStatsMetrics, err := golden.ReadMetrics(expectedKubeletStatsMetricsFile)
require.NoError(t, err)
selectedKubeletstatsMetrics := selectMetricSet(expectedKubeletStatsMetrics, "container.memory.usage", agentMetricsConsumer, false)
if selectedKubeletstatsMetrics == nil {
t.Skip("No metric batch identified with the right metric count, exiting")
return
}
require.NotNil(t, selectedKubeletstatsMetrics)

err = pmetrictest.CompareMetrics(expectedKubeletStatsMetrics, *selectedKubeletstatsMetrics,
pmetrictest.IgnoreTimestamp(),
pmetrictest.IgnoreStartTimestamp(),
Expand Down Expand Up @@ -1211,7 +1223,8 @@ func testAgentMetrics(t *testing.T) {
pmetrictest.IgnoreScopeMetricsOrder(),
pmetrictest.IgnoreMetricDataPointsOrder(),
)
if err != nil {
if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" {
writeNewExpectedMetricsResult(t, expectedKubeletStatsMetricsFile, selectedKubeletstatsMetrics)
t.Skipf("we have trouble identifying exact payloads right now: %v", err)
} else {
assert.NoError(t, err)
Expand Down
48 changes: 24 additions & 24 deletions functional_tests/histogram_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -163,22 +163,28 @@ func testHistogramMetrics(t *testing.T) {
otlpMetricsSink := setupOnce(t)
waitForMetrics(t, 5, otlpMetricsSink)

expectedKubeSchedulerMetrics, err := golden.ReadMetrics(filepath.Join(testDir, "scheduler_metrics.yaml"))
expectedKubeSchedulerMetricsFile := filepath.Join(testDir, "scheduler_metrics.yaml")
expectedKubeSchedulerMetrics, err := golden.ReadMetrics(expectedKubeSchedulerMetricsFile)
require.NoError(t, err)

expectedKubeProxyMetrics, err := golden.ReadMetrics(filepath.Join(testDir, "proxy_metrics.yaml"))
expectedKubeProxyMetricsFile := filepath.Join(testDir, "proxy_metrics.yaml")
expectedKubeProxyMetrics, err := golden.ReadMetrics(expectedKubeProxyMetricsFile)
require.NoError(t, err)

expectedApiMetrics, err := golden.ReadMetrics(filepath.Join(testDir, "api_metrics.yaml"))
expectedApiMetricsFile := filepath.Join(testDir, "api_metrics.yaml")
expectedApiMetrics, err := golden.ReadMetrics(expectedApiMetricsFile)
require.NoError(t, err)

expectedControllerManagerMetrics, err := golden.ReadMetrics(filepath.Join(testDir, "controller_manager_metrics.yaml"))
expectedControllerManagerMetricsFile := filepath.Join(testDir, "controller_manager_metrics.yaml")
expectedControllerManagerMetrics, err := golden.ReadMetrics(expectedControllerManagerMetricsFile)
require.NoError(t, err)

expectedCoreDNSMetrics, err := golden.ReadMetrics(filepath.Join(testDir, "coredns_metrics.yaml"))
expectedCoreDNSMetricsFile := filepath.Join(testDir, "coredns_metrics.yaml")
expectedCoreDNSMetrics, err := golden.ReadMetrics(expectedCoreDNSMetricsFile)
require.NoError(t, err)

expectedEtcdMetrics, err := golden.ReadMetrics(filepath.Join(testDir, "etcd_metrics.yaml"))
expectedEtcdMetricsFile := filepath.Join(testDir, "etcd_metrics.yaml")
expectedEtcdMetrics, err := golden.ReadMetrics(expectedEtcdMetricsFile)
require.NoError(t, err)

var corednsMetrics *pmetric.Metrics
Expand Down Expand Up @@ -258,9 +264,8 @@ func testHistogramMetrics(t *testing.T) {
pmetrictest.IgnoreSubsequentDataPoints("coredns_proxy_request_duration_seconds"),
)
assert.NoError(t, err)
if err != nil {
require.NoError(t, os.MkdirAll("results", 0755))
require.NoError(t, golden.WriteMetrics(t, filepath.Join("results", "coredns_metrics.yaml"), *corednsMetrics))
if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" {
writeNewExpectedMetricsResult(t, expectedCoreDNSMetricsFile, corednsMetrics)
}

err = pmetrictest.CompareMetrics(expectedKubeSchedulerMetrics, *schedulerMetrics,
Expand All @@ -287,9 +292,8 @@ func testHistogramMetrics(t *testing.T) {
pmetrictest.IgnoreMetricDataPointsOrder(),
)
assert.NoError(t, err)
if err != nil {
require.NoError(t, os.MkdirAll("results", 0755))
require.NoError(t, golden.WriteMetrics(t, filepath.Join("results", "scheduler_metrics.yaml"), *schedulerMetrics))
if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" {
writeNewExpectedMetricsResult(t, expectedKubeSchedulerMetricsFile, schedulerMetrics)
}

err = pmetrictest.CompareMetrics(expectedKubeProxyMetrics, *kubeProxyMetrics,
Expand All @@ -316,9 +320,8 @@ func testHistogramMetrics(t *testing.T) {
pmetrictest.IgnoreMetricDataPointsOrder(),
)
assert.NoError(t, err)
if err != nil {
require.NoError(t, os.MkdirAll("results", 0755))
require.NoError(t, golden.WriteMetrics(t, filepath.Join("results", "proxy_metrics.yaml"), *kubeProxyMetrics))
if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" {
writeNewExpectedMetricsResult(t, expectedKubeProxyMetricsFile, kubeProxyMetrics)
}

err = pmetrictest.CompareMetrics(expectedApiMetrics, *apiMetrics,
Expand All @@ -337,9 +340,8 @@ func testHistogramMetrics(t *testing.T) {
pmetrictest.IgnoreMetricDataPointsOrder(),
)
assert.NoError(t, err)
if err != nil {
require.NoError(t, os.MkdirAll("results", 0755))
require.NoError(t, golden.WriteMetrics(t, filepath.Join("results", "api_metrics.yaml"), *apiMetrics))
if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" {
writeNewExpectedMetricsResult(t, expectedApiMetricsFile, apiMetrics)
}

err = pmetrictest.CompareMetrics(expectedControllerManagerMetrics, *controllerManagerMetrics,
Expand All @@ -366,9 +368,8 @@ func testHistogramMetrics(t *testing.T) {
pmetrictest.IgnoreMetricDataPointsOrder(),
)
assert.NoError(t, err)
if err != nil {
require.NoError(t, os.MkdirAll("results", 0755))
require.NoError(t, golden.WriteMetrics(t, filepath.Join("results", "controller_manager_metrics.yaml"), *controllerManagerMetrics))
if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" {
writeNewExpectedMetricsResult(t, expectedControllerManagerMetricsFile, controllerManagerMetrics)
}

err = pmetrictest.CompareMetrics(expectedEtcdMetrics, *etcdMetrics,
Expand Down Expand Up @@ -397,8 +398,7 @@ func testHistogramMetrics(t *testing.T) {
pmetrictest.IgnoreMetricDataPointsOrder(),
)
assert.NoError(t, err)
if err != nil {
require.NoError(t, os.MkdirAll("results", 0755))
require.NoError(t, golden.WriteMetrics(t, filepath.Join("results", "etcd_metrics.yaml"), *etcdMetrics))
if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" {
writeNewExpectedMetricsResult(t, expectedEtcdMetricsFile, etcdMetrics)
}
}

0 comments on commit 13281f4

Please sign in to comment.