Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

fix(servicegraph): make virtual node tests reliable on Windows #37550

Draft
wants to merge 2 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 27 additions & 0 deletions .chloggen/fix_servicegraph-windows-tests.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
# Use this changelog template to create an entry for release notes.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: bug_fix
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This change doesn't require a changelog entry: it is not visible to users. You can remove this file from the change; I will add the label to skip the changelog check.


# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
component: connector/servicegraphconnector

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Make virtual node tests reliable on Windows by improving timing and polling mechanisms.

# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
issues: [33836]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:

# If your change doesn't affect end users or the exported elements of any package,
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: [user]
56 changes: 32 additions & 24 deletions connector/servicegraphconnector/connector_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -647,18 +647,17 @@ func TestExtraDimensionsLabels(t *testing.T) {
}

func TestVirtualNodeServerLabels(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("skipping test on Windows, see https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33836")
}

virtualNodeDimensions := []string{"peer.service", "db.system", "messaging.system"}
cfg := &Config{
Dimensions: virtualNodeDimensions,
LatencyHistogramBuckets: []time.Duration{time.Duration(0.1 * float64(time.Second)), time.Duration(1 * float64(time.Second)), time.Duration(10 * float64(time.Second))},
Store: StoreConfig{MaxItems: 10},
VirtualNodePeerAttributes: virtualNodeDimensions,
VirtualNodeExtraLabel: true,
MetricsFlushInterval: time.Millisecond,
// Reduce flush interval for faster test execution
MetricsFlushInterval: 10 * time.Millisecond,
// Reduce store expiration interval
StoreExpirationLoop: 10 * time.Millisecond,
}

set := componenttest.NewNopTelemetrySettings()
Expand All @@ -675,14 +674,19 @@ func TestVirtualNodeServerLabels(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, conn.ConsumeTraces(context.Background(), td))

conn.store.Expire()
assert.Eventually(t, func() bool {
return conn.store.Len() == 0
}, 100*time.Millisecond, 2*time.Millisecond)
require.NoError(t, conn.Shutdown(context.Background()))
// Wait for metrics to be generated with timeout
deadline := time.Now().Add(5 * time.Second)
var metrics []pmetric.Metrics
for time.Now().Before(deadline) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Side note: you can keep using assert.Eventually or assert.EventuallyWithT; the main advantage is the error message produced in case of failure.

metrics = conn.metricsConsumer.(*mockMetricsExporter).GetMetrics()
if len(metrics) > 0 {
break
}
time.Sleep(10 * time.Millisecond)
}

metrics := conn.metricsConsumer.(*mockMetricsExporter).GetMetrics()
require.GreaterOrEqual(t, len(metrics), 1) // Unreliable sleep-based check
require.NotEmpty(t, metrics, "no metrics generated within timeout")
require.NoError(t, conn.Shutdown(context.Background()))

expectedMetrics, err := golden.ReadMetrics(expected)
assert.NoError(t, err)
Expand All @@ -695,18 +699,17 @@ func TestVirtualNodeServerLabels(t *testing.T) {
}

func TestVirtualNodeClientLabels(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("skipping test on Windows, see https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33836")
}

virtualNodeDimensions := []string{"peer.service", "db.system", "messaging.system"}
cfg := &Config{
Dimensions: virtualNodeDimensions,
LatencyHistogramBuckets: []time.Duration{time.Duration(0.1 * float64(time.Second)), time.Duration(1 * float64(time.Second)), time.Duration(10 * float64(time.Second))},
Store: StoreConfig{MaxItems: 10},
VirtualNodePeerAttributes: virtualNodeDimensions,
VirtualNodeExtraLabel: true,
MetricsFlushInterval: time.Millisecond,
// Reduce flush interval for faster test execution
MetricsFlushInterval: 10 * time.Millisecond,
// Reduce store expiration interval
StoreExpirationLoop: 10 * time.Millisecond,
}

set := componenttest.NewNopTelemetrySettings()
Expand All @@ -723,14 +726,19 @@ func TestVirtualNodeClientLabels(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, conn.ConsumeTraces(context.Background(), td))

conn.store.Expire()
assert.Eventually(t, func() bool {
return conn.store.Len() == 0
}, 100*time.Millisecond, 2*time.Millisecond)
require.NoError(t, conn.Shutdown(context.Background()))
// Wait for metrics to be generated with timeout
deadline := time.Now().Add(5 * time.Second)
var metrics []pmetric.Metrics
for time.Now().Before(deadline) {
metrics = conn.metricsConsumer.(*mockMetricsExporter).GetMetrics()
if len(metrics) > 0 {
break
}
time.Sleep(10 * time.Millisecond)
}

metrics := conn.metricsConsumer.(*mockMetricsExporter).GetMetrics()
require.GreaterOrEqual(t, len(metrics), 1) // Unreliable sleep-based check
require.NotEmpty(t, metrics, "no metrics generated within timeout")
require.NoError(t, conn.Shutdown(context.Background()))

expectedMetrics, err := golden.ReadMetrics(expected)
assert.NoError(t, err)
Expand Down