diff --git a/functional_tests/functional_test.go b/functional_tests/functional_test.go index 7987a9eeab..43d62117e4 100644 --- a/functional_tests/functional_test.go +++ b/functional_tests/functional_test.go @@ -4,24 +4,32 @@ package functional_tests import ( + "bytes" "context" "encoding/json" "fmt" "os" "path/filepath" + "regexp" "runtime" "strings" + "sync" "testing" + "text/template" "time" "github.com/docker/docker/api/types" docker "github.com/docker/docker/client" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/ptracetest" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/receiver/otlpreceiver" @@ -31,14 +39,27 @@ import ( "helm.sh/helm/v3/pkg/chart/loader" "helm.sh/helm/v3/pkg/kube" appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth" "k8s.io/client-go/tools/clientcmd" + + "github.com/signalfx/splunk-otel-collector-chart/functional_tests/internal" ) -const testKubeConfig = "/tmp/kube-config-splunk-otel-collector-chart-functional-testing" +const ( + testKubeConfig = "/tmp/kube-config-splunk-otel-collector-chart-functional-testing" + hecReceiverPort = 8090 + hecMetricsReceiverPort = 8091 + hecLogsObjectsReceiverPort = 8092 + signalFxReceiverPort = 9443 + signalFxReceiverK8sClusterReceiverPort = 19443 + otlpReceiverPort = 4317 + apiPort = 8881 +) // Test_Functions tests the chart with a real k8s cluster. // Run the following commands prior to running the test locally: @@ -51,16 +72,49 @@ const testKubeConfig = "/tmp/kube-config-splunk-otel-collector-chart-functional- // cd functional_tests/testdata/nodejs // docker build -t nodejs_test:latest . // kind load docker-image nodejs_test:latest --name kind -func Test_Functions(t *testing.T) { - var expectedTraces ptrace.Traces - expectedTracesFile := filepath.Join("testdata", "expected_traces.yaml") - expectedTraces, err := readTraces(expectedTracesFile) - require.NoError(t, err) - expectedMetricsFile := filepath.Join("testdata", "expected_cluster_receiver.yaml") - expectedMetrics, err := readMetrics(expectedMetricsFile) - require.NoError(t, err) +// When running tests you can use the following env vars to help with local development: +// SKIP_SETUP: skip setting up the chart and apps. Useful if they are already deployed. +// SKIP_TEARDOWN: skip deleting the chart and apps as part of cleanup. Useful to keep around for local development. 
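+//
+// For example, a typical local run might look like this (illustrative only, assuming a kind
+// cluster named "kind" whose kubeconfig has been exported to the path in testKubeConfig):
+//   kind get kubeconfig --name kind > /tmp/kube-config-splunk-otel-collector-chart-functional-testing
+//   cd functional_tests && SKIP_TEARDOWN=true go test -v .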
+ +var globalSinks *sinks + +var setupRun = sync.Once{} +type sinks struct { + logsConsumer *consumertest.LogsSink + hecMetricsConsumer *consumertest.MetricsSink + logsObjectsConsumer *consumertest.LogsSink + agentMetricsConsumer *consumertest.MetricsSink + k8sclusterReceiverMetricsConsumer *consumertest.MetricsSink + tracesConsumer *consumertest.TracesSink +} + +func setupOnce(t *testing.T) *sinks { + setupRun.Do(func() { + // create an API server + internal.CreateApiServer(t, apiPort) + // set ingest pipelines + logs, metrics := setupHEC(t) + globalSinks = &sinks{ + logsConsumer: logs, + hecMetricsConsumer: metrics, + logsObjectsConsumer: setupHECLogsObjects(t), + agentMetricsConsumer: setupSignalfxReceiver(t, signalFxReceiverPort), + k8sclusterReceiverMetricsConsumer: setupSignalfxReceiver(t, signalFxReceiverK8sClusterReceiverPort), + tracesConsumer: setupTraces(t), + } + // deploy the chart and applications. + if os.Getenv("SKIP_SETUP") == "true" { + t.Log("Skipping setup as SKIP_SETUP is set to true") + return + } + deployChartsAndApps(t) + }) + + return globalSinks +} +func deployChartsAndApps(t *testing.T) { kubeConfig, err := clientcmd.BuildConfigFromFlags("", testKubeConfig) require.NoError(t, err) clientset, err := kubernetes.NewForConfig(kubeConfig) @@ -69,16 +123,37 @@ func Test_Functions(t *testing.T) { chartPath := filepath.Join("..", "helm-charts", "splunk-otel-collector") chart, err := loader.Load(chartPath) require.NoError(t, err) - valuesBytes, err := os.ReadFile(filepath.Join("testdata", "test_values.yaml")) + valuesBytes, err := os.ReadFile(filepath.Join("testdata", "test_values.yaml.tmpl")) + require.NoError(t, err) + replacements := struct { + K8sClusterEndpoint string + AgentEndpoint string + LogHecEndpoint string + MetricHecEndpoint string + OtlpEndpoint string + ApiURLEndpoint string + LogObjectsHecEndpoint string + }{ + fmt.Sprintf("http://%s:%d", hostEndpoint(t), signalFxReceiverK8sClusterReceiverPort), + fmt.Sprintf("http://%s:%d", hostEndpoint(t), signalFxReceiverPort), + fmt.Sprintf("http://%s:%d", hostEndpoint(t), hecReceiverPort), + fmt.Sprintf("http://%s:%d/services/collector", hostEndpoint(t), hecMetricsReceiverPort), + fmt.Sprintf("%s:%d", hostEndpoint(t), otlpReceiverPort), + fmt.Sprintf("http://%s:%d", hostEndpoint(t), apiPort), + fmt.Sprintf("http://%s:%d/services/collector", hostEndpoint(t), hecLogsObjectsReceiverPort), + } + tmpl, err := template.New("").Parse(string(valuesBytes)) + require.NoError(t, err) + var buf bytes.Buffer + err = tmpl.Execute(&buf, replacements) require.NoError(t, err) - valuesStr := strings.ReplaceAll(string(valuesBytes), "$ENDPOINT", fmt.Sprintf("%s:4317", hostEndpoint(t))) var values map[string]interface{} - err = yaml.Unmarshal([]byte(valuesStr), &values) + err = yaml.Unmarshal(buf.Bytes(), &values) require.NoError(t, err) actionConfig := new(action.Configuration) if err := actionConfig.Init(kube.GetConfig(testKubeConfig, "", "default"), "default", os.Getenv("HELM_DRIVER"), func(format string, v ...interface{}) { - t.Logf(format, v) + t.Logf(format+"\n", v) }); err != nil { require.NoError(t, err) } @@ -87,17 +162,12 @@ func Test_Functions(t *testing.T) { install.ReleaseName = "sock" _, err = install.Run(chart, values) if err != nil { - t.Logf("error reported during helm install: %v", err) + t.Logf("error reported during helm install: %v\n", err) retryUpgrade := action.NewUpgrade(actionConfig) retryUpgrade.Namespace = "default" retryUpgrade.Install = true - require.Eventually(t, func() bool { - _, err = 
retryUpgrade.Run("sock", chart, values) - if err != nil { - t.Logf("error reported during helm upgrade: %v\n", err) - } - return err == nil - }, 3*time.Minute, 30*time.Second) + _, err = retryUpgrade.Run("sock", chart, values) + require.NoError(t, err) } waitForAllDeploymentsToStart(t, clientset) @@ -110,83 +180,292 @@ func Test_Functions(t *testing.T) { deployment, _, err := decode(stream, nil, nil) require.NoError(t, err) _, err = deployments.Create(context.Background(), deployment.(*appsv1.Deployment), metav1.CreateOptions{}) + if err != nil { + _, err2 := deployments.Update(context.Background(), deployment.(*appsv1.Deployment), metav1.UpdateOptions{}) + assert.NoError(t, err2) + if err2 != nil { + require.NoError(t, err) + } + } + jobstream, err := os.ReadFile(filepath.Join("testdata", "test_jobs.yaml")) require.NoError(t, err) + var namespaces []*corev1.Namespace + var jobs []*batchv1.Job + for _, resourceYAML := range strings.Split(string(jobstream), "---") { + if len(resourceYAML) == 0 { + continue + } - waitForAllDeploymentsToStart(t, clientset) + obj, groupVersionKind, err := decode( + []byte(resourceYAML), + nil, + nil) + require.NoError(t, err) + if groupVersionKind.Group == "" && + groupVersionKind.Version == "v1" && + groupVersionKind.Kind == "Namespace" { + nm := obj.(*corev1.Namespace) + namespaces = append(namespaces, nm) + nms := clientset.CoreV1().Namespaces() + _, err := nms.Create(context.Background(), nm, metav1.CreateOptions{}) + require.NoError(t, err) + t.Logf("Deployed namespace %s", nm.Name) + } - tracesConsumer := new(consumertest.TracesSink) - metricsConsumer := new(consumertest.MetricsSink) - logsConsumer := new(consumertest.LogsSink) - wantEntries := 3 // Minimal number of traces, metrics, and logs to wait for. - waitForData(t, wantEntries, tracesConsumer, metricsConsumer, logsConsumer) + waitForAllNamespacesToBeCreated(t, clientset) - replaceWithStar := func(string) string { return "*" } - shortenNames := func(value string) string { - if strings.HasPrefix(value, "kube-proxy") { - return "kube-proxy" - } - if strings.HasPrefix(value, "local-path-provisioner") { - return "local-path-provisioner" - } - if strings.HasPrefix(value, "kindnet") { - return "kindnet" - } - if strings.HasPrefix(value, "coredns") { - return "coredns" + if groupVersionKind.Group == "batch" && + groupVersionKind.Version == "v1" && + groupVersionKind.Kind == "Job" { + job := obj.(*batchv1.Job) + jobs = append(jobs, job) + jobClient := clientset.BatchV1().Jobs(job.Namespace) + _, err := jobClient.Create(context.Background(), job, metav1.CreateOptions{}) + require.NoError(t, err) + t.Logf("Deployed job %s", job.Name) } - if strings.HasPrefix(value, "otelcol") { - return "otelcol" + } + + waitForAllDeploymentsToStart(t, clientset) + + t.Cleanup(func() { + if os.Getenv("SKIP_TEARDOWN") == "true" { + t.Log("Skipping teardown as SKIP_TEARDOWN is set to true") + return } - if strings.HasPrefix(value, "sock-splunk-otel-collector-agent") { - return "sock-splunk-otel-collector-agent" + waitTime := int64(0) + _ = deployments.Delete(context.Background(), "nodejs-test", metav1.DeleteOptions{ + GracePeriodSeconds: &waitTime, + }) + for _, job := range jobs { + jobClient := clientset.BatchV1().Jobs(job.Namespace) + _ = jobClient.Delete(context.Background(), job.Name, metav1.DeleteOptions{ + GracePeriodSeconds: &waitTime, + }) } - if strings.HasPrefix(value, "sock-splunk-otel-collector-k8s-cluster-receiver") { - return "sock-splunk-otel-collector-k8s-cluster-receiver" + for _, nm := range namespaces { + 
nmClient := clientset.CoreV1().Namespaces() + _ = nmClient.Delete(context.Background(), nm.Name, metav1.DeleteOptions{ + GracePeriodSeconds: &waitTime, + }) } - if strings.HasPrefix(value, "cert-manager-cainjector") { - return "cert-manager-cainjector" + uninstall := action.NewUninstall(actionConfig) + uninstall.IgnoreNotFound = true + uninstall.Wait = true + _, _ = uninstall.Run("sock") + }) +} + +func Test_Functions(t *testing.T) { + _ = setupOnce(t) + t.Run("node.js traces captured", testNodeJSTraces) + t.Run("kubernetes cluster metrics", testK8sClusterReceiverMetrics) + t.Run("agent logs", testAgentLogs) + t.Run("test HEC metrics", testHECMetrics) + t.Run("test k8s objects", testK8sObjects) + t.Run("test agent metrics", testAgentMetrics) +} + +func testNodeJSTraces(t *testing.T) { + tracesConsumer := setupOnce(t).tracesConsumer + + var expectedTraces ptrace.Traces + expectedTracesFile := filepath.Join("testdata", "expected_traces.yaml") + expectedTraces, err := readTraces(expectedTracesFile) + require.NoError(t, err) + + waitForTraces(t, 3, tracesConsumer) + + latestTrace := tracesConsumer.AllTraces()[len(tracesConsumer.AllTraces())-1] + + ignoreSpanAttribute("net.peer.port", expectedTraces) + ignoreSpanAttribute("net.peer.port", latestTrace) + ignoreSpanAttribute("http.user_agent", expectedTraces) + ignoreSpanAttribute("http.user_agent", latestTrace) + ignoreTraceID(expectedTraces) + ignoreSpanID(expectedTraces) + ignoreTraceID(latestTrace) + ignoreSpanID(latestTrace) + ignoreStartTimestamp(latestTrace) + ignoreEndTimestamp(latestTrace) + ignoreStartTimestamp(expectedTraces) + ignoreEndTimestamp(expectedTraces) + + require.NoError(t, ptracetest.CompareTraces(expectedTraces, latestTrace, + ptracetest.IgnoreResourceAttributeValue("container.id"), + ptracetest.IgnoreResourceAttributeValue("k8s.deployment.name"), + ptracetest.IgnoreResourceAttributeValue("k8s.pod.ip"), + ptracetest.IgnoreResourceAttributeValue("k8s.pod.name"), + ptracetest.IgnoreResourceAttributeValue("k8s.pod.uid"), + ptracetest.IgnoreResourceAttributeValue("k8s.replicaset.name"), + ptracetest.IgnoreResourceSpansOrder(), + ptracetest.IgnoreScopeSpansOrder(), + )) +} + +func ignoreStartTimestamp(traces ptrace.Traces) { + for i := 0; i < traces.ResourceSpans().Len(); i++ { + rs := traces.ResourceSpans().At(i) + for j := 0; j < rs.ScopeSpans().Len(); j++ { + ss := rs.ScopeSpans().At(j) + for k := 0; k < ss.Spans().Len(); k++ { + span := ss.Spans().At(k) + span.SetStartTimestamp(0) + } } - if strings.HasPrefix(value, "sock-operator") { - return "sock-operator" + } +} + +func ignoreEndTimestamp(traces ptrace.Traces) { + for i := 0; i < traces.ResourceSpans().Len(); i++ { + rs := traces.ResourceSpans().At(i) + for j := 0; j < rs.ScopeSpans().Len(); j++ { + ss := rs.ScopeSpans().At(j) + for k := 0; k < ss.Spans().Len(); k++ { + span := ss.Spans().At(k) + span.SetEndTimestamp(0) + } } - if strings.HasPrefix(value, "nodejs-test") { - return "nodejs-test" + } +} + +func ignoreSpanAttribute(attributeName string, traces ptrace.Traces) { + for i := 0; i < traces.ResourceSpans().Len(); i++ { + rs := traces.ResourceSpans().At(i) + for j := 0; j < rs.ScopeSpans().Len(); j++ { + ss := rs.ScopeSpans().At(j) + for k := 0; k < ss.Spans().Len(); k++ { + span := ss.Spans().At(k) + if _, ok := span.Attributes().Get(attributeName); ok { + span.Attributes().PutStr(attributeName, "*") + } + } } - if strings.HasPrefix(value, "cert-manager-webhook") { - return "cert-manager-webhook" + } +} + +func ignoreTraceID(traces ptrace.Traces) { + for i := 0; 
i < traces.ResourceSpans().Len(); i++ { + rs := traces.ResourceSpans().At(i) + for j := 0; j < rs.ScopeSpans().Len(); j++ { + ss := rs.ScopeSpans().At(j) + for k := 0; k < ss.Spans().Len(); k++ { + span := ss.Spans().At(k) + span.SetTraceID(pcommon.NewTraceIDEmpty()) + } } - if strings.HasPrefix(value, "cert-manager") { - return "cert-manager" + } +} + +func ignoreSpanID(traces ptrace.Traces) { + for i := 0; i < traces.ResourceSpans().Len(); i++ { + rs := traces.ResourceSpans().At(i) + for j := 0; j < rs.ScopeSpans().Len(); j++ { + ss := rs.ScopeSpans().At(j) + for k := 0; k < ss.Spans().Len(); k++ { + span := ss.Spans().At(k) + span.SetSpanID(pcommon.NewSpanIDEmpty()) + } } + } +} - return value +func shortenNames(value string) string { + if strings.HasPrefix(value, "kube-proxy") { + return "kube-proxy" + } + if strings.HasPrefix(value, "local-path-provisioner") { + return "local-path-provisioner" } + if strings.HasPrefix(value, "kindnet") { + return "kindnet" + } + if strings.HasPrefix(value, "coredns") { + return "coredns" + } + if strings.HasPrefix(value, "otelcol") { + return "otelcol" + } + if strings.HasPrefix(value, "sock-splunk-otel-collector-agent") { + return "sock-splunk-otel-collector-agent" + } + if strings.HasPrefix(value, "sock-splunk-otel-collector-k8s-cluster-receiver") { + return "sock-splunk-otel-collector-k8s-cluster-receiver" + } + if strings.HasPrefix(value, "cert-manager-cainjector") { + return "cert-manager-cainjector" + } + if strings.HasPrefix(value, "sock-operator") { + return "sock-operator" + } + if strings.HasPrefix(value, "nodejs-test") { + return "nodejs-test" + } + if strings.HasPrefix(value, "cert-manager-webhook") { + return "cert-manager-webhook" + } + if strings.HasPrefix(value, "cert-manager") { + return "cert-manager" + } + + return value +} + +func testK8sClusterReceiverMetrics(t *testing.T) { + metricsConsumer := setupOnce(t).k8sclusterReceiverMetricsConsumer + expectedMetricsFile := filepath.Join("testdata", "expected_cluster_receiver.yaml") + expectedMetrics, err := readMetrics(expectedMetricsFile) + require.NoError(t, err) + + replaceWithStar := func(string) string { return "*" } containerImageShorten := func(value string) string { return value[(strings.LastIndex(value, "/") + 1):] } - latestTrace := tracesConsumer.AllTraces()[len(tracesConsumer.AllTraces())-1] - actualSpan := latestTrace.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0) - - expectedSpan := expectedTraces.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0) - expectedSpan.Attributes().Range(func(k string, _ pcommon.Value) bool { - _, ok := actualSpan.Attributes().Get(k) - assert.True(t, ok) - return true - }) - var selected pmetric.Metrics - for _, m := range metricsConsumer.AllMetrics() { - if m.ResourceMetrics().Len() == expectedMetrics.ResourceMetrics().Len() { - selected = m + var selected *pmetric.Metrics + for h := len(metricsConsumer.AllMetrics()) - 1; h >= 0; h-- { + m := metricsConsumer.AllMetrics()[h] + foundCorrectSet := false + OUTER: + for i := 0; i < m.ResourceMetrics().Len(); i++ { + for j := 0; j < m.ResourceMetrics().At(i).ScopeMetrics().Len(); j++ { + for k := 0; k < m.ResourceMetrics().At(i).ScopeMetrics().At(j).Metrics().Len(); k++ { + metricToConsider := m.ResourceMetrics().At(i).ScopeMetrics().At(j).Metrics().At(k) + if metricToConsider.Name() == "k8s.container.restarts" { + foundCorrectSet = true + break OUTER + } + } + } + } + if !foundCorrectSet { + continue + } + if m.ResourceMetrics().Len() == expectedMetrics.ResourceMetrics().Len() && 
m.MetricCount() == expectedMetrics.MetricCount() { + selected = &m break } } + require.NotNil(t, selected) + + metricNames := []string{"k8s.node.condition_ready", "k8s.namespace.phase", "k8s.pod.phase", "k8s.replicaset.desired", "k8s.replicaset.available", "k8s.daemonset.ready_nodes", "k8s.daemonset.misscheduled_nodes", "k8s.daemonset.desired_scheduled_nodes", "k8s.daemonset.current_scheduled_nodes", "k8s.container.ready", "k8s.container.memory_request", "k8s.container.memory_limit", "k8s.container.cpu_request", "k8s.container.cpu_limit", "k8s.deployment.desired", "k8s.deployment.available", "k8s.container.restarts", "k8s.container.cpu_request", "k8s.container.memory_request", "k8s.container.memory_limit"} + require.NoError(t, - pmetrictest.CompareMetrics(expectedMetrics, selected, + pmetrictest.CompareMetrics(expectedMetrics, *selected, pmetrictest.IgnoreTimestamp(), pmetrictest.IgnoreStartTimestamp(), - pmetrictest.IgnoreMetricValues("k8s.deployment.desired", "k8s.deployment.available", "k8s.container.restarts", "k8s.container.cpu_request", "k8s.container.memory_request", "k8s.container.memory_limit"), + pmetrictest.IgnoreMetricAttributeValue("container.id", metricNames...), + pmetrictest.IgnoreMetricAttributeValue("k8s.daemonset.uid", metricNames...), + pmetrictest.IgnoreMetricAttributeValue("k8s.deployment.uid", metricNames...), + pmetrictest.IgnoreMetricAttributeValue("k8s.pod.uid", metricNames...), + pmetrictest.IgnoreMetricAttributeValue("k8s.pod.name", metricNames...), + pmetrictest.IgnoreMetricAttributeValue("k8s.replicaset.uid", metricNames...), + pmetrictest.IgnoreMetricAttributeValue("k8s.replicaset.name", metricNames...), + pmetrictest.IgnoreMetricAttributeValue("k8s.namespace.uid", metricNames...), + pmetrictest.IgnoreMetricAttributeValue("container.image.tag", metricNames...), + pmetrictest.IgnoreMetricAttributeValue("k8s.node.uid", metricNames...), + pmetrictest.IgnoreMetricValues(metricNames...), pmetrictest.ChangeResourceAttributeValue("k8s.deployment.name", shortenNames), pmetrictest.ChangeResourceAttributeValue("k8s.pod.name", shortenNames), pmetrictest.ChangeResourceAttributeValue("k8s.replicaset.name", shortenNames), @@ -200,6 +479,7 @@ func Test_Functions(t *testing.T) { pmetrictest.ChangeResourceAttributeValue("k8s.daemonset.uid", replaceWithStar), pmetrictest.ChangeResourceAttributeValue("container.image.name", containerImageShorten), pmetrictest.ChangeResourceAttributeValue("container.id", replaceWithStar), + pmetrictest.ChangeResourceAttributeValue("host.name", replaceWithStar), pmetrictest.IgnoreScopeVersion(), pmetrictest.IgnoreResourceMetricsOrder(), pmetrictest.IgnoreMetricsOrder(), @@ -209,6 +489,283 @@ func Test_Functions(t *testing.T) { ) } +func testAgentLogs(t *testing.T, ) { + logsConsumer := setupOnce(t).logsConsumer + waitForLogs(t, 5, logsConsumer) + + var helloWorldResource pcommon.Resource + var helloWorldLogRecord *plog.LogRecord + var podAnnoResource pcommon.Resource + var podAnnoLogRecord *plog.LogRecord + var nsAnnoResource pcommon.Resource + var nsAnnoLogRecord *plog.LogRecord + //var journaldResource pcommon.Resource + //var journaldLogRecord *plog.LogRecord + var sourcetypes []string + + for i := 0; i < len(logsConsumer.AllLogs()); i++ { + l := logsConsumer.AllLogs()[i] + for j := 0; j < l.ResourceLogs().Len(); j++ { + rl := l.ResourceLogs().At(j) + if value, ok := rl.Resource().Attributes().Get("com.splunk.sourcetype"); ok { + sourcetypes = append(sourcetypes, value.AsString()) + } + + for k := 0; k < rl.ScopeLogs().Len(); k++ { + sl := 
rl.ScopeLogs().At(k) + for m := 0; m < sl.LogRecords().Len(); m++ { + logRecord := sl.LogRecords().At(m) + if logRecord.Body().AsString() == "Hello World" { + helloWorldLogRecord = &logRecord + helloWorldResource = rl.Resource() + } + if value, ok := rl.Resource().Attributes().Get("com.splunk.index"); ok { + if "pod-anno" == value.AsString() { + podAnnoLogRecord = &logRecord + podAnnoResource = rl.Resource() + } + if "ns-anno" == value.AsString() { + nsAnnoLogRecord = &logRecord + nsAnnoResource = rl.Resource() + } + } + //if value, ok := rl.Resource().Attributes().Get("com.splunk.sourcetype"); ok { + // if strings.Contains(value.AsString(), "journald") { + // journaldLogRecord = &logRecord + // journaldResource = rl.Resource() + // } + //} + } + } + } + } + { + assert.NotNil(t, helloWorldLogRecord) + sourceType, ok := helloWorldResource.Attributes().Get("com.splunk.sourcetype") + assert.True(t, ok) + assert.Equal(t, "kube:container:nodejs-test", sourceType.AsString()) + source, ok := helloWorldResource.Attributes().Get("com.splunk.source") + assert.True(t, ok) + assert.Regexp(t, regexp.MustCompile("/var/log/pods/default_nodejs-test-.*/nodejs-test/0.log"), source.AsString()) + index, ok := helloWorldResource.Attributes().Get("com.splunk.index") + assert.True(t, ok) + assert.Equal(t, "main", index.AsString()) + podName, ok := helloWorldLogRecord.Attributes().Get("k8s.pod.name") + assert.True(t, ok) + assert.Regexp(t, regexp.MustCompile("nodejs-test-.*"), podName.AsString()) + } + { + assert.NotNil(t, podAnnoLogRecord) + sourceType, ok := podAnnoResource.Attributes().Get("com.splunk.sourcetype") + assert.True(t, ok) + assert.Equal(t, "kube:container:pod-w-index-wo-ns-index", sourceType.AsString()) + } + { + assert.NotNil(t, nsAnnoLogRecord) + sourceType, ok := nsAnnoResource.Attributes().Get("com.splunk.sourcetype") + assert.True(t, ok) + assert.Equal(t, "kube:container:pod-wo-index-w-ns-index", sourceType.AsString()) + } + { + // journald testing fails with kind. 
+ //assert.NotNil(t, journaldLogRecord) + //_, ok := journaldResource.Attributes().Get("com.splunk.host") + //assert.True(t, ok) + + //foundContainerdService := false + //foundDockerService := false + //foundKubeletService := false + // + //for _, sourcetype := range sourcetypes { + // switch sourcetype { + // case "kube:journald:containerd.service": + // foundContainerdService = true + // case "kube:journald:docker.service": + // foundDockerService = true + // case "kube:journald:kubelet.service": + // foundKubeletService = true + // } + //} + //assert.True(t, foundContainerdService) + //assert.True(t, foundDockerService) + //assert.True(t, foundKubeletService) + } +} + +func testK8sObjects(t *testing.T) { + logsObjectsConsumer := setupOnce(t).logsObjectsConsumer + waitForLogs(t, 5, logsObjectsConsumer) + + var kinds []string + var sourceTypes []string + + for i := 0; i < len(logsObjectsConsumer.AllLogs()); i++ { + l := logsObjectsConsumer.AllLogs()[i] + for j := 0; j < l.ResourceLogs().Len(); j++ { + rl := l.ResourceLogs().At(j) + for k := 0; k < rl.ScopeLogs().Len(); k++ { + sl := rl.ScopeLogs().At(k) + for m := 0; m < sl.LogRecords().Len(); m++ { + logRecord := sl.LogRecords().At(m) + if logRecord.Body().Type() == pcommon.ValueTypeMap { + if kind, ok := logRecord.Body().Map().Get("kind"); ok { + kinds = append(kinds, kind.AsString()) + } + } + } + } + if value, ok := rl.Resource().Attributes().Get("com.splunk.sourcetype"); ok { + sourceTypes = append(sourceTypes, value.AsString()) + } + } + } + + assert.Contains(t, kinds, "Pod") + assert.Contains(t, kinds, "Namespace") + assert.Contains(t, kinds, "Node") + + assert.Contains(t, sourceTypes, "kube:object:pods") + assert.Contains(t, sourceTypes, "kube:object:namespaces") + assert.Contains(t, sourceTypes, "kube:object:nodes") +} + +func testAgentMetrics(t *testing.T) { + agentMetricsConsumer := setupOnce(t).agentMetricsConsumer + + metricNames := []string{ + "container.filesystem.available", + "container.filesystem.capacity", + "container.filesystem.usage", + "container.memory.usage", + "k8s.pod.network.errors", + "k8s.pod.network.io", + "otelcol_exporter_sent_log_records", + "otelcol_otelsvc_k8s_ip_lookup_miss", + "otelcol_processor_refused_log_records", + "otelcol_processor_dropped_log_records", + "otelcol_processor_accepted_log_records", + "otelcol_processor_batch_batch_send_size_sum", + "otelcol_processor_batch_batch_send_size_count", + "otelcol_processor_batch_batch_send_size_bucket", + "otelcol_exporter_queue_size", + "otelcol_exporter_sent_metric_points", + "otelcol_otelsvc_k8s_namespace_added", + "otelcol_otelsvc_k8s_pod_added", + "otelcol_otelsvc_k8s_pod_table_size", + "otelcol_otelsvc_k8s_pod_updated", + "otelcol_process_cpu_seconds", + "otelcol_process_memory_rss", + "otelcol_process_runtime_heap_alloc_bytes", + "otelcol_process_runtime_total_alloc_bytes", + "otelcol_process_runtime_total_sys_memory_bytes", + "otelcol_process_uptime", + "otelcol_processor_accepted_metric_points", + "otelcol_processor_batch_timeout_trigger_send", + "otelcol_processor_dropped_metric_points", + "otelcol_processor_refused_metric_points", + "otelcol_receiver_accepted_metric_points", + "otelcol_receiver_refused_metric_points", + "otelcol_scraper_errored_metric_points", + "otelcol_scraper_scraped_metric_points", + "system.cpu.load_average.15m", + "system.cpu.load_average.1m", + "system.cpu.load_average.5m", + "system.disk.operations", + "system.filesystem.usage", + "system.memory.usage", + "system.network.errors", + "system.network.io", + 
"system.paging.operations", + } + checkMetricsAreEmitted(t, agentMetricsConsumer, metricNames) +} + +func testHECMetrics(t *testing.T) { + hecMetricsConsumer := setupOnce(t).hecMetricsConsumer + + metricNames := []string{ + "container.cpu.time", + "container.cpu.utilization", + "container.filesystem.available", + "container.filesystem.capacity", + "container.filesystem.usage", + "container.memory.available", + "container.memory.major_page_faults", + "container.memory.page_faults", + "container.memory.rss", + "container.memory.usage", + "container.memory.working_set", + "k8s.node.network.errors", + "k8s.node.network.io", + "k8s.pod.cpu.time", + "k8s.pod.cpu.utilization", + "k8s.pod.filesystem.available", + "k8s.pod.filesystem.capacity", + "k8s.pod.filesystem.usage", + "k8s.pod.memory.available", + "k8s.pod.memory.major_page_faults", + "k8s.pod.memory.page_faults", + "k8s.pod.memory.rss", + "k8s.pod.memory.usage", + "k8s.pod.memory.working_set", + "k8s.pod.network.errors", + "k8s.pod.network.io", + "otelcol_exporter_queue_size", + "otelcol_exporter_sent_metric_points", + "otelcol_exporter_sent_log_records", + "otelcol_otelsvc_k8s_ip_lookup_miss", + "otelcol_processor_refused_log_records", + "otelcol_processor_dropped_log_records", + "otelcol_processor_accepted_log_records", + "otelcol_processor_batch_batch_send_size_sum", + "otelcol_processor_batch_batch_send_size_count", + "otelcol_processor_batch_batch_send_size_bucket", + "otelcol_otelsvc_k8s_namespace_added", + "otelcol_otelsvc_k8s_pod_added", + "otelcol_otelsvc_k8s_pod_table_size", + "otelcol_otelsvc_k8s_pod_updated", + "otelcol_process_cpu_seconds", + "otelcol_process_memory_rss", + "otelcol_process_runtime_heap_alloc_bytes", + "otelcol_process_runtime_total_alloc_bytes", + "otelcol_process_runtime_total_sys_memory_bytes", + "otelcol_process_uptime", + "otelcol_processor_accepted_metric_points", + "otelcol_processor_batch_timeout_trigger_send", + "otelcol_processor_dropped_metric_points", + "otelcol_processor_refused_metric_points", + "otelcol_receiver_accepted_metric_points", + "otelcol_receiver_refused_metric_points", + "otelcol_scraper_errored_metric_points", + "otelcol_scraper_scraped_metric_points", + "system.cpu.load_average.15m", + "system.cpu.load_average.1m", + "system.cpu.load_average.5m", + "system.cpu.time", + "system.disk.io", + "system.disk.io_time", + "system.disk.merged", + "system.disk.operation_time", + "system.disk.operations", + "system.disk.pending_operations", + "system.disk.weighted_io_time", + "system.filesystem.inodes.usage", + "system.filesystem.usage", + "system.memory.usage", + "system.network.connections", + "system.network.dropped", + "system.network.errors", + "system.network.io", + "system.network.packets", + "system.paging.faults", + "system.paging.operations", + "system.paging.usage", + "system.processes.count", + "system.processes.created", + } + checkMetricsAreEmitted(t, hecMetricsConsumer, metricNames) +} + func waitForAllDeploymentsToStart(t *testing.T, clientset *kubernetes.Clientset) { require.Eventually(t, func() bool { di, err := clientset.AppsV1().Deployments("default").List(context.Background(), metav1.ListOptions{}) @@ -222,6 +779,19 @@ func waitForAllDeploymentsToStart(t *testing.T, clientset *kubernetes.Clientset) }, 5*time.Minute, 10*time.Second) } +func waitForAllNamespacesToBeCreated(t *testing.T, clientset *kubernetes.Clientset) { + require.Eventually(t, func() bool { + nms, err := clientset.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{}) + require.NoError(t, 
err) + for _, d := range nms.Items { + if d.Status.Phase != corev1.NamespaceActive { + return false + } + } + return true + }, 5*time.Minute, 10*time.Second) +} + func waitForData(t *testing.T, entriesNum int, tc *consumertest.TracesSink, mc *consumertest.MetricsSink, lc *consumertest.LogsSink) { f := otlpreceiver.NewFactory() cfg := f.CreateDefaultConfig().(*otlpreceiver.Config) @@ -247,25 +817,6 @@ func waitForData(t *testing.T, entriesNum int, tc *consumertest.TracesSink, mc * len(tc.AllTraces()), len(mc.AllMetrics()), len(lc.AllLogs()), timeoutMinutes) } -func hostEndpoint(t *testing.T) string { - if runtime.GOOS == "darwin" { - return "host.docker.internal" - } - - client, err := docker.NewClientWithOpts(docker.FromEnv) - require.NoError(t, err) - client.NegotiateAPIVersion(context.Background()) - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - network, err := client.NetworkInspect(ctx, "kind", types.NetworkInspectOptions{}) - require.NoError(t, err) - for _, ipam := range network.IPAM.Config { - return ipam.Gateway - } - require.Fail(t, "failed to find host endpoint") - return "" -} - // readMetrics reads a pmetric.Metrics from the specified YAML or JSON file. func readMetrics(filePath string) (pmetric.Metrics, error) { b, err := os.ReadFile(filePath) @@ -305,3 +856,163 @@ func readTraces(filePath string) (ptrace.Traces, error) { unmarshaler := ptrace.JSONUnmarshaler{} return unmarshaler.UnmarshalTraces(b) } + +func setupTraces(t *testing.T) *consumertest.TracesSink { + tc := new(consumertest.TracesSink) + f := otlpreceiver.NewFactory() + cfg := f.CreateDefaultConfig().(*otlpreceiver.Config) + cfg.Protocols.GRPC.NetAddr.Endpoint = fmt.Sprintf("0.0.0.0:%d", otlpReceiverPort) + + rcvr, err := f.CreateTracesReceiver(context.Background(), receivertest.NewNopCreateSettings(), cfg, tc) + require.NoError(t, err) + + require.NoError(t, rcvr.Start(context.Background(), componenttest.NewNopHost())) + require.NoError(t, err, "failed creating traces receiver") + t.Cleanup(func() { + assert.NoError(t, rcvr.Shutdown(context.Background())) + }) + + return tc +} + +func setupSignalfxReceiver(t *testing.T, port int) *consumertest.MetricsSink { + mc := new(consumertest.MetricsSink) + f := signalfxreceiver.NewFactory() + cfg := f.CreateDefaultConfig().(*signalfxreceiver.Config) + cfg.Endpoint = fmt.Sprintf("0.0.0.0:%d", port) + + rcvr, err := f.CreateMetricsReceiver(context.Background(), receivertest.NewNopCreateSettings(), cfg, mc) + require.NoError(t, err) + + require.NoError(t, rcvr.Start(context.Background(), componenttest.NewNopHost())) + require.NoError(t, err, "failed creating metrics receiver") + t.Cleanup(func() { + assert.NoError(t, rcvr.Shutdown(context.Background())) + }) + + return mc +} + +func setupHEC(t *testing.T) (*consumertest.LogsSink, *consumertest.MetricsSink) { + // the splunkhecreceiver does poorly at receiving logs and metrics. Use separate ports for now. 
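+	// hecReceiverPort backs the LogHecEndpoint and hecMetricsReceiverPort backs the
+	// MetricHecEndpoint rendered into test_values.yaml.tmpl, so log and metric traffic
+	// lands on separate sinks.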
+ f := splunkhecreceiver.NewFactory() + cfg := f.CreateDefaultConfig().(*splunkhecreceiver.Config) + cfg.Endpoint = fmt.Sprintf("0.0.0.0:%d", hecReceiverPort) + + mCfg := f.CreateDefaultConfig().(*splunkhecreceiver.Config) + mCfg.Endpoint = fmt.Sprintf("0.0.0.0:%d", hecMetricsReceiverPort) + + lc := new(consumertest.LogsSink) + mc := new(consumertest.MetricsSink) + rcvr, err := f.CreateLogsReceiver(context.Background(), receivertest.NewNopCreateSettings(), cfg, lc) + mrcvr, err := f.CreateMetricsReceiver(context.Background(), receivertest.NewNopCreateSettings(), mCfg, mc) + require.NoError(t, err) + + require.NoError(t, rcvr.Start(context.Background(), componenttest.NewNopHost())) + require.NoError(t, err, "failed creating logs receiver") + t.Cleanup(func() { + assert.NoError(t, rcvr.Shutdown(context.Background())) + }) + + require.NoError(t, mrcvr.Start(context.Background(), componenttest.NewNopHost())) + require.NoError(t, err, "failed creating metrics receiver") + t.Cleanup(func() { + assert.NoError(t, mrcvr.Shutdown(context.Background())) + }) + + return lc, mc +} + +func setupHECLogsObjects(t *testing.T) *consumertest.LogsSink { + f := splunkhecreceiver.NewFactory() + cfg := f.CreateDefaultConfig().(*splunkhecreceiver.Config) + cfg.Endpoint = fmt.Sprintf("0.0.0.0:%d", hecLogsObjectsReceiverPort) + + lc := new(consumertest.LogsSink) + rcvr, err := f.CreateLogsReceiver(context.Background(), receivertest.NewNopCreateSettings(), cfg, lc) + require.NoError(t, err) + + require.NoError(t, rcvr.Start(context.Background(), componenttest.NewNopHost())) + require.NoError(t, err, "failed creating logs receiver") + t.Cleanup(func() { + assert.NoError(t, rcvr.Shutdown(context.Background())) + }) + + return lc +} +func checkMetricsAreEmitted(t *testing.T, mc *consumertest.MetricsSink, metricNames []string) { + metricsToFind := map[string]bool{} + for _, name := range metricNames { + metricsToFind[name] = false + } + timeoutMinutes := 3 + require.Eventuallyf(t, func() bool { + + for _, m := range mc.AllMetrics() { + for i := 0; i < m.ResourceMetrics().Len(); i++ { + rm := m.ResourceMetrics().At(i) + for j := 0; j < rm.ScopeMetrics().Len(); j++ { + sm := rm.ScopeMetrics().At(j) + for k := 0; k < sm.Metrics().Len(); k++ { + metric := sm.Metrics().At(k) + metricsToFind[metric.Name()] = true + } + } + } + } + var stillMissing []string + var found []string + missingCount := 0 + foundCount := 0 + for _, name := range metricNames { + if !metricsToFind[name] { + stillMissing = append(stillMissing, name) + missingCount++ + } else { + found = append(found, name) + foundCount++ + } + } + t.Logf("Found: %s", strings.Join(found, ",")) + t.Logf("Metrics found: %d, metrics still missing: %d\n%s\n", foundCount, missingCount, strings.Join(stillMissing, ",")) + return missingCount == 0 + }, time.Duration(timeoutMinutes)*time.Minute, 10*time.Second, + "failed to receive all metrics %d minutes", timeoutMinutes) +} + +func hostEndpoint(t *testing.T) string { + if runtime.GOOS == "darwin" { + return "host.docker.internal" + } + + client, err := docker.NewClientWithOpts(docker.FromEnv) + require.NoError(t, err) + client.NegotiateAPIVersion(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + network, err := client.NetworkInspect(ctx, "kind", types.NetworkInspectOptions{}) + require.NoError(t, err) + for _, ipam := range network.IPAM.Config { + return ipam.Gateway + } + require.Fail(t, "failed to find host endpoint") + return "" +} + +func waitForTraces(t 
*testing.T, entriesNum int, tc *consumertest.TracesSink) { + timeoutMinutes := 3 + require.Eventuallyf(t, func() bool { + return len(tc.AllTraces()) > entriesNum + }, time.Duration(timeoutMinutes)*time.Minute, 1*time.Second, + "failed to receive %d entries, received %d traces in %d minutes", entriesNum, + len(tc.AllTraces()), timeoutMinutes) +} + +func waitForLogs(t *testing.T, entriesNum int, lc *consumertest.LogsSink) { + timeoutMinutes := 3 + require.Eventuallyf(t, func() bool { + return len(lc.AllLogs()) > entriesNum + }, time.Duration(timeoutMinutes)*time.Minute, 1*time.Second, + "failed to receive %d entries, received %d logs in %d minutes", entriesNum, + len(lc.AllLogs()), timeoutMinutes) +} diff --git a/functional_tests/go.mod b/functional_tests/go.mod index f10391d74f..066b382621 100644 --- a/functional_tests/go.mod +++ b/functional_tests/go.mod @@ -9,6 +9,8 @@ require ( github.com/docker/docker v23.0.3+incompatible github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.86.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.86.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.86.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.86.0 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/collector/component v0.86.0 go.opentelemetry.io/collector/consumer v0.86.0 @@ -35,6 +37,7 @@ require ( github.com/Microsoft/hcsshim v0.11.0 // indirect github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/containerd/containerd v1.7.6 // indirect @@ -61,6 +64,7 @@ require ( github.com/go-openapi/swag v0.22.3 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/btree v1.0.1 // indirect @@ -105,6 +109,9 @@ require ( github.com/morikuni/aec v1.0.0 // indirect github.com/mostynb/go-grpc-compression v1.2.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.86.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.86.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.86.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc4 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect @@ -118,6 +125,7 @@ require ( github.com/rubenv/sql-migrate v1.5.2 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shopspring/decimal v1.3.1 // indirect + github.com/signalfx/com_signalfx_metrics_protobuf v0.0.3 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/spf13/cobra v1.7.0 // indirect @@ -138,9 +146,11 @@ require ( go.opentelemetry.io/collector/config/configtls v0.86.0 // indirect go.opentelemetry.io/collector/config/internal v0.86.0 // indirect go.opentelemetry.io/collector/confmap v0.86.0 // indirect + go.opentelemetry.io/collector/exporter v0.86.0 // indirect 
go.opentelemetry.io/collector/extension v0.86.0 // indirect go.opentelemetry.io/collector/extension/auth v0.86.0 // indirect go.opentelemetry.io/collector/featuregate v1.0.0-rcv0015 // indirect + go.opentelemetry.io/collector/semconv v0.86.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.44.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect go.opentelemetry.io/otel v1.18.0 // indirect diff --git a/functional_tests/go.sum b/functional_tests/go.sum index 3b42a3324e..a1fd211d76 100644 --- a/functional_tests/go.sum +++ b/functional_tests/go.sum @@ -616,6 +616,8 @@ github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuP github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= @@ -740,6 +742,7 @@ github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= @@ -964,6 +967,7 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= @@ -1047,11 +1051,26 @@ github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnu github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.86.0 
h1:LjDMbehWLH+t6fkp6uQdpiu9sc3XgyG9Wjve4qV+wOU= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter v0.86.0 h1:xqoxs6rG6G+klvzEyeEo0T35V0GFC900zJANj8sFJjM= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.86.0 h1:g7HlND105lwm7NW8JCxAfbpaFyk1WKcEUUVwchIo9zE= github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.86.0 h1:HEuJ7hCbLVfoL7xLrGQ2QORvocyFfWuaoDuzyTLwTdc= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.86.0 h1:S6b7ToTSFZvVzcabjqoUBqAwair7YuELvBS6mOAopHs= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.86.0/go.mod h1:aLQB8gu7vJ1lokUeWoZs9ExpduamPrD3oRbDk7hNg/g= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.86.0 h1:hPwD5f77ZFaXgbX1/HwB5xcHCdPSbEUaz72Vy9ttxSs= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.86.0/go.mod h1:yALAKkORvMeaaMGFhKIzuHGPdDCjEpBahWL+zTxfCuc= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.86.0 h1:2/KuYugX/jjjj+KRvKKVDwTuTPrSEnZUsznnmFobP34= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.86.0 h1:c0YrPA9p78Sqm3QWW5OFAuajdTWbTwVvawdvL1hbxvA= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.86.0 h1:EzGSvuCXAsGpwgeieTVcy1gs0hOlPidhFPcvwcPEU8s= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.86.0/go.mod h1:EL62K0jwPFXa3uxYeYMZGS1TSLR6AdGHRvM7RnRlosY= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.86.0 h1:xt/YvYpgssWk2Ix2C9SSXrILIzRqyWe+r5RE348m1fE= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.86.0/go.mod h1:fjK1kn7PIDP+TqOIFVEth3w0Eiexx5jIk411c//fYkM= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.86.0 h1:qR/wCuXENMydERtUBpTMCDuQIkGA+x2zh8vUy+bOGq0= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.86.0/go.mod h1:yrtyAYeuyUdaP6xfywqu1XPtkWqq3vlpUvNX3YSEL8Y= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.86.0 h1:6dKEwMkm/hxwxPQy4ClVArX2QL8Vuj5fHSgVB6iTNVY= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.86.0/go.mod h1:M6s2jpCHtoqS2DS/PNbuiJ1yy2edZ3lX2gyDajIwsRY= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.86.0 h1:0e9nClcszdZjBScBkYHY92ejuSZDzfB/XbUexskUfL0= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.86.0/go.mod h1:XIb6MhFQ4wK1WlTFZcB8ywuaYl7MXIak/JQwmchYrEY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0= @@ -1078,6 +1097,7 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= 
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -1126,9 +1146,13 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/shirou/gopsutil/v3 v3.23.8 h1:xnATPiybo6GgdRoC4YoGnxXZFRc3dqQTGi73oLvvBrE= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/signalfx/com_signalfx_metrics_protobuf v0.0.3 h1:32k2QLgsKhcEs55q4REPKyIadvid5FPy2+VMgvbmKJ0= +github.com/signalfx/com_signalfx_metrics_protobuf v0.0.3/go.mod h1:gJrXWi7wSGXfiC7+VheQaz+ypdCt5SmZNL+BRxUe7y4= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -1161,6 +1185,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -1175,6 +1201,7 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= @@ -1215,6 +1242,7 @@ go.opentelemetry.io/collector/confmap v0.86.0/go.mod h1:vJms49x98lXUR7Mj6/28eXV+ go.opentelemetry.io/collector/consumer v0.86.0 h1:8AL9I30tJV01KfcSaa+8DTiARIiUDA8o2p7yQoSFUCs= go.opentelemetry.io/collector/consumer v0.86.0/go.mod h1:SvoV1eto4VZzQ3ILKQ1rv4qgN8rUMJqupn78hoXLHRw= go.opentelemetry.io/collector/exporter 
v0.86.0 h1:LFmBb7S4Fkj5fv/nrUkLOy50GT6s4R/BLrv6uTb+GNo= +go.opentelemetry.io/collector/exporter v0.86.0/go.mod h1:+PKZrFV4sVgS2TVFnfZ+RCJqXexEENjW1riWaqkxsN4= go.opentelemetry.io/collector/extension v0.86.0 h1:oXnZarkh1aBgnr/U3JSj/lPpBJUquOQ3DHMXXP4Jacc= go.opentelemetry.io/collector/extension v0.86.0/go.mod h1:EPAloXQ+48577GvHTP5wGDvV4OyHPuldvM+2rYbM/fw= go.opentelemetry.io/collector/extension/auth v0.86.0 h1:VwKbeElL8sBnvRDC565EWOw4ixMG/t0oXjIphNsRszU= @@ -1229,6 +1257,7 @@ go.opentelemetry.io/collector/receiver v0.86.0/go.mod h1:oFpofH/OG4HqmaVsb8ftnIA go.opentelemetry.io/collector/receiver/otlpreceiver v0.86.0 h1:Iv6q/11whpWUVmnC2cyZOlhhCzlQK+SkJQfsQcxa13E= go.opentelemetry.io/collector/receiver/otlpreceiver v0.86.0/go.mod h1:dHh1SnS0BKxbZF+9+xeWTwI7CbQVzGHfzt4LYMIGYNE= go.opentelemetry.io/collector/semconv v0.86.0 h1:bLlPe/JYNjQHo744cqi7iIEybuLv+M5DntUwQPTrvZo= +go.opentelemetry.io/collector/semconv v0.86.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= go.opentelemetry.io/collector/service v0.86.0 h1:vyoynxNY2Oc6XET2ZvFkRC+Fpc1oMl9qQkORyX5LoWg= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.44.0 h1:b8xjZxHbLrXAum4SxJd1Rlm7Y/fKaB+6ACI7/e5EfSA= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.44.0/go.mod h1:1ei0a32xOGkFoySu7y1DAHfcuIhC0pNZpvY2huXuMy4= diff --git a/functional_tests/internal/api_server.go b/functional_tests/internal/api_server.go new file mode 100644 index 0000000000..51dd084806 --- /dev/null +++ b/functional_tests/internal/api_server.go @@ -0,0 +1,34 @@ +package internal + +import ( + "context" + "errors" + "fmt" + "net/http" + "testing" + + "github.com/stretchr/testify/require" +) + +func CreateApiServer(t *testing.T, port int) { + mux := http.NewServeMux() + mux.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) { + writer.WriteHeader(200) + }) + + _, cancelCtx := context.WithCancel(context.Background()) + s := &http.Server{ + Addr: fmt.Sprintf("0.0.0.0:%d", port), + Handler: mux, + } + + t.Cleanup(func() { + cancelCtx() + }) + + go func() { + if err := s.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) { + require.NoError(t, err) + } + }() +} diff --git a/functional_tests/internal/writer.go b/functional_tests/internal/writer.go index 4e60839e5f..90e9c19ba4 100644 --- a/functional_tests/internal/writer.go +++ b/functional_tests/internal/writer.go @@ -11,6 +11,7 @@ import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "gopkg.in/yaml.v3" ) @@ -93,3 +94,41 @@ func writeLogs(filePath string, logs plog.Logs) error { } return os.WriteFile(filePath, b, 0600) } + +// WriteTraces writes a ptrace.Traces to the specified file in YAML format. 
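+// It is intended for regenerating golden files: after writing the file it deliberately
+// fails the test so the call is not accidentally left in place.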
+func WriteTraces(t *testing.T, filePath string, traces ptrace.Traces) error { + if err := writeTraces(filePath, traces); err != nil { + return err + } + t.Logf("Golden file successfully written to %s.", filePath) + t.Log("NOTE: The WriteLogs call must be removed in order to pass the test.") + t.Fail() + return nil +} + +func marshalTraces(traces ptrace.Traces) ([]byte, error) { + marshaler := &ptrace.JSONMarshaler{} + fileBytes, err := marshaler.MarshalTraces(traces) + if err != nil { + return nil, err + } + var jsonVal map[string]interface{} + if err = json.Unmarshal(fileBytes, &jsonVal); err != nil { + return nil, err + } + b := &bytes.Buffer{} + enc := yaml.NewEncoder(b) + enc.SetIndent(2) + if err := enc.Encode(jsonVal); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +func writeTraces(filePath string, traces ptrace.Traces) error { + b, err := marshalTraces(traces) + if err != nil { + return err + } + return os.WriteFile(filePath, b, 0600) +} diff --git a/functional_tests/testdata/expected_cluster_receiver.yaml b/functional_tests/testdata/expected_cluster_receiver.yaml index 60fb568feb..f4bc5a2e99 100644 --- a/functional_tests/testdata/expected_cluster_receiver.yaml +++ b/functional_tests/testdata/expected_cluster_receiver.yaml @@ -1,2872 +1,4460 @@ resourceMetrics: - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: cert-manager - - key: k8s.namespace.uid - value: - stringValue: b16b230a-0d22-4064-98ea-7fab701c7e16 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 + - resource: {} scopeMetrics: - metrics: - - description: The current phase of namespaces (1 for active and 0 for terminating) - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.namespace.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: default - - key: k8s.namespace.uid - value: - stringValue: 04c3cb2a-a665-4e04-92d2-b3b67d38cf61 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: The current phase of namespaces (1 for active and 0 for terminating) - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.namespace.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: kube-node-lease - - key: k8s.namespace.uid - value: - stringValue: 91ffc163-9225-4e28-a1e5-53581209d28e - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: The current phase of namespaces (1 for active and 0 for terminating) - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.namespace.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - 
stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: kube-public - - key: k8s.namespace.uid - value: - stringValue: c9af04c1-e23a-4dac-9825-fdb759608650 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: The current phase of namespaces (1 for active and 0 for terminating) - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.namespace.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: k8s.namespace.uid - value: - stringValue: 96e3f487-e551-416d-8bf7-2764434d7211 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: The current phase of namespaces (1 for active and 0 for terminating) - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.namespace.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: local-path-storage - - key: k8s.namespace.uid - value: - stringValue: d3e67148-c796-4cf1-aabe-43209175d67e - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: The current phase of namespaces (1 for active and 0 for terminating) - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.namespace.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.node.uid - value: - stringValue: 31330b44-72a9-4f4f-bc74-ca614801e57f - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Ready condition status of the node (true=1, false=0, unknown=-1) - gauge: - dataPoints: - - asInt: "1" - timeUnixNano: "1000000" - name: k8s.node.condition_ready - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.daemonset.name - value: - stringValue: kindnet - - key: k8s.daemonset.uid - value: - stringValue: 6f6342ae-bf47-43de-b461-36e60b9b4357 - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.daemonset.current_scheduled_nodes - unit: "{node}" 
- - description: Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod) - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.daemonset.desired_scheduled_nodes - unit: "{node}" - - description: Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod - gauge: - dataPoints: - - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.daemonset.misscheduled_nodes - unit: "{node}" - - description: Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.daemonset.ready_nodes - unit: "{node}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.daemonset.name - value: - stringValue: kube-proxy - - key: k8s.daemonset.uid - value: - stringValue: 6883acde-992f-4fd5-b286-440f87503ac1 - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.daemonset.current_scheduled_nodes - unit: "{node}" - - description: Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod) - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.daemonset.desired_scheduled_nodes - unit: "{node}" - - description: Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod - gauge: - dataPoints: - - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.daemonset.misscheduled_nodes - unit: "{node}" - - description: Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.daemonset.ready_nodes - unit: "{node}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.daemonset.name - value: - stringValue: sock-splunk-otel-collector-agent - - key: k8s.daemonset.uid - value: - stringValue: 6532584f-74f9-43e9-905d-d469b5b627f3 - - key: k8s.namespace.name - value: - stringValue: default - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.daemonset.current_scheduled_nodes - unit: "{node}" - - description: Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod) - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: 
k8s.daemonset.desired_scheduled_nodes - unit: "{node}" - - description: Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod - gauge: - dataPoints: - - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.daemonset.misscheduled_nodes - unit: "{node}" - - description: Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.daemonset.ready_nodes - unit: "{node}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.deployment.name - value: - stringValue: cert-manager - - key: k8s.deployment.uid - value: - stringValue: 63358f3d-e277-4923-9184-82a94a767d67 - - key: k8s.namespace.name - value: - stringValue: cert-manager - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this deployment - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.deployment.available - unit: "{pod}" - - description: Number of desired pods in this deployment - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.deployment.desired - unit: "{pod}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.deployment.name - value: - stringValue: cert-manager-cainjector - - key: k8s.deployment.uid - value: - stringValue: 7fdf73d0-3ced-4909-ac52-0aba8c85c5e6 - - key: k8s.namespace.name - value: - stringValue: cert-manager - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this deployment - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.deployment.available - unit: "{pod}" - - description: Number of desired pods in this deployment - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.deployment.desired - unit: "{pod}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.deployment.name - value: - stringValue: cert-manager-webhook - - key: k8s.deployment.uid - value: - stringValue: d1bac2c3-6ce6-458d-8d75-3d533e4223c0 - - key: k8s.namespace.name - value: - stringValue: cert-manager - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this deployment - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.deployment.available - unit: "{pod}" - - description: Number of desired pods in this deployment - gauge: - 
dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.deployment.desired - unit: "{pod}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.deployment.name - value: - stringValue: coredns - - key: k8s.deployment.uid - value: - stringValue: 7238ae8f-d960-4d6f-8ddc-1038e19483e4 - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this deployment - gauge: + - gauge: dataPoints: - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.deployment.available - unit: "{pod}" - - description: Number of desired pods in this deployment - gauge: - dataPoints: - - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.deployment.desired - unit: "{pod}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.deployment.name - value: - stringValue: local-path-provisioner - - key: k8s.deployment.uid - value: - stringValue: f4f861dc-099f-4a81-a4fb-fbbf8397fd2e - - key: k8s.namespace.name - value: - stringValue: local-path-storage - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this deployment - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.deployment.available - unit: "{pod}" - - description: Number of desired pods in this deployment - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.deployment.desired - unit: "{pod}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.deployment.name - value: - stringValue: nodejs-test - - key: k8s.deployment.uid - value: - stringValue: 13485c12-7940-4ffc-99f8-9bc9ca8790b9 - - key: k8s.namespace.name - value: - stringValue: default - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this deployment - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.deployment.available - unit: "{pod}" - - description: Number of desired pods in this deployment - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.deployment.desired - unit: "{pod}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.deployment.name - value: - stringValue: sock-operator - - key: k8s.deployment.uid - value: - stringValue: e92ee97d-76a1-4d70-9d64-f4fea6b5168b - - key: k8s.namespace.name 
- value: - stringValue: default - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this deployment - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.deployment.available - unit: "{pod}" - - description: Number of desired pods in this deployment - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.deployment.desired - unit: "{pod}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.deployment.name - value: - stringValue: sock-splunk-otel-collector-k8s-cluster-receiver - - key: k8s.deployment.uid - value: - stringValue: 885caf3b-450d-436d-a211-33a895323de4 - - key: k8s.namespace.name - value: - stringValue: default - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this deployment - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.deployment.available - unit: "{pod}" - - description: Number of desired pods in this deployment - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.deployment.desired - unit: "{pod}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: cert-manager - - key: k8s.replicaset.name - value: - stringValue: cert-manager-5698c4d465 - - key: k8s.replicaset.uid - value: - stringValue: ba2f16fe-c502-4103-b3aa-01f59314ad31 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.replicaset.available - unit: "{pod}" - - description: Number of desired pods in this replicaset - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.replicaset.desired - unit: "{pod}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: cert-manager - - key: k8s.replicaset.name - value: - stringValue: cert-manager-cainjector-d4748596 - - key: k8s.replicaset.uid - value: - stringValue: 6068057a-93f0-45a5-a971-18a37ac80230 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - 
timeUnixNano: "2000000" - name: k8s.replicaset.available - unit: "{pod}" - - description: Number of desired pods in this replicaset - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.replicaset.desired - unit: "{pod}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: cert-manager - - key: k8s.replicaset.name - value: - stringValue: cert-manager-webhook-65d78d5c4b - - key: k8s.replicaset.uid - value: - stringValue: 148c767b-7aa6-4e5b-80d8-66a616188ed1 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.replicaset.available - unit: "{pod}" - - description: Number of desired pods in this replicaset - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.replicaset.desired - unit: "{pod}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: default - - key: k8s.replicaset.name - value: - stringValue: nodejs-test-57564b7dc9 - - key: k8s.replicaset.uid - value: - stringValue: 03533cee-b2bb-4945-9696-3c6c652ddc9b - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.replicaset.available - unit: "{pod}" - - description: Number of desired pods in this replicaset - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.replicaset.desired - unit: "{pod}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: default - - key: k8s.replicaset.name - value: - stringValue: sock-operator-949dd8564 - - key: k8s.replicaset.uid - value: - stringValue: 86c0736f-2b92-4db1-8c1a-8d04702c4b2f - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.replicaset.available - unit: "{pod}" - - description: Number of desired pods in this replicaset - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.replicaset.desired - unit: "{pod}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: 
k8s.namespace.name - value: - stringValue: default - - key: k8s.replicaset.name - value: - stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-67bff88777 - - key: k8s.replicaset.uid - value: - stringValue: 20e5af2a-cdab-44ee-8af4-d7358de908de - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.replicaset.available - unit: "{pod}" - - description: Number of desired pods in this replicaset - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.replicaset.desired - unit: "{pod}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: k8s.replicaset.name - value: - stringValue: coredns-5dd5756b68 - - key: k8s.replicaset.uid - value: - stringValue: dd7c6f6d-62af-4d77-88b5-e210d9cee0e7 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset - gauge: - dataPoints: - - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.replicaset.available - unit: "{pod}" - - description: Number of desired pods in this replicaset - gauge: - dataPoints: - - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.replicaset.desired - unit: "{pod}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: local-path-storage - - key: k8s.replicaset.name - value: - stringValue: local-path-provisioner-6f8956fb48 - - key: k8s.replicaset.uid - value: - stringValue: a386a9b1-b96c-4ede-8116-0d0c63e8dd88 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.replicaset.available - unit: "{pod}" - - description: Number of desired pods in this replicaset - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.replicaset.desired - unit: "{pod}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: cert-manager - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: cert-manager-5698c4d465-66vf8 - - key: k8s.pod.uid - value: - stringValue: 2c63c09e-6ffc-4058-bb37-6cc63de92bc9 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: 
https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) - gauge: - dataPoints: + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: cert-manager-cainjector-65c7bff89d-vksqw + - key: k8s.pod.uid + value: + stringValue: 36d87b59-8ddf-47e7-a145-aeb81201d19a + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.pod.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: cert-manager - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: cert-manager-cainjector-d4748596-bth5r - - key: k8s.pod.uid - value: - stringValue: e90e45fd-acb3-4a9a-bb57-00e65b2802d8 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) - gauge: - dataPoints: + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: cert-manager-cbcf9668d-9xvq5 + - key: k8s.pod.uid + value: + stringValue: 3e54c119-9054-428c-aeb4-14088263b4c7 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.pod.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: cert-manager - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: cert-manager-webhook-65d78d5c4b-8v4p4 - - key: k8s.pod.uid - value: - stringValue: 84e51af3-bab8-4446-9efd-ac48c34c2b33 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) - gauge: - dataPoints: + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: cert-manager-webhook-594cb9799b-jtv49 + - key: k8s.pod.uid + value: + stringValue: 4d5a76c4-db4f-466f-b925-13dc115f4b1b + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: 
"2000000" - name: k8s.pod.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: default - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: nodejs-test-57564b7dc9-xjsl2 - - key: k8s.pod.uid - value: - stringValue: 93397346-c9c3-4b44-8802-3a0bc74636ce - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) - gauge: - dataPoints: + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: nodejs-test-57564b7dc9-pf2bk + - key: k8s.pod.uid + value: + stringValue: b660efd2-e961-488a-af7a-2161f27565bb + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.pod.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: default - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: sock-operator-949dd8564-zvzwf - - key: k8s.pod.uid - value: - stringValue: 9e40cb1e-6007-48ef-9403-afdd23e98051 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) - gauge: - dataPoints: + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-operator-7867c9764-gdv94 + - key: k8s.pod.uid + value: + stringValue: 062e3244-7c7d-4f1a-84e5-93451dd93467 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.pod.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: default - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: sock-splunk-otel-collector-agent-zqlrn - - key: k8s.pod.uid - value: - stringValue: 4dd3f2ea-a998-45dd-8efe-374dcc1a7c89 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) - gauge: - dataPoints: + 
attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-pfm6z + - key: k8s.pod.uid + value: + stringValue: 9afe2416-ad03-4b81-8b21-051643ed6f11 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.pod.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: default - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-67bff88777g7b7j - - key: k8s.pod.uid - value: - stringValue: ee20e1f8-2b45-4b32-8a1c-585c85d239c6 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) - gauge: - dataPoints: + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-8687f4bfbb86fbd + - key: k8s.pod.uid + value: + stringValue: d310e423-e3ee-42ac-a284-9bab01bcbba4 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.pod.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: coredns-5dd5756b68-2bhmb - - key: k8s.pod.uid - value: - stringValue: 0aeddf50-90ca-4426-8cbc-dd0c245d6911 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) - gauge: - dataPoints: + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: coredns-5dd5756b68-ttk8c + - key: k8s.pod.uid + value: + stringValue: e29ffd0f-0138-49ef-92c5-c62195097294 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.pod.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: 
dev-operator - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: coredns-5dd5756b68-hpqzn - - key: k8s.pod.uid - value: - stringValue: ef076d0a-9b5e-40f3-956a-4e0b8e058427 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) - gauge: - dataPoints: + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: coredns-5dd5756b68-w7sqj + - key: k8s.pod.uid + value: + stringValue: c026c30f-457d-4649-b070-c708867ef2be + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.pod.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: etcd-kind-control-plane - - key: k8s.pod.uid - value: - stringValue: aa63c1e3-e408-4804-9b8c-c538a13c0e09 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) - gauge: - dataPoints: + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: etcd-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: bf9cabc7-b0cb-4fe0-b57d-6c1bb6189913 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.pod.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: kindnet-h7vm7 - - key: k8s.pod.uid - value: - stringValue: 651a0d53-9957-4c21-84f9-bfec00b85491 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) - gauge: - dataPoints: + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: 
k8s.pod.name + value: + stringValue: kindnet-pbtbs + - key: k8s.pod.uid + value: + stringValue: 047ef34e-0d0d-4cc4-82d2-87495e01d3ac + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.pod.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: kube-apiserver-kind-control-plane - - key: k8s.pod.uid - value: - stringValue: 0855481b-f77d-4193-95ff-2833dad6fcb2 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) - gauge: - dataPoints: + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-apiserver-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: 52cf37aa-d01a-495b-a6d9-1cad9ed2d512 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.pod.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: kube-controller-manager-kind-control-plane - - key: k8s.pod.uid - value: - stringValue: 2f9e753c-cf6f-4562-a2f2-3e1bca8eed3e - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) - gauge: - dataPoints: + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-controller-manager-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: a6874fb4-06a3-483b-829d-2497a9730bf0 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.pod.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: kube-proxy-9fw5h - - key: k8s.pod.uid - value: - stringValue: 
2ac4b2c8-f7f5-4fe3-af89-3b2221407d5a - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) - gauge: - dataPoints: + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-proxy-8glvr + - key: k8s.pod.uid + value: + stringValue: 47a23d3f-4ed5-42b2-b571-430f5f2c08e9 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.pod.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: kube-scheduler-kind-control-plane - - key: k8s.pod.uid - value: - stringValue: 733f9326-85cf-43d5-a7ce-2e10adcb0ce0 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) - gauge: - dataPoints: + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-scheduler-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: 5f9e1240-ab5c-457a-a212-b47052c9d97d + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.pod.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.namespace.name - value: - stringValue: local-path-storage - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: local-path-provisioner-6f8956fb48-l94nt - - key: k8s.pod.uid - value: - stringValue: 5c26db5f-c480-48d9-b928-846c8b71945f - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) - gauge: - dataPoints: + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: local-path-storage + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: local-path-provisioner-6f8956fb48-v8rfh + - key: k8s.pod.uid + value: + stringValue: bc4b4f43-06b7-4d79-9d86-1afcbf2cd221 + - key: metric_source + value: + stringValue: 
kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "3" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-w-exclude + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-w-ns-exclude-xd5d5 + - key: k8s.pod.uid + value: + stringValue: e55cab80-fc61-4bf6-9ae7-5cdaf5928398 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "3" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-exclude-wo-ns-exclude-h2nnc + - key: k8s.pod.uid + value: + stringValue: 6d450ffa-15b4-4db1-a743-6e9101fea174 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "3" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-w-ns-index-r8hsb + - key: k8s.pod.uid + value: + stringValue: efb9c24c-ad0f-435b-a2ee-e4d065acdd8e + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "3" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-wo-index-w-ns-index-9jg7m + - key: k8s.pod.uid + value: + stringValue: 45b0a4da-029e-4c86-84b4-543590f62893 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "3" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-wo-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-wo-ns-index-jqf98 + - key: k8s.pod.uid + value: + stringValue: e5d5973b-0155-4424-81b9-dfd40262b53e + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-wo-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-wo-index-wo-ns-index-qngh7 + - key: k8s.pod.uid + value: + stringValue: bd9ad7d6-f45f-4cb6-af4f-27fc0b0d53f2 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" name: k8s.pod.phase - unit: "1" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: container.id - value: - stringValue: 218c3fe71f3d09a7a67426700358753479acbf5cd8f23978527e1621e0c47dff - - key: container.image.name - value: - stringValue: 
ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator - - key: container.image.tag - value: - stringValue: v0.83.0 - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: manager - - key: k8s.namespace.name - value: - stringValue: default - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: sock-operator-949dd8564-zvzwf - - key: k8s.pod.uid - value: - stringValue: 9e40cb1e-6007-48ef-9403-afdd23e98051 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: + - gauge: dataPoints: - asDouble: 0.1 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.cpu_limit - unit: '{cpu}' - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: + attributes: + - key: container.id + value: + stringValue: 0f2dea1b44ca69d59cd23e9ea0ddf09beb859f6c8c2122a30ffbfbc3c75f4ef4 + - key: container.image.name + value: + stringValue: registry.k8s.io/etcd + - key: container.image.tag + value: + stringValue: 3.5.9-0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: etcd + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: etcd-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: bf9cabc7-b0cb-4fe0-b57d-6c1bb6189913 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asDouble: 0.1 + attributes: + - key: container.id + value: + stringValue: 167008237804229f4a618b65ebc574180fcb330a35b97ca53aead0bf1e7425fd + - key: container.image.name + value: + stringValue: ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator + - key: container.image.tag + value: + stringValue: v0.85.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: manager + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-operator-7867c9764-gdv94 + - key: k8s.pod.uid + value: + stringValue: 062e3244-7c7d-4f1a-84e5-93451dd93467 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asDouble: 0.25 + attributes: + - key: container.id + value: + stringValue: 3cccbd65f14e8f5855c527e7fcd09840be671f95765cf72e3d07d8bd1f91d0e6 + - key: container.image.name + value: + stringValue: registry.k8s.io/kube-apiserver + - key: container.image.tag + value: + stringValue: v1.28.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-apiserver + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-apiserver-kind-control-plane + 
- key: k8s.pod.uid + value: + stringValue: 52cf37aa-d01a-495b-a6d9-1cad9ed2d512 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asDouble: 0.1 + attributes: + - key: container.id + value: + stringValue: 5f18c9c8cd462d6e3b8fb356335844f6de2026ef7d93f9cfb74b1a08ae1ad1d0 + - key: container.image.name + value: + stringValue: registry.k8s.io/coredns/coredns + - key: container.image.tag + value: + stringValue: v1.10.1 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: coredns + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: coredns-5dd5756b68-w7sqj + - key: k8s.pod.uid + value: + stringValue: c026c30f-457d-4649-b070-c708867ef2be + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asDouble: 0.2 + attributes: + - key: container.id + value: + stringValue: 6ab275ce0fa40c66ca45b2438178cbe5632a38174a90d3fcc45ed3ecc5f4c38e + - key: container.image.name + value: + stringValue: registry.k8s.io/kube-controller-manager + - key: container.image.tag + value: + stringValue: v1.28.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-controller-manager + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-controller-manager-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: a6874fb4-06a3-483b-829d-2497a9730bf0 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asDouble: 0.005 + attributes: + - key: container.id + value: + stringValue: 8f91fb6daaa23a0d7310a6bdbed1ebd640680d4428652bac5053a37c5c019a4c + - key: container.image.name + value: + stringValue: quay.io/brancz/kube-rbac-proxy + - key: container.image.tag + value: + stringValue: v0.14.2 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-rbac-proxy + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-operator-7867c9764-gdv94 + - key: k8s.pod.uid + value: + stringValue: 062e3244-7c7d-4f1a-84e5-93451dd93467 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asDouble: 0.2 + attributes: + - key: container.id + value: + stringValue: b68c07b4e82828c9fb83b3c9cd119e0837506fcafc73c43c6f11d2916daf4754 + - key: container.image.name + value: + stringValue: quay.io/signalfx/splunk-otel-collector + - key: container.image.tag + value: + stringValue: 0.85.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: otel-collector + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-pfm6z + - key: k8s.pod.uid + value: + stringValue: 9afe2416-ad03-4b81-8b21-051643ed6f11 + - key: metric_source + value: + stringValue: kubernetes + - key: 
receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asDouble: 0.1 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" + attributes: + - key: container.id + value: + stringValue: b8b1c6e32ce1686ca6f1a61244f6b35631160ecbfc34bf499ebc4f042a44c8b3 + - key: container.image.name + value: + stringValue: registry.k8s.io/coredns/coredns + - key: container.image.tag + value: + stringValue: v1.10.1 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: coredns + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: coredns-5dd5756b68-ttk8c + - key: k8s.pod.uid + value: + stringValue: e29ffd0f-0138-49ef-92c5-c62195097294 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asDouble: 0.2 + attributes: + - key: container.id + value: + stringValue: d3e96ffe6a87a4b9927ac1627a7dc00c6e92561ef7d14d0f06340e706832071e + - key: container.image.name + value: + stringValue: quay.io/signalfx/splunk-otel-collector + - key: container.image.tag + value: + stringValue: 0.85.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: otel-collector + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-8687f4bfbb86fbd + - key: k8s.pod.uid + value: + stringValue: d310e423-e3ee-42ac-a284-9bab01bcbba4 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asDouble: 0.1 + attributes: + - key: container.id + value: + stringValue: e84ea721ed9d8bb476083d278dffa2f1ddf2c0d2ed6d1008264242a3f29a44df + - key: container.image.name + value: + stringValue: docker.io/kindest/kindnetd + - key: container.image.tag + value: + stringValue: v20230511-dc714da8 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kindnet-cni + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kindnet-pbtbs + - key: k8s.pod.uid + value: + stringValue: 047ef34e-0d0d-4cc4-82d2-87495e01d3ac + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asDouble: 0.1 + attributes: + - key: container.id + value: + stringValue: f305339d0131fb9abe4368daa59b1649e195f8d2f2f181cabacf3e5029a93856 + - key: container.image.name + value: + stringValue: registry.k8s.io/kube-scheduler + - key: container.image.tag + value: + stringValue: v1.28.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-scheduler + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-scheduler-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: 5f9e1240-ab5c-457a-a212-b47052c9d97d + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" name: k8s.container.cpu_request - 
unit: '{cpu}' - - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asInt: "134217728" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.memory_limit - unit: By - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asInt: "67108864" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.memory_request - unit: By - - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.ready - unit: "1" - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. - gauge: + - gauge: dataPoints: - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.restarts - unit: "{restart}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: container.id - value: - stringValue: 2ce5e93a6bb7ce009581f194fcd0de68974c4c629fab31455edffaea931840ac - - key: container.image.name - value: - stringValue: quay.io/jetstack/cert-manager-controller - - key: container.image.tag - value: - stringValue: v1.13.0 - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: cert-manager-controller - - key: k8s.namespace.name - value: - stringValue: cert-manager - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: cert-manager-5698c4d465-66vf8 - - key: k8s.pod.uid - value: - stringValue: 2c63c09e-6ffc-4058-bb37-6cc63de92bc9 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.ready - unit: "1" - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. 
- gauge: - dataPoints: + attributes: + - key: container.id + value: + stringValue: 04e136acbbefd063fd743318a65e115f2a53de681d0b8d259158bdc44c2d81e0 + - key: container.image.name + value: + stringValue: docker.io/rock1017/log-generator + - key: container.image.tag + value: + stringValue: 2.2.6 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-w-index-w-ns-exclude + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-exclude-wo-ns-exclude-h2nnc + - key: k8s.pod.uid + value: + stringValue: 6d450ffa-15b4-4db1-a743-6e9101fea174 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.restarts - unit: "{restart}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: container.id - value: - stringValue: 32d176f2c9afa23e3579aeb7b0a19f6dd86939db58bdd5fbf285714dc05e6020 - - key: container.image.name - value: - stringValue: registry.k8s.io/coredns/coredns - - key: container.image.tag - value: - stringValue: v1.10.1 - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: coredns - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: coredns-5dd5756b68-hpqzn - - key: k8s.pod.uid - value: - stringValue: ef076d0a-9b5e-40f3-956a-4e0b8e058427 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asDouble: 0.1 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.cpu_request - unit: '{cpu}' - - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asInt: "178257920" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.memory_limit - unit: By - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asInt: "73400320" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.memory_request - unit: By - - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.ready - unit: "1" - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. 
It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. - gauge: - dataPoints: + attributes: + - key: container.id + value: + stringValue: 0ddf476d6f32cbcba5d75e07449d0bfdd2850152b241853a9b9f0f249e17d341 + - key: container.image.name + value: + stringValue: docker.io/rock1017/log-generator + - key: container.image.tag + value: + stringValue: 2.2.6 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-w-index-w-ns-exclude + - key: k8s.namespace.name + value: + stringValue: ns-w-exclude + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-w-ns-exclude-xd5d5 + - key: k8s.pod.uid + value: + stringValue: e55cab80-fc61-4bf6-9ae7-5cdaf5928398 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.restarts - unit: "{restart}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: container.id - value: - stringValue: 3442d15574ab8643e355ea70fcf639d9ded90b41e5bc677663468ec45f3f5b37 - - key: container.image.name - value: - stringValue: quay.io/brancz/kube-rbac-proxy - - key: container.image.tag - value: - stringValue: v0.14.2 - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: kube-rbac-proxy - - key: k8s.namespace.name - value: - stringValue: default - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: sock-operator-949dd8564-zvzwf - - key: k8s.pod.uid - value: - stringValue: 9e40cb1e-6007-48ef-9403-afdd23e98051 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asDouble: 0.5 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.cpu_limit - unit: '{cpu}' - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asDouble: 0.005 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.cpu_request - unit: '{cpu}' - - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asInt: "134217728" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.memory_limit - unit: By - - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asInt: "67108864" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.memory_request - unit: By - - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) - gauge: - dataPoints: + attributes: + - key: container.id + value: + stringValue: 0de43e72001a29620c17a2389a191c6614045d60a4209f47acc69e3f755fb901 + - key: container.image.name + value: + stringValue: docker.io/rock1017/log-generator + - key: container.image.tag + value: + stringValue: 2.2.6 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-w-index-wo-ns-index + - key: k8s.namespace.name + value: + stringValue: ns-wo-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-wo-ns-index-jqf98 + - key: k8s.pod.uid + value: + stringValue: e5d5973b-0155-4424-81b9-dfd40262b53e + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.ready - unit: "1" - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. 
- gauge: - dataPoints: - - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.restarts - unit: "{restart}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: container.id - value: - stringValue: 3a5ba5a682be74cc096512a0eae38ad85cb79ee23b0c1b3568d220c99a6e70df - - key: container.image.name - value: - stringValue: docker.io/kindest/local-path-provisioner - - key: container.image.tag - value: - stringValue: v20230511-dc714da8 - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: local-path-provisioner - - key: k8s.namespace.name - value: - stringValue: local-path-storage - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: local-path-provisioner-6f8956fb48-l94nt - - key: k8s.pod.uid - value: - stringValue: 5c26db5f-c480-48d9-b928-846c8b71945f - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) - gauge: - dataPoints: + attributes: + - key: container.id + value: + stringValue: 0f2dea1b44ca69d59cd23e9ea0ddf09beb859f6c8c2122a30ffbfbc3c75f4ef4 + - key: container.image.name + value: + stringValue: registry.k8s.io/etcd + - key: container.image.tag + value: + stringValue: 3.5.9-0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: etcd + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: etcd-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: bf9cabc7-b0cb-4fe0-b57d-6c1bb6189913 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.ready - unit: "1" - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. 
- gauge: - dataPoints: - - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.restarts - unit: "{restart}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: container.id - value: - stringValue: 3fbecc78f0d2488829a7277aa4f0e4da4832b26ae8f650358fe0169e5c06d995 - - key: container.image.name - value: - stringValue: registry.k8s.io/kube-proxy - - key: container.image.tag - value: - stringValue: v1.28.0 - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: kube-proxy - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: kube-proxy-9fw5h - - key: k8s.pod.uid - value: - stringValue: 2ac4b2c8-f7f5-4fe3-af89-3b2221407d5a - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) - gauge: - dataPoints: + attributes: + - key: container.id + value: + stringValue: 167008237804229f4a618b65ebc574180fcb330a35b97ca53aead0bf1e7425fd + - key: container.image.name + value: + stringValue: ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator + - key: container.image.tag + value: + stringValue: v0.85.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: manager + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-operator-7867c9764-gdv94 + - key: k8s.pod.uid + value: + stringValue: 062e3244-7c7d-4f1a-84e5-93451dd93467 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.ready - unit: "1" - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. 
- gauge: - dataPoints: + attributes: + - key: container.id + value: + stringValue: 38809b5e5212eddddeec2d4d387e2f27ff754e0c3f67cc9a9ffb89db72601480 + - key: container.image.name + value: + stringValue: docker.io/library/nodejs_test + - key: container.image.tag + value: + stringValue: latest + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: nodejs-test + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: nodejs-test-57564b7dc9-pf2bk + - key: k8s.pod.uid + value: + stringValue: b660efd2-e961-488a-af7a-2161f27565bb + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: container.id + value: + stringValue: 3cccbd65f14e8f5855c527e7fcd09840be671f95765cf72e3d07d8bd1f91d0e6 + - key: container.image.name + value: + stringValue: registry.k8s.io/kube-apiserver + - key: container.image.tag + value: + stringValue: v1.28.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-apiserver + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-apiserver-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: 52cf37aa-d01a-495b-a6d9-1cad9ed2d512 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: container.id + value: + stringValue: 507978464797f2c6cbdbca13eb1343ab759436f2ad1fd1b48ecd068ad978e73b + - key: container.image.name + value: + stringValue: docker.io/kindest/local-path-provisioner + - key: container.image.tag + value: + stringValue: v20230511-dc714da8 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: local-path-provisioner + - key: k8s.namespace.name + value: + stringValue: local-path-storage + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: local-path-provisioner-6f8956fb48-v8rfh + - key: k8s.pod.uid + value: + stringValue: bc4b4f43-06b7-4d79-9d86-1afcbf2cd221 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: container.id + value: + stringValue: 5f18c9c8cd462d6e3b8fb356335844f6de2026ef7d93f9cfb74b1a08ae1ad1d0 + - key: container.image.name + value: + stringValue: registry.k8s.io/coredns/coredns + - key: container.image.tag + value: + stringValue: v1.10.1 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: coredns + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: coredns-5dd5756b68-w7sqj + - key: k8s.pod.uid + value: + stringValue: c026c30f-457d-4649-b070-c708867ef2be + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: container.id + value: + stringValue: 6ab275ce0fa40c66ca45b2438178cbe5632a38174a90d3fcc45ed3ecc5f4c38e + - key: 
container.image.name + value: + stringValue: registry.k8s.io/kube-controller-manager + - key: container.image.tag + value: + stringValue: v1.28.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-controller-manager + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-controller-manager-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: a6874fb4-06a3-483b-829d-2497a9730bf0 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: container.id + value: + stringValue: 85bd2e415148ad6f2088cda811ea999142093a04001a62f8b5e3593cbb9cb008 + - key: container.image.name + value: + stringValue: quay.io/jetstack/cert-manager-controller + - key: container.image.tag + value: + stringValue: v1.13.1 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: cert-manager-controller + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: cert-manager-cbcf9668d-9xvq5 + - key: k8s.pod.uid + value: + stringValue: 3e54c119-9054-428c-aeb4-14088263b4c7 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: container.id + value: + stringValue: 8e4a37866b88361493c42b279bfc0275b72e8d19c2dc2637e827063aa7c036ff + - key: container.image.name + value: + stringValue: quay.io/jetstack/cert-manager-webhook + - key: container.image.tag + value: + stringValue: v1.13.1 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: cert-manager-webhook + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: cert-manager-webhook-594cb9799b-jtv49 + - key: k8s.pod.uid + value: + stringValue: 4d5a76c4-db4f-466f-b925-13dc115f4b1b + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: container.id + value: + stringValue: 8f91fb6daaa23a0d7310a6bdbed1ebd640680d4428652bac5053a37c5c019a4c + - key: container.image.name + value: + stringValue: quay.io/brancz/kube-rbac-proxy + - key: container.image.tag + value: + stringValue: v0.14.2 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-rbac-proxy + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-operator-7867c9764-gdv94 + - key: k8s.pod.uid + value: + stringValue: 062e3244-7c7d-4f1a-84e5-93451dd93467 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: container.id + value: + stringValue: b68c07b4e82828c9fb83b3c9cd119e0837506fcafc73c43c6f11d2916daf4754 + - key: container.image.name + value: + stringValue: quay.io/signalfx/splunk-otel-collector + - key: container.image.tag + value: 
+ stringValue: 0.85.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: otel-collector + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-pfm6z + - key: k8s.pod.uid + value: + stringValue: 9afe2416-ad03-4b81-8b21-051643ed6f11 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: container.id + value: + stringValue: b8b1c6e32ce1686ca6f1a61244f6b35631160ecbfc34bf499ebc4f042a44c8b3 + - key: container.image.name + value: + stringValue: registry.k8s.io/coredns/coredns + - key: container.image.tag + value: + stringValue: v1.10.1 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: coredns + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: coredns-5dd5756b68-ttk8c + - key: k8s.pod.uid + value: + stringValue: e29ffd0f-0138-49ef-92c5-c62195097294 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.restarts - unit: "{restart}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: container.id - value: - stringValue: 58b82ee7aa41acc493118e3e5057d2f63ebc4ce73d7a6abc6a088622cb475022 - - key: container.image.name - value: - stringValue: quay.io/signalfx/splunk-otel-collector - - key: container.image.tag - value: - stringValue: 0.85.0 - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: otel-collector - - key: k8s.namespace.name - value: - stringValue: default - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: sock-splunk-otel-collector-agent-zqlrn - - key: k8s.pod.uid - value: - stringValue: 4dd3f2ea-a998-45dd-8efe-374dcc1a7c89 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asDouble: 0.2 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.cpu_limit - unit: '{cpu}' - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asDouble: 0.2 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.cpu_request - unit: '{cpu}' - - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asInt: "524288000" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.memory_limit - unit: By - - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asInt: "524288000" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.memory_request - unit: By - - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) - gauge: - dataPoints: + attributes: + - key: container.id + value: + stringValue: be43b1aca150b59e2b2f9b261b6cfd64619ba3238a1cc2e3c15c92ae3076cdfd + - key: container.image.name + value: + stringValue: docker.io/rock1017/log-generator + - key: container.image.tag + value: + stringValue: 2.2.6 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-wo-index-w-ns-index + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-wo-index-w-ns-index-9jg7m + - key: k8s.pod.uid + value: + stringValue: 45b0a4da-029e-4c86-84b4-543590f62893 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: container.id + value: + stringValue: d3e96ffe6a87a4b9927ac1627a7dc00c6e92561ef7d14d0f06340e706832071e + - key: container.image.name + value: + stringValue: quay.io/signalfx/splunk-otel-collector + - key: container.image.tag + value: + stringValue: 0.85.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: otel-collector + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-8687f4bfbb86fbd + - key: k8s.pod.uid + value: + stringValue: d310e423-e3ee-42ac-a284-9bab01bcbba4 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: container.id + value: + stringValue: e031ab5010dcf8711677089ae047575d480010797533d017948f747bb0086f54 + - key: container.image.name + value: + stringValue: registry.k8s.io/kube-proxy + - key: container.image.tag + value: + stringValue: v1.28.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-proxy + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-proxy-8glvr + - key: k8s.pod.uid + value: + stringValue: 47a23d3f-4ed5-42b2-b571-430f5f2c08e9 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" + attributes: + - key: container.id + value: + stringValue: e84ea721ed9d8bb476083d278dffa2f1ddf2c0d2ed6d1008264242a3f29a44df + - key: container.image.name + value: + stringValue: docker.io/kindest/kindnetd + - key: container.image.tag + value: + stringValue: v20230511-dc714da8 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kindnet-cni + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + 
stringValue: kindnet-pbtbs + - key: k8s.pod.uid + value: + stringValue: 047ef34e-0d0d-4cc4-82d2-87495e01d3ac + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: container.id + value: + stringValue: f305339d0131fb9abe4368daa59b1649e195f8d2f2f181cabacf3e5029a93856 + - key: container.image.name + value: + stringValue: registry.k8s.io/kube-scheduler + - key: container.image.tag + value: + stringValue: v1.28.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-scheduler + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-scheduler-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: 5f9e1240-ab5c-457a-a212-b47052c9d97d + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: container.id + value: + stringValue: f89d85a849e619987764e0fb1512a5fafaa5101ad1956e89e3935eca4194fb3a + - key: container.image.name + value: + stringValue: quay.io/jetstack/cert-manager-cainjector + - key: container.image.tag + value: + stringValue: v1.13.1 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: cert-manager-cainjector + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: cert-manager-cainjector-65c7bff89d-vksqw + - key: k8s.pod.uid + value: + stringValue: 36d87b59-8ddf-47e7-a145-aeb81201d19a + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: container.id + value: + stringValue: fdc0b7ceccd9a1fa5b0f941477e6ca8cc0c63fe512e0feb10a09270c69239ec1 + - key: container.image.name + value: + stringValue: docker.io/rock1017/log-generator + - key: container.image.tag + value: + stringValue: 2.2.6 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-wo-index-wo-ns-index + - key: k8s.namespace.name + value: + stringValue: ns-wo-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-wo-index-wo-ns-index-qngh7 + - key: k8s.pod.uid + value: + stringValue: bd9ad7d6-f45f-4cb6-af4f-27fc0b0d53f2 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: container.id + value: + stringValue: fe4e0e7117f2aaf3301cec1aafa623769e446120d276c29b25f2ec4c57996d63 + - key: container.image.name + value: + stringValue: docker.io/rock1017/log-generator + - key: container.image.tag + value: + stringValue: 2.2.6 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-w-index-w-ns-index + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-w-ns-index-r8hsb + - key: k8s.pod.uid + value: + stringValue: efb9c24c-ad0f-435b-a2ee-e4d065acdd8e + - key: metric_source + 
value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" name: k8s.container.ready - unit: "1" - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. - gauge: + - gauge: dataPoints: - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.restarts - unit: "{restart}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: container.id - value: - stringValue: 6456b8c5dfe70eb511d86a4b3af0b2d7e06a79f74eccbf20634924d390cec2cb - - key: container.image.name - value: - stringValue: registry.k8s.io/etcd - - key: container.image.tag - value: - stringValue: 3.5.9-0 - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: etcd - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: etcd-kind-control-plane - - key: k8s.pod.uid - value: - stringValue: aa63c1e3-e408-4804-9b8c-c538a13c0e09 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asDouble: 0.1 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.cpu_request - unit: '{cpu}' - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asInt: "104857600" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.memory_request - unit: By - - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.ready - unit: "1" - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. 
- gauge: - dataPoints: + attributes: + - key: container.id + value: + stringValue: 04e136acbbefd063fd743318a65e115f2a53de681d0b8d259158bdc44c2d81e0 + - key: container.image.name + value: + stringValue: docker.io/rock1017/log-generator + - key: container.image.tag + value: + stringValue: 2.2.6 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-w-index-w-ns-exclude + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-exclude-wo-ns-exclude-h2nnc + - key: k8s.pod.uid + value: + stringValue: 6d450ffa-15b4-4db1-a743-6e9101fea174 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: container.id + value: + stringValue: 0ddf476d6f32cbcba5d75e07449d0bfdd2850152b241853a9b9f0f249e17d341 + - key: container.image.name + value: + stringValue: docker.io/rock1017/log-generator + - key: container.image.tag + value: + stringValue: 2.2.6 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-w-index-w-ns-exclude + - key: k8s.namespace.name + value: + stringValue: ns-w-exclude + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-w-ns-exclude-xd5d5 + - key: k8s.pod.uid + value: + stringValue: e55cab80-fc61-4bf6-9ae7-5cdaf5928398 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: container.id + value: + stringValue: 0de43e72001a29620c17a2389a191c6614045d60a4209f47acc69e3f755fb901 + - key: container.image.name + value: + stringValue: docker.io/rock1017/log-generator + - key: container.image.tag + value: + stringValue: 2.2.6 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-w-index-wo-ns-index + - key: k8s.namespace.name + value: + stringValue: ns-wo-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-wo-ns-index-jqf98 + - key: k8s.pod.uid + value: + stringValue: e5d5973b-0155-4424-81b9-dfd40262b53e + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: container.id + value: + stringValue: 0f2dea1b44ca69d59cd23e9ea0ddf09beb859f6c8c2122a30ffbfbc3c75f4ef4 + - key: container.image.name + value: + stringValue: registry.k8s.io/etcd + - key: container.image.tag + value: + stringValue: 3.5.9-0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: etcd + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: etcd-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: bf9cabc7-b0cb-4fe0-b57d-6c1bb6189913 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: container.id + value: + stringValue: 167008237804229f4a618b65ebc574180fcb330a35b97ca53aead0bf1e7425fd + - key: container.image.name + 
value: + stringValue: ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator + - key: container.image.tag + value: + stringValue: v0.85.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: manager + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-operator-7867c9764-gdv94 + - key: k8s.pod.uid + value: + stringValue: 062e3244-7c7d-4f1a-84e5-93451dd93467 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: container.id + value: + stringValue: 38809b5e5212eddddeec2d4d387e2f27ff754e0c3f67cc9a9ffb89db72601480 + - key: container.image.name + value: + stringValue: docker.io/library/nodejs_test + - key: container.image.tag + value: + stringValue: latest + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: nodejs-test + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: nodejs-test-57564b7dc9-pf2bk + - key: k8s.pod.uid + value: + stringValue: b660efd2-e961-488a-af7a-2161f27565bb + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: container.id + value: + stringValue: 3cccbd65f14e8f5855c527e7fcd09840be671f95765cf72e3d07d8bd1f91d0e6 + - key: container.image.name + value: + stringValue: registry.k8s.io/kube-apiserver + - key: container.image.tag + value: + stringValue: v1.28.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-apiserver + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-apiserver-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: 52cf37aa-d01a-495b-a6d9-1cad9ed2d512 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: container.id + value: + stringValue: 507978464797f2c6cbdbca13eb1343ab759436f2ad1fd1b48ecd068ad978e73b + - key: container.image.name + value: + stringValue: docker.io/kindest/local-path-provisioner + - key: container.image.tag + value: + stringValue: v20230511-dc714da8 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: local-path-provisioner + - key: k8s.namespace.name + value: + stringValue: local-path-storage + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: local-path-provisioner-6f8956fb48-v8rfh + - key: k8s.pod.uid + value: + stringValue: bc4b4f43-06b7-4d79-9d86-1afcbf2cd221 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: container.id + value: + stringValue: 5f18c9c8cd462d6e3b8fb356335844f6de2026ef7d93f9cfb74b1a08ae1ad1d0 + - key: container.image.name + value: + stringValue: registry.k8s.io/coredns/coredns + - key: container.image.tag + value: + stringValue: v1.10.1 + - key: 
k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: coredns + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: coredns-5dd5756b68-w7sqj + - key: k8s.pod.uid + value: + stringValue: c026c30f-457d-4649-b070-c708867ef2be + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: container.id + value: + stringValue: 6ab275ce0fa40c66ca45b2438178cbe5632a38174a90d3fcc45ed3ecc5f4c38e + - key: container.image.name + value: + stringValue: registry.k8s.io/kube-controller-manager + - key: container.image.tag + value: + stringValue: v1.28.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-controller-manager + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-controller-manager-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: a6874fb4-06a3-483b-829d-2497a9730bf0 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: container.id + value: + stringValue: 85bd2e415148ad6f2088cda811ea999142093a04001a62f8b5e3593cbb9cb008 + - key: container.image.name + value: + stringValue: quay.io/jetstack/cert-manager-controller + - key: container.image.tag + value: + stringValue: v1.13.1 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: cert-manager-controller + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: cert-manager-cbcf9668d-9xvq5 + - key: k8s.pod.uid + value: + stringValue: 3e54c119-9054-428c-aeb4-14088263b4c7 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: container.id + value: + stringValue: 8e4a37866b88361493c42b279bfc0275b72e8d19c2dc2637e827063aa7c036ff + - key: container.image.name + value: + stringValue: quay.io/jetstack/cert-manager-webhook + - key: container.image.tag + value: + stringValue: v1.13.1 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: cert-manager-webhook + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: cert-manager-webhook-594cb9799b-jtv49 + - key: k8s.pod.uid + value: + stringValue: 4d5a76c4-db4f-466f-b925-13dc115f4b1b + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: container.id + value: + stringValue: 8f91fb6daaa23a0d7310a6bdbed1ebd640680d4428652bac5053a37c5c019a4c + - key: container.image.name + value: + stringValue: quay.io/brancz/kube-rbac-proxy + - key: container.image.tag + value: + stringValue: v0.14.2 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-rbac-proxy + - key: 
k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-operator-7867c9764-gdv94 + - key: k8s.pod.uid + value: + stringValue: 062e3244-7c7d-4f1a-84e5-93451dd93467 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: container.id + value: + stringValue: b68c07b4e82828c9fb83b3c9cd119e0837506fcafc73c43c6f11d2916daf4754 + - key: container.image.name + value: + stringValue: quay.io/signalfx/splunk-otel-collector + - key: container.image.tag + value: + stringValue: 0.85.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: otel-collector + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-pfm6z + - key: k8s.pod.uid + value: + stringValue: 9afe2416-ad03-4b81-8b21-051643ed6f11 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: container.id + value: + stringValue: b8b1c6e32ce1686ca6f1a61244f6b35631160ecbfc34bf499ebc4f042a44c8b3 + - key: container.image.name + value: + stringValue: registry.k8s.io/coredns/coredns + - key: container.image.tag + value: + stringValue: v1.10.1 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: coredns + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: coredns-5dd5756b68-ttk8c + - key: k8s.pod.uid + value: + stringValue: e29ffd0f-0138-49ef-92c5-c62195097294 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: container.id + value: + stringValue: be43b1aca150b59e2b2f9b261b6cfd64619ba3238a1cc2e3c15c92ae3076cdfd + - key: container.image.name + value: + stringValue: docker.io/rock1017/log-generator + - key: container.image.tag + value: + stringValue: 2.2.6 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-wo-index-w-ns-index + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-wo-index-w-ns-index-9jg7m + - key: k8s.pod.uid + value: + stringValue: 45b0a4da-029e-4c86-84b4-543590f62893 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: container.id + value: + stringValue: d3e96ffe6a87a4b9927ac1627a7dc00c6e92561ef7d14d0f06340e706832071e + - key: container.image.name + value: + stringValue: quay.io/signalfx/splunk-otel-collector + - key: container.image.tag + value: + stringValue: 0.85.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: otel-collector + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: 
sock-splunk-otel-collector-k8s-cluster-receiver-8687f4bfbb86fbd + - key: k8s.pod.uid + value: + stringValue: d310e423-e3ee-42ac-a284-9bab01bcbba4 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: container.id + value: + stringValue: e031ab5010dcf8711677089ae047575d480010797533d017948f747bb0086f54 + - key: container.image.name + value: + stringValue: registry.k8s.io/kube-proxy + - key: container.image.tag + value: + stringValue: v1.28.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-proxy + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-proxy-8glvr + - key: k8s.pod.uid + value: + stringValue: 47a23d3f-4ed5-42b2-b571-430f5f2c08e9 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.restarts - unit: "{restart}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: container.id - value: - stringValue: 68f489dfb42d16fac7f3e2880f1087d2d4a906d5354d51aa21be3d5fe0e8c174 - - key: container.image.name - value: - stringValue: quay.io/jetstack/cert-manager-cainjector - - key: container.image.tag - value: - stringValue: v1.13.0 - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: cert-manager-cainjector - - key: k8s.namespace.name - value: - stringValue: cert-manager - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: cert-manager-cainjector-d4748596-bth5r - - key: k8s.pod.uid - value: - stringValue: e90e45fd-acb3-4a9a-bb57-00e65b2802d8 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.ready - unit: "1" - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. 
- gauge: - dataPoints: + attributes: + - key: container.id + value: + stringValue: e84ea721ed9d8bb476083d278dffa2f1ddf2c0d2ed6d1008264242a3f29a44df + - key: container.image.name + value: + stringValue: docker.io/kindest/kindnetd + - key: container.image.tag + value: + stringValue: v20230511-dc714da8 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kindnet-cni + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kindnet-pbtbs + - key: k8s.pod.uid + value: + stringValue: 047ef34e-0d0d-4cc4-82d2-87495e01d3ac + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.restarts - unit: "{restart}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: container.id - value: - stringValue: 6ebfed3db9e29316a60d176381c254ce48f2b930be7a9137a92a2ea2b2802810 - - key: container.image.name - value: - stringValue: registry.k8s.io/kube-apiserver - - key: container.image.tag - value: - stringValue: v1.28.0 - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: kube-apiserver - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: kube-apiserver-kind-control-plane - - key: k8s.pod.uid - value: - stringValue: 0855481b-f77d-4193-95ff-2833dad6fcb2 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asDouble: 0.25 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.cpu_request - unit: '{cpu}' - - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.ready - unit: "1" - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. 
- gauge: - dataPoints: + attributes: + - key: container.id + value: + stringValue: f305339d0131fb9abe4368daa59b1649e195f8d2f2f181cabacf3e5029a93856 + - key: container.image.name + value: + stringValue: registry.k8s.io/kube-scheduler + - key: container.image.tag + value: + stringValue: v1.28.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-scheduler + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-scheduler-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: 5f9e1240-ab5c-457a-a212-b47052c9d97d + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.restarts - unit: "{restart}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: container.id - value: - stringValue: 884d7cc6680d22db05016acc80433fd2e85cb6b723c4e23353dfe0f10969785f - - key: container.image.name - value: - stringValue: quay.io/jetstack/cert-manager-webhook - - key: container.image.tag - value: - stringValue: v1.13.0 - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: cert-manager-webhook - - key: k8s.namespace.name - value: - stringValue: cert-manager - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: cert-manager-webhook-65d78d5c4b-8v4p4 - - key: k8s.pod.uid - value: - stringValue: 84e51af3-bab8-4446-9efd-ac48c34c2b33 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.ready - unit: "1" - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. 
- gauge: - dataPoints: + attributes: + - key: container.id + value: + stringValue: f89d85a849e619987764e0fb1512a5fafaa5101ad1956e89e3935eca4194fb3a + - key: container.image.name + value: + stringValue: quay.io/jetstack/cert-manager-cainjector + - key: container.image.tag + value: + stringValue: v1.13.1 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: cert-manager-cainjector + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: cert-manager-cainjector-65c7bff89d-vksqw + - key: k8s.pod.uid + value: + stringValue: 36d87b59-8ddf-47e7-a145-aeb81201d19a + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.restarts - unit: "{restart}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: container.id - value: - stringValue: 899f629863fd122b010af7e314bcaaf6d3e4b8f494bdabf0e2cd56bd142108c7 - - key: container.image.name - value: - stringValue: registry.k8s.io/coredns/coredns - - key: container.image.tag - value: - stringValue: v1.10.1 - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: coredns - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: coredns-5dd5756b68-2bhmb - - key: k8s.pod.uid - value: - stringValue: 0aeddf50-90ca-4426-8cbc-dd0c245d6911 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asDouble: 0.1 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.cpu_request - unit: '{cpu}' - - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asInt: "178257920" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.memory_limit - unit: By - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asInt: "73400320" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.memory_request - unit: By - - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.ready - unit: "1" - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. 
It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. - gauge: - dataPoints: + attributes: + - key: container.id + value: + stringValue: fdc0b7ceccd9a1fa5b0f941477e6ca8cc0c63fe512e0feb10a09270c69239ec1 + - key: container.image.name + value: + stringValue: docker.io/rock1017/log-generator + - key: container.image.tag + value: + stringValue: 2.2.6 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-wo-index-wo-ns-index + - key: k8s.namespace.name + value: + stringValue: ns-wo-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-wo-index-wo-ns-index-qngh7 + - key: k8s.pod.uid + value: + stringValue: bd9ad7d6-f45f-4cb6-af4f-27fc0b0d53f2 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" + attributes: + - key: container.id + value: + stringValue: fe4e0e7117f2aaf3301cec1aafa623769e446120d276c29b25f2ec4c57996d63 + - key: container.image.name + value: + stringValue: docker.io/rock1017/log-generator + - key: container.image.tag + value: + stringValue: 2.2.6 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-w-index-w-ns-index + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-w-ns-index-r8hsb + - key: k8s.pod.uid + value: + stringValue: efb9c24c-ad0f-435b-a2ee-e4d065acdd8e + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" name: k8s.container.restarts - unit: "{restart}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: container.id - value: - stringValue: 9bbac2d8928847746c7eb9e94d1accc538bc2a1c76e94976e92cf38ee887cef5 - - key: container.image.name - value: - stringValue: registry.k8s.io/kube-controller-manager - - key: container.image.tag - value: - stringValue: v1.28.0 - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: kube-controller-manager - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: kube-controller-manager-kind-control-plane - - key: k8s.pod.uid - value: - stringValue: 2f9e753c-cf6f-4562-a2f2-3e1bca8eed3e - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: + - gauge: dataPoints: + - asDouble: 0.1 + attributes: + - key: container.id + value: + stringValue: 167008237804229f4a618b65ebc574180fcb330a35b97ca53aead0bf1e7425fd + - key: container.image.name + value: + stringValue: ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator + - key: container.image.tag + value: + stringValue: v0.85.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: manager + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-operator-7867c9764-gdv94 + - key: k8s.pod.uid + value: + stringValue: 062e3244-7c7d-4f1a-84e5-93451dd93467 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asDouble: 0.5 + attributes: + - key: container.id + value: + stringValue: 8f91fb6daaa23a0d7310a6bdbed1ebd640680d4428652bac5053a37c5c019a4c + - key: container.image.name + value: + stringValue: quay.io/brancz/kube-rbac-proxy + - key: container.image.tag + value: + stringValue: v0.14.2 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-rbac-proxy + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-operator-7867c9764-gdv94 + - key: k8s.pod.uid + value: + stringValue: 062e3244-7c7d-4f1a-84e5-93451dd93467 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asDouble: 0.2 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.cpu_request - unit: '{cpu}' - - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.ready - unit: "1" - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. 
- gauge: - dataPoints: - - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.restarts - unit: "{restart}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: container.id - value: - stringValue: b2e366dc08d47f7220b1f98b5ce51b06e62352ab5302c3c6edfaa97dacb21f24 - - key: container.image.name - value: - stringValue: docker.io/kindest/kindnetd - - key: container.image.tag - value: - stringValue: v20230511-dc714da8 - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: kindnet-cni - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: kindnet-h7vm7 - - key: k8s.pod.uid - value: - stringValue: 651a0d53-9957-4c21-84f9-bfec00b85491 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: + attributes: + - key: container.id + value: + stringValue: b68c07b4e82828c9fb83b3c9cd119e0837506fcafc73c43c6f11d2916daf4754 + - key: container.image.name + value: + stringValue: quay.io/signalfx/splunk-otel-collector + - key: container.image.tag + value: + stringValue: 0.85.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: otel-collector + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-pfm6z + - key: k8s.pod.uid + value: + stringValue: 9afe2416-ad03-4b81-8b21-051643ed6f11 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asDouble: 0.2 + attributes: + - key: container.id + value: + stringValue: d3e96ffe6a87a4b9927ac1627a7dc00c6e92561ef7d14d0f06340e706832071e + - key: container.image.name + value: + stringValue: quay.io/signalfx/splunk-otel-collector + - key: container.image.tag + value: + stringValue: 0.85.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: otel-collector + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-8687f4bfbb86fbd + - key: k8s.pod.uid + value: + stringValue: d310e423-e3ee-42ac-a284-9bab01bcbba4 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asDouble: 0.1 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" + attributes: + - key: container.id + value: + stringValue: e84ea721ed9d8bb476083d278dffa2f1ddf2c0d2ed6d1008264242a3f29a44df + - key: container.image.name + value: + stringValue: docker.io/kindest/kindnetd + - key: container.image.tag + value: + stringValue: v20230511-dc714da8 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kindnet-cni + - key: k8s.namespace.name + value: + stringValue: kube-system 
+ - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kindnet-pbtbs + - key: k8s.pod.uid + value: + stringValue: 047ef34e-0d0d-4cc4-82d2-87495e01d3ac + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" name: k8s.container.cpu_limit - unit: '{cpu}' - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asDouble: 0.1 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.cpu_request - unit: '{cpu}' - - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: + - gauge: dataPoints: + - asInt: "134217728" + attributes: + - key: container.id + value: + stringValue: 167008237804229f4a618b65ebc574180fcb330a35b97ca53aead0bf1e7425fd + - key: container.image.name + value: + stringValue: ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator + - key: container.image.tag + value: + stringValue: v0.85.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: manager + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-operator-7867c9764-gdv94 + - key: k8s.pod.uid + value: + stringValue: 062e3244-7c7d-4f1a-84e5-93451dd93467 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "178257920" + attributes: + - key: container.id + value: + stringValue: 5f18c9c8cd462d6e3b8fb356335844f6de2026ef7d93f9cfb74b1a08ae1ad1d0 + - key: container.image.name + value: + stringValue: registry.k8s.io/coredns/coredns + - key: container.image.tag + value: + stringValue: v1.10.1 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: coredns + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: coredns-5dd5756b68-w7sqj + - key: k8s.pod.uid + value: + stringValue: c026c30f-457d-4649-b070-c708867ef2be + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "134217728" + attributes: + - key: container.id + value: + stringValue: 8f91fb6daaa23a0d7310a6bdbed1ebd640680d4428652bac5053a37c5c019a4c + - key: container.image.name + value: + stringValue: quay.io/brancz/kube-rbac-proxy + - key: container.image.tag + value: + stringValue: v0.14.2 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-rbac-proxy + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-operator-7867c9764-gdv94 + - key: k8s.pod.uid + value: + stringValue: 062e3244-7c7d-4f1a-84e5-93451dd93467 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "524288000" + attributes: + - key: container.id + value: + 
stringValue: b68c07b4e82828c9fb83b3c9cd119e0837506fcafc73c43c6f11d2916daf4754 + - key: container.image.name + value: + stringValue: quay.io/signalfx/splunk-otel-collector + - key: container.image.tag + value: + stringValue: 0.85.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: otel-collector + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-pfm6z + - key: k8s.pod.uid + value: + stringValue: 9afe2416-ad03-4b81-8b21-051643ed6f11 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "178257920" + attributes: + - key: container.id + value: + stringValue: b8b1c6e32ce1686ca6f1a61244f6b35631160ecbfc34bf499ebc4f042a44c8b3 + - key: container.image.name + value: + stringValue: registry.k8s.io/coredns/coredns + - key: container.image.tag + value: + stringValue: v1.10.1 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: coredns + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: coredns-5dd5756b68-ttk8c + - key: k8s.pod.uid + value: + stringValue: e29ffd0f-0138-49ef-92c5-c62195097294 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "524288000" + attributes: + - key: container.id + value: + stringValue: d3e96ffe6a87a4b9927ac1627a7dc00c6e92561ef7d14d0f06340e706832071e + - key: container.image.name + value: + stringValue: quay.io/signalfx/splunk-otel-collector + - key: container.image.tag + value: + stringValue: 0.85.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: otel-collector + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-8687f4bfbb86fbd + - key: k8s.pod.uid + value: + stringValue: d310e423-e3ee-42ac-a284-9bab01bcbba4 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "52428800" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" + attributes: + - key: container.id + value: + stringValue: e84ea721ed9d8bb476083d278dffa2f1ddf2c0d2ed6d1008264242a3f29a44df + - key: container.image.name + value: + stringValue: docker.io/kindest/kindnetd + - key: container.image.tag + value: + stringValue: v20230511-dc714da8 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kindnet-cni + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kindnet-pbtbs + - key: k8s.pod.uid + value: + stringValue: 047ef34e-0d0d-4cc4-82d2-87495e01d3ac + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" name: k8s.container.memory_limit - unit: By - - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: + - gauge: dataPoints: + - asInt: "104857600" + attributes: + - key: container.id + value: + stringValue: 0f2dea1b44ca69d59cd23e9ea0ddf09beb859f6c8c2122a30ffbfbc3c75f4ef4 + - key: container.image.name + value: + stringValue: registry.k8s.io/etcd + - key: container.image.tag + value: + stringValue: 3.5.9-0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: etcd + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: etcd-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: bf9cabc7-b0cb-4fe0-b57d-6c1bb6189913 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "67108864" + attributes: + - key: container.id + value: + stringValue: 167008237804229f4a618b65ebc574180fcb330a35b97ca53aead0bf1e7425fd + - key: container.image.name + value: + stringValue: ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator + - key: container.image.tag + value: + stringValue: v0.85.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: manager + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-operator-7867c9764-gdv94 + - key: k8s.pod.uid + value: + stringValue: 062e3244-7c7d-4f1a-84e5-93451dd93467 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "73400320" + attributes: + - key: container.id + value: + stringValue: 5f18c9c8cd462d6e3b8fb356335844f6de2026ef7d93f9cfb74b1a08ae1ad1d0 + - key: container.image.name + value: + stringValue: registry.k8s.io/coredns/coredns + - key: container.image.tag + value: + stringValue: v1.10.1 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: coredns + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: coredns-5dd5756b68-w7sqj + - key: k8s.pod.uid + value: + stringValue: c026c30f-457d-4649-b070-c708867ef2be + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "67108864" + attributes: + - key: container.id + value: + stringValue: 8f91fb6daaa23a0d7310a6bdbed1ebd640680d4428652bac5053a37c5c019a4c + - key: container.image.name + value: + stringValue: quay.io/brancz/kube-rbac-proxy + - key: container.image.tag + value: + stringValue: v0.14.2 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-rbac-proxy + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-operator-7867c9764-gdv94 + - key: k8s.pod.uid + value: + stringValue: 062e3244-7c7d-4f1a-84e5-93451dd93467 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "524288000" + 
attributes: + - key: container.id + value: + stringValue: b68c07b4e82828c9fb83b3c9cd119e0837506fcafc73c43c6f11d2916daf4754 + - key: container.image.name + value: + stringValue: quay.io/signalfx/splunk-otel-collector + - key: container.image.tag + value: + stringValue: 0.85.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: otel-collector + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-pfm6z + - key: k8s.pod.uid + value: + stringValue: 9afe2416-ad03-4b81-8b21-051643ed6f11 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "73400320" + attributes: + - key: container.id + value: + stringValue: b8b1c6e32ce1686ca6f1a61244f6b35631160ecbfc34bf499ebc4f042a44c8b3 + - key: container.image.name + value: + stringValue: registry.k8s.io/coredns/coredns + - key: container.image.tag + value: + stringValue: v1.10.1 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: coredns + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: coredns-5dd5756b68-ttk8c + - key: k8s.pod.uid + value: + stringValue: e29ffd0f-0138-49ef-92c5-c62195097294 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "524288000" + attributes: + - key: container.id + value: + stringValue: d3e96ffe6a87a4b9927ac1627a7dc00c6e92561ef7d14d0f06340e706832071e + - key: container.image.name + value: + stringValue: quay.io/signalfx/splunk-otel-collector + - key: container.image.tag + value: + stringValue: 0.85.0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: otel-collector + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-8687f4bfbb86fbd + - key: k8s.pod.uid + value: + stringValue: d310e423-e3ee-42ac-a284-9bab01bcbba4 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "52428800" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" + attributes: + - key: container.id + value: + stringValue: e84ea721ed9d8bb476083d278dffa2f1ddf2c0d2ed6d1008264242a3f29a44df + - key: container.image.name + value: + stringValue: docker.io/kindest/kindnetd + - key: container.image.tag + value: + stringValue: v20230511-dc714da8 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kindnet-cni + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kindnet-pbtbs + - key: k8s.pod.uid + value: + stringValue: 047ef34e-0d0d-4cc4-82d2-87495e01d3ac + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" name: k8s.container.memory_request - unit: By - - description: Whether a container has passed its 
readiness probe (0 for no, 1 for yes) - gauge: + - gauge: dataPoints: - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.ready - unit: "1" - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. - gauge: - dataPoints: - - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.restarts - unit: "{restart}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: container.id - value: - stringValue: c07ca2c0f5f8252918c9a7cfcf0e29ba2eb281c4f6a44673d1bdb6c1d39fb059 - - key: container.image.name - value: - stringValue: docker.io/library/nodejs_test - - key: container.image.tag - value: - stringValue: latest - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: nodejs-test - - key: k8s.namespace.name - value: - stringValue: default - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: nodejs-test-57564b7dc9-xjsl2 - - key: k8s.pod.uid - value: - stringValue: 93397346-c9c3-4b44-8802-3a0bc74636ce - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) - gauge: - dataPoints: + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.namespace.uid + value: + stringValue: 70f7e9eb-77bc-47aa-86c7-e852ed47365b + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.ready - unit: "1" - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. 
- gauge: - dataPoints: - - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.restarts - unit: "{restart}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: container.id - value: - stringValue: dcd92dd6155287da03dd442319998ad7ecdf16911eadb707303fde6a480345af - - key: container.image.name - value: - stringValue: registry.k8s.io/kube-scheduler - - key: container.image.tag - value: - stringValue: v1.28.0 - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: kube-scheduler - - key: k8s.namespace.name - value: - stringValue: kube-system - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: kube-scheduler-kind-control-plane - - key: k8s.pod.uid - value: - stringValue: 733f9326-85cf-43d5-a7ce-2e10adcb0ce0 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.namespace.uid + value: + stringValue: 66c6e06b-978e-4e4e-963c-1a6a858c31cf + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-node-lease + - key: k8s.namespace.uid + value: + stringValue: 2859d780-e331-4785-a103-fd53c36a5cb8 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-public + - key: k8s.namespace.uid + value: + stringValue: fd827296-7360-4e0c-a3db-0cd13a1867a4 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.namespace.uid + value: + stringValue: d629fbc5-fed8-4748-89b7-9dcf828d9816 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: local-path-storage + - key: k8s.namespace.uid + value: + stringValue: 3991e76f-b0f1-4253-a53c-29628be0d2f0 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-w-exclude + - key: k8s.namespace.uid + value: + stringValue: 484ae869-2b18-4855-9368-43d55c1385ce + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + 
timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.namespace.uid + value: + stringValue: be389993-32a3-421e-a4e5-197ea936e996 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-wo-index + - key: k8s.namespace.uid + value: + stringValue: d907432d-ee7b-4d1d-9739-27df63e9eb8e + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + name: k8s.namespace.phase + - gauge: dataPoints: - - asDouble: 0.1 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.cpu_request - unit: '{cpu}' - - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) - gauge: + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.deployment.name + value: + stringValue: cert-manager + - key: k8s.deployment.uid + value: + stringValue: cd2a6b90-0eb6-4e27-9f35-5cdbec85ebdd + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.deployment.name + value: + stringValue: cert-manager-cainjector + - key: k8s.deployment.uid + value: + stringValue: 9d2daa69-2e21-4274-9f8d-e38b05bbf2b5 + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.deployment.name + value: + stringValue: cert-manager-webhook + - key: k8s.deployment.uid + value: + stringValue: 00c95a5a-1bba-47b5-819f-dd692807cfa2 + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "2" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.deployment.name + value: + stringValue: coredns + - key: k8s.deployment.uid + value: + stringValue: 8d51777a-0115-465a-94e1-43a5e871d581 + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.deployment.name + value: + stringValue: local-path-provisioner + - key: k8s.deployment.uid + value: + stringValue: a690d7bd-a831-48ae-a0fa-44a4485b2b3e + - key: k8s.namespace.name + value: + stringValue: local-path-storage + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.deployment.name + value: + stringValue: nodejs-test + - key: k8s.deployment.uid + value: + stringValue: 
4d52f5ff-5b2f-4b1c-a488-639f029967eb + - key: k8s.namespace.name + value: + stringValue: default + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.deployment.name + value: + stringValue: sock-operator + - key: k8s.deployment.uid + value: + stringValue: 8d77cf86-4513-4a5d-bb64-2bf8ea7c4dc3 + - key: k8s.namespace.name + value: + stringValue: default + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.deployment.name + value: + stringValue: sock-splunk-otel-collector-k8s-cluster-receiver + - key: k8s.deployment.uid + value: + stringValue: a9ae7c39-b804-4cb5-b9a1-95b8bcf6cf25 + - key: k8s.namespace.name + value: + stringValue: default + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + name: k8s.deployment.available + - gauge: dataPoints: - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.ready - unit: "1" - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. 
- gauge: + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.deployment.name + value: + stringValue: cert-manager + - key: k8s.deployment.uid + value: + stringValue: cd2a6b90-0eb6-4e27-9f35-5cdbec85ebdd + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.deployment.name + value: + stringValue: cert-manager-cainjector + - key: k8s.deployment.uid + value: + stringValue: 9d2daa69-2e21-4274-9f8d-e38b05bbf2b5 + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.deployment.name + value: + stringValue: cert-manager-webhook + - key: k8s.deployment.uid + value: + stringValue: 00c95a5a-1bba-47b5-819f-dd692807cfa2 + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "2" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.deployment.name + value: + stringValue: coredns + - key: k8s.deployment.uid + value: + stringValue: 8d51777a-0115-465a-94e1-43a5e871d581 + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.deployment.name + value: + stringValue: local-path-provisioner + - key: k8s.deployment.uid + value: + stringValue: a690d7bd-a831-48ae-a0fa-44a4485b2b3e + - key: k8s.namespace.name + value: + stringValue: local-path-storage + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.deployment.name + value: + stringValue: nodejs-test + - key: k8s.deployment.uid + value: + stringValue: 4d52f5ff-5b2f-4b1c-a488-639f029967eb + - key: k8s.namespace.name + value: + stringValue: default + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.deployment.name + value: + stringValue: sock-operator + - key: k8s.deployment.uid + value: + stringValue: 8d77cf86-4513-4a5d-bb64-2bf8ea7c4dc3 + - key: k8s.namespace.name + value: + stringValue: default + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.deployment.name + value: + stringValue: sock-splunk-otel-collector-k8s-cluster-receiver + - key: k8s.deployment.uid + value: + stringValue: a9ae7c39-b804-4cb5-b9a1-95b8bcf6cf25 + - key: k8s.namespace.name + value: + stringValue: default + - key: metric_source + value: + stringValue: 
kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + name: k8s.deployment.desired + - gauge: dataPoints: - - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.restarts - unit: "{restart}" - scope: - name: otelcol/k8sclusterreceiver - version: ignored - - resource: - attributes: - - key: container.id - value: - stringValue: ee9fa5842411121786a482c1dab2c68c04d5f171b94d7d2f6570e0442e2bee80 - - key: container.image.name - value: - stringValue: quay.io/signalfx/splunk-otel-collector - - key: container.image.tag - value: - stringValue: 0.85.0 - - key: k8s.cluster.name - value: - stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: otel-collector - - key: k8s.namespace.name - value: - stringValue: default - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-67bff88777g7b7j - - key: k8s.pod.uid - value: - stringValue: ee20e1f8-2b45-4b32-8a1c-585c85d239c6 - - key: metric_source - value: - stringValue: kubernetes - - key: receiver - value: - stringValue: k8scluster - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.replicaset.name + value: + stringValue: cert-manager-cainjector-65c7bff89d + - key: k8s.replicaset.uid + value: + stringValue: 16620753-2ac6-4c9d-a6b6-57ce6444cc32 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.replicaset.name + value: + stringValue: cert-manager-cbcf9668d + - key: k8s.replicaset.uid + value: + stringValue: e352e7ed-b9c7-4b1e-9702-c8eebb9f28bc + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.replicaset.name + value: + stringValue: cert-manager-webhook-594cb9799b + - key: k8s.replicaset.uid + value: + stringValue: b529b042-8ef6-49b4-a005-e67aea95c1fc + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.replicaset.name + value: + stringValue: nodejs-test-57564b7dc9 + - key: k8s.replicaset.uid + value: + stringValue: fc4c748c-a646-4813-9abf-f0688a15d022 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.replicaset.name + value: + stringValue: sock-operator-7867c9764 + - key: 
k8s.replicaset.uid + value: + stringValue: d129ef46-3a1c-4e26-987b-b778356962e5 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.replicaset.name + value: + stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-8687f4bfbb + - key: k8s.replicaset.uid + value: + stringValue: 779c6bfc-728e-43b2-9e38-c80f56d7f8c1 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "2" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.replicaset.name + value: + stringValue: coredns-5dd5756b68 + - key: k8s.replicaset.uid + value: + stringValue: e0ccf64f-8b03-4ffc-8fb1-a84507cca4b5 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: local-path-storage + - key: k8s.replicaset.name + value: + stringValue: local-path-provisioner-6f8956fb48 + - key: k8s.replicaset.uid + value: + stringValue: 4b3bdd53-7d41-4413-9a97-c89c6d60576e + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + name: k8s.replicaset.available + - gauge: dataPoints: - - asDouble: 0.2 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.cpu_limit - unit: '{cpu}' - - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.replicaset.name + value: + stringValue: cert-manager-cainjector-65c7bff89d + - key: k8s.replicaset.uid + value: + stringValue: 16620753-2ac6-4c9d-a6b6-57ce6444cc32 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.replicaset.name + value: + stringValue: cert-manager-cbcf9668d + - key: k8s.replicaset.uid + value: + stringValue: e352e7ed-b9c7-4b1e-9702-c8eebb9f28bc + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.replicaset.name + value: + stringValue: cert-manager-webhook-594cb9799b + - key: k8s.replicaset.uid + value: + stringValue: b529b042-8ef6-49b4-a005-e67aea95c1fc + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.replicaset.name + value: + stringValue: nodejs-test-57564b7dc9 + - key: k8s.replicaset.uid + value: + stringValue: fc4c748c-a646-4813-9abf-f0688a15d022 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.replicaset.name + value: + stringValue: sock-operator-7867c9764 + - key: k8s.replicaset.uid + value: + stringValue: d129ef46-3a1c-4e26-987b-b778356962e5 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.replicaset.name + value: + stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-8687f4bfbb + - key: k8s.replicaset.uid + value: + stringValue: 779c6bfc-728e-43b2-9e38-c80f56d7f8c1 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "2" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.replicaset.name + value: + stringValue: coredns-5dd5756b68 + - key: k8s.replicaset.uid + value: + stringValue: e0ccf64f-8b03-4ffc-8fb1-a84507cca4b5 + - key: metric_source + value: + stringValue: kubernetes + - key: receiver + value: + stringValue: k8scluster + timeUnixNano: "1000000" + - asInt: "1" + attributes: + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: local-path-storage + - key: 
k8s.replicaset.name
+                      value:
+                        stringValue: local-path-provisioner-6f8956fb48
+                    - key: k8s.replicaset.uid
+                      value:
+                        stringValue: 4b3bdd53-7d41-4413-9a97-c89c6d60576e
+                    - key: metric_source
+                      value:
+                        stringValue: kubernetes
+                    - key: receiver
+                      value:
+                        stringValue: k8scluster
+                  timeUnixNano: "1000000"
+            name: k8s.replicaset.desired
+          - gauge:
               dataPoints:
-                - asDouble: 0.2
-                  startTimeUnixNano: "1000000"
-                  timeUnixNano: "2000000"
-            name: k8s.container.cpu_request
-            unit: '{cpu}'
-          - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details
-            gauge:
+                - asInt: "1"
+                  attributes:
+                    - key: k8s.cluster.name
+                      value:
+                        stringValue: dev-operator
+                    - key: k8s.daemonset.name
+                      value:
+                        stringValue: kindnet
+                    - key: k8s.daemonset.uid
+                      value:
+                        stringValue: 178ae82d-c61b-4ccd-8f0e-681092827d83
+                    - key: k8s.namespace.name
+                      value:
+                        stringValue: kube-system
+                    - key: metric_source
+                      value:
+                        stringValue: kubernetes
+                    - key: receiver
+                      value:
+                        stringValue: k8scluster
+                  timeUnixNano: "1000000"
+                - asInt: "1"
+                  attributes:
+                    - key: k8s.cluster.name
+                      value:
+                        stringValue: dev-operator
+                    - key: k8s.daemonset.name
+                      value:
+                        stringValue: kube-proxy
+                    - key: k8s.daemonset.uid
+                      value:
+                        stringValue: 08707a00-e808-4df3-8162-d6a71e63ec81
+                    - key: k8s.namespace.name
+                      value:
+                        stringValue: kube-system
+                    - key: metric_source
+                      value:
+                        stringValue: kubernetes
+                    - key: receiver
+                      value:
+                        stringValue: k8scluster
+                  timeUnixNano: "1000000"
+                - asInt: "1"
+                  attributes:
+                    - key: k8s.cluster.name
+                      value:
+                        stringValue: dev-operator
+                    - key: k8s.daemonset.name
+                      value:
+                        stringValue: sock-splunk-otel-collector-agent
+                    - key: k8s.daemonset.uid
+                      value:
+                        stringValue: 28a98e0e-076d-4ed2-8970-f487c4112875
+                    - key: k8s.namespace.name
+                      value:
+                        stringValue: default
+                    - key: metric_source
+                      value:
+                        stringValue: kubernetes
+                    - key: receiver
+                      value:
+                        stringValue: k8scluster
+                  timeUnixNano: "1000000"
+            name: k8s.daemonset.current_scheduled_nodes
+          - gauge:
               dataPoints:
-                - asInt: "524288000"
-                  startTimeUnixNano: "1000000"
-                  timeUnixNano: "2000000"
-            name: k8s.container.memory_limit
-            unit: By
-          - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details
-            gauge:
+                - asInt: "1"
+                  attributes:
+                    - key: k8s.cluster.name
+                      value:
+                        stringValue: dev-operator
+                    - key: k8s.daemonset.name
+                      value:
+                        stringValue: kindnet
+                    - key: k8s.daemonset.uid
+                      value:
+                        stringValue: 178ae82d-c61b-4ccd-8f0e-681092827d83
+                    - key: k8s.namespace.name
+                      value:
+                        stringValue: kube-system
+                    - key: metric_source
+                      value:
+                        stringValue: kubernetes
+                    - key: receiver
+                      value:
+                        stringValue: k8scluster
+                  timeUnixNano: "1000000"
+                - asInt: "1"
+                  attributes:
+                    - key: k8s.cluster.name
+                      value:
+                        stringValue: dev-operator
+                    - key: k8s.daemonset.name
+                      value:
+                        stringValue: kube-proxy
+                    - key: k8s.daemonset.uid
+                      value:
+                        stringValue: 08707a00-e808-4df3-8162-d6a71e63ec81
+                    - key: k8s.namespace.name
+                      value:
+                        stringValue: kube-system
+                    - key: metric_source
+                      value:
+                        stringValue: kubernetes
+                    - key: receiver
+                      value:
+                        stringValue: k8scluster
+                  timeUnixNano: "1000000"
+                - asInt: "1"
+                  attributes:
+                    - key: k8s.cluster.name
+                      value:
+                        stringValue: dev-operator
+                    - key: k8s.daemonset.name
+                      value:
+                        stringValue: sock-splunk-otel-collector-agent
+                    - key: k8s.daemonset.uid
+                      value:
+                        stringValue: 28a98e0e-076d-4ed2-8970-f487c4112875
+                    - key: k8s.namespace.name
+                      value:
+                        stringValue: default
+                    - key: metric_source
+                      value:
+                        stringValue: kubernetes
+                    - key: receiver
+                      value:
+                        stringValue: k8scluster
+                  timeUnixNano: "1000000"
+            name: k8s.daemonset.desired_scheduled_nodes
+          - gauge:
               dataPoints:
-                - asInt: "524288000"
-                  startTimeUnixNano: "1000000"
-                  timeUnixNano: "2000000"
-            name: k8s.container.memory_request
-            unit: By
-          - description: Whether a container has passed its readiness probe (0 for no, 1 for yes)
-            gauge:
+                - asInt: "0"
+                  attributes:
+                    - key: k8s.cluster.name
+                      value:
+                        stringValue: dev-operator
+                    - key: k8s.daemonset.name
+                      value:
+                        stringValue: kindnet
+                    - key: k8s.daemonset.uid
+                      value:
+                        stringValue: 178ae82d-c61b-4ccd-8f0e-681092827d83
+                    - key: k8s.namespace.name
+                      value:
+                        stringValue: kube-system
+                    - key: metric_source
+                      value:
+                        stringValue: kubernetes
+                    - key: receiver
+                      value:
+                        stringValue: k8scluster
+                  timeUnixNano: "1000000"
+                - asInt: "0"
+                  attributes:
+                    - key: k8s.cluster.name
+                      value:
+                        stringValue: dev-operator
+                    - key: k8s.daemonset.name
+                      value:
+                        stringValue: kube-proxy
+                    - key: k8s.daemonset.uid
+                      value:
+                        stringValue: 08707a00-e808-4df3-8162-d6a71e63ec81
+                    - key: k8s.namespace.name
+                      value:
+                        stringValue: kube-system
+                    - key: metric_source
+                      value:
+                        stringValue: kubernetes
+                    - key: receiver
+                      value:
+                        stringValue: k8scluster
+                  timeUnixNano: "1000000"
+                - asInt: "0"
+                  attributes:
+                    - key: k8s.cluster.name
+                      value:
+                        stringValue: dev-operator
+                    - key: k8s.daemonset.name
+                      value:
+                        stringValue: sock-splunk-otel-collector-agent
+                    - key: k8s.daemonset.uid
+                      value:
+                        stringValue: 28a98e0e-076d-4ed2-8970-f487c4112875
+                    - key: k8s.namespace.name
+                      value:
+                        stringValue: default
+                    - key: metric_source
+                      value:
+                        stringValue: kubernetes
+                    - key: receiver
+                      value:
+                        stringValue: k8scluster
+                  timeUnixNano: "1000000"
+            name: k8s.daemonset.misscheduled_nodes
+          - gauge:
               dataPoints:
                 - asInt: "1"
-                  startTimeUnixNano: "1000000"
-                  timeUnixNano: "2000000"
-            name: k8s.container.ready
-            unit: "1"
-          - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that.
-            gauge:
+                  attributes:
+                    - key: k8s.cluster.name
+                      value:
+                        stringValue: dev-operator
+                    - key: k8s.daemonset.name
+                      value:
+                        stringValue: kindnet
+                    - key: k8s.daemonset.uid
+                      value:
+                        stringValue: 178ae82d-c61b-4ccd-8f0e-681092827d83
+                    - key: k8s.namespace.name
+                      value:
+                        stringValue: kube-system
+                    - key: metric_source
+                      value:
+                        stringValue: kubernetes
+                    - key: receiver
+                      value:
+                        stringValue: k8scluster
+                  timeUnixNano: "1000000"
+                - asInt: "1"
+                  attributes:
+                    - key: k8s.cluster.name
+                      value:
+                        stringValue: dev-operator
+                    - key: k8s.daemonset.name
+                      value:
+                        stringValue: kube-proxy
+                    - key: k8s.daemonset.uid
+                      value:
+                        stringValue: 08707a00-e808-4df3-8162-d6a71e63ec81
+                    - key: k8s.namespace.name
+                      value:
+                        stringValue: kube-system
+                    - key: metric_source
+                      value:
+                        stringValue: kubernetes
+                    - key: receiver
+                      value:
+                        stringValue: k8scluster
+                  timeUnixNano: "1000000"
+                - asInt: "1"
+                  attributes:
+                    - key: k8s.cluster.name
+                      value:
+                        stringValue: dev-operator
+                    - key: k8s.daemonset.name
+                      value:
+                        stringValue: sock-splunk-otel-collector-agent
+                    - key: k8s.daemonset.uid
+                      value:
+                        stringValue: 28a98e0e-076d-4ed2-8970-f487c4112875
+                    - key: k8s.namespace.name
+                      value:
+                        stringValue: default
+                    - key: metric_source
+                      value:
+                        stringValue: kubernetes
+                    - key: receiver
+                      value:
+                        stringValue: k8scluster
+                  timeUnixNano: "1000000"
+            name: k8s.daemonset.ready_nodes
+          - gauge:
               dataPoints:
-                - asInt: "0"
-                  startTimeUnixNano: "1000000"
-                  timeUnixNano: "2000000"
-            name: k8s.container.restarts
-            unit: "{restart}"
-        scope:
-          name: otelcol/k8sclusterreceiver
-          version: ignored
+                - asInt: "1"
+                  attributes:
+                    - key: k8s.cluster.name
+                      value:
+                        stringValue: dev-operator
+                    - key: k8s.node.name
+                      value:
+                        stringValue: kind-control-plane
+                    - key: k8s.node.uid
+                      value:
+                        stringValue: 33909090-f3de-4231-a442-9f10007a030d
+                    - key: metric_source
+                      value:
+                        stringValue: kubernetes
+                    - key: receiver
+                      value:
+                        stringValue: k8scluster
+                  timeUnixNano: "1000000"
+            name: k8s.node.condition_ready
+        scope: {}
diff --git a/functional_tests/testdata/test_jobs.yaml b/functional_tests/testdata/test_jobs.yaml
new file mode 100644
index 0000000000..7cebba094d
--- /dev/null
+++ b/functional_tests/testdata/test_jobs.yaml
@@ -0,0 +1,178 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: ns-w-index
+  annotations:
+    splunk.com/index: ns-anno
+    splunk.com/customField: ns-value
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: ns-wo-index
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: ns-w-exclude
+  annotations:
+    splunk.com/index: ns-anno
+    splunk.com/exclude: "true"
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: pod-w-index-w-ns-index
+  namespace: ns-w-index
+spec:
+  parallelism: 1
+  template:
+    metadata:
+      labels:
+        app: pod-w-index-w-ns-index
+      annotations:
+        splunk.com/index: "pod-anno"
+        splunk.com/sourcetype: "sourcetype-anno"
+        splunk.com/customField: pod-value-1
+    spec:
+      restartPolicy: Never
+      containers:
+        - name: pod-w-index-w-ns-index
+          image: docker.io/rock1017/log-generator:2.2.6
+          env:
+            - name: MESSAGE_COUNT
+              value: "10"
+            - name: SIZE
+              value: "256"
+            - name: FREQ
+              value: "1"
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: pod-wo-index-w-ns-index
+  namespace: ns-w-index
+spec:
+  parallelism: 1
+  template:
+    metadata:
+      labels:
+        app: pod-wo-index-w-ns-index
+      annotations:
+        splunk.com/exclude: "false"
+    spec:
+      restartPolicy: Never
+      containers:
+        - name: pod-wo-index-w-ns-index
+          image: docker.io/rock1017/log-generator:2.2.6
+          env:
+            - name: MESSAGE_COUNT
+              value: "10"
+            - name: SIZE
+              value: "256"
+            - name: FREQ
+              value: "1"
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: pod-w-index-wo-ns-index
+  namespace: ns-wo-index
+spec:
+  parallelism: 1
+  template:
+    metadata:
+      labels:
+        app: pod-w-index-wo-ns-index
+      annotations:
+        splunk.com/index: "pod-anno"
+        splunk.com/customField: pod-value-2
+    spec:
+      restartPolicy: Never
+      containers:
+        - name: pod-w-index-wo-ns-index
+          image: docker.io/rock1017/log-generator:2.2.6
+          env:
+            - name: MESSAGE_COUNT
+              value: "10"
+            - name: SIZE
+              value: "256"
+            - name: FREQ
+              value: "1"
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: pod-w-index-w-ns-exclude
+  namespace: ns-w-exclude
+spec:
+  parallelism: 1
+  template:
+    metadata:
+      labels:
+        app: pod-w-index-w-ns-exclude
+      annotations:
+        splunk.com/index: "pod-anno"
+    spec:
+      restartPolicy: Never
+      containers:
+        - name: pod-w-index-w-ns-exclude
+          image: docker.io/rock1017/log-generator:2.2.6
+          env:
+            - name: MESSAGE_COUNT
+              value: "10"
+            - name: SIZE
+              value: "256"
+            - name: FREQ
+              value: "1"
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: pod-w-exclude-wo-ns-exclude
+  namespace: ns-w-index
+spec:
+  parallelism: 1
+  template:
+    metadata:
+      labels:
+        app: pod-w-index-w-ns-exclude
+      annotations:
+        splunk.com/exclude: "true"
+    spec:
+      restartPolicy: Never
+      containers:
+        - name: pod-w-index-w-ns-exclude
+          image: docker.io/rock1017/log-generator:2.2.6
+          env:
+            - name: MESSAGE_COUNT
+              value: "10"
+            - name: SIZE
+              value: "256"
+            - name: FREQ
+              value: "1"
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: pod-wo-index-wo-ns-index
+  namespace: ns-wo-index
+spec:
+  parallelism: 1
+  template:
+    metadata:
+      labels:
+        app: pod-wo-index-wo-ns-index
+    spec:
+      restartPolicy: Never
+      containers:
+        - name: pod-wo-index-wo-ns-index
+          image: docker.io/rock1017/log-generator:2.2.6
+          env:
+            - name: MESSAGE_COUNT
+              value: "10"
+            - name: SIZE
+              value: "256"
+            - name: FREQ
+              value: "1"
diff --git a/functional_tests/testdata/test_values.yaml.tmpl b/functional_tests/testdata/test_values.yaml.tmpl
new file mode 100644
index 0000000000..f23728148e
--- /dev/null
+++ b/functional_tests/testdata/test_values.yaml.tmpl
@@ -0,0 +1,53 @@
+---
+clusterName: dev-operator
+metricsEnabled: true
+tracesEnabled: true
+splunkObservability:
+  realm: CHANGEME
+  accessToken: CHANGEME
+  ingestUrl: {{ .AgentEndpoint }}
+  apiUrl: {{ .ApiURLEndpoint }}
+  metricsEnabled: true
+splunkPlatform:
+  token: foobar
+  endpoint: {{ .LogHecEndpoint }}
+  metricsEnabled: true
+  metricsIndex: myMetricsIndex
+logsCollection:
+  journald:
+    enabled: true
+agent:
+  config:
+    exporters:
+      otlp:
+        endpoint: {{ .OtlpEndpoint }}
+        tls:
+          insecure: true
+      splunk_hec/platform_metrics:
+        endpoint: {{ .MetricHecEndpoint }}
+    service:
+      pipelines:
+        traces:
+          exporters:
+            - otlp
+clusterReceiver:
+  eventsEnabled: true
+  config:
+    exporters:
+      signalfx:
+        ingest_url: {{ .K8sClusterEndpoint }}
+        tls:
+          insecure: true
+      splunk_hec/platform_logs:
+        endpoint: {{ .LogObjectsHecEndpoint }}
+
+  k8sObjects:
+    - name: pods
+    - name: namespaces
+    - name: nodes
+    - name: events
+      mode: watch
+
+environment: dev
+operator:
+  enabled: true
diff --git a/test/k8s_agent_pod_tests/test_agent_correctness_tests.py b/test/k8s_agent_pod_tests/test_agent_correctness_tests.py
index 7a7ff519dc..6406d83ca5 100644
--- a/test/k8s_agent_pod_tests/test_agent_correctness_tests.py
+++ b/test/k8s_agent_pod_tests/test_agent_correctness_tests.py
@@ -47,61 +47,6 @@ def setup_for_agent_tests():
     k8s_helper.upgrade_helm(default_yaml_file, yaml_fields_recall)
 
-
-def test_agent_logs_metadata(setup):
-    """
-    Test that agent logs have correct metadata:
-    - source
-    - sourcetype
-    - index
-
-    """
-    # prepare connector for test
-    yaml_file = AGENT_VALUES_YAML
-    yaml_fields = {
-        "splunkPlatform.index": INDEX_MAIN,
-        "splunkPlatform.token": os.environ.get("CI_SPLUNK_HEC_TOKEN"),
-        "splunkPlatform.endpoint": "https://"
-        + os.environ.get("CI_SPLUNK_HOST")
-        + ":8088/services/collector",
-    }
-    k8s_helper.upgrade_helm(yaml_file, yaml_fields)
-
-    full_pod_name = k8s_helper.get_pod_full_name("agent")
-    search_query = (
-        "index="
-        + INDEX_MAIN
-        + " k8s.pod.name="
-        + full_pod_name
-        + ' "Everything is ready. Begin running and processing data."'
-    )
-    logger.info(f"Query: {search_query}")
-    events = check_events_from_splunk(
-        start_time="-5m@m",
-        url=setup["splunkd_url"],
-        user=setup["splunk_user"],
-        query=["search {0}".format(search_query)],
-        password=setup["splunk_password"],
-    )
-    logger.info("Splunk received %s events in the last minute", len(events))
-    assert len(events) == 1
-    event = events[0]
-    sourcetype = "kube:container:otel-collector"
-    sorce_regex_part = "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
-    source_pattern = (
-        r"^/var/log/pods/default_"
-        + full_pod_name
-        + "_"
-        + sorce_regex_part
-        + "/otel-collector/0.log$"
-    )
-    assert INDEX_MAIN == event["index"]
-    assert full_pod_name == event["k8s.pod.name"]
-    assert sourcetype == event["_sourcetype"]
-    assert re.match(
-        source_pattern, event["source"]
-    ), f"Source does not match the pattern {source_pattern}"
-
-
 def test_all_agent_logs_correctly_ingested_into_splunk(setup):
     """
     Test that agent logs are correctly ingested into Splunk
diff --git a/test/k8s_logging_tests/test_config_logging.py b/test/k8s_logging_tests/test_config_logging.py
index fe5ce5b091..dd2be01f98 100644
--- a/test/k8s_logging_tests/test_config_logging.py
+++ b/test/k8s_logging_tests/test_config_logging.py
@@ -108,52 +108,6 @@ def test_annotation_excluding(setup, container_name, expected):
                 len(events))
     assert len(events) == expected
 
-@pytest.mark.parametrize("test_input,expected", [
-    ("kube:container:kube-apiserver", 1),
-    ("kube:container:etcd", 1),
-    ("kube:container:kube-controller-manager", 1),
-    ("empty_sourcetype", 0)
-])
-def test_sourcetype(setup, test_input, expected):
-    '''
-    Test that known sourcetypes are present in target index
-    '''
-    logger.info("testing for presence of sourcetype={0} expected={1} event(s)".format(
-        test_input, expected))
-    index_logging = os.environ["CI_INDEX_EVENTS"] if os.environ["CI_INDEX_EVENTS"] else "ci_events"
-    source_type = ' sourcetype=""' if test_input == "empty_sourcetype" else ' sourcetype=' + test_input
-    search_query = "index=" + index_logging + source_type
-    events = check_events_from_splunk(start_time="-24h@h",
-                                      url=setup["splunkd_url"],
-                                      user=setup["splunk_user"],
-                                      query=["search {0}".format(
-                                          search_query)],
-                                      password=setup["splunk_password"])
-    logger.info("Splunk received %s events in the last minute",
-                len(events))
-    assert len(events) >= expected if test_input != "empty_sourcetype" else len(
-        events) == expected
-
-@pytest.mark.parametrize("sourcetype,index,expected", [
-    ("sourcetype-anno", "pod-anno", 1)
-])
-def test_annotation_sourcetype(setup, sourcetype, index, expected):
-    '''
-    Test annotation for sourcetype properly overwrites it when set
-    '''
-    logger.info("testing for annotation sourcetype of {0} index={1} expected={2} event(s)".format(
-        sourcetype, index, expected))
-    search_query = "index=" + index + ' sourcetype=' + sourcetype
-    events = check_events_from_splunk(start_time="-1h@h",
-                                      url=setup["splunkd_url"],
-                                      user=setup["splunk_user"],
-                                      query=["search {0}".format(
-                                          search_query)],
-                                      password=setup["splunk_password"])
-    logger.info("Splunk received %s events in the last minute",
-                len(events))
-    assert len(events) >= expected
-
 @pytest.mark.skipif(True, reason="Jira: ADDON-36296")
 @pytest.mark.parametrize("test_input,expected", [
     ("/var/log/pods/*_kube-apiserver*", 1),
@@ -182,27 +136,6 @@ def test_source(setup, test_input, expected):
     assert len(events) >= expected if test_input != "empty_source" else len(
         events) == expected
 
-@pytest.mark.parametrize("test_input,host_name,expected", [
-    ("valid_host", "minikube", 1),
-    ("empty_host", "", 0)
-])
-def test_host(setup, test_input, host_name, expected):
-    '''
-    Test that known hosts are present in target index
-    '''
-    logger.info("testing for presence of host={0} expected={1} event(s)".format(
-        test_input, expected))
-    index_logging = os.environ["CI_INDEX_EVENTS"] if os.environ["CI_INDEX_EVENTS"] else "ci_events"
-    search_query = "index={0} host=\"{1}\"".format(index_logging, host_name)
-    events = check_events_from_splunk(start_time="-24h@h",
-                                      url=setup["splunkd_url"],
-                                      user=setup["splunk_user"],
-                                      query=["search {0}".format(search_query)],
-                                      password=setup["splunk_password"])
-    logger.info("Splunk received %s events in the last minute",
-                len(events))
-    assert len(events) >= expected
-
 @pytest.mark.parametrize("test_input,expected", [
     ("k8s.pod.name", 1),
     ("k8s.namespace.name", 1),
@@ -273,30 +206,6 @@ def test_custom_metadata_fields_annotations(setup, label, index, value, expected
                 len(events))
     assert len(events) >= expected
 
-@pytest.mark.parametrize("test_input,expected", [
-    ("test_journald_data", 1)
-])
-def test_journald_logs(setup, test_input, expected):
-    '''
-    Test that user specified index can successfully index the
-    journald log stream from k8s. If no index is specified, default
-    index "ci_events" will be used.
-    '''
-    logger.info("testing test_journald_logs input={0} expected={1} event(s)".format(
-        test_input, expected))
-    index_logging = os.environ["CI_INDEX_EVENTS"] if os.environ["CI_INDEX_EVENTS"] else "ci_events"
-    search_query = "index=" + index_logging + " sourcetype=kube:journald*"
-
-    events = check_events_from_splunk(start_time="-1h@h",
-                                      url=setup["splunkd_url"],
-                                      user=setup["splunk_user"],
-                                      query=["search {0}".format(
-                                          search_query)],
-                                      password=setup["splunk_password"])
-    logger.info("Splunk received %s events in the last hour",
-                len(events))
-    assert len(events) >= expected
-
 @pytest.mark.parametrize("test_input,expected", [
     ("containerd.service", 1),
     ("docker.service", 1),
diff --git a/test/k8s_metrics_test/__init__.py b/test/k8s_metrics_test/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/k8s_metrics_test/test_config_metrics.py b/test/k8s_metrics_test/test_config_metrics.py
deleted file mode 100644
index 98980aee14..0000000000
--- a/test/k8s_metrics_test/test_config_metrics.py
+++ /dev/null
@@ -1,183 +0,0 @@
-import pytest
-import time
-import os
-import logging
-import json
-from urllib.parse import urlparse
-from ..common import check_events_from_splunk
-from ..common import check_metrics_from_splunk
-
-
-@pytest.mark.parametrize("metric", [
-    #Control Plane Metrics
-    ("apiserver_request_total"),
-    ("workqueue_adds_total"),
-    ("scheduler_scheduling_algorithm_duration_seconds"),
-    ("kubeproxy_sync_proxy_rules_duration_seconds_count"),
-    ("coredns_dns_requests_total"),
-    #Container Metrics
-    ("container.cpu.time"),
-    ("container.cpu.utilization"),
-    ("container.filesystem.available"),
-    ("container.filesystem.capacity"),
-    ("container.filesystem.usage"),
-    ("container.memory.available"),
-    ("container.memory.major_page_faults"),
-    ("container.memory.page_faults"),
-    ("container.memory.rss"),
-    ("container.memory.usage"),
-    ("container.memory.working_set"),
-    ("k8s.container.cpu_limit"),
-    ("k8s.container.cpu_request"),
-    ("k8s.container.memory_limit"),
-    ("k8s.container.memory_request"),
-    ("k8s.container.ready"),
-    ("k8s.container.restarts"),
-    #Daemonset Metrics
-    ("k8s.daemonset.current_scheduled_nodes"),
-    ("k8s.daemonset.desired_scheduled_nodes"),
-    ("k8s.daemonset.misscheduled_nodes"),
-    ("k8s.daemonset.ready_nodes"),
-    #Deployment Metrics
-    ("k8s.deployment.available"),
-    ("k8s.deployment.desired"),
-    #Namespace Metrics
-    ("k8s.namespace.phase"),
-    #Node Metrics
-    ("k8s.node.condition_ready"),
-    ("k8s.node.cpu.time"),
-    ("k8s.node.cpu.utilization"),
-    ("k8s.node.filesystem.available"),
-    ("k8s.node.filesystem.capacity"),
-    ("k8s.node.filesystem.usage"),
-    ("k8s.node.memory.available"),
-    ("k8s.node.memory.major_page_faults"),
-    ("k8s.node.memory.page_faults"),
-    ("k8s.node.memory.rss"),
-    ("k8s.node.memory.usage"),
-    ("k8s.node.memory.working_set"),
-    ("k8s.node.network.errors"),
-    ("k8s.node.network.io"),
-    #Pod Metrics
-    ("k8s.pod.cpu.time"),
-    ("k8s.pod.cpu.utilization"),
-    ("k8s.pod.filesystem.available"),
-    ("k8s.pod.filesystem.capacity"),
-    ("k8s.pod.filesystem.usage"),
-    ("k8s.pod.memory.available"),
-    ("k8s.pod.memory.major_page_faults"),
-    ("k8s.pod.memory.page_faults"),
-    ("k8s.pod.memory.rss"),
-    ("k8s.pod.memory.usage"),
-    ("k8s.pod.memory.working_set"),
-    ("k8s.pod.network.errors"),
-    ("k8s.pod.network.io"),
-    ("k8s.pod.phase"),
-    #Replicaset Metrics
-    ("k8s.replicaset.available"),
-    ("k8s.replicaset.desired"),
-    #otelcol Metrics
-    ("otelcol_exporter_queue_size"),
-    ("otelcol_exporter_send_failed_log_records"),
-    ("otelcol_exporter_send_failed_metric_points"),
-    ("otelcol_exporter_sent_log_records"),
-    ("otelcol_exporter_sent_metric_points"),
-    ("otelcol_otelsvc_k8s_ip_lookup_miss"),
-    ("otelcol_otelsvc_k8s_namespace_added"),
-    ("otelcol_otelsvc_k8s_namespace_updated"),
-    ("otelcol_otelsvc_k8s_pod_added"),
-    ("otelcol_otelsvc_k8s_pod_table_size"),
-    ("otelcol_otelsvc_k8s_pod_updated"),
-    ("otelcol_process_cpu_seconds"),
-    ("otelcol_process_memory_rss"),
-    ("otelcol_process_runtime_heap_alloc_bytes"),
-    ("otelcol_process_runtime_total_alloc_bytes"),
-    ("otelcol_process_runtime_total_sys_memory_bytes"),
-    ("otelcol_process_uptime"),
-    ("otelcol_processor_accepted_log_records"),
-    ("otelcol_processor_accepted_metric_points"),
-    ("otelcol_processor_batch_batch_send_size_bucket"),
-    ("otelcol_processor_batch_batch_send_size_count"),
-    ("otelcol_processor_batch_batch_send_size_sum"),
-    ("otelcol_processor_batch_timeout_trigger_send"),
-    ("otelcol_processor_dropped_log_records"),
-    ("otelcol_processor_dropped_metric_points"),
-    ("otelcol_processor_refused_log_records"),
-    ("otelcol_processor_refused_metric_points"),
-    ("otelcol_receiver_accepted_metric_points"),
-    ("otelcol_receiver_refused_metric_points"),
-    ("otelcol_scraper_errored_metric_points"),
-    ("otelcol_scraper_scraped_metric_points"),
-    #Scrape Metrics
-    ("scrape_duration_seconds"),
-    ("scrape_samples_post_metric_relabeling"),
-    ("scrape_samples_scraped"),
-    ("scrape_series_added"),
-    #System Metrics
-    ("system.cpu.load_average.15m"),
-    ("system.cpu.load_average.1m"),
-    ("system.cpu.load_average.5m"),
-    ("system.cpu.time"),
-    ("system.disk.io"),
-    ("system.disk.io_time"),
-    ("system.disk.merged"),
-    ("system.disk.operation_time"),
-    ("system.disk.operations"),
-    ("system.disk.pending_operations"),
-    ("system.disk.weighted_io_time"),
-    ("system.filesystem.inodes.usage"),
-    ("system.filesystem.usage"),
-    ("system.memory.usage"),
-    ("system.network.connections"),
-    ("system.network.dropped"),
-    ("system.network.errors"),
-    ("system.network.io"),
-    ("system.network.packets"),
-    ("system.paging.faults"),
-    ("system.paging.operations"),
-    ("system.paging.usage"),
-    ("system.processes.count"),
-    ("system.processes.created"),
-    #Up Metrics
-    ("up"),
-    # Network Explorer Metrics
-    ("tcp.bytes"),
-    ("tcp.active"),
-    ("tcp.packets"),
-    ("tcp.retrans"),
-    ("tcp.syn.timeouts"),
-    ("tcp.new_sockets"),
-    ("tcp.resets"),
-    ("tcp.rtt.num_measurements"),
-    ("tcp.rtt.average"),
-    ("udp.bytes"),
-    ("udp.packets"),
-    ("udp.active"),
-    ("udp.drops"),
-    ("http.status_code"),
-    ("http.active_sockets"),
-    ("http.client.duration_average"),
-    ("http.server.duration_average"),
-    ("dns.active_sockets"),
-    ("dns.responses"),
-    ("dns.timeouts"),
-    ("dns.client.duration_average"),
-    ("dns.server.duration_average")
-])
-def test_metric_name(setup, metric):
-    '''
-    This test covers one metric from each endpoint that the metrics plugin covers
-    '''
-    logging.info("testing for presence of metric={0}".format(metric))
-    index_metrics = os.environ["CI_INDEX_METRICS"] if os.environ["CI_INDEX_METRICS"] else "ci_metrics"
-    logging.info("index for metrics is {0}".format(index_metrics))
-    events = check_metrics_from_splunk(start_time="-24h@h",
-                                       end_time="now",
-                                       url=setup["splunkd_url"],
-                                       user=setup["splunk_user"],
-                                       password=setup["splunk_password"],
-                                       index=index_metrics,
-                                       metric_name=metric)
-    logging.info("Splunk received %s metrics in the last minute",
-                 len(events))
-    assert len(events) >= 0
diff --git a/test/k8s_objects_tests/__init__.py b/test/k8s_objects_tests/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/k8s_objects_tests/test_config_objects.py b/test/k8s_objects_tests/test_config_objects.py
deleted file mode 100644
index b6e304ea61..0000000000
--- a/test/k8s_objects_tests/test_config_objects.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import pytest
-import os
-import logging
-
-from ..common import check_events_from_splunk
-
-@pytest.mark.parametrize("test_key, test_value, expected", [
-    ("object.kind", "event", 1),
-    ("kind", "pod", 1),
-    ("kind", "namespace", 1),
-    ("kind", "node", 1)
-])
-def test_k8s_objects(setup, test_key, test_value, expected):
-    '''
-    Test that user specified index can successfully index the
-    objects stream from k8s.
-    '''
-    logging.getLogger().info("testing test_splunk_index input={0} \
-        expected={1} event(s)".format(test_value, expected))
-    index_objects = os.environ.get("CI_INDEX_EVENTS", "ci_events")
-
-    search_query = f'index={index_objects} {test_key}={test_value}'
-    events = check_events_from_splunk(start_time="-1h@h",
-                                      url=setup["splunkd_url"],
-                                      user=setup["splunk_user"],
-                                      query=["search {0}".format(search_query)],
-                                      password=setup["splunk_password"])
-    logging.getLogger().info("Splunk received %s events in the last minute",
-                             len(events))
-    assert len(events) >= expected
-
-@pytest.mark.parametrize("test_key, test_value, expected", [
-    ("sourcetype", "kube:object:pods", 1),
-    ("sourcetype", "kube:object:namespaces", 1),
-    ("sourcetype", "kube:object:nodes", 1)
-])
-def test_k8s_objects_sourcetype(setup, test_key, test_value, expected):
-    '''
-    Test that known k8s objects sourcetypes are present in target index
-    '''
-    logging.getLogger().info("testing test_splunk_index input={0} \
-        expected={1} event(s)".format(test_value, expected))
-    index_objects = os.environ.get("CI_INDEX_EVENTS", "ci_events")
-
-    search_query = f'index={index_objects} {test_key}={test_value}'
-    events = check_events_from_splunk(start_time="-1h@h",
-                                      url=setup["splunkd_url"],
-                                      user=setup["splunk_user"],
-                                      query=["search {0}".format(search_query)],
-                                      password=setup["splunk_password"])
-    logging.getLogger().info("Splunk received %s events in the last minute",
-                             len(events))
-    assert len(events) >= expected
-