From 97d7604ec258937f6ca8c02f20cd8305322bdace Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Tue, 10 Oct 2023 17:38:12 +0300 Subject: [PATCH 01/14] all: update version string everywhere for v0.37.1 (#5427) Signed-off-by: Paschalis Tsilias --- CHANGELOG.md | 8 ++++++++ docs/sources/operator/deploy-agent-operator-resources.md | 2 +- docs/sources/operator/getting-started.md | 2 +- .../configuration/integrations/node-exporter-config.md | 2 +- .../configuration/integrations/process-exporter-config.md | 4 ++-- .../sources/static/set-up/install/install-agent-docker.md | 4 ++-- pkg/operator/defaults.go | 2 +- production/kubernetes/agent-bare.yaml | 2 +- production/kubernetes/agent-loki.yaml | 2 +- production/kubernetes/agent-traces.yaml | 2 +- production/kubernetes/build/lib/version.libsonnet | 2 +- .../kubernetes/build/templates/operator/main.jsonnet | 4 ++-- production/kubernetes/install-bare.sh | 2 +- production/operator/templates/agent-operator.yaml | 4 ++-- production/tanka/grafana-agent/v1/main.libsonnet | 4 ++-- production/tanka/grafana-agent/v2/internal/base.libsonnet | 4 ++-- .../tanka/grafana-agent/v2/internal/syncer.libsonnet | 2 +- 17 files changed, 30 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f13d08614b1..216e9558c33f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,14 @@ Main (unreleased) - The `loki.write` WAL now has snappy compression enabled by default. (@thepalbi) +v0.37.1 (2023-10-10) +----------------- + +### Bugfixes + +- Fix the initialization of the default namespaces map for the operator and the + loki.source.kubernetes component. 
(@wildum) + v0.37.0 (2023-10-10) ----------------- diff --git a/docs/sources/operator/deploy-agent-operator-resources.md b/docs/sources/operator/deploy-agent-operator-resources.md index 227941ae13ae..7d73fadd4a1a 100644 --- a/docs/sources/operator/deploy-agent-operator-resources.md +++ b/docs/sources/operator/deploy-agent-operator-resources.md @@ -62,7 +62,7 @@ To deploy the `GrafanaAgent` resource: labels: app: grafana-agent spec: - image: grafana/agent:v0.37.0 + image: grafana/agent:v0.37.1 integrations: selector: matchLabels: diff --git a/docs/sources/operator/getting-started.md b/docs/sources/operator/getting-started.md index ab6c7943fc62..822a7dca01a4 100644 --- a/docs/sources/operator/getting-started.md +++ b/docs/sources/operator/getting-started.md @@ -79,7 +79,7 @@ To install Agent Operator: serviceAccountName: grafana-agent-operator containers: - name: operator - image: grafana/agent-operator:v0.37.0 + image: grafana/agent-operator:v0.37.1 args: - --kubelet-service=default/kubelet --- diff --git a/docs/sources/static/configuration/integrations/node-exporter-config.md b/docs/sources/static/configuration/integrations/node-exporter-config.md index 0c4c4e83acd8..28eaccb09e45 100644 --- a/docs/sources/static/configuration/integrations/node-exporter-config.md +++ b/docs/sources/static/configuration/integrations/node-exporter-config.md @@ -30,7 +30,7 @@ docker run \ -v "/proc:/host/proc:ro,rslave" \ -v /tmp/agent:/etc/agent \ -v /path/to/config.yaml:/etc/agent-config/agent.yaml \ - grafana/agent:v0.37.0 \ + grafana/agent:v0.37.1 \ --config.file=/etc/agent-config/agent.yaml ``` diff --git a/docs/sources/static/configuration/integrations/process-exporter-config.md b/docs/sources/static/configuration/integrations/process-exporter-config.md index 41cc9afd40da..eee154ad4f8a 100644 --- a/docs/sources/static/configuration/integrations/process-exporter-config.md +++ b/docs/sources/static/configuration/integrations/process-exporter-config.md @@ -22,7 +22,7 @@ docker run \ 
-v "/proc:/proc:ro" \ -v /tmp/agent:/etc/agent \ -v /path/to/config.yaml:/etc/agent-config/agent.yaml \ - grafana/agent:v0.37.0 \ + grafana/agent:v0.37.1 \ --config.file=/etc/agent-config/agent.yaml ``` @@ -39,7 +39,7 @@ metadata: name: agent spec: containers: - - image: grafana/agent:v0.37.0 + - image: grafana/agent:v0.37.1 name: agent args: - --config.file=/etc/agent-config/agent.yaml diff --git a/docs/sources/static/set-up/install/install-agent-docker.md b/docs/sources/static/set-up/install/install-agent-docker.md index 618e8c912bd8..a7b0a03e327a 100644 --- a/docs/sources/static/set-up/install/install-agent-docker.md +++ b/docs/sources/static/set-up/install/install-agent-docker.md @@ -34,7 +34,7 @@ To run a Grafana Agent Docker container on Linux, run the following command in a docker run \ -v WAL_DATA_DIRECTORY:/etc/agent/data \ -v CONFIG_FILE_PATH:/etc/agent/agent.yaml \ - grafana/agent:v0.37.0 + grafana/agent:v0.37.1 ``` Replace `CONFIG_FILE_PATH` with the configuration file path on your Linux host system. @@ -51,7 +51,7 @@ To run a Grafana Agent Docker container on Windows, run the following command in docker run ^ -v WAL_DATA_DIRECTORY:C:\etc\grafana-agent\data ^ -v CONFIG_FILE_PATH:C:\etc\grafana-agent ^ - grafana/agent:v0.37.0-windows + grafana/agent:v0.37.1-windows ``` Replace the following: diff --git a/pkg/operator/defaults.go b/pkg/operator/defaults.go index bc9372e1d53c..59ea98669a8c 100644 --- a/pkg/operator/defaults.go +++ b/pkg/operator/defaults.go @@ -2,7 +2,7 @@ package operator // Supported versions of the Grafana Agent. 
var ( - DefaultAgentVersion = "v0.37.0" + DefaultAgentVersion = "v0.37.1" DefaultAgentBaseImage = "grafana/agent" DefaultAgentImage = DefaultAgentBaseImage + ":" + DefaultAgentVersion ) diff --git a/production/kubernetes/agent-bare.yaml b/production/kubernetes/agent-bare.yaml index 0305e1162706..4a84a679ef92 100644 --- a/production/kubernetes/agent-bare.yaml +++ b/production/kubernetes/agent-bare.yaml @@ -83,7 +83,7 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName - image: grafana/agent:v0.37.0 + image: grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent name: grafana-agent ports: diff --git a/production/kubernetes/agent-loki.yaml b/production/kubernetes/agent-loki.yaml index e980104bff2c..2550cd6a7571 100644 --- a/production/kubernetes/agent-loki.yaml +++ b/production/kubernetes/agent-loki.yaml @@ -65,7 +65,7 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName - image: grafana/agent:v0.37.0 + image: grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent name: grafana-agent-logs ports: diff --git a/production/kubernetes/agent-traces.yaml b/production/kubernetes/agent-traces.yaml index 3e236bef3437..1fb5501c8558 100644 --- a/production/kubernetes/agent-traces.yaml +++ b/production/kubernetes/agent-traces.yaml @@ -114,7 +114,7 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName - image: grafana/agent:v0.37.0 + image: grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent name: grafana-agent-traces ports: diff --git a/production/kubernetes/build/lib/version.libsonnet b/production/kubernetes/build/lib/version.libsonnet index e3d950cb7d93..3c0c2dc542fc 100644 --- a/production/kubernetes/build/lib/version.libsonnet +++ b/production/kubernetes/build/lib/version.libsonnet @@ -1 +1 @@ -'grafana/agent:v0.37.0' +'grafana/agent:v0.37.1' diff --git a/production/kubernetes/build/templates/operator/main.jsonnet b/production/kubernetes/build/templates/operator/main.jsonnet index e9ad3fd44533..18c53173d526 100644 --- 
a/production/kubernetes/build/templates/operator/main.jsonnet +++ b/production/kubernetes/build/templates/operator/main.jsonnet @@ -23,8 +23,8 @@ local ksm = import 'kube-state-metrics/kube-state-metrics.libsonnet'; local this = self, _images:: { - agent: 'grafana/agent:v0.37.0', - agent_operator: 'grafana/agent-operator:v0.37.0', + agent: 'grafana/agent:v0.37.1', + agent_operator: 'grafana/agent-operator:v0.37.1', ksm: 'registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.5.0', }, diff --git a/production/kubernetes/install-bare.sh b/production/kubernetes/install-bare.sh index 2de9c3d9b5fc..ff06be34f772 100644 --- a/production/kubernetes/install-bare.sh +++ b/production/kubernetes/install-bare.sh @@ -25,7 +25,7 @@ check_installed() { check_installed curl check_installed envsubst -MANIFEST_BRANCH=v0.37.0 +MANIFEST_BRANCH=v0.37.1 MANIFEST_URL=${MANIFEST_URL:-https://raw.githubusercontent.com/grafana/agent/${MANIFEST_BRANCH}/production/kubernetes/agent-bare.yaml} NAMESPACE=${NAMESPACE:-default} diff --git a/production/operator/templates/agent-operator.yaml b/production/operator/templates/agent-operator.yaml index 2fe5667a69f9..dc618b85099c 100644 --- a/production/operator/templates/agent-operator.yaml +++ b/production/operator/templates/agent-operator.yaml @@ -372,7 +372,7 @@ spec: containers: - args: - --kubelet-service=default/kubelet - image: grafana/agent-operator:v0.37.0 + image: grafana/agent-operator:v0.37.1 imagePullPolicy: IfNotPresent name: grafana-agent-operator serviceAccount: grafana-agent-operator @@ -436,7 +436,7 @@ metadata: name: grafana-agent namespace: ${NAMESPACE} spec: - image: grafana/agent:v0.37.0 + image: grafana/agent:v0.37.1 integrations: selector: matchLabels: diff --git a/production/tanka/grafana-agent/v1/main.libsonnet b/production/tanka/grafana-agent/v1/main.libsonnet index 99eac44402a4..051e7a45e7b6 100644 --- a/production/tanka/grafana-agent/v1/main.libsonnet +++ b/production/tanka/grafana-agent/v1/main.libsonnet @@ -15,8 +15,8 @@ 
local service = k.core.v1.service; (import './lib/traces.libsonnet') + { _images:: { - agent: 'grafana/agent:v0.37.0', - agentctl: 'grafana/agentctl:v0.37.0', + agent: 'grafana/agent:v0.37.1', + agentctl: 'grafana/agentctl:v0.37.1', }, // new creates a new DaemonSet deployment of the grafana-agent. By default, diff --git a/production/tanka/grafana-agent/v2/internal/base.libsonnet b/production/tanka/grafana-agent/v2/internal/base.libsonnet index ce83f82cc756..553feb15012a 100644 --- a/production/tanka/grafana-agent/v2/internal/base.libsonnet +++ b/production/tanka/grafana-agent/v2/internal/base.libsonnet @@ -11,8 +11,8 @@ function(name='grafana-agent', namespace='') { local this = self, _images:: { - agent: 'grafana/agent:v0.37.0', - agentctl: 'grafana/agentctl:v0.37.0', + agent: 'grafana/agent:v0.37.1', + agentctl: 'grafana/agentctl:v0.37.1', }, _config:: { name: name, diff --git a/production/tanka/grafana-agent/v2/internal/syncer.libsonnet b/production/tanka/grafana-agent/v2/internal/syncer.libsonnet index 6cf66902ecbc..633ae5d519fe 100644 --- a/production/tanka/grafana-agent/v2/internal/syncer.libsonnet +++ b/production/tanka/grafana-agent/v2/internal/syncer.libsonnet @@ -14,7 +14,7 @@ function( ) { local _config = { api: error 'api must be set', - image: 'grafana/agentctl:v0.37.0', + image: 'grafana/agentctl:v0.37.1', schedule: '*/5 * * * *', configs: [], } + config, From d5338a269a6cbae344f4b259be81ebbeb1074bc2 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Tue, 10 Oct 2023 11:43:28 -0400 Subject: [PATCH 02/14] helm: move initContainers to inside controller for consistency with pod settings (#5425) Other pod-level settings are determined by the `controller` group of settings, so initContainers should be inside of controller (`controller.initContainers`) rather than a top-level field. 
--- .../helm/charts/grafana-agent/CHANGELOG.md | 5 +++ .../helm/charts/grafana-agent/README.md | 2 +- .../ci/initcontainers-values.yaml | 37 ++++++++++--------- .../templates/controllers/_pod.yaml | 4 +- .../helm/charts/grafana-agent/values.yaml | 10 ++--- 5 files changed, 32 insertions(+), 26 deletions(-) diff --git a/operations/helm/charts/grafana-agent/CHANGELOG.md b/operations/helm/charts/grafana-agent/CHANGELOG.md index b8f2d6183cb0..32960da90cd1 100644 --- a/operations/helm/charts/grafana-agent/CHANGELOG.md +++ b/operations/helm/charts/grafana-agent/CHANGELOG.md @@ -10,6 +10,11 @@ internal API changes are not present. Unreleased ---------- +### Breaking changes + +- The `initContainers` setting has been moved to `controller.initContainers` + for consistency with other Pod-level settings. (@rfratto) + ### Enhancements - Make CRDs optional through the `crds.create` setting. (@bentonam, @rfratto) diff --git a/operations/helm/charts/grafana-agent/README.md b/operations/helm/charts/grafana-agent/README.md index 31ab5d02336a..097053e4706a 100644 --- a/operations/helm/charts/grafana-agent/README.md +++ b/operations/helm/charts/grafana-agent/README.md @@ -78,6 +78,7 @@ use the older mode (called "static mode"), set the `agent.mode` value to | controller.enableStatefulSetAutoDeletePVC | bool | `false` | Whether to enable automatic deletion of stale PVCs due to a scale down operation, when controller.type is 'statefulset'. | | controller.hostNetwork | bool | `false` | Configures Pods to use the host network. When set to true, the ports that will be used must be specified. | | controller.hostPID | bool | `false` | Configures Pods to use the host PID namespace. | +| controller.initContainers | list | `[]` | | | controller.nodeSelector | object | `{}` | nodeSelector to apply to Grafana Agent pods. | | controller.parallelRollout | bool | `true` | Whether to deploy pods in parallel. Only used when controller.type is 'statefulset'. 
| | controller.podAnnotations | object | `{}` | Extra pod annotations to add. | @@ -109,7 +110,6 @@ use the older mode (called "static mode"), set the `agent.mode` value to | ingress.path | string | `"/"` | | | ingress.pathType | string | `"Prefix"` | | | ingress.tls | list | `[]` | | -| initContainers | list | `[]` | | | nameOverride | string | `nil` | Overrides the chart's name. Used to change the infix in the resource names. | | rbac.create | bool | `true` | Whether to create RBAC resources for the agent. | | service.annotations | object | `{}` | | diff --git a/operations/helm/charts/grafana-agent/ci/initcontainers-values.yaml b/operations/helm/charts/grafana-agent/ci/initcontainers-values.yaml index 9b12e515be4e..3097aebc628e 100644 --- a/operations/helm/charts/grafana-agent/ci/initcontainers-values.yaml +++ b/operations/helm/charts/grafana-agent/ci/initcontainers-values.yaml @@ -1,26 +1,27 @@ -initContainers: -- name: geo-ip - image: ghcr.io/maxmind/geoipupdate:v6.0 - volumeMounts: - - name: geoip - mountPath: /etc/geoip - volumes: - - name: geoip - emptyDir: {} - env: - - name: GEOIPUPDATE_ACCOUNT_ID - value: "geoipupdate_account_id" - - name: GEOIPUPDATE_LICENSE_KEY - value: "geoipupdate_license_key" - - name: GEOIPUPDATE_EDITION_IDS - value: "GeoLite2-ASN GeoLite2-City GeoLite2-Country" - - name: GEOIPUPDATE_DB_DIR - value: "/etc/geoip" controller: + initContainers: + - name: geo-ip + image: ghcr.io/maxmind/geoipupdate:v6.0 + volumeMounts: + - name: geoip + mountPath: /etc/geoip + volumes: + - name: geoip + emptyDir: {} + env: + - name: GEOIPUPDATE_ACCOUNT_ID + value: "geoipupdate_account_id" + - name: GEOIPUPDATE_LICENSE_KEY + value: "geoipupdate_license_key" + - name: GEOIPUPDATE_EDITION_IDS + value: "GeoLite2-ASN GeoLite2-City GeoLite2-Country" + - name: GEOIPUPDATE_DB_DIR + value: "/etc/geoip" volumes: extra: - name: geoip mountPath: /etc/geoip + agent: mounts: extra: diff --git a/operations/helm/charts/grafana-agent/templates/controllers/_pod.yaml 
b/operations/helm/charts/grafana-agent/templates/controllers/_pod.yaml index d016b5f8b74e..1fe0363c9232 100644 --- a/operations/helm/charts/grafana-agent/templates/controllers/_pod.yaml +++ b/operations/helm/charts/grafana-agent/templates/controllers/_pod.yaml @@ -23,9 +23,9 @@ spec: {{- toYaml .Values.image.pullSecrets | nindent 4 }} {{- end }} {{- end }} - {{- if .Values.initContainers }} + {{- if .Values.controller.initContainers }} initContainers: - {{- with .Values.initContainers }} + {{- with .Values.controller.initContainers }} {{- toYaml . | nindent 4 }} {{- end }} {{- end }} diff --git a/operations/helm/charts/grafana-agent/values.yaml b/operations/helm/charts/grafana-agent/values.yaml index 8d55723462bb..d325951c4571 100644 --- a/operations/helm/charts/grafana-agent/values.yaml +++ b/operations/helm/charts/grafana-agent/values.yaml @@ -18,11 +18,6 @@ global: # -- Security context to apply to the Grafana Agent pod. podSecurityContext: {} -## The init containers to run. -## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ -## -initContainers: [] - crds: # -- Whether to install CRDs for monitoring. create: true @@ -210,6 +205,11 @@ controller: # -- volumeClaimTemplates to add when controller.type is 'statefulset'. volumeClaimTemplates: [] + ## -- Additional init containers to run. + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## + initContainers: [] + service: # -- Creates a Service for the controller's pods. 
enabled: true From 4cd52486d0e9266ddb7faaf5fe72be59bc98f45e Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Tue, 10 Oct 2023 18:55:08 +0300 Subject: [PATCH 03/14] helm: update agent version to v0.37.1 (#5431) Signed-off-by: Paschalis Tsilias --- operations/helm/charts/grafana-agent/CHANGELOG.md | 5 +++++ operations/helm/charts/grafana-agent/Chart.yaml | 4 ++-- operations/helm/charts/grafana-agent/README.md | 2 +- .../grafana-agent/templates/controllers/statefulset.yaml | 2 +- .../grafana-agent/templates/controllers/daemonset.yaml | 2 +- .../grafana-agent/templates/controllers/daemonset.yaml | 2 +- .../grafana-agent/templates/controllers/daemonset.yaml | 2 +- .../grafana-agent/templates/controllers/deployment.yaml | 2 +- .../grafana-agent/templates/controllers/deployment.yaml | 2 +- .../grafana-agent/templates/controllers/statefulset.yaml | 2 +- .../grafana-agent/templates/controllers/statefulset.yaml | 2 +- .../grafana-agent/templates/controllers/daemonset.yaml | 2 +- .../grafana-agent/templates/controllers/daemonset.yaml | 2 +- .../grafana-agent/templates/controllers/daemonset.yaml | 2 +- .../grafana-agent/templates/controllers/daemonset.yaml | 2 +- .../grafana-agent/templates/controllers/daemonset.yaml | 2 +- .../grafana-agent/templates/controllers/daemonset.yaml | 2 +- .../grafana-agent/templates/controllers/daemonset.yaml | 2 +- .../grafana-agent/templates/controllers/daemonset.yaml | 2 +- .../grafana-agent/templates/controllers/daemonset.yaml | 2 +- .../grafana-agent/templates/controllers/daemonset.yaml | 2 +- .../grafana-agent/templates/controllers/daemonset.yaml | 2 +- .../grafana-agent/templates/controllers/daemonset.yaml | 2 +- .../grafana-agent/templates/controllers/daemonset.yaml | 2 +- .../grafana-agent/templates/controllers/daemonset.yaml | 2 +- .../grafana-agent/templates/controllers/daemonset.yaml | 2 +- 26 files changed, 31 insertions(+), 26 deletions(-) diff --git a/operations/helm/charts/grafana-agent/CHANGELOG.md 
b/operations/helm/charts/grafana-agent/CHANGELOG.md index 32960da90cd1..abd217b00f72 100644 --- a/operations/helm/charts/grafana-agent/CHANGELOG.md +++ b/operations/helm/charts/grafana-agent/CHANGELOG.md @@ -10,6 +10,9 @@ internal API changes are not present. Unreleased ---------- +0.26.0 (2023-10-10) +------------------- + ### Breaking changes - The `initContainers` setting has been moved to `controller.initContainers` @@ -19,6 +22,8 @@ Unreleased - Make CRDs optional through the `crds.create` setting. (@bentonam, @rfratto) +- Update Grafana Agent version to v0.37.1. (@tpaschalis) + 0.25.0 (2023-09-22) ------------------- diff --git a/operations/helm/charts/grafana-agent/Chart.yaml b/operations/helm/charts/grafana-agent/Chart.yaml index a955a9674d45..2ace23ab53ed 100644 --- a/operations/helm/charts/grafana-agent/Chart.yaml +++ b/operations/helm/charts/grafana-agent/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: grafana-agent description: 'Grafana Agent' type: application -version: 0.25.0 -appVersion: 'v0.36.2' +version: 0.26.0 +appVersion: 'v0.37.1' dependencies: - name: crds diff --git a/operations/helm/charts/grafana-agent/README.md b/operations/helm/charts/grafana-agent/README.md index 097053e4706a..09596d8c7ccb 100644 --- a/operations/helm/charts/grafana-agent/README.md +++ b/operations/helm/charts/grafana-agent/README.md @@ -1,6 +1,6 @@ # Grafana Agent Helm chart -![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.25.0](https://img.shields.io/badge/Version-0.25.0-informational?style=flat-square) ![AppVersion: v0.36.2](https://img.shields.io/badge/AppVersion-v0.36.2-informational?style=flat-square) +![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.26.0](https://img.shields.io/badge/Version-0.26.0-informational?style=flat-square) ![AppVersion: v0.37.1](https://img.shields.io/badge/AppVersion-v0.37.1-informational?style=flat-square) Helm 
chart for deploying [Grafana Agent][] to Kubernetes. diff --git a/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml index 0cfc38af77a3..e1ab3066d501 100644 --- a/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml @@ -28,7 +28,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml index 138e27ae9645..7a6bf70f7889 100644 --- a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml index a6791e28477a..5aa4d6550c7c 100644 --- a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: 
docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml index 80312dc3472b..2c83b321a780 100644 --- a/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml b/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml index cdacf03454f7..f2f35ee90803 100644 --- a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml +++ b/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml b/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml index 93a523bb6d92..9965a7648e47 100644 --- a/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml +++ b/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml @@ -26,7 +26,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: 
docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml index 74ef6adb4cbe..cf07f6062cb4 100644 --- a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml index 17f27c9d9cda..822abd34a76b 100644 --- a/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml @@ -28,7 +28,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml index 80312dc3472b..2c83b321a780 100644 --- a/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: 
docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml index 80312dc3472b..2c83b321a780 100644 --- a/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml index 80312dc3472b..2c83b321a780 100644 --- a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml index 972b25dafe43..71cb7b4d5e25 100644 --- a/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git 
a/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml index d920f35a6258..d1c0643743ec 100644 --- a/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml index d3152c3e2e19..d9b5317954d2 100644 --- a/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml index 80d6c1465fb1..22521833324f 100644 --- a/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml 
b/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml index d2feed25edde..b5e2e4df2f1e 100644 --- a/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml index bd57f79325c4..41bad269bfe0 100644 --- a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml @@ -30,7 +30,7 @@ spec: - name: global-cred containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml index 04c54cc05393..7e291194baa0 100644 --- a/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: quay.io/grafana/agent:v0.36.2 + image: quay.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml 
b/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml index 36e30d03d56c..3fde83e13d93 100644 --- a/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml @@ -43,7 +43,7 @@ spec: name: geoip containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml index 76f2655abd09..51e370ab5d44 100644 --- a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: - name: local-cred containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml index 04c54cc05393..7e291194baa0 100644 --- a/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: quay.io/grafana/agent:v0.36.2 + image: quay.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml 
b/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml index 4bc4ad6c3559..773c092e37d5 100644 --- a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml index 05a65951b8a0..b99272109279 100644 --- a/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.36.2 + image: docker.io/grafana/agent:v0.37.1 imagePullPolicy: IfNotPresent args: - -config.file=/etc/agent/config.yaml From bb974bebd1d54e76af8a65d746c246a1813eb218 Mon Sep 17 00:00:00 2001 From: Mischa Thompson Date: Tue, 10 Oct 2023 14:36:40 -0700 Subject: [PATCH 04/14] Templatize version updates (#5333) --- .github/workflows/check-versioned-files.yml | 16 +++ Makefile | 32 +++-- .../release/3-update-version-in-code.md | 8 ++ docs/sources/_index.md | 2 + docs/sources/_index.md.t | 120 ++++++++++++++++++ .../deploy-agent-operator-resources.md | 2 +- docs/sources/operator/getting-started.md | 2 +- .../integrations/node-exporter-config.md | 4 +- .../integrations/process-exporter-config.md | 4 +- .../set-up/install/install-agent-docker.md | 4 +- pkg/operator/defaults.go.t | 15 +++ tools/gen-versioned-files/agent-version.txt | 1 + .../gen-versioned-files.sh | 20 +++ 13 files 
changed, 210 insertions(+), 20 deletions(-) create mode 100644 .github/workflows/check-versioned-files.yml create mode 100644 docs/sources/_index.md.t create mode 100644 pkg/operator/defaults.go.t create mode 100644 tools/gen-versioned-files/agent-version.txt create mode 100755 tools/gen-versioned-files/gen-versioned-files.sh diff --git a/.github/workflows/check-versioned-files.yml b/.github/workflows/check-versioned-files.yml new file mode 100644 index 000000000000..a29b4de8e6cf --- /dev/null +++ b/.github/workflows/check-versioned-files.yml @@ -0,0 +1,16 @@ +name: Test Versioned Files +on: pull_request +jobs: + regenerate-docs: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Regenerate versioned files + run: | + make generate-versioned-files + if ! git diff --exit-code; then + echo "Newly generated versioned files differ from those checked in. Make sure to only update the templates manually and run 'make generate-versioned-files'!" >&2 + exit 1 + fi diff --git a/Makefile b/Makefile index bc568f81e332..937a95ab993a 100644 --- a/Makefile +++ b/Makefile @@ -50,16 +50,17 @@ ## ## Targets for generating assets: ## -## generate Generate everything. -## generate-crds Generate Grafana Agent Operator CRDs ands its documentation. -## generate-drone Generate the Drone YAML from Jsonnet. -## generate-helm-docs Generate Helm chart documentation. -## generate-helm-tests Generate Helm chart tests. -## generate-manifests Generate production/kubernetes YAML manifests. -## generate-dashboards Generate dashboards in example/docker-compose after -## changing Jsonnet. -## generate-protos Generate protobuf files. -## generate-ui Generate the UI assets. +## generate Generate everything. +## generate-crds Generate Grafana Agent Operator CRDs ands its documentation. +## generate-drone Generate the Drone YAML from Jsonnet. +## generate-helm-docs Generate Helm chart documentation. +## generate-helm-tests Generate Helm chart tests. 
+## generate-manifests Generate production/kubernetes YAML manifests. +## generate-dashboards Generate dashboards in example/docker-compose after +## changing Jsonnet. +## generate-protos Generate protobuf files. +## generate-ui Generate the UI assets. +## generate-versioned-files Generate versioned files. ## ## Other targets: ## @@ -280,8 +281,8 @@ smoke-image: # Targets for generating assets # -.PHONY: generate generate-crds generate-drone generate-helm-docs generate-helm-tests generate-manifests generate-dashboards generate-protos generate-ui -generate: generate-crds generate-drone generate-helm-docs generate-helm-tests generate-manifests generate-dashboards generate-protos generate-ui +.PHONY: generate generate-crds generate-drone generate-helm-docs generate-helm-tests generate-manifests generate-dashboards generate-protos generate-ui generate-versioned-files +generate: generate-crds generate-drone generate-helm-docs generate-helm-tests generate-manifests generate-dashboards generate-protos generate-ui generate-versioned-files generate-crds: ifeq ($(USE_CONTAINER),1) @@ -337,6 +338,13 @@ else cd ./web/ui && yarn --network-timeout=1200000 && yarn run build endif +generate-versioned-files: +ifeq ($(USE_CONTAINER),1) + $(RERUN_IN_CONTAINER) +else + sh ./tools/gen-versioned-files/gen-versioned-files.sh +endif + # # Other targets # diff --git a/docs/developer/release/3-update-version-in-code.md b/docs/developer/release/3-update-version-in-code.md index a5488d2777e5..b3de34187392 100644 --- a/docs/developer/release/3-update-version-in-code.md +++ b/docs/developer/release/3-update-version-in-code.md @@ -24,6 +24,14 @@ The project must be updated to reference the upcoming release tag whenever a new 3. Update appropriate places in the codebase that have the previous version with the new version determined above. 
+ First update `tools/gen-versioned-files/agent-version.txt` with the new `VERSION` and run: + + ``` + make generate-versioned-files + ``` + + Next, commit the changes (including those to `tools/gen-versioned-files/agent-version.txt`, as a workflow will use this version to ensure that the templates and generated files are in sync). + * Do **not** update the `operations/helm` directory. It is updated independently from Agent releases. 3. Create a PR to merge to main (must be merged before continuing). diff --git a/docs/sources/_index.md b/docs/sources/_index.md index a589b6545f2e..2411cd963a0c 100644 --- a/docs/sources/_index.md +++ b/docs/sources/_index.md @@ -7,6 +7,8 @@ canonical: https://grafana.com/docs/agent/latest/ title: Grafana Agent description: Grafana Agent is a flexible, performant, vendor-neutral, telemetry collector weight: 350 +cascade: + AGENT_RELEASE: v0.37.1 --- # Grafana Agent diff --git a/docs/sources/_index.md.t b/docs/sources/_index.md.t new file mode 100644 index 000000000000..ade2db655994 --- /dev/null +++ b/docs/sources/_index.md.t @@ -0,0 +1,120 @@ +--- +aliases: +- /docs/grafana-cloud/agent/ +- /docs/grafana-cloud/monitor-infrastructure/agent/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/ +canonical: https://grafana.com/docs/agent/latest/ +title: Grafana Agent +description: Grafana Agent is a flexible, performant, vendor-neutral, telemetry collector +weight: 350 +cascade: + AGENT_RELEASE: $AGENT_VERSION +--- + +# Grafana Agent + +Grafana Agent is a vendor-neutral, batteries-included telemetry collector with +configuration inspired by [Terraform][]. It is designed to be flexible, +performant, and compatible with multiple ecosystems such as Prometheus and +OpenTelemetry. + +Grafana Agent is based around **components**. Components are wired together to +form programmable observability **pipelines** for telemetry collection, +processing, and delivery. 
+ +{{% admonition type="note" %}} +This page focuses mainly on [Flow mode][], the Terraform-inspired variant of Grafana Agent. + +For information on other variants of Grafana Agent, refer to [Introduction to Grafana Agent]({{< relref "./about.md" >}}). +{{% /admonition %}} + +Grafana Agent can collect, transform, and send data to: + +* The [Prometheus][] ecosystem +* The [OpenTelemetry][] ecosystem +* The Grafana open source ecosystem ([Loki][], [Grafana][], [Tempo][], [Mimir][], [Pyroscope][]) + +[Terraform]: https://terraform.io +[Prometheus]: https://prometheus.io +[OpenTelemetry]: https://opentelemetry.io +[Loki]: https://github.com/grafana/loki +[Grafana]: https://github.com/grafana/grafana +[Tempo]: https://github.com/grafana/tempo +[Mimir]: https://github.com/grafana/mimir +[Pyroscope]: https://github.com/grafana/pyroscope + +## Why use Grafana Agent? + +* **Vendor-neutral**: Fully compatible with the Prometheus, OpenTelemetry, and + Grafana open source ecosystems. +* **Every signal**: Collect telemetry data for metrics, logs, traces, and + continuous profiles. +* **Scalable**: Deploy on any number of machines to collect millions of active + series and terabytes of logs. +* **Battle-tested**: Grafana Agent extends the existing battle-tested code from + the Prometheus and OpenTelemetry Collector projects. +* **Powerful**: Write programmable pipelines with ease, and debug them using a + [built-in UI][UI]. +* **Batteries included**: Integrate with systems like MySQL, Kubernetes, and + Apache to get telemetry that's immediately useful. + +## Getting started + +* Choose a [variant][variants] of Grafana Agent to run. +* Refer to the documentation for the variant to use: + * [Static mode][] + * [Static mode Kubernetes operator][] + * [Flow mode][] + +## Supported platforms + +* Linux + + * Minimum version: kernel 2.6.32 or later + * Architectures: AMD64, ARM64 + +* Windows + + * Minimum version: Windows Server 2012 or later, or Windows 10 or later. 
+ * Architectures: AMD64 + +* macOS + + * Minimum version: macOS 10.13 or later + * Architectures: AMD64 (Intel), ARM64 (Apple Silicon) + +* FreeBSD + + * Minimum version: FreeBSD 10 or later + * Architectures: AMD64 + +## Release cadence + +A new minor release is planned every six weeks for the entire Grafana Agent +project, including Static mode, the Static mode Kubernetes operator, and Flow +mode. + +The release cadence is best-effort: releases may be moved forwards or backwards +if needed. The planned release dates for future minor releases do not change if +one minor release is moved. + +Patch and security releases may be created at any time. + +[Milestones]: https://github.com/grafana/agent/milestones + +{{% docs/reference %}} +[variants]: "/docs/agent/ -> /docs/agent//about" +[variants]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/about" + +[Static mode]: "/docs/agent/ -> /docs/agent//static" +[Static mode]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/static" + +[Static mode Kubernetes operator]: "/docs/agent/ -> /docs/agent//operator" +[Static mode Kubernetes operator]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/operator" + +[Flow mode]: "/docs/agent/ -> /docs/agent//flow" +[Flow mode]: "/docs/grafana-cloud/ -> /docs/agent//flow" + +[UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md#grafana-agent-flow-ui" +[UI]: "/docs/grafana-cloud/ -> /docs/agent//flow/monitoring/debugging.md#grafana-agent-flow-ui" +{{% /docs/reference %}} diff --git a/docs/sources/operator/deploy-agent-operator-resources.md b/docs/sources/operator/deploy-agent-operator-resources.md index 7d73fadd4a1a..09132e0d448c 100644 --- a/docs/sources/operator/deploy-agent-operator-resources.md +++ b/docs/sources/operator/deploy-agent-operator-resources.md @@ -62,7 +62,7 @@ To deploy the `GrafanaAgent` resource: labels: app: grafana-agent spec: - image: grafana/agent:v0.37.1 + image: 
grafana/agent:{{< param "AGENT_RELEASE" >}} integrations: selector: matchLabels: diff --git a/docs/sources/operator/getting-started.md b/docs/sources/operator/getting-started.md index 822a7dca01a4..e78a79bb1f62 100644 --- a/docs/sources/operator/getting-started.md +++ b/docs/sources/operator/getting-started.md @@ -79,7 +79,7 @@ To install Agent Operator: serviceAccountName: grafana-agent-operator containers: - name: operator - image: grafana/agent-operator:v0.37.1 + image: grafana/agent-operator:{{< param "AGENT_RELEASE" >}} args: - --kubelet-service=default/kubelet --- diff --git a/docs/sources/static/configuration/integrations/node-exporter-config.md b/docs/sources/static/configuration/integrations/node-exporter-config.md index 28eaccb09e45..eb65fb51d91f 100644 --- a/docs/sources/static/configuration/integrations/node-exporter-config.md +++ b/docs/sources/static/configuration/integrations/node-exporter-config.md @@ -30,7 +30,7 @@ docker run \ -v "/proc:/host/proc:ro,rslave" \ -v /tmp/agent:/etc/agent \ -v /path/to/config.yaml:/etc/agent-config/agent.yaml \ - grafana/agent:v0.37.1 \ + grafana/agent:{{< param "AGENT_RELEASE" >}} \ --config.file=/etc/agent-config/agent.yaml ``` @@ -70,7 +70,7 @@ metadata: name: agent spec: containers: - - image: grafana/agent:v0.37.0 + - image: {{< param "AGENT_RELEASE" >}} name: agent args: - --config.file=/etc/agent-config/agent.yaml diff --git a/docs/sources/static/configuration/integrations/process-exporter-config.md b/docs/sources/static/configuration/integrations/process-exporter-config.md index eee154ad4f8a..a0ba6235148d 100644 --- a/docs/sources/static/configuration/integrations/process-exporter-config.md +++ b/docs/sources/static/configuration/integrations/process-exporter-config.md @@ -22,7 +22,7 @@ docker run \ -v "/proc:/proc:ro" \ -v /tmp/agent:/etc/agent \ -v /path/to/config.yaml:/etc/agent-config/agent.yaml \ - grafana/agent:v0.37.1 \ + grafana/agent:{{< param "AGENT_RELEASE" >}} \ 
--config.file=/etc/agent-config/agent.yaml ``` @@ -39,7 +39,7 @@ metadata: name: agent spec: containers: - - image: grafana/agent:v0.37.1 + - image: grafana/agent:{{< param "AGENT_RELEASE" >}} name: agent args: - --config.file=/etc/agent-config/agent.yaml diff --git a/docs/sources/static/set-up/install/install-agent-docker.md b/docs/sources/static/set-up/install/install-agent-docker.md index a7b0a03e327a..7f32cc4e6d7f 100644 --- a/docs/sources/static/set-up/install/install-agent-docker.md +++ b/docs/sources/static/set-up/install/install-agent-docker.md @@ -34,7 +34,7 @@ To run a Grafana Agent Docker container on Linux, run the following command in a docker run \ -v WAL_DATA_DIRECTORY:/etc/agent/data \ -v CONFIG_FILE_PATH:/etc/agent/agent.yaml \ - grafana/agent:v0.37.1 + grafana/agent:{{< param "AGENT_RELEASE" >}} ``` Replace `CONFIG_FILE_PATH` with the configuration file path on your Linux host system. @@ -51,7 +51,7 @@ To run a Grafana Agent Docker container on Windows, run the following command in docker run ^ -v WAL_DATA_DIRECTORY:C:\etc\grafana-agent\data ^ -v CONFIG_FILE_PATH:C:\etc\grafana-agent ^ - grafana/agent:v0.37.1-windows + grafana/agent:{{< param "AGENT_RELEASE" >}}-windows ``` Replace the following: diff --git a/pkg/operator/defaults.go.t b/pkg/operator/defaults.go.t new file mode 100644 index 000000000000..fe5c2b2b70b6 --- /dev/null +++ b/pkg/operator/defaults.go.t @@ -0,0 +1,15 @@ +package operator + +// Supported versions of the Grafana Agent. +var ( + DefaultAgentVersion = "$AGENT_VERSION" + DefaultAgentBaseImage = "grafana/agent" + DefaultAgentImage = DefaultAgentBaseImage + ":" + DefaultAgentVersion +) + +// Defaults for Prometheus Config Reloader. 
+var ( + DefaultConfigReloaderVersion = "v0.67.1" + DefaultConfigReloaderBaseImage = "quay.io/prometheus-operator/prometheus-config-reloader" + DefaultConfigReloaderImage = DefaultConfigReloaderBaseImage + ":" + DefaultConfigReloaderVersion +) diff --git a/tools/gen-versioned-files/agent-version.txt b/tools/gen-versioned-files/agent-version.txt new file mode 100644 index 000000000000..283d4a015533 --- /dev/null +++ b/tools/gen-versioned-files/agent-version.txt @@ -0,0 +1 @@ +v0.37.1 \ No newline at end of file diff --git a/tools/gen-versioned-files/gen-versioned-files.sh b/tools/gen-versioned-files/gen-versioned-files.sh new file mode 100755 index 000000000000..39d25a006d07 --- /dev/null +++ b/tools/gen-versioned-files/gen-versioned-files.sh @@ -0,0 +1,20 @@ +#!/bin/sh +AGENT_VERSION=$(cat ./tools/gen-versioned-files/agent-version.txt | tr -d '\n') + +if [ -z "$AGENT_VERSION" ]; then + echo "AGENT_VERSION can't be found. Are you running this from the repo root?" + exit 1 +fi + +versionMatcher='^v[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)?$' + +if ! echo "$AGENT_VERSION" | grep -Eq "$versionMatcher"; then + echo "AGENT_VERSION env var is not in the correct format. It should be in the format of vX.Y.Z or vX.Y.Z-rcN" + exit 1 +fi + +templates=$(find . 
-type f -name "*.t" -not -path "./.git/*") +for template in $templates; do + echo "Generating ${template%.t}" + sed -e "s/\$AGENT_VERSION/$AGENT_VERSION/g" < "$template" > "${template%.t}" +done From 08dd42a6fd91669015fdb8f80797461dad9c4f12 Mon Sep 17 00:00:00 2001 From: Piotr <17101802+thampiotr@users.noreply.github.com> Date: Wed, 11 Oct 2023 11:21:36 +0100 Subject: [PATCH 05/14] Add loki.process stage.sampling (#5421) * Add loki.process stage.sampling * fixlint * fixlint * fixlint * feedback, ty --- CHANGELOG.md | 3 + component/loki/process/stages/pipeline.go | 1 + component/loki/process/stages/sampling.go | 104 ++++++++++++++++++ .../loki/process/stages/sampling_test.go | 63 +++++++++++ component/loki/process/stages/stage.go | 35 +++--- .../promtailconvert/internal/build/stages.go | 17 ++- .../testdata/pipeline_stages_part2.river | 9 ++ .../testdata/pipeline_stages_part2.yaml | 5 + .../pipeline_stages_unsupported.diags | 1 - .../testdata/pipeline_stages_unsupported.yaml | 2 - .../flow/reference/components/loki.process.md | 34 +++++- 11 files changed, 248 insertions(+), 26 deletions(-) create mode 100644 component/loki/process/stages/sampling.go create mode 100644 component/loki/process/stages/sampling_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 216e9558c33f..ee6f198c435d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,9 @@ Main (unreleased) - Added a new `stage.decolorize` stage to `loki.process` component which allows to strip ANSI color codes from the log lines. (@thampiotr) +- Added a new `stage.sampling` stage to `loki.process` component which + allows to only process a fraction of logs and drop the rest. 
(@thampiotr) + ### Bugfixes - Fixed an issue where `loki.process` validation for stage `metric.counter` was diff --git a/component/loki/process/stages/pipeline.go b/component/loki/process/stages/pipeline.go index 4be53d34acb7..bd0ef5e15252 100644 --- a/component/loki/process/stages/pipeline.go +++ b/component/loki/process/stages/pipeline.go @@ -40,6 +40,7 @@ type StageConfig struct { ReplaceConfig *ReplaceConfig `river:"replace,block,optional"` StaticLabelsConfig *StaticLabelsConfig `river:"static_labels,block,optional"` StructuredMetadata *LabelsConfig `river:"structured_metadata,block,optional"` + SamplingConfig *SamplingConfig `river:"sampling,block,optional"` TemplateConfig *TemplateConfig `river:"template,block,optional"` TenantConfig *TenantConfig `river:"tenant,block,optional"` TimestampConfig *TimestampConfig `river:"timestamp,block,optional"` diff --git a/component/loki/process/stages/sampling.go b/component/loki/process/stages/sampling.go new file mode 100644 index 000000000000..ebc4f0c6d129 --- /dev/null +++ b/component/loki/process/stages/sampling.go @@ -0,0 +1,104 @@ +package stages + +// NOTE: This code is copied from Promtail (07cbef92268aecc0f20d1791a6df390c2df5c072) with changes kept to the minimum. + +import ( + "fmt" + "math" + "math/rand" + "time" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/uber/jaeger-client-go/utils" +) + +const ( + ErrSamplingStageInvalidRate = "sampling stage failed to parse rate,Sampling Rate must be between 0.0 and 1.0, received %f" +) +const maxRandomNumber = ^(uint64(1) << 63) // i.e. 
0x7fffffffffffffff + +var ( + defaultSamplingpReason = "sampling_stage" +) + +// SamplingConfig contains the configuration for a samplingStage +type SamplingConfig struct { + DropReason *string `river:"drop_counter_reason,attr,optional"` + SamplingRate float64 `river:"rate,attr"` +} + +func (s *SamplingConfig) SetToDefault() { + if s.DropReason == nil || *s.DropReason == "" { + s.DropReason = &defaultSamplingpReason + } +} + +func (s *SamplingConfig) Validate() error { + if s.SamplingRate < 0.0 || s.SamplingRate > 1.0 { + return fmt.Errorf(ErrSamplingStageInvalidRate, s.SamplingRate) + } + return nil +} + +// newSamplingStage creates a SamplingStage from config +// code from jaeger project. +// github.com/uber/jaeger-client-go@v2.30.0+incompatible/tracer.go:126 +func newSamplingStage(logger log.Logger, cfg SamplingConfig, registerer prometheus.Registerer) Stage { + samplingRate := math.Max(0.0, math.Min(cfg.SamplingRate, 1.0)) + samplingBoundary := uint64(float64(maxRandomNumber) * samplingRate) + seedGenerator := utils.NewRand(time.Now().UnixNano()) + source := rand.NewSource(seedGenerator.Int63()) + return &samplingStage{ + logger: log.With(logger, "component", "stage", "type", "sampling"), + cfg: cfg, + dropCount: getDropCountMetric(registerer), + samplingBoundary: samplingBoundary, + source: source, + } +} + +type samplingStage struct { + logger log.Logger + cfg SamplingConfig + dropCount *prometheus.CounterVec + samplingBoundary uint64 + source rand.Source +} + +func (m *samplingStage) Run(in chan Entry) chan Entry { + out := make(chan Entry) + go func() { + defer close(out) + for e := range in { + if m.isSampled() { + out <- e + continue + } + m.dropCount.WithLabelValues(*m.cfg.DropReason).Inc() + } + }() + return out +} + +// code from jaeger project. 
+// github.com/uber/jaeger-client-go@v2.30.0+incompatible/sampler.go:144 +// func (s *ProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) +func (m *samplingStage) isSampled() bool { + return m.samplingBoundary >= m.randomID()&maxRandomNumber +} +func (m *samplingStage) randomID() uint64 { + val := m.randomNumber() + for val == 0 { + val = m.randomNumber() + } + return val +} +func (m *samplingStage) randomNumber() uint64 { + return uint64(m.source.Int63()) +} + +// Name implements Stage +func (m *samplingStage) Name() string { + return StageTypeSampling +} diff --git a/component/loki/process/stages/sampling_test.go b/component/loki/process/stages/sampling_test.go new file mode 100644 index 000000000000..414a2e7ddea1 --- /dev/null +++ b/component/loki/process/stages/sampling_test.go @@ -0,0 +1,63 @@ +package stages + +// NOTE: This code is copied from Promtail (07cbef92268aecc0f20d1791a6df390c2df5c072) with changes kept to the minimum. + +import ( + "fmt" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + util_log "github.com/grafana/loki/pkg/util/log" +) + +var testSamplingRiver = ` +stage.sampling { + rate = 0.5 +} +` + +func TestSamplingPipeline(t *testing.T) { + registry := prometheus.NewRegistry() + pl, err := NewPipeline(util_log.Logger, loadConfig(testSamplingRiver), &plName, registry) + require.NoError(t, err) + + entries := make([]Entry, 0) + for i := 0; i < 100; i++ { + entries = append(entries, newEntry(nil, nil, testMatchLogLineApp1, time.Now())) + } + + out := processEntries(pl, entries..., + ) + // sampling rate = 0.5,entries len = 100, + // The theoretical sample size is 50. 
+ // 50>30 and 50<70 + assert.GreaterOrEqual(t, len(out), 30) + assert.LessOrEqual(t, len(out), 70) +} + +func Test_validateSamplingConfig(t *testing.T) { + tests := []struct { + name string + config *SamplingConfig + wantErr error + }{ + { + name: "Invalid rate", + config: &SamplingConfig{ + SamplingRate: 12, + }, + wantErr: fmt.Errorf(ErrSamplingStageInvalidRate, 12.0), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := tt.config.Validate(); ((err != nil) && (err.Error() != tt.wantErr.Error())) || (err == nil && tt.wantErr != nil) { + t.Errorf("validateDropConfig() error = %v, wantErr = %v", err, tt.wantErr) + } + }) + } +} diff --git a/component/loki/process/stages/stage.go b/component/loki/process/stages/stage.go index 494408ce14f0..9e164f864887 100644 --- a/component/loki/process/stages/stage.go +++ b/component/loki/process/stages/stage.go @@ -24,23 +24,22 @@ const ( StageTypeDocker = "docker" StageTypeDrop = "drop" //TODO(thampiotr): Add support for eventlogmessage stage - StageTypeEventLogMessage = "eventlogmessage" - StageTypeGeoIP = "geoip" - StageTypeJSON = "json" - StageTypeLabel = "labels" - StageTypeLabelAllow = "labelallow" - StageTypeLabelDrop = "labeldrop" - StageTypeLimit = "limit" - StageTypeLogfmt = "logfmt" - StageTypeMatch = "match" - StageTypeMetric = "metrics" - StageTypeMultiline = "multiline" - StageTypeOutput = "output" - StageTypePack = "pack" - StageTypePipeline = "pipeline" - StageTypeRegex = "regex" - StageTypeReplace = "replace" - //TODO(thampiotr): Add support for sampling stage + StageTypeEventLogMessage = "eventlogmessage" + StageTypeGeoIP = "geoip" + StageTypeJSON = "json" + StageTypeLabel = "labels" + StageTypeLabelAllow = "labelallow" + StageTypeLabelDrop = "labeldrop" + StageTypeLimit = "limit" + StageTypeLogfmt = "logfmt" + StageTypeMatch = "match" + StageTypeMetric = "metrics" + StageTypeMultiline = "multiline" + StageTypeOutput = "output" + StageTypePack = "pack" + StageTypePipeline = 
"pipeline" + StageTypeRegex = "regex" + StageTypeReplace = "replace" StageTypeSampling = "sampling" StageTypeStaticLabels = "static_labels" StageTypeStructuredMetadata = "structured_metadata" @@ -233,6 +232,8 @@ func New(logger log.Logger, jobName *string, cfg StageConfig, registerer prometh if err != nil { return nil, err } + case cfg.SamplingConfig != nil: + s = newSamplingStage(logger, *cfg.SamplingConfig, registerer) default: panic(fmt.Sprintf("unreachable; should have decoded into one of the StageConfig fields: %+v", cfg)) } diff --git a/converter/internal/promtailconvert/internal/build/stages.go b/converter/internal/promtailconvert/internal/build/stages.go index 80778c475670..570086509450 100644 --- a/converter/internal/promtailconvert/internal/build/stages.go +++ b/converter/internal/promtailconvert/internal/build/stages.go @@ -216,8 +216,21 @@ func convertLimit(cfg interface{}, diags *diag.Diagnostics) (stages.StageConfig, } func convertSampling(cfg interface{}, diags *diag.Diagnostics) (stages.StageConfig, bool) { - diags.Add(diag.SeverityLevelError, fmt.Sprintf("pipeline_stages.sampling is currently not supported: %v", cfg)) - return stages.StageConfig{}, false + pSampling := &promtailstages.SamplingConfig{} + // NOTE: using WeakDecode to match promtail behaviour + if err := mapstructure.WeakDecode(cfg, pSampling); err != nil { + addInvalidStageError(diags, cfg, err) + return stages.StageConfig{}, false + } + fSampling := &stages.SamplingConfig{} + fSampling.SetToDefault() + fSampling.SamplingRate = pSampling.SamplingRate + if pSampling.DropReason != nil { + fSampling.DropReason = pSampling.DropReason + } + return stages.StageConfig{ + SamplingConfig: fSampling, + }, true } func convertDrop(cfg interface{}, diags *diag.Diagnostics) (stages.StageConfig, bool) { diff --git a/converter/internal/promtailconvert/testdata/pipeline_stages_part2.river b/converter/internal/promtailconvert/testdata/pipeline_stages_part2.river index ea849f30cbde..c5c961b7353f 100644 
--- a/converter/internal/promtailconvert/testdata/pipeline_stages_part2.river +++ b/converter/internal/promtailconvert/testdata/pipeline_stages_part2.river @@ -81,6 +81,15 @@ loki.process "example" { } stage.decolorize { } + + stage.sampling { + rate = 100 + } + + stage.sampling { + drop_counter_reason = "womp womp!" + rate = 0 + } } loki.source.file "example" { diff --git a/converter/internal/promtailconvert/testdata/pipeline_stages_part2.yaml b/converter/internal/promtailconvert/testdata/pipeline_stages_part2.yaml index 078d62268259..31af9c420cba 100644 --- a/converter/internal/promtailconvert/testdata/pipeline_stages_part2.yaml +++ b/converter/internal/promtailconvert/testdata/pipeline_stages_part2.yaml @@ -54,6 +54,11 @@ scrape_configs: source: internet db_type: mmdb - decolorize: { } + - sampling: + rate: 100 + - sampling: + rate: 0 + drop_counter_reason: "womp womp!" kubernetes_sd_configs: - role: pod kubeconfig_file: /home/toby/.kube/config diff --git a/converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.diags b/converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.diags index 58ccf371d93c..ad0ec0999ed7 100644 --- a/converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.diags +++ b/converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.diags @@ -1,2 +1 @@ -(Error) pipeline_stages.sampling is currently not supported: map[rate:100] (Error) pipeline_stages.eventlogmessage is not supported \ No newline at end of file diff --git a/converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.yaml b/converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.yaml index 4e6d5a73d0b6..9c426057c8fe 100644 --- a/converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.yaml +++ b/converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.yaml @@ -3,8 +3,6 @@ clients: scrape_configs: - job_name: example pipeline_stages: - - sampling: - rate: 100 - 
eventlogmessage: { } kubernetes_sd_configs: - role: pod diff --git a/docs/sources/flow/reference/components/loki.process.md b/docs/sources/flow/reference/components/loki.process.md index 5e0136846b3b..d2bcd8e826cf 100644 --- a/docs/sources/flow/reference/components/loki.process.md +++ b/docs/sources/flow/reference/components/loki.process.md @@ -56,11 +56,11 @@ The following blocks are supported inside the definition of `loki.process`: | stage.decolorize | [stage.decolorize][] | Strips ANSI color codes from log lines. | no | | stage.docker | [stage.docker][] | Configures a pre-defined Docker log format pipeline. | no | | stage.drop | [stage.drop][] | Configures a `drop` processing stage. | no | +| stage.geoip | [stage.geoip][] | Configures a `geoip` processing stage. | no | | stage.json | [stage.json][] | Configures a JSON processing stage. | no | | stage.label_drop | [stage.label_drop][] | Configures a `label_drop` processing stage. | no | | stage.label_keep | [stage.label_keep][] | Configures a `label_keep` processing stage. | no | | stage.labels | [stage.labels][] | Configures a `labels` processing stage. | no | -| stage.structured_metadata | [stage.structured_metadata][] | Configures a structured metadata processing stage. | no | | stage.limit | [stage.limit][] | Configures a `limit` processing stage. | no | | stage.logfmt | [stage.logfmt][] | Configures a `logfmt` processing stage. | no | | stage.match | [stage.match][] | Configures a `match` processing stage. | no | @@ -70,11 +70,12 @@ The following blocks are supported inside the definition of `loki.process`: | stage.pack | [stage.pack][] | Configures a `pack` processing stage. | no | | stage.regex | [stage.regex][] | Configures a `regex` processing stage. | no | | stage.replace | [stage.replace][] | Configures a `replace` processing stage. | no | +| stage.sampling | [stage.sampling][] | Samples logs at a given rate. 
| no | | stage.static_labels | [stage.static_labels][] | Configures a `static_labels` processing stage. | no | +| stage.structured_metadata | [stage.structured_metadata][] | Configures a structured metadata processing stage. | no | | stage.template | [stage.template][] | Configures a `template` processing stage. | no | | stage.tenant | [stage.tenant][] | Configures a `tenant` processing stage. | no | | stage.timestamp | [stage.timestamp][] | Configures a `timestamp` processing stage. | no | -| stage.geoip | [stage.geoip][] | Configures a `geoip` processing stage. | no | A user can provide any number of these stage blocks nested inside `loki.process`; these will run in order of appearance in the configuration @@ -84,11 +85,11 @@ file. [stage.decolorize]: #stagedecolorize-block [stage.docker]: #stagedocker-block [stage.drop]: #stagedrop-block +[stage.geoip]: #stagegeoip-block [stage.json]: #stagejson-block [stage.label_drop]: #stagelabel_drop-block [stage.label_keep]: #stagelabel_keep-block [stage.labels]: #stagelabels-block -[stage.structured_metadata]: #stagestructuredmetadata-block [stage.limit]: #stagelimit-block [stage.logfmt]: #stagelogfmt-block [stage.match]: #stagematch-block @@ -98,11 +99,12 @@ file. [stage.pack]: #stagepack-block [stage.regex]: #stageregex-block [stage.replace]: #stagereplace-block +[stage.sampling]: #stagesampling-block [stage.static_labels]: #stagestatic_labels-block +[stage.structured_metadata]: #stagestructuredmetadata-block [stage.template]: #stagetemplate-block [stage.tenant]: #stagetenant-block [stage.timestamp]: #stagetimestamp-block -[stage.geoip]: #stagegeoip-block ### stage.cri block @@ -1098,6 +1100,30 @@ ToLower, ToUpper, Replace, Trim, TrimLeftTrimRight, TrimPrefix, TrimSuffix, Trim "*IP4*{{ .Value | Hash "salt" }}*" ``` +### stage.sampling block + +The `sampling` stage is used to sample the logs. Configuring the value +`rate = 0.1` means that 10% of the logs will continue to be processed. 
The +remaining 90% of the logs will be dropped. + +The following arguments are supported: + +| Name | Type | Description | Default | Required | +|-----------------------|----------|----------------------------------------------------------------------------------------------------|----------------|----------| +| `rate` | `float` | The sampling rate in a range of `[0, 1]` | | yes | +| `drop_counter_reason` | `string` | The label to add to `loki_process_dropped_lines_total` metric when logs are dropped by this stage. | sampling_stage | no | + +For example, the configuration below will sample 25% of the logs and drop the +remaining 75%. When logs are dropped, the `loki_process_dropped_lines_total` +metric is incremented with an additional `reason=logs_sampling` label. + +```river +stage.sampling { + rate = 0.25 + drop_counter_reason = "logs_sampling" +} +``` + ### stage.static_labels block The `stage.static_labels` inner block configures a static_labels processing stage From 3627c89ce1317a19ae905838dd5815b10b9c6503 Mon Sep 17 00:00:00 2001 From: Paulin Todev Date: Wed, 11 Oct 2023 17:24:55 +0100 Subject: [PATCH 06/14] Clarify usage of otelcol.processor.discovery (#5378) * Clarify usage of otelcol.processor.discovery Co-authored-by: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com> --- .../processor/discovery/discovery_test.go | 26 ++++++++++- .../components/otelcol.processor.discovery.md | 44 +++++++++++++++---- 2 files changed, 60 insertions(+), 10 deletions(-) diff --git a/component/otelcol/processor/discovery/discovery_test.go b/component/otelcol/processor/discovery/discovery_test.go index 81db5c713557..de1985cd3946 100644 --- a/component/otelcol/processor/discovery/discovery_test.go +++ b/component/otelcol/processor/discovery/discovery_test.go @@ -145,7 +145,8 @@ func Test_Insert(t *testing.T) { targets = [{ "__address__" = "1.2.2.2", "__internal_label__" = "test_val", - "test_label" = "test_val2"}] + "test_label" = "test_val2", + 
"test.label.with.dots" = "test.val2.with.dots"}] operation_type = "insert" @@ -169,6 +170,10 @@ func Test_Insert(t *testing.T) { { "key": "test_label", "value": { "stringValue": "old_val" } + }, + { + "key": "test.label.with.dots", + "value": { "stringValue": "old_val" } }] }, "scopeSpans": [{ @@ -210,6 +215,10 @@ func Test_Insert(t *testing.T) { { "key": "test_label", "value": { "stringValue": "old_val" } + }, + { + "key": "test.label.with.dots", + "value": { "stringValue": "old_val" } }] }, "scopeSpans": [{ @@ -231,6 +240,10 @@ func Test_Insert(t *testing.T) { { "key": "test_label", "value": { "stringValue": "test_val2" } + }, + { + "key": "test.label.with.dots", + "value": { "stringValue": "test.val2.with.dots" } }] }, "scopeSpans": [{ @@ -253,7 +266,8 @@ func Test_Update(t *testing.T) { targets = [{ "__address__" = "1.2.2.2", "__internal_label__" = "test_val", - "test_label" = "test_val2"}] + "test_label" = "test_val2", + "test.label.with.dots" = "test.val2.with.dots"}] operation_type = "update" @@ -277,6 +291,10 @@ func Test_Update(t *testing.T) { { "key": "test_label", "value": { "stringValue": "old_val" } + }, + { + "key": "test.label.with.dots", + "value": { "stringValue": "old_val" } }] }, "scopeSpans": [{ @@ -318,6 +336,10 @@ func Test_Update(t *testing.T) { { "key": "test_label", "value": { "stringValue": "test_val2" } + }, + { + "key": "test.label.with.dots", + "value": { "stringValue": "test.val2.with.dots" } }] }, "scopeSpans": [{ diff --git a/docs/sources/flow/reference/components/otelcol.processor.discovery.md b/docs/sources/flow/reference/components/otelcol.processor.discovery.md index fc69da8849a2..14d36d9e13b3 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.discovery.md +++ b/docs/sources/flow/reference/components/otelcol.processor.discovery.md @@ -16,12 +16,38 @@ of labels for each discovered target. 
`otelcol.processor.discovery` adds resource attributes to spans which have a hostname matching the one in the `__address__` label provided by the `discovery.*` component. -> **NOTE**: `otelcol.processor.discovery` is a custom component unrelated to any -> processors from the OpenTelemetry Collector. +{{% admonition type="note" %}} +`otelcol.processor.discovery` is a custom component unrelated to any +processors from the OpenTelemetry Collector. +{{% /admonition %}} Multiple `otelcol.processor.discovery` components can be specified by giving them different labels. +{{% admonition type="note" %}} +It can be difficult to follow [OpenTelemetry semantic conventions][OTEL sem conv] when +adding resource attributes via `otelcol.processor.discovery`: +* `discovery.relabel` and most `discovery.*` processes such as `discovery.kubernetes` + can only emit [Prometheus-compatible labels][Prometheus data model]. +* Prometheus labels use underscores (`_`) in labels names, whereas + [OpenTelemetry semantic conventions][OTEL sem conv] use dots (`.`). +* Although `otelcol.processor.discovery` is able to work with non-Prometheus labels + such as ones containing dots, the fact that `discovery.*` components are generally + only compatible with Prometheus naming conventions makes it hard to follow OpenTelemetry + semantic conventions in `otelcol.processor.discovery`. + +If your use case is to add resource attributes which contain Kubernetes metadata, +consider using `otelcol.processor.k8sattributes` instead. + +------ +The main use case for `otelcol.processor.discovery` is for users who migrate to Grafana Agent Flow mode +from Static mode's `prom_sd_operation_type`/`prom_sd_pod_associations` [configuration options][Traces]. 
+
+[Prometheus data model]: https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
+[OTEL sem conv]: https://github.com/open-telemetry/semantic-conventions/blob/main/docs/README.md
+[Traces]: {{< relref "../../../static/configuration/traces-config.md" >}}
+{{% /admonition %}}
+
 ## Usage
 
 ```river
@@ -146,16 +172,18 @@ otelcol.processor.discovery "default" {
 
 ### Using a preconfigured list of attributes
 
-It is not necessary to use a discovery component. In the example below, a `test_label` resource
-attribute will be added to a span if its IP address is "1.2.2.2". The `__internal_label__` will
-be not be added to the span, because it begins with a double underscore (`__`).
+It is not necessary to use a discovery component. In the example below, both the `test_label`
+and `test.label.with.dots` resource attributes will be added to a span if its IP address is
+"1.2.2.2". The `__internal_label__` will not be added to the span, because it begins with
+a double underscore (`__`). 
```river otelcol.processor.discovery "default" { targets = [{ - "__address__" = "1.2.2.2", - "__internal_label__" = "test_val", - "test_label" = "test_val2"}] + "__address__" = "1.2.2.2", + "__internal_label__" = "test_val", + "test_label" = "test_val2", + "test.label.with.dots" = "test.val2.with.dots"}] output { traces = [otelcol.exporter.otlp.default.input] From 93cb6c336cdbd395a530267f17b8e5c565f81ff3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=90=E1=BB=97=20Tr=E1=BB=8Dng=20H=E1=BA=A3i?= <41283691+hainenber@users.noreply.github.com> Date: Thu, 12 Oct 2023 00:22:41 +0700 Subject: [PATCH 07/14] feat(tracing): inject comp ID as instru scope attr (#5286) Signed-off-by: hainenber --- CHANGELOG.md | 2 ++ pkg/flow/tracing/wrap_tracer.go | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ee6f198c435d..9b44e14e5a5e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -204,6 +204,8 @@ v0.37.0 (2023-10-10) - Fix initialization of the RAPL collector for the node_exporter integration and the prometheus.exporter.unix component. (@marctc) +- Set instrumentation scope attribute for traces emitted by Flow component. (@hainenber) + ### Other changes - Use Go 1.21.1 for builds. (@rfratto) diff --git a/pkg/flow/tracing/wrap_tracer.go b/pkg/flow/tracing/wrap_tracer.go index 9ffb5e7b4d38..197e7ce3200b 100644 --- a/pkg/flow/tracing/wrap_tracer.go +++ b/pkg/flow/tracing/wrap_tracer.go @@ -2,6 +2,8 @@ package tracing import ( "context" + "path/filepath" + "strings" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -41,6 +43,12 @@ type wrappedProvider struct { var _ trace.TracerProvider = (*wrappedProvider)(nil) func (wp *wrappedProvider) Tracer(name string, options ...trace.TracerOption) trace.Tracer { + // Inject the component name as instrumentation scope attribute. 
+ // This would not have component's exact ID, aligning with OTEL's definition + if wp.id != "" { + otelComponentName := strings.TrimSuffix(wp.id, filepath.Ext(wp.id)) + options = append(options, trace.WithInstrumentationAttributes(attribute.String(wp.spanName, otelComponentName))) + } innerTracer := wp.inner.Tracer(name, options...) return &wrappedTracer{ inner: innerTracer, From 79c690e31aa0bb3108012727e977c77684d2dfbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=90=E1=BB=97=20Tr=E1=BB=8Dng=20H=E1=BA=A3i?= <41283691+hainenber@users.noreply.github.com> Date: Thu, 12 Oct 2023 02:56:50 +0700 Subject: [PATCH 08/14] Remove deprecated otelcol.exporter.jaeger (#5171) feat(otelcol/exporter): remove deprecated Jaeger exporter --- CHANGELOG.md | 4 + component/all/all.go | 1 - .../kafkatarget/target_syncer_test.go | 4 +- component/otelcol/exporter/jaeger/jaeger.go | 102 --------- .../processor/discovery/discovery_test.go | 8 +- .../components/otelcol.exporter.jaeger.md | 199 ------------------ docs/sources/flow/release-notes.md | 35 +-- .../static/configuration/traces-config.md | 14 +- docs/sources/static/release-notes.md | 16 +- go.mod | 3 +- go.sum | 2 - pkg/traces/config.go | 2 - pkg/traces/config_test.go | 124 ----------- .../promsdprocessor/consumer/consumer.go | 12 +- 14 files changed, 57 insertions(+), 469 deletions(-) delete mode 100644 component/otelcol/exporter/jaeger/jaeger.go delete mode 100644 docs/sources/flow/reference/components/otelcol.exporter.jaeger.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 9b44e14e5a5e..f14513cc1174 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,10 @@ internal API changes are not present. 
Main (unreleased) ----------------- +### Breaking changes + +- Remove `otelcol.exporter.jaeger` component (@hainenber) + ### Features - Added a new `stage.decolorize` stage to `loki.process` component which diff --git a/component/all/all.go b/component/all/all.go index 4d8d0fe8275d..6657a7bc68d5 100644 --- a/component/all/all.go +++ b/component/all/all.go @@ -66,7 +66,6 @@ import ( _ "github.com/grafana/agent/component/otelcol/connector/servicegraph" // Import otelcol.connector.servicegraph _ "github.com/grafana/agent/component/otelcol/connector/spanlogs" // Import otelcol.connector.spanlogs _ "github.com/grafana/agent/component/otelcol/connector/spanmetrics" // Import otelcol.connector.spanmetrics - _ "github.com/grafana/agent/component/otelcol/exporter/jaeger" // Import otelcol.exporter.jaeger _ "github.com/grafana/agent/component/otelcol/exporter/loadbalancing" // Import otelcol.exporter.loadbalancing _ "github.com/grafana/agent/component/otelcol/exporter/logging" // Import otelcol.exporter.logging _ "github.com/grafana/agent/component/otelcol/exporter/loki" // Import otelcol.exporter.loki diff --git a/component/loki/source/internal/kafkatarget/target_syncer_test.go b/component/loki/source/internal/kafkatarget/target_syncer_test.go index 8c1b755b4cde..1e1a417cab5b 100644 --- a/component/loki/source/internal/kafkatarget/target_syncer_test.go +++ b/component/loki/source/internal/kafkatarget/target_syncer_test.go @@ -64,7 +64,7 @@ func Test_TopicDiscovery(t *testing.T) { } group.mut.Unlock() return reflect.DeepEqual([]string{"topic1"}, group.GetTopics()) - }, time.Second, time.Millisecond, "expected topics: %v, got: %v", []string{"topic1"}, group.GetTopics()) + }, 5*time.Second, 100*time.Millisecond, "expected topics: %v, got: %v", []string{"topic1"}, group.GetTopics()) client.UpdateTopics([]string{"topic1", "topic2"}) @@ -75,7 +75,7 @@ func Test_TopicDiscovery(t *testing.T) { } group.mut.Unlock() return reflect.DeepEqual([]string{"topic1", "topic2"}, 
group.GetTopics()) - }, time.Second, time.Millisecond, "expected topics: %v, got: %v", []string{"topic1", "topic2"}, group.GetTopics()) + }, 5*time.Second, 100*time.Millisecond, "expected topics: %v, got: %v", []string{"topic1", "topic2"}, group.GetTopics()) require.NoError(t, ts.Stop()) require.True(t, closed) diff --git a/component/otelcol/exporter/jaeger/jaeger.go b/component/otelcol/exporter/jaeger/jaeger.go deleted file mode 100644 index ca4cf1b84e57..000000000000 --- a/component/otelcol/exporter/jaeger/jaeger.go +++ /dev/null @@ -1,102 +0,0 @@ -// Package jaeger provides an otelcol.exporter.jaeger component. -package jaeger - -import ( - "time" - - "github.com/grafana/agent/component" - "github.com/grafana/agent/component/otelcol" - "github.com/grafana/agent/component/otelcol/exporter" - otel_service "github.com/grafana/agent/service/otel" - "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/jaegerexporter" - otelcomponent "go.opentelemetry.io/collector/component" - otelpexporterhelper "go.opentelemetry.io/collector/exporter/exporterhelper" - otelextension "go.opentelemetry.io/collector/extension" -) - -func init() { - component.Register(component.Registration{ - Name: "otelcol.exporter.jaeger", - Args: Arguments{}, - Exports: otelcol.ConsumerExports{}, - NeedsServices: []string{otel_service.ServiceName}, - - Build: func(opts component.Options, args component.Arguments) (component.Component, error) { - fact := jaegerexporter.NewFactory() - return exporter.New(opts, fact, args.(Arguments)) - }, - }) -} - -// Arguments configures the otelcol.exporter.jaeger component. -type Arguments struct { - Timeout time.Duration `river:"timeout,attr,optional"` - - Queue otelcol.QueueArguments `river:"sending_queue,block,optional"` - Retry otelcol.RetryArguments `river:"retry_on_failure,block,optional"` - - // DebugMetrics configures component internal metrics. Optional. 
- DebugMetrics otelcol.DebugMetricsArguments `river:"debug_metrics,block,optional"` - - Client GRPCClientArguments `river:"client,block"` -} - -var _ exporter.Arguments = Arguments{} - -// DefaultArguments holds default values for Arguments. -var DefaultArguments = Arguments{ - Timeout: otelcol.DefaultTimeout, - Queue: otelcol.DefaultQueueArguments, - Retry: otelcol.DefaultRetryArguments, - Client: DefaultGRPCClientArguments, -} - -// SetToDefault implements river.Defaulter. -func (args *Arguments) SetToDefault() { - *args = DefaultArguments -} - -// Convert implements exporter.Arguments. -func (args Arguments) Convert() (otelcomponent.Config, error) { - return &jaegerexporter.Config{ - TimeoutSettings: otelpexporterhelper.TimeoutSettings{ - Timeout: args.Timeout, - }, - QueueSettings: *args.Queue.Convert(), - RetrySettings: *args.Retry.Convert(), - GRPCClientSettings: *(*otelcol.GRPCClientArguments)(&args.Client).Convert(), - }, nil -} - -// Extensions implements exporter.Arguments. -func (args Arguments) Extensions() map[otelcomponent.ID]otelextension.Extension { - return (*otelcol.GRPCClientArguments)(&args.Client).Extensions() -} - -// Exporters implements exporter.Arguments. -func (args Arguments) Exporters() map[otelcomponent.DataType]map[otelcomponent.ID]otelcomponent.Component { - return nil -} - -// DebugMetricsConfig implements receiver.Arguments. -func (args Arguments) DebugMetricsConfig() otelcol.DebugMetricsArguments { - return args.DebugMetrics -} - -// GRPCClientArguments is used to configure otelcol.exporter.jaeger with -// component-specific defaults. -type GRPCClientArguments otelcol.GRPCClientArguments - -// DefaultGRPCClientArguments holds component-specific default settings for -// GRPCClientArguments. -var DefaultGRPCClientArguments = GRPCClientArguments{ - Headers: map[string]string{}, - Compression: otelcol.CompressionTypeGzip, - WriteBufferSize: 512 * 1024, - BalancerName: "pick_first", -} - -// SetToDefault implements river.Defaulter. 
-func (args *GRPCClientArguments) SetToDefault() { - *args = DefaultGRPCClientArguments -} diff --git a/component/otelcol/processor/discovery/discovery_test.go b/component/otelcol/processor/discovery/discovery_test.go index de1985cd3946..aa909a45dbd7 100644 --- a/component/otelcol/processor/discovery/discovery_test.go +++ b/component/otelcol/processor/discovery/discovery_test.go @@ -237,13 +237,13 @@ func Test_Insert(t *testing.T) { "key": "ip", "value": { "stringValue": "1.2.2.2" } }, - { - "key": "test_label", - "value": { "stringValue": "test_val2" } - }, { "key": "test.label.with.dots", "value": { "stringValue": "test.val2.with.dots" } + }, + { + "key": "test_label", + "value": { "stringValue": "test_val2" } }] }, "scopeSpans": [{ diff --git a/docs/sources/flow/reference/components/otelcol.exporter.jaeger.md b/docs/sources/flow/reference/components/otelcol.exporter.jaeger.md deleted file mode 100644 index 8c12d00bfe50..000000000000 --- a/docs/sources/flow/reference/components/otelcol.exporter.jaeger.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.exporter.jaeger/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.jaeger/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.exporter.jaeger/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.jaeger/ -title: otelcol.exporter.jaeger -description: Learn about otelcol.exporter.jaeger ---- - -# otelcol.exporter.jaeger - -`otelcol.exporter.jaeger` accepts telemetry data from other `otelcol` components -and writes them over the network using the Jaeger protocol. - -> **NOTE**: `otelcol.exporter.jaeger` is a wrapper over the -> [upstream](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/jaegerexporter) -> OpenTelemetry Collector `jaeger` exporter. 
The upstream -> exporter has been deprecated and will be removed from future versions of -> both OpenTelemetry Collector and Grafana Agent because Jaeger supports OTLP directly. - -Multiple `otelcol.exporter.jaeger` components can be specified by giving them -different labels. - -## Usage - -```river -otelcol.exporter.jaeger "LABEL" { - client { - endpoint = "HOST:PORT" - } -} -``` - -## Arguments - -`otelcol.exporter.jaeger` supports the following arguments: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`timeout` | `duration` | Time to wait before marking a request as failed. | `"5s"` | no - -## Blocks - -The following blocks are supported inside the definition of -`otelcol.exporter.jaeger`: - -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -client | [client][] | Configures the gRPC server to send telemetry data to. | yes -client > tls | [tls][] | Configures TLS for the gRPC client. | no -client > keepalive | [keepalive][] | Configures keepalive settings for the gRPC client. | no -sending_queue | [sending_queue][] | Configures batching of data before sending. | no -retry_on_failure | [retry_on_failure][] | Configures retry mechanism for failed requests. | no -debug_metrics | [debug_metrics][] | Configures the metrics that this component generates to monitor its state. | no - -The `>` symbol indicates deeper levels of nesting. For example, `client > tls` -refers to a `tls` block defined inside a `client` block. - -[client]: #client-block -[tls]: #tls-block -[keepalive]: #keepalive-block -[sending_queue]: #sending_queue-block -[retry_on_failure]: #retry_on_failure-block -[debug_metrics]: #debug_metrics-block - -### client block - -The `client` block configures the gRPC client used by the component. 
- -The following arguments are supported: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`endpoint` | `string` | `host:port` to send telemetry data to. | | yes -`compression` | `string` | Compression mechanism to use for requests. | `"gzip"` | no -`read_buffer_size` | `string` | Size of the read buffer the gRPC client to use for reading server responses. | | no -`write_buffer_size` | `string` | Size of the write buffer the gRPC client to use for writing requests. | `"512KiB"` | no -`wait_for_ready` | `boolean` | Waits for gRPC connection to be in the `READY` state before sending data. | `false` | no -`headers` | `map(string)` | Additional headers to send with the request. | `{}` | no -`balancer_name` | `string` | Which gRPC client-side load balancer to use for requests. | `pick_first` | no -`authority` | `string` | Overrides the default `:authority` header in gRPC requests from the gRPC client. | | no -`auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests. | | no - -{{< docs/shared lookup="flow/reference/components/otelcol-compression-field.md" source="agent" version="" >}} - -{{< docs/shared lookup="flow/reference/components/otelcol-grpc-balancer-name.md" source="agent" version="" >}} - -{{< docs/shared lookup="flow/reference/components/otelcol-grpc-authority.md" source="agent" version="" >}} - -An HTTP proxy can be configured through the following environment variables: - -* `HTTPS_PROXY` -* `NO_PROXY` - -The `HTTPS_PROXY` environment variable specifies a URL to use for proxying -requests. Connections to the proxy are established via [the `HTTP CONNECT` -method][HTTP CONNECT]. - -The `NO_PROXY` environment variable is an optional list of comma-separated -hostnames for which the HTTPS proxy should _not_ be used. 
Each hostname can be -provided as an IP address (`1.2.3.4`), an IP address in CIDR notation -(`1.2.3.4/8`), a domain name (`example.com`), or `*`. A domain name matches -that domain and all subdomains. A domain name with a leading "." -(`.example.com`) matches subdomains only. `NO_PROXY` is only read when -`HTTPS_PROXY` is set. - -Because `otelcol.exporter.jaeger` uses gRPC, the configured proxy server must be -able to handle and proxy HTTP/2 traffic. - -[HTTP CONNECT]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/CONNECT - -### tls block - -The `tls` block configures TLS settings used for the connection to the gRPC -server. - -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} - -### keepalive block - -The `keepalive` block configures keepalive settings for gRPC client -connections. - -The following arguments are supported: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`ping_wait` | `duration` | How often to ping the server after no activity. | | no -`ping_response_timeout` | `duration` | Time to wait before closing inactive connections if the server does not respond to a ping. | | no -`ping_without_stream` | `boolean` | Send pings even if there is no active stream request. | | no - -### sending_queue block - -The `sending_queue` block configures an in-memory buffer of batches before data is sent -to the gRPC server. - -{{< docs/shared lookup="flow/reference/components/otelcol-queue-block.md" source="agent" version="" >}} - -### retry_on_failure block - -The `retry_on_failure` block configures how failed requests to the gRPC server are -retried. 
- -{{< docs/shared lookup="flow/reference/components/otelcol-retry-block.md" source="agent" version="" >}} - -### debug_metrics block - -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} - -## Exported fields - -The following fields are exported and can be referenced by other components: - -Name | Type | Description ----- | ---- | ----------- -`input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. - -`input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, -logs, or traces). - -## Component health - -`otelcol.exporter.jaeger` is only reported as unhealthy if given an invalid -configuration. - -## Debug information - -`otelcol.exporter.jaeger` does not expose any component-specific debug -information. - -## Example - -This example accepts OTLP traces over gRPC, sends them to a batch processor and forwards to Jaeger without TLS: - -```river -otelcol.receiver.otlp "default" { - grpc {} - output { - traces = [otelcol.processor.batch.default.input] - } -} - -otelcol.processor.batch "default" { - output { - traces = [otelcol.exporter.jaeger.default.input] - } -} - -otelcol.exporter.jaeger "default" { - client { - endpoint = "jaeger:14250" - tls { - insecure = true - insecure_skip_verify = true - } - } -} -``` diff --git a/docs/sources/flow/release-notes.md b/docs/sources/flow/release-notes.md index 8d4ba736dce3..f45b5d3291f7 100644 --- a/docs/sources/flow/release-notes.md +++ b/docs/sources/flow/release-notes.md @@ -28,6 +28,13 @@ Other release notes for the different Grafana Agent variants are contained on se [release-notes-operator]: {{< relref "../operator/release-notes.md" >}} {{% /admonition %}} +## v0.38 + +### Breaking change: `otelcol.exporter.jaeger` component removed + +The deprecated `otelcol.exporter.jaeger` component has been removed. To send +traces to Jaeger, use `otelcol.exporter.otlp` and a version of Jaeger that +supports OTLP. 
## v0.37 @@ -38,15 +45,15 @@ If you use the Loki processing stage in your Agent configuration, you must renam Old configuration example: ```river -stage.non_indexed_labels { - values = {"app" = ""} +stage.non_indexed_labels { + values = {"app" = ""} } ``` New configuration example: ```river -stage.structured_metadata { - values = {"app" = ""} +stage.structured_metadata { + values = {"app" = ""} } ``` @@ -69,14 +76,14 @@ needs a label. Old configuration example: -```river -prometheus.exporter.unix { /* ... */ } +```river +prometheus.exporter.unix { /* ... */ } ``` New configuration example: -```river -prometheus.exporter.unix "example" { /* ... */ } +```river +prometheus.exporter.unix "example" { /* ... */ } ``` ## v0.36 @@ -116,11 +123,11 @@ How to migrate: format = "" } } - ``` + ``` where the `` is the appropriate compression format - see [`loki.source.file` documentation][loki-source-file-docs] for details. - + [loki-source-file-docs]: {{< relref "./reference/components/loki.source.file.md" >}} ## v0.35 @@ -288,7 +295,7 @@ prometehus.scrape "example" { ### Breaking change: The algorithm for the "hash" action of `otelcol.processor.attributes` has changed The hash produced when using `action = "hash"` in the `otelcol.processor.attributes` flow component is now using the more secure SHA-256 algorithm. -The change was made in PR [#22831](https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/22831) of opentelemetry-collector-contrib. +The change was made in PR [#22831](https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/22831) of opentelemetry-collector-contrib. ### Breaking change: `otelcol.exporter.loki` now includes instrumentation scope in its output @@ -316,16 +323,16 @@ Additional `instrumentation_scope` information will be added to the OTLP log sig ### Breaking change: `otelcol.extension.jaeger_remote_sampling` removes the `/` HTTP endpoint The `/` HTTP endpoint was the same as the `/sampling` endpoint. 
The `/sampling` endpoint is still functional. -The change was made in PR [#18070](https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/18070) of opentelemetry-collector-contrib. +The change was made in PR [#18070](https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/18070) of opentelemetry-collector-contrib. ### Breaking change: The `remote_sampling` block has been removed from `otelcol.receiver.jaeger` -The `remote_sampling` block in `otelcol.receiver.jaeger` has been an undocumented no-op configuration for some time, and has now been removed. +The `remote_sampling` block in `otelcol.receiver.jaeger` has been an undocumented no-op configuration for some time, and has now been removed. Customers are advised to use `otelcol.extension.jaeger_remote_sampling` instead. ### Deprecation: `otelcol.exporter.jaeger` has been deprecated and will be removed in Agent v0.38.0. -This is because Jaeger supports OTLP directly and OpenTelemetry Collector is also removing its +This is because Jaeger supports OTLP directly and OpenTelemetry Collector is also removing its [Jaeger receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/jaegerexporter). ## v0.34 diff --git a/docs/sources/static/configuration/traces-config.md b/docs/sources/static/configuration/traces-config.md index 2ed6315d7fb1..dbd27dc34245 100644 --- a/docs/sources/static/configuration/traces-config.md +++ b/docs/sources/static/configuration/traces-config.md @@ -66,9 +66,9 @@ remote_write: [ protocol: | default = "grpc" | supported = "grpc", "http" ] # Controls what format to use when exporting traces, in combination with protocol. - # protocol/format supported combinations are grpc/otlp, http/otlp and grpc/jaeger + # protocol/format supported combinations are grpc/otlp and http/otlp. # Only grpc/otlp is supported in Grafana Cloud. 
- [ format: | default = "otlp" | supported = "otlp", "jaeger" ] + [ format: | default = "otlp" | supported = "otlp" ] # Controls whether or not TLS is required. See https://godoc.org/google.golang.org/grpc#WithInsecure [ insecure: | default = false ] @@ -323,11 +323,11 @@ load_balancing: [ port: | default = 4317 ] # Resolver interval [ interval: | default = 5s ] - # Resolver timeout + # Resolver timeout [ timeout: | default = 1s ] # routing_key can be either "traceID" or "service": - # * "service": exports spans based on their service name. + # * "service": exports spans based on their service name. # * "traceID": exports spans based on their traceID. [ routing_key: | default = "traceID" ] @@ -415,9 +415,9 @@ service_graphs: # jaeger_remote_sampling configures one or more jaeger remote sampling extensions. # For more details about the configuration please consult the OpenTelemetry documentation: # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.85.0/extension/jaegerremotesampling -# +# # Example config: -# +# # jaeger_remote_sampling: # - source: # remote: @@ -427,7 +427,7 @@ service_graphs: # - source: # reload_interval: 1s # file: /etc/otelcol/sampling_strategies.json -# +# jaeger_remote_sampling: [ - ... ] ``` diff --git a/docs/sources/static/release-notes.md b/docs/sources/static/release-notes.md index f78df5ccde06..f22381e48442 100644 --- a/docs/sources/static/release-notes.md +++ b/docs/sources/static/release-notes.md @@ -15,7 +15,7 @@ The release notes provide information about deprecations and breaking changes in For a complete list of changes to Grafana Agent, with links to pull requests and related issues when available, refer to the [Changelog](https://github.com/grafana/agent/blob/main/CHANGELOG.md). -> **Note:** These release notes are specific to Grafana Agent static mode. +> **Note:** These release notes are specific to Grafana Agent static mode. 
> Other release notes for the different Grafana Agent variants are contained on separate pages: > > * [Static mode Kubernetes operator release notes][release-notes-operator] @@ -32,6 +32,14 @@ For a complete list of changes to Grafana Agent, with links to pull requests and [Modules]: "/docs/grafana-cloud/ -> /docs/agent//flow/concepts/modules" {{% /docs/reference %}} +## v0.38 + +### Breaking change: support for exporting Jaeger traces removed + +The deprecated support for exporting Jaeger-formatted traces has been removed. +To send traces to Jaeger, export OTLP-formatted data to a version of Jaeger +that supports OTLP. + ## v0.37 ### Breaking change: The default value of `retry_on_http_429` is overriden to `true` for the `queue_config` in `remote_write` in `metrics` config. @@ -77,9 +85,9 @@ New configuration example: ### Breaking change: Jaeger remote sampling no longer configurable using the Jaeger receiver -Jaeger remote sampling used to be configured using the Jaeger receiver configuration. This receiver was updated to a new version, where support for remote sampling in the receiver was removed. +Jaeger remote sampling used to be configured using the Jaeger receiver configuration. This receiver was updated to a new version, where support for remote sampling in the receiver was removed. -Jaeger remote sampling is available as a separate configuration field starting in v0.35.3. +Jaeger remote sampling is available as a separate configuration field starting in v0.35.3. Old configuration example: @@ -104,7 +112,7 @@ jaeger_remote_sampling: ### Breaking change: `auth` and `version` attributes from `walk_params` block of SNMP integration have been removed -The SNMP integrations (both v1 and v2) wrap a new version of SNMP exporter which introduces a new configuration file format. +The SNMP integrations (both v1 and v2) wrap a new version of SNMP exporter which introduces a new configuration file format. 
This new format separates the walk and metric mappings from the connection and authentication settings. This allows for easier configuration of different auth params without having to duplicate the full walk and metric mapping. diff --git a/go.mod b/go.mod index ce7abbae2856..8aa5f697740f 100644 --- a/go.mod +++ b/go.mod @@ -103,7 +103,6 @@ require ( github.com/oliver006/redis_exporter v1.54.0 github.com/open-telemetry/opentelemetry-collector-contrib/connector/servicegraphconnector v0.85.0 github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.85.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/jaegerexporter v0.85.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.85.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.85.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.85.0 @@ -530,7 +529,7 @@ require ( github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect github.com/prometheus-community/go-runit v0.1.0 // indirect github.com/prometheus/alertmanager v0.26.0 // indirect - github.com/prometheus/common/sigv4 v0.1.0 // indirect + github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/remeh/sizedwaitgroup v1.0.0 // indirect diff --git a/go.sum b/go.sum index 13fba5d7d3b5..98f051f2ff5f 100644 --- a/go.sum +++ b/go.sum @@ -1737,8 +1737,6 @@ github.com/open-telemetry/opentelemetry-collector-contrib/connector/servicegraph github.com/open-telemetry/opentelemetry-collector-contrib/connector/servicegraphconnector v0.85.0/go.mod h1:CA1v4fzjPVIu/YEDD9Q6FppAGhZGBBxQ+MKAvMERd0o= github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.85.0 
h1:r75lKRDTUz+qCMO5Bghe1o6Snef7Vov55ycQChB/vps= github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.85.0/go.mod h1:enlrQwrV86jYc8w5rCrvJkXBjZqPUyexs53pFY7YjXo= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/jaegerexporter v0.85.0 h1:NyqJDAgm3VC1UNrTW0gv2auQoTlaCEcit5c0On0oYG4= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/jaegerexporter v0.85.0/go.mod h1:F8TX5dXc8VcF789x8UAu1fIWta16sGZMxhfGLNes+L8= github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.85.0 h1:zCFV4nSdHziunsWYB/Zwsy2C4W88KlACyENQyjLZHw8= github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.85.0/go.mod h1:ADyJg6g2zJ3t6FMGOdw0PjmFhZhDxNT2QVFdt6ZFl5k= github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.85.0 h1:qBUujYIkqkctwNkmnAF8ajbVN357DQNbOPp6VAe4ldE= diff --git a/pkg/traces/config.go b/pkg/traces/config.go index 7a36ac45705c..dbd6e28a9a44 100644 --- a/pkg/traces/config.go +++ b/pkg/traces/config.go @@ -13,7 +13,6 @@ import ( promsdconsumer "github.com/grafana/agent/pkg/traces/promsdprocessor/consumer" "github.com/mitchellh/mapstructure" - "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/jaegerexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling" @@ -899,7 +898,6 @@ func tracingFactories() (otelcol.Factories, error) { exporters, err := otelexporter.MakeFactoryMap( otlpexporter.NewFactory(), otlphttpexporter.NewFactory(), - jaegerexporter.NewFactory(), loadbalancingexporter.NewFactory(), prometheusexporter.NewFactory(), remotewriteexporter.NewFactory(), diff --git a/pkg/traces/config_test.go b/pkg/traces/config_test.go index 6a123cb3f329..89157f809ba7 100644 --- 
a/pkg/traces/config_test.go +++ b/pkg/traces/config_test.go @@ -1027,130 +1027,6 @@ service: exporters: ["otlp/0"] processors: ["service_graphs"] receivers: ["push_receiver", "jaeger"] -`, - }, - { - name: "jaeger exporter", - cfg: ` -receivers: - jaeger: - protocols: - grpc: -remote_write: - - insecure: true - format: jaeger - endpoint: example.com:12345 -`, - expectedConfig: ` -receivers: - push_receiver: {} - jaeger: - protocols: - grpc: -exporters: - jaeger/0: - endpoint: example.com:12345 - compression: gzip - tls: - insecure: true - retry_on_failure: - max_elapsed_time: 60s -processors: {} -extensions: {} -service: - pipelines: - traces: - exporters: ["jaeger/0"] - processors: [] - receivers: ["push_receiver", "jaeger"] -`, - }, - { - name: "jaeger exporter with basic auth", - cfg: ` -receivers: - jaeger: - protocols: - grpc: -remote_write: - - insecure: true - format: jaeger - protocol: grpc - basic_auth: - username: test - password_file: ` + passwordFile.Name() + ` - endpoint: example.com:12345 -`, - expectedConfig: ` -receivers: - push_receiver: {} - jaeger: - protocols: - grpc: -exporters: - jaeger/0: - endpoint: example.com:12345 - compression: gzip - tls: - insecure: true - headers: - authorization: Basic dGVzdDpwYXNzd29yZF9pbl9maWxl - retry_on_failure: - max_elapsed_time: 60s -processors: {} -extensions: {} -service: - pipelines: - traces: - exporters: ["jaeger/0"] - processors: [] - receivers: ["push_receiver", "jaeger"] -`, - }, - { - name: "two exporters different format", - cfg: ` -receivers: - jaeger: - protocols: - grpc: -remote_write: - - insecure: true - format: jaeger - endpoint: example.com:12345 - - insecure: true - format: otlp - endpoint: something.com:123 -`, - expectedConfig: ` -receivers: - push_receiver: {} - jaeger: - protocols: - grpc: -exporters: - jaeger/0: - endpoint: example.com:12345 - compression: gzip - tls: - insecure: true - retry_on_failure: - max_elapsed_time: 60s - otlp/1: - endpoint: something.com:123 - compression: gzip 
- tls: - insecure: true - retry_on_failure: - max_elapsed_time: 60s -processors: {} -extensions: {} -service: - pipelines: - traces: - exporters: ["jaeger/0", "otlp/1"] - processors: [] - receivers: ["push_receiver", "jaeger"] `, }, { diff --git a/pkg/traces/promsdprocessor/consumer/consumer.go b/pkg/traces/promsdprocessor/consumer/consumer.go index 79df98fe7aa1..f24e5f9fe159 100644 --- a/pkg/traces/promsdprocessor/consumer/consumer.go +++ b/pkg/traces/promsdprocessor/consumer/consumer.go @@ -160,17 +160,17 @@ func (c *Consumer) processAttributes(ctx context.Context, attrs pcommon.Map) { return } - for k, v := range labels { + for _, label := range labels.Labels() { switch c.opts.OperationType { case OperationTypeUpsert: - attrs.PutStr(k, v) + attrs.PutStr(label.Name, label.Value) case OperationTypeInsert: - if _, ok := attrs.Get(k); !ok { - attrs.PutStr(k, v) + if _, ok := attrs.Get(label.Name); !ok { + attrs.PutStr(label.Name, label.Value) } case OperationTypeUpdate: - if toVal, ok := attrs.Get(k); ok { - toVal.SetStr(v) + if toVal, ok := attrs.Get(label.Name); ok { + toVal.SetStr(label.Value) } } } From a0c55a76a467c957a0ea2419d806aab076f3e5bd Mon Sep 17 00:00:00 2001 From: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com> Date: Wed, 11 Oct 2023 14:43:00 -0700 Subject: [PATCH 09/14] Add systemd steps to Grafana Agent standalone binary docs (#5440) * Cleanup and simplify * Update for wording consistency * Update docs/sources/flow/setup/start-agent.md Co-authored-by: Robert Fratto * Fix agent command * Update to match the default configuration * Minor clarifications in var names --------- Co-authored-by: Robert Fratto --- docs/sources/flow/setup/start-agent.md | 92 +++++++++++++++++++++++--- 1 file changed, 84 insertions(+), 8 deletions(-) diff --git a/docs/sources/flow/setup/start-agent.md b/docs/sources/flow/setup/start-agent.md index 6ea7902d3048..22f962d361a6 100644 --- a/docs/sources/flow/setup/start-agent.md +++ 
b/docs/sources/flow/setup/start-agent.md @@ -28,7 +28,7 @@ To start Grafana Agent, run the following command in a terminal window: sudo systemctl start grafana-agent-flow ``` -(Optional) Verify that the service is running: +(Optional) To verify that the service is running, run the following command in a terminal window: ```shell sudo systemctl status grafana-agent-flow @@ -80,7 +80,7 @@ brew services start grafana-agent-flow Grafana Agent automatically runs when the system starts. -(Optional) Verify that the service is running: +(Optional) To verify that the service is running, run the following command in a terminal window: ```shell brew services info grafana-agent-flow @@ -150,13 +150,13 @@ If you downloaded the standalone binary, you must run the agent from a terminal To start Grafana Agent on Linux, macOS, or FreeBSD, run the following command in a terminal window: ```shell -AGENT_MODE=flow BINARY_PATH run CONFIG_FILE +AGENT_MODE=flow BINARY_PATH run CONFIG_PATH ``` Replace the following: -* `BINARY_PATH`: The path to the Grafana Agent binary file -* `CONFIG_FILE`: The path to the Grafana Agent configuration file. +* `BINARY_PATH`: The path and Grafana Agent binary filename. +* `CONFIG_PATH`: The path and Grafana Agent configuration filename. ### Start Grafana Agent on Windows @@ -164,12 +164,88 @@ To start Grafana Agent on Windows, run the following commands in a command promp ```cmd set AGENT_MODE=flow -BINARY_PATH run CONFIG_FILE +BINARY_PATH run CONFIG_PATH ``` Replace the following: -* `BINARY_PATH`: The path to the Grafana Agent binary file -* `CONFIG_FILE`: The path to the Grafana Agent configuration file. +* `BINARY_PATH`: The path and Grafana Agent binary filename. +* `CONFIG_PATH`: The path and Grafana Agent configuration filename. + +### Set up Grafana Agent as a Linux systemd service + +You can set up and manage the standalone binary for Grafana Agent as a Linux systemd service. 
+ +{{% admonition type="note" %}} +These steps assume you have a default systemd and Grafana Agent configuration. +{{% /admonition %}} + +1. To create a new user called `grafana-agent-flow` run the following command in a terminal window: + + ```shell + sudo useradd --no-create-home --shell /bin/false grafana-agent-flow + ``` + +1. Create a service file in `/etc/systemd/system` called `grafana-agent-flow.service` with the following contents: + + ```shell + [Unit] + Description=Vendor-neutral programmable observability pipelines. + Documentation=https://grafana.com/docs/agent/latest/flow/ + Wants=network-online.target + After=network-online.target + + [Service] + Restart=always + User=grafana-agent-flow + Environment=HOSTNAME=%H + EnvironmentFile=/etc/default/grafana-agent-flow + WorkingDirectory=WORKING_PATH + ExecStart=BINARY_PATH run $CUSTOM_ARGS --storage.path=WORKING_PATH $CONFIG_FILE + ExecReload=/usr/bin/env kill -HUP $MAINPID + TimeoutStopSec=20s + SendSIGKILL=no + + [Install] + WantedBy=multi-user.target + ``` + + Replace the following: + + * `BINARY_PATH`: The path and Grafana Agent binary filename. + * `WORKING_PATH`: The path to a working directory, for example `/var/lib/grafana-agent-flow`. + +1. Create an environment file in `/etc/default/` called `grafana-agent-flow` with the following contents: + + ```shell + ## Path: + ## Description: Grafana Agent Flow settings + ## Type: string + ## Default: "" + ## ServiceRestart: grafana-agent-flow + # + # Command line options for grafana-agent + # + # The configuration file holding the agent config. + CONFIG_FILE="CONFIG_PATH" + + # User-defined arguments to pass to the run command. + CUSTOM_ARGS="" + + # Restart on system upgrade. Defaults to true. + RESTART_ON_UPGRADE=true + ``` + + Replace the following: + + * `CONFIG_PATH`: The path and Grafana Agent configuration filename. + +1. 
To reload the service files, run the following command in a terminal window: + + ```shell + sudo systemctl daemon-reload + ``` + +1. Use the [Linux](#linux) systemd commands to manage your standalone Linux installation of Grafana Agent. [release]: https://github.com/grafana/agent/releases/latest From da72bf7e9d48e1ee9fee88d761254e1336c0bbc0 Mon Sep 17 00:00:00 2001 From: Piotr <17101802+thampiotr@users.noreply.github.com> Date: Thu, 12 Oct 2023 14:55:21 +0100 Subject: [PATCH 10/14] Add loki.process stage.eventlogmessage (#5432) * Add loki.process stage.eventlogmessage * feedback, ty * fixlint --- CHANGELOG.md | 3 + .../loki/process/stages/eventlogmessage.go | 134 ++++++ .../process/stages/eventlogmessage_test.go | 393 ++++++++++++++++++ component/loki/process/stages/pipeline.go | 49 +-- component/loki/process/stages/stage.go | 2 + .../promtailconvert/internal/build/stages.go | 21 +- .../testdata/pipeline_stages_part2.river | 6 + .../testdata/pipeline_stages_part2.yaml | 4 + .../pipeline_stages_unsupported.diags | 1 - .../pipeline_stages_unsupported.river | 24 -- .../testdata/pipeline_stages_unsupported.yaml | 12 - .../flow/reference/components/loki.process.md | 147 +++++-- 12 files changed, 686 insertions(+), 110 deletions(-) create mode 100644 component/loki/process/stages/eventlogmessage.go create mode 100644 component/loki/process/stages/eventlogmessage_test.go delete mode 100644 converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.diags delete mode 100644 converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.river delete mode 100644 converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index f14513cc1174..fa2f2432c2fe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,9 @@ Main (unreleased) - Added a new `stage.sampling` stage to `loki.process` component which allows to only process a fraction of logs and drop the rest. 
(@thampiotr) +- Added a new `stage.eventlogmessage` stage to `loki.process` component which + allows to extract data from Windows Event Log. (@thampiotr) + ### Bugfixes - Fixed an issue where `loki.process` validation for stage `metric.counter` was diff --git a/component/loki/process/stages/eventlogmessage.go b/component/loki/process/stages/eventlogmessage.go new file mode 100644 index 000000000000..2a31023e2e6c --- /dev/null +++ b/component/loki/process/stages/eventlogmessage.go @@ -0,0 +1,134 @@ +package stages + +import ( + "fmt" + "strings" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/common/model" +) + +const ( + defaultSource = "message" +) + +type EventLogMessageConfig struct { + Source string `river:"source,attr,optional"` + DropInvalidLabels bool `river:"drop_invalid_labels,attr,optional"` + OverwriteExisting bool `river:"overwrite_existing,attr,optional"` +} + +func (e *EventLogMessageConfig) Validate() error { + if !model.LabelName(e.Source).IsValid() { + return fmt.Errorf(ErrInvalidLabelName, e.Source) + } + return nil +} + +func (e *EventLogMessageConfig) SetToDefault() { + e.Source = defaultSource +} + +type eventLogMessageStage struct { + cfg *EventLogMessageConfig + logger log.Logger +} + +// Create a event log message stage, including validating any supplied configuration +func newEventLogMessageStage(logger log.Logger, cfg *EventLogMessageConfig) Stage { + return &eventLogMessageStage{ + cfg: cfg, + logger: log.With(logger, "component", "stage", "type", "eventlogmessage"), + } +} + +func (m *eventLogMessageStage) Run(in chan Entry) chan Entry { + out := make(chan Entry) + key := m.cfg.Source + go func() { + defer close(out) + for e := range in { + err := m.processEntry(e.Extracted, key) + if err != nil { + continue + } + out <- e + } + }() + return out +} + +// Process a event log message from extracted with the specified key, adding additional +// entries into the extracted map +func (m 
*eventLogMessageStage) processEntry(extracted map[string]interface{}, key string) error { + value, ok := extracted[key] + if !ok { + if Debug { + level.Debug(m.logger).Log("msg", "source not in the extracted values", "source", key) + } + return nil + } + s, err := getString(value) + if err != nil { + level.Warn(m.logger).Log("msg", "invalid label value parsed", "value", value) + return err + } + lines := strings.Split(s, "\r\n") + for _, line := range lines { + parts := strings.SplitN(line, ":", 2) + if len(parts) < 2 { + level.Warn(m.logger).Log("msg", "invalid line parsed from message", "line", line) + continue + } + mkey := parts[0] + if !model.LabelName(mkey).IsValid() { + if m.cfg.DropInvalidLabels { + if Debug { + level.Debug(m.logger).Log("msg", "invalid label parsed from message", "key", mkey) + } + continue + } + mkey = SanitizeFullLabelName(mkey) + } + if _, ok := extracted[mkey]; ok && !m.cfg.OverwriteExisting { + level.Info(m.logger).Log("msg", "extracted key that already existed, appending _extracted to key", + "key", mkey) + mkey += "_extracted" + } + mval := strings.TrimSpace(parts[1]) + if !model.LabelValue(mval).IsValid() { + if Debug { + level.Debug(m.logger).Log("msg", "invalid value parsed from message", "value", mval) + } + continue + } + extracted[mkey] = mval + } + if Debug { + level.Debug(m.logger).Log("msg", "extracted data debug in event_log_message stage", + "extracted data", fmt.Sprintf("%v", extracted)) + } + return nil +} + +func (m *eventLogMessageStage) Name() string { + return StageTypeEventLogMessage +} + +// Sanitize a input string to convert it into a valid prometheus label +// TODO: switch to prometheus/prometheus/util/strutil/SanitizeFullLabelName +func SanitizeFullLabelName(input string) string { + if len(input) == 0 { + return "_" + } + var validSb strings.Builder + for i, b := range input { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + validSb.WriteRune('_') + } 
else { + validSb.WriteRune(b) + } + } + return validSb.String() +} diff --git a/component/loki/process/stages/eventlogmessage_test.go b/component/loki/process/stages/eventlogmessage_test.go new file mode 100644 index 000000000000..8e6edba8626e --- /dev/null +++ b/component/loki/process/stages/eventlogmessage_test.go @@ -0,0 +1,393 @@ +package stages + +import ( + "errors" + "fmt" + "regexp" + "strings" + "testing" + "time" + + util_log "github.com/grafana/loki/pkg/util/log" + "github.com/grafana/river" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var testEvtLogMsgYamlDefaults = ` +stage.eventlogmessage {} +` + +var testEvtLogMsgYamlCustomSource = ` +stage.eventlogmessage { source = "Message" } +` + +var testEvtLogMsgYamlDropInvalidLabels = ` +stage.eventlogmessage { drop_invalid_labels = true } +` + +var testEvtLogMsgYamlOverwriteExisting = ` +stage.eventlogmessage { overwrite_existing = true } +` + +var ( + testEvtLogMsgSimple = "Key1: Value 1\r\nKey2: Value 2\r\nKey3: Value: 3" + testEvtLogMsgInvalidLabels = "Key 1: Value 1\r\n0Key2: Value 2\r\nKey@3: Value 3\r\n: Value 4" + testEvtLogMsgOverwriteTest = "test: new value" +) + +func TestEventLogMessage_simple(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + config string + sourcekey string + msgdata string + extractedValues map[string]interface{} + }{ + "successfully ran a pipeline with sample event log message stage using default source": { + testEvtLogMsgYamlDefaults, + "message", + testEvtLogMsgSimple, + map[string]interface{}{ + "Key1": "Value 1", + "Key2": "Value 2", + "Key3": "Value: 3", + "test": "existing value", + }, + }, + "successfully ran a pipeline with sample event log message stage using custom source": { + testEvtLogMsgYamlCustomSource, + "Message", + testEvtLogMsgSimple, + map[string]interface{}{ + "Key1": "Value 1", + "Key2": "Value 2", + "Key3": "Value: 3", + "test": "existing value", + }, 
+ }, + "successfully ran a pipeline with sample event log message stage containing invalid labels": { + testEvtLogMsgYamlDefaults, + "message", + testEvtLogMsgInvalidLabels, + map[string]interface{}{ + "Key_1": "Value 1", + "_Key2": "Value 2", + "Key_3": "Value 3", + "_": "Value 4", + "test": "existing value", + }, + }, + "successfully ran a pipeline with sample event log message stage without overwriting existing labels": { + testEvtLogMsgYamlDefaults, + "message", + testEvtLogMsgOverwriteTest, + map[string]interface{}{ + "test": "existing value", + "test_extracted": "new value", + }, + }, + "successfully ran a pipeline with sample event log message stage overwriting existing labels": { + testEvtLogMsgYamlOverwriteExisting, + "message", + testEvtLogMsgOverwriteTest, + map[string]interface{}{ + "test": "new value", + }, + }, + } + + for testName, testData := range tests { + testData := testData + testData.extractedValues[testData.sourcekey] = testData.msgdata + + t.Run(testName, func(t *testing.T) { + t.Parallel() + + pl, err := NewPipeline(util_log.Logger, loadConfig(testData.config), nil, prometheus.DefaultRegisterer) + assert.NoError(t, err, "Expected pipeline creation to not result in error") + out := processEntries(pl, + newEntry(map[string]interface{}{ + testData.sourcekey: testData.msgdata, + "test": "existing value", + }, nil, testData.msgdata, time.Now()))[0] + assert.Equal(t, testData.extractedValues, out.Extracted) + }) + } +} + +func TestEventLogMessageConfig_validate(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + config string + err error + }{ + "valid config": { + `stage.eventlogmessage { source = "msg"}`, + nil, + }, + "invalid config": { + `stage.eventlogmessage { source = 1}`, + errors.New("invalid label name: 1"), + }, + "invalid source": { + `stage.eventlogmessage { source = "the message"}`, + fmt.Errorf(ErrInvalidLabelName, "the message"), + }, + "empty source": { + `stage.eventlogmessage { source = ""}`, + 
fmt.Errorf(ErrInvalidLabelName, ""), + }, + } + for tName, tt := range tests { + tt := tt + t.Run(tName, func(t *testing.T) { + var config Configs + err := river.Unmarshal([]byte(tt.config), &config) + if err == nil { + require.Len(t, config.Stages, 1) + err = config.Stages[0].EventLogMessageConfig.Validate() + } + + if err == nil && tt.err != nil { + assert.NotNil(t, err, "EventLogMessage.validate() expected error = %v, but got nil", tt.err) + } + if err != nil { + assert.Equal(t, tt.err.Error(), err.Error(), "EventLogMessage.validate() expected error = %v, actual error = %v", tt.err, err) + } + }) + } +} + +var testEvtLogMsgNetworkConn = "Network connection detected:\r\nRuleName: Usermode\r\n" + + "UtcTime: 2023-01-31 08:07:23.782\r\nProcessGuid: {44ffd2c7-cc3a-63d8-2002-000000000d00}\r\n" + + "ProcessId: 7344\r\nImage: C:\\Users\\User\\promtail\\promtail-windows-amd64.exe\r\n" + + "User: WINTEST2211\\User\r\nProtocol: tcp\r\nInitiated: true\r\nSourceIsIpv6: false\r\n" + + "SourceIp: 10.0.2.15\r\nSourceHostname: WinTest2211..\r\nSourcePort: 49992\r\n" + + "SourcePortName: -\r\nDestinationIsIpv6: false\r\nDestinationIp: 34.117.8.58\r\n" + + "DestinationHostname: 58.8.117.34.bc.googleusercontent.com\r\nDestinationPort: 443\r\n" + + "DestinationPortName: https" + +func TestEventLogMessage_Real(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + config string + sourcekey string + msgdata string + extractedValues map[string]interface{} + }{ + "successfully ran a pipeline with network event log message stage using default source": { + testEvtLogMsgYamlDefaults, + "message", + testEvtLogMsgNetworkConn, + map[string]interface{}{ + "Network_connection_detected": "", + "RuleName": "Usermode", + "UtcTime": "2023-01-31 08:07:23.782", + "ProcessGuid": "{44ffd2c7-cc3a-63d8-2002-000000000d00}", + "ProcessId": "7344", + "Image": "C:\\Users\\User\\promtail\\promtail-windows-amd64.exe", + "User": "WINTEST2211\\User", + "Protocol": "tcp", + "Initiated": "true", + 
"SourceIsIpv6": "false", + "SourceIp": "10.0.2.15", + "SourceHostname": "WinTest2211..", + "SourcePort": "49992", + "SourcePortName": "-", + "DestinationIsIpv6": "false", + "DestinationIp": "34.117.8.58", + "DestinationHostname": "58.8.117.34.bc.googleusercontent.com", + "DestinationPort": "443", + "DestinationPortName": "https", + }, + }, + "successfully ran a pipeline with network event log message stage using custom source": { + testEvtLogMsgYamlCustomSource, + "Message", + testEvtLogMsgNetworkConn, + map[string]interface{}{ + "Network_connection_detected": "", + "RuleName": "Usermode", + "UtcTime": "2023-01-31 08:07:23.782", + "ProcessGuid": "{44ffd2c7-cc3a-63d8-2002-000000000d00}", + "ProcessId": "7344", + "Image": "C:\\Users\\User\\promtail\\promtail-windows-amd64.exe", + "User": "WINTEST2211\\User", + "Protocol": "tcp", + "Initiated": "true", + "SourceIsIpv6": "false", + "SourceIp": "10.0.2.15", + "SourceHostname": "WinTest2211..", + "SourcePort": "49992", + "SourcePortName": "-", + "DestinationIsIpv6": "false", + "DestinationIp": "34.117.8.58", + "DestinationHostname": "58.8.117.34.bc.googleusercontent.com", + "DestinationPort": "443", + "DestinationPortName": "https", + }, + }, + "successfully ran a pipeline with network event log message stage dropping invalid labels": { + testEvtLogMsgYamlDropInvalidLabels, + "message", + testEvtLogMsgNetworkConn, + map[string]interface{}{ + "RuleName": "Usermode", + "UtcTime": "2023-01-31 08:07:23.782", + "ProcessGuid": "{44ffd2c7-cc3a-63d8-2002-000000000d00}", + "ProcessId": "7344", + "Image": "C:\\Users\\User\\promtail\\promtail-windows-amd64.exe", + "User": "WINTEST2211\\User", + "Protocol": "tcp", + "Initiated": "true", + "SourceIsIpv6": "false", + "SourceIp": "10.0.2.15", + "SourceHostname": "WinTest2211..", + "SourcePort": "49992", + "SourcePortName": "-", + "DestinationIsIpv6": "false", + "DestinationIp": "34.117.8.58", + "DestinationHostname": "58.8.117.34.bc.googleusercontent.com", + "DestinationPort": "443", + 
"DestinationPortName": "https", + }, + }, + } + + for testName, testData := range tests { + testData := testData + testData.extractedValues[testData.sourcekey] = testData.msgdata + + t.Run(testName, func(t *testing.T) { + t.Parallel() + + pl, err := NewPipeline(util_log.Logger, loadConfig(testData.config), nil, prometheus.DefaultRegisterer) + assert.NoError(t, err, "Expected pipeline creation to not result in error") + out := processEntries(pl, + newEntry(map[string]interface{}{testData.sourcekey: testData.msgdata}, nil, testData.msgdata, time.Now()))[0] + assert.Equal(t, testData.extractedValues, out.Extracted) + }) + } +} + +var ( + testEvtLogMsgInvalidStructure = "\n\rwhat; is this?\n\r" + testEvtLogMsgInvalidValue = "Key1: " + string([]byte{0xff, 0xfe, 0xfd}) +) + +func TestEventLogMessage_invalid(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + config string + sourcekey string + msgdata string + extractedValues map[string]interface{} + }{ + "successfully ran a pipeline with an invalid event log message": { + testEvtLogMsgYamlDefaults, + "message", + testEvtLogMsgInvalidStructure, + map[string]interface{}{}, + }, + "successfully ran a pipeline with sample event log message stage on the wrong default source": { + testEvtLogMsgYamlDefaults, + "notmessage", + testEvtLogMsgSimple, + map[string]interface{}{}, + }, + "successfully ran a pipeline with sample event log message stage dropping invalid labels": { + testEvtLogMsgYamlDropInvalidLabels, + "message", + testEvtLogMsgInvalidLabels, + map[string]interface{}{}, + }, + "successfully ran a pipeline with an invalid event log message value (not UTF-8)": { + testEvtLogMsgYamlDefaults, + "message", + testEvtLogMsgInvalidValue, + map[string]interface{}{}, + }, + } + + for testName, testData := range tests { + testData := testData + testData.extractedValues[testData.sourcekey] = testData.msgdata + + t.Run(testName, func(t *testing.T) { + t.Parallel() + + pl, err := NewPipeline(util_log.Logger, 
loadConfig(testData.config), nil, prometheus.DefaultRegisterer) + assert.NoError(t, err, "Expected pipeline creation to not result in error") + out := processEntries(pl, + newEntry(map[string]interface{}{testData.sourcekey: testData.msgdata}, nil, testData.msgdata, time.Now()))[0] + assert.Equal(t, testData.extractedValues, out.Extracted) + }) + } +} + +func TestEventLogMessage_invalidString(t *testing.T) { + t.Parallel() + + pl, err := NewPipeline(util_log.Logger, loadConfig(testEvtLogMsgYamlDefaults), nil, prometheus.DefaultRegisterer) + assert.NoError(t, err, "Expected pipeline creation to not result in error") + out := processEntries(pl, + newEntry(map[string]interface{}{"message": nil}, nil, "", time.Now())) + assert.Len(t, out, 0, "No output should be produced with a nil input") +} + +var ( + inputJustKey = "Key 1:" + inputBoth = "Key 1: Value 1" + RegexSplitKeyValue = regexp.MustCompile(": ?") +) + +func BenchmarkSplittingKeyValuesRegex(b *testing.B) { + for i := 0; i < b.N; i++ { + var val string + resultKey := RegexSplitKeyValue.Split(inputJustKey, 2) + if len(resultKey) > 1 { + val = resultKey[1] + } + resultKeyValue := RegexSplitKeyValue.Split(inputBoth, 2) + if len(resultKeyValue) > 1 { + val = resultKeyValue[1] + } + _ = val + } +} + +func BenchmarkSplittingKeyValuesSplitTrim(b *testing.B) { + for i := 0; i < b.N; i++ { + var val string + resultKey := strings.SplitN(inputJustKey, ":", 2) + if len(resultKey) > 1 { + val = strings.TrimSpace(resultKey[1]) + } + resultKeyValue := strings.SplitN(inputBoth, ":", 2) + if len(resultKey) > 1 { + val = strings.TrimSpace(resultKeyValue[1]) + } + _ = val + } +} + +func BenchmarkSplittingKeyValuesSplitSubstr(b *testing.B) { + for i := 0; i < b.N; i++ { + var val string + resultKey := strings.SplitN(inputJustKey, ":", 2) + if len(resultKey) > 1 && len(resultKey[1]) > 0 { + val = resultKey[1][1:] + } + resultKeyValue := strings.SplitN(inputBoth, ":", 2) + if len(resultKey) > 1 && len(resultKey[1]) > 0 { + val = 
resultKeyValue[1][1:] + } + _ = val + } +} diff --git a/component/loki/process/stages/pipeline.go b/component/loki/process/stages/pipeline.go index bd0ef5e15252..fb7d30cce637 100644 --- a/component/loki/process/stages/pipeline.go +++ b/component/loki/process/stages/pipeline.go @@ -20,30 +20,31 @@ import ( // exactly one is set. type StageConfig struct { //TODO(thampiotr): sync these with new stages - CRIConfig *CRIConfig `river:"cri,block,optional"` - DecolorizeConfig *DecolorizeConfig `river:"decolorize,block,optional"` - DockerConfig *DockerConfig `river:"docker,block,optional"` - DropConfig *DropConfig `river:"drop,block,optional"` - GeoIPConfig *GeoIPConfig `river:"geoip,block,optional"` - JSONConfig *JSONConfig `river:"json,block,optional"` - LabelAllowConfig *LabelAllowConfig `river:"label_keep,block,optional"` - LabelDropConfig *LabelDropConfig `river:"label_drop,block,optional"` - LabelsConfig *LabelsConfig `river:"labels,block,optional"` - LimitConfig *LimitConfig `river:"limit,block,optional"` - LogfmtConfig *LogfmtConfig `river:"logfmt,block,optional"` - MatchConfig *MatchConfig `river:"match,block,optional"` - MetricsConfig *MetricsConfig `river:"metrics,block,optional"` - MultilineConfig *MultilineConfig `river:"multiline,block,optional"` - OutputConfig *OutputConfig `river:"output,block,optional"` - PackConfig *PackConfig `river:"pack,block,optional"` - RegexConfig *RegexConfig `river:"regex,block,optional"` - ReplaceConfig *ReplaceConfig `river:"replace,block,optional"` - StaticLabelsConfig *StaticLabelsConfig `river:"static_labels,block,optional"` - StructuredMetadata *LabelsConfig `river:"structured_metadata,block,optional"` - SamplingConfig *SamplingConfig `river:"sampling,block,optional"` - TemplateConfig *TemplateConfig `river:"template,block,optional"` - TenantConfig *TenantConfig `river:"tenant,block,optional"` - TimestampConfig *TimestampConfig `river:"timestamp,block,optional"` + CRIConfig *CRIConfig `river:"cri,block,optional"` + 
DecolorizeConfig *DecolorizeConfig `river:"decolorize,block,optional"` + DockerConfig *DockerConfig `river:"docker,block,optional"` + DropConfig *DropConfig `river:"drop,block,optional"` + EventLogMessageConfig *EventLogMessageConfig `river:"eventlogmessage,block,optional"` + GeoIPConfig *GeoIPConfig `river:"geoip,block,optional"` + JSONConfig *JSONConfig `river:"json,block,optional"` + LabelAllowConfig *LabelAllowConfig `river:"label_keep,block,optional"` + LabelDropConfig *LabelDropConfig `river:"label_drop,block,optional"` + LabelsConfig *LabelsConfig `river:"labels,block,optional"` + LimitConfig *LimitConfig `river:"limit,block,optional"` + LogfmtConfig *LogfmtConfig `river:"logfmt,block,optional"` + MatchConfig *MatchConfig `river:"match,block,optional"` + MetricsConfig *MetricsConfig `river:"metrics,block,optional"` + MultilineConfig *MultilineConfig `river:"multiline,block,optional"` + OutputConfig *OutputConfig `river:"output,block,optional"` + PackConfig *PackConfig `river:"pack,block,optional"` + RegexConfig *RegexConfig `river:"regex,block,optional"` + ReplaceConfig *ReplaceConfig `river:"replace,block,optional"` + StaticLabelsConfig *StaticLabelsConfig `river:"static_labels,block,optional"` + StructuredMetadata *LabelsConfig `river:"structured_metadata,block,optional"` + SamplingConfig *SamplingConfig `river:"sampling,block,optional"` + TemplateConfig *TemplateConfig `river:"template,block,optional"` + TenantConfig *TenantConfig `river:"tenant,block,optional"` + TimestampConfig *TimestampConfig `river:"timestamp,block,optional"` } var rateLimiter *rate.Limiter diff --git a/component/loki/process/stages/stage.go b/component/loki/process/stages/stage.go index 9e164f864887..1a27830ae735 100644 --- a/component/loki/process/stages/stage.go +++ b/component/loki/process/stages/stage.go @@ -234,6 +234,8 @@ func New(logger log.Logger, jobName *string, cfg StageConfig, registerer prometh } case cfg.SamplingConfig != nil: s = newSamplingStage(logger, 
*cfg.SamplingConfig, registerer) + case cfg.EventLogMessageConfig != nil: + s = newEventLogMessageStage(logger, cfg.EventLogMessageConfig) default: panic(fmt.Sprintf("unreachable; should have decoded into one of the StageConfig fields: %+v", cfg)) } diff --git a/converter/internal/promtailconvert/internal/build/stages.go b/converter/internal/promtailconvert/internal/build/stages.go index 570086509450..85ec697603cd 100644 --- a/converter/internal/promtailconvert/internal/build/stages.go +++ b/converter/internal/promtailconvert/internal/build/stages.go @@ -83,7 +83,7 @@ func convertStage(st interface{}, diags *diag.Diagnostics) (stages.StageConfig, case promtailstages.StageTypeDecolorize: return convertDecolorize(diags) case promtailstages.StageTypeEventLogMessage: - return convertEventLogMessage(diags) + return convertEventLogMessage(iCfg, diags) case promtailstages.StageTypeGeoIP: return convertGeoIP(iCfg, diags) case promtailstages.StageTypeStructuredMetadata: @@ -121,9 +121,22 @@ func convertGeoIP(cfg interface{}, diags *diag.Diagnostics) (stages.StageConfig, }, true } -func convertEventLogMessage(diags *diag.Diagnostics) (stages.StageConfig, bool) { - diags.Add(diag.SeverityLevelError, "pipeline_stages.eventlogmessage is not supported") - return stages.StageConfig{}, false +func convertEventLogMessage(cfg interface{}, diags *diag.Diagnostics) (stages.StageConfig, bool) { + pCfg := &promtailstages.EventLogMessageConfig{} + if err := mapstructure.Decode(cfg, pCfg); err != nil { + addInvalidStageError(diags, cfg, err) + return stages.StageConfig{}, false + } + result := &stages.EventLogMessageConfig{} + result.SetToDefault() + result.DropInvalidLabels = pCfg.DropInvalidLabels + result.OverwriteExisting = pCfg.OverwriteExisting + if pCfg.Source != nil { + result.Source = *pCfg.Source + } + return stages.StageConfig{ + EventLogMessageConfig: result, + }, true } func convertDecolorize(_ *diag.Diagnostics) (stages.StageConfig, bool) { diff --git 
a/converter/internal/promtailconvert/testdata/pipeline_stages_part2.river b/converter/internal/promtailconvert/testdata/pipeline_stages_part2.river index c5c961b7353f..afe10179c0f0 100644 --- a/converter/internal/promtailconvert/testdata/pipeline_stages_part2.river +++ b/converter/internal/promtailconvert/testdata/pipeline_stages_part2.river @@ -90,6 +90,12 @@ loki.process "example" { drop_counter_reason = "womp womp!" rate = 0 } + + stage.eventlogmessage { + source = "something" + drop_invalid_labels = true + overwrite_existing = true + } } loki.source.file "example" { diff --git a/converter/internal/promtailconvert/testdata/pipeline_stages_part2.yaml b/converter/internal/promtailconvert/testdata/pipeline_stages_part2.yaml index 31af9c420cba..32a8ffd500fa 100644 --- a/converter/internal/promtailconvert/testdata/pipeline_stages_part2.yaml +++ b/converter/internal/promtailconvert/testdata/pipeline_stages_part2.yaml @@ -59,6 +59,10 @@ scrape_configs: - sampling: rate: 0 drop_counter_reason: "womp womp!" 
+ - eventlogmessage: + source: something + drop_invalid_labels: true + overwrite_existing: true kubernetes_sd_configs: - role: pod kubeconfig_file: /home/toby/.kube/config diff --git a/converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.diags b/converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.diags deleted file mode 100644 index ad0ec0999ed7..000000000000 --- a/converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.diags +++ /dev/null @@ -1 +0,0 @@ -(Error) pipeline_stages.eventlogmessage is not supported \ No newline at end of file diff --git a/converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.river b/converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.river deleted file mode 100644 index 30fe46e377c2..000000000000 --- a/converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.river +++ /dev/null @@ -1,24 +0,0 @@ -discovery.kubernetes "example" { - role = "pod" - kubeconfig_file = "/home/toby/.kube/config" -} - -local.file_match "example" { - path_targets = discovery.kubernetes.example.targets -} - -loki.process "example" { - forward_to = [loki.write.default.receiver] -} - -loki.source.file "example" { - targets = local.file_match.example.targets - forward_to = [loki.process.example.receiver] -} - -loki.write "default" { - endpoint { - url = "http://localhost/loki/api/v1/push" - } - external_labels = {} -} diff --git a/converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.yaml b/converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.yaml deleted file mode 100644 index 9c426057c8fe..000000000000 --- a/converter/internal/promtailconvert/testdata/pipeline_stages_unsupported.yaml +++ /dev/null @@ -1,12 +0,0 @@ -clients: - - url: http://localhost/loki/api/v1/push -scrape_configs: - - job_name: example - pipeline_stages: - - eventlogmessage: { } - kubernetes_sd_configs: - - role: pod - kubeconfig_file: 
/home/toby/.kube/config - -tracing: { enabled: false } -server: { register_instrumentation: false } \ No newline at end of file diff --git a/docs/sources/flow/reference/components/loki.process.md b/docs/sources/flow/reference/components/loki.process.md index d2bcd8e826cf..1c40dc9f2a06 100644 --- a/docs/sources/flow/reference/components/loki.process.md +++ b/docs/sources/flow/reference/components/loki.process.md @@ -50,32 +50,33 @@ loki.process "LABEL" { The following blocks are supported inside the definition of `loki.process`: -| Hierarchy | Block | Description | Required | -|---------------------------|-------------------------------|------------------------------------------------------|----------| -| stage.cri | [stage.cri][] | Configures a pre-defined CRI-format pipeline. | no | -| stage.decolorize | [stage.decolorize][] | Strips ANSI color codes from log lines. | no | -| stage.docker | [stage.docker][] | Configures a pre-defined Docker log format pipeline. | no | -| stage.drop | [stage.drop][] | Configures a `drop` processing stage. | no | -| stage.geoip | [stage.geoip][] | Configures a `geoip` processing stage. | no | -| stage.json | [stage.json][] | Configures a JSON processing stage. | no | -| stage.label_drop | [stage.label_drop][] | Configures a `label_drop` processing stage. | no | -| stage.label_keep | [stage.label_keep][] | Configures a `label_keep` processing stage. | no | -| stage.labels | [stage.labels][] | Configures a `labels` processing stage. | no | -| stage.limit | [stage.limit][] | Configures a `limit` processing stage. | no | -| stage.logfmt | [stage.logfmt][] | Configures a `logfmt` processing stage. | no | -| stage.match | [stage.match][] | Configures a `match` processing stage. | no | -| stage.metrics | [stage.metrics][] | Configures a `metrics` stage. | no | -| stage.multiline | [stage.multiline][] | Configures a `multiline` processing stage. | no | -| stage.output | [stage.output][] | Configures an `output` processing stage. 
| no | -| stage.pack | [stage.pack][] | Configures a `pack` processing stage. | no | -| stage.regex | [stage.regex][] | Configures a `regex` processing stage. | no | -| stage.replace | [stage.replace][] | Configures a `replace` processing stage. | no | -| stage.sampling | [stage.sampling][] | Samples logs at a given rate. | no | -| stage.static_labels | [stage.static_labels][] | Configures a `static_labels` processing stage. | no | -| stage.structured_metadata | [stage.structured_metadata][] | Configures a structured metadata processing stage. | no | -| stage.template | [stage.template][] | Configures a `template` processing stage. | no | -| stage.tenant | [stage.tenant][] | Configures a `tenant` processing stage. | no | -| stage.timestamp | [stage.timestamp][] | Configures a `timestamp` processing stage. | no | +| Hierarchy | Block | Description | Required | +|---------------------------|-------------------------------|----------------------------------------------------------------|----------| +| stage.cri | [stage.cri][] | Configures a pre-defined CRI-format pipeline. | no | +| stage.decolorize | [stage.decolorize][] | Strips ANSI color codes from log lines. | no | +| stage.docker | [stage.docker][] | Configures a pre-defined Docker log format pipeline. | no | +| stage.drop | [stage.drop][] | Configures a `drop` processing stage. | no | +| stage.eventlogmessage | [stage.eventlogmessage][] | Extracts data from the Message field in the Windows Event Log. | no | +| stage.geoip | [stage.geoip][] | Configures a `geoip` processing stage. | no | +| stage.json | [stage.json][] | Configures a JSON processing stage. | no | +| stage.label_drop | [stage.label_drop][] | Configures a `label_drop` processing stage. | no | +| stage.label_keep | [stage.label_keep][] | Configures a `label_keep` processing stage. | no | +| stage.labels | [stage.labels][] | Configures a `labels` processing stage. | no | +| stage.limit | [stage.limit][] | Configures a `limit` processing stage. 
| no | +| stage.logfmt | [stage.logfmt][] | Configures a `logfmt` processing stage. | no | +| stage.match | [stage.match][] | Configures a `match` processing stage. | no | +| stage.metrics | [stage.metrics][] | Configures a `metrics` stage. | no | +| stage.multiline | [stage.multiline][] | Configures a `multiline` processing stage. | no | +| stage.output | [stage.output][] | Configures an `output` processing stage. | no | +| stage.pack | [stage.pack][] | Configures a `pack` processing stage. | no | +| stage.regex | [stage.regex][] | Configures a `regex` processing stage. | no | +| stage.replace | [stage.replace][] | Configures a `replace` processing stage. | no | +| stage.sampling | [stage.sampling][] | Samples logs at a given rate. | no | +| stage.static_labels | [stage.static_labels][] | Configures a `static_labels` processing stage. | no | +| stage.structured_metadata | [stage.structured_metadata][] | Configures a structured metadata processing stage. | no | +| stage.template | [stage.template][] | Configures a `template` processing stage. | no | +| stage.tenant | [stage.tenant][] | Configures a `tenant` processing stage. | no | +| stage.timestamp | [stage.timestamp][] | Configures a `timestamp` processing stage. | no | A user can provide any number of these stage blocks nested inside `loki.process`; these will run in order of appearance in the configuration @@ -85,6 +86,7 @@ file. [stage.decolorize]: #stagedecolorize-block [stage.docker]: #stagedocker-block [stage.drop]: #stagedrop-block +[stage.eventlogmessage]: #stageeventlogmessage-block [stage.geoip]: #stagegeoip-block [stage.json]: #stagejson-block [stage.label_drop]: #stagelabel_drop-block @@ -261,6 +263,61 @@ stage.drop { } ``` +### stage.eventlogmessage block + +The `eventlogmessage` stage extracts data from the Message string that appears +in the Windows Event Log. 
+ +The following arguments are supported: + +| Name | Type | Description | Default | Required | +|-----------------------|----------|--------------------------------------------------------|-----------|----------| +| `source` | `string` | Name of the field in the extracted data to parse. | `message` | no | +| `overwrite_existing` | `bool` | Whether to overwrite existing extracted data fields. | `false` | no | +| `drop_invalid_labels` | `bool` | Whether to drop fields that are not valid label names. | `false` | no | + +When `overwrite_existing` is set to `true`, the stage overwrites existing extracted data +fields with the same name. If set to `false`, the `_extracted` suffix will be +appended to an already existing field name. + +When `drop_invalid_labels` is set to `true`, the stage drops fields that are +not valid label names. If set to `false`, the stage will automatically convert +them into valid labels replacing invalid characters with underscores. + +#### Example combined with `stage.json` + +```river +stage.json { + expressions = { + message = "", + Overwritten = "", + } +} + +stage.eventlogmessage { + source = "message" + overwrite_existing = true +} +``` + +Given the following log line: +``` +{"event_id": 1, "Overwritten": "old", "message": "Message type:\r\nOverwritten: new\r\nImage: C:\\Users\\User\\agent.exe"} +``` + +The first stage would create the following key-value pairs in the set of +extracted data: + +- `message`: `Message type:\r\nOverwritten: new\r\nImage: C:\Users\User\agent.exe` +- `Overwritten`: `old` + +The second stage will parse the value of `message` from the extracted data +and append/overwrite the following key-value pairs to the set of extracted data: + +- `Image`: `C:\\Users\\User\\agent.exe` +- `Message_type`: (empty string) +- `Overwritten`: `new` + ### stage.json block The `stage.json` inner block configures a JSON processing stage that parses incoming @@ -600,13 +657,13 @@ Defines a metric whose value only goes up. 
The following arguments are supported: | Name | Type | Description | Default | Required | -| ------------------- | ---------- | -------------------------------------------------------------------------------------------------------- | ------------------------ | -------- | +|---------------------|------------|----------------------------------------------------------------------------------------------------------|--------------------------|----------| | `name` | `string` | The metric name. | | yes | | `action` | `string` | The action to take. Valid actions are `set`, `inc`, `dec`,` add`, or `sub`. | | yes | | `description` | `string` | The metric's description and help text. | `""` | no | | `source` | `string` | Key from the extracted data map to use for the metric. Defaults to the metric name. | `""` | no | | `prefix` | `string` | The prefix to the metric name. | `"loki_process_custom_"` | no | -| `max_idle_duration` | `duration` | Maximum amount of time to wait until the metric is marked as 'stale' and removed. | `"5m"` | no | +| `max_idle_duration` | `duration` | Maximum amount of time to wait until the metric is marked as 'stale' and removed. | `"5m"` | no | | `value` | `string` | If set, the metric only changes if `source` exactly matches the `value`. | `""` | no | | `match_all` | `bool` | If set to true, all log lines are counted, without attemptng to match the `source` to the extracted map. | `false` | no | | `count_entry_bytes` | `bool` | If set to true, counts all log lines bytes. | `false` | no | @@ -624,15 +681,15 @@ Defines a gauge metric whose value can go up or down. The following arguments are supported: -| Name | Type | Description | Default | Required | -| --------------- | ---------- | ----------------------------------------------------------------------------------- | ------------------------ | -------- | -| `name` | `string` | The metric name. | | yes | -| `action` | `string` | The action to take. Valid actions are `inc` and `add`. 
| | yes | -| `description` | `string` | The metric's description and help text. | `""` | no | -| `source` | `string` | Key from the extracted data map to use for the metric. Defaults to the metric name. | `""` | no | -| `prefix` | `string` | The prefix to the metric name. | `"loki_process_custom_"` | no | +| Name | Type | Description | Default | Required | +|---------------------|------------|-------------------------------------------------------------------------------------|--------------------------|----------| +| `name` | `string` | The metric name. | | yes | +| `action` | `string` | The action to take. Valid actions are `inc` and `add`. | | yes | +| `description` | `string` | The metric's description and help text. | `""` | no | +| `source` | `string` | Key from the extracted data map to use for the metric. Defaults to the metric name. | `""` | no | +| `prefix` | `string` | The prefix to the metric name. | `"loki_process_custom_"` | no | | `max_idle_duration` | `duration` | Maximum amount of time to wait until the metric is marked as 'stale' and removed. | `"5m"` | no | -| `value` | `string` | If set, the metric only changes if `source` exactly matches the `value`. | `""` | no | +| `value` | `string` | If set, the metric only changes if `source` exactly matches the `value`. | `""` | no | The valid `action` values are `inc`, `dec`, `set`, `add`, or `sub`. @@ -647,15 +704,15 @@ Defines a histogram metric whose values are recorded in predefined buckets. The following arguments are supported: -| Name | Type | Description | Default | Required | -| --------------- | ------------- | ----------------------------------------------------------------------------------- | ------------------------ | -------- | -| `name` | `string` | The metric name. | | yes | -| `buckets` | `list(float)` | The action to take. Valid actions are `set`, `inc`, `dec`,` add`, or `sub`. | | yes | -| `description` | `string` | The metric's description and help text. 
| `""` | no | -| `source` | `string` | Key from the extracted data map to use for the metric. Defaults to the metric name. | `""` | no | -| `prefix` | `string` | The prefix to the metric name. | `"loki_process_custom_"` | no | +| Name | Type | Description | Default | Required | +|---------------------|---------------|-------------------------------------------------------------------------------------|--------------------------|----------| +| `name` | `string` | The metric name. | | yes | +| `buckets` | `list(float)` | The action to take. Valid actions are `set`, `inc`, `dec`,` add`, or `sub`. | | yes | +| `description` | `string` | The metric's description and help text. | `""` | no | +| `source` | `string` | Key from the extracted data map to use for the metric. Defaults to the metric name. | `""` | no | +| `prefix` | `string` | The prefix to the metric name. | `"loki_process_custom_"` | no | | `max_idle_duration` | `duration` | Maximum amount of time to wait until the metric is marked as 'stale' and removed. | `"5m"` | no | -| `value` | `string` | If set, the metric only changes if `source` exactly matches the `value`. | `""` | no | +| `value` | `string` | If set, the metric only changes if `source` exactly matches the `value`. 
| `""` | no | #### metrics behavior @@ -906,7 +963,7 @@ embedded labels are removed from the original log entry: { "_entry": "something went wrong", "env": "dev", - "user_id": "f8fas0r", + "user_id": "f8fas0r" } ``` From 5f5fc055d79a4cf8ecf2d959d25304fe23d7ea12 Mon Sep 17 00:00:00 2001 From: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com> Date: Thu, 12 Oct 2023 07:54:04 -0700 Subject: [PATCH 11/14] Update the Grafana Agent start topic (#5455) * Small changes for clarification * Add file to description --- docs/sources/flow/setup/start-agent.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/sources/flow/setup/start-agent.md b/docs/sources/flow/setup/start-agent.md index 22f962d361a6..a09dfb2e72e9 100644 --- a/docs/sources/flow/setup/start-agent.md +++ b/docs/sources/flow/setup/start-agent.md @@ -155,8 +155,8 @@ AGENT_MODE=flow BINARY_PATH run CONFIG_PATH Replace the following: -* `BINARY_PATH`: The path and Grafana Agent binary filename. -* `CONFIG_PATH`: The path and Grafana Agent configuration filename. +* `BINARY_PATH`: The path to the Grafana Agent binary file. +* `CONFIG_PATH`: The path to the Grafana Agent configuration file. ### Start Grafana Agent on Windows @@ -169,8 +169,8 @@ BINARY_PATH run CONFIG_PATH Replace the following: -* `BINARY_PATH`: The path and Grafana Agent binary filename. -* `CONFIG_PATH`: The path and Grafana Agent configuration filename. +* `BINARY_PATH`: The path to the Grafana Agent binary file. +* `CONFIG_PATH`: The path to the Grafana Agent configuration file. ### Set up Grafana Agent as a Linux systemd service @@ -188,7 +188,7 @@ These steps assume you have a default systemd and Grafana Agent configuration. 1. Create a service file in `/etc/systemd/system` called `grafana-agent-flow.service` with the following contents: - ```shell + ```systemd [Unit] Description=Vendor-neutral programmable observability pipelines. 
Documentation=https://grafana.com/docs/agent/latest/flow/ @@ -200,7 +200,7 @@ These steps assume you have a default systemd and Grafana Agent configuration. User=grafana-agent-flow Environment=HOSTNAME=%H EnvironmentFile=/etc/default/grafana-agent-flow - WorkingDirectory=WORKING_PATH + WorkingDirectory=WORKING_DIRECTORY ExecStart=BINARY_PATH run $CUSTOM_ARGS --storage.path=WORKING_PATH $CONFIG_FILE ExecReload=/usr/bin/env kill -HUP $MAINPID TimeoutStopSec=20s @@ -212,8 +212,8 @@ These steps assume you have a default systemd and Grafana Agent configuration. Replace the following: - * `BINARY_PATH`: The path and Grafana Agent binary filename. - * `WORKING_PATH`: The path to a working directory, for example `/var/lib/grafana-agent-flow`. + * `BINARY_PATH`: The path to the Grafana Agent binary file. + * `WORKING_DIRECTORY`: The path to a working directory, for example `/var/lib/grafana-agent-flow`. 1. Create an environment file in `/etc/default/` called `grafana-agent-flow` with the following contents: @@ -238,7 +238,7 @@ These steps assume you have a default systemd and Grafana Agent configuration. Replace the following: - * `CONFIG_PATH`: The path and Grafana Agent configuration filename. + * `CONFIG_PATH`: The path to the Grafana Agent configuration file. 1. To reload the service files, run the following command in a terminal window: From 73c2642b37fbe4ed2e0e24c9423eeee540d43490 Mon Sep 17 00:00:00 2001 From: Maxime Le Conte des Floris Date: Thu, 12 Oct 2023 17:18:14 +0200 Subject: [PATCH 12/14] docs: small typo in an example (#5456) Hello :) The exported field for local.file is `content` and not `content`. 
:) https://grafana.com/docs/agent/latest/flow/reference/components/local.file/#exported-fields Co-authored-by: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com> --- docs/sources/flow/config-language/expressions/function_calls.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/flow/config-language/expressions/function_calls.md b/docs/sources/flow/config-language/expressions/function_calls.md index 0b8f3a01c1ac..77fab0d2df53 100644 --- a/docs/sources/flow/config-language/expressions/function_calls.md +++ b/docs/sources/flow/config-language/expressions/function_calls.md @@ -28,7 +28,7 @@ allow for more complex expressions (e.g. concatenating arrays or decoding JSON strings into objects). ```river env("HOME") -json_decode(local.file.cfg.contents)["namespace"] +json_decode(local.file.cfg.content)["namespace"] ``` [standard library]: {{< relref "../../reference/stdlib" >}} From 3efd214ced744785d5f7840a2f5c1b904cda1368 Mon Sep 17 00:00:00 2001 From: Paulin Todev Date: Thu, 12 Oct 2023 17:09:29 +0100 Subject: [PATCH 13/14] Upgrade `golang.org/x/net` from v0.15.0 to v0.17.0 (#5448) * Upgrade golang.org/x/net from v0.15.0 to v0.17.0 --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 8aa5f697740f..48f0c11ada53 100644 --- a/go.mod +++ b/go.mod @@ -212,11 +212,11 @@ require ( go.uber.org/goleak v1.2.1 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.25.0 - golang.org/x/crypto v0.13.0 + golang.org/x/crypto v0.14.0 golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 - golang.org/x/net v0.15.0 + golang.org/x/net v0.17.0 golang.org/x/oauth2 v0.11.0 - golang.org/x/sys v0.12.0 + golang.org/x/sys v0.13.0 golang.org/x/text v0.13.0 golang.org/x/time v0.3.0 google.golang.org/api v0.139.0 @@ -598,7 +598,7 @@ require ( go4.org/netipx v0.0.0-20230125063823-8449b0a6169f // indirect golang.org/x/mod v0.12.0 // indirect golang.org/x/sync v0.3.0 // indirect - 
golang.org/x/term v0.12.0 // indirect + golang.org/x/term v0.13.0 // indirect golang.org/x/tools v0.12.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 98f051f2ff5f..359aea725075 100644 --- a/go.sum +++ b/go.sum @@ -2494,8 +2494,8 @@ golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= -golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2618,8 +2618,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= -golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 
v0.0.0-20170807180024-9a379c6b3e95/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2784,8 +2784,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2795,8 +2795,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= -golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 1820786d658b507679e2a661ffbb3f89f94fa473 Mon Sep 17 00:00:00 2001 From: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com> Date: Thu, 12 Oct 2023 10:00:17 -0700 Subject: [PATCH 14/14] Fix a cross reference link in a note in the Agent root topic (#5464) * Removing link that fails * Add fully qualified URL * Update docs/sources/_index.md.t Co-authored-by: Jack Baldry * Generate file content * Regenerate files with correct content --------- Co-authored-by: Jack Baldry --- docs/sources/_index.md | 2 +- docs/sources/_index.md.t | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/_index.md b/docs/sources/_index.md index 2411cd963a0c..49da949e752b 100644 --- a/docs/sources/_index.md +++ b/docs/sources/_index.md @@ -23,7 +23,7 @@ form programmable observability **pipelines** for telemetry collection, processing, and delivery. {{% admonition type="note" %}} -This page focuses mainly on [Flow mode][], the Terraform-inspired variant of Grafana Agent. +This page focuses mainly on [Flow mode](https://grafana.com/docs/agent//flow/), the Terraform-inspired variant of Grafana Agent. For information on other variants of Grafana Agent, refer to [Introduction to Grafana Agent]({{< relref "./about.md" >}}). {{% /admonition %}} diff --git a/docs/sources/_index.md.t b/docs/sources/_index.md.t index ade2db655994..00197119920c 100644 --- a/docs/sources/_index.md.t +++ b/docs/sources/_index.md.t @@ -23,7 +23,7 @@ form programmable observability **pipelines** for telemetry collection, processing, and delivery. {{% admonition type="note" %}} -This page focuses mainly on [Flow mode][], the Terraform-inspired variant of Grafana Agent. 
+This page focuses mainly on [Flow mode](https://grafana.com/docs/agent//flow/), the Terraform-inspired variant of Grafana Agent. For information on other variants of Grafana Agent, refer to [Introduction to Grafana Agent]({{< relref "./about.md" >}}). {{% /admonition %}}