diff --git a/src/grafana/values/values.yaml b/src/grafana/values/values.yaml
index 6ae3c9a8b..07c038ede 100644
--- a/src/grafana/values/values.yaml
+++ b/src/grafana/values/values.yaml
@@ -22,6 +22,9 @@ grafana.ini:
     reporting_enabled: false
     check_for_updates: false
     check_for_plugin_updates: false
+    feedback_links_enabled: false
+  plugins:
+    public_key_retrieval_disabled: true
   auth:
     # Disable the login form to force users to use SSO
     disable_login_form: true
diff --git a/src/pepr/zarf.yaml b/src/pepr/zarf.yaml
index 25c0e2f2a..904045a19 100644
--- a/src/pepr/zarf.yaml
+++ b/src/pepr/zarf.yaml
@@ -52,7 +52,9 @@ components:
     actions:
       onDeploy:
         before:
-          - cmd: |
+          - mute: true
+            description: "Update helm ownership for Pepr resources if necessary during the upgrade"
+            cmd: |
               ./zarf tools kubectl annotate secret -n pepr-system pepr-uds-core-api-token meta.helm.sh/release-name=module --overwrite || true
               ./zarf tools kubectl annotate secret -n pepr-system pepr-uds-core-module meta.helm.sh/release-name=module --overwrite || true
               ./zarf tools kubectl annotate secret -n pepr-system pepr-uds-core-tls meta.helm.sh/release-name=module --overwrite || true
diff --git a/src/vector/README.md b/src/vector/README.md
index d22d451f7..7c424cb33 100644
--- a/src/vector/README.md
+++ b/src/vector/README.md
@@ -37,6 +37,6 @@ As with any decisions of tooling in core this can always be reevaluated in the f
 
 ### Upgrade Considerations
 
-During the upgrade there may be some duplication/overlap of log lines shipped to Loki due to the transition from Promtail's "position" file to Vector's "checkpoint" file (both used for tracking the last log line scraped/shipped). Grafana provides a built in feature to de-duplicate log entries when querying Loki, but this does not consistently work with all log lines due to the approach used for de-duplication.
+During the upgrade there may be some duplication/overlap of log lines shipped to Loki due to the transition from Promtail's "position" file to Vector's "checkpoint" file (both used for tracking the last log line scraped/shipped). Grafana provides a built in feature to de-duplicate log entries when querying Loki, but this does not consistently work with all log lines due to the approach used by Grafana for de-duplication.
 
-To ensure easy querying of logs across the upgrade, all logs shipped by Vector also have a `collector` label (with the value of `vector`). This can be used to filter down any logs to either what was collected by Vector or what was not collected by Vector (using the `=` and `!=` operators). In general you can use these filters to filter so that any log timestamps from before your upgrade are not collected by Vector and vice-verse post-upgrade.
+To ensure easy querying of logs across the upgrade, all logs shipped by Vector also have a `collector` label (with the value of `vector`). This can be used to filter down any logs to those collected by either Vector or Promtail (using the `=` and `!=` operators). In general you can use these filters along with tracking your upgrade timing to properly ignore duplicate logs for the short upgrade period.
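As a usage illustration of the README change above, a minimal LogQL sketch of the described filters. The `collector` label and the `=`/`!=` operators come straight from the README text; the `my-app` namespace value is a hypothetical placeholder:

```logql
# Streams shipped by Vector (carry collector="vector")
{namespace="my-app", collector="vector"}

# Streams shipped by Promtail pre-upgrade (no collector label set)
{namespace="my-app", collector!="vector"}
```

Bounding each query's time range at the upgrade window then lets you ignore the duplicated lines from the short overlap period.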
diff --git a/src/vector/values/values.yaml b/src/vector/values/values.yaml
index 467763b65..7bbe3ee60 100644
--- a/src/vector/values/values.yaml
+++ b/src/vector/values/values.yaml
@@ -22,23 +22,22 @@ customConfig:
       type: remap
       inputs: ["pod_logs"]
       source: |
-        if !exists(.kubernetes.pod_labels.app) {
-          if exists(.kubernetes.pod_labels."app.kubernetes.io/name") {
-            .kubernetes.pod_labels.app = .kubernetes.pod_labels."app.kubernetes.io/name"
-          } else if exists(.kubernetes.pod_labels.name) {
-            .kubernetes.pod_labels.app = .kubernetes.pod_labels.name
-          } else if exists(.kubernetes.pod_owner) {
-            .kubernetes.pod_labels.app = .kubernetes.pod_owner
-          } else {
-            .kubernetes.pod_labels.app = .kubernetes.pod_name
-          }
+        if exists(.kubernetes.pod_labels."app.kubernetes.io/name") {
+          .app = .kubernetes.pod_labels."app.kubernetes.io/name"
+        } else if exists(.kubernetes.pod_labels.app) {
+          .app = .kubernetes.pod_labels.app
+        } else if exists(.kubernetes.pod_owner) {
+          .app = replace!(.kubernetes.pod_owner, r'^([^/]+/)', "")
+        } else {
+          .app = .kubernetes.pod_name
         }
-        if !exists(.kubernetes.pod_labels.component) {
-          if exists(.kubernetes.pod_labels."app.kubernetes.io/component") {
-            .kubernetes.pod_labels.component = .kubernetes.pod_labels."app.kubernetes.io/component"
-          } else {
-            .kubernetes.pod_labels.component = ""
-          }
+
+        if exists(.kubernetes.pod_labels."app.kubernetes.io/component") {
+          .component = .kubernetes.pod_labels."app.kubernetes.io/component"
+        } else if exists(.kubernetes.pod_labels.component) {
+          .component = .kubernetes.pod_labels.component
+        } else {
+          .component = ""
         }
 
     node_logs_labelled:
@@ -62,10 +61,10 @@ customConfig:
         codec: "raw_message"
       labels:
         namespace: '{{`{{ kubernetes.pod_namespace }}`}}'
-        app: '{{`{{ kubernetes.pod_labels.app }}`}}'
-        job: '{{`{{ kubernetes.pod_namespace }}`}}/{{`{{ kubernetes.pod_labels.app }}`}}'
+        app: '{{`{{ app }}`}}'
+        job: '{{`{{ kubernetes.pod_namespace }}`}}/{{`{{ app }}`}}'
         container: '{{`{{ kubernetes.container_name }}`}}'
-        component: '{{`{{ kubernetes.pod_labels.component }}`}}'
+        component: '{{`{{ component }}`}}'
         host: '{{`{{ kubernetes.pod_node_name }}`}}'
         filename: '{{`{{ file }}`}}'
         collector: "vector"
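As a follow-up illustration of the values.yaml change above, a hedged LogQL sketch of how the reworked `app` and `job` labels surface when querying Loki. The label names come from the sink's `labels` block in the diff; the `grafana` values are hypothetical examples:

```logql
# 'app' now prefers app.kubernetes.io/name over the bare 'app' pod label,
# so one selector works regardless of which convention a chart uses
{namespace="grafana", app="grafana"}

# 'job' is composed by the sink as <namespace>/<app>
{job="grafana/grafana"}
```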