
Commit

regenerates k8s manifests
qclaogui committed Dec 10, 2024
1 parent f5f3241 commit b68e085
Showing 19 changed files with 7,218 additions and 12,005 deletions.
61 changes: 36 additions & 25 deletions kubernetes/common/alloy/manifests/k8s-all-in-one.yaml
@@ -7,8 +7,8 @@ metadata:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: alloy
app.kubernetes.io/part-of: alloy
-app.kubernetes.io/version: v1.5.0
-helm.sh/chart: alloy-0.10.0
+app.kubernetes.io/version: v1.5.1
+helm.sh/chart: alloy-0.10.1
name: alloy
namespace: monitoring-system
---
@@ -21,8 +21,8 @@ metadata:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: alloy
app.kubernetes.io/part-of: alloy
-app.kubernetes.io/version: v1.5.0
-helm.sh/chart: alloy-0.10.0
+app.kubernetes.io/version: v1.5.1
+helm.sh/chart: alloy-0.10.1
name: alloy
rules:
- apiGroups:
@@ -125,8 +125,8 @@ metadata:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: alloy
app.kubernetes.io/part-of: alloy
-app.kubernetes.io/version: v1.5.0
-helm.sh/chart: alloy-0.10.0
+app.kubernetes.io/version: v1.5.1
+helm.sh/chart: alloy-0.10.1
name: alloy
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -4435,8 +4435,8 @@ metadata:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: alloy
app.kubernetes.io/part-of: alloy
-app.kubernetes.io/version: v1.5.0
-helm.sh/chart: alloy-0.10.0
+app.kubernetes.io/version: v1.5.1
+helm.sh/chart: alloy-0.10.1
name: alloy
namespace: monitoring-system
spec:
@@ -4476,8 +4476,8 @@ metadata:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: alloy
app.kubernetes.io/part-of: alloy
-app.kubernetes.io/version: v1.5.0
-helm.sh/chart: alloy-0.10.0
+app.kubernetes.io/version: v1.5.1
+helm.sh/chart: alloy-0.10.1
name: alloy-cluster
namespace: monitoring-system
spec:
@@ -4517,8 +4517,8 @@ metadata:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: alloy
app.kubernetes.io/part-of: alloy
-app.kubernetes.io/version: v1.5.0
-helm.sh/chart: alloy-0.10.0
+app.kubernetes.io/version: v1.5.1
+helm.sh/chart: alloy-0.10.1
name: alloy
namespace: monitoring-system
spec:
@@ -4571,7 +4571,7 @@ spec:
- secretRef:
name: alloy-env
optional: true
-image: docker.io/grafana/alloy:v1.5.0
+image: docker.io/grafana/alloy:v1.5.1
imagePullPolicy: IfNotPresent
name: alloy
ports:
@@ -4746,21 +4746,32 @@ spec:
description: The receiver could not push some spans to the pipeline under
job {{ $labels.job }}. This could be due to reaching a limit such as the
ones imposed by otelcol.processor.memory_limiter.
-summary: The receiver could not push some spans to the pipeline.
-expr: sum by (cluster, namespace, job) (rate(otelcol_receiver_refused_spans_total{}[1m]))
-  > 0
-for: 5m
+summary: The receiver pushing spans to the pipeline success rate is below
+  95%.
+expr: |
+  (1 - (
+    sum by (cluster, namespace, job) (rate(otelcol_receiver_refused_spans_total{}[1m]))
+    /
+    sum by (cluster, namespace, job) (rate(otelcol_receiver_refused_spans_total{}[1m]) + rate(otelcol_receiver_accepted_spans_total{}[1m]))
+  )
+  ) < 0.95
+for: 10m
labels:
severity: warning
- alert: OtelcolExporterFailedSpans
annotations:
description: The exporter failed to send spans to their destination under
job {{ $labels.job }}. There could be an issue with the payload or with
the destination endpoint.
-summary: The exporter failed to send spans to their destination.
-expr: sum by (cluster, namespace, job) (rate(otelcol_exporter_send_failed_spans_total{}[1m]))
-  > 0
-for: 5m
+summary: The exporter sending spans success rate is below 95%.
+expr: |
+  (1 - (
+    sum by (cluster, namespace, job) (rate(otelcol_exporter_send_failed_spans_total{}[1m]))
+    /
+    sum by (cluster, namespace, job) (rate(otelcol_exporter_send_failed_spans_total{}[1m]) + rate(otelcol_exporter_sent_spans_total{}[1m]))
+  )
+  ) < 0.95
+for: 10m
labels:
severity: warning
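(Not part of the diff, a worked check of the new threshold: a receiver that refuses 10 spans/s while accepting 190 spans/s has a success rate of 1 - 10/200 = 0.95, exactly at the alert boundary. The rewritten rules therefore fire only when more than 5% of spans fail for a sustained 10 minutes, whereas the old expressions fired on any nonzero failure rate after 5 minutes.)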
---
@@ -4773,8 +4784,8 @@ metadata:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: alloy
app.kubernetes.io/part-of: alloy
-app.kubernetes.io/version: v1.5.0
-helm.sh/chart: alloy-0.10.0
+app.kubernetes.io/version: v1.5.1
+helm.sh/chart: alloy-0.10.1
name: alloy
namespace: monitoring-system
spec:
@@ -4796,8 +4807,8 @@ metadata:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: alloy
app.kubernetes.io/part-of: alloy
-app.kubernetes.io/version: v1.5.0
-helm.sh/chart: alloy-0.10.0
+app.kubernetes.io/version: v1.5.1
+helm.sh/chart: alloy-0.10.1
name: alloy
namespace: monitoring-system
spec:
22 changes: 11 additions & 11 deletions kubernetes/common/grafana/manifests/k8s-all-in-one.yaml
@@ -11,7 +11,7 @@ metadata:
app.kubernetes.io/instance: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/version: 11.3.1
-helm.sh/chart: grafana-8.6.3
+helm.sh/chart: grafana-8.6.4
name: grafana
namespace: monitoring-system
---
@@ -22,7 +22,7 @@ metadata:
app.kubernetes.io/instance: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/version: 11.3.1
-helm.sh/chart: grafana-8.6.3
+helm.sh/chart: grafana-8.6.4
name: grafana
namespace: monitoring-system
rules: []
@@ -34,7 +34,7 @@ metadata:
app.kubernetes.io/instance: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/version: 11.3.1
-helm.sh/chart: grafana-8.6.3
+helm.sh/chart: grafana-8.6.4
name: grafana-clusterrole
rules:
- apiGroups:
@@ -54,7 +54,7 @@ metadata:
app.kubernetes.io/instance: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/version: 11.3.1
-helm.sh/chart: grafana-8.6.3
+helm.sh/chart: grafana-8.6.4
name: grafana
namespace: monitoring-system
roleRef:
@@ -73,7 +73,7 @@ metadata:
app.kubernetes.io/instance: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/version: 11.3.1
-helm.sh/chart: grafana-8.6.3
+helm.sh/chart: grafana-8.6.4
name: grafana-clusterrolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -151,7 +151,7 @@ metadata:
app.kubernetes.io/instance: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/version: 11.3.1
-helm.sh/chart: grafana-8.6.3
+helm.sh/chart: grafana-8.6.4
name: grafana-config-dashboards
namespace: monitoring-system
---
@@ -4219,7 +4219,7 @@ metadata:
app.kubernetes.io/instance: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/version: 11.3.1
-helm.sh/chart: grafana-8.6.3
+helm.sh/chart: grafana-8.6.4
name: grafana
namespace: monitoring-system
spec:
@@ -4240,7 +4240,7 @@ metadata:
app.kubernetes.io/instance: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/version: 11.3.1
-helm.sh/chart: grafana-8.6.3
+helm.sh/chart: grafana-8.6.4
name: grafana
namespace: monitoring-system
spec:
@@ -4264,7 +4264,7 @@ spec:
app.kubernetes.io/instance: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/version: 11.3.1
-helm.sh/chart: grafana-8.6.3
+helm.sh/chart: grafana-8.6.4
spec:
automountServiceAccountToken: true
containers:
@@ -4456,7 +4456,7 @@ metadata:
app.kubernetes.io/instance: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/version: 11.3.1
-helm.sh/chart: grafana-8.6.3
+helm.sh/chart: grafana-8.6.4
name: grafana
namespace: monitoring-system
spec:
@@ -4483,7 +4483,7 @@ metadata:
app.kubernetes.io/instance: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/version: 11.3.1
-helm.sh/chart: grafana-8.6.3
+helm.sh/chart: grafana-8.6.4
name: grafana
namespace: monitoring-system
spec:
86 changes: 86 additions & 0 deletions kubernetes/common/grafana/values.yaml
@@ -982,6 +982,23 @@ sidecar:
# defaults to 66sec (sic!)
# watchClientTimeout: 60
#
+# maxTotalRetries: Total number of retries to allow for any http request.
+# Takes precedence over other counts. Applies to all requests to reloadURL and k8s api requests.
+# Set to 0 to fail on the first retry.
+# maxTotalRetries: 5
+#
+# maxConnectRetries: How many connection-related errors to retry on for any http request.
+# These are errors raised before the request is sent to the remote server, which we assume has not triggered the server to process the request.
+# Applies to all requests to reloadURL and k8s api requests.
+# Set to 0 to fail on the first retry of this type.
+# maxConnectRetries: 10
+#
+# maxReadRetries: How many times to retry on read errors for any http request
+# These errors are raised after the request was sent to the server, so the request may have side-effects.
+# Applies to all requests to reloadURL and k8s api requests.
+# Set to 0 to fail on the first retry of this type.
+# maxReadRetries: 5
+#
# Endpoint to send request to reload alerts
reloadURL: "http://localhost:3000/api/admin/provisioning/alerting/reload"
# Absolute path to shell script to execute after a alert got reloaded
@@ -1035,6 +1052,24 @@ sidecar:
# If specified, the sidecar will look for annotation with this name to create folder and put graph here.
# You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure.
folderAnnotation: null
+#
+# maxTotalRetries: Total number of retries to allow for any http request.
+# Takes precedence over other counts. Applies to all requests to reloadURL and k8s api requests.
+# Set to 0 to fail on the first retry.
+# maxTotalRetries: 5
+#
+# maxConnectRetries: How many connection-related errors to retry on for any http request.
+# These are errors raised before the request is sent to the remote server, which we assume has not triggered the server to process the request.
+# Applies to all requests to reloadURL and k8s api requests.
+# Set to 0 to fail on the first retry of this type.
+# maxConnectRetries: 10
+#
+# maxReadRetries: How many times to retry on read errors for any http request
+# These errors are raised after the request was sent to the server, so the request may have side-effects.
+# Applies to all requests to reloadURL and k8s api requests.
+# Set to 0 to fail on the first retry of this type.
+# maxReadRetries: 5
+#
# Endpoint to send request to reload alerts
reloadURL: "http://localhost:3000/api/admin/provisioning/dashboards/reload"
# Absolute path to shell script to execute after a configmap got reloaded
@@ -1115,6 +1150,23 @@ sidecar:
# defaults to 66sec (sic!)
# watchClientTimeout: 60
#
+# maxTotalRetries: Total number of retries to allow for any http request.
+# Takes precedence over other counts. Applies to all requests to reloadURL and k8s api requests.
+# Set to 0 to fail on the first retry.
+# maxTotalRetries: 5
+#
+# maxConnectRetries: How many connection-related errors to retry on for any http request.
+# These are errors raised before the request is sent to the remote server, which we assume has not triggered the server to process the request.
+# Applies to all requests to reloadURL and k8s api requests.
+# Set to 0 to fail on the first retry of this type.
+# maxConnectRetries: 10
+#
+# maxReadRetries: How many times to retry on read errors for any http request
+# These errors are raised after the request was sent to the server, so the request may have side-effects.
+# Applies to all requests to reloadURL and k8s api requests.
+# Set to 0 to fail on the first retry of this type.
+# maxReadRetries: 5
+#
# Endpoint to send request to reload datasources
reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload"
# Absolute path to shell script to execute after a datasource got reloaded
@@ -1157,6 +1209,23 @@ sidecar:
# defaults to 66sec (sic!)
# watchClientTimeout: 60
#
+# maxTotalRetries: Total number of retries to allow for any http request.
+# Takes precedence over other counts. Applies to all requests to reloadURL and k8s api requests.
+# Set to 0 to fail on the first retry.
+# maxTotalRetries: 5
+#
+# maxConnectRetries: How many connection-related errors to retry on for any http request.
+# These are errors raised before the request is sent to the remote server, which we assume has not triggered the server to process the request.
+# Applies to all requests to reloadURL and k8s api requests.
+# Set to 0 to fail on the first retry of this type.
+# maxConnectRetries: 10
+#
+# maxReadRetries: How many times to retry on read errors for any http request
+# These errors are raised after the request was sent to the server, so the request may have side-effects.
+# Applies to all requests to reloadURL and k8s api requests.
+# Set to 0 to fail on the first retry of this type.
+# maxReadRetries: 5
+#
# Endpoint to send request to reload plugins
reloadURL: "http://localhost:3000/api/admin/provisioning/plugins/reload"
# Absolute path to shell script to execute after a plugin got reloaded
@@ -1199,6 +1268,23 @@ sidecar:
# defaults to 66sec (sic!)
# watchClientTimeout: 60
#
+# maxTotalRetries: Total number of retries to allow for any http request.
+# Takes precedence over other counts. Applies to all requests to reloadURL and k8s api requests.
+# Set to 0 to fail on the first retry.
+# maxTotalRetries: 5
+#
+# maxConnectRetries: How many connection-related errors to retry on for any http request.
+# These are errors raised before the request is sent to the remote server, which we assume has not triggered the server to process the request.
+# Applies to all requests to reloadURL and k8s api requests.
+# Set to 0 to fail on the first retry of this type.
+# maxConnectRetries: 10
+#
+# maxReadRetries: How many times to retry on read errors for any http request
+# These errors are raised after the request was sent to the server, so the request may have side-effects.
+# Applies to all requests to reloadURL and k8s api requests.
+# Set to 0 to fail on the first retry of this type.
+# maxReadRetries: 5
+#
# Endpoint to send request to reload notifiers
reloadURL: "http://localhost:3000/api/admin/provisioning/notifications/reload"
# Absolute path to shell script to execute after a notifier got reloaded
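Not part of the diff: a minimal sketch of a values override that sets the retry knobs documented in the comments above. The keys come from those comments; the numbers mirror the commented examples and are illustrative, not chart defaults.

sidecar:
  alerts:
    # Cap total attempts for any sidecar HTTP request (reloadURL and k8s API calls).
    maxTotalRetries: 5
    # Connection errors occur before the server sees the request, so retrying is safe.
    maxConnectRetries: 10
    # Read errors occur after the request was sent, so retries may repeat side-effects.
    maxReadRetries: 5
  dashboards:
    maxTotalRetries: 5
    maxConnectRetries: 10
    maxReadRetries: 5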
