From 6ba9146e7d6f6bb2ccc4ef4f4066102ecae2e165 Mon Sep 17 00:00:00 2001 From: Weifeng Wang Date: Wed, 6 Mar 2024 12:27:42 +0800 Subject: [PATCH] improve tracesToProfiles and tracesToLogsV2 Signed-off-by: Weifeng Wang --- .../agent-flow/monolithic-mode-all.river | 144 ++++++++++++++---- .../common/config/agent-flow/traces.river | 101 +++++++++--- .../common/config/tempo/datasources.yaml | 12 +- .../traces/docker-compose.yaml | 34 ++++- .../config/grafana/datasources.yaml | 6 +- .../all-in-one/docker-compose.yaml | 13 +- .../config/tempo/scalable-single-binary.yaml | 1 + .../traces/docker-compose.yaml | 10 +- .../configs/modules/traces.river | 56 +++---- .../manifests/k8s-all-in-one.yaml | 39 +++-- .../common/minio-tenant/k8s-all-in-one.yaml | 128 ---------------- 11 files changed, 307 insertions(+), 237 deletions(-) delete mode 100644 kubernetes/common/minio-tenant/k8s-all-in-one.yaml diff --git a/docker-compose/common/config/agent-flow/monolithic-mode-all.river b/docker-compose/common/config/agent-flow/monolithic-mode-all.river index 317604fd..5bec86c0 100644 --- a/docker-compose/common/config/agent-flow/monolithic-mode-all.river +++ b/docker-compose/common/config/agent-flow/monolithic-mode-all.river @@ -49,7 +49,7 @@ discovery.relabel "containers" { rule { source_labels = ["__meta_docker_container_label_com_docker_compose_service"] regex = "(.*)" - target_label = "service_name" + target_label = "app" } rule { @@ -180,9 +180,9 @@ otelcol.receiver.jaeger "containers" { } output { - metrics = [otelcol.processor.batch.containers.input] - logs = [otelcol.processor.batch.containers.input] - traces = [otelcol.processor.batch.containers.input] + metrics = [otelcol.processor.transform.add_metric_datapoint_attributes.input] + logs = [otelcol.processor.resourcedetection.containers.input] + traces = [otelcol.processor.resourcedetection.containers.input] } } @@ -200,16 +200,104 @@ otelcol.receiver.otlp "containers" { } output { - metrics = [otelcol.processor.batch.containers.input] - logs = [otelcol.processor.batch.containers.input] + metrics = [otelcol.processor.transform.add_metric_datapoint_attributes.input] + logs = [otelcol.processor.resourcedetection.containers.input] traces = [ - otelcol.processor.batch.containers.input, + otelcol.processor.resourcedetection.containers.input, otelcol.connector.spanlogs.autologging.input, ] } } +otelcol.processor.resourcedetection "containers" { + detectors = ["env"] + + output { + logs = [otelcol.processor.attributes.containers.input] + metrics = [otelcol.processor.attributes.containers.input] + traces = [otelcol.processor.attributes.containers.input] + } +} + +otelcol.processor.transform "add_metric_datapoint_attributes" { + error_mode = "ignore" + + metric_statements { + context = "datapoint" + statements = [ + `set(attributes["deployment.environment"], resource.attributes["deployment.environment"])`, + `set(attributes["service.version"], resource.attributes["service.version"])`, + ] + } + + output { + metrics = [otelcol.processor.attributes.containers.input] + } +} + +otelcol.processor.attributes "containers" { + // Inserts a new attribute "cluster" to spans where the key doesn't exist. 
+ action { + key = "cluster" + value = "docker-compose" + action = "insert" + } + + output { + metrics = [otelcol.processor.transform.add_resource_attributes.input] + logs = [otelcol.processor.transform.add_resource_attributes.input] + traces = [otelcol.processor.transform.add_resource_attributes.input] + } +} + +otelcol.processor.transform "add_resource_attributes" { + error_mode = "ignore" + + metric_statements { + context = "resource" + statements = [ + `set(attributes["cluster"], "docker-compose") where attributes["cluster"] == nil`, + ] + } + + log_statements { + context = "resource" + statements = [ + `set(attributes["pod"], attributes["pod.name"])`, + `set(attributes["namespace"], attributes["namespace.name"])`, + `set(attributes["loki.resource.labels"], "pod, namespace, cluster, job")`, + ] + } + + trace_statements { + context = "resource" + statements = [ + `set(attributes["cluster"], "docker-compose") where attributes["cluster"] == nil`, + ] + } + + output { + metrics = [otelcol.processor.filter.containers.input] + logs = [otelcol.processor.filter.containers.input] + traces = [otelcol.processor.filter.containers.input] + } +} + +otelcol.processor.filter "containers" { + error_mode = "ignore" + + output { + metrics = [otelcol.processor.batch.containers.input] + logs = [otelcol.processor.batch.containers.input] + traces = [otelcol.processor.batch.containers.input] + } +} + otelcol.processor.batch "containers" { + send_batch_size = 16384 + send_batch_max_size = 0 + timeout = "2s" + output { metrics = [otelcol.processor.memory_limiter.containers.input] logs = [otelcol.processor.memory_limiter.containers.input] @@ -217,6 +305,26 @@ otelcol.processor.batch "containers" { } } +otelcol.processor.memory_limiter "containers" { + check_interval = "1s" + limit_percentage = 50 + spike_limit_percentage = 30 + + output { + metrics = [otelcol.exporter.prometheus.containers.input] + logs = [otelcol.exporter.loki.containers.input] + traces = [module.file.docker_compose.exports.traces_receiver] + } +} + +otelcol.exporter.prometheus "containers" { + forward_to = [module.file.docker_compose.exports.metrics_receiver] +} + +otelcol.exporter.loki "containers" { + forward_to = [loki.process.containers.receiver] +} + // The OpenTelemetry spanlog connector processes incoming trace spans and extracts data from them ready // for logging. 
otelcol.connector.spanlogs "autologging" { @@ -262,32 +370,16 @@ loki.process "autologging" { forward_to = [loki.process.containers.receiver] } -otelcol.processor.memory_limiter "containers" { - output { - metrics = [otelcol.exporter.prometheus.containers.input] - logs = [otelcol.exporter.loki.containers.input] - traces = [module.file.docker_compose.exports.traces_receiver] - } -} - -otelcol.exporter.prometheus "containers" { - forward_to = [module.file.docker_compose.exports.metrics_receiver] -} - -otelcol.exporter.loki "containers" { - forward_to = [loki.process.containers.receiver] -} - /******************************************** * Profiles ********************************************/ pyroscope.scrape "containers" { targets = [ - {"__address__" = "mimir:8080", "service_name" = "mimir"}, - {"__address__" = "loki:3100", "service_name" = "loki-all"}, - {"__address__" = "tempo:3200", "service_name" = "tempo-all"}, + {"__address__" = "loki:3100", "service_name" = "loki"}, {"__address__" = "grafana:6060", "service_name" = "grafana"}, + {"__address__" = "tempo:3200", "service_name" = "tempo"}, + {"__address__" = "mimir:8080", "service_name" = "mimir"}, ] clustering { diff --git a/docker-compose/common/config/agent-flow/traces.river b/docker-compose/common/config/agent-flow/traces.river index 51734626..2d788013 100644 --- a/docker-compose/common/config/agent-flow/traces.river +++ b/docker-compose/common/config/agent-flow/traces.river @@ -54,7 +54,7 @@ discovery.relabel "containers" { rule { source_labels = ["__meta_docker_container_label_com_docker_compose_service"] regex = "(.*)" - target_label = "service_name" + target_label = "app" } rule { @@ -161,9 +161,8 @@ otelcol.receiver.jaeger "containers" { } output { - metrics = [otelcol.processor.batch.containers.input] - logs = [otelcol.processor.batch.containers.input] - traces = [otelcol.processor.batch.containers.input] + metrics = [otelcol.processor.transform.add_metric_datapoint_attributes.input] + traces = [otelcol.processor.resourcedetection.containers.input] } } @@ -180,25 +179,101 @@ otelcol.receiver.otlp "containers" { endpoint = "0.0.0.0:4318" } + output { + metrics = [otelcol.processor.transform.add_metric_datapoint_attributes.input] + traces = [otelcol.processor.resourcedetection.containers.input] + } +} + +otelcol.processor.resourcedetection "containers" { + detectors = ["env"] + + output { + metrics = [otelcol.processor.attributes.containers.input] + traces = [otelcol.processor.attributes.containers.input] + } +} + +otelcol.processor.transform "add_metric_datapoint_attributes" { + error_mode = "ignore" + + metric_statements { + context = "datapoint" + statements = [ + `set(attributes["deployment.environment"], resource.attributes["deployment.environment"])`, + `set(attributes["service.version"], resource.attributes["service.version"])`, + ] + } + + output { + metrics = [otelcol.processor.attributes.containers.input] + } +} + +otelcol.processor.attributes "containers" { + // Inserts a new attribute "cluster" to spans where the key doesn't exist. 
+ action { + key = "cluster" + value = "docker-compose" + action = "insert" + } + + output { + metrics = [otelcol.processor.transform.add_resource_attributes.input] + traces = [otelcol.processor.transform.add_resource_attributes.input] + } +} + +otelcol.processor.transform "add_resource_attributes" { + error_mode = "ignore" + + metric_statements { + context = "resource" + statements = [ + `set(attributes["cluster"], "docker-compose") where attributes["cluster"] == nil`, + ] + } + + trace_statements { + context = "resource" + statements = [ + `set(attributes["cluster"], "docker-compose") where attributes["cluster"] == nil`, + ] + } + + output { + metrics = [otelcol.processor.filter.containers.input] + traces = [otelcol.processor.filter.containers.input] + } +} + +otelcol.processor.filter "containers" { + error_mode = "ignore" + output { metrics = [otelcol.processor.batch.containers.input] - logs = [otelcol.processor.batch.containers.input] traces = [otelcol.processor.batch.containers.input] } } otelcol.processor.batch "containers" { + send_batch_size = 16384 + send_batch_max_size = 0 + timeout = "2s" + output { metrics = [otelcol.processor.memory_limiter.containers.input] - logs = [otelcol.processor.memory_limiter.containers.input] traces = [otelcol.processor.memory_limiter.containers.input] } } otelcol.processor.memory_limiter "containers" { + check_interval = "1s" + limit_percentage = 50 + spike_limit_percentage = 30 + output { metrics = [otelcol.exporter.prometheus.containers.input] - logs = [otelcol.exporter.loki.containers.input] traces = [module.file.docker_compose.exports.traces_receiver] } } @@ -206,15 +281,3 @@ otelcol.processor.memory_limiter "containers" { otelcol.exporter.prometheus "containers" { forward_to = [module.file.docker_compose.exports.metrics_receiver] } - -otelcol.exporter.loki "containers" { - forward_to = [loki.process.containers.receiver] -} - -loki.process "containers" { - stage.tenant { - value = "anonymous" - } - - forward_to = [module.file.docker_compose.exports.logs_receiver] -} diff --git a/docker-compose/common/config/tempo/datasources.yaml b/docker-compose/common/config/tempo/datasources.yaml index 57985493..106f2c43 100644 --- a/docker-compose/common/config/tempo/datasources.yaml +++ b/docker-compose/common/config/tempo/datasources.yaml @@ -1,5 +1,15 @@ apiVersion: 1 +deleteDatasources: +- name: Metrics + uid: metrics +- name: Logs + uid: logs +- name: Traces + uid: traces +- name: Profiles + uid: profiles + datasources: # Mimir for metrics - name: Metrics @@ -60,7 +70,7 @@ datasources: datasourceUid: metrics spanStartTimeShift: '-30m' spanEndTimeShift: '30m' - tags: [{ key: 'service.name', value: 'service' }, { key: 'span_name' }, { key: 'http_method' }] + tags: [{ key: 'service.name', value: 'service' }] queries: - name: '(R) Rate' query: 'sum(rate(traces_spanmetrics_calls_total{$$__tags}[$$__rate_interval]))' diff --git a/docker-compose/microservices-mode/traces/docker-compose.yaml b/docker-compose/microservices-mode/traces/docker-compose.yaml index a4af1260..1f3bc0e6 100644 --- a/docker-compose/microservices-mode/traces/docker-compose.yaml +++ b/docker-compose/microservices-mode/traces/docker-compose.yaml @@ -52,6 +52,12 @@ services: - -target=distributor - -config.expand-env=true - -log.level=warn + environment: + - JAEGER_AGENT_HOST=agent + - JAEGER_AGENT_PORT=6831 + - JAEGER_TAGS=app=distributor + - JAEGER_SAMPLER_TYPE=const + - JAEGER_SAMPLER_PARAM=1 healthcheck: test: [ "CMD-SHELL", "wget --no-verbose --tries=1 --spider 
http://localhost:3100/ready || exit 1" ] interval: 15s @@ -71,6 +77,12 @@ services: - -target=ingester - -config.expand-env=true - -log.level=warn + environment: + - JAEGER_AGENT_HOST=agent + - JAEGER_AGENT_PORT=6831 + - JAEGER_TAGS=app=ingester + - JAEGER_SAMPLER_TYPE=const + - JAEGER_SAMPLER_PARAM=1 deploy: replicas: 3 networks: @@ -90,6 +102,12 @@ services: - -target=query-frontend - -config.expand-env=true - -log.level=warn + environment: + - JAEGER_AGENT_HOST=agent + - JAEGER_AGENT_PORT=6831 + - JAEGER_TAGS=app=query-frontend + - JAEGER_SAMPLER_TYPE=const + - JAEGER_SAMPLER_PARAM=1 querier: depends_on: @@ -105,6 +123,12 @@ services: - -target=querier - -config.expand-env=true - -log.level=warn + environment: + - JAEGER_AGENT_HOST=agent + - JAEGER_AGENT_PORT=6831 + - JAEGER_TAGS=app=querier + - JAEGER_SAMPLER_TYPE=const + - JAEGER_SAMPLER_PARAM=1 compactor: depends_on: @@ -117,11 +141,17 @@ services: - -config.file=/etc/tempo.yaml - -target=compactor - -log.level=error + environment: + - JAEGER_AGENT_HOST=agent + - JAEGER_AGENT_PORT=6831 + - JAEGER_TAGS=app=compactor + - JAEGER_SAMPLER_TYPE=const + - JAEGER_SAMPLER_PARAM=1 mimir: depends_on: minio: - condition: service_healthy + condition: service_started image: grafana/mimir:2.11.0 volumes: - ../../common/config/mimir/monolithic-mode-metrics.yaml:/etc/config.yaml @@ -169,6 +199,6 @@ services: - GF_SECURITY_ADMIN_PASSWORD=${GF_SECURITY_ADMIN_PASSWORD:-admin_password} - GF_FEATURE_TOGGLES_ENABLE=traceqlEditor tracesEmbeddedFlameGraph traceqlSearch correlations metricsSummary traceToMetrics traceToProfiles - GF_TRACING_OPENTELEMETRY_OTLP_ADDRESS=agent:4317 - - GF_TRACING_OPENTELEMETRY_CUSTOM_ATTRIBUTES=service.namespace:monitoring-system + - GF_TRACING_OPENTELEMETRY_CUSTOM_ATTRIBUTES=app:grafana ports: - "3000:3000" diff --git a/docker-compose/monolithic-mode/all-in-one/config/grafana/datasources.yaml b/docker-compose/monolithic-mode/all-in-one/config/grafana/datasources.yaml index fdffd3fb..ede86a9a 100644 --- a/docker-compose/monolithic-mode/all-in-one/config/grafana/datasources.yaml +++ b/docker-compose/monolithic-mode/all-in-one/config/grafana/datasources.yaml @@ -80,7 +80,7 @@ datasources: datasourceUid: metrics spanStartTimeShift: '-30m' spanEndTimeShift: '30m' - tags: [{ key: 'service.name', value: 'service' }, { key: 'span_name' }, { key: 'http_method' }] + tags: [{ key: 'service.name', value: 'service' }] queries: - name: '(R) Rate' query: 'sum(rate(traces_spanmetrics_calls_total{$$__tags}[$$__rate_interval]))' @@ -92,14 +92,14 @@ datasources: datasourceUid: logs spanStartTimeShift: '-30m' spanEndTimeShift: '30m' - tags: [{ key: 'service.name', value: 'service_name' }, { key: 'namespace' }, { key: 'cluster' }] + tags: [{ key: 'app', value: 'app' }] filterByTraceID: false filterBySpanID: false tracesToProfiles: customQuery: false datasourceUid: "profiles" profileTypeId: "process_cpu:cpu:nanoseconds:cpu:nanoseconds" - tags: [{ key: 'service.name', value: 'service_name' }] + tags: [{ key: 'app', value: 'service_name' }] # Pyroscope for profiles diff --git a/docker-compose/monolithic-mode/all-in-one/docker-compose.yaml b/docker-compose/monolithic-mode/all-in-one/docker-compose.yaml index 87004f3f..a8a692a0 100644 --- a/docker-compose/monolithic-mode/all-in-one/docker-compose.yaml +++ b/docker-compose/monolithic-mode/all-in-one/docker-compose.yaml @@ -48,9 +48,9 @@ services: - -config.expand-env=true - -log.level=warn environment: - - JAEGER_SERVICE_NAME=mimir - JAEGER_AGENT_HOST=agent - JAEGER_AGENT_PORT=6831 + - 
JAEGER_TAGS=app=mimir - JAEGER_SAMPLER_TYPE=const - JAEGER_SAMPLER_PARAM=1 healthcheck: @@ -92,9 +92,9 @@ services: - -config.expand-env=true - -log.level=warn environment: - - JAEGER_SERVICE_NAME=loki - JAEGER_AGENT_HOST=agent - JAEGER_AGENT_PORT=6831 + - JAEGER_TAGS=app=loki - JAEGER_SAMPLER_TYPE=const - JAEGER_SAMPLER_PARAM=1 healthcheck: @@ -138,9 +138,9 @@ services: - -config.expand-env=true - -log.level=warn environment: - - JAEGER_SERVICE_NAME=tempo - JAEGER_AGENT_HOST=agent - JAEGER_AGENT_PORT=6831 + - JAEGER_TAGS=app=tempo - JAEGER_SAMPLER_TYPE=const - JAEGER_SAMPLER_PARAM=1 healthcheck: @@ -164,13 +164,16 @@ services: rules load /tempo-mixin/rules.yaml /tempo-mixin/alerts.yaml pyroscope: + depends_on: + minio: + condition: service_healthy image: grafana/pyroscope:1.4.0 volumes: - ../../common/config/pyroscope/monolithic-mode-profiles.yaml:/etc/config.yaml environment: - - JAEGER_SERVICE_NAME=pyroscope - JAEGER_AGENT_HOST=agent - JAEGER_AGENT_PORT=6831 + - JAEGER_TAGS=app=pyroscope - JAEGER_SAMPLER_TYPE=const - JAEGER_SAMPLER_PARAM=1 command: @@ -200,7 +203,7 @@ services: - GF_DIAGNOSTICS_PROFILING_ADDR=0.0.0.0 - GF_DIAGNOSTICS_PROFILING_PORT=6060 - GF_TRACING_OPENTELEMETRY_OTLP_ADDRESS=agent:4317 - - GF_TRACING_OPENTELEMETRY_CUSTOM_ATTRIBUTES=service.namespace:monitoring-system + - GF_TRACING_OPENTELEMETRY_CUSTOM_ATTRIBUTES=app:grafana # - GF_INSTALL_PLUGINS=pyroscope-datasource,pyroscope-panel ports: - "3000:3000" diff --git a/docker-compose/monolithic-mode/traces/config/tempo/scalable-single-binary.yaml b/docker-compose/monolithic-mode/traces/config/tempo/scalable-single-binary.yaml index 717c3099..c8452690 100644 --- a/docker-compose/monolithic-mode/traces/config/tempo/scalable-single-binary.yaml +++ b/docker-compose/monolithic-mode/traces/config/tempo/scalable-single-binary.yaml @@ -26,6 +26,7 @@ ingester: max_block_duration: 5m memberlist: + abort_if_cluster_join_fails: false join_members: - tempo-memberlist:7946 diff --git a/docker-compose/monolithic-mode/traces/docker-compose.yaml b/docker-compose/monolithic-mode/traces/docker-compose.yaml index 7d442dd5..c4bddd2f 100644 --- a/docker-compose/monolithic-mode/traces/docker-compose.yaml +++ b/docker-compose/monolithic-mode/traces/docker-compose.yaml @@ -43,7 +43,7 @@ services: tempo: depends_on: minio: - condition: service_healthy + condition: service_started image: &tempoImage grafana/tempo:2.4.0 restart: always volumes: @@ -54,9 +54,9 @@ services: - -log.level=warn - -config.expand-env=true environment: - - JAEGER_SERVICE_NAME=tempo - JAEGER_AGENT_HOST=agent - JAEGER_AGENT_PORT=6831 + - JAEGER_TAGS=app=tempo - JAEGER_SAMPLER_TYPE=const - JAEGER_SAMPLER_PARAM=1 healthcheck: @@ -76,7 +76,7 @@ services: mimir: depends_on: minio: - condition: service_healthy + condition: service_started image: grafana/mimir:2.11.0 volumes: - ../../common/config/mimir/monolithic-mode-metrics.yaml:/etc/config.yaml @@ -86,9 +86,9 @@ services: - -config.expand-env=true - -log.level=warn environment: - - JAEGER_SERVICE_NAME=mimir - JAEGER_AGENT_HOST=agent - JAEGER_AGENT_PORT=6831 + - JAEGER_TAGS=app=mimir - JAEGER_SAMPLER_TYPE=const - JAEGER_SAMPLER_PARAM=1 healthcheck: @@ -124,6 +124,6 @@ services: - GF_SECURITY_ADMIN_PASSWORD=${GF_SECURITY_ADMIN_PASSWORD:-admin_password} - GF_FEATURE_TOGGLES_ENABLE=traceqlEditor tracesEmbeddedFlameGraph traceqlSearch correlations metricsSummary traceToMetrics traceToProfiles - GF_TRACING_OPENTELEMETRY_OTLP_ADDRESS=agent:4317 - - GF_TRACING_OPENTELEMETRY_CUSTOM_ATTRIBUTES=service.namespace:monitoring-system + - 
GF_TRACING_OPENTELEMETRY_CUSTOM_ATTRIBUTES=app:grafana ports: - "3000:3000" diff --git a/kubernetes/common/grafana-agent/configs/modules/traces.river b/kubernetes/common/grafana-agent/configs/modules/traces.river index a7847607..f5bca557 100644 --- a/kubernetes/common/grafana-agent/configs/modules/traces.river +++ b/kubernetes/common/grafana-agent/configs/modules/traces.river @@ -93,6 +93,34 @@ otelcol.processor.batch "default" { } } +otelcol.processor.memory_limiter "default" { + check_interval = "1s" + limit_percentage = 50 + spike_limit_percentage = 30 + + output { + metrics = [otelcol.processor.k8sattributes.default.input] + logs = [otelcol.processor.k8sattributes.default.input] + traces = [otelcol.processor.k8sattributes.default.input] + } +} + +otelcol.processor.k8sattributes "default" { + output { + metrics = [otelcol.exporter.prometheus.tracesmetrics.input] + logs = [otelcol.exporter.loki.traceslogs.input] + traces = argument.traces_forward_to.value + } +} + +otelcol.exporter.prometheus "tracesmetrics" { + forward_to = argument.metrics_forward_to.value +} + +otelcol.exporter.loki "traceslogs" { + forward_to = [loki.process.traceslogs.receiver] +} + // The OpenTelemetry spanlog connector processes incoming trace spans and extracts data from them ready // for logging. otelcol.connector.spanlogs "autologging" { @@ -144,34 +172,6 @@ loki.process "autologging" { forward_to = [loki.process.traceslogs.receiver] } -otelcol.processor.memory_limiter "default" { - check_interval = "1s" - limit_percentage = 50 - spike_limit_percentage = 30 - - output { - metrics = [otelcol.processor.k8sattributes.default.input] - logs = [otelcol.processor.k8sattributes.default.input] - traces = [otelcol.processor.k8sattributes.default.input] - } -} - -otelcol.processor.k8sattributes "default" { - output { - metrics = [otelcol.exporter.prometheus.tracesmetrics.input] - logs = [otelcol.exporter.loki.traceslogs.input] - traces = argument.traces_forward_to.value - } -} - -otelcol.exporter.prometheus "tracesmetrics" { - forward_to = argument.metrics_forward_to.value -} - -otelcol.exporter.loki "traceslogs" { - forward_to = [loki.process.traceslogs.receiver] -} - loki.process "traceslogs" { stage.tenant { value = "anonymous" diff --git a/kubernetes/common/grafana-agent/manifests/k8s-all-in-one.yaml b/kubernetes/common/grafana-agent/manifests/k8s-all-in-one.yaml index 08ba294a..27c8d2e8 100644 --- a/kubernetes/common/grafana-agent/manifests/k8s-all-in-one.yaml +++ b/kubernetes/common/grafana-agent/manifests/k8s-all-in-one.yaml @@ -581,14 +581,21 @@ data: \ = [\n\t\t\totelcol.processor.batch.default.input,\n\t\t\totelcol.connector.spanlogs.autologging.input,\n\t\t]\n\t}\n}\n\notelcol.processor.batch \"default\" {\n\tsend_batch_size = 16384\n\tsend_batch_max_size = 0\n\ttimeout \ = \"2s\"\n\n\toutput {\n\t\tmetrics = [otelcol.processor.memory_limiter.default.input]\n\t\tlogs - \ = [otelcol.processor.memory_limiter.default.input]\n\t\ttraces = [otelcol.processor.memory_limiter.default.input]\n\t}\n}\n\n// - The OpenTelemetry spanlog connector processes incoming trace spans and extracts - data from them ready\n// for logging.\notelcol.connector.spanlogs \"autologging\" - {\n\t// We only want to output a line for each root span (ie. 
every single trace), - and not for every\n\t// process or span (outputting a line for every span would - be extremely verbose).\n\tspans = false\n\troots = true\n\tprocesses = - false\n\n\t// We want to ensure that the following three span attributes are included - in the log line, if present.\n\tspan_attributes = [\n\t\t\"http.method\",\n\t\t\"http.target\",\n\t\t\"http.status_code\",\n\t]\n\n\t// + \ = [otelcol.processor.memory_limiter.default.input]\n\t\ttraces = [otelcol.processor.memory_limiter.default.input]\n\t}\n}\n\notelcol.processor.memory_limiter + \"default\" {\n\tcheck_interval = \"1s\"\n\tlimit_percentage = 50\n\tspike_limit_percentage + = 30\n\n\toutput {\n\t\tmetrics = [otelcol.processor.k8sattributes.default.input]\n\t\tlogs + \ = [otelcol.processor.k8sattributes.default.input]\n\t\ttraces = [otelcol.processor.k8sattributes.default.input]\n\t}\n}\n\notelcol.processor.k8sattributes + \"default\" {\n\toutput {\n\t\tmetrics = [otelcol.exporter.prometheus.tracesmetrics.input]\n\t\tlogs + \ = [otelcol.exporter.loki.traceslogs.input]\n\t\ttraces = argument.traces_forward_to.value\n\t}\n}\n\notelcol.exporter.prometheus + \"tracesmetrics\" {\n\tforward_to = argument.metrics_forward_to.value\n}\n\notelcol.exporter.loki + \"traceslogs\" {\n\tforward_to = [loki.process.traceslogs.receiver]\n}\n\n// The + OpenTelemetry spanlog connector processes incoming trace spans and extracts data + from them ready\n// for logging.\notelcol.connector.spanlogs \"autologging\" {\n\t// + We only want to output a line for each root span (ie. every single trace), and + not for every\n\t// process or span (outputting a line for every span would be + extremely verbose).\n\tspans = false\n\troots = true\n\tprocesses = false\n\n\t// + We want to ensure that the following three span attributes are included in the + log line, if present.\n\tspan_attributes = [\n\t\t\"http.method\",\n\t\t\"http.target\",\n\t\t\"http.status_code\",\n\t]\n\n\t// Overrides the default key in the log line to be `traceId`, which is then used by Grafana to\n\t// identify the trace ID for correlation with the Tempo datasource.\n\toverrides {\n\t\ttrace_id_key = \"traceId\"\n\t}\n\n\t// Send to the OpenTelemetry Loki @@ -603,19 +610,11 @@ data: = \"\"}\n\t}\n\t// The output stage takes the body (the main logline) and uses this as the source for the output\n\t// logline. 
In this case, it essentially turns it into logfmt.\n\tstage.output {\n\t\tsource = \"body\"\n\t}\n\n\tforward_to - = [loki.process.traceslogs.receiver]\n}\n\notelcol.processor.memory_limiter \"default\" - {\n\tcheck_interval = \"1s\"\n\tlimit_percentage = 50\n\tspike_limit_percentage - = 30\n\n\toutput {\n\t\tmetrics = [otelcol.processor.k8sattributes.default.input]\n\t\tlogs - \ = [otelcol.processor.k8sattributes.default.input]\n\t\ttraces = [otelcol.processor.k8sattributes.default.input]\n\t}\n}\n\notelcol.processor.k8sattributes - \"default\" {\n\toutput {\n\t\tmetrics = [otelcol.exporter.prometheus.tracesmetrics.input]\n\t\tlogs - \ = [otelcol.exporter.loki.traceslogs.input]\n\t\ttraces = argument.traces_forward_to.value\n\t}\n}\n\notelcol.exporter.prometheus - \"tracesmetrics\" {\n\tforward_to = argument.metrics_forward_to.value\n}\n\notelcol.exporter.loki - \"traceslogs\" {\n\tforward_to = [loki.process.traceslogs.receiver]\n}\n\nloki.process - \"traceslogs\" {\n\tstage.tenant {\n\t\tvalue = \"anonymous\"\n\t}\n\n\tforward_to - = argument.logs_forward_to.value\n}\n" + = [loki.process.traceslogs.receiver]\n}\n\nloki.process \"traceslogs\" {\n\tstage.tenant + {\n\t\tvalue = \"anonymous\"\n\t}\n\n\tforward_to = argument.logs_forward_to.value\n}\n" kind: ConfigMap metadata: - name: agent-modules-kbkcg9d4th + name: agent-modules-k2kfd9mm27 namespace: monitoring-system --- apiVersion: v1 @@ -839,7 +838,7 @@ spec: path: /var/log name: varlog - configMap: - name: agent-modules-kbkcg9d4th + name: agent-modules-k2kfd9mm27 name: agent-modules --- apiVersion: monitoring.coreos.com/v1 diff --git a/kubernetes/common/minio-tenant/k8s-all-in-one.yaml b/kubernetes/common/minio-tenant/k8s-all-in-one.yaml deleted file mode 100644 index 7f9065f4..00000000 --- a/kubernetes/common/minio-tenant/k8s-all-in-one.yaml +++ /dev/null @@ -1,128 +0,0 @@ -apiVersion: v1 -data: - config.env: | - ZXhwb3J0IE1JTklPX0JST1dTRVI9Im9uIgpleHBvcnQgTUlOSU9fUk9PVF9QQVNTV09SRD - 0iMUtGR0cxWlZCQlEwM0NLTEdaVTNLU1M0RFdNV0pJNFQiCmV4cG9ydCBNSU5JT19ST09U - X1VTRVI9IjBHTVFQMkRHQTQxSUJMVjAiCiNleHBvcnQgTUlOSU9fU1RPUkFHRV9DTEFTU1 - 9TVEFOREFSRD0iRUM6MiIKZXhwb3J0IE1JTklPX1BST01FVEhFVVNfQVVUSF9UWVBFPSJw - dWJsaWMiCmV4cG9ydCBNSU5JT19QUk9NRVRIRVVTX0pPQl9JRD0ibWluaW8tam9iIgo= -kind: Secret -metadata: - labels: - v1.min.io/tenant: codelab - name: codelab-minio-env - namespace: minio-system -type: Opaque ---- -apiVersion: v1 -data: - CONSOLE_ACCESS_KEY: bGd0bXA= - CONSOLE_SECRET_KEY: VkQ1MzhPWXhTRWlHRDRJOW1tRmZxRk1DR3ExdklpR20= -kind: Secret -metadata: - labels: - v1.min.io/tenant: codelab - name: user-lgtmp - namespace: minio-system -type: Opaque ---- -apiVersion: minio.min.io/v2 -kind: Tenant -metadata: - labels: - app: minio - name: codelab - namespace: minio-system -spec: - buckets: - - name: mimir-blocks - - name: mimir-ruler - - name: mimir-alertmanager - - name: loki-data - - name: loki-ruler - - name: tempo-data - - name: pyroscope-data - configuration: - name: codelab-minio-env - exposeServices: - console: false - minio: false - features: - bucketDNS: false - enableSFTP: false - image: quay.io/minio/minio:RELEASE.2023-11-15T20-43-25Z - imagePullPolicy: IfNotPresent - mountPath: /export - podManagementPolicy: Parallel - pools: - - name: pool-10gb - servers: 1 - volumeClaimTemplate: - metadata: - name: data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 10Gi - storageClassName: null - volumesPerServer: 1 - prometheusOperator: false - requestAutoCert: true - subPath: /data - users: - - name: user-lgtmp ---- 
-apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: minio - namespace: minio-system -spec: - endpoints: - - path: /minio/v2/metrics/cluster - port: https-minio - relabelings: - - replacement: minio-job - sourceLabels: - - job - targetLabel: job - - action: keep - regex: .*-0$ - sourceLabels: - - pod - scheme: https - tlsConfig: - insecureSkipVerify: true - - path: /minio/v2/metrics/bucket - port: https-minio - relabelings: - - replacement: minio-job-bucket - sourceLabels: - - job - targetLabel: job - scheme: https - tlsConfig: - insecureSkipVerify: true - - path: /minio/v2/metrics/resource - port: https-minio - relabelings: - - replacement: minio-job-resource - sourceLabels: - - job - targetLabel: job - scheme: https - tlsConfig: - insecureSkipVerify: true - namespaceSelector: - matchNames: - - minio-system - selector: - matchExpressions: - - key: prometheus.io/service-monitor - operator: NotIn - values: - - "false" - - key: v1.min.io/tenant - operator: Exists