diff --git a/ci/check_configuration_keys.py b/ci/check_configuration_keys.py
index 2c0a2db96d..32fd5661d1 100755
--- a/ci/check_configuration_keys.py
+++ b/ci/check_configuration_keys.py
@@ -241,6 +241,9 @@ def compare_values(readme: dict, values_keys: list[str], values: dict) -> dict:
         if compare_keys(this_key, other_key):
             other_value = get_value(this_key, values)
             if this_value != other_value:
+                if this_value.replace("\\\\", "\\").replace("\\|", "|") == other_value:
+                    # values.yaml contains both `'`- and `"`-quoted strings, while the README always uses `"` strings
+                    continue
                 # Skip configuration linked to values.yaml
                 if this_value == 'See [values.yaml]':
@@ -270,7 +273,7 @@ def get_value(key: str, dictionary: dict) -> str:
         value = value[subkey]
     if isinstance(value, str):
-        return value
+        return value.replace("\n", "\\n")
     return json.dumps(value)
diff --git a/ci/generate-schema.py b/ci/generate-schema.py
new file mode 100755
index 0000000000..f408fd7d74
--- /dev/null
+++ b/ci/generate-schema.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+
+import argparse
+import json
+import re
+import sys
+
+import yaml
+from yaml.loader import SafeLoader
+
+DESCRIPTION = 'This program generates JSON schema from values.yaml and the README.md table'
+
+def values_to_dictionary(path: str) -> dict:
+    """Reads the given path as values.yaml and returns it as a dict
+
+    Args:
+        path (str): path to the values.yaml
+
+    Returns:
+        dict: values.yaml as dict
+    """
+    with open(path, encoding='utf-8') as file:
+        values_yaml = file.read()
+        values_yaml = re.sub(r'(\[\]|\{\})\n(\s+# )', r'\n\2', values_yaml, flags=re.M)
+        values_yaml = re.sub(r'^(\s+)# ', r'\1', values_yaml, flags=re.M)
+        return yaml.load(values_yaml, Loader=SafeLoader)
+
+def set_properties(values):
+    properties = {
+        'type': '',
+        # 'required': [],
+        # 'properties': {},
+        # 'default': '',
+        'description': '',
+    }
+
+    if isinstance(values, dict):
+        properties['type'] = 'object'
+        properties['properties'] = {}
+        for key in values.keys():
+            properties['properties'][key] = set_properties(values[key])
+    else:
+        properties['default'] = values
+        if isinstance(values, bool):
+            properties['type'] = 'boolean'
+        elif isinstance(values, int):
+            properties['type'] = 'integer'
+        elif isinstance(values, (list, set)):
+            properties['type'] = 'array'
+        elif isinstance(values, str):
+            properties['type'] = 'string'
+        else:
+            properties['type'] = 'string'
+            if not properties['default']:
+                properties['default'] = ""
+
+    return properties
+
+def extract_description_from_readme(path: str) -> dict:
+    """Reads the given path as README.md and returns a dict in the following form:
+
+    ```
+    {
+        configuration_key: configuration_description
+    }
+    ```
+
+    Args:
+        path (str): path to the README.md
+
+    Returns:
+        dict: {configuration_key: configuration_description,...}
+    """
+    with open(path, encoding='utf-8') as file:
+        readme = file.readlines()
+
+    keys = {}
+
+    for line in readme:
+        match = re.match(
+            r'^\|\s+`(?P<key>.*?)`\s+\|\s+(?P<description>.*?)\s+\|\s+(?P<default>.*?)\s+\|$',
+            line)
+        if match and match.group('key'):
+            description = match.group('description').strip('`').strip('"')
+            keys[match.group('key')] = description
+
+    return keys
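+
+# Illustrative sketch (not part of the chart's actual data): for a values.yaml fragment
+#
+#   sumologic:
+#     accessId: ""
+#
+# set_properties() produces, for the `sumologic` node,
+#
+#   {"type": "object", "description": "",
+#    "properties": {"accessId": {"type": "string", "description": "", "default": ""}}}
+#
+# and a README row such as
+#
+#   | `sumologic.accessId` | Sumo access ID. | `Nil` |
+#
+# makes extract_description_from_readme() return {"sumologic.accessId": "Sumo access ID."}.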
"$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": {}, + } + + for key in values: + output['properties'][key] = set_properties(values[key]) + + descriptions = extract_description_from_readme(args.readme) + for key, description in descriptions.items(): + a = output['properties'] + subkeys = key.split(".") + for i in range(0, len(subkeys)-1): + a = a[subkeys[i]]['properties'] + a[subkeys[-1]]['description'] = description + with open(args.output, "w") as f: + f.write(json.dumps(output, indent=2)) diff --git a/ci/generate_readme.py b/ci/generate_readme.py new file mode 100755 index 0000000000..e5513bb70f --- /dev/null +++ b/ci/generate_readme.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python3 + +import argparse +import json +import re +import sys +import os + +import yaml +from yaml.loader import SafeLoader + +DESCRIPTION = "test" +HEADER = """# Configuration + +To see all available configuration for our sub-charts, please refer to their documentation. + +- [Falco](https://github.com/falcosecurity/charts/tree/master/falco#configuration) - All Falco properties should be prefixed with `falco.` + in our values.yaml to override a property not listed below. +- [Kube-Prometheus-Stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#configuration) - All + Kube Prometheus Stack properties should be prefixed with `kube-prometheus-stack.` in our values.yaml to override a property not listed + below. +- [Fluent Bit](https://github.com/fluent/helm-charts/blob/main/charts/fluent-bit/values.yaml) - All Fluent Bit properties should be prefixed + with `fluent-bit.` in our values.yaml to override a property not listed below. +- [Metrics Server](https://github.com/bitnami/charts/tree/master/bitnami/metrics-server/#parameters) - All Metrics Server properties should + be prefixed with `metrics-server.` in our values.yaml to override a property not listed below. +- [Tailing Sidecar Operator](https://github.com/SumoLogic/tailing-sidecar/tree/main/helm/tailing-sidecar-operator#configuration) - All + Tailing Sidecar Operator properties should be prefixed with `tailing-sidecar-operator` in our values.yaml to override a property not + listed below. +- [OpenTelemetry Operator](https://github.com/open-telemetry/opentelemetry-helm-charts/tree/main/charts/opentelemetry-operator#opentelemetry-operator-helm-chart) - + All OpenTelemetry Operator properties should be prefixed with `opentelemetry-operator` in our values.yaml to override a property listed + below. + +The following table lists the configurable parameters of the Sumo Logic chart and their default values. + +| Parameter | Description | Default | +| --- | --- | --- |""" + +FOOTER = """ +[values.yaml]: values.yaml""" + +def build_default(data): + return_value = {} + if 'properties' in data: + for key in data['properties']: + return_value[key] = build_default(data['properties'][key]) + return return_value + elif 'items' in data: + return [item['default'] for item in data['items']] + else: + return data['default'] + +def get_description(prefix, data): + return_value = [] + prefix = prefix.strip('.') + description = data["description"] if 'description' in data else "" + built_default = None + + if 'properties' in data: + if not description: + for key in data['properties']: + if prefix == "": + pref = key + else: + if "." 
+
+def main(schema, directory):
+    readme = [HEADER]
+    with open(schema) as f:
+        data = json.loads(f.read())
+    readme += get_description("", data)
+    readme.append(FOOTER)
+
+    readme = "\n".join(readme)
+
+    with open(os.path.join(directory, "README.md"), "w") as f:
+        f.write(readme)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        prog = sys.argv[0],
+        description = DESCRIPTION)
+    parser.add_argument('--schema', required=True)
+    parser.add_argument('--dir', required=True)
+    parser.add_argument('--full-diff', required=False, action='store_true')
+    args = parser.parse_args()
+
+    main(args.schema, args.dir)
diff --git a/ci/generate_values.py b/ci/generate_values.py
new file mode 100755
index 0000000000..e6d7da7fb5
--- /dev/null
+++ b/ci/generate_values.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python3
+
+import argparse
+import json
+import re
+import sys
+import os
+
+import yaml
+from yaml.loader import SafeLoader
+
+DESCRIPTION = 'This program generates values.yaml from JSON schema'
+
+
+def get_values(indent, data):
+    return_value = []
+    if 'properties' in data:
+        for key, value in data['properties'].items():
+            commented = ''
+            if 'comment' in value:
+                for line in value['comment'].split('\n'):
+                    if not line.strip():
+                        return_value.append(f"{indent}##")
+                    else:
+                        return_value.append(f"{indent}## {line}")
+            if 'commented' in value and value['commented']:
+                commented = '# '
+            if 'properties' in value:
+                return_value.append(f"{indent}{commented}{key}:")
+            elif 'items' in value:
+                return_value.append(f"{indent}{commented}{key}:")
+                for item in value['items']:
+                    commented = ''
+                    if 'commented' in item and item['commented']:
+                        commented = '# '
+                    if 'comment' in item:
+                        for line in item['comment'].split('\n'):
+                            if '#' in indent:
+                                return_value.append(f"{indent.replace('# ', '##')} {line.rstrip()}")
+                            else:
+                                return_value.append(f"{indent}## {line.rstrip()}")
+                    dumped = yaml.dump(item['default']).strip()
+                    first = True
+                    for line in dumped.split("\n"):
+                        if first:
+                            return_value.append(f"{indent}{commented}- {line}")
+                            first = False
+                            continue
+                        return_value.append(f"{indent}{commented}  {line}")
+            else:
+                dumped = yaml.dump({key: value['default']}).strip()
+                for line in dumped.split("\n"):
+                    if not line.strip():
+                        return_value.append(f"{indent}{commented.rstrip()}")
+                    else:
+                        return_value.append(f"{indent}{commented}{line.rstrip()}")
+            if 'example' in value:
+                dumped = yaml.dump({key: value['example']}).strip()
+                for line in dumped.split("\n")[1:]:
+                    if not line.strip():
+                        return_value.append(f"{indent}#")
+                    else:
+                        return_value.append(f"{indent}# {line}")
+            return_value += get_values(f"{indent}{commented}  ", data['properties'][key])
+    return return_value
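+
+# Illustrative sketch: `comment` and `commented` are the enrichment keys this script
+# expects in the schema (they are not standard JSON schema keywords), so a node like
+#
+#   {"properties": {"accessId": {"default": "", "comment": "Sumo access ID.",
+#     "commented": true}}}
+#
+# is rendered by get_values() as
+#
+#   ## Sumo access ID.
+#   # accessId: ''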
+
+def main(schema, directory):
+    with open(schema) as f:
+        data = json.loads(f.read())
+    values = ['## Sumo Logic Kubernetes Collection configuration file',
+              '## All the comments start with two or more # characters'] + get_values('', data)
+
+    print('\n'.join(values))
+
+    # with open(os.path.join(directory, "_values.yaml"), "w") as f:
+    #     f.write(yaml.dump(values))
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        prog = sys.argv[0],
+        description = DESCRIPTION)
+    parser.add_argument('--schema', required=True)
+    parser.add_argument('--dir', required=True)
+    parser.add_argument('--full-diff', required=False, action='store_true')
+    args = parser.parse_args()
+
+    main(args.schema, args.dir)
diff --git a/deploy/helm/sumologic/README.md b/deploy/helm/sumologic/README.md
index 3014983940..7535c29a8b 100644
--- a/deploy/helm/sumologic/README.md
+++ b/deploy/helm/sumologic/README.md
@@ -7,6 +7,8 @@ To see all available configuration for our sub-charts, please refer to their doc
 - [Kube-Prometheus-Stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#configuration) - All
   Kube Prometheus Stack properties should be prefixed with `kube-prometheus-stack.` in our values.yaml to override a property not listed
   below.
+- [Fluent Bit](https://github.com/fluent/helm-charts/blob/main/charts/fluent-bit/values.yaml) - All Fluent Bit properties should be prefixed
+  with `fluent-bit.` in our values.yaml to override a property not listed below.
 - [Metrics Server](https://github.com/bitnami/charts/tree/master/bitnami/metrics-server/#parameters) - All Metrics Server properties should
   be prefixed with `metrics-server.` in our values.yaml to override a property not listed below.
 - [Tailing Sidecar Operator](https://github.com/SumoLogic/tailing-sidecar/tree/main/helm/tailing-sidecar-operator#configuration) - All
@@ -18,453 +20,437 @@ To see all available configuration for our sub-charts, please refer to their doc
 The following table lists the configurable parameters of the Sumo Logic chart and their default values.
 
-| Parameter | Description | Default |
-| ------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| `nameOverride` | Used to override the Chart name. | `Nil` |
-| `fullnameOverride` | Used to override the chart's full name. Names longer than 22 characters will be truncated. | `Nil` |
-| `namespaceOverride` | Used to override the chart's default target namepace. | `Nil` |
-| `sumologic.setupEnabled` | If enabled, a pre-install hook will create Collector and Sources in Sumo Logic. | `true` |
-| `sumologic.cleanupEnabled` | If enabled, a pre-delete hook will destroy Kubernetes secret and Sumo Logic Collector. | `false` |
-| `sumologic.events.enabled` | Defines whether collection of Kubernetes events is enabled. | `true` |
-| `sumologic.events.sourceName` | Source name for the Events source. | `events` |
-| `sumologic.events.sourceCategory` | Source category for the Events source. | `{clusterName}/events` |
-| `sumologic.events.sourceCategoryReplaceDash` | Used to replace - with another character. | `"/"` |
-| `sumologic.events.persistence.enabled` | Enable persistence for the event collector. Persistence lets the collector avoid reingesting events on restart and buffer them locally if unable to reach the backend. 
| `true` | -| `sumologic.events.persistence.persistentVolume.path` | Local filesystem path the persistent storage volume will be mounted at. | `/var/lib/storage/events` | -| `sumologic.events.persistence.size` | Size of the persistent storage volume | `10Gi` | -| `sumologic.events.persistence.persistentVolume.storageClass` | The storageClassName for the persistent storage volume | `Nil` | -| `sumologic.events.persistence.persistentVolume.accessMode` | The accessMode for the persistent storage volume | `ReadWriteOnce` | -| `sumologic.events.persistence.persistentVolume.pvcLabels` | Additional PersistentVolumeClaim labels for persistent storage volumes | `{}` | -| `sumologic.events.sourceType` | The type of the Sumo Logic source being used for events ingestion. Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/logs-metrics/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/). | `http` | -| `sumologic.logs.enabled` | Set the enabled flag to false for disabling logs ingestion altogether. | `true` | -| `sumologic.logs.collector.otelcol.enabled` | Enable OpenTelemetry logs collector. | `true` | -| `sumologic.logs.collector.otelcloudwatch.enabled` | Flag to enable CloudWatch Collection | `false` | -| `sumologic.logs.collector.otelcloudwatch.logGroups` | Log Groups configuration for AWS CloudWatch receiver | `{}` | -| `sumologic.logs.collector.otelcloudwatch.persistence.enabled` | Flag to control persistence for the CloudWatch collector | `true` | -| `sumologic.logs.collector.otelcloudwatch.pollInterval` | CloudWatch poll interval | `1m` | -| `sumologic.logs.collector.otelcloudwatch.region` | EKS Fargate cluster region | `""` | -| `sumologic.logs.collector.otelcloudwatch.roleArn` | AWS role ARN, to authenticate with CloudWatch | `""` | -| `sumologic.logs.container.enabled` | Enable collecting logs from Kubernetes containers. | `true` | -| `sumologic.logs.container.format` | Format for container logs. | `fields` | -| `sumologic.logs.multiline.enabled` | Enable multiline detection for Kubernetes container logs. | `true` | -| `sumologic.logs.multiline.first_line_regex` | Regular expression to match first line of multiline logs. | `^\[?\d{4}-\d{1,2}-\d{1,2}.\d{2}:\d{2}:\d{2}` | -| `sumologic.logs.multiline.additional` | List of additional conditions and expressions to match first line of multiline logs. See [Multiline](/docs/collecting-container-logs.md#conditional-multiline-log-parsing) for more information. | `[]` | -| `sumologic.logs.systemd.enabled` | Enable collecting systemd logs from Kubernets nodes. | `true` | -| `sumologic.logs.systemd.units` | List of systemd units to collect logs from. | See [values.yaml] | -| `sumologic.logs.container.keep_time_attribute` | When set to `true`, preserves the `time` attribute, which is a string representation of the `timestamp` attribute. | `false` | -| `sumologic.logs.container.sourceHost` | Set the \_sourceHost metadata field in Sumo Logic. | `""` | -| `sumologic.logs.container.sourceName` | Set the \_sourceName metadata field in Sumo Logic. | `"%{namespace}.%{pod}.%{container}"` | -| `sumologic.logs.container.sourceCategory` | Set the \_sourceCategory metadata field in Sumo Logic. | `"%{namespace}/%{pod_name}"` | -| `sumologic.logs.container.sourceCategoryPrefix` | Set the prefix, for \_sourceCategory metadata. | `"kubernetes/"` | -| `sumologic.logs.container.sourceCategoryReplaceDash` | Used to replace - with another character. 
| `"/"` | -| `sumologic.logs.container.excludeContainerRegex` | A regular expression for container names. Logs from matching containers will not be sent to Sumo. | `""` | -| `sumologic.logs.container.excludeHostRegex` | A regular expression for Kubernetes node names. Logs from pods running on matching nodes will not be sent to Sumo. | `""` | -| `sumologic.logs.container.excludeNamespaceRegex` | A regular expression for Kubernetes namespace names. Logs from pods running in matching namespaces will not be sent to Sumo. | `""` | -| `sumologic.logs.container.excludePodRegex` | A regular expression for pod names. Logs from matching pods will not be sent to Sumo. | `""` | -| `sumologic.logs.container.otelcol.extraProcessors` | Extra processors for container logs. See [/docs/collecting-container-logs.md](/docs/collecting-container-logs.md) for details. | `[]` | -| `sumologic.logs.container.perContainerAnnotationsEnabled` | Enable container-level pod annotations. | `false` | -| `sumologic.logs.container.perContainerAnnotationPrefixes` | Defines the list of prefixes of container-level pod annotations. | `[]` | -| `sumologic.logs.systemd.sourceName` | Set the \_sourceName metadata field in Sumo Logic. | `"%{_sourceName}"` | -| `sumologic.logs.systemd.sourceCategory` | Set the \_sourceCategory metadata field in Sumo Logic. | `"system"` | -| `sumologic.logs.systemd.sourceCategoryPrefix` | Set the prefix, for \_sourceCategory metadata. | `"kubernetes/"` | -| `sumologic.logs.systemd.sourceCategoryReplaceDash` | Used to replace - with another character. | `"/"` | -| `sumologic.logs.systemd.excludeFacilityRegex` | A regular expression for facility. Matching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `""` | -| `sumologic.logs.systemd.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `Nil` | -| `sumologic.logs.systemd.excludePriorityRegex` | A regular expression for priority. Matching priority will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `""` | -| `sumologic.logs.systemd.excludeUnitRegex` | A regular expression for unit. Matching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `""` | -| `sumologic.logs.systemd.otelcol.extraProcessors` | Extra processors for systemd logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details. | `[]` | -| `sumologic.logs.kubelet.sourceName` | Set the \_sourceName metadata field in Sumo Logic. | `"k8s_kubelet"` | -| `sumologic.logs.kubelet.sourceCategory` | Set the \_sourceCategory metadata field in Sumo Logic. | `"kubelet"` | -| `sumologic.logs.kubelet.sourceCategoryPrefix` | Set the prefix, for \_sourceCategory metadata. | `"kubernetes/"` | -| `sumologic.logs.kubelet.sourceCategoryReplaceDash` | Used to replace - with another character. | `"/"` | -| `sumologic.logs.kubelet.excludeFacilityRegex` | A regular expression for facility. Matching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `""` | -| `sumologic.logs.kubelet.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `""` | -| `sumologic.logs.kubelet.excludePriorityRegex` | A regular expression for priority. Matching priority will be excluded from Sumo. 
The logs will still be sent to logs metadata provider (otelcol). | `""` | -| `sumologic.logs.kubelet.excludeUnitRegex` | A regular expression for unit. Matching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `""` | -| `sumologic.logs.kubelet.otelcol.extraProcessors` | Extra processors for systemd logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details. | `[]` | -| `sumologic.logs.fields` | Fields to be created at Sumo Logic to ensure logs are tagged with relevant metadata. [Sumo Logic help](https://help.sumologic.com/docs/manage/fields/#manage-fields) | `["cluster", "container", "daemonset", "deployment", "host", "namespace", "node", "pod", "service", "statefulset"]` | -| `sumologic.logs.additionalFields` | Additional Fields to be created in Sumo Logic. [Sumo Logic help](https://help.sumologic.com/docs/manage/fields/#manage-fields) | `[]` | -| `sumologic.logs.sourceType` | The type of the Sumo Logic source being used for logs ingestion. Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/logs-metrics/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/). | `http` | -| `sumologic.metrics.enabled` | Set the enabled flag to false for disabling metrics ingestion altogether. | `true` | -| `sumologic.metrics.otelcol.extraProcessors` | Extra processors configuration for metrics pipeline. See [/docs/collecting-application-metrics.md#metrics-modifications](/docs/collecting-application-metrics.md#metrics-modifications) for more information. | `[]` | -| `sumologic.metrics.remoteWriteProxy.enabled` | Enable a load balancing proxy for Prometheus remote writes. [See docs for more information.](/docs/prometheus.md#using-a-load-balancing-proxy-for-prometheus-remote-write) | `false` | -| `sumologic.metrics.remoteWriteProxy.config.clientBodyBufferSize` | See the [nginx documentation](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size). Increase if you've also increased samples per send in Prometheus remote write. | `64k` | -| `sumologic.metrics.remoteWriteProxy.config.workerCountAutotune` | This feature autodetects how much CPU is assigned to the nginx instance and setsthe right amount of workers based on that. Disable to use the default of 8 workers. | `true` | -| `sumologic.metrics.remoteWriteProxy.config.enableAccessLogs` | Enable nginx access logs. | `false` | -| `sumologic.metrics.remoteWriteProxy.replicaCount` | Number of replicas in the remote write proxy deployment. | `3` | -| `sumologic.metrics.remoteWriteProxy.image` | Nginx docker image for the remote write proxy. | `{"repository": "public.ecr.aws/sumologic/nginx-unprivileged", "tag": "1.25.2-alpine", "pullPolicy": "IfNotPresent"}` | -| `sumologic.metrics.remoteWriteProxy.resources` | Resource requests and limits for the remote write proxy container. | `{"limits": {"cpu": "1000m", "memory": "256Mi"}, "requests": {"cpu": "100m", "memory": "128Mi"}}` | -| `sumologic.metrics.remoteWriteProxy.livenessProbe` | Liveness probe settings for the remote write proxy container. | `{"initialDelaySeconds": 30, "periodSeconds": 10, "timeoutSeconds": 5, "successThreshold": 1, "failureThreshold": 6}` | -| `sumologic.metrics.remoteWriteProxy.readinessProbe` | Readiness probe settings for the remote write proxy container. 
| `{"initialDelaySeconds": 5, "periodSeconds": 5, "timeoutSeconds": 3, "successThreshold": 1, "failureThreshold": 3}` | -| `sumologic.metrics.remoteWriteProxy.securityContext` | The securityContext configuration for the remote write proxy. | `{}` | -| `sumologic.metrics.remoteWriteProxy.nodeSelector` | Node selector for the remote write proxy deployment. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | -| `sumologic.metrics.remoteWriteProxy.tolerations` | Tolerations for the remote write proxy deployment. | `[]` | -| `sumologic.metrics.remoteWriteProxy.affinity` | Affinity for the remote write proxy deployment. | `{}` | -| `sumologic.metrics.remoteWriteProxy.priorityClassName` | Priority class name for the remote write proxy deployment. | `Nil` | -| `sumologic.metrics.remoteWriteProxy.podLabels` | Additional labels for the remote write proxy container. | `{}` | -| `sumologic.metrics.remoteWriteProxy.podAnnotations` | Additional annotations for for the remote write proxy container. | `{}` | -| `sumologic.metrics.remoteWriteProxy.config.port` | Port on which remote write proxy is going to be exposed | `8080` | -| `sumologic.metrics.serviceMonitors` | Configuration of Sumo Logic Kubernetes Collection components serviceMonitors | See [values.yaml] | -| `sumologic.metrics.collector.otelcol.enabled` | Enable experimental otelcol metrics collector | See [values.yaml] | -| `sumologic.metrics.collector.otelcol.scrapeInterval` | The default scrape interval for the collector. | `30s` | -| `sumologic.metrics.collector.otelcol.replicaCount` | Replica count for the experimental otelcol metrics collector | `1` | -| `sumologic.metrics.collector.otelcol.resources` | Resource requests and limits for the experimental otelcol metrics collector | See [values.yaml] | -| `sumologic.metrics.collector.otelcol.autoscaling.enabled` | Option to turn autoscaling on for the experimental otelcol metrics and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. collector | `false` | -| `sumologic.metrics.collector.otelcol.autoscaling.maxReplicas` | Default max replicas for autoscaling. collector | `10` | -| `sumologic.metrics.collector.otelcol.autoscaling.minReplicas` | Default min replicas for autoscaling. collector | `3` | -| `sumologic.metrics.collector.otelcol.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `70` | -| `sumologic.metrics.collector.otelcol.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `70` | -| `sumologic.metrics.collector.otelcol.serviceMonitorSelector` | Selector for ServiceMonitors used for target discovery. By default, we select ServiceMonitors created by the Chart. See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr | `Nil` | -| `sumologic.metrics.collector.otelcol.podMonitorSelector` | Selector for PodMonitors used for target discovery. By default, we select PodMonitors created by the Chart. See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr | `Nil` | -| `sumologic.metrics.collector.otelcol.nodeSelector` | Node selector for the experimental otelcol metrics. [See docs/best-practices.md for more information.](/docs/best-practices.md). 
| `{}` | -| `sumologic.metrics.collector.otelcol.podAnnotations` | Additional annotations for the experimental otelcol metrics pods. | `{}` | -| `sumologic.metrics.collector.otelcol.podLabels` | Additional labels for the experimental otelcol metrics pods. | `{}` | -| `sumologic.metrics.collector.otelcol.priorityClassName` | Priority class name for the experimental otelcol metrics. | `null` | -| `sumologic.metrics.collector.otelcol.securityContext` | The securityContext configuration for the experimental otelcol metrics. | `{"fsGroup": 999}` | -| `sumologic.metrics.collector.otelcol.tolerations` | Tolerations for the experimental otelcol metrics. | `[]` | -| `sumologic.metrics.enableDefaultFilters` | Enable default metric filters for Sumo Apps. | `false` | -| `sumologic.metrics.collector.otelcol.kubelet.enabled` | Enable collection of kubelet metrics. | `true` | -| `sumologic.metrics.collector.otelcol.cAdvisor.enabled` | Enable collection of cAdvisor metrics. | `true` | -| `sumologic.metrics.collector.otelcol.annotatedPods.enabled` | Enable collection of metrics from Pods annotated with prometheus.io/\* keys. See [docs/collecting-application-metrics.md](/docs/collecting-application-metrics.md#application-metrics-are-exposed-one-endpoint-scenario) for more information. | `true` | -| `sumologic.metrics.collector.otelcol.allocationStrategy` | Allocation strategy for the scrape target allocator. Valid values are: least-weighted and consistent-hashing. See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocator | `least-weighted` | -| `sumologic.metrics.dropHistogramBuckets` | Drop buckets from select high-cardinality histogram metrics, leaving only the sum and count components. | `true` | -| `sumologic.metrics.sourceType` | The type of the Sumo Logic source being used for metrics ingestion. Can be `http` or `otlp`. | `http` | -| `sumologic.traces.enabled` | Set the enabled flag to true to enable tracing ingestion. _Tracing must be enabled for the account first. Please contact your Sumo representative for activation details_ | `true` | -| `sumologic.traces.spans_per_request` | Maximum number of spans sent in single batch | `100` | -| `sumologic.traces.sourceType` | The type of the Sumo Logic source being used for traces ingestion. Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/traces/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/). | `http` | -| `sumologic.envFromSecret` | If enabled, accessId and accessKey will be sourced from Secret Name given. Be sure to include at least the following env variables in your secret (1) SUMOLOGIC_ACCESSID, (2) SUMOLOGIC_ACCESSKEY | `sumo-api-secret` | -| `sumologic.accessId` | Sumo access ID. | `Nil` | -| `sumologic.accessKey` | Sumo access key. | `Nil` | -| `sumologic.endpoint` | Sumo API endpoint; Leave blank for automatic endpoint discovery and redirection. | `Nil` | -| `sumologic.collectionMonitoring` | If you set it to false, it would set `EXCLUDE_NAMESPACE=` and not add the Prometheus remotestorage metrics. | `true` | -| `sumologic.collectorName` | The name of the Sumo Logic collector that will be created in the SetUp job. Defaults to `clusterName` if not specified. | `Nil` | -| `sumologic.clusterName` | An identifier for the Kubernetes cluster. Whitespaces in the cluster name will be replaced with dashes. 
| `kubernetes` | -| `sumologic.cluster` | Configuration of Kubernetes for [Terraform client](https://www.terraform.io/docs/providers/kubernetes/index.html#argument-reference). | See [values.yaml] | -| `sumologic.collector.sources` | Configuration of HTTP sources. [See docs/Terraform.md for more information](/docs/terraform.md). All double quotes should be escaped here regarding Terraform syntax. | See [values.yaml] | -| `sumologic.collector.fields` | Configuration of Sumo Logic fields. [See Sumo Logic Terraform Plugin documentation for more information](https://registry.terraform.io/providers/SumoLogic/sumologic/latest/docs/resources/collector#fields). All double quotes should be escaped here regarding Terraform syntax. | See [values.yaml] | -| `sumologic.httpProxy` | HTTP proxy URL | `Nil` | -| `sumologic.httpsProxy` | HTTPS proxy URL | `Nil` | -| `sumologic.noProxy` | List of comma separated hostnames which should be excluded from the proxy | `kubernetes.default.svc` | -| `sumologic.pullSecrets` | Optional list of secrets that will be used for pulling images for Sumo Logic's deployments and statefulsets. | `Nil` | -| `sumologic.otelcolImage.repository` | Default image repository for OpenTelemetry Collector. This can be overridden for specific components. | `public.ecr.aws/sumologic/sumologic-otel-collector` | -| `sumologic.otelcolImage.tag` | Default image tag for OpenTelemetry Collector. This can be overridden for specific components. | `0.85.0-sumo-0` | -| `sumologic.otelcolImage.addFipsSuffix` | Add a `-fips` suffix to all image tags. See [docs/security-best-practices.md](/docs/security-best-practices.md) for more information. | `false` | -| `sumologic.podLabels` | Additional labels for the pods. | `{}` | -| `sumologic.podAnnotations` | Additional annotations for the pods. | `{}` | -| `sumologic.scc.create` | Create OpenShift's Security Context Constraint | `false` | -| `sumologic.serviceAccount.annotations` | Add custom annotations to sumologic serviceAccounts | `{}` | -| `sumologic.setup.job.pullSecrets` | Optional list of secrets that will be used for pulling images for Sumo Logic's setup job. | `Nil` | -| `sumologic.setup.job.podLabels` | Additional labels for the setup Job pod. | `{}` | -| `sumologic.setup.job.podAnnotations` | Additional annotations for the setup Job pod. | `{}` | -| `sumologic.setup.job.image.repository` | Image repository for Sumo Logic setup job docker container. | `public.ecr.aws/sumologic/kubernetes-setup` | -| `sumologic.setup.job.image.tag` | Image tag for Sumo Logic setup job docker container. | `3.10.0` | -| `sumologic.setup.job.image.pullPolicy` | Image pullPolicy for Sumo Logic docker container. | `IfNotPresent` | -| `sumologic.setup.job.nodeSelector` | Node selector for sumologic setup job. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | -| `sumologic.setup.job.tolerations` | Add tolerations for the setup Job. | `[]` | -| `sumologic.setup.job.affinity` | Add affinity and anti-affinity for the setup Job. | `{}` | -| `sumologic.setup.debug` | Enable debug mode (disables the automatic execution of the setup.sh script) | `Nil` | -| `sumologic.setup.force` | Force collection installation (disables k8s version verification) | `Nil` | -| `sumologic.setup.job.resources` | Resource requests and limits for the setup Job. 
| `{"limits": {"memory": "256Mi", "cpu": "2000m"}, "requests": {"memory": "64Mi", "cpu": "200m"}}` | -| `sumologic.setup.monitors.enabled` | If enabled, a pre-install hook will create k8s monitors in Sumo Logic. | `true` | -| `sumologic.setup.monitors.monitorStatus` | The installed monitors default status: enabled/disabled. | `enabled` | -| `sumologic.setup.monitors.notificationEmails` | A list of emails to send notifications from monitors. | `[]` | -| `sumologic.setup.dashboards.enabled` | If enabled, a pre-install hook will install k8s dashboards in Sumo Logic. | `true` | -| `metrics-server.enabled` | Set the enabled flag to true for enabling metrics-server. This is required before enabling fluentd autoscaling unless you have an existing metrics-server in the cluster. | `false` | -| `metrics-server.fullnameOverride` | Used to override the chart's full name. | `Nil` | -| `metrics-server.apiService.create` | Specifies whether the v1beta1.metrics.k8s.io API service should be created. | `true` | -| `metrics-server.extraArgs` | Extra arguments to pass to metrics-server on start up. | `["--kubelet-insecure-tls=true", "--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname"]` | -| `metrics-server.image.pullSecrets` | Pull secrets for metrics-server images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config). | `Nil` | -| `kube-prometheus-stack.kubeTargetVersionOverride` | Provide a target gitVersion of K8S, in case .Capabilites.KubeVersion is not available (e.g. helm template). Changing this may break Sumo Logic apps. | `Nil` | -| `kube-prometheus-stack.enabled` | Flag to control deploying Prometheus Operator Helm sub-chart. | `true` | -| `kube-prometheus-stack.fullnameOverride` | Used to override the chart's full name. | `Nil` | -| `kube-prometheus-stack.namespaceOverride` | Used to override the chart's default namespace. | `Nil` | -| `kube-prometheus-stack.defaultRules.rules` | Control which default recording and alerting rules are enabled. | See [values.yaml] | -| `kube-prometheus-stack.alertmanager.enabled` | Deploy alertmanager. | `false` | -| `kube-prometheus-stack.grafana.enabled` | If true, deploy the grafana sub-chart. | `false` | -| `kube-prometheus-stack.grafana.defaultDashboardsEnabled` | Deploy default dashboards. These are loaded using the sidecar. | `false` | -| `kube-prometheus-stack.prometheusOperator.enabled` | Enable prometheus-operator | `false` | -| `kube-prometheus-stack.prometheusOperator.podLabels` | Additional labels for prometheus operator pods. | `{}` | -| `kube-prometheus-stack.prometheusOperator.podAnnotations` | Additional annotations for prometheus operator pods. | `{}` | -| `kube-prometheus-stack.prometheusOperator.resources` | Resource limits for prometheus operator. Uses sub-chart defaults. | `{"limits": {"cpu": "200m", "memory": "200Mi"}, "requests": {"cpu": "100m", "memory": "100Mi"}}` | -| `kube-prometheus-stack.prometheusOperator.serviceMonitor` | Prometheus operator ServiceMonitor | `{"selfMonitor": false}` | -| `kube-prometheus-stack.prometheusOperator.admissionWebhooks.enabled` | Create PrometheusRules admission webhooks. Mutating webhook will patch PrometheusRules objects indicating they were validated. Validating webhook will check the rules syntax. | `false` | -| `kube-prometheus-stack.prometheusOperator.tls.enabled` | Enable TLS in prometheus operator. 
| `false` | -| `kube-prometheus-stack.kube-state-metrics.fullnameOverride` | Used to override the chart's full name. | `Nil` | -| `kube-prometheus-stack.kube-state-metrics.resources` | Resource limits for kube state metrics. Uses sub-chart defaults. | `{"limits": {"cpu": "100m", "memory": "64Mi"}, "requests": {"cpu": "10m", "memory": "32Mi"}}` | -| `kube-prometheus-stack.kube-state-metrics.customLabels` | Custom labels to apply to service, deployment and pods. Uses sub-chart defaults. | `{}` | -| `kube-prometheus-stack.kube-state-metrics.podAnnotations` | Additional annotations for pods in the DaemonSet. Uses sub-chart defaults. | `{}` | -| `kube-prometheus-stack.prometheus.enabled` | Enable Prometheus | `false` | -| `kube-prometheus-stack.prometheus.additionalServiceMonitors` | List of ServiceMonitor objects to create. | See [values.yaml] | -| `kube-prometheus-stack.prometheus.prometheusSpec.resources` | Resource limits for prometheus. Uses sub-chart defaults. | `{"limits": {"cpu": "2000m", "memory": "8Gi"}, "requests": {"cpu": "500m", "memory": "1Gi"}}` | -| `kube-prometheus-stack.prometheus.prometheusSpec.containers` | Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod. | See [values.yaml] | -| `kube-prometheus-stack.prometheus.prometheusSpec.podMetadata.labels` | Add custom pod labels to prometheus pods | `{}` | -| `kube-prometheus-stack.prometheus.prometheusSpec.podMetadata.annotations` | Add custom pod annotations to prometheus pods | `{}` | -| `kube-prometheus-stack.prometheus.prometheusSpec.remoteWrite` | If specified, the remote_write spec. | See [values.yaml] | -| `kube-prometheus-stack.prometheus.prometheusSpec.walCompression` | Enables walCompression in Prometheus | `true` | -| `kube-prometheus-stack.prometheus-node-exporter.fullnameOverride` | Used to override the chart's full name. | `Nil` | -| `kube-prometheus-stack.prometheus-node-exporter.podLabels` | Additional labels for prometheus-node-exporter pods. | `{}` | -| `kube-prometheus-stack.prometheus-node-exporter.podAnnotations` | Additional annotations for prometheus-node-exporter pods. | `{}` | -| `kube-prometheus-stack.prometheus-node-exporter.resources` | Resource limits for node exporter. Uses sub-chart defaults. | `{"limits": {"cpu": "200m", "memory": "50Mi"}, "requests": {"cpu": "100m", "memory": "30Mi"}}` | -| `kube-prometheus-stack.prometheus-node-exporter.nodeSelector` | Node selector for prometheus node exporter. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | -| `kube-prometheus-stack.kube-state-metrics.nodeSelector` | Node selector for kube-state-metrics. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | -| `kube-prometheus-stack.kube-state-metrics.image.tag` | Tag for kube-state-metrics Docker image. | `v2.7.0` | -| `kube-prometheus-stack.commonLabels` | Labels to apply to all Kube Prometheus Stack resources | `{}` | -| `kube-prometheus-stack.coreDns.serviceMonitor.interval` | Core DNS metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | -| `kube-prometheus-stack.coreDns.serviceMonitor.metricRelabelings` | Core DNS MetricRelabelConfigs | See [values.yaml] | -| `kube-prometheus-stack.global.imagePullSecrets` | Pull secrets for Kube Prometheus Stack images. 
For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config). | `[]` | -| `kube-prometheus-stack.kubeApiServer.serviceMonitor.interval` | Kubernetes API Server metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | -| `kube-prometheus-stack.kubeApiServer.serviceMonitor.metricRelabelings` | Kubernetes API Server MetricRelabelConfigs | See [values.yaml] | -| `kube-prometheus-stack.kubeControllerManager.serviceMonitor.interval` | Kubernetes Controller Manager metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | -| `kube-prometheus-stack.kubeControllerManager.serviceMonitor.metricRelabelings` | Kubernetes Controller Manager MetricRelabelConfigs | See [values.yaml] | -| `kube-prometheus-stack.kubeEtcd.serviceMonitor.interval` | Kubernetes Etcd metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | -| `kube-prometheus-stack.kubeEtcd.serviceMonitor.metricRelabelings` | Kubernetes Etcd MetricRelabelConfigs | See [values.yaml] | -| `kube-prometheus-stack.kubeScheduler.serviceMonitor.interval` | Kubernetes Scheduler metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | -| `kube-prometheus-stack.kubeScheduler.serviceMonitor.metricRelabelings` | Kubernetes Scheduler MetricRelabelConfigs | See [values.yaml] | -| `kube-prometheus-stack.kube-state-metrics.prometheus.monitor.interval` | Kubernetes State Metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | -| `kube-prometheus-stack.kube-state-metrics.prometheus.monitor.metricRelabelings` | Kubernetes State Metrics MetricRelabelConfigs | See [values.yaml] | -| `kube-prometheus-stack.kubelet.serviceMonitor.cAdvisorMetricRelabelings` | Kubelet CAdvisor MetricRelabelConfigs | See [values.yaml] | -| `kube-prometheus-stack.kubelet.serviceMonitor.interval` | Kubelet metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | -| `kube-prometheus-stack.kubelet.serviceMonitor.metricRelabelings` | Kubelet MetricRelabelConfigs | See [values.yaml] | -| `kube-prometheus-stack.kubelet.serviceMonitor.probes` | Enable scraping /metrics/probes from kubelet's service | `false` | -| `kube-prometheus-stack.kubelet.serviceMonitor.resource` | Enable scraping /metrics/resource from kubelet's service | `false` | -| `kube-prometheus-stack.prometheus-node-exporter.prometheus.monitor.interval` | Node Exporter scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | -| `kube-prometheus-stack.prometheus-node-exporter.prometheus.monitor.metricRelabelings` | Node Exporter MetricRelabelConfigs | See [values.yaml] | -| `kube-prometheus-stack.prometheus.prometheusSpec.additionalScrapeConfigs` | Additional Prometheus scrape configurations | See [values.yaml] | -| `kube-prometheus-stack.prometheus.prometheusSpec.initContainers` | InitContainers allows injecting additional Prometheus initContainers. | See [values.yaml] | -| `kube-prometheus-stack.prometheus.prometheusSpec.retention` | How long to retain metrics in Prometheus | `1d` | -| `kube-prometheus-stack.prometheus.prometheusSpec.scrapeInterval` | Prometheus metrics scrape interval. If not set, the Prometheus default scrape interval is used. 
| `30s` | -| `kube-prometheus-stack.prometheus.serviceMonitor.selfMonitor` | Enable scraping Prometheus metrics | `false` | -| `falco.enabled` | Flag to control deploying Falco Helm sub-chart. | `false` | -| `falco.fullnameOverride` | Used to override the chart's full name. | `Nil` | -| `falco.addKernelDevel` | Flag to control installation of `kernel-devel` on nodes using MachineConfig, required to build falco modules (only for OpenShift) | `true` | -| `falco.extra.initContainers` | InitContainers for Falco pod | See [values.yaml] | -| `falco.falco.json_output` | Output events in json. | `true` | -| `falco.imagePullSecrets` | Pull secrets for falco images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config). | `[]` | -| `falco.customRules` | Additional falco rules related to Sumo Logic Kubernetes Collection | See [values.yaml] | -| `falco.driver.kind` | Tell Falco which driver to use. Available options: module (kernel driver) and ebpf (eBPF probe). Set to `ebpf` for GKE | `module` | -| `falco.driver.loader.initContainer.image` | Init container image configuration for falco driver loader. | `{"registry": "public.ecr.aws", "repository": "falcosecurity/falco-driver-loader"}` | -| `falco.falco.load_plugins` | Names of the plugins to be loaded by Falco. | `["json", "k8saudit"]` | -| `falco.falco.rules_file` | The location of the rules files that will be consumed by Falco. | `["/etc/falco/falco_rules.yaml", "/etc/falco/falco_rules.local.yaml", "/etc/falco/k8s_audit_rules.yaml", "/etc/falco/rules.d", "/etc/falco/rules.available/application_rules.yaml"]` | -| `falco.image.registry` | Image registry for falco docker container. | `public.ecr.aws` | -| `falco.image.repository` | Image repository for falco docker container. | `falcosecurity/falco-no-driver` | -| `falco.falcoctl` | Falcoctl configuration. We don't use it for now due to breaking changes. [See this issue](https://github.com/SumoLogic/sumologic-kubernetes-collection/issues/3144). | `{"artifact": {"follow": {"enabled": false}, "install": {"enabled": false}}}` | -| `telegraf-operator.enabled` | Flag to control deploying Telegraf Operator Helm sub-chart. | `false` | -| `telegraf-operator.fullnameOverride` | Used to override the chart's full name. | `Nil` | -| `telegraf-operator.replicaCount` | Replica count for Telegraf Operator pods. | 1 | -| `telegraf-operator.classes.secretName` | Secret name in which the Telegraf Operator configuration will be stored. | `telegraf-operator-classes` | -| `telegraf-operator.classes.data` | Telegraf sidecar configuration. | See [values.yaml] | -| `telegraf-operator.classes.default` | Name of the default output configuration. | `sumologic-prometheus` | -| `telegraf-operator.image.sidecarImage` | Telegraf Operator sidecar image. | `public.ecr.aws/sumologic/telegraf:1.21.2` | -| `telegraf-operator.imagePullSecrets` | Pull secrets for Telegraf Operator images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config). | `[]` | -| `opentelemetry-operator.enabled` | Flag to control deploying OpenTelemetry Operator Helm sub-chart. 
| `true` | -| `opentelemetry-operator.createDefaultInstrumentation` | Flag to control creation of default Instrumentation object | `false` | -| `opentelemetry-operator.instrumentation.dotnet.metrics.enabled` | Flag to control metrics export from DotNet instrumentation in `Instrumentation` resource. | `true` | -| `opentelemetry-operator.instrumentation.dotnet.traces.enabled` | Flag to control traces export from DotNet instrumentation in `Instrumentation` resource. | `true` | -| `opentelemetry-operator.instrumentation.java.metrics.enabled` | Flag to control metrics export from Java instrumentation in `Instrumentation` resource. | `true` | -| `opentelemetry-operator.instrumentation.java.traces.enabled` | Flag to control traces export from Java instrumentation in `Instrumentation` resource. | `true` | -| `opentelemetry-operator.instrumentation.python.metrics.enabled` | Flag to control metrics export from Python instrumentation in `Instrumentation` resource. | `true` | -| `opentelemetry-operator.instrumentation.python.traces.enabled` | Flag to control traces export from Python instrumentation in `Instrumentation` resource. | `true` | -| `opentelemetry-operator.manager.collectorImage.repository` | The default collector image repository for OpenTelemetryCollector CRDs. | `public.ecr.aws/sumologic/sumologic-otel-collector` | -| `opentelemetry-operator.manager.collectorImage.tag` | The default collector image tag for OpenTelemetryCollector CRDs. | `0.85.0-sumo-0` | -| `opentelemetry-operator.manager.resources.limits.cpu` | Used to set limit CPU for OpenTelemetry-Operator Manager. | `250m` | -| `opentelemetry-operator.manager.resources.limits.memory` | Used to set limit Memory for OpenTelemetry-Operator Manager. | `512Mi` | -| `opentelemetry-operator.manager.resources.requests.cpu` | Used to set requested CPU for OpenTelemetry-Operator Manager. | `150m` | -| `opentelemetry-operator.manager.resources.requests.memory` | Used to set requested Memory for OpenTelemetry-Operator Manager. | `256Mi` | -| `opentelemetry-operator.instrumentationNamespaces` | Used to create `Instrumentation` resources in specified namespaces. | `Nil` | -| `opentelemetry-operator.instrumentationJobImage.image.repository` | Name of the image repository used to apply Instrumentation resource | `sumologic/kubernetes-tools` | -| `opentelemetry-operator.instrumentationJobImage.image.tag` | Name of the image tag used to apply Instrumentation resource | `2.14.0` | -| `opentelemetry-operator.admissionWebhooks` | Admission webhooks make sure only requests with correctly formatted rules will get into the Operator. They also enable the sidecar injection for OpenTelemetryCollector and Instrumentation CR's. | See [values.yaml] | -| `opentelemetry-operator.manager.env` | Additional environment variables for opentelemetry-operator helm chart. | `Nil` | -| `otelcolInstrumentation.enabled` | Enables Sumo Otel Distro Collector StatefulSet to collect telemetry data. [See docs for more information.](/docs/opentelemetry-collector/traces.md) | `true` | -| `otelcolInstrumentation.autoscaling.enabled` | Option to turn autoscaling on for Sumo Otel Distro Collector StatefulSet and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | -| `otelcolInstrumentation.autoscaling.minReplicas` | Default min replicas for autoscaling. 
| `3` | -| `otelcolInstrumentation.autoscaling.maxReplicas` | Default max replicas for autoscaling | `10` | -| `otelcolInstrumentation.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `100` | -| `otelcolInstrumentation.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `50` | -| `otelcolInstrumentation.statefulset.replicaCount` | Set the number of otelcol-instrumentation replicasets. | `3` | -| `otelcolInstrumentation.statefulset.nodeSelector` | Node selector for otelcol-instrumentation statefulset. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | -| `otelcolInstrumentation.statefulset.priorityClassName` | Priority class name for otelcol-instrumentation pods. | If not provided then set to `RELEASE-NAME-sumologic-priorityclass`. | -| `otelcolInstrumentation.statefulset.affinity` | Affinity for otelcol-instrumentation statefulset. | `{}` | -| `otelcolInstrumentation.statefulset.extraEnvVars` | Additional environment variables for otelcol-instrumentation pods. | `{}` | -| `otelcolInstrumentation.statefulset.extraVolumeMounts` | Additional volume mounts for otelcol-instrumentation pods. | `{}` | -| `otelcolInstrumentation.statefulset.extraVolumes` | Additional volumes for otelcol-instrumentation pods. | `{}` | -| `otelcolInstrumentation.statefulset.image.pullPolicy` | Image pullPolicy for otelcol-instrumentation docker container. | `IfNotPresent` | -| `otelcolInstrumentation.statefulset.image.repository` | Image repository for otelcol-instrumentation docker container. | `` | -| `otelcolInstrumentation.statefulset.image.tag` | Image tag for otelcol-instrumentation docker container. | `` | -| `otelcolInstrumentation.statefulset.podAnnotations` | Additional annotations for otelcol-instrumentation pods. | `{}` | -| `otelcolInstrumentation.statefulset.podAntiAffinity` | PodAntiAffinity for otelcol-instrumentation statefulset. | `soft` | -| `otelcolInstrumentation.statefulset.podLabels` | Additional labels for otelcol-instrumentation pods. | `{}` | -| `otelcolInstrumentation.statefulset.resources` | Resources for otelcol-instrumentation statefulset. | `{"limits": {"memory": "4Gi", "cpu": "2000m"}, "requests": {"memory": "768Mi", "cpu": "500m"}}` | -| `otelcolInstrumentation.statefulset.tolerations` | Tolerations for otelcol-instrumentation statefulset. | `[]` | -| `otelcolInstrumentation.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for otelcol-instrumentation statefulset. | `[]` | -| `otelcolInstrumentation.sourceMetadata.sourceName` | Set the \_sourceName metadata field in Sumo Logic. | `"%{k8s.namespace.name}.%{k8s.pod.pod_name}.%{k8s.container.name}"` | -| `otelcolInstrumentation.sourceMetadata.sourceCategory` | Set the \_sourceCategory metadata field in Sumo Logic. | `"%{k8s.namespace.name}/%{k8s.pod.pod_name}"` | -| `otelcolInstrumentation.sourceMetadata.sourceCategoryPrefix` | Set the prefix, for \_sourceCategory metadata. | `"kubernetes/"` | -| `otelcolInstrumentation.sourceMetadata.sourceCategoryReplaceDash` | Used to replace - with another character. | `"/"` | -| `otelcolInstrumentation.sourceMetadata.excludeContainerRegex` | A regular expression for containers. Matching containers will be excluded from Sumo. | `""` | -| `otelcolInstrumentation.sourceMetadata.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. 
| `""` | -| `otelcolInstrumentation.sourceMetadata.excludeNamespaceRegex` | A regular expression for namespaces. Matching namespaces will be excluded from Sumo. | `""` | -| `otelcolInstrumentation.sourceMetadata.excludePodRegex` | A regular expression for pods. Matching pods will be excluded from Sumo. | `""` | -| `otelcolInstrumentation.logLevelFilter` | Do not send otelcol-instrumentation logs if `true`. | `false` | -| `otelcolInstrumentation.config.processors.batch.send_batch_size` | Sets the preferred size of batch. | `256` | -| `otelcolInstrumentation.config.processors.batch.send_batch_max_size` | Sets the maximum allowed size of a batch. Use with caution, setting too large value might cause 413 Payload Too Large errors. | `512` | -| `otelcolInstrumentation.config.processors.memory_limiter.limit_percentage` | Sets the maximum amount of memory, in %, targeted to be allocated by the process heap. | `75` | -| `otelcolInstrumentation.config.processors.memory_limiter.spike_limit_percentage` | Sets the maximum spike expected between the measurements of memory usage, in %. | `20` | -| `otelcolInstrumentation.config` | Configuration for otelcol-instrumentation | See [values.yaml] | -| `otelcolInstrumentation.statefulset.containers.otelcol.startupProbe` | Startup probe configuration for the otelcol-instrumentation container. | `{"periodSeconds": 3, "failureThreshold": 60}` | -| `otelcolInstrumentation.statefulset.containers.otelcol.livenessProbe` | Liveness probe settings for the otelcol-instrumentation container. | `{"initialDelaySeconds": 15, "periodSeconds": 15, "timeoutSeconds": 10, "failureThreshold": 3}` | -| `otelcolInstrumentation.statefulset.containers.otelcol.readinessProbe` | Readiness probe settings for the otelcol-instrumentation container. | `{"initialDelaySeconds": 5, "periodSeconds": 10, "timeoutSeconds": 3, "failureThreshold": 3}` | -| `otelcolInstrumentation.statefulset.containers.otelcol.securityContext` | The securityContext configuration for the otelcol-instrumentation container. | `{}` | -| `tracesGateway.enabled` | Flag to control deploying traces-gateway. [See docs for more information.](/docs/opentelemetry-collector/traces.md) | `true` | -| `tracesGateway.autoscaling.enabled` | Option to turn autoscaling on for traces-gateway and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | -| `tracesGateway.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` | -| `tracesGateway.autoscaling.maxReplicas` | Default max replicas for autoscaling | `10` | -| `tracesGateway.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `100` | -| `tracesGateway.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `50` | -| `tracesGateway.deployment.replicas` | Set the number of OpenTelemetry Collector replicas. | `1` | -| `tracesGateway.deployment.nodeSelector` | Node selector for otelcol deployment. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | -| `tracesGateway.deployment.priorityClassName` | Priority class name for OpenTelemetry Collector log pods. | `Nil` | -| `tracesGateway.logLevelFilter` | Do not send traces-gateway logs if `true`. | `false` | -| `tracesGateway.config.processors.batch.send_batch_size` | Sets the preferred size of batch. | `256` | -| `tracesGateway.config.processors.batch.send_batch_max_size` | Sets the maximum allowed size of a batch. 
Use with caution, setting too large value might cause 413 Payload Too Large errors. | `512` | -| `tracesGateway.config.processors.memory_limiter.limit_percentage` | Sets the maximum amount of memory, in %, targeted to be allocated by the process heap. | `75` | -| `tracesGateway.config.processors.memory_limiter.spike_limit_percentage` | Sets the maximum spike expected between the measurements of memory usage, in %. | `20` | -| `tracesGateway.config` | Configuration for traces-gateway. | See [values.yaml] | -| `tracesGateway.deployment.extraEnvVars` | Additional environment variables for traces-gateway pods. | `{}` | -| `tracesGateway.deployment.extraVolumeMounts` | Additional volume mounts for traces-gateway pods. | `{}` | -| `tracesGateway.deployment.extraVolumes` | Additional volumes for traces-gateway pods. | `{}` | -| `tracesGateway.deployment.image.pullPolicy` | Image pullPolicy for traces-gateway docker container. | `IfNotPresent` | -| `tracesGateway.deployment.image.repository` | Image repository for traces-gateway docker container. | `` | -| `tracesGateway.deployment.image.tag` | Image tag for traces-gateway docker container. | `` | -| `tracesGateway.deployment.livenessProbe` | Liveness probe settings for the traces-gateway container. | `{"periodSeconds": 15, "timeoutSeconds": 10, "failureThreshold": 3}` | -| `tracesGateway.deployment.podAnnotations` | Additional annotations for traces-gateway pods. | `{}` | -| `tracesGateway.deployment.podLabels` | Additional labels for traces-gateway pods. | `{}` | -| `tracesGateway.deployment.readinessProbe` | Readiness probe settings for the traces-gateway container. | `{"periodSeconds": 10, "timeoutSeconds": 3, "failureThreshold": 3}` | -| `tracesGateway.deployment.resources` | Resources for traces-gateway statefulset. | `{"limits": {"memory": "2Gi", "cpu": "1000m"}, "requests": {"memory": "196Mi", "cpu": "50m"}}` | -| `tracesGateway.deployment.startupProbe` | Startup probe configuration for the traces-gateway container. | `{"periodSeconds": 5, "timeoutSeconds": 3, "failureThreshold": 60}` | -| `tracesGateway.deployment.tolerations` | Tolerations for traces-gateway statefulset. | `[]` | -| `tracesSampler.deployment.replicas` | Set the number of OpenTelemetry Collector replicas. | `1` | -| `tracesSampler.deployment.nodeSelector` | Node selector for otelcol deployment. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | -| `tracesSampler.deployment.priorityClassName` | Priority class name for OpenTelemetry Collector log pods. | `Nil` | -| `tracesSampler.logLevelFilter` | Do not send traces-sampler logs if `true`. | `false` | -| `tracesSampler.config.processors.batch.send_batch_size` | Sets the preferred size of batch. | `256` | -| `tracesSampler.config.processors.batch.send_batch_max_size` | Sets the maximum allowed size of a batch. Use with caution, setting too large value might cause 413 Payload Too Large errors. | `512` | -| `tracesSampler.config.processors.memory_limiter.limit_percentage` | Sets the maximum amount of memory, in %, targeted to be allocated by the process heap. | `75` | -| `tracesSampler.config.processors.memory_limiter.spike_limit_percentage` | Sets the maximum spike expected between the measurements of memory usage, in %. | `20` | -| `tracesSampler.config` | Configuration for traces-sampler. | See [values.yaml] | -| `tracesSampler.deployment.extraEnvVars` | Additional environment variables for traces-sampler pods. 
| `{}` | -| `tracesSampler.deployment.extraVolumeMounts` | Additional volume mounts for traces-sampler pods. | `{}` | -| `tracesSampler.deployment.extraVolumes` | Additional volumes for traces-sampler pods. | `{}` | -| `tracesSampler.deployment.image.pullPolicy` | Image pullPolicy for traces-sampler docker container. | `IfNotPresent` | -| `tracesSampler.deployment.image.repository` | Image repository for traces-sampler docker container. | `` | -| `tracesSampler.deployment.image.tag` | Image tag for traces-sampler docker container. | `` | -| `tracesSampler.deployment.podAnnotations` | Additional annotations for traces-sampler pods. | `{}` | -| `tracesSampler.deployment.podLabels` | Additional labels for traces-sampler pods. | `{}` | -| `tracesSampler.deployment.resources` | Resources for traces-sampler statefulset. | `{"limits": {"memory": "4Gi", "cpu": "2000m"}, "requests": {"memory": "384Mi", "cpu": "200m"}}` | -| `tracesSampler.deployment.tolerations` | Tolerations for traces-sampler statefulset. | `[]` | -| `otellogs.image.repository` | Image repository for otelcol docker container. | `` | -| `otellogs.image.tag` | Image tag for otelcol docker container. | `` | -| `otellogs.image.pullPolicy` | Image pullPolicy for otelcol docker container. | `IfNotPresent` | -| `otellogs.logLevel` | Log level for the OpenTelemtry Collector. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` | -| `otellogs.config.merge` | Configuration for log collector otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | {} | -| `otellogs.config.override` | Configuration for log collector otelcol, replaces defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | {} | -| `otellogs.daemonset` | OpenTelemetry Collector Daemonset customization options. See [values.yaml] for more details. | See [values.yaml] | -| `otelcloudwatch.statefulset` | OpenTelemetry Cloudwatch Collector statefulset customization options. See [values.yaml] for more details. | See [values.yaml] | -| `otellogs.additionalDaemonSets` | OpenTelemetry Collector Daemonset per node customization options. See [Best Practices](/docs/best-practices.md#setting-different-resources-on-different-nodes-for-logs-collector). | `{}` | -| `otellogs.metrics.enabled` | Enable OpenTelemetry Collector metrics | `true` | -| `otellogs.serviceLabels` | Add custom labels to OpenTelemetry Collector Service | `{}` | -| `metadata.image.repository` | Image repository for otelcol docker container. | `` | -| `metadata.image.tag` | Image tag for otelcol docker container. | `` | -| `metadata.image.pullPolicy` | Image pullPolicy for otelcol docker container. | `IfNotPresent` | -| `metadata.securityContext` | The securityContext configuration for otelcol. | `{"fsGroup": 999}` | -| `metadata.podLabels` | Additional labels for all otelcol pods. | `{}` | -| `metadata.podAnnotations` | Additional annotations for all otelcol pods. | `{}` | -| `metadata.serviceLabels` | Additional labels for all otelcol pods. | `{}` | -| `metadata.persistence.enabled` | Flag to control persistence for OpenTelemetry Collector. | `true` | -| `metadata.persistence.storageClass` | Defines storageClassName for the PersistentVolumeClaim which is used to provide persistence for OpenTelemetry Collector. | `Nil` | -| `metadata.persistence.accessMode` | The accessMode for the volume which is used to provide persistence for OpenTelemetry Collector. 
| `ReadWriteOnce` | -| `metadata.persistence.size` | Size of the volume which is used to provide persistence for OpenTelemetry Collector. | `10Gi` | -| `metadata.persistence.pvcLabels` | Additional PersistentVolumeClaim labels for all OpenTelemetry Collector pods. | `{}` | -| `metadata.metrics.enabled` | Flag to control deploying the otelcol metrics statefulsets. | `true` | -| `metadata.metrics.logLevel` | Flag to control logging level for OpenTelemetry Collector for metrics. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` | -| `metadata.metrics.config.merge` | Configuration for metrics metadata otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | {} | -| `metadata.metrics.config.override` | Configuration for metrics metadata otelcol, replaces defaults.See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | {} | -| `metadata.metrics.config.additionalEndpoints` | List of additional endpoints for Open Telemetry Metadata Pod. | `[]` | -| `metadata.metrics.statefulset.containers.otelcol.startupProbe` | Startup probe configuration for metrics otelcol container. | `{"periodSeconds": 3, "failureThreshold": 60}` | -| `metadata.metrics.statefulset.nodeSelector` | Node selector for metrics metadata enrichment (otelcol) statefulset. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | -| `metadata.metrics.statefulset.tolerations` | Tolerations for metrics metadata enrichment (otelcol) statefulset. | `[]` | -| `metadata.metrics.statefulset.affinity` | Affinity for metrics metadata enrichment (otelcol) statefulset. | `{}` | -| `metadata.metrics.statefulset.podAntiAffinity` | PodAntiAffinity for metrics metadata enrichment (otelcol) statefulset. | `soft` | -| `metadata.metrics.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for metrics metadata enrichment (otelcol) statefulset. | `[]` | -| `metadata.metrics.statefulset.replicaCount` | Replica count for metrics metadata enrichment (otelcol) statefulset. | `3` | -| `metadata.metrics.statefulset.resources` | Resources for metrics metadata enrichment (otelcol) statefulset. | `{"limits": {"memory": "1Gi", "cpu": "1000m"}, "requests": {"memory": "768Mi", "cpu": "500m"}}` | -| `metadata.metrics.statefulset.priorityClassName` | Priority class name for metrics metadata enrichment (otelcol) pods. | `Nil` | -| `metadata.metrics.statefulset.podLabels` | Additional labels for metrics metadata enrichment (otelcol) pods. | `{}` | -| `metadata.metrics.statefulset.podAnnotations` | Additional annotations for metrics metadata enrichment (otelcol) pods. | `{}` | -| `metadata.metrics.statefulset.containers.otelcol.livenessProbe` | Liveness probe settings for the logs otelcol container. | `{"initialDelaySeconds": 15, "periodSeconds": 15, "timeoutSeconds": 10, "failureThreshold": 3}` | -| `metadata.metrics.statefulset.containers.otelcol.readinessProbe` | Readiness probe settings for the logs otelcol container. | `{"initialDelaySeconds": 5, "periodSeconds": 10, "timeoutSeconds": 3, "failureThreshold": 3}` | -| `metadata.metrics.statefulset.containers.otelcol.securityContext` | The securityContext configuration for otelcol container for metrics metadata enrichment statefulset. | `{}` | -| `metadata.metrics.statefulset.extraEnvVars` | Additional environment variables for metrics metadata enrichment (otelcol) pods. 
| `Nil` | -| `metadata.metrics.statefulset.extraVolumes` | Additional volumes for metrics metadata enrichment (otelcol) pods. | `Nil` | -| `metadata.metrics.statefulset.extraVolumeMounts` | Additional volume mounts for metrics metadata enrichment (otelcol) pods. | `Nil` | -| `metadata.metrics.autoscaling.enabled` | Option to turn autoscaling on for metrics metadata enrichment (otelcol) and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | -| `metadata.metrics.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` | -| `metadata.metrics.autoscaling.maxReplicas` | Default max replicas for autoscaling | `10` | -| `metadata.metrics.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `80` | -| `metadata.metrics.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `Nil` | -| `metadata.metrics.podDisruptionBudget` | Pod Disruption Budget for metrics metadata enrichment (otelcol) statefulset and for experimental otelcol metrics collector. | `{"minAvailable": 2}` | -| `metadata.logs.enabled` | Flag to control deploying the otelcol logs statefulsets. | `true` | -| `metadata.logs.logLevel` | Flag to control logging level for OpenTelemetry Collector for logs. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` | -| `metadata.logs.config.merge` | Configuration for logs metadata otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | {} | -| `metadata.logs.config.override` | Configuration for logs metadata otelcol, replaces defaults.See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | {} | -| `metadata.logs.statefulset.containers.otelcol.startupProbe` | Startup probe configuration for the logs otelcol container. | `{"periodSeconds": 3, "failureThreshold": 60}` | -| `metadata.logs.statefulset.containers.otelcol.livenessProbe` | Liveness probe settings for the logs otelcol container. | `{"initialDelaySeconds": 15, "periodSeconds": 15, "timeoutSeconds": 10, "failureThreshold": 3}` | -| `metadata.logs.statefulset.containers.otelcol.readinessProbe` | Readiness probe settings for the logs otelcol container. | `{"initialDelaySeconds": 5, "periodSeconds": 10, "timeoutSeconds": 3, "failureThreshold": 3}` | -| `metadata.logs.statefulset.containers.otelcol.securityContext` | The securityContext configuration for the logs otelcol container. | `{}` | -| `metadata.logs.statefulset.nodeSelector` | Node selector for logs metadata enrichment (otelcol) statefulset. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | -| `metadata.logs.statefulset.tolerations` | Tolerations for logs metadata enrichment (otelcol) statefulset. | `[]` | -| `metadata.logs.statefulset.affinity` | Affinity for logs metadata enrichment (otelcol) statefulset. | `{}` | -| `metadata.logs.statefulset.podAntiAffinity` | PodAntiAffinity for logs metadata enrichment (otelcol) statefulset. | `soft` | -| `metadata.logs.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for logs metadata enrichment (otelcol) statefulset. | `[]` | -| `metadata.logs.statefulset.replicaCount` | Replica count for logs metadata enrichment (otelcol) statefulset. | `3` | -| `metadata.logs.statefulset.resources` | Resources for logs metadata enrichment (otelcol) statefulset. 
| `{"limits": {"memory": "1Gi", "cpu": "1000m"}, "requests": {"memory": "768Mi", "cpu": "500m"}}` | -| `metadata.logs.statefulset.priorityClassName` | Priority class name for logs metadata enrichment (otelcol) pods. | `Nil` | -| `metadata.logs.statefulset.podLabels` | Additional labels for logs metadata enrichment (otelcol) pods. | `{}` | -| `metadata.logs.statefulset.podAnnotations` | Additional annotations for logs metadata enrichment (otelcol) pods. | `{}` | -| `metadata.logs.statefulset.extraEnvVars` | Additional environment variables for logs metadata enrichment (otelcol) pods. | `Nil` | -| `metadata.logs.statefulset.extraVolumes` | Additional volumes for logs metadata enrichment (otelcol) pods. | `Nil` | -| `metadata.logs.statefulset.extraVolumeMounts` | Additional volume mounts for logs metadata enrichment (otelcol) pods. | `Nil` | -| `metadata.logs.statefulset.extraPorts` | Additional exposed ports in logs metadata enrichment (otelcol) pods and service. | `Nil` | -| `metadata.logs.statefulset.extraArgs` | Additional arguments to otelcol container. | `Nil` | -| `metadata.logs.autoscaling.enabled` | Option to turn autoscaling on for logs metadata enrichment (otelcol) and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | -| `metadata.logs.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` | -| `metadata.logs.autoscaling.maxReplicas` | Default max replicas for autoscaling | `10` | -| `metadata.logs.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `80` | -| `metadata.logs.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `Nil` | -| `metadata.logs.podDisruptionBudget` | Pod Disruption Budget for logs metadata enrichment (otelcol) statefulset. | `{"minAvailable": 2}` | -| `otelevents.image.repository` | Image repository for otelcol docker container. | `` | -| `otelevents.image.tag` | Image tag for otelcol docker container. | `` | -| `otelevents.image.pullPolicy` | Image pullPolicy for otelcol docker container. | `IfNotPresent` | -| `otelevents.logLevel` | Log level for the OpenTelemtry Collector. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` | -| `otelevents.config.merge` | Configuration for events otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | `{}` | -| `otelevents.config.override` | Configuration for events otelcol, replaces defaults.See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | `{}` | -| `otelevents.statefulset` | OpenTelemetry Collector StatefulSet customization options. See values.yaml for more details. | See [values.yaml] | -| `tailing-sidecar-operator.enabled` | Flag to control deploying Tailing Sidecar Operator Helm sub-chart. | `false` | -| `tailing-sidecar-operator.fullnameOverride` | Used to override the chart's full name. | `Nil` | -| `tailing-sidecar-operator.scc.create` | Create OpenShift's Security Context Constraint | `false` | -| `kube-prometheus-stack.prometheus.prometheusSpec.nodeSelector` | Node selector for prometheus. [See docs/Best_Practices.md for more information.](/docs/best-practices.md) | `{}` | -| `pvcCleaner.metrics.enabled` | Flag to enable cleaning unused PVCs for otelcol metrics statefulsets. | `false` | -| `pvcCleaner.logs.enabled` | Flag to enable cleaning unused PVCs for otelcol logs statefulsets. 
| `false` | -| `pvcCleaner.job.image.repository` | Image repository for pvcCleaner docker containers. | `public.ecr.aws/sumologic/kubernetes-tools-kubectl` | -| `pvcCleaner.job.image.tag` | Image tag for pvcCleaner docker containers. | `2.20.0` | -| `pvcCleaner.job.image.pullPolicy` | Image pullPolicy for pvcCleaner docker containers. | `IfNotPresent` | -| `pvcCleaner.job.resources` | Resource requests and limits for the pvcCleaner containers. | `{"limits": {"memory": "256Mi", "cpu": "2000m"}, "requests": {"memory": "64Mi", "cpu": "100m"}}` | -| `pvcCleaner.job.nodeSelector` | Node selector for pvcCleaner job. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | -| `pvcCleaner.job.tolerations` | Add tolerations for the pvcCleaner job. | `[]` | -| `pvcCleaner.job.affinity` | Add affinity and anti-affinity for the pvcCleaner job. | `{}` | -| `pvcCleaner.job.podLabels` | Additional labels for the pvcCleaner container. | `{}` | -| `pvcCleaner.job.podAnnotations` | Additional annotations for for the pvcCleaner container. | `{}` | -| `pvcCleaner.job.schedule` | Schedule for cronJobs | `*/15 * * * *` | -| `pvcCleaner.job.securityContext` | The securityContext configuration for the pvcCleaner. | `{"runAsUser": 1000}` | +| Parameter | Description | Default | +| --- | --- | --- | +| `nameOverride` | Used to override the Chart name. | `Nil` | +| `fullnameOverride` | Used to override the chart's full name. | `Nil` | +| `namespaceOverride` | Used to override the chart's default target namespace. | `Nil` | +| `sumologic.setupEnabled` | If enabled, a pre-install hook will create Collector and Sources in Sumo Logic. | `true` | +| `sumologic.cleanupEnabled` | If enabled, a pre-delete hook will destroy Kubernetes secret and Sumo Logic Collector. | `false` | +| `sumologic.envFromSecret` | If enabled, accessId and accessKey will be sourced from the Secret name given. Be sure to include at least the following env variables in your secret: (1) SUMOLOGIC_ACCESSID, (2) SUMOLOGIC_ACCESSKEY. | `sumo-api-secret` | +| `sumologic.accessId` | Sumo access ID. | `Nil` | +| `sumologic.accessKey` | Sumo access key. | `Nil` | +| `sumologic.endpoint` | Sumo API endpoint; leave blank for automatic endpoint discovery and redirection. | `Nil` | +| `sumologic.httpProxy` | HTTP proxy URL. | `Nil` | +| `sumologic.httpsProxy` | HTTPS proxy URL. | `Nil` | +| `sumologic.noProxy` | List of comma-separated hostnames which should be excluded from the proxy. | `kubernetes.default.svc` | +| `sumologic.collectorName` | The name of the Sumo Logic collector that will be created in the SetUp job. Defaults to `clusterName` if not specified. | `Nil` | +| `sumologic.clusterName` | An identifier for the Kubernetes cluster. Whitespace in the cluster name will be replaced with dashes. | `kubernetes` | +| `sumologic.cluster` | Configuration of Kubernetes for [Terraform client](https://www.terraform.io/docs/providers/kubernetes/index.html#argument-reference). | `See [values.yaml]` | +| `sumologic.collectionMonitoring` | If set to false, sets EXCLUDE_NAMESPACE= and does not add the Otelcol logs and Prometheus remote storage metrics. | `true` | +| `sumologic.pullSecrets` | Optional list of secrets that will be used for pulling images for Sumo Logic's deployments and statefulsets. | `[{"name": "myRegistryKeySecretName"}]` | +| `sumologic.podLabels` | Additional labels for the pods. | `{}` | +| `sumologic.podAnnotations` | Additional annotations for the pods.
| `{}` | +| `sumologic.serviceAccount.annotations` | Add custom annotations to sumologic serviceAccounts | `{}` | +| `sumologic.scc.create` | Create OpenShift's Security Context Constraint | `false` | +| `sumologic.setup.force` | Force collection installation (disables k8s version verification) | `true` | +| `sumologic.setup.job.image.repository` | Image repository for Sumo Logic setup job docker container. | `public.ecr.aws/sumologic/kubernetes-setup` | +| `sumologic.setup.job.image.tag` | Image tag for Sumo Logic setup job docker container. | `3.10.0` | +| `sumologic.setup.job.image.pullPolicy` | Image pullPolicy for Sumo Logic docker container. | `IfNotPresent` | +| `sumologic.setup.job.pullSecrets` | Optional list of secrets that will be used for pulling images for Sumo Logic's setup job. | `[{"name": "myRegistryKeySecretName"}]` | +| `sumologic.setup.job.resources` | Resource requests and limits for the setup Job. | `{"limits": {"memory": "256Mi", "cpu": "2000m"}, "requests": {"memory": "64Mi", "cpu": "200m"}}` | +| `sumologic.setup.job.nodeSelector` | Node selector for sumologic setup job. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | +| `sumologic.setup.job.tolerations` | Add tolerations for the setup Job. | `[]` | +| `sumologic.setup.job.affinity` | Add affinity and anti-affinity for the setup Job. | `{}` | +| `sumologic.setup.job.podLabels` | Additional labels for the setup Job pod. | `{}` | +| `sumologic.setup.job.podAnnotations` | Additional annotations for the setup Job pod. | `{}` | +| `sumologic.setup.debug` | Enable debug mode (disables the automatic execution of the setup.sh script) | `true` | +| `sumologic.setup.monitors.enabled` | If enabled, a pre-install hook will create k8s monitors in Sumo Logic. | `true` | +| `sumologic.setup.monitors.monitorStatus` | The installed monitors default status: enabled/disabled. | `enabled` | +| `sumologic.setup.monitors.notificationEmails` | A list of emails to send notifications from monitors. | `[]` | +| `sumologic.setup.dashboards.enabled` | If enabled, a pre-install hook will install k8s dashboards in Sumo Logic. | `true` | +| `sumologic.collector.fields` | Configuration of Sumo Logic fields. [See Sumo Logic Terraform Plugin documentation for more information](https://registry.terraform.io/providers/SumoLogic/sumologic/latest/docs/resources/collector#fields). All double quotes should be escaped here regarding Terraform syntax. | `{}` | +| `sumologic.collector.sources` | Configuration of HTTP sources. [See docs/Terraform.md for more information](/docs/terraform.md). All double quotes should be escaped here regarding Terraform syntax. | `See [values.yaml]` | +| `sumologic.otelcolImage.repository` | Default image repository for OpenTelemetry Collector. This can be overridden for specific components. | `public.ecr.aws/sumologic/sumologic-otel-collector` | +| `sumologic.otelcolImage.tag` | Default image tag for OpenTelemetry Collector. This can be overridden for specific components. | `0.85.0-sumo-0` | +| `sumologic.otelcolImage.addFipsSuffix` | Add a `-fips` suffix to all image tags. See [docs/security-best-practices.md](/docs/security-best-practices.md) for more information. | `false` | +| `sumologic.events.enabled` | Defines whether collection of Kubernetes events is enabled. | `true` | +| `sumologic.events.sourceName` | Source name for the Events source. | `events` | +| `sumologic.events.sourceCategory` | Source category for the Events source. 
| `kubernetes/events` | +| `sumologic.events.sourceCategoryReplaceDash` | Used to replace - with another character. | `/` | +| `sumologic.events.persistence.enabled` | Enable persistence for the event collector. Persistence lets the collector avoid reingesting events on restart and buffer them locally if unable to reach the backend. | `true` | +| `sumologic.events.persistence.size` | Size of the persistent storage volume | `10Gi` | +| `sumologic.events.persistence.persistentVolume.path` | Local filesystem path the persistent storage volume will be mounted at. | `/var/lib/storage/events` | +| `sumologic.events.persistence.persistentVolume.accessMode` | The accessMode for the persistent storage volume | `ReadWriteOnce` | +| `sumologic.events.persistence.persistentVolume.pvcLabels` | Additional PersistentVolumeClaim labels for persistent storage volumes | `{}` | +| `sumologic.events.persistence.persistentVolume.storageClass` | The storageClassName for the persistent storage volume | `Nil` | +| `sumologic.events.sourceType` | The type of the Sumo Logic source being used for events ingestion. Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/logs-metrics/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/). | `http` | +| `sumologic.logs.enabled` | Set the enabled flag to false for disabling logs ingestion altogether. | `true` | +| `sumologic.logs.collector.otelcol.enabled` | Enable OpenTelemetry logs collector. | `true` | +| `sumologic.logs.collector.otelcloudwatch.enabled` | Flag to enable CloudWatch Collection | `false` | +| `sumologic.logs.collector.otelcloudwatch.roleArn` | AWS role ARN, to authenticate with CloudWatch | `Nil` | +| `sumologic.logs.collector.otelcloudwatch.persistence.enabled` | Flag to control persistence for the CloudWatch collector | `true` | +| `sumologic.logs.collector.otelcloudwatch.region` | EKS Fargate cluster region | `Nil` | +| `sumologic.logs.collector.otelcloudwatch.pollInterval` | CloudWatch poll interval | `1m` | +| `sumologic.logs.collector.otelcloudwatch.logGroups` | Log Groups configuration for AWS CloudWatch receiver | `{}` | +| `sumologic.logs.multiline.enabled` | Enable multiline detection for Kubernetes container logs. | `true` | +| `sumologic.logs.multiline.first_line_regex` | Regular expression to match first line of multiline logs. | `^\\[?\\d{4}-\\d{1,2}-\\d{1,2}.\\d{2}:\\d{2}:\\d{2}` | +| `sumologic.logs.multiline.additional` | List of additional conditions and expressions to match first line of multiline logs. See [Multiline](/docs/collecting-container-logs.md#conditional-multiline-log-parsing) for more information. | `[]` | +| `sumologic.logs.container.enabled` | Enable collecting logs from Kubernetes containers. | `true` | +| `sumologic.logs.container.format` | Format for container logs. | `fields` | +| `sumologic.logs.container.keep_time_attribute` | When set to `true`, preserves the `time` attribute, which is a string representation of the `timestamp` attribute. | `false` | +| `sumologic.logs.container.otelcol.extraProcessors` | Extra processors for container logs. See [/docs/collecting-container-logs.md](/docs/collecting-container-logs.md) for details. | `[]` | +| `sumologic.logs.container.sourceHost` | Set the \_sourceHost metadata field in Sumo Logic. | `Nil` | +| `sumologic.logs.container.sourceName` | Set the \_sourceName metadata field in Sumo Logic. 
| `%{namespace}.%{pod}.%{container}` | +| `sumologic.logs.container.sourceCategory` | Set the \_sourceCategory metadata field in Sumo Logic. | `%{namespace}/%{pod_name}` | +| `sumologic.logs.container.sourceCategoryPrefix` | Set the prefix for \_sourceCategory metadata. | `kubernetes/` | +| `sumologic.logs.container.sourceCategoryReplaceDash` | Used to replace - with another character. | `/` | +| `sumologic.logs.container.excludeContainerRegex` | A regular expression for container names. Logs from matching containers will not be sent to Sumo. | `Nil` | +| `sumologic.logs.container.excludeHostRegex` | A regular expression for Kubernetes node names. Logs from pods running on matching nodes will not be sent to Sumo. | `Nil` | +| `sumologic.logs.container.excludeNamespaceRegex` | A regular expression for Kubernetes namespace names. Logs from pods running in matching namespaces will not be sent to Sumo. | `Nil` | +| `sumologic.logs.container.excludePodRegex` | A regular expression for pod names. Logs from matching pods will not be sent to Sumo. | `Nil` | +| `sumologic.logs.container.perContainerAnnotationsEnabled` | Enable container-level pod annotations. | `false` | +| `sumologic.logs.container.perContainerAnnotationPrefixes` | Defines the list of prefixes of container-level pod annotations. | `[]` | +| `sumologic.logs.systemd.enabled` | Enable collecting systemd logs from Kubernetes nodes. | `true` | +| `sumologic.logs.systemd.units` | List of systemd units to collect logs from. | `["docker.service"]` | +| `sumologic.logs.systemd.otelcol.extraProcessors` | Extra processors for systemd logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details. | `[]` | +| `sumologic.logs.systemd.sourceName` | Set the \_sourceName metadata field in Sumo Logic. | `%{_sourceName}` | +| `sumologic.logs.systemd.sourceCategory` | Set the \_sourceCategory metadata field in Sumo Logic. | `system` | +| `sumologic.logs.systemd.sourceCategoryPrefix` | Set the prefix for \_sourceCategory metadata. | `kubernetes/` | +| `sumologic.logs.systemd.sourceCategoryReplaceDash` | Used to replace - with another character. | `/` | +| `sumologic.logs.systemd.excludeFacilityRegex` | A regular expression for facility. Matching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `Nil` | +| `sumologic.logs.systemd.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `Nil` | +| `sumologic.logs.systemd.excludePriorityRegex` | A regular expression for priority. Matching priority will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `Nil` | +| `sumologic.logs.systemd.excludeUnitRegex` | A regular expression for unit. Matching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `Nil` | +| `sumologic.logs.kubelet.otelcol.extraProcessors` | Extra processors for kubelet logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details. | `[]` | +| `sumologic.logs.kubelet.sourceName` | Set the \_sourceName metadata field in Sumo Logic. | `k8s_kubelet` | +| `sumologic.logs.kubelet.sourceCategory` | Set the \_sourceCategory metadata field in Sumo Logic. | `kubelet` | +| `sumologic.logs.kubelet.sourceCategoryPrefix` | Set the prefix for \_sourceCategory metadata.
| `kubernetes/` | +| `sumologic.logs.kubelet.sourceCategoryReplaceDash` | Used to replace - with another character. | `/` | +| `sumologic.logs.kubelet.excludeFacilityRegex` | A regular expression for facility. Matching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `Nil` | +| `sumologic.logs.kubelet.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `Nil` | +| `sumologic.logs.kubelet.excludePriorityRegex` | A regular expression for priority. Matching priority will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `Nil` | +| `sumologic.logs.kubelet.excludeUnitRegex` | A regular expression for unit. Matching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `Nil` | +| `sumologic.logs.fields` | Fields to be created at Sumo Logic to ensure logs are tagged with relevant metadata. [Sumo Logic help](https://help.sumologic.com/docs/manage/fields/#manage-fields) | `["cluster", "container", "daemonset", "deployment", "host", "namespace", "node", "pod", "service", "statefulset"]` | +| `sumologic.logs.additionalFields` | Additional Fields to be created in Sumo Logic. [Sumo Logic help](https://help.sumologic.com/docs/manage/fields/#manage-fields) | `[]` | +| `sumologic.logs.sourceType` | The type of the Sumo Logic source being used for logs ingestion. Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/logs-metrics/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/). | `http` | +| `sumologic.metrics.enabled` | Set the enabled flag to false for disabling metrics ingestion altogether. | `true` | +| `sumologic.metrics.collector.otelcol.enabled` | Enable experimental otelcol metrics collector. | `false` | +| `sumologic.metrics.collector.otelcol.scrapeInterval` | The default scrape interval for the collector. | `30s` | +| `sumologic.metrics.collector.otelcol.autoscaling.enabled` | Option to turn autoscaling on for the experimental otelcol metrics collector and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | +| `sumologic.metrics.collector.otelcol.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` | +| `sumologic.metrics.collector.otelcol.autoscaling.maxReplicas` | Default max replicas for autoscaling. | `10` | +| `sumologic.metrics.collector.otelcol.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `70` | +| `sumologic.metrics.collector.otelcol.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `70` | +| `sumologic.metrics.collector.otelcol.nodeSelector` | Node selector for the experimental otelcol metrics. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | +| `sumologic.metrics.collector.otelcol.podAnnotations` | Additional annotations for the experimental otelcol metrics pods. | `{}` | +| `sumologic.metrics.collector.otelcol.podLabels` | Additional labels for the experimental otelcol metrics pods. | `{}` | +| `sumologic.metrics.collector.otelcol.priorityClassName` | Priority class name for the experimental otelcol metrics.
| `Nil` | +| `sumologic.metrics.collector.otelcol.replicaCount` | Replica count for the experimental otelcol metrics collector. | `1` | +| `sumologic.metrics.collector.otelcol.resources` | Resource requests and limits for the experimental otelcol metrics collector. | `{"limits": {"memory": "2Gi", "cpu": "1000m"}, "requests": {"memory": "768Mi", "cpu": "100m"}}` | +| `sumologic.metrics.collector.otelcol.serviceMonitorSelector` | Selector for ServiceMonitors used for target discovery. By default, we select ServiceMonitors created by the Chart. See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr | `{}` | +| `sumologic.metrics.collector.otelcol.podMonitorSelector` | Selector for PodMonitors used for target discovery. By default, we select PodMonitors created by the Chart. See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr | `{}` | +| `sumologic.metrics.collector.otelcol.securityContext` | The securityContext configuration for the experimental otelcol metrics. | `{"fsGroup": 999}` | +| `sumologic.metrics.collector.otelcol.tolerations` | Tolerations for the experimental otelcol metrics. | `[]` | +| `sumologic.metrics.collector.otelcol.kubelet.enabled` | Enable collection of kubelet metrics. | `true` | +| `sumologic.metrics.collector.otelcol.cAdvisor.enabled` | Enable collection of cAdvisor metrics. | `true` | +| `sumologic.metrics.collector.otelcol.annotatedPods.enabled` | Enable collection of metrics from Pods annotated with prometheus.io/\* keys. See [docs/collecting-application-metrics.md](/docs/collecting-application-metrics.md#application-metrics-are-exposed-one-endpoint-scenario) for more information. | `true` | +| `sumologic.metrics.collector.otelcol.allocationStrategy` | Allocation strategy for the scrape target allocator. Valid values are: least-weighted and consistent-hashing. See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocator | `least-weighted` | +| `sumologic.metrics.enableDefaultFilters` | Enable default metric filters for Sumo Apps. | `false` | +| `sumologic.metrics.dropHistogramBuckets` | Drop buckets from select high-cardinality histogram metrics, leaving only the sum and count components. | `true` | +| `sumologic.metrics.otelcol.extraProcessors` | Extra processors configuration for metrics pipeline. See [/docs/collecting-application-metrics.md#metrics-modifications](/docs/collecting-application-metrics.md#metrics-modifications) for more information. | `[]` | +| `sumologic.metrics.remoteWriteProxy.enabled` | Enable a load balancing proxy for Prometheus remote writes. [See docs for more information.](/docs/prometheus.md#using-a-load-balancing-proxy-for-prometheus-remote-write) | `true` | +| `sumologic.metrics.remoteWriteProxy.config.clientBodyBufferSize` | See the [nginx documentation](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size). Increase if you've also increased samples per send in Prometheus remote write. | `64k` | +| `sumologic.metrics.remoteWriteProxy.config.workerCountAutotune` | This feature autodetects how much CPU is assigned to the nginx instance and sets the right number of workers based on that. Disable to use the default of 8 workers.
| `true` | +| `sumologic.metrics.remoteWriteProxy.config.port` | Port on which the remote write proxy is going to be exposed. | `8080` | +| `sumologic.metrics.remoteWriteProxy.config.enableAccessLogs` | Enable nginx access logs. | `false` | +| `sumologic.metrics.remoteWriteProxy.replicaCount` | Number of replicas in the remote write proxy deployment. | `3` | +| `sumologic.metrics.remoteWriteProxy.image` | Nginx docker image for the remote write proxy. | `{"repository": "public.ecr.aws/sumologic/nginx-unprivileged", "tag": "1.25.2-alpine", "pullPolicy": "IfNotPresent"}` | +| `sumologic.metrics.remoteWriteProxy.resources` | Resource requests and limits for the remote write proxy container. | `{"limits": {"cpu": "1000m", "memory": "256Mi"}, "requests": {"cpu": "100m", "memory": "128Mi"}}` | +| `sumologic.metrics.remoteWriteProxy.livenessProbe` | Liveness probe settings for the remote write proxy container. | `{"initialDelaySeconds": 30, "periodSeconds": 10, "timeoutSeconds": 5, "successThreshold": 1, "failureThreshold": 6}` | +| `sumologic.metrics.remoteWriteProxy.readinessProbe` | Readiness probe settings for the remote write proxy container. | `{"initialDelaySeconds": 5, "periodSeconds": 5, "timeoutSeconds": 3, "successThreshold": 1, "failureThreshold": 3}` | +| `sumologic.metrics.remoteWriteProxy.securityContext` | The securityContext configuration for the remote write proxy. | `{}` | +| `sumologic.metrics.remoteWriteProxy.nodeSelector` | Node selector for the remote write proxy deployment. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | +| `sumologic.metrics.remoteWriteProxy.tolerations` | Tolerations for the remote write proxy deployment. | `[]` | +| `sumologic.metrics.remoteWriteProxy.affinity` | Affinity for the remote write proxy deployment. | `{}` | +| `sumologic.metrics.remoteWriteProxy.priorityClassName` | Priority class name for the remote write proxy deployment. | `Nil` | +| `sumologic.metrics.remoteWriteProxy.podLabels` | Additional labels for the remote write proxy container. | `{}` | +| `sumologic.metrics.remoteWriteProxy.podAnnotations` | Additional annotations for the remote write proxy container. | `{}` | +| `sumologic.metrics.serviceMonitors` | Configuration of Sumo Logic Kubernetes Collection components serviceMonitors. | `See [values.yaml]` | +| `sumologic.metrics.sourceType` | The type of the Sumo Logic source being used for metrics ingestion. Can be `http` or `otlp`. | `http` | +| `sumologic.traces.enabled` | Set the enabled flag to true to enable tracing ingestion. _Tracing must be enabled for the account first. Please contact your Sumo representative for activation details._ | `true` | +| `sumologic.traces.spans_per_request` | Maximum number of spans sent in a single batch. | `100` | +| `sumologic.traces.sourceType` | The type of the Sumo Logic source being used for traces ingestion. Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/traces/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/). | `http` | +| `metrics-server.enabled` | Set the enabled flag to true for enabling metrics-server. This is required before enabling autoscaling unless you have an existing metrics-server in the cluster. | `false` | +| `metrics-server.fullnameOverride` | Used to override the chart's full name. | `Nil` | +| `metrics-server.apiService.create` | Specifies whether the v1beta1.metrics.k8s.io API service should be created.
| `true` | +| `metrics-server.extraArgs` | Extra arguments to pass to metrics-server on startup. | `["--kubelet-insecure-tls=true", "--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname"]` | +| `metrics-server.image.pullSecrets` | Pull secrets for metrics-server images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config). | `["imagepullsecret"]` | +| `kube-prometheus-stack.enabled` | Flag to control deploying Prometheus Operator Helm sub-chart. | `false` | +| `kube-prometheus-stack.global.imagePullSecrets` | Pull secrets for Kube Prometheus Stack images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config). | `[{"name": "image-pull-secret"}]` | +| `kube-prometheus-stack.fullnameOverride` | Used to override the chart's full name. | `Nil` | +| `kube-prometheus-stack.namespaceOverride` | Used to override the chart's default namespace. | `Nil` | +| `kube-prometheus-stack.kubeTargetVersionOverride` | Provide a target gitVersion of K8S, in case .Capabilities.KubeVersion is not available (e.g. helm template). Changing this may break Sumo Logic apps. | `Nil` | +| `kube-prometheus-stack.commonLabels` | Labels to apply to all Kube Prometheus Stack resources | `{}` | +| `kube-prometheus-stack.defaultRules.rules` | Control which default recording and alerting rules are enabled. | `See [values.yaml]` | +| `kube-prometheus-stack.kubeApiServer.serviceMonitor.interval` | Kubernetes API Server metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | +| `kube-prometheus-stack.kubeApiServer.serviceMonitor.metricRelabelings` | Kubernetes API Server MetricRelabelConfigs | `[{"action": "keep", "regex": "(?:apiserver_request_(?:count\|total)\|apiserver_request_(?:duration_seconds)_(?:count\|sum))", "sourceLabels": ["__name__"]}]` | +| `kube-prometheus-stack.kubelet.serviceMonitor.interval` | Kubelet metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | +| `kube-prometheus-stack.kubelet.serviceMonitor.probes` | Enable scraping /metrics/probes from kubelet's service | `false` | +| `kube-prometheus-stack.kubelet.serviceMonitor.resource` | Enable scraping /metrics/resource from kubelet's service | `false` | +| `kube-prometheus-stack.kubelet.serviceMonitor.metricRelabelings` | Kubelet MetricRelabelConfigs | `See [values.yaml]` | +| `kube-prometheus-stack.kubelet.serviceMonitor.cAdvisorMetricRelabelings` | Kubelet CAdvisor MetricRelabelConfigs | `See [values.yaml]` | +| `kube-prometheus-stack.kubeControllerManager.serviceMonitor.interval` | Kubernetes Controller Manager metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | +| `kube-prometheus-stack.kubeControllerManager.serviceMonitor.metricRelabelings` | Kubernetes Controller Manager MetricRelabelConfigs | `[{"action": "keep", "regex": "(?:cloudprovider_.*_api_request_duration_seconds.*)", "sourceLabels": ["__name__"]}]` | +| `kube-prometheus-stack.coreDns.serviceMonitor.interval` | Core DNS metrics scrape interval. If not set, the Prometheus default scrape interval is used.
| `Nil` | +| `kube-prometheus-stack.coreDns.serviceMonitor.metricRelabelings` | Core DNS MetricRelabelConfigs | `See [values.yaml]` | +| `kube-prometheus-stack.kubeEtcd.serviceMonitor.interval` | Kubernetes Etcd metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | +| `kube-prometheus-stack.kubeEtcd.serviceMonitor.metricRelabelings` | Kubernetes Etcd MetricRelabelConfigs | `See [values.yaml]` | +| `kube-prometheus-stack.kubeScheduler.serviceMonitor.interval` | Kubernetes Scheduler metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | +| `kube-prometheus-stack.kubeScheduler.serviceMonitor.metricRelabelings` | Kubernetes Scheduler MetricRelabelConfigs | `See [values.yaml]` | +| `kube-prometheus-stack.alertmanager.enabled` | Deploy alertmanager. | `false` | +| `kube-prometheus-stack.grafana.enabled` | If true, deploy the grafana sub-chart. | `false` | +| `kube-prometheus-stack.grafana.defaultDashboardsEnabled` | Deploy default dashboards. These are loaded using the sidecar. | `false` | +| `kube-prometheus-stack.prometheusOperator.podLabels` | Additional labels for prometheus operator pods. | `{}` | +| `kube-prometheus-stack.prometheusOperator.podAnnotations` | Additional annotations for prometheus operator pods. | `{}` | +| `kube-prometheus-stack.prometheusOperator.resources` | Resource limits for prometheus operator. Uses sub-chart defaults. | `{}` | +| `kube-prometheus-stack.prometheusOperator.admissionWebhooks.enabled` | Create PrometheusRules admission webhooks. Mutating webhook will patch PrometheusRules objects indicating they were validated. Validating webhook will check the rules syntax. | `false` | +| `kube-prometheus-stack.prometheusOperator.tls.enabled` | Enable TLS in prometheus operator. | `false` | +| `kube-prometheus-stack.kube-state-metrics.fullnameOverride` | Used to override the chart's full name. | `Nil` | +| `kube-prometheus-stack.kube-state-metrics.nodeSelector` | Node selector for kube-state-metrics. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | +| `kube-prometheus-stack.kube-state-metrics.customLabels` | Custom labels to apply to service, deployment and pods. Uses sub-chart defaults. | `{}` | +| `kube-prometheus-stack.kube-state-metrics.podAnnotations` | Additional annotations for pods in the DaemonSet. Uses sub-chart defaults. | `{}` | +| `kube-prometheus-stack.kube-state-metrics.resources` | Resource limits for kube state metrics. Uses sub-chart defaults. | `{}` | +| `kube-prometheus-stack.kube-state-metrics.image.tag` | Tag for kube-state-metrics Docker image. | `v2.7.0` | +| `kube-prometheus-stack.kube-state-metrics.prometheus.monitor.interval` | Kubernetes State Metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | +| `kube-prometheus-stack.kube-state-metrics.prometheus.monitor.metricRelabelings` | Kubernetes State Metrics MetricRelabelConfigs | `See [values.yaml]` | +| `kube-prometheus-stack.prometheus-node-exporter.fullnameOverride` | Used to override the chart's full name. | `Nil` | +| `kube-prometheus-stack.prometheus-node-exporter.nodeSelector` | Node selector for prometheus node exporter. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | +| `kube-prometheus-stack.prometheus-node-exporter.podLabels` | Additional labels for prometheus-node-exporter pods. 
| `{}` | +| `kube-prometheus-stack.prometheus-node-exporter.podAnnotations` | Additional annotations for prometheus-node-exporter pods. | `{}` | +| `kube-prometheus-stack.prometheus-node-exporter.resources` | Resource limits for node exporter. Uses sub-chart defaults. | `{}` | +| `kube-prometheus-stack.prometheus-node-exporter.prometheus.monitor.interval` | Node Exporter scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | +| `kube-prometheus-stack.prometheus-node-exporter.prometheus.monitor.metricRelabelings` | Node Exporter MetricRelabelConfigs | `See [values.yaml]` | +| `kube-prometheus-stack.prometheus.additionalServiceMonitors` | List of ServiceMonitor objects to create. | `[]` | +| `kube-prometheus-stack.prometheus.prometheusSpec.scrapeInterval` | Prometheus metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `30s` | +| `kube-prometheus-stack.prometheus.prometheusSpec.retention` | How long to retain metrics in Prometheus | `1d` | +| `kube-prometheus-stack.prometheus.prometheusSpec.podMetadata.labels` | Add custom pod labels to prometheus pods | `{}` | +| `kube-prometheus-stack.prometheus.prometheusSpec.podMetadata.annotations` | Add custom pod annotations to prometheus pods | `{}` | +| `kube-prometheus-stack.prometheus.prometheusSpec.nodeSelector` | Node selector for prometheus. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | +| `kube-prometheus-stack.prometheus.prometheusSpec.resources` | Resource limits for prometheus. Uses sub-chart defaults. | `{"limits": {"cpu": "2000m", "memory": "8Gi"}, "requests": {"cpu": "500m", "memory": "1Gi"}}` | +| `kube-prometheus-stack.prometheus.prometheusSpec.initContainers` | InitContainers allows injecting additional Prometheus initContainers. | `See [values.yaml]` | +| `kube-prometheus-stack.prometheus.prometheusSpec.containers` | Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod. | `See [values.yaml]` | +| `kube-prometheus-stack.prometheus.prometheusSpec.walCompression` | Enables walCompression in Prometheus | `true` | +| `kube-prometheus-stack.prometheus.prometheusSpec.additionalScrapeConfigs` | Additional Prometheus scrape configurations | `See [values.yaml]` | +| `kube-prometheus-stack.prometheus.prometheusSpec.remoteWrite` | If specified, the remote_write spec. | `[{"remoteTimeout": "5s", "url": "http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics"}]` | +| `otelcolInstrumentation.enabled` | Enables Sumo Otel Distro Collector StatefulSet to collect telemetry data. [See docs for more information.](/docs/opentelemetry-collector/traces.md) | `true` | +| `otelcolInstrumentation.sourceMetadata.sourceName` | Set the \_sourceName metadata field in Sumo Logic. | `%{k8s.namespace.name}.%{k8s.pod.pod_name}.%{k8s.container.name}` | +| `otelcolInstrumentation.sourceMetadata.sourceCategory` | Set the \_sourceCategory metadata field in Sumo Logic. | `%{k8s.namespace.name}/%{k8s.pod.pod_name}` | +| `otelcolInstrumentation.sourceMetadata.sourceCategoryPrefix` | Set the prefix for \_sourceCategory metadata. | `kubernetes/` | +| `otelcolInstrumentation.sourceMetadata.sourceCategoryReplaceDash` | Used to replace - with another character. | `/` | +| `otelcolInstrumentation.sourceMetadata.excludeContainerRegex` | A regular expression for containers. Matching containers will be excluded from Sumo.
| `Nil` | +| `otelcolInstrumentation.sourceMetadata.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. | `Nil` | +| `otelcolInstrumentation.sourceMetadata.excludeNamespaceRegex` | A regular expression for namespaces. Matching namespaces will be excluded from Sumo. | `Nil` | +| `otelcolInstrumentation.sourceMetadata.excludePodRegex` | A regular expression for pods. Matching pods will be excluded from Sumo. | `Nil` | +| `otelcolInstrumentation.autoscaling.enabled` | Option to turn autoscaling on for Sumo Otel Distro Collector StatefulSet and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | +| `otelcolInstrumentation.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` | +| `otelcolInstrumentation.autoscaling.maxReplicas` | Default max replicas for autoscaling. | `10` | +| `otelcolInstrumentation.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `100` | +| `otelcolInstrumentation.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `50` | +| `otelcolInstrumentation.statefulset.nodeSelector` | Node selector for otelcol-instrumentation statefulset. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | +| `otelcolInstrumentation.statefulset.tolerations` | Tolerations for otelcol-instrumentation statefulset. | `[]` | +| `otelcolInstrumentation.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for otelcol-instrumentation statefulset. | `[]` | +| `otelcolInstrumentation.statefulset.affinity` | Affinity for otelcol-instrumentation statefulset. | `{}` | +| `otelcolInstrumentation.statefulset.podAntiAffinity` | PodAntiAffinity for otelcol-instrumentation statefulset. | `soft` | +| `otelcolInstrumentation.statefulset.replicaCount` | Set the number of otelcol-instrumentation replicas. | `3` | +| `otelcolInstrumentation.statefulset.resources` | Resources for otelcol-instrumentation statefulset. | `{"limits": {"memory": "4Gi", "cpu": "2000m"}, "requests": {"memory": "768Mi", "cpu": "500m"}}` | +| `otelcolInstrumentation.statefulset.priorityClassName` | Priority class name for otelcol-instrumentation pods. | `Nil` | +| `otelcolInstrumentation.statefulset.podLabels` | Additional labels for otelcol-instrumentation pods. | `{}` | +| `otelcolInstrumentation.statefulset.podAnnotations` | Additional annotations for otelcol-instrumentation pods. | `{}` | +| `otelcolInstrumentation.statefulset.image.repository` | Image repository for otelcol-instrumentation docker container. | `Nil` | +| `otelcolInstrumentation.statefulset.image.tag` | Image tag for otelcol-instrumentation docker container. | `Nil` | +| `otelcolInstrumentation.statefulset.image.pullPolicy` | Image pullPolicy for otelcol-instrumentation docker container. | `IfNotPresent` | +| `otelcolInstrumentation.statefulset.containers.otelcol.securityContext` | The securityContext configuration for the otelcol-instrumentation container. | `{}` | +| `otelcolInstrumentation.statefulset.containers.otelcol.livenessProbe` | Liveness probe settings for the otelcol-instrumentation container. | `{"initialDelaySeconds": 15, "periodSeconds": 15, "timeoutSeconds": 10, "failureThreshold": 3}` | +| `otelcolInstrumentation.statefulset.containers.otelcol.readinessProbe` | Readiness probe settings for the otelcol-instrumentation container.
| `{"initialDelaySeconds": 5, "periodSeconds": 10, "timeoutSeconds": 3, "failureThreshold": 3}` | +| `otelcolInstrumentation.statefulset.containers.otelcol.startupProbe` | Startup probe configuration for the otelcol-instrumentation container. | `{"periodSeconds": 3, "failureThreshold": 60}` | +| `otelcolInstrumentation.statefulset.extraEnvVars` | Additional environment variables for otelcol-instrumentation pods. | `[{"name": "VALUE_FROM_SECRET", "valueFrom": {"secretKeyRef": {"name": "secret_name", "key": "secret_key"}}}]` | +| `otelcolInstrumentation.statefulset.extraVolumes` | Additional volumes for otelcol-instrumentation pods. | `[{"name": "es-certs", "secret": {"defaultMode": 420, "secretName": "es-certs"}}]` | +| `otelcolInstrumentation.statefulset.extraVolumeMounts` | Additional volume mounts for otelcol-instrumentation pods. | `[{"name": "es-certs", "mountPath": "/certs", "readOnly": true}]` | +| `otelcolInstrumentation.logLevelFilter` | Do not send otelcol-instrumentation logs if `true`. | `false` | +| `otelcolInstrumentation.config` | Configuration for otelcol-instrumentation | `See [values.yaml]` | +| `tracesSampler.deployment.nodeSelector` | Node selector for otelcol deployment. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | +| `tracesSampler.deployment.tolerations` | Tolerations for traces-sampler statefulset. | `[]` | +| `tracesSampler.deployment.replicas` | Set the number of OpenTelemetry Collector replicas. | `1` | +| `tracesSampler.deployment.resources` | Resources for traces-sampler statefulset. | `{"limits": {"memory": "4Gi", "cpu": "2000m"}, "requests": {"memory": "384Mi", "cpu": "200m"}}` | +| `tracesSampler.deployment.priorityClassName` | Priority class name for OpenTelemetry Collector log pods. | `Nil` | +| `tracesSampler.deployment.podLabels` | Additional labels for traces-sampler pods. | `{}` | +| `tracesSampler.deployment.podAnnotations` | Additional annotations for traces-sampler pods. | `{}` | +| `tracesSampler.deployment.image.repository` | Image repository for traces-sampler docker container. | `Nil` | +| `tracesSampler.deployment.image.tag` | Image tag for traces-sampler docker container. | `Nil` | +| `tracesSampler.deployment.image.pullPolicy` | Image pullPolicy for traces-sampler docker container. | `IfNotPresent` | +| `tracesSampler.deployment.extraEnvVars` | Additional environment variables for traces-sampler pods. | `[{"name": "VALUE_FROM_SECRET", "valueFrom": {"secretKeyRef": {"name": "secret_name", "key": "secret_key"}}}]` | +| `tracesSampler.deployment.extraVolumes` | Additional volumes for traces-sampler pods. | `[{"name": "es-certs", "secret": {"defaultMode": 420, "secretName": "es-certs"}}]` | +| `tracesSampler.deployment.extraVolumeMounts` | Additional volume mounts for traces-sampler pods. | `[{"name": "es-certs", "mountPath": "/certs", "readOnly": true}]` | +| `tracesSampler.logLevelFilter` | Do not send traces-sampler logs if `true`. | `false` | +| `tracesSampler.config` | Configuration for traces-sampler. | `See [values.yaml]` | +| `metadata.image.repository` | Image repository for otelcol docker container. | `Nil` | +| `metadata.image.tag` | Image tag for otelcol docker container. | `Nil` | +| `metadata.image.pullPolicy` | Image pullPolicy for otelcol docker container. | `IfNotPresent` | +| `metadata.securityContext` | The securityContext configuration for otelcol. | `{"fsGroup": 999}` | +| `metadata.podLabels` | Additional labels for all otelcol pods. 
+| `metadata.podAnnotations` | Additional annotations for all otelcol pods. | `{}` |
+| `metadata.serviceLabels` | Additional labels for all otelcol services. | `{}` |
+| `metadata.persistence.enabled` | Flag to control persistence for OpenTelemetry Collector. | `true` |
+| `metadata.persistence.storageClass` | Defines storageClassName for the PersistentVolumeClaim which is used to provide persistence for OpenTelemetry Collector. | `Nil` |
+| `metadata.persistence.accessMode` | The accessMode for the volume which is used to provide persistence for OpenTelemetry Collector. | `ReadWriteOnce` |
+| `metadata.persistence.size` | Size of the volume which is used to provide persistence for OpenTelemetry Collector. | `10Gi` |
+| `metadata.persistence.pvcLabels` | Additional PersistentVolumeClaim labels for all OpenTelemetry Collector pods. | `{}` |
+| `metadata.metrics.enabled` | Flag to control deploying the otelcol metrics statefulsets. | `true` |
+| `metadata.metrics.logLevel` | Flag to control logging level for OpenTelemetry Collector for metrics. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` |
+| `metadata.metrics.config.merge` | Configuration for metrics metadata otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | `{}` |
+| `metadata.metrics.config.override` | Configuration for metrics metadata otelcol, replaces defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | `{}` |
+| `metadata.metrics.config.additionalEndpoints` | List of additional endpoints for the OpenTelemetry Metadata Pod. | `[]` |
+| `metadata.metrics.statefulset.nodeSelector` | Node selector for metrics metadata enrichment (otelcol) statefulset. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` |
+| `metadata.metrics.statefulset.tolerations` | Tolerations for metrics metadata enrichment (otelcol) statefulset. | `[]` |
+| `metadata.metrics.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for metrics metadata enrichment (otelcol) statefulset. | `[]` |
+| `metadata.metrics.statefulset.affinity` | Affinity for metrics metadata enrichment (otelcol) statefulset. | `{}` |
+| `metadata.metrics.statefulset.podAntiAffinity` | PodAntiAffinity for metrics metadata enrichment (otelcol) statefulset. | `soft` |
+| `metadata.metrics.statefulset.replicaCount` | Replica count for metrics metadata enrichment (otelcol) statefulset. | `3` |
+| `metadata.metrics.statefulset.resources` | Resources for metrics metadata enrichment (otelcol) statefulset. | `{"limits": {"memory": "1Gi", "cpu": "1000m"}, "requests": {"memory": "768Mi", "cpu": "500m"}}` |
+| `metadata.metrics.statefulset.priorityClassName` | Priority class name for metrics metadata enrichment (otelcol) pods. | `Nil` |
+| `metadata.metrics.statefulset.podLabels` | Additional labels for metrics metadata enrichment (otelcol) pods. | `{}` |
+| `metadata.metrics.statefulset.podAnnotations` | Additional annotations for metrics metadata enrichment (otelcol) pods. | `{}` |
+| `metadata.metrics.statefulset.containers.otelcol.securityContext` | The securityContext configuration for the otelcol container in the metrics metadata enrichment statefulset. | `{}` |
+| `metadata.metrics.statefulset.containers.otelcol.livenessProbe` | Liveness probe settings for the metrics otelcol container. | `{"initialDelaySeconds": 15, "periodSeconds": 15, "timeoutSeconds": 10, "failureThreshold": 3}` |
| `{"initialDelaySeconds": 15, "periodSeconds": 15, "timeoutSeconds": 10, "failureThreshold": 3}` | +| `metadata.metrics.statefulset.containers.otelcol.readinessProbe` | Readiness probe settings for the logs otelcol container. | `{"initialDelaySeconds": 5, "periodSeconds": 10, "timeoutSeconds": 3, "failureThreshold": 3}` | +| `metadata.metrics.statefulset.containers.otelcol.startupProbe` | Startup probe configuration for metrics otelcol container. | `{"periodSeconds": 3, "failureThreshold": 60}` | +| `metadata.metrics.statefulset.extraEnvVars` | Additional environment variables for metrics metadata enrichment (otelcol) pods. | `[{"name": "VALUE_FROM_SECRET", "valueFrom": {"secretKeyRef": {"name": "secret_name", "key": "secret_key"}}}]` | +| `metadata.metrics.statefulset.extraVolumes` | Additional volumes for metrics metadata enrichment (otelcol) pods. | `[{"name": "es-certs", "secret": {"defaultMode": 420, "secretName": "es-certs"}}]` | +| `metadata.metrics.statefulset.extraVolumeMounts` | Additional volume mounts for metrics metadata enrichment (otelcol) pods. | `[{"name": "es-certs", "mountPath": "/certs", "readOnly": true}]` | +| `metadata.metrics.autoscaling.enabled` | Option to turn autoscaling on for metrics metadata enrichment (otelcol) and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | +| `metadata.metrics.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` | +| `metadata.metrics.autoscaling.maxReplicas` | Default max replicas for autoscaling | `10` | +| `metadata.metrics.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `80` | +| `metadata.metrics.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `50` | +| `metadata.metrics.podDisruptionBudget` | Pod Disruption Budget for metrics metadata enrichment (otelcol) statefulset and for experimental otelcol metrics collector. | `{"minAvailable": 2, "maxUnavailable": 1}` | +| `metadata.logs.enabled` | Flag to control deploying the otelcol logs statefulsets. | `true` | +| `metadata.logs.logLevel` | Flag to control logging level for OpenTelemetry Collector for logs. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` | +| `metadata.logs.config.merge` | Configuration for logs metadata otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | `{}` | +| `metadata.logs.config.override` | Configuration for logs metadata otelcol, replaces defaults.See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | `{}` | +| `metadata.logs.statefulset.nodeSelector` | Node selector for logs metadata enrichment (otelcol) statefulset. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | +| `metadata.logs.statefulset.tolerations` | Tolerations for logs metadata enrichment (otelcol) statefulset. | `[]` | +| `metadata.logs.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for logs metadata enrichment (otelcol) statefulset. | `[]` | +| `metadata.logs.statefulset.affinity` | Affinity for logs metadata enrichment (otelcol) statefulset. | `{}` | +| `metadata.logs.statefulset.podAntiAffinity` | PodAntiAffinity for logs metadata enrichment (otelcol) statefulset. | `soft` | +| `metadata.logs.statefulset.replicaCount` | Replica count for logs metadata enrichment (otelcol) statefulset. 
+| `metadata.logs.statefulset.resources` | Resources for logs metadata enrichment (otelcol) statefulset. | `{"limits": {"memory": "1Gi", "cpu": "1000m"}, "requests": {"memory": "768Mi", "cpu": "500m"}}` |
+| `metadata.logs.statefulset.priorityClassName` | Priority class name for logs metadata enrichment (otelcol) pods. | `Nil` |
+| `metadata.logs.statefulset.podLabels` | Additional labels for logs metadata enrichment (otelcol) pods. | `{}` |
+| `metadata.logs.statefulset.podAnnotations` | Additional annotations for logs metadata enrichment (otelcol) pods. | `{}` |
+| `metadata.logs.statefulset.containers.otelcol.securityContext` | The securityContext configuration for the logs otelcol container. | `{}` |
+| `metadata.logs.statefulset.containers.otelcol.livenessProbe` | Liveness probe settings for the logs otelcol container. | `{"initialDelaySeconds": 15, "periodSeconds": 15, "timeoutSeconds": 10, "failureThreshold": 3}` |
+| `metadata.logs.statefulset.containers.otelcol.readinessProbe` | Readiness probe settings for the logs otelcol container. | `{"initialDelaySeconds": 5, "periodSeconds": 10, "timeoutSeconds": 3, "failureThreshold": 3}` |
+| `metadata.logs.statefulset.containers.otelcol.startupProbe` | Startup probe configuration for the logs otelcol container. | `{"periodSeconds": 3, "failureThreshold": 60}` |
+| `metadata.logs.statefulset.extraEnvVars` | Additional environment variables for logs metadata enrichment (otelcol) pods. | `[{"name": "VALUE_FROM_SECRET", "valueFrom": {"secretKeyRef": {"name": "secret_name", "key": "secret_key"}}}]` |
+| `metadata.logs.statefulset.extraVolumes` | Additional volumes for logs metadata enrichment (otelcol) pods. | `[{"name": "es-certs", "secret": {"defaultMode": 420, "secretName": "es-certs"}}]` |
+| `metadata.logs.statefulset.extraVolumeMounts` | Additional volume mounts for logs metadata enrichment (otelcol) pods. | `[{"name": "es-certs", "mountPath": "/certs", "readOnly": true}]` |
+| `metadata.logs.statefulset.extraPorts` | Additional exposed ports in logs metadata enrichment (otelcol) pods and service. | `[{"name": "otlphttp2", "containerPort": 4319, "protocol": "TCP"}]` |
+| `metadata.logs.statefulset.extraArgs` | Additional arguments for the otelcol container. | `[]` |
+| `metadata.logs.autoscaling.enabled` | Option to turn autoscaling on for logs metadata enrichment (otelcol) and specify params for HPA. Autoscaling needs metrics-server to access CPU metrics. | `false` |
+| `metadata.logs.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` |
+| `metadata.logs.autoscaling.maxReplicas` | Default max replicas for autoscaling. | `10` |
+| `metadata.logs.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `80` |
+| `metadata.logs.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `50` |
+| `metadata.logs.podDisruptionBudget` | Pod Disruption Budget for logs metadata enrichment (otelcol) statefulset. | `{"minAvailable": 2, "maxUnavailable": 1}` |
+| `tracesGateway.enabled` | Flag to control deploying traces-gateway. [See docs for more information.](/docs/opentelemetry-collector/traces.md) | `true` |
+| `tracesGateway.autoscaling.enabled` | Option to turn autoscaling on for traces-gateway and specify params for HPA. Autoscaling needs metrics-server to access CPU metrics. | `false` |
+| `tracesGateway.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` |
+| `tracesGateway.autoscaling.maxReplicas` | Default max replicas for autoscaling. | `10` |
+| `tracesGateway.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `100` |
+| `tracesGateway.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `50` |
+| `tracesGateway.deployment.replicas` | Set the number of OpenTelemetry Collector replicas. | `1` |
+| `tracesGateway.deployment.nodeSelector` | Node selector for traces-gateway deployment. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` |
+| `tracesGateway.deployment.tolerations` | Tolerations for traces-gateway deployment. | `[]` |
+| `tracesGateway.deployment.resources` | Resources for traces-gateway deployment. | `{"limits": {"memory": "2Gi", "cpu": "1000m"}, "requests": {"memory": "196Mi", "cpu": "50m"}}` |
+| `tracesGateway.deployment.podLabels` | Additional labels for traces-gateway pods. | `{}` |
+| `tracesGateway.deployment.podAnnotations` | Additional annotations for traces-gateway pods. | `{}` |
+| `tracesGateway.deployment.image.repository` | Image repository for traces-gateway docker container. | `Nil` |
+| `tracesGateway.deployment.image.tag` | Image tag for traces-gateway docker container. | `Nil` |
+| `tracesGateway.deployment.image.pullPolicy` | Image pullPolicy for traces-gateway docker container. | `IfNotPresent` |
+| `tracesGateway.deployment.livenessProbe` | Liveness probe settings for the traces-gateway container. | `{"periodSeconds": 15, "timeoutSeconds": 10, "failureThreshold": 3}` |
+| `tracesGateway.deployment.readinessProbe` | Readiness probe settings for the traces-gateway container. | `{"periodSeconds": 10, "timeoutSeconds": 3, "failureThreshold": 3}` |
+| `tracesGateway.deployment.startupProbe` | Startup probe configuration for the traces-gateway container. | `{"periodSeconds": 5, "timeoutSeconds": 3, "failureThreshold": 60}` |
+| `tracesGateway.deployment.extraEnvVars` | Additional environment variables for traces-gateway pods. | `[{"name": "VALUE_FROM_SECRET", "valueFrom": {"secretKeyRef": {"name": "secret_name", "key": "secret_key"}}}]` |
+| `tracesGateway.deployment.extraVolumes` | Additional volumes for traces-gateway pods. | `[{"name": "es-certs", "secret": {"defaultMode": 420, "secretName": "es-certs"}}]` |
+| `tracesGateway.deployment.extraVolumeMounts` | Additional volume mounts for traces-gateway pods. | `[{"name": "es-certs", "mountPath": "/certs", "readOnly": true}]` |
+| `tracesGateway.deployment.priorityClassName` | Priority class name for traces-gateway pods. | `Nil` |
+| `tracesGateway.logLevelFilter` | Do not send traces-gateway logs if `true`. | `false` |
+| `tracesGateway.config` | Configuration for traces-gateway. | `See [values.yaml]` |
+| `otelevents.image.repository` | Image repository for otelcol docker container. | `Nil` |
+| `otelevents.image.tag` | Image tag for otelcol docker container. | `Nil` |
+| `otelevents.image.pullPolicy` | Image pullPolicy for otelcol docker container. | `IfNotPresent` |
+| `otelevents.logLevel` | Log level for the OpenTelemetry Collector. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` |
+| `otelevents.config.merge` | Configuration for events otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | `{}` |
+| `otelevents.config.override` | Configuration for events otelcol, replaces defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | `{}` |
+| `otelevents.statefulset` | OpenTelemetry Collector StatefulSet customization options. See [values.yaml] for more details. | `See [values.yaml]` |
+| `otelcloudwatch.statefulset` | OpenTelemetry Cloudwatch Collector statefulset customization options. See [values.yaml] for more details. | `See [values.yaml]` |
+| `otellogs.metrics.enabled` | Enable OpenTelemetry Collector metrics. | `true` |
+| `otellogs.serviceLabels` | Add custom labels to the OpenTelemetry Collector Service. | `{}` |
+| `otellogs.image.repository` | Image repository for otelcol docker container. | `Nil` |
+| `otellogs.image.tag` | Image tag for otelcol docker container. | `Nil` |
+| `otellogs.image.pullPolicy` | Image pullPolicy for otelcol docker container. | `IfNotPresent` |
+| `otellogs.logLevel` | Log level for the OpenTelemetry Collector. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` |
+| `otellogs.config.merge` | Configuration for log collector otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | `{}` |
+| `otellogs.config.override` | Configuration for log collector otelcol, replaces defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | `{}` |
+| `otellogs.daemonset` | OpenTelemetry Collector Daemonset customization options. See [values.yaml] for more details. | `See [values.yaml]` |
+| `otellogs.additionalDaemonSets` | OpenTelemetry Collector Daemonset per node customization options. See [Best Practices](/docs/best-practices.md#setting-different-resources-on-different-nodes-for-logs-collector). | `{}` |
+| `telegraf-operator.enabled` | Flag to control deploying Telegraf Operator Helm sub-chart. | `false` |
+| `telegraf-operator.fullnameOverride` | Used to override the chart's full name. | `Nil` |
+| `telegraf-operator.image.sidecarImage` | Telegraf Operator sidecar image. | `public.ecr.aws/sumologic/telegraf:1.21.2` |
+| `telegraf-operator.replicaCount` | Replica count for Telegraf Operator pods. | `1` |
+| `telegraf-operator.classes.secretName` | Secret name in which the Telegraf Operator configuration will be stored. | `telegraf-operator-classes` |
+| `telegraf-operator.classes.default` | Name of the default output configuration. | `sumologic-prometheus` |
+| `telegraf-operator.classes.data` | Telegraf sidecar configuration. | `See [values.yaml]` |
+| `telegraf-operator.imagePullSecrets` | Pull secrets for Telegraf Operator images. For more information on using Kubernetes secrets with container registries, please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config). | `[]` |
+| `falco.enabled` | Flag to control deploying Falco Helm sub-chart. | `false` |
+| `falco.fullnameOverride` | Used to override the chart's full name. | `Nil` |
+| `falco.imagePullSecrets` | Pull secrets for falco images. For more information on using Kubernetes secrets with container registries, please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config). | `[]` |
+| `falco.image.registry` | Image registry for falco docker container. | `public.ecr.aws` |
+| `falco.image.repository` | Image repository for falco docker container. | `falcosecurity/falco-no-driver` |
+| `falco.addKernelDevel` | Flag to control installation of `kernel-devel` on nodes using MachineConfig, required to build falco modules (only for OpenShift). | `true` |
+| `falco.extra.initContainers` | InitContainers for the Falco pod. | `See [values.yaml]` |
+| `falco.driver.kind` | Tell Falco which driver to use. Available options: module (kernel driver) and ebpf (eBPF probe). Set to `ebpf` for GKE. | `module` |
+| `falco.driver.loader.initContainer.image` | Init container image configuration for falco driver loader. | `{"registry": "public.ecr.aws", "repository": "falcosecurity/falco-driver-loader"}` |
+| `falco.falco.load_plugins` | Names of the plugins to be loaded by Falco. | `["json", "k8saudit"]` |
+| `falco.falco.json_output` | Output events in JSON. | `true` |
+| `falco.falco.rules_file` | The location of the rules files that will be consumed by Falco. | `["/etc/falco/falco_rules.yaml", "/etc/falco/falco_rules.local.yaml", "/etc/falco/k8s_audit_rules.yaml", "/etc/falco/rules.d", "/etc/falco/rules.available/application_rules.yaml"]` |
+| `falco.falcoctl` | Falcoctl configuration. We don't use it for now due to breaking changes. [See this issue](https://github.com/SumoLogic/sumologic-kubernetes-collection/issues/3144). | `{"artifact": {"follow": {"enabled": false}, "install": {"enabled": false}}}` |
+| `falco.customRules` | Additional falco rules related to Sumo Logic Kubernetes Collection. | `See [values.yaml]` |
+| `tailing-sidecar-operator.enabled` | Flag to control deploying Tailing Sidecar Operator Helm sub-chart. | `false` |
+| `tailing-sidecar-operator.fullnameOverride` | Used to override the chart's full name. | `Nil` |
+| `tailing-sidecar-operator.scc.create` | Create OpenShift's Security Context Constraint. | `false` |
+| `opentelemetry-operator.enabled` | Flag to control deploying OpenTelemetry Operator Helm sub-chart. | `true` |
+| `opentelemetry-operator.instrumentationJobImage.image.repository` | Name of the image repository used to apply the Instrumentation resource. | `sumologic/kubernetes-tools` |
+| `opentelemetry-operator.instrumentationJobImage.image.tag` | Name of the image tag used to apply the Instrumentation resource. | `2.14.0` |
+| `opentelemetry-operator.createDefaultInstrumentation` | Flag to control creation of the default Instrumentation object. | `false` |
+| `opentelemetry-operator.instrumentationNamespaces` | Used to create `Instrumentation` resources in specified namespaces. | `Nil` |
+| `opentelemetry-operator.instrumentation.dotnet.traces.enabled` | Flag to control traces export from DotNet instrumentation in `Instrumentation` resource. | `true` |
+| `opentelemetry-operator.instrumentation.dotnet.metrics.enabled` | Flag to control metrics export from DotNet instrumentation in `Instrumentation` resource. | `true` |
+| `opentelemetry-operator.instrumentation.java.traces.enabled` | Flag to control traces export from Java instrumentation in `Instrumentation` resource. | `true` |
+| `opentelemetry-operator.instrumentation.java.metrics.enabled` | Flag to control metrics export from Java instrumentation in `Instrumentation` resource. | `true` |
+| `opentelemetry-operator.instrumentation.python.traces.enabled` | Flag to control traces export from Python instrumentation in `Instrumentation` resource. | `true` |
+| `opentelemetry-operator.instrumentation.python.metrics.enabled` | Flag to control metrics export from Python instrumentation in `Instrumentation` resource. | `true` |
+| `opentelemetry-operator.admissionWebhooks` | Admission webhooks make sure only requests with correctly formatted rules will get into the Operator. They also enable the sidecar injection for OpenTelemetryCollector and Instrumentation CRs. | `See [values.yaml]` |
+| `opentelemetry-operator.manager.collectorImage.repository` | The default collector image repository for OpenTelemetryCollector CRDs. | `public.ecr.aws/sumologic/sumologic-otel-collector` |
+| `opentelemetry-operator.manager.collectorImage.tag` | The default collector image tag for OpenTelemetryCollector CRDs. | `0.85.0-sumo-0` |
+| `opentelemetry-operator.manager.env` | Additional environment variables for the opentelemetry-operator helm chart. | `{}` |
+| `opentelemetry-operator.manager.resources.limits.cpu` | Used to set the CPU limit for the OpenTelemetry Operator Manager. | `250m` |
+| `opentelemetry-operator.manager.resources.limits.memory` | Used to set the memory limit for the OpenTelemetry Operator Manager. | `512Mi` |
+| `opentelemetry-operator.manager.resources.requests.cpu` | Used to set the requested CPU for the OpenTelemetry Operator Manager. | `150m` |
+| `opentelemetry-operator.manager.resources.requests.memory` | Used to set the requested memory for the OpenTelemetry Operator Manager. | `256Mi` |
+| `pvcCleaner.metrics.enabled` | Flag to enable cleaning unused PVCs for otelcol metrics statefulsets. | `false` |
+| `pvcCleaner.logs.enabled` | Flag to enable cleaning unused PVCs for otelcol logs statefulsets. | `false` |
+| `pvcCleaner.job.image.repository` | Image repository for pvcCleaner docker containers. | `public.ecr.aws/sumologic/kubernetes-tools-kubectl` |
+| `pvcCleaner.job.image.tag` | Image tag for pvcCleaner docker containers. | `2.20.0` |
+| `pvcCleaner.job.image.pullPolicy` | Image pullPolicy for pvcCleaner docker containers. | `IfNotPresent` |
+| `pvcCleaner.job.resources` | Resource requests and limits for the pvcCleaner containers. | `{"limits": {"memory": "256Mi", "cpu": "2000m"}, "requests": {"memory": "64Mi", "cpu": "100m"}}` |
+| `pvcCleaner.job.nodeSelector` | Node selector for pvcCleaner job. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` |
+| `pvcCleaner.job.tolerations` | Add tolerations for the pvcCleaner job. | `[]` |
+| `pvcCleaner.job.affinity` | Add affinity and anti-affinity for the pvcCleaner job. | `{}` |
+| `pvcCleaner.job.podLabels` | Additional labels for the pvcCleaner container. | `{}` |
+| `pvcCleaner.job.podAnnotations` | Additional annotations for the pvcCleaner container. | `{}` |
+| `pvcCleaner.job.schedule` | Schedule for the pvcCleaner CronJobs. | `*/15 * * * *` |
+| `pvcCleaner.job.securityContext` | The securityContext configuration for the pvcCleaner. | `{"runAsUser": 1000}` |
| `{"runAsUser": 1000}` | -[values.yaml]: values.yaml +[values.yaml]: values.yaml \ No newline at end of file diff --git a/deploy/helm/sumologic/_values.yaml b/deploy/helm/sumologic/_values.yaml new file mode 100644 index 0000000000..512466bf44 --- /dev/null +++ b/deploy/helm/sumologic/_values.yaml @@ -0,0 +1,3721 @@ +nameOverride: "" +fullnameOverride: "" +## Use the same namespace as namespaceOverride in 'kube-prometheus-stack.namespaceOverride' if Prometheus setup is also enabled +namespaceOverride: "" +sumologic: + ## If enabled, a pre-install hook will create Collector and Sources in Sumo Logic + setupEnabled: true + ## If enabled, a pre-delete hook will destroy Collector in Sumo Logic + cleanupEnabled: false + ## If enabled, accessId and accessKey will be sourced from Secret Name given + ## Be sure to include at least the following env variables in your secret + ## (1) SUMOLOGIC_ACCESSID, (2) SUMOLOGIC_ACCESSKEY + # envFromSecret: sumo-api-secret + ## Sumo access ID + # accessId: '' + ## Sumo access key + # accessKey: '' + ## Sumo API endpoint; Leave blank for automatic endpoint discovery and redirection + ## ref: https://help.sumologic.com/docs/api/getting-started#sumo-logic-endpoints-by-deployment-and-firewall-security + endpoint: "" + ## proxy urls + httpProxy: "" + httpsProxy: "" + ## Exclude Kubernetes internal traffic from proxy + noProxy: kubernetes.default.svc + ## Collector name + # collectorName: '' + ## Cluster name: Note spaces are not allowed and will be replaced with dashes. + clusterName: kubernetes + ## Configuration of Kubernetes for Terraform client + ## https://www.terraform.io/docs/providers/kubernetes/index.html#argument-reference + ## All double quotes should be escaped here regarding Terraform syntax + cluster: + host: https://kubernetes.default.svc + # username: '' + # password: '' + # insecure: '' + # client_certificate: '' + # client_key: '' + cluster_ca_certificate: ${file("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt")} + # config_path: '' + # config_context: '' + # config_context_auth_info: '' + # config_context_cluster: '' + token: ${file("/var/run/secrets/kubernetes.io/serviceaccount/token")} + # exec: + # api_version: '' + # command: '' + # args: '' + # env: + ## If you set it to false, it would set EXCLUDE_NAMESPACE= + ## and not add the Otelcol logs and Prometheus remotestorage metrics. + collectionMonitoring: true + ## Optionally specify an array of pullSecrets. + ## They will be added to serviceaccount that is used for Sumo Logic's + ## deployments and statefulsets. + ## + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + # pullSecrets: + # - name: myRegistryKeySecretName + ## Add custom labels to the following sumologic resources(otelcol sts, setup job, otelcol deployment) + podLabels: + ## Add custom annotations to the following sumologic resources(otelcol sts, setup job, otelcol deployment) + podAnnotations: + ## Add custom annotations to sumologic serviceAccounts + serviceAccount: + annotations: + ## creation of Security Context Constraints in Openshift + scc: + create: false + setup: + ## uncomment to force collection installation (disables k8s version verification) + # force: true + job: + image: + repository: public.ecr.aws/sumologic/kubernetes-setup + tag: 3.10.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of pullSecrets. + ## They will be added to serviceaccount that is used for Sumo Logic's + ## setup job. 
diff --git a/deploy/helm/sumologic/_values.yaml b/deploy/helm/sumologic/_values.yaml
new file mode 100644
index 0000000000..512466bf44
--- /dev/null
+++ b/deploy/helm/sumologic/_values.yaml
@@ -0,0 +1,3721 @@
+nameOverride: ""
+fullnameOverride: ""
+## Use the same namespace as namespaceOverride in 'kube-prometheus-stack.namespaceOverride' if Prometheus setup is also enabled
+namespaceOverride: ""
+sumologic:
+  ## If enabled, a pre-install hook will create Collector and Sources in Sumo Logic
+  setupEnabled: true
+  ## If enabled, a pre-delete hook will destroy Collector in Sumo Logic
+  cleanupEnabled: false
+  ## If enabled, accessId and accessKey will be sourced from the Secret name given.
+  ## Be sure to include at least the following env variables in your secret:
+  ## (1) SUMOLOGIC_ACCESSID, (2) SUMOLOGIC_ACCESSKEY
+  # envFromSecret: sumo-api-secret
+  ## Sumo access ID
+  # accessId: ''
+  ## Sumo access key
+  # accessKey: ''
+  ## Sumo API endpoint; leave blank for automatic endpoint discovery and redirection
+  ## ref: https://help.sumologic.com/docs/api/getting-started#sumo-logic-endpoints-by-deployment-and-firewall-security
+  endpoint: ""
+  ## Proxy URLs
+  httpProxy: ""
+  httpsProxy: ""
+  ## Exclude Kubernetes internal traffic from proxy
+  noProxy: kubernetes.default.svc
+  ## Collector name
+  # collectorName: ''
+  ## Cluster name: note that spaces are not allowed and will be replaced with dashes.
+  clusterName: kubernetes
+  ## Configuration of Kubernetes for the Terraform client
+  ## https://www.terraform.io/docs/providers/kubernetes/index.html#argument-reference
+  ## All double quotes should be escaped here regarding Terraform syntax
+  cluster:
+    host: https://kubernetes.default.svc
+    # username: ''
+    # password: ''
+    # insecure: ''
+    # client_certificate: ''
+    # client_key: ''
+    cluster_ca_certificate: ${file("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt")}
+    # config_path: ''
+    # config_context: ''
+    # config_context_auth_info: ''
+    # config_context_cluster: ''
+    token: ${file("/var/run/secrets/kubernetes.io/serviceaccount/token")}
+    # exec:
+    #   api_version: ''
+    #   command: ''
+    #   args: ''
+    #   env:
+  ## If you set it to false, it will set EXCLUDE_NAMESPACE=
+  ## and not add the Otelcol logs and Prometheus remotestorage metrics.
+  collectionMonitoring: true
+  ## Optionally specify an array of pullSecrets.
+  ## They will be added to the serviceaccount that is used for Sumo Logic's
+  ## deployments and statefulsets.
+  ##
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  # pullSecrets:
+  #   - name: myRegistryKeySecretName
+  ## Add custom labels to the following sumologic resources (otelcol sts, setup job, otelcol deployment)
+  podLabels:
+  ## Add custom annotations to the following sumologic resources (otelcol sts, setup job, otelcol deployment)
+  podAnnotations:
+  ## Add custom annotations to sumologic serviceAccounts
+  serviceAccount:
+    annotations:
+  ## Creation of Security Context Constraints in OpenShift
+  scc:
+    create: false
+  setup:
+    ## uncomment to force collection installation (disables k8s version verification)
+    # force: true
+    job:
+      image:
+        repository: public.ecr.aws/sumologic/kubernetes-setup
+        tag: 3.10.0
+        pullPolicy: IfNotPresent
+      ## Optionally specify an array of pullSecrets.
+      ## They will be added to the serviceaccount that is used for Sumo Logic's
+      ## setup job.
+      ## Secrets must be manually created in the namespace.
+      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+      # pullSecrets:
+      #   - name: myRegistryKeySecretName
+      resources:
+        limits:
+          memory: 256Mi
+          cpu: 2000m
+        requests:
+          memory: 64Mi
+          cpu: 200m
+      nodeSelector:
+      ## Node tolerations for server scheduling to nodes with taints
+      ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+      tolerations: []
+      # - effect: NoSchedule
+      #   key: null
+      #   operator: Exists
+      ## Affinity and anti-affinity
+      ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+      ##
+      affinity:
+      ## Add custom labels only to the setup job pod
+      podLabels:
+      ## Add custom annotations only to the setup job pod
+      podAnnotations:
+    ## uncomment for the debug mode (disables the automatic run of the setup.sh script)
+    # debug: true
+    ## If enabled, a pre-install hook will create k8s monitors in Sumo Logic
+    monitors:
+      enabled: true
+      ## The default status of installed monitors: enabled/disabled
+      monitorStatus: enabled
+      ## A list of emails to send notifications from monitors
+      notificationEmails: []
+    dashboards:
+      ## If enabled, a pre-install hook will install k8s dashboards in Sumo Logic
+      enabled: true
+  collector:
+    ## Configuration of additional collector fields
+    ## https://help.sumologic.com/docs/manage/fields/#http-source-fields
+    ##
+    fields:
+    ## Configuration of http sources
+    ## See docs/Terraform.md for more information
+    ## name: source name visible in the Sumo Logic platform
+    ## config-name: this is mostly for backward compatibility
+    ##
+    sources:
+      metrics:
+        default:
+          name: (default-metrics)
+          config-name: endpoint-metrics
+        default-otlp:
+          name: metrics-otlp
+          config-name: endpoint-metrics-otlp
+          properties:
+            content_type: Otlp
+        apiserver:
+          name: apiserver-metrics
+          config-name: endpoint-metrics-apiserver
+        controller:
+          name: kube-controller-manager-metrics
+          config-name: endpoint-metrics-kube-controller-manager
+        scheduler:
+          name: kube-scheduler-metrics
+          config-name: endpoint-metrics-kube-scheduler
+        state:
+          name: kube-state-metrics
+          config-name: endpoint-metrics-kube-state
+        kubelet:
+          name: kubelet-metrics
+          config-name: endpoint-metrics-kubelet
+        node:
+          name: node-exporter-metrics
+          config-name: endpoint-metrics-node-exporter
+        control-plane:
+          name: control-plane-metrics
+      logs:
+        default:
+          name: logs
+          config-name: endpoint-logs
+          ## Properties can be used to extend default settings, such as processing rules, fields, etc.
+          properties:
+            ## Ensures that the timestamp key has precedence over timestamp auto discovery
+            default_date_formats:
+              - format: epoch
+                locator: \"timestamp\":(\\d+)
+            # filters:
+            #   - filter_type: Exclude
+            #     name: Test Exclude Debug
+            #     regexp: .*DEBUG.*
+        default-otlp:
+          name: logs-otlp
+          config-name: endpoint-logs-otlp
+          properties:
+            content_type: Otlp
+      events:
+        default:
+          name: events
+          config-name: endpoint-events
+          properties:
+            ## Ensures that the timestamp key has precedence over timestamp auto discovery
+            default_date_formats:
+              - format: epoch
+                locator: \"timestamp\":(\\d+)
+        default-otlp:
+          name: events-otlp
+          config-name: endpoint-events-otlp
+          properties:
+            content_type: Otlp
+      traces:
+        default:
+          name: traces
+          config-name: endpoint-traces
+          properties:
+            content_type: Zipkin
+        default-otlp:
+          name: traces-otlp
+          config-name: endpoint-traces-otlp
+          properties:
+            content_type: Otlp
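+    ## Each signal above defines both a default (http) source and a default-otlp source. Which of the two
+    ## is actually used is controlled by the per-signal sourceType keys further down in this file, for example:
+    ## traces:
+    ##   sourceType: otlp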
+  ## Global configuration for OpenTelemetry Collector
+  otelcolImage:
+    repository: public.ecr.aws/sumologic/sumologic-otel-collector
+    tag: 0.85.0-sumo-0
+    ## Add a -fips suffix to all image tags. With default tags, this results in FIPS-compliant otel images.
+    ## See https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/fips.md for more information.
+    ##
+    addFipsSuffix: false
+  ## Configuration for collection of Kubernetes events
+  events:
+    enabled: true
+    ## Source name for the Events source. Default: "events"
+    sourceName: events
+    ## Source category for the Events source. Default: "" which is resolved to "{clusterName}/events"
+    # sourceCategory: kubernetes/events
+    ## Used to replace '-' with another character.
+    sourceCategoryReplaceDash: /
+    persistence:
+      enabled: true
+      size: 10Gi
+      ## Configuration for the Persistent Volume and Persistent Volume Claim
+      ## where the storage is kept
+      ##
+      persistentVolume:
+        path: /var/lib/storage/events
+        accessMode: ReadWriteOnce
+        ## Add custom labels to otelcol event statefulset PVC
+        pvcLabels: {}
+        # storageClass: ''
+    sourceType: http
+  ## Logs configuration
+  logs:
+    ## Set the enabled flag to false to disable logs ingestion altogether.
+    enabled: true
+    collector:
+      otelcol:
+        enabled: true
+      ## Experimental
+      otelcloudwatch:
+        enabled: false
+        roleArn: ""
+        ## Configure persistence for the cloudwatch collector
+        persistence:
+          enabled: true
+        region: ""
+        pollInterval: 1m
+        ## A map of log groups and stream prefixes, for example:
+        ## logGroups:
+        ##   fluent-bit:
+        ##     names: [fluent-bit]
+        ##
+        logGroups:
+    multiline:
+      enabled: true
+      first_line_regex: ^\[?\d{4}-\d{1,2}-\d{1,2}.\d{2}:\d{2}:\d{2}
+      ## Additional configurations take precedence over first_line_regex and are executed only for the first matching condition
+      ##
+      ## Example:
+      ## - first_line_regex: "^@@@@ First Line"
+      ##   condition: 'attributes["k8s.namespace.name"] == "foo"'
+      ## - first_line_regex: "^--- First Line"
+      ##   condition: 'attributes["k8s.container.name"] matches "^bar-.*"'
+      ##
+      ## NOTE: See below link for full reference:
+      ## https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/collecting-container-logs.md#conditional-multiline-log-parsing
+      ##
+      additional: []
+    container:
+      enabled: true
+      ## Format to post logs into Sumo: fields, json, json_merge, or text.
+      ## NOTE: json is an alias for fields
+      ## NOTE: Multiline log detection works differently for `text` format. See below link for full reference:
+      ## https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/collecting-container-logs.md#text-log-format
+      ##
+      format: fields
+      ## When set to `true`, preserves the `time` attribute, which is a string representation of the `timestamp` attribute.
+      keep_time_attribute: false
+      otelcol:
+        ## Extra processors for container logs. See [/docs/collecting-container-logs.md](/docs/collecting-container-logs.md) for details.
+        extraProcessors: []
+      ## Set the _sourceHost metadata field in Sumo Logic.
+      sourceHost: ""
+      ## Set the _sourceName metadata field in Sumo Logic.
+      sourceName: "%{namespace}.%{pod}.%{container}"
+      ## Set the _sourceCategory metadata field in Sumo Logic.
+      sourceCategory: "%{namespace}/%{pod_name}"
+      ## Set the prefix for _sourceCategory metadata.
+      sourceCategoryPrefix: kubernetes/
+      ## Used to replace '-' with another character.
+      sourceCategoryReplaceDash: /
+      ## A regular expression for containers.
+      ## Matching containers will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).
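+      ## For example, to additionally exclude Istio sidecar containers (hypothetical pattern):
+      ## excludeContainerRegex: "istio-proxy"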
+      ##
+      excludeContainerRegex: ""
+      ## A regular expression for hosts.
+      ## Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).
+      ##
+      excludeHostRegex: ""
+      ## A regular expression for namespaces.
+      ## Matching namespaces will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).
+      ##
+      excludeNamespaceRegex: ""
+      ## A regular expression for pods.
+      ## Matching pods will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).
+      ##
+      excludePodRegex: ""
+      ## Defines whether container-level pod annotations are enabled.
+      perContainerAnnotationsEnabled: false
+      ## Defines the list of prefixes of container-level pod annotations.
+      perContainerAnnotationPrefixes: []
+    systemd:
+      enabled: true
+      ## systemd units to collect logs from
+      # units:
+      #   - docker.service
+      ## Extra processors for systemd logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details.
+      otelcol:
+        extraProcessors: []
+      ## Set the _sourceName metadata field in Sumo Logic.
+      sourceName: "%{_sourceName}"
+      ## Set the _sourceCategory metadata field in Sumo Logic.
+      sourceCategory: system
+      ## Set the prefix for _sourceCategory metadata.
+      sourceCategoryPrefix: kubernetes/
+      ## Used to replace '-' with another character.
+      sourceCategoryReplaceDash: /
+      ## A regular expression for facility.
+      ## Matching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).
+      ##
+      excludeFacilityRegex: ""
+      ## A regular expression for hosts.
+      ## Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).
+      ##
+      excludeHostRegex: ""
+      ## A regular expression for priority.
+      ## Matching priority will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).
+      ##
+      excludePriorityRegex: ""
+      ## A regular expression for unit.
+      ## Matching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).
+      ##
+      excludeUnitRegex: ""
+    kubelet:
+      otelcol:
+        ## Extra processors for kubelet logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details.
+        extraProcessors: []
+      ## Set the _sourceName metadata field in Sumo Logic.
+      sourceName: k8s_kubelet
+      ## Set the _sourceCategory metadata field in Sumo Logic.
+      sourceCategory: kubelet
+      ## Set the prefix for _sourceCategory metadata.
+      sourceCategoryPrefix: kubernetes/
+      ## Used to replace '-' with another character.
+      sourceCategoryReplaceDash: /
+      ## A regular expression for facility.
+      ## Matching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).
+      ##
+      excludeFacilityRegex: ""
+      ## A regular expression for hosts.
+      ## Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).
+      ##
+      excludeHostRegex: ""
+      ## A regular expression for priority.
+      ## Matching priority will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).
+      ##
+      excludePriorityRegex: ""
+      ## A regular expression for unit.
+      ## Matching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).
+      ##
+      excludeUnitRegex: ""
+    ## Fields to be created at Sumo Logic to ensure logs are tagged with
+    ## relevant metadata.
+    ## https://help.sumologic.com/docs/manage/fields/#manage-fields
+    ##
+    fields:
+      - cluster
+      - container
+      - daemonset
+      - deployment
+      - host
+      - namespace
+      - node
+      - pod
+      - service
+      - statefulset
+    sourceType: http
+  ## Metrics configuration
+  metrics:
+    ## Set the enabled flag to false to disable metrics ingestion altogether.
+    enabled: true
+    ## Otel metrics collector. Replaces Prometheus.
+    ## To enable, you need opentelemetry-operator enabled as well.
+    ## Stability: Beta.
+    ##
+    collector:
+      otelcol:
+        enabled: false
+        ## Default scrape interval
+        scrapeInterval: 30s
+        ## Option to turn autoscaling on for otelcol and specify params for HPA.
+        ## Autoscaling needs metrics-server to access CPU metrics.
+        ##
+        autoscaling:
+          enabled: false
+          minReplicas: 3
+          maxReplicas: 10
+          targetCPUUtilizationPercentage: 70
+          targetMemoryUtilizationPercentage: 70
+        nodeSelector:
+        ## Add custom annotations only to metrics otelcol sts pods
+        podAnnotations:
+        ## Add custom labels only to metrics otelcol sts pods
+        podLabels:
+        ## Option to define priorityClassName to assign a priority class to pods.
+        priorityClassName: ""
+        replicaCount: 1
+        resources:
+          limits:
+            memory: 2Gi
+            cpu: 1000m
+          requests:
+            memory: 768Mi
+            cpu: 100m
+        ## Selector for ServiceMonitors used for target discovery. By default, this selects resources created by this Chart.
+        ## See https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr
+        ##
+        # serviceMonitorSelector: {}
+        ## Selector for PodMonitors used for target discovery. By default, this selects resources created by this Chart.
+        ## See https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr
+        ##
+        # podMonitorSelector: {}
+        ## The group ID of all processes in the statefulset containers. This can be anything, but it does need to be set.
+        ## The default is 0 (root), and containers don't have write permissions for volumes in that case.
+        ##
+        securityContext:
+          fsGroup: 999
+        tolerations: []
+        ## Configuration for kubelet metrics
+        kubelet:
+          enabled: true
+        ## Configuration for cAdvisor metrics
+        cAdvisor:
+          enabled: true
+        ## Enable collection of metrics from Pods annotated with prometheus.io/* keys.
+        ## See https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/collecting-application-metrics.md#application-metrics-are-exposed-one-endpoint-scenario for more information.
+        ##
+        annotatedPods:
+          enabled: true
+        ## Allocation strategy for the scrape target allocator. Valid values are: least-weighted and consistent-hashing.
+        ## See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocator
+        ##
+        allocationStrategy: least-weighted
+    ## Default metric filters for Sumo Apps
+    enableDefaultFilters: false
+    ## By default, the Helm Chart collects some high-cardinality histogram metrics, as Sumo Apps make use of the sum and count components.
+    ## This setting causes the metrics collector to drop the actual histogram buckets, keeping only the sum and the count.
+    ## This affects the following metrics:
+    ## - apiserver_request_duration_seconds
+    ## - coredns_dns_request_duration_seconds
+    ## - kubelet_runtime_operations_duration_seconds
+    ##
+    dropHistogramBuckets: true
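+    ## For example, with dropHistogramBuckets enabled, apiserver_request_duration_seconds_bucket
+    ## time series are dropped, while apiserver_request_duration_seconds_sum and
+    ## apiserver_request_duration_seconds_count are still collected.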
+    otelcol:
+      ## Includes additional processors into pipelines.
+      ## It can be used for filtering metrics, renaming, changing metadata and so on.
+      ## This is a list of objects, for example:
+      ## extraProcessors:
+      ##   - filterprocessor:
+      ##       exclude:
+      ##         match_type: strict
+      ##         metric_names:
+      ##           - hello_world
+      ##           - hello/world
+      ##
+      extraProcessors: []
+    ## Enable a load balancing proxy for Prometheus remote writes.
+    ## Prometheus remote write uses a single persistent HTTP connection per target,
+    ## which interacts poorly with the iptables-based TCP load balancing that K8s Services do.
+    ## Use a real HTTP load balancer for this instead.
+    ## This is an advanced feature; enable it only if you're experiencing performance
+    ## issues with metrics metadata enrichment.
+    ##
+    remoteWriteProxy:
+      enabled: true
+      config:
+        ## Increase this if you've increased samples_per_send in Prometheus to prevent nginx
+        ## from spilling proxied request bodies to disk.
+        ##
+        clientBodyBufferSize: 64k
+        ## This feature autodetects how much CPU is assigned to the nginx instance and sets
+        ## the right number of workers based on that. Disable to use the default of 8 workers.
+        ##
+        workerCountAutotune: true
+        ## Nginx listen port
+        port: 8080
+        ## Nginx access logs
+        enableAccessLogs: false
+      replicaCount: 3
+      image:
+        repository: public.ecr.aws/sumologic/nginx-unprivileged
+        tag: 1.25.2-alpine
+        pullPolicy: IfNotPresent
+      resources:
+        limits:
+          cpu: 1000m
+          memory: 256Mi
+        requests:
+          cpu: 100m
+          memory: 128Mi
+      livenessProbe:
+        initialDelaySeconds: 30
+        periodSeconds: 10
+        timeoutSeconds: 5
+        successThreshold: 1
+        failureThreshold: 6
+      readinessProbe:
+        initialDelaySeconds: 5
+        periodSeconds: 5
+        timeoutSeconds: 3
+        successThreshold: 1
+        failureThreshold: 3
+      securityContext:
+      nodeSelector:
+      tolerations: []
+      affinity:
+      ## Option to define priorityClassName to assign a priority class to pods.
+      priorityClassName: ""
+      ## Add custom labels only to metrics sts pods
+      podLabels:
+      ## Add custom annotations only to metrics sts pods
+      podAnnotations:
+    ## Prometheus serviceMonitors related to Sumo Logic services
+    ## They are applied only if kube-prometheus-stack is enabled
+    ##
+    serviceMonitors:
+      - additionalLabels:
+          sumologic.com/app: otelcol-logs
+        endpoints:
+          - port: otelcol-metrics
+        name: collection-sumologic-otelcol-logs
+        selector:
+          matchLabels:
+            sumologic.com/app: otelcol-logs
+            sumologic.com/scrape: "true"
+      - additionalLabels:
+          sumologic.com/app: otelcol-metrics
+        endpoints:
+          - port: otelcol-metrics
+        name: collection-sumologic-otelcol-metrics
+        selector:
+          matchLabels:
+            sumologic.com/app: otelcol-metrics
+            sumologic.com/scrape: "true"
+      - additionalLabels:
+          sumologic.com/app: otelcol-metrics
+        endpoints:
+          - port: monitoring
+        name: collection-sumologic-metrics-collector
+        selector:
+          matchLabels:
+            sumologic.com/app: otelcol
+            sumologic.com/component: metrics
+            sumologic.com/scrape: "true"
+      - additionalLabels:
+          sumologic.com/app: otelcol-logs-collector
+        endpoints:
+          - port: metrics
+        name: collection-sumologic-otelcol-logs-collector
+        selector:
+          matchLabels:
+            sumologic.com/app: otelcol-logs-collector
+            sumologic.com/scrape: "true"
+      - additionalLabels:
+          sumologic.com/app: otelcol-events
+        endpoints:
+          - port: otelcol-metrics
+        name: collection-sumologic-otelcol-events
+        selector:
+          matchLabels:
+            sumologic.com/app: otelcol-events
+            sumologic.com/scrape: "true"
+      - additionalLabels:
+          sumologic.com/app: otelcol
+        endpoints:
+          - port: metrics
+        name: collection-sumologic-otelcol-traces
+        selector:
+          matchLabels:
+            sumologic.com/component: instrumentation
+            sumologic.com/scrape: "true"
+      - endpoints:
+          - metricRelabelings:
+              - action: keep
+                regex: prometheus_remote_storage_.*
+                sourceLabels:
+                  - __name__
+            path: /metrics
+            port: http-web
+        name: collection-sumologic-prometheus
+        selector:
+          matchLabels:
+            app: kube-prometheus-stack-prometheus
+    ## The type of source we send to in Sumo. The possible values are http and otlp.
+    ## Consult the documentation for more information.
+    ##
+    sourceType: http
+  ## Traces configuration
+  ## Set the enabled flag to false to disable traces ingestion.
+  ##
+  traces:
+    enabled: true
+    ## How many spans per request should be sent to the receiver
+    spans_per_request: 100
+    sourceType: http
+## Configure metrics-server
+## ref: https://github.com/bitnami/charts/blob/master/bitnami/metrics-server/values.yaml
+##
+metrics-server:
+  ## Set the enabled flag to true to enable metrics-server.
+  ## This is required before enabling autoscaling unless you have an existing metrics-server in the cluster.
+  ##
+  enabled: false
+  ## Put here the new name if you want to override the full name used for metrics-server components.
+  # fullnameOverride: ''
+  apiService:
+    create: true
+  extraArgs:
+    - --kubelet-insecure-tls=true
+    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
+  ## Optionally specify image options for metrics-server
+  # image:
+  #   ## Optionally specify an array of imagePullSecrets.
+  #   ## Secrets must be manually created in the namespace.
+  #   ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  #   ##
+  #   pullSecrets:
+  #     - imagepullsecret
+## Configure kube-prometheus-stack
+## ref: https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml
+##
+kube-prometheus-stack:
+  ## Uncomment the flag below if you do not want to install the kube-prometheus-stack helm chart
+  ## as a dependency along with this helm chart.
+  ## This is needed e.g. if you want to use a different version of kube-prometheus-stack -
+  ## see https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/best-practices.md#using-newer-kube-prometheus-stack.
+  ## To disable metrics collection, set `sumologic.metrics.enabled: false` and leave this flag commented out or set it to `false`.
+  ## Do not set this flag explicitly to `true` while at the same time setting `sumologic.metrics.enabled: false`,
+  ## as this will make Prometheus try to write to a non-existent metrics enrichment service.
+  ##
+  # enabled: false
+  # global:
+  #   ## Reference to one or more secrets to be used when pulling images
+  #   ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  #   ##
+  #   imagePullSecrets:
+  #     - name: image-pull-secret
+  ## Put here the new name if you want to override the full name used for Kube Prometheus Stack components.
+  # fullnameOverride: ''
+  ## Put here the new namespace if you want to override the namespace used for Kube Prometheus Stack components.
+  # namespaceOverride: ''
+  ## Provide a target gitVersion of K8S, in case .Capabilities.KubeVersion is not available (e.g. helm template).
+  ## Changing this may break Sumo Logic apps.
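+  ## For example, when rendering manifests offline with helm template (hypothetical version string):
+  # kubeTargetVersionOverride: "1.26.0"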
+ ## + # kubeTargetVersionOverride: '' + ## Labels to apply to all kube-prometheus-stack resources + commonLabels: + defaultRules: + rules: + alertmanager: false + etcd: false + configReloaders: false + general: false + k8s: false + kubeApiserverAvailability: false + kubeApiserverBurnrate: false + kubeApiserverHistogram: false + kubeApiserverSlos: false + kubeControllerManager: false + kubelet: false + kubeProxy: false + kubePrometheusGeneral: false + kubePrometheusNodeRecording: true + kubernetesApps: false + kubernetesResources: false + kubernetesStorage: false + kubernetesSystem: false + kubeSchedulerAlerting: false + kubeSchedulerRecording: false + kubeStateMetrics: false + network: false + node: true + nodeExporterAlerting: false + nodeExporterRecording: false + prometheus: false + prometheusOperator: false + windows: false + ## k8s pre-1.14 prometheus recording rules + additionalPrometheusRulesMap: + pre-1.14-node-rules: + groups: + - name: node-pre-1.14.rules + rules: + - expr: sum(min(kube_pod_info) by (node)) + record: ":kube_pod_info_node_count:" + - expr: 1 - avg(rate(node_cpu_seconds_total{job="node-exporter",mode="idle"}[1m])) + record: :node_cpu_utilisation:avg1m + - expr: "1 - avg by (node) (\n rate(node_cpu_seconds_total{job=\"node-exporter\"\ + ,mode=\"idle\"}[1m])\n* on (namespace, pod) group_left(node)\n node_namespace_pod:kube_pod_info:)" + record: node:node_cpu_utilisation:avg1m + - expr: "1 -\nsum(\n node_memory_MemFree_bytes{job=\"node-exporter\"} +\n node_memory_Cached_bytes{job=\"\ + node-exporter\"} +\n node_memory_Buffers_bytes{job=\"node-exporter\"}\n)\n\ + /\nsum(node_memory_MemTotal_bytes{job=\"node-exporter\"})" + record: ":node_memory_utilisation:" + - expr: "sum by (node) (\n (\n node_memory_MemFree_bytes{job=\"node-exporter\"\ + } +\n node_memory_Cached_bytes{job=\"node-exporter\"} +\n node_memory_Buffers_bytes{job=\"\ + node-exporter\"}\n )\n * on (namespace, pod) group_left(node)\n node_namespace_pod:kube_pod_info:\n\ + )" + record: node:node_memory_bytes_available:sum + - expr: "(node:node_memory_bytes_total:sum - node:node_memory_bytes_available:sum) + + / + + node:node_memory_bytes_total:sum" + record: node:node_memory_utilisation:ratio + - expr: "1 -\nsum by (node) (\n (\n node_memory_MemFree_bytes{job=\"node-exporter\"\ + } +\n node_memory_Cached_bytes{job=\"node-exporter\"} +\n node_memory_Buffers_bytes{job=\"\ + node-exporter\"}\n )\n* on (namespace, pod) group_left(node)\n node_namespace_pod:kube_pod_info:\n\ + )\n/\nsum by (node) (\n node_memory_MemTotal_bytes{job=\"node-exporter\"}\n\ + * on (namespace, pod) group_left(node)\n node_namespace_pod:kube_pod_info:\n\ + )" + record: "node:node_memory_utilisation:" + - expr: 1 - (node:node_memory_bytes_available:sum / node:node_memory_bytes_total:sum) + record: "node:node_memory_utilisation_2:" + - expr: 'max by (instance, namespace, pod, device) ((node_filesystem_size_bytes{fstype=~"ext[234]|btrfs|xfs|zfs"} + + - node_filesystem_avail_bytes{fstype=~"ext[234]|btrfs|xfs|zfs"}) + + / node_filesystem_size_bytes{fstype=~"ext[234]|btrfs|xfs|zfs"})' + record: "node:node_filesystem_usage:" + - expr: "sum by (node) (\n node_memory_MemTotal_bytes{job=\"node-exporter\"}\n\ + \ * on (namespace, pod) group_left(node)\n node_namespace_pod:kube_pod_info:\n\ + )" + record: node:node_memory_bytes_total:sum + - expr: 'sum(irate(node_network_receive_bytes_total{job="node-exporter",device!~"veth.+"}[1m])) + + + sum(irate(node_network_transmit_bytes_total{job="node-exporter",device!~"veth.+"}[1m]))' + record: 
:node_net_utilisation:sum_irate + - expr: "sum by (node) (\n (irate(node_network_receive_bytes_total{job=\"node-exporter\"\ + ,device!~\"veth.+\"}[1m]) +\n irate(node_network_transmit_bytes_total{job=\"\ + node-exporter\",device!~\"veth.+\"}[1m]))\n* on (namespace, pod) group_left(node)\n\ + \ node_namespace_pod:kube_pod_info:\n)" + record: node:node_net_utilisation:sum_irate + - expr: 'sum(irate(node_network_receive_drop_total{job="node-exporter",device!~"veth.+"}[1m])) + + + sum(irate(node_network_transmit_drop_total{job="node-exporter",device!~"veth.+"}[1m]))' + record: :node_net_saturation:sum_irate + - expr: "sum by (node) (\n (irate(node_network_receive_drop_total{job=\"node-exporter\"\ + ,device!~\"veth.+\"}[1m]) +\n irate(node_network_transmit_drop_total{job=\"\ + node-exporter\",device!~\"veth.+\"}[1m]))\n* on (namespace, pod) group_left(node)\n\ + \ node_namespace_pod:kube_pod_info:\n)" + record: node:node_net_saturation:sum_irate + - expr: 'sum(node_load1{job="node-exporter"}) + + / + + sum(node:node_num_cpu:sum)' + record: ":node_cpu_saturation_load1:" + - expr: avg(irate(node_disk_io_time_weighted_seconds_total{job="node-exporter",device=~"nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+"}[1m])) + record: :node_disk_saturation:avg_irate + - expr: "avg by (node) (\n irate(node_disk_io_time_weighted_seconds_total{job=\"\ + node-exporter\",device=~\"nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+\"}[1m])\n* on (namespace,\ + \ pod) group_left(node)\n node_namespace_pod:kube_pod_info:\n)" + record: node:node_disk_saturation:avg_irate + - expr: avg(irate(node_disk_io_time_seconds_total{job="node-exporter",device=~"nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+"}[1m])) + record: :node_disk_utilisation:avg_irate + - expr: "avg by (node) (\n irate(node_disk_io_time_seconds_total{job=\"node-exporter\"\ + ,device=~\"nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+\"}[1m])\n* on (namespace, pod)\ + \ group_left(node)\n node_namespace_pod:kube_pod_info:\n)" + record: node:node_disk_utilisation:avg_irate + - expr: "1e3 * sum(\n (rate(node_vmstat_pgpgin{job=\"node-exporter\"}[1m])\n+ rate(node_vmstat_pgpgout{job=\"\ + node-exporter\"}[1m]))\n)" + record: :node_memory_swap_io_bytes:sum_rate + - expr: "1e3 * sum by (node) (\n (rate(node_vmstat_pgpgin{job=\"node-exporter\"\ + }[1m])\n+ rate(node_vmstat_pgpgout{job=\"node-exporter\"}[1m]))\n* on (namespace,\ + \ pod) group_left(node)\n node_namespace_pod:kube_pod_info:\n)" + record: node:node_memory_swap_io_bytes:sum_rate + - expr: "node:node_cpu_utilisation:avg1m\n *\nnode:node_num_cpu:sum\n /\nscalar(sum(node:node_num_cpu:sum))" + record: node:cluster_cpu_utilisation:ratio + - expr: "(node:node_memory_bytes_total:sum - node:node_memory_bytes_available:sum) + + / + + scalar(sum(node:node_memory_bytes_total:sum))" + record: node:cluster_memory_utilisation:ratio + - expr: "sum by (node) (\n node_load1{job=\"node-exporter\"}\n* on (namespace,\ + \ pod) group_left(node)\n node_namespace_pod:kube_pod_info:\n)\n/\nnode:node_num_cpu:sum" + record: "node:node_cpu_saturation_load1:" + - expr: "max by (instance, namespace, pod, device) (\n node_filesystem_avail_bytes{fstype=~\"\ + ext[234]|btrfs|xfs|zfs\"}\n /\n node_filesystem_size_bytes{fstype=~\"ext[234]|btrfs|xfs|zfs\"\ + }\n )" + record: "node:node_filesystem_avail:" + - expr: "max(\n max(\n kube_pod_info{job=\"kube-state-metrics\", host_ip!=\"\ + \"}\n ) by (node, host_ip)\n * on (host_ip) group_right (node)\n label_replace(\n\ + \ (\n max(node_filesystem_files{job=\"node-exporter\", mountpoint=\"\ + /\"})\n by (instance)\n ), \"host_ip\", \"$1\", 
\"instance\", \"(.*):.*\"\ + \n )\n) by (node)" + record: "node:node_inodes_total:" + - expr: "max(\n max(\n kube_pod_info{job=\"kube-state-metrics\", host_ip!=\"\ + \"}\n ) by (node, host_ip)\n * on (host_ip) group_right (node)\n label_replace(\n\ + \ (\n max(node_filesystem_files_free{job=\"node-exporter\", mountpoint=\"\ + /\"})\n by (instance)\n ), \"host_ip\", \"$1\", \"instance\", \"(.*):.*\"\ + \n )\n) by (node)" + record: "node:node_inodes_free:" + ## NOTE changing the serviceMonitor scrape interval to be >1m can result in metrics from recording + ## rules to be missing and empty panels in Sumo Logic Kubernetes apps. + ## + kubeApiServer: + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + interval: "" + ## see docs/scraped_metrics.md + ## apiserver_request_count + ## apiserver_request_total + ## apiserver_request_duration_seconds_count + ## apiserver_request_duration_seconds_sum + ## + metricRelabelings: + - action: keep + regex: (?:apiserver_request_(?:count|total)|apiserver_request_(?:duration_seconds)_(?:count|sum)) + sourceLabels: + - __name__ + kubelet: + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + interval: "" + ## Enable scraping /metrics/probes from kubelet's service + probes: false + ## Enable scraping /metrics/resource/v1alpha1 from kubelet's service + resource: false + ## see docs/scraped_metrics.md + ## kubelet metrics: + ## kubelet_docker_operations_errors + ## kubelet_docker_operations_errors_total + ## kubelet_docker_operations_duration_seconds_count + ## kubelet_docker_operations_duration_seconds_sum + ## kubelet_runtime_operations_duration_seconds_count + ## kubelet_runtime_operations_duration_seconds_sum + ## kubelet_running_container_count + ## kubelet_running_containers + ## kubelet_running_pod_count + ## kubelet_running_pods + ## kubelet_docker_operations_latency_microseconds + ## kubelet_docker_operations_latency_microseconds_count + ## kubelet_docker_operations_latency_microseconds_sum + ## kubelet_runtime_operations_latency_microseconds + ## kubelet_runtime_operations_latency_microseconds_count + ## kubelet_runtime_operations_latency_microseconds_sum + ## + metricRelabelings: + - action: keep + regex: (?:kubelet_docker_operations_errors(?:|_total)|kubelet_(?:docker|runtime)_operations_duration_seconds_(?:count|sum)|kubelet_running_(?:container|pod)(?:_count|s)|kubelet_(:?docker|runtime)_operations_latency_microseconds(?:|_count|_sum)) + sourceLabels: + - __name__ + - action: labeldrop + regex: id + ## see docs/scraped_metrics.md + ## cadvisor container metrics + ## container_cpu_usage_seconds_total + ## container_fs_limit_bytes + ## container_fs_usage_bytes + ## container_memory_working_set_bytes + ## container_cpu_cfs_throttled_seconds_total + ## ## cadvisor aggregate container metrics + ## container_network_receive_bytes_total + ## container_network_transmit_bytes_total + ## Drop container metrics with container tag set to an empty string: + ## these are the pod aggregated container metrics which can be aggregated + ## in Sumo anyway. There's also some cgroup-specific time series we also + ## do not need. 
+ ##
+ cAdvisorMetricRelabelings:
+ - action: keep
+ regex: (?:container_cpu_usage_seconds_total|container_memory_working_set_bytes|container_fs_usage_bytes|container_fs_limit_bytes|container_cpu_cfs_throttled_seconds_total|container_network_receive_bytes_total|container_network_transmit_bytes_total)
+ sourceLabels:
+ - __name__
+ - action: drop
+ regex: (?:container_cpu_usage_seconds_total|container_memory_working_set_bytes|container_fs_usage_bytes|container_fs_limit_bytes);$
+ sourceLabels:
+ - __name__
+ - container
+ - action: labelmap
+ regex: container_name
+ replacement: container
+ - action: drop
+ regex: POD
+ sourceLabels:
+ - container
+ - action: labeldrop
+ regex: (id|name)
+ kubeControllerManager:
+ serviceMonitor:
+ ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+ interval: ""
+ ## see docs/scraped_metrics.md
+ ## controller manager metrics
+ ## https://kubernetes.io/docs/concepts/cluster-administration/monitoring/#kube-controller-manager-metrics
+ ## e.g.
+ ## cloudprovider_aws_api_request_duration_seconds_bucket
+ ## cloudprovider_aws_api_request_duration_seconds_count
+ ## cloudprovider_aws_api_request_duration_seconds_sum
+ ##
+ metricRelabelings:
+ - action: keep
+ regex: (?:cloudprovider_.*_api_request_duration_seconds.*)
+ sourceLabels:
+ - __name__
+ coreDns:
+ serviceMonitor:
+ ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+ interval: ""
+ ## see docs/scraped_metrics.md
+ ## coredns:
+ ## coredns_cache_entries
+ ## coredns_cache_hits_total
+ ## coredns_cache_misses_total
+ ## coredns_dns_request_duration_seconds_count
+ ## coredns_dns_request_duration_seconds_sum
+ ## coredns_dns_requests_total
+ ## coredns_dns_responses_total
+ ## coredns_forward_requests_total
+ ## process_cpu_seconds_total
+ ## process_open_fds
+ ## process_resident_memory_bytes
+ ##
+ metricRelabelings:
+ - action: keep
+ regex: (?:coredns_cache_(entries|(hits|misses)_total)|coredns_dns_request_duration_seconds_(count|sum)|coredns_(forward_requests|dns_requests|dns_responses)_total|process_(cpu_seconds_total|open_fds|resident_memory_bytes))
+ sourceLabels:
+ - __name__
+ kubeEtcd:
+ serviceMonitor:
+ ## Scrape interval. If not set, the Prometheus default scrape interval is used.
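+ ## e.g. interval: 30s (the chart's own prometheusSpec.scrapeInterval default);
+ ## per the NOTE above, avoid values >1m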
+ interval: "" + ## see docs/scraped_metrics.md + ## etcd_request_cache_get_duration_seconds_count + ## etcd_request_cache_get_duration_seconds_sum + ## etcd_request_cache_add_duration_seconds_count + ## etcd_request_cache_add_duration_seconds_sum + ## etcd_request_cache_add_latencies_summary_count + ## etcd_request_cache_add_latencies_summary_sum + ## etcd_request_cache_get_latencies_summary_count + ## etcd_request_cache_get_latencies_summary_sum + ## etcd_helper_cache_hit_count + ## etcd_helper_cache_hit_total + ## etcd_helper_cache_miss_count + ## etcd_helper_cache_miss_total + ## etcd server: + ## etcd_mvcc_db_total_size_in_bytes + ## etcd_debugging_store_expires_total + ## etcd_debugging_store_watchers + ## etcd_disk_backend_commit_duration_seconds_bucket + ## etcd_disk_wal_fsync_duration_seconds_bucket + ## etcd_grpc_proxy_cache_hits_total + ## etcd_grpc_proxy_cache_misses_total + ## etcd_network_client_grpc_received_bytes_total + ## etcd_network_client_grpc_sent_bytes_total + ## etcd_server_has_leader + ## etcd_server_leader_changes_seen_total + ## etcd_server_proposals_applied_total + ## etcd_server_proposals_committed_total + ## etcd_server_proposals_failed_total + ## etcd_server_proposals_pending + ## process_cpu_seconds_total + ## process_open_fds + ## process_resident_memory_bytes + ## + metricRelabelings: + - action: keep + regex: (?:etcd_request_cache_(?:add|get)_(?:duration_seconds|latencies_summary)_(?:count|sum)|etcd_helper_cache_(?:hit|miss)_(?:count|total)|etcd_mvcc_db_total_size_in_bytes|etcd_debugging_(store_(expires_total|watchers))|etcd_disk_(backend_commit|wal_fsync)_duration_seconds_.*|etcd_grpc_proxy_cache_(hits|misses)_total|etcd_network_client_grpc_(received|sent)_bytes_total|etcd_server_(has_leader|leader_changes_seen_total)|etcd_server_proposals_(pending|(applied|committed|failed)_total)|process_(cpu_seconds_total|open_fds|resident_memory_bytes)) + sourceLabels: + - __name__ + kubeScheduler: + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. 
+ interval: "" + ## see docs/scraped_metrics.md + ## ## scheduler_e2e_* is present for K8s <1.23 + ## scheduler_e2e_scheduling_duration_seconds_bucket + ## scheduler_e2e_scheduling_duration_seconds_count + ## scheduler_e2e_scheduling_duration_seconds_sum + ## ## scheduler_scheduling_attempt_duration_seconds is present for K8s >=1.23 + ## scheduler_scheduling_attempt_duration_seconds_bucket + ## scheduler_scheduling_attempt_duration_seconds_count + ## scheduler_scheduling_attempt_duration_seconds_sum + ## ## scheduler_framework_extension_point_duration_seconds_bucket + ## scheduler_framework_extension_point_duration_seconds_count + ## scheduler_framework_extension_point_duration_seconds_sum + ## scheduler_scheduling_algorithm_duration_seconds_bucket + ## scheduler_scheduling_algorithm_duration_seconds_count + ## scheduler_scheduling_algorithm_duration_seconds_sum + ## + metricRelabelings: + - action: keep + regex: (?:scheduler_(?:e2e_scheduling|scheduling_attempt|framework_extension_point|scheduling_algorithm)_duration_seconds.*) + sourceLabels: + - __name__ + alertmanager: + enabled: false + grafana: + enabled: false + defaultDashboardsEnabled: false + prometheusOperator: + ## Labels to add to the operator pod + podLabels: + ## Annotations to add to the operator pod + podAnnotations: + ## Resource limits for prometheus operator + resources: {} + # limits: + # cpu: 200m + # memory: 200Mi + # requests: + # cpu: 100m + # memory: 100Mi + admissionWebhooks: + enabled: false + tls: + enabled: false + ## Resource limits for kube-state-metrics + kube-state-metrics: + ## Put here the new name if you want to override the full name used for Kube State Metrics components. + # fullnameOverride: '' + nodeSelector: + ## Custom labels to apply to service, deployment and pods + customLabels: + ## Additional annotations for pods in the DaemonSet + podAnnotations: + resources: {} + ## latest kube-prometheus-stack version that is supported on OpenShift 4.8-4.10 + ## uses version 2.6.0 of kube-state-metrics, but this version has some critical vulnerabilities, + ## so we bump the image manually. + ## + image: + tag: v2.7.0 + prometheus: + monitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. 
+ interval: "" + ## see docs/scraped_metrics.md + ## kube_daemonset_status_current_number_scheduled + ## kube_daemonset_status_desired_number_scheduled + ## kube_daemonset_status_number_misscheduled + ## kube_daemonset_status_number_unavailable + ## kube_deployment_spec_replicas + ## kube_deployment_status_replicas_available + ## kube_deployment_status_replicas_unavailable + ## kube_node_info + ## kube_node_status_allocatable + ## kube_node_status_capacity + ## kube_node_status_condition + ## kube_statefulset_metadata_generation + ## kube_statefulset_replicas + ## kube_statefulset_status_observed_generation + ## kube_statefulset_status_replicas + ## kube_hpa_spec_max_replicas + ## kube_hpa_spec_min_replicas + ## kube_hpa_status_condition + ## kube_hpa_status_current_replicas + ## kube_hpa_status_desired_replicas + ## kube pod state metrics + ## kube_pod_container_info + ## kube_pod_container_resource_limits + ## kube_pod_container_resource_requests + ## kube_pod_container_status_ready + ## kube_pod_container_status_restarts_total + ## kube_pod_container_status_terminated_reason + ## kube_pod_container_status_waiting_reason + ## kube_pod_status_phase + ## kube_pod_info + ## kube_service_info + ## kube_service_spec_external_ip + ## kube_service_spec_type + ## kube_service_status_load_balancer_ingress + ## Drop unnecessary labels Prometheus adds to these metrics + ## We don't want container=kube-state-metrics on everything + ## + metricRelabelings: + - action: keep + regex: (?:kube_statefulset_status_observed_generation|kube_statefulset_status_replicas|kube_statefulset_replicas|kube_statefulset_metadata_generation|kube_daemonset_status_current_number_scheduled|kube_daemonset_status_desired_number_scheduled|kube_daemonset_status_number_misscheduled|kube_daemonset_status_number_unavailable|kube_deployment_spec_replicas|kube_deployment_status_replicas_available|kube_deployment_status_replicas_unavailable|kube_node_info|kube_node_status_allocatable|kube_node_status_capacity|kube_node_status_condition|kube_hpa_spec_max_replicas|kube_hpa_spec_min_replicas|kube_hpa_status_(condition|(current|desired)_replicas)|kube_pod_container_info|kube_pod_container_resource_requests|kube_pod_container_resource_limits|kube_pod_container_status_ready|kube_pod_container_status_terminated_reason|kube_pod_container_status_waiting_reason|kube_pod_container_status_restarts_total|kube_pod_status_phase|kube_pod_info|kube_service_info|kube_service_spec_external_ip|kube_service_spec_type|kube_service_status_load_balancer_ingress) + sourceLabels: + - __name__ + - action: labeldrop + regex: service + - action: replace + regex: kube-state-metrics + replacement: "" + sourceLabels: + - container + targetLabel: container + - action: replace + regex: .*kube-state-metrics.* + replacement: "" + sourceLabels: + - pod + targetLabel: pod + - action: labelmap + regex: (pod|service) + replacement: service_discovery_${1} + ## Resource limits for prometheus node exporter + prometheus-node-exporter: + ## Put here the new name if you want to override the full name used for Prometheus Node exporter components. + # fullnameOverride: '' + nodeSelector: + ## Additional labels for pods in the DaemonSet + podLabels: + ## Additional annotations for pods in the DaemonSet + podAnnotations: + resources: {} + # limits: + # cpu: 200m + # memory: 50Mi + # requests: + # cpu: 100m + # memory: 30Mi + prometheus: + monitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. 
+ interval: "" + ## see docs/scraped_metrics.md + ## node exporter metrics + ## node_cpu_seconds_total + ## node_load1 + ## node_load5 + ## node_load15 + ## node_disk_io_time_weighted_seconds_total + ## node_disk_io_time_seconds_total + ## node_vmstat_pgpgin + ## node_vmstat_pgpgout + ## node_memory_MemFree_bytes + ## node_memory_Cached_bytes + ## node_memory_Buffers_bytes + ## node_memory_MemTotal_bytes + ## node_network_receive_drop_total + ## node_network_transmit_drop_total + ## node_network_receive_bytes_total + ## node_network_transmit_bytes_total + ## node_filesystem_avail_bytes + ## node_filesystem_size_bytes + ## node_filesystem_files_free + ## node_filesystem_files + ## + metricRelabelings: + - action: keep + regex: (?:node_load1|node_load5|node_load15|node_cpu_seconds_total|node_disk_io_time_weighted_seconds_total|node_disk_io_time_seconds_total|node_vmstat_pgpgin|node_vmstat_pgpgout|node_memory_MemFree_bytes|node_memory_Cached_bytes|node_memory_Buffers_bytes|node_memory_MemTotal_bytes|node_network_receive_drop_total|node_network_transmit_drop_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_filesystem_avail_bytes|node_filesystem_size_bytes) + sourceLabels: + - __name__ + prometheus: + additionalServiceMonitors: [] + prometheusSpec: + ## Prometheus default scrape interval, default from upstream Kube Prometheus Stack Helm chart + ## NOTE changing the scrape interval to be >1m can result in metrics + ## from recording rules to be missing and empty panels in Sumo Logic Kubernetes apps. + ## + scrapeInterval: 30s + ## Prometheus data retention period + retention: 1d + ## Add custom pod annotations and labels to prometheus pods + podMetadata: + labels: + annotations: + nodeSelector: + ## Define resources requests and limits for single Pods. 
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 8Gi
+ requests:
+ cpu: 500m
+ memory: 1Gi
+ initContainers:
+ - env:
+ - name: METADATA_METRICS_SVC
+ valueFrom:
+ configMapKeyRef:
+ key: metadataMetrics
+ name: sumologic-configmap
+ - name: NAMESPACE
+ valueFrom:
+ configMapKeyRef:
+ key: metadataNamespace
+ name: sumologic-configmap
+ name: init-config-reloader
+ containers:
+ - env:
+ - name: METADATA_METRICS_SVC
+ valueFrom:
+ configMapKeyRef:
+ key: metadataMetrics
+ name: sumologic-configmap
+ - name: NAMESPACE
+ valueFrom:
+ configMapKeyRef:
+ key: metadataNamespace
+ name: sumologic-configmap
+ name: config-reloader
+ ## Enable WAL compression to reduce Prometheus memory consumption
+ walCompression: true
+ ## prometheus scrape config
+ ## rel: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config
+ ## scraping metrics based on annotations:
+ ## - prometheus.io/scrape: true - to scrape metrics from the pod
+ ## - prometheus.io/path: /metrics - the path which the metrics should be scraped from
+ ## - prometheus.io/port: 9113 - the port which the metrics should be scraped from
+ ## rel: https://github.com/prometheus-operator/kube-prometheus/pull/16#issuecomment-424318647
+ ##
+ additionalScrapeConfigs:
+ - job_name: pod-annotations
+ kubernetes_sd_configs:
+ - role: pod
+ relabel_configs:
+ - action: keep
+ regex: true
+ source_labels:
+ - __meta_kubernetes_pod_annotation_prometheus_io_scrape
+ - action: replace
+ regex: (.+)
+ source_labels:
+ - __meta_kubernetes_pod_annotation_prometheus_io_path
+ target_label: __metrics_path__
+ - action: replace
+ regex: ([^:]+)(?::\d+)?;(\d+)
+ replacement: $1:$2
+ source_labels:
+ - __address__
+ - __meta_kubernetes_pod_annotation_prometheus_io_port
+ target_label: __address__
+ - action: replace
+ regex: (.*)
+ replacement: $1
+ separator: ;
+ source_labels:
+ - __metrics_path__
+ target_label: endpoint
+ - action: replace
+ source_labels:
+ - __meta_kubernetes_namespace
+ target_label: namespace
+ - action: labelmap
+ regex: __meta_kubernetes_pod_label_(.+)
+ - action: replace
+ regex: (.*)
+ replacement: $1
+ separator: ;
+ source_labels:
+ - __meta_kubernetes_pod_name
+ target_label: pod
+ - action: replace
+ regex: (.*)
+ replacement: "true"
+ separator: ;
+ source_labels:
+ - __name__
+ target_label: _sumo_forward_
+ remoteWrite:
+ ## infrastructure metrics
+ - remoteTimeout: 5s
+ url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics
+ writeRelabelConfigs:
+ - action: keep
+ regex: (?:kube-state-metrics|kubelet|kube-scheduler|apiserver|coredns|kube-etcd|.+-sumologic-.+|.+-prometheus)
+ sourceLabels:
+ - job
+ - action: drop
+ regex: kube_pod_info
+ sourceLabels:
+ - __name__
+ - action: drop
+ regex: scrape_.*
+ sourceLabels:
+ - __name__
+ ## This needs to be separate because we have a bunch of recording-rule-based metrics for Nodes which fall into this job
+ - remoteTimeout: 5s
+ url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.node
+ writeRelabelConfigs:
+ - action: keep
+ regex: node-exporter;(?:node_load1|node_load5|node_load15|node_cpu_seconds_total)
+ sourceLabels:
+ - job
+ - __name__
+ ## prometheus operator rules
+ ## :kube_pod_info_node_count:
+ ## :node_cpu_saturation_load1:
+ ## :node_cpu_utilisation:avg1m
+ ## :node_disk_saturation:avg_irate
+ ## :node_disk_utilisation:avg_irate
+ ## :node_memory_swap_io_bytes:sum_rate
+ ## :node_memory_utilisation:
+ ## :node_net_saturation:sum_irate
+ ## :node_net_utilisation:sum_irate
+ ##
cluster_quantile:apiserver_request_duration_seconds:histogram_quantile + ## cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile + ## cluster_quantile:scheduler_framework_extension_point_duration_seconds:histogram_quantile + ## cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile + ## cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile + ## instance:node_filesystem_usage:sum # no rules definition found + ## instance:node_network_receive_bytes:rate:sum + ## node:cluster_cpu_utilisation:ratio + ## node:cluster_memory_utilisation:ratio + ## node:node_cpu_saturation_load1: + ## node:node_cpu_utilisation:avg1m + ## node:node_disk_saturation:avg_irate + ## node:node_disk_utilisation:avg_irate + ## node:node_filesystem_avail: + ## node:node_filesystem_usage: + ## node:node_inodes_free: + ## node:node_inodes_total: + ## node:node_memory_bytes_total:sum + ## node:node_memory_swap_io_bytes:sum_rate + ## node:node_memory_utilisation: + ## node:node_memory_utilisation:ratio + ## node:node_memory_utilisation_2: + ## node:node_net_saturation:sum_irate + ## node:node_net_utilisation:sum_irate + ## node:node_num_cpu:sum + ## node_namespace_pod:kube_pod_info: + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.operator.rule + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: "cluster_quantile:apiserver_request_duration_seconds:histogram_quantile|instance:node_filesystem_usage:sum|instance:node_network_receive_bytes:rate:sum|cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile|cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile|cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile|cluster_quantile:scheduler_framework_extension_point_duration_seconds:histogram_quantile|node_namespace_pod:kube_pod_info:|:kube_pod_info_node_count:|node:node_num_cpu:sum|:node_cpu_utilisation:avg1m|node:node_cpu_utilisation:avg1m|node:cluster_cpu_utilisation:ratio|:node_cpu_saturation_load1:|node:node_cpu_saturation_load1:|:node_memory_utilisation:|node:node_memory_bytes_total:sum|node:node_memory_utilisation:ratio|node:cluster_memory_utilisation:ratio|:node_memory_swap_io_bytes:sum_rate|node:node_memory_utilisation:|node:node_memory_utilisation_2:|node:node_memory_swap_io_bytes:sum_rate|:node_disk_utilisation:avg_irate|node:node_disk_utilisation:avg_irate|:node_disk_saturation:avg_irate|node:node_disk_saturation:avg_irate|node:node_filesystem_usage:|node:node_filesystem_avail:|:node_net_utilisation:sum_irate|node:node_net_utilisation:sum_irate|:node_net_saturation:sum_irate|node:node_net_saturation:sum_irate|node:node_inodes_total:|node:node_inodes_free:" + sourceLabels: + - __name__ + ## Nginx ingress controller metrics + ## rel: https://docs.nginx.com/nginx-ingress-controller/logging-and-monitoring/prometheus/#available-metrics + ## nginx_ingress_controller_ingress_resources_total + ## nginx_ingress_controller_nginx_last_reload_milliseconds + ## nginx_ingress_controller_nginx_last_reload_status + ## nginx_ingress_controller_nginx_reload_errors_total + ## nginx_ingress_controller_nginx_reloads_total + ## nginx_ingress_controller_virtualserver_resources_total + ## nginx_ingress_controller_virtualserverroute_resources_total + ## nginx_ingress_nginx_connections_accepted + ## nginx_ingress_nginx_connections_active + ## 
nginx_ingress_nginx_connections_handled + ## nginx_ingress_nginx_connections_reading + ## nginx_ingress_nginx_connections_waiting + ## nginx_ingress_nginx_connections_writing + ## nginx_ingress_nginx_http_requests_total + ## nginx_ingress_nginxplus_connections_accepted + ## nginx_ingress_nginxplus_connections_active + ## nginx_ingress_nginxplus_connections_dropped + ## nginx_ingress_nginxplus_connections_idle + ## nginx_ingress_nginxplus_http_requests_current + ## nginx_ingress_nginxplus_http_requests_total + ## nginx_ingress_nginxplus_resolver_addr + ## nginx_ingress_nginxplus_resolver_formerr + ## nginx_ingress_nginxplus_resolver_name + ## nginx_ingress_nginxplus_resolver_noerror + ## nginx_ingress_nginxplus_resolver_notimp + ## nginx_ingress_nginxplus_resolver_nxdomain + ## nginx_ingress_nginxplus_resolver_refused + ## nginx_ingress_nginxplus_resolver_servfail + ## nginx_ingress_nginxplus_resolver_srv + ## nginx_ingress_nginxplus_resolver_timedout + ## nginx_ingress_nginxplus_resolver_unknown + ## nginx_ingress_nginxplus_ssl_handshakes_failed + ## nginx_ingress_nginxplus_ssl_session_reuses + ## nginx_ingress_nginxplus_stream_server_zone_connections + ## nginx_ingress_nginxplus_stream_server_zone_received + ## nginx_ingress_nginxplus_stream_server_zone_sent + ## nginx_ingress_nginxplus_stream_upstream_server_active + ## nginx_ingress_nginxplus_stream_upstream_server_connect_time + ## nginx_ingress_nginxplus_stream_upstream_server_fails + ## nginx_ingress_nginxplus_stream_upstream_server_health_checks_fails + ## nginx_ingress_nginxplus_stream_upstream_server_health_checks_unhealthy + ## nginx_ingress_nginxplus_stream_upstream_server_received + ## nginx_ingress_nginxplus_stream_upstream_server_response_time + ## nginx_ingress_nginxplus_stream_upstream_server_sent + ## nginx_ingress_nginxplus_stream_upstream_server_unavail + ## nginx_ingress_nginxplus_stream_upstream_server_state + ## nginx_ingress_nginxplus_location_zone_discarded + ## nginx_ingress_nginxplus_location_zone_received + ## nginx_ingress_nginxplus_location_zone_requests + ## nginx_ingress_nginxplus_location_zone_responses + ## nginx_ingress_nginxplus_location_zone_sent + ## nginx_ingress_nginxplus_server_zone_discarded + ## nginx_ingress_nginxplus_server_zone_processing + ## nginx_ingress_nginxplus_server_zone_received + ## nginx_ingress_nginxplus_server_zone_requests + ## nginx_ingress_nginxplus_server_zone_responses + ## nginx_ingress_nginxplus_server_zone_sent + ## nginx_ingress_nginxplus_upstream_server_fails + ## nginx_ingress_nginxplus_upstream_server_header_time + ## nginx_ingress_nginxplus_upstream_server_health_checks_fails + ## nginx_ingress_nginxplus_upstream_server_health_checks_unhealthy + ## nginx_ingress_nginxplus_upstream_server_received + ## nginx_ingress_nginxplus_upstream_server_sent + ## nginx_ingress_nginxplus_upstream_server_unavail + ## nginx_ingress_nginxplus_upstream_server_response_time + ## nginx_ingress_nginxplus_upstream_server_responses + ## nginx_ingress_nginxplus_upstream_server_requests + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.nginx-ingress + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: 
(?:nginx_ingress_controller_ingress_resources_total|nginx_ingress_controller_nginx_(last_reload_(milliseconds|status)|reload(s|_errors)_total)|nginx_ingress_controller_virtualserver(|route)_resources_total|nginx_ingress_nginx_connections_(accepted|active|handled|reading|waiting|writing)|nginx_ingress_nginx_http_requests_total|nginx_ingress_nginxplus_(connections_(accepted|active|dropped|idle)|http_requests_(current|total)|resolver_(addr|formerr|name|noerror|notimp|nxdomain|refused|servfail|srv|timedout|unknown)|ssl_(handshakes_failed|session_reuses)|stream_server_zone_(connections|received|sent)|stream_upstream_server_(active|connect_time|fails|health_checks_fails|health_checks_unhealthy|received|response_time|sent|unavail|state)|(location|server)_zone_(discarded|received|requests|responses|sent|processing)|upstream_server_(fails|header_time|health_checks_fails|health_checks_unhealthy|received|sent|unavail|response_time|responses|requests))) + sourceLabels: + - __name__ + ## Nginx telegraf metrics + ## nginx_accepts + ## nginx_active + ## nginx_handled + ## nginx_reading + ## nginx_requests + ## nginx_waiting + ## nginx_writing + ## **************** Nginx Plus telegraf metrics + ## nginx_plus_api_connections_accepted + ## nginx_plus_api_connections_active + ## nginx_plus_api_connections_dropped + ## nginx_plus_api_connections_idle + ## nginx_plus_api_http_caches_cold + ## nginx_plus_api_http_caches_hit_bytes + ## nginx_plus_api_http_caches_max_size + ## nginx_plus_api_http_caches_miss_bytes + ## nginx_plus_api_http_caches_size + ## nginx_plus_api_http_caches_updating_bytes + ## nginx_plus_api_http_location_zones_discarded + ## nginx_plus_api_http_location_zones_received + ## nginx_plus_api_http_location_zones_requests + ## nginx_plus_api_http_location_zones_responses_1xx + ## nginx_plus_api_http_location_zones_responses_2xx + ## nginx_plus_api_http_location_zones_responses_3xx + ## nginx_plus_api_http_location_zones_responses_4xx + ## nginx_plus_api_http_location_zones_responses_5xx + ## nginx_plus_api_http_location_zones_responses_total + ## nginx_plus_api_http_location_zones_sent + ## nginx_plus_api_http_requests_current + ## nginx_plus_api_http_requests_total + ## nginx_plus_api_http_server_zones_discarded + ## nginx_plus_api_http_server_zones_processing + ## nginx_plus_api_http_server_zones_received + ## nginx_plus_api_http_server_zones_requests + ## nginx_plus_api_http_server_zones_responses_1xx + ## nginx_plus_api_http_server_zones_responses_2xx + ## nginx_plus_api_http_server_zones_responses_3xx + ## nginx_plus_api_http_server_zones_responses_4xx + ## nginx_plus_api_http_server_zones_responses_5xx + ## nginx_plus_api_http_server_zones_responses_total + ## nginx_plus_api_http_server_zones_sent + ## nginx_plus_api_http_upstream_peers_backup + ## nginx_plus_api_http_upstream_peers_downtime + ## nginx_plus_api_http_upstream_peers_fails + ## nginx_plus_api_http_upstream_peers_healthchecks_fails + ## nginx_plus_api_http_upstream_peers_healthchecks_unhealthy + ## nginx_plus_api_http_upstream_peers_received + ## nginx_plus_api_http_upstream_peers_requests + ## nginx_plus_api_http_upstream_peers_response_time + ## nginx_plus_api_http_upstream_peers_responses_1xx + ## nginx_plus_api_http_upstream_peers_responses_2xx + ## nginx_plus_api_http_upstream_peers_responses_3xx + ## nginx_plus_api_http_upstream_peers_responses_4xx + ## nginx_plus_api_http_upstream_peers_responses_5xx + ## nginx_plus_api_http_upstream_peers_responses_total + ## nginx_plus_api_http_upstream_peers_sent + ## 
nginx_plus_api_http_upstream_peers_unavail + ## nginx_plus_api_resolver_zones_addr + ## nginx_plus_api_resolver_zones_formerr + ## nginx_plus_api_resolver_zones_name + ## nginx_plus_api_resolver_zones_noerror + ## nginx_plus_api_resolver_zones_notimp + ## nginx_plus_api_resolver_zones_nxdomain + ## nginx_plus_api_resolver_zones_refused + ## nginx_plus_api_resolver_zones_servfail + ## nginx_plus_api_resolver_zones_srv + ## nginx_plus_api_resolver_zones_timedout + ## nginx_plus_api_ssl_handshakes_failed + ## nginx_plus_api_ssl_session_reuses + ## nginx_plus_api_stream_server_zones_connections + ## nginx_plus_api_stream_server_zones_received + ## nginx_plus_api_stream_server_zones_sent + ## nginx_plus_api_stream_upstream_peers_active + ## nginx_plus_api_stream_upstream_peers_backup + ## nginx_plus_api_stream_upstream_peers_connect_time + ## nginx_plus_api_stream_upstream_peers_downtime + ## nginx_plus_api_stream_upstream_peers_fails + ## nginx_plus_api_stream_upstream_peers_healthchecks_fails + ## nginx_plus_api_stream_upstream_peers_healthchecks_last_passed + ## nginx_plus_api_stream_upstream_peers_healthchecks_unhealthy + ## nginx_plus_api_stream_upstream_peers_received + ## nginx_plus_api_stream_upstream_peers_response_time + ## nginx_plus_api_stream_upstream_peers_sent + ## nginx_plus_api_stream_upstream_peers_unavail + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.nginx + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: (?:nginx_(accepts|active|handled|reading|requests|waiting|writing)|nginx_plus_api_connections_(accepted|active|dropped|idle)|nginx_plus_api_http_caches_(cold|hit_bytes|max_size|miss_bytes|size|updating_bytes)|nginx_plus_api_http_location_zones_(discarded|received|requests|sent)|nginx_plus_api_http_location_zones_responses_(1xx|2xx|3xx|4xx|5xx|total)|nginx_plus_api_http_requests_(current|total)|nginx_plus_api_http_server_zones_(discarded|processing|received|requests|sent)|nginx_plus_api_http_server_zones_responses_(1xx|2xx|3xx|4xx|5xx|total)|nginx_plus_api_http_upstream_peers_(backup|downtime|fails|healthchecks_fails|healthchecks_unhealthy|received|requests|sent|unavail|response_time)|nginx_plus_api_http_upstream_peers_responses_(1xx|2xx|3xx|4xx|5xx|total)|nginx_plus_api_resolver_zones_(addr|formerr|name|noerror|notimp|nxdomain|refused|servfail|srv|timedout)|nginx_plus_api_ssl_(handshakes_failed|session_reuses)|nginx_plus_api_stream_server_zones_(connections|received|sent)|nginx_plus_api_stream_upstream_peers_(active|backup|connect_time|downtime|fails|healthchecks_fails|healthchecks_last_passed|healthchecks_unhealthy|received|response_time|sent|unavail)) + sourceLabels: + - __name__ + ## Redis metrics + ## redis_blocked_clients + ## redis_clients + ## redis_cluster_enabled + ## redis_cmdstat_calls + ## redis_connected_slaves + ## redis_evicted_keys + ## redis_expired_keys + ## redis_instantaneous_ops_per_sec + ## redis_keyspace_hitrate + ## redis_keyspace_hits + ## redis_keyspace_misses + ## redis_master_repl_offset + ## redis_maxmemory + ## redis_mem_fragmentation_bytes + ## redis_mem_fragmentation_ratio + ## redis_rdb_changes_since_last_save + ## redis_rejected_connections + ## redis_slave_repl_offset + ## redis_total_commands_processed + ## redis_total_net_input_bytes + ## redis_total_net_output_bytes + ## redis_tracking_total_keys + ## redis_uptime + ## redis_used_cpu_sys + ## redis_used_cpu_user + ## redis_used_memory 
+ ## redis_used_memory_overhead + ## redis_used_memory_rss + ## redis_used_memory_startup + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.redis + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: (?:redis_((blocked_|)clients|cluster_enabled|cmdstat_calls|connected_slaves|(evicted|expired|tracking_total)_keys|instantaneous_ops_per_sec|keyspace_(hitrate|hits|misses)|(master|slave)_repl_offset|maxmemory|mem_fragmentation_(bytes|ratio)|rdb_changes_since_last_save|rejected_connections|total_commands_processed|total_net_(input|output)_bytes|uptime|used_(cpu_(sys|user)|memory(_overhead|_rss|_startup|)))) + sourceLabels: + - __name__ + ## JMX Metrics + ## java_lang_ClassLoading_LoadedClassCount + ## java_lang_ClassLoading_TotalLoadedClassCount + ## java_lang_ClassLoading_UnloadedClassCount + ## java_lang_Compilation_TotalCompilationTime + ## java_lang_GarbageCollector_CollectionCount + ## java_lang_GarbageCollector_CollectionTime + ## java_lang_GarbageCollector_LastGcInfo_GcThreadCount # unavailable for adoptopenjdk-openj9 + ## java_lang_GarbageCollector_LastGcInfo_duration # unavailable for adoptopenjdk-openj9 + ## java_lang_GarbageCollector_LastGcInfo_memoryUsageAfterGc_*_used + ## java_lang_GarbageCollector_LastGcInfo_memoryUsageBeforeGc_*_used + ## java_lang_GarbageCollector_LastGcInfo_usageAfterGc_*_used # only for adoptopenjdk-openj9 + ## java_lang_GarbageCollector_LastGcInfo_usageBeforeGc_*_used # only for adoptopenjdk-openj9 + ## java_lang_MemoryPool_CollectionUsageThresholdSupported + ## java_lang_MemoryPool_CollectionUsage_committed + ## java_lang_MemoryPool_CollectionUsage_max + ## java_lang_MemoryPool_CollectionUsage_used + ## java_lang_MemoryPool_PeakUsage_committed + ## java_lang_MemoryPool_PeakUsage_max + ## java_lang_MemoryPool_PeakUsage_used + ## java_lang_MemoryPool_UsageThresholdSupported + ## java_lang_MemoryPool_Usage_committed + ## java_lang_MemoryPool_Usage_max + ## java_lang_MemoryPool_Usage_used + ## java_lang_Memory_HeapMemoryUsage_committed + ## java_lang_Memory_HeapMemoryUsage_max + ## java_lang_Memory_HeapMemoryUsage_used + ## java_lang_Memory_NonHeapMemoryUsage_committed + ## java_lang_Memory_NonHeapMemoryUsage_max + ## java_lang_Memory_NonHeapMemoryUsage_used + ## java_lang_Memory_ObjectPendingFinalizationCount + ## java_lang_OperatingSystem_AvailableProcessors + ## java_lang_OperatingSystem_CommittedVirtualMemorySize + ## java_lang_OperatingSystem_FreeMemorySize # Added in jdk14 + ## java_lang_OperatingSystem_FreePhysicalMemorySize + ## java_lang_OperatingSystem_FreeSwapSpaceSize + ## java_lang_OperatingSystem_MaxFileDescriptorCount + ## java_lang_OperatingSystem_OpenFileDescriptorCount + ## java_lang_OperatingSystem_ProcessCpuLoad + ## java_lang_OperatingSystem_ProcessCpuTime + ## java_lang_OperatingSystem_SystemCpuLoad + ## java_lang_OperatingSystem_SystemLoadAverage + ## java_lang_OperatingSystem_TotalMemorySize # Added in jdk14 + ## java_lang_OperatingSystem_TotalPhysicalMemorySize + ## java_lang_OperatingSystem_TotalSwapSpaceSize + ## java_lang_Runtime_BootClassPathSupported + ## java_lang_Runtime_Pid # not available for jdk8 + ## java_lang_Runtime_Uptime + ## java_lang_Runtime_StartTime + ## java_lang_Threading_CurrentThreadAllocatedBytes # Added in jdk14 + ## java_lang_Threading_CurrentThreadCpuTime + ## java_lang_Threading_CurrentThreadUserTime + ## java_lang_Threading_DaemonThreadCount + ## 
java_lang_Threading_ObjectMonitorUsageSupported + ## java_lang_Threading_PeakThreadCount + ## java_lang_Threading_SynchronizerUsageSupported + ## java_lang_Threading_ThreadAllocatedMemory* # Not available for adoptopenjdk-openj9 + ## java_lang_Threading_ThreadContentionMonitoring* + ## java_lang_Threading_ThreadCount + ## java_lang_Threading_ThreadCpuTime* + ## java_lang_Threading_TotalStartedThreadCount + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.jmx + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: (?:java_lang_(ClassLoading_(TotalL|Unl|L)oadedClassCount|Compilation_TotalCompilationTime|GarbageCollector_(Collection(Count|Time)|LastGcInfo_(GcThreadCount|duration|(memoryU|u)sage(After|Before)Gc_.*_used))|MemoryPool_(CollectionUsage(ThresholdSupported|_committed|_max|_used)|(Peak|)Usage_(committed|max|used)|UsageThresholdSupported)|Memory_((Non|)HeapMemoryUsage_(committed|max|used)|ObjectPendingFinalizationCount)|OperatingSystem_(AvailableProcessors|(CommittedVirtual|(Free|Total)(Physical|))MemorySize|(Free|Total)SwapSpaceSize|(Max|Open)FileDescriptorCount|ProcessCpu(Load|Time)|System(CpuLoad|LoadAverage))|Runtime_(BootClassPathSupported|Pid|Uptime|StartTime)|Threading_(CurrentThread(AllocatedBytes|(Cpu|User)Time)|(Daemon|Peak|TotalStarted|)ThreadCount|(ObjectMonitor|Synchronizer)UsageSupported|Thread(AllocatedMemory.*|ContentionMonitoring.*|CpuTime.*)))) + sourceLabels: + - __name__ + ## Kafka Metrics + ## List of Metrics are on following dochub page: + ## https://help.sumologic.com/docs/integrations/containers-orchestration/kafka/#kafka-metrics + ## Metrics follow following format: + ## kafka_broker_* + ## kafka_controller_* + ## kafka_java_lang_* + ## kafka_partition_* + ## kafka_purgatory_* + ## kafka_network_* + ## kafka_replica_* + ## kafka_request_* + ## kafka_topic_* + ## kafka_topics_* + ## kafka_zookeeper_* + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.kafka + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: (?:kafka_(broker_.*|controller_.*|java_lang_.*|partition_.*|purgatory_.*|network_.*|replica_.*|request_.*|topic_.*|topics_.*|zookeeper_.*)) + sourceLabels: + - __name__ + ## MySQL Telegraf Metrics + ## List of Metrics are on following github page: + ## https://github.com/influxdata/telegraf/tree/v1.18.1/plugins/inputs/mysql#metrics + ## Metrics follow following format: + ## mysql_uptime + ## mysql_connection_errors_* + ## mysql_queries + ## mysql_slow_queries + ## mysql_questions + ## mysql_table_open_cache_* + ## mysql_table_locks_* + ## mysql_commands_* + ## mysql_select_* + ## mysql_sort_* + ## mysql_mysqlx_connections_* + ## mysql_mysqlx_worker_* + ## mysql_connections + ## mysql_aborted_* + ## mysql_locked_connects + ## mysql_bytes_* + ## mysql_qcache_* + ## mysql_threads_* + ## mysql_opened_* + ## mysql_created_tmp_* + ## mysql_innodb_buffer_pool_* + ## mysql_innodb_data_* + ## mysql_innodb_rows_* + ## mysql_innodb_row_lock_* + ## mysql_innodb_log_waits + ## mysql_perf_schema_events_statements_* + ## mysql_perf_schema_table_io_waits_* + ## mysql_perf_schema_index_io_waits_* + ## mysql_perf_schema_read* + ## mysql_perf_schema_write* + ## + - remoteTimeout: 5s + url: 
http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.mysql + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: (?:mysql_((uptime|connection_errors_.*|queries|slow_queries|questions|table_open_cache_.*|table_locks_.*|commands_.*|select_.*|sort_.*|mysqlx_connections_.*|mysqlx_worker_.*|connections|aborted_.*|locked_connects|bytes_.*|qcache_.*|threads_.*|opened_.*|created_tmp_.*)|innodb_(buffer_pool_.*|data_.*|rows_.*|row_lock_.*|log_waits)|perf_schema_(events_statements_.*|table_io_waits_.*|index_io_waits_.*|read.*|write.*))) + sourceLabels: + - __name__ + ## PostgreSQL Telegraf Metrics + ## List of Metrics are on following dochub page: + ## https://help.sumologic.com/docs/integrations/databases/postgresql/#postgresql-metrics + ## Metrics follow following format: + ## postgresql_blks_hit + ## postgresql_blks_read + ## postgresql_buffers_backend + ## postgresql_buffers_checkpoint + ## postgresql_buffers_clean + ## postgresql_checkpoints_req + ## postgresql_checkpoints_timed + ## postgresql_db_size + ## postgresql_deadlocks + ## postgresql_flush_lag + ## postgresql_heap_blks_hit + ## postgresql_heap_blks_read + ## postgresql_idx_blks_hit + ## postgresql_idx_blks_read + ## postgresql_idx_scan + ## postgresql_idx_tup_fetch + ## postgresql_idx_tup_read + ## postgresql_index_size + ## postgresql_n_dead_tup + ## postgresql_n_live_tup + ## postgresql_n_tup_del + ## postgresql_n_tup_hot_upd + ## postgresql_n_tup_ins + ## postgresql_n_tup_upd + ## postgresql_num_locks + ## postgresql_numbackends + ## postgresql_replay_lag + ## postgresql_replication_delay + ## postgresql_replication_lag + ## postgresql_seq_scan + ## postgresql_seq_tup_read + ## postgresql_stat_ssl_compression_count + ## postgresql_table_size + ## postgresql_tup_deleted + ## postgresql_tup_fetched + ## postgresql_tup_inserted + ## postgresql_tup_returned + ## postgresql_tup_updated + ## postgresql_write_lag + ## postgresql_xact_commit + ## postgresql_xact_rollback + ## postgresql_toast_blks_read + ## postgresql_toast_blks_hit + ## postgresql_tidx_blks_read + ## postgresql_tidx_blks_hit + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.postgresql + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: (?:postgresql_(blks_(hit|read)|buffers_(backend|checkpoint|clean)|checkpoints_(req|timed)|db_size|deadlocks|flush_lag|heap_blks_(hit|read)|idx_blks_(hit|read)|idx_scan|idx_tup_(fetch|read)|index_size|n_dead_tup|n_live_tup|n_tup_(upd|ins|del|hot_upd)|num_locks|numbackends|replay_lag|replication_(delay|lag)|seq_scan|seq_tup_read|stat_ssl_compression_count|table_size|tidx_blks_(hit|read)|toast_blks_(hit|read)|tup_(deleted|fetched|inserted|returned|updated)|write_lag|xact_(commit|rollback))) + sourceLabels: + - __name__ + ## Apache Telegraf Metrics + ## List of Metrics are on following github page: + ## https://github.com/influxdata/telegraf/tree/v1.18.2/plugins/inputs/apache + ## Metrics follow following format: + ## apache_BusyWorkers + ## apache_BytesPerReq + ## apache_BytesPerSec + ## apache_CPUChildrenSystem + ## apache_CPUChildrenUser + ## apache_CPULoad + ## apache_CPUSystem + ## apache_CPUUser + ## apache_DurationPerReq + ## apache_IdleWorkers + ## apache_Load1 + ## apache_Load5 + ## apache_Load15 + ## apache_ParentServerConfigGeneration + ## apache_ParentServerMPMGeneration + ## 
apache_ReqPerSec + ## apache_ServerUptimeSeconds + ## apache_TotalAccesses + ## apache_TotalDuration + ## apache_TotalkBytes + ## apache_Uptime + ## apache_scboard_closing + ## apache_scboard_dnslookup + ## apache_scboard_finishing + ## apache_scboard_idle_cleanup + ## apache_scboard_keepalive + ## apache_scboard_logging + ## apache_scboard_open + ## apache_scboard_reading + ## apache_scboard_sending + ## apache_scboard_starting + ## apache_scboard_waiting + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.apache + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: (?:apache_((BusyWorkers|BytesPerReq|BytesPerSec|CPUChildrenSystem|CPUChildrenUser|CPULoad|CPUSystem|CPUUser|DurationPerReq|IdleWorkers|Load1|Load15|Load5|ParentServerConfigGeneration|ParentServerMPMGeneration|ReqPerSec|ServerUptimeSeconds|TotalAccesses|TotalDuration|TotalkBytes|Uptime)|(scboard_(closing|dnslookup|finishing|idle_cleanup|keepalive|logging|open|reading|sending|starting|waiting)))) + sourceLabels: + - __name__ + ## SQLServer Telegraf Metrics + ## List of Metrics are on following github page: + ## https://github.com/influxdata/telegraf/tree/v1.18.2/plugins/inputs/sqlserver + ## Metrics follow following format: + ## sqlserver_cpu_sqlserver_process_cpu + ## sqlserver_database_io_read_bytes + ## sqlserver_database_io_read_latency_ms + ## sqlserver_database_io_write_bytes + ## sqlserver_database_io_write_latency_ms + ## sqlserver_memory_clerks_size_kb + ## sqlserver_performance_value + ## sqlserver_server_properties_server_memory + ## sqlserver_volume_space_total_space_bytes + ## sqlserver_volume_space_used_space_bytes + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.sqlserver + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: (?:sqlserver_(cpu_sqlserver_process_cpu|database_io_(read_(bytes|latency_ms)|write_(bytes|latency_ms))|memory_clerks_size_kb|performance_value|server_properties_server_memory|volume_space_(total_space_bytes|used_space_bytes))) + sourceLabels: + - __name__ + ## Haproxy Telegraf Metrics + ## List of Metrics are on following github page: + ## https://github.com/influxdata/telegraf/tree/v1.18.2/plugins/inputs/haproxy + ## Metrics follow following format: + ## haproxy_active_servers + ## haproxy_backup_servers + ## haproxy_bin + ## haproxy_bout + ## haproxy_chkfail + ## haproxy_ctime + ## haproxy_dreq + ## haproxy_dresp + ## haproxy_econ + ## haproxy_ereq + ## haproxy_eresp + ## haproxy_http_response_* + ## haproxy_qcur + ## haproxy_qmax + ## haproxy_qtime + ## haproxy_rate + ## haproxy_rtime + ## haproxy_scur + ## haproxy_slim + ## haproxy_smax + ## haproxy_ttime + ## haproxy_weight + ## haproxy_wredis + ## haproxy_wretr + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.haproxy + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: (?:haproxy_(active_servers|backup_servers|bin|bout|chkfail|ctime|dreq|dresp|econ|ereq|eresp|http_response_(1xx|2xx|3xx|4xx|5xx|other)|qcur|qmax|qtime|rate|rtime|scur|slim|smax|ttime|weight|wredis|wretr)) + sourceLabels: + - __name__ + ## Cassandra Telegraf Metrics + ## List of Metrics are on following github page: + ## 
https://github.com/influxdata/telegraf/tree/v1.18.2/plugins/inputs/cassandra + ## cassandra_CacheMetrics_ChunkCache_OneMinuteRate + ## cassandra_ClientMetrics_connectedNativeClients_Value + ## cassandra_ClientMetrics_RequestDiscarded_OneMinuteRate + ## cassandra_CommitLogMetrics_CompletedTasks_Value + ## cassandra_CommitLogMetrics_PendingTasks_Value + ## cassandra_DroppedMessageMetrics_Dropped_OneMinuteRate + ## cassandra_java_GarbageCollector_*_CollectionCount + ## cassandra_java_GarbageCollector_*_CollectionTime + ## cassandra_java_GarbageCollector_*_LastGcInfo_duration + ## cassandra_java_GarbageCollector_*_LastGcInfo_GcThreadCount + ## cassandra_java_GarbageCollector_*_LastGcInfo_memoryUsageAfterGc_*_used + ## cassandra_java_GarbageCollector_*_LastGcInfo_memoryUsageBeforeGc_*_used + ## cassandra_java_Memory_HeapMemoryUsage_used + ## cassandra_java_OperatingSystem_AvailableProcessors + ## cassandra_java_OperatingSystem_FreePhysicalMemorySize + ## cassandra_java_OperatingSystem_SystemCpuLoad + ## cassandra_java_OperatingSystem_TotalPhysicalMemorySize + ## cassandra_java_OperatingSystem_TotalSwapSpaceSize + ## cassandra_Net_FailureDetector_DownEndpointCount + ## cassandra_Net_FailureDetector_UpEndpointCount + ## cassandra_TableMetrics_AllMemtablesHeapSize_Value + ## cassandra_TableMetrics_AllMemtablesLiveDataSize_Value + ## cassandra_TableMetrics_CompactionBytesWritten_Count + ## cassandra_TableMetrics_EstimatedPartitionCount_Value + ## cassandra_TableMetrics_KeyCacheHitRate_Value + ## cassandra_TableMetrics_LiveSSTableCount_Value + ## cassandra_TableMetrics_MemtableColumnsCount_Value + ## cassandra_TableMetrics_MemtableLiveDataSize_Value + ## cassandra_TableMetrics_MemtableOffHeapSize_Value + ## cassandra_TableMetrics_MemtableOnHeapSize_Value + ## cassandra_TableMetrics_MemtableSwitchCount_Count + ## cassandra_TableMetrics_PendingCompactions_Value + ## cassandra_TableMetrics_PendingFlushes_Count + ## cassandra_TableMetrics_PercentRepaired_Value + ## cassandra_TableMetrics_RangeLatency_Count + ## cassandra_TableMetrics_ReadLatency_50thPercentile + ## cassandra_TableMetrics_ReadLatency_Max + ## cassandra_TableMetrics_ReadLatency_OneMinuteRate + ## cassandra_TableMetrics_RowCacheHit_Count + ## cassandra_TableMetrics_RowCacheMiss_Count + ## cassandra_TableMetrics_SSTablesPerReadHistogram_50thPercentile + ## cassandra_TableMetrics_SSTablesPerReadHistogram_99thPercentile + ## cassandra_TableMetrics_SSTablesPerReadHistogram_Count + ## cassandra_TableMetrics_SSTablesPerReadHistogram_Max + ## cassandra_TableMetrics_TombstoneScannedHistogram_50thPercentile + ## cassandra_TableMetrics_TombstoneScannedHistogram_99thPercentile + ## cassandra_TableMetrics_TombstoneScannedHistogram_Max + ## cassandra_TableMetrics_TotalDiskSpaceUsed_Count + ## cassandra_TableMetrics_WaitingOnFreeMemtableSpace_Max + ## cassandra_TableMetrics_WriteLatency_50thPercentile + ## cassandra_TableMetrics_WriteLatency_99thPercentile + ## cassandra_TableMetrics_WriteLatency_Max + ## cassandra_TableMetrics_WriteLatency_OneMinuteRate + ## cassandra_ThreadPoolMetrics_internal_Count + ## cassandra_ThreadPoolMetrics_internal_Value + ## cassandra_ThreadPoolMetrics_request_Count + ## cassandra_ThreadPoolMetrics_request_Value + ## cassandra_ThreadPoolMetrics_transport_Count + ## cassandra_ThreadPoolMetrics_transport_Value + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.cassandra + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - 
_sumo_forward_ + - action: keep + regex: (?:cassandra_(CacheMetrics_ChunkCache_OneMinuteRate|ClientMetrics_(connectedNativeClients_Value|RequestDiscarded_OneMinuteRate)|CommitLogMetrics_(CompletedTasks_Value|PendingTasks_Value)|DroppedMessageMetrics_Dropped_OneMinuteRate|java_(GarbageCollector_(ConcurrentMarkSweep|ParNew)_(CollectionCount|CollectionTime|LastGcInfo_duration|LastGcInfo_GcThreadCount|LastGcInfo_memoryUsageAfterGc_.*_used|LastGcInfo_memoryUsageBeforeGc_.*_used)|Memory_HeapMemoryUsage_used|OperatingSystem_(AvailableProcessors|FreePhysicalMemorySize|SystemCpuLoad|TotalPhysicalMemorySize|TotalSwapSpaceSize))|Net_FailureDetector_(DownEndpointCount|UpEndpointCount)|TableMetrics_(AllMemtablesHeapSize_Value|AllMemtablesLiveDataSize_Value|CompactionBytesWritten_Count|EstimatedPartitionCount_Value|KeyCacheHitRate_Value|LiveSSTableCount_Value|MemtableColumnsCount_Value|MemtableLiveDataSize_Value|MemtableOffHeapSize_Value|MemtableOnHeapSize_Value|MemtableSwitchCount_Count|PendingCompactions_Value|PendingFlushes_Count|PercentRepaired_Value|RangeLatency_Count|ReadLatency_50thPercentile|ReadLatency_Max|ReadLatency_OneMinuteRate|RowCacheHit_Count|RowCacheMiss_Count|SSTablesPerReadHistogram_50thPercentile|SSTablesPerReadHistogram_99thPercentile|SSTablesPerReadHistogram_Count|SSTablesPerReadHistogram_Max|TombstoneScannedHistogram_50thPercentile|TombstoneScannedHistogram_99thPercentile|TombstoneScannedHistogram_Max|TotalDiskSpaceUsed_Count|WaitingOnFreeMemtableSpace_Max|WriteLatency_50thPercentile|WriteLatency_99thPercentile|WriteLatency_Max|WriteLatency_OneMinuteRate)|ThreadPoolMetrics_(internal_(Count|Value)|request_(Count|Value)|transport_(Count|Value)))) + sourceLabels: + - __name__ + ## MongoDB Telegraf Metrics + ## List of Metrics are on following github page: + ## https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mongodb + ## Metrics follow following format: + ## mongodb_active_reads + ## mongodb_active_writes + ## mongodb_commands_per_sec + ## mongodb_connections_current + ## mongodb_db_stats_storage_size + ## mongodb_deletes_per_sec + ## mongodb_document_* + ## mongodb_flushes_per_sec + ## mongodb_getmores_per_sec + ## mongodb_inserts_per_sec + ## mongodb_net_*_bytes_count + ## mongodb_open_connections + ## mongodb_page_faults + ## mongodb_percent_cache_dirty + ## mongodb_percent_cache_used + ## mongodb_queries_per_sec + ## mongodb_queued_reads + ## mongodb_queued_writes + ## mongodb_repl_queries + ## mongodb_repl_commands_per_sec + ## mongodb_repl_deletes_per_sec + ## mongodb_repl_getmores_per_sec + ## mongodb_repl_inserts_per_sec + ## mongodb_repl_oplog_window_sec + ## mongodb_repl_queries_per_sec + ## mongodb_repl_updates_per_sec + ## mongodb_resident_megabytes + ## mongodb_updates_per_sec + ## mongodb_uptime_ns + ## mongodb_vsize_megabytes + ## mongodb_wtcache_bytes_read_into + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.mongodb + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: 
(?:mongodb_(active_(reads|writes)|commands_per_sec|connections_current|db_stats_storage_size|deletes_per_sec|document_.*|flushes_per_sec|getmores_per_sec|inserts_per_sec|net_.*_bytes_count|open_connections|page_faults|percent_cache_(dirty|used)|queries_per_sec|queued_(reads|writes)|repl_((commands|deletes|getmores|inserts|oplog|queries|updates)_per_sec|queries|oplog_window_sec)|resident_megabytes|updates_per_sec|uptime_ns|vsize_megabytes|wtcache_bytes_read_into)) + sourceLabels: + - __name__ + ## Rabbitmq Telegraf Metrics + ## List of Metrics are on following github page: + ## https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rabbitmq + ## Metrics follow following format: + ## rabbitmq_exchange_messages_publish_in + ## rabbitmq_exchange_messages_publish_in_rate + ## rabbitmq_exchange_messages_publish_out + ## rabbitmq_exchange_messages_publish_out_rate + ## rabbitmq_node_disk_free + ## rabbitmq_node_disk_free_limit + ## rabbitmq_node_fd_used + ## rabbitmq_node_gc_num_rate + ## rabbitmq_node_mem_limit + ## rabbitmq_node_mem_used + ## rabbitmq_node_mnesia_disk_tx_count + ## rabbitmq_node_mnesia_ram_tx_count + ## rabbitmq_node_uptime + ## rabbitmq_overview_clustering_listerners + ## rabbitmq_overview_connections + ## rabbitmq_overview_consumers + ## rabbitmq_overview_exchanges + ## rabbitmq_overview_messages_delivered + ## rabbitmq_overview_messages_published + ## rabbitmq_overview_messages_unacked + ## rabbitmq_overview_queues + ## rabbitmq_queue_consumers + ## rabbitmq_queue_memory + ## rabbitmq_queue_messages_deliver_rate + ## rabbitmq_queue_messages_max_time + ## rabbitmq_queue_messages_memory + ## rabbitmq_queue_messages_publish_rate + ## rabbitmq_queue_messages_unack + ## rabbitmq_queue_slave_nodes + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.rabbitmq + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: (?:rabbitmq_(exchange_messages_publish_(in_rate|in|out_rate|out)|node_(disk_free_limit|disk_free|mem_(limit|used)|uptime|fd_used|mnesia_(disk_tx_count|ram_tx_count)|gc_num_rate)|overview_(clustering_listerners|connections|exchanges|consumers|queues|messages_(delivered|published|unacked))|queue_(consumers|memory|slave_nodes|messages_(publish_rate|deliver_rate|memory|max_time|unack)))) + sourceLabels: + - __name__ + ## Tomcat Telegraf Metrics + ## List of Metrics are on following github page: + ## https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tomcat + ## Metrics follow following format: + ## tomcat_connector_bytes_received + ## tomcat_connector_bytes_sent + ## tomcat_connector_current_thread_busy + ## tomcat_connector_current_thread_count + ## tomcat_connector_current_threads_busy + ## tomcat_connector_error_count + ## tomcat_connector_max_threads + ## tomcat_connector_max_time + ## tomcat_connector_processing_time + ## tomcat_connector_request_count + ## tomcat_jmx_jvm_memory_HeapMemoryUsage_max + ## tomcat_jmx_jvm_memory_HeapMemoryUsage_used + ## tomcat_jmx_jvm_memory_NonHeapMemoryUsage_max + ## tomcat_jmx_jvm_memory_NonHeapMemoryUsage_used + ## tomcat_jmx_OperatingSystem_FreePhysicalMemorySize + ## tomcat_jmx_OperatingSystem_FreeSwapSpaceSize + ## tomcat_jmx_OperatingSystem_SystemCpuLoad + ## tomcat_jmx_OperatingSystem_TotalPhysicalMemorySize + ## tomcat_jmx_OperatingSystem_TotalSwapSpaceSize + ## tomcat_jmx_Servlet_processingTime + ## tomcat_jvm_memory_free + ## tomcat_jvm_memory_max + ## 
tomcat_jvm_memory_total + ## tomcat_jvm_memorypool_bytes_received + ## tomcat_jvm_memorypool_bytes_sent + ## tomcat_jvm_memorypool_current_thread_count + ## tomcat_jvm_memorypool_current_threads_busy + ## tomcat_jvm_memorypool_error_count + ## tomcat_jvm_memorypool_max + ## tomcat_jvm_memorypool_max_threads + ## tomcat_jvm_memorypool_max_time + ## tomcat_jvm_memorypool_processing_time + ## tomcat_jvm_memorypool_request_count + ## tomcat_jvm_memorypool_used + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.tomcat + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: (?:tomcat_(connector_(bytes_(received|sent)|current_(thread_(busy|count)|threads_busy)|error_count|max_threads|max_time|processing_time|request_count)|jmx_(jvm_memory_(HeapMemoryUsage_(max|used)|NonHeapMemoryUsage_(max|used))|OperatingSystem_(FreePhysicalMemorySize|FreeSwapSpaceSize|SystemCpuLoad|TotalPhysicalMemorySize|TotalSwapSpaceSize)|Servlet_processingTime)|jvm_memory_(free|max|total)|jvm_memorypool_(bytes_(received|sent)|current_thread_count|current_threads_busy|error_count|max_threads|max_time|max|processing_time|request_count|used))) + sourceLabels: + - __name__ + ## Varnish Telegraf Metrics + ## List of Metrics are on following github page: + ## https://github.com/influxdata/telegraf/tree/master/plugins/inputs/varnish + ## Metrics follow following format: + ## varnish_backend_busy + ## varnish_backend_conn + ## varnish_backend_fail + ## varnish_backend_recycle + ## varnish_backend_req + ## varnish_backend_retry + ## varnish_backend_reuse + ## varnish_backend_unhealthy + ## varnish_bans + ## varnish_bans_completed + ## varnish_bans_deleted + ## varnish_bans_dups + ## varnish_bans_lurker_contention + ## varnish_bans_lurker_obj_killed + ## varnish_bans_lurker_tested + ## varnish_bans_lurker_tests_tested + ## varnish_bans_obj + ## varnish_bans_obj_killed + ## varnish_bans_persisted_bytes + ## varnish_bans_persisted_fragmentation + ## varnish_boot_*_*_bodybytes + ## varnish_boot_*_*_hdrbytes + ## varnish_boot_*_bereq_bodybytes + ## varnish_boot_*_bereq_hdrbytes + ## varnish_cache_hit + ## varnish_cache_hit_grace + ## varnish_cache_hitpass + ## varnish_cache_miss + ## varnish_client_req + ## varnish_client_req_400 + ## varnish_client_req_417 + ## varnish_client_resp_500 + ## varnish_n_backend + ## varnish_n_expired + ## varnish_n_lru_nuked + ## varnish_n_vcl_avail + ## varnish_pools + ## varnish_s0_g_bytes + ## varnish_s0_g_space + ## varnish_s_fetch + ## varnish_s_pipe_in + ## varnish_s_pipe_out + ## varnish_s_req_bodybytes + ## varnish_s_req_hdrbytes + ## varnish_s_resp_bodybytes + ## varnish_s_resp_hdrbytes + ## varnish_s_sess + ## varnish_sess_closed + ## varnish_sess_closed_err + ## varnish_sess_conn + ## varnish_sess_drop + ## varnish_sess_dropped + ## varnish_sess_fail + ## varnish_sess_queued + ## varnish_thread_queue_len + ## varnish_threads + ## varnish_threads_created + ## varnish_threads_destroyed + ## varnish_threads_failed + ## varnish_threads_limited + ## varnish_uptime + ## varnish_vmods + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.varnish + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: 
(?:varnish_(backend_(busy|conn|fail|recycle|req|retry|reuse|unhealthy)|bans_(completed|deleted|dups|lurker_(contention|obj_killed|tests_tested|tested|)|obj_killed|obj|persisted_(bytes|fragmentation))|bans|boot_.*_.*_(bodybytes|hdrbytes)|cache_(hit_grace|hitpass|miss|hit)|client_(req_400|req_417|req|resp_500)|n_(backend|expired|lru_nuked|vcl_avail)|pools|s0_g_(bytes|space)|s_(fetch|pipe_(in|out)|req_(bodybytes|hdrbytes)|resp_(bodybytes|hdrbytes)|sess)|sess_(closed_err|closed|conn|drop|dropped|fail|queued)|thread_queue_len|threads_(created|destroyed|failed|limited)|threads|uptime|vmods)) + sourceLabels: + - __name__ + ## Memcached Telegraf Metrics + ## List of Metrics are on following github page: + ## https://github.com/influxdata/telegraf/tree/master/plugins/inputs/memcache + ## Metrics follow following format: + ## memcached_accepting_conns + ## memcached_auth_cmds + ## memcached_auth_errors + ## memcached_bytes + ## memcached_bytes_read + ## memcached_bytes_written + ## memcached_cas_* + ## memcached_cas_* + ## memcached_cmd_* + ## memcached_cmd_flush + ## memcached_cmd_get + ## memcached_cmd_set + ## memcached_cmd_touch + ## memcached_conn_yields + ## memcached_connection_structures + ## memcached_curr_connections + ## memcached_curr_items + ## memcached_decr_* + ## memcached_delete_* + ## memcached_evictions + ## memcached_get_hits + ## memcached_get_misses + ## memcached_hash_bytes + ## memcached_hash_is_expanding + ## memcached_incr_* + ## memcached_limit_maxbytes + ## memcached_listen_disabled_num + ## memcached_reclaimed + ## memcached_threads + ## memcached_total_connections + ## memcached_total_items + ## memcached_uptime + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.memcached + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: (?:memcached_(accepting_conns|auth_(cmds|errors)|bytes_(read|written)|bytes|cas_*|cmd_.*|conn_yields|connection_structures|curr_(connections|items)|decr_.*|delete_.*|evictions|get_(hits|misses)|hash_(bytes|is_expanding)|incr_.*|limit_maxbytes|listen_disabled_num|reclaimed|threads|total_(connections|items)|uptime)) + sourceLabels: + - __name__ + ## Elasticsearch Telegraf Metrics + ## List of Metrics are on following github page: + ## https://github.com/influxdata/telegraf/tree/master/plugins/inputs/elasticsearch + ## elasticsearch_cluster_health_active_primary_shards + ## elasticsearch_cluster_health_active_shards + ## elasticsearch_cluster_health_delayed_unassigned_shards + ## elasticsearch_cluster_health_indices_status_code + ## elasticsearch_cluster_health_initializing_shards + ## elasticsearch_cluster_health_number_of_data_nodes + ## elasticsearch_cluster_health_number_of_nodes + ## elasticsearch_cluster_health_number_of_pending_tasks + ## elasticsearch_cluster_health_relocating_shards + ## elasticsearch_cluster_health_unassigned_shards + ## elasticsearch_clusterstats_indices_fielddata_evictions + ## elasticsearch_clusterstats_nodes_jvm_mem_heap_used_in_bytes + ## elasticsearch_fs_total_free_in_bytes + ## elasticsearch_fs_total_total_in_bytes + ## elasticsearch_indices_flush_total + ## elasticsearch_indices_flush_total_time_in_millis + ## elasticsearch_indices_get_exists_time_in_millis + ## elasticsearch_indices_get_exists_total + ## elasticsearch_indices_get_missing_time_in_millis + ## elasticsearch_indices_get_missing_total + ## elasticsearch_indices_get_time_in_millis + ## 
elasticsearch_indices_get_total + ## elasticsearch_indices_indexing_delete_time_in_millis + ## elasticsearch_indices_indexing_delete_total + ## elasticsearch_indices_indexing_index_time_in_millis + ## elasticsearch_indices_indexing_index_total + ## elasticsearch_indices_merges_total_time_in_millis + ## elasticsearch_indices_search_query_time_in_millis + ## elasticsearch_indices_search_query_total + ## elasticsearch_indices_segments_fixed_bit_set_memory_in_bytes + ## elasticsearch_indices_segments_terms_memory_in_bytes + ## elasticsearch_indices_stats_primaries_docs_count + ## elasticsearch_indices_stats_primaries_indexing_index_time_in_millis + ## elasticsearch_indices_stats_primaries_query_cache_cache_size + ## elasticsearch_indices_stats_primaries_query_cache_evictions + ## elasticsearch_indices_stats_primaries_segments_doc_values_memory_in_bytes + ## elasticsearch_indices_stats_primaries_segments_index_writer_memory_in_bytes + ## elasticsearch_indices_stats_primaries_segments_memory_in_bytes + ## elasticsearch_indices_stats_total___fielddata_memory_size_in_bytes + ## elasticsearch_indices_stats_total___indexing_index_total + ## elasticsearch_indices_stats_total___merges_total + ## elasticsearch_indices_stats_total_docs_count + ## elasticsearch_indices_stats_total_fielddata_memory_size_in_bytes + ## elasticsearch_indices_stats_total_flush_total_time_in_millis + ## elasticsearch_indices_stats_total_indexing_delete_total + ## elasticsearch_indices_stats_total_indexing_index_time_in_millis + ## elasticsearch_indices_stats_total_indexing_index_total + ## elasticsearch_indices_stats_total_merges_total_docs + ## elasticsearch_indices_stats_total_merges_total_size_in_bytes + ## elasticsearch_indices_stats_total_merges_total_time_in_millis + ## elasticsearch_indices_stats_total_query_cache_evictions + ## elasticsearch_indices_stats_total_refresh_total + ## elasticsearch_indices_stats_total_refresh_total_time_in_millis + ## elasticsearch_indices_stats_total_search_fetch_time_in_millis + ## elasticsearch_indices_stats_total_search_fetch_total + ## elasticsearch_indices_stats_total_search_query_time_in_millis + ## elasticsearch_indices_stats_total_search_query_total + ## elasticsearch_indices_stats_total_segments_fixed_bit_set_memory_in_bytes + ## elasticsearch_indices_stats_total_segments_index_writer_memory_in_bytes + ## elasticsearch_indices_stats_total_segments_memory_in_bytes + ## elasticsearch_indices_stats_total_segments_terms_memory_in_bytes + ## elasticsearch_indices_stats_total_store_size_in_bytes + ## elasticsearch_indices_stats_total_translog_operations + ## elasticsearch_indices_stats_total_translog_size_in_bytes + ## elasticsearch_jvm_gc_collectors_*_collection_time_in_millis + ## elasticsearch_jvm_mem_heap_committed_in_bytes + ## elasticsearch_jvm_mem_heap_used_in_bytes + ## elasticsearch_jvm_mem_heap_used_percent + ## elasticsearch_os_cpu_load_average_5m + ## elasticsearch_os_cpu_percent + ## elasticsearch_process_open_file_descriptors + ## elasticsearch_thread_pool_analyze_completed + ## elasticsearch_thread_pool_analyze_threads + ## elasticsearch_thread_pool_get_rejected + ## elasticsearch_thread_pool_search_queue + ## elasticsearch_transport_rx_size_in_bytes + ## elasticsearch_transport_tx_size_in_bytes + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.elasticsearch + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: 
(?:elasticsearch_(cluster_health_(active_(primary_shards|shards)|delayed_unassigned_shards|indices_status_code|initializing_shards|number_of_(data_nodes|nodes|pending_tasks)|relocating_shards|unassigned_shards)|clusterstats_(indices_fielddata_evictions|nodes_jvm_mem_heap_used_in_bytes)|fs_total_(free_in_bytes|total_in_bytes)|indices_(flush_(total|total_time_in_millis)|get_(exists_time_in_millis|exists_total|missing_time_in_millis|missing_total|time_in_millis|total)|indexing_delete_time_in_millis|indexing_delete_total|indexing_index_time_in_millis|indexing_index_total|merges_total_time_in_millis|search_query_time_in_millis|search_query_total|segments_fixed_bit_set_memory_in_bytes|segments_terms_memory_in_bytes|stats_primaries_(docs_count|indexing_index_time_in_millis|query_cache_cache_size|query_cache_evictions|segments_doc_values_memory_in_bytes|segments_index_writer_memory_in_bytes|segments_memory_in_bytes)|stats_total___(fielddata_memory_size_in_bytes|indexing_index_total|merges_total)|stats_total_(docs_count|fielddata_memory_size_in_bytes|flush_total_time_in_millis|indexing_delete_total|indexing_index_time_in_millis|indexing_index_total|merges_total_docs|merges_total_size_in_bytes|merges_total_time_in_millis|query_cache_evictions|refresh_total|refresh_total_time_in_millis|search_fetch_time_in_millis|search_fetch_total|search_query_time_in_millis|search_query_total|segments_fixed_bit_set_memory_in_bytes|segments_index_writer_memory_in_bytes|segments_memory_in_bytes|segments_terms_memory_in_bytes|store_size_in_bytes|translog_operations|translog_size_in_bytes))|jvm_(gc_collectors_.*_collection_time_in_millis|mem_heap_committed_in_bytes|mem_heap_used_in_bytes|mem_heap_used_percent)|os_cpu_(load_average_5m|percent)|process_open_file_descriptors|thread_pool_(analyze_completed|analyze_threads|get_rejected|search_queue)|transport_(rx_size_in_bytes|tx_size_in_bytes))) + sourceLabels: + - __name__ + ## Activemq Telegraf Metrics + ## List of Metrics are on following github page: + ## https://github.com/influxdata/telegraf/tree/master/plugins/inputs/activemq + ## activemq_queue_* + ## activemq_topic_* + ## activemq_*_QueueSize + ## activemq_broker_AverageMessageSize + ## activemq_broker_CurrentConnectionsCount + ## activemq_broker_MemoryLimit + ## activemq_broker_StoreLimit + ## activemq_broker_TempLimit + ## activemq_broker_TotalConnectionsCount + ## activemq_broker_TotalConsumerCount + ## activemq_broker_TotalDequeueCount + ## activemq_broker_TotalEnqueueCount + ## activemq_broker_TotalMessageCount + ## activemq_broker_TotalProducerCount + ## activemq_broker_UptimeMillis + ## activemq_jvm_memory_HeapMemoryUsage_max + ## activemq_jvm_memory_HeapMemoryUsage_used + ## activemq_jvm_memory_NonHeapMemoryUsage_used + ## activemq_jvm_runtime_Uptime + ## activemq_OperatingSystem_FreePhysicalMemorySize + ## activemq_OperatingSystem_SystemCpuLoad + ## activemq_OperatingSystem_TotalPhysicalMemorySize + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.activemq + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: 
(?:activemq_(topic_.*|queue_.*|.*_QueueSize|broker_(AverageMessageSize|CurrentConnectionsCount|MemoryLimit|StoreLimit|TempLimit|TotalConnectionsCount|TotalConsumerCount|TotalDequeueCount|TotalEnqueueCount|TotalMessageCount|TotalProducerCount|UptimeMillis)|jvm_memory_(HeapMemoryUsage_max|HeapMemoryUsage_used|NonHeapMemoryUsage_used)|jvm_runtime_Uptime|OperatingSystem_(FreePhysicalMemorySize|SystemCpuLoad|TotalPhysicalMemorySize))) + sourceLabels: + - __name__ + ## Couchbase Telegraf Metrics + ## List of Metrics are on following github page: + ## https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchbase + ## couchbase_node_memory_free + ## couchbase_node_memory_total + ## couchbase_bucket_item_count + ## couchbase_bucket_curr_connections + ## couchbase_bucket_ops_per_sec + ## couchbase_bucket_ep_num_value_ejects + ## couchbase_bucket_disk_write_queue + ## couchbase_bucket_ep_oom_errors + ## couchbase_bucket_delete_misses + ## couchbase_bucket_delete_hits + ## couchbase_bucket_bytes_read + ## couchbase_bucket_bytes_written + ## couchbase_bucket_cmd_get + ## couchbase_bucket_cmd_set + ## couchbase_bucket_cas_hits + ## couchbase_bucket_ops + ## couchbase_bucket_curr_items + ## couchbase_bucket_mem_actual_free + ## couchbase_bucket_cpu_utilization_rate + ## couchbase_bucket_swap_used + ## couchbase_bucket_disk_used + ## couchbase_bucket_rest_requests + ## couchbase_bucket_hibernated_waked + ## couchbase_bucket_mem_used + ## couchbase_bucket_xdc_ops + ## couchbase_bucket_ep_mem_low_wat + ## couchbase_bucket_ep_mem_high_wat + ## couchbase_bucket_ep_ops_update + ## couchbase_bucket_ep_tmp_oom_errors + ## couchbase_bucket_ep_dcp_replica_count + ## couchbase_bucket_ep_dcp_replica_producer_count + ## couchbase_bucket_ep_dcp_xdcr_producer_count + ## couchbase_bucket_ep_dcp_replica_items_remaining + ## couchbase_bucket_ep_dcp_xdcr_items_remaining + ## couchbase_bucket_ep_dcp_replica_items_sent + ## couchbase_bucket_ep_dcp_xdcr_items_sent + ## couchbase_bucket_ep_dcp_replica_total_bytes + ## couchbase_bucket_ep_dcp_xdcr_total_bytes + ## couchbase_bucket_ep_num_ops_get_meta + ## couchbase_bucket_ep_num_ops_set_meta + ## couchbase_bucket_ep_num_ops_del_meta + ## couchbase_bucket_ep_dcp_xdcr_count + ## couchbase_bucket_ep_resident_items_rate + ## couchbase_bucket_vb_active_queue_size + ## couchbase_bucket_vb_replica_queue_size + ## couchbase_bucket_vb_pending_queue_size + ## couchbase_bucket_vb_active_queue_fill + ## couchbase_bucket_vb_replica_queue_fill + ## couchbase_bucket_vb_pending_queue_fill + ## couchbase_bucket_vb_avg_active_queue_age + ## couchbase_bucket_vb_avg_replica_queue_age + ## couchbase_bucket_vb_avg_pending_queue_age + ## couchbase_bucket_vb_active_num + ## couchbase_bucket_vb_replica_num + ## couchbase_bucket_vb_pending_num + ## couchbase_bucket_vb_pending_curr_items + ## couchbase_bucket_vb_active_resident_items_ratio + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.couchbase + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: (?:couchbase_(node_.*|bucket_(ep_.*|vb_.*|delete_.*|cmd.*|bytes_.*|item_count|curr_connections|ops_per_sec|disk_write_queue|mem_.*|cas_hits|ops|curr_items|cpu_utilization_rate|swap_used|disk_used|rest_requests|hibernated_waked|xdc_ops))) + sourceLabels: + - __name__ + ## SquidProxy Telegraf Metrics + ## List of Metrics are on following github page: + ## https://wiki.squid-cache.org/Features/Snmp + 
## squid_cacheIpEntries + ## squid_cacheIpRequests + ## squid_cacheIpHits + ## squid_cacheFqdnEntries + ## squid_cacheFqdnRequests + ## squid_cacheFqdnMisses + ## squid_cacheFqdnNegativeHits + ## squid_cacheDnsRequests + ## squid_cacheDnsReplies + ## squid_cacheDnsSvcTime5 + ## squid_cacheSysPageFaults + ## squid_cacheSysNumReads + ## squid_cacheCurrentFileDescrCnt + ## squid_cacheCurrentUnusedFDescrCnt + ## squid_cacheCurrentResFileDescrCnt + ## squid_cacheServerRequests + ## squid_cacheServerInKb + ## squid_cacheServerOutKb + ## squid_cacheHttpAllSvcTime5 + ## squid_cacheHttpErrors + ## squid_cacheHttpInKb + ## squid_cacheHttpOutKb + ## squid_cacheHttpAllSvcTime1 + ## squid_cacheMemMaxSize + ## squid_cacheMemUsage + ## squid_cacheNumObjCount + ## squid_cacheCpuTime + ## squid_cacheMaxResSize + ## squid_cacheProtoClientHttpRequests + ## squid_cacheClients + ## squid_uptime + ## + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.squidproxy + writeRelabelConfigs: + - action: drop + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: keep + regex: (?:squid_(uptime|cache(Ip(Entries|Requests|Hits)|Fqdn(Entries|Requests|Misses|NegativeHits)|Dns(Requests|Replies|SvcTime5)|Sys(PageFaults|NumReads)|Current(FileDescrCnt|UnusedFDescrCnt|ResFileDescrCnt)|Server(Requests|InKb|OutKb)|Http(AllSvcTime5|Errors|InKb|OutKb|AllSvcTime1)|Mem(MaxSize|Usage)|NumObjCount|CpuTime|MaxResSize|ProtoClientHttpRequests|Clients))) + sourceLabels: + - __name__ + ## additionalRemoteWrite is appended to remoteWrite + additionalRemoteWrite: + ## Forward every metric which has _sumo_forward_ label set to true + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics.applications.custom + writeRelabelConfigs: + - action: keep + regex: ^true$ + sourceLabels: + - _sumo_forward_ + - action: labeldrop + regex: _sumo_forward_ + serviceMonitor: + selfMonitor: false +## Configure otelcol-instrumentation - Sumo OTel Distro Collector +## ref: https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/opentelemetry-collector/traces.md +## +otelcolInstrumentation: + enabled: true + sourceMetadata: + ## Set the _sourceName metadata field in Sumo Logic. + sourceName: "%{k8s.namespace.name}.%{k8s.pod.pod_name}.%{k8s.container.name}" + ## Set the _sourceCategory metadata field in Sumo Logic. + sourceCategory: "%{k8s.namespace.name}/%{k8s.pod.pod_name}" + ## Set the prefix, for _sourceCategory metadata. + sourceCategoryPrefix: kubernetes/ + ## Used to replace - with another character. + sourceCategoryReplaceDash: / + ## A regular expression for containers. + ## Matching containers will be excluded from Sumo. The logs will still be sent to otelcol. + ## + excludeContainerRegex: "" + ## A regular expression for hosts. + ## Matching hosts will be excluded from Sumo. The logs will still be sent to otelcol. + ## + excludeHostRegex: "" + ## A regular expression for namespaces. + ## Matching namespaces will be excluded from Sumo. The logs will still be sent to otelcol. + ## + excludeNamespaceRegex: "" + ## A regular expression for pods. + ## Matching pods will be excluded from Sumo. The logs will still be sent to otelcol. + ## + excludePodRegex: "" + ## Option to turn autoscaling on for otelcol and specify params for HPA. + ## Autoscaling needs metrics-server to access cpu metrics. 
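+ ## For example (a sketch of a user values.yaml override, not a chart default), turning + ## autoscaling on at 80% average CPU could look like: + ## otelcolInstrumentation: + ## autoscaling: + ## enabled: true + ## targetCPUUtilizationPercentage: 80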
+ ## + autoscaling: + enabled: false + minReplicas: 3 + maxReplicas: 10 + targetCPUUtilizationPercentage: 100 + # targetMemoryUtilizationPercentage: 50 + statefulset: + nodeSelector: + tolerations: [] + topologySpreadConstraints: [] + affinity: + ## Acceptable values for podAntiAffinity: + ## soft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default) + ## hard: specifies rules that must be met for a pod to be scheduled onto a node + ## + podAntiAffinity: soft + replicaCount: 3 + resources: + limits: + memory: 4Gi + cpu: 2000m + requests: + memory: 768Mi + cpu: 500m + ## Option to define priorityClassName to assign a priority class to pods. + priorityClassName: "" + ## Add custom labels only to metrics sts pods + podLabels: + ## Add custom annotations only to metrics sts pods + podAnnotations: + image: + # repository: '' + # tag: '' + pullPolicy: IfNotPresent + ## Set securityContext for containers running in pods in otelcol-instrumentation statefulset. + containers: + otelcol: + securityContext: + livenessProbe: + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 10 + failureThreshold: 3 + readinessProbe: + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 3 + startupProbe: + periodSeconds: 3 + failureThreshold: 60 + ## Extra Environment Values - allows yaml definitions + # extraEnvVars: + # - name: VALUE_FROM_SECRET + # valueFrom: + # secretKeyRef: + # key: secret_key + # name: secret_name + # extraVolumes: + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs + # extraVolumeMounts: + # - mountPath: /certs + # name: es-certs + # readOnly: true + ## To enable collecting all logs, set to false + logLevelFilter: false + config: + receivers: + jaeger: + protocols: + thrift_compact: + endpoint: 0.0.0.0:6831 + thrift_binary: + endpoint: 0.0.0.0:6832 + grpc: + endpoint: 0.0.0.0:14250 + thrift_http: + endpoint: 0.0.0.0:14268 + opencensus: + endpoint: 0.0.0.0:55678 + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + otlp/deprecated: + protocols: + http: + endpoint: 0.0.0.0:55681 + zipkin: + endpoint: 0.0.0.0:9411 + processors: + ## Source processor adds Sumo Logic related metadata + source: + annotation_prefix: k8s.pod.annotation. 
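+ ## The quoted {{ ... }} values below are Helm template expressions rendered from the + ## sourceMetadata settings above; e.g. with the defaults above, source_name renders to + ## "%{k8s.namespace.name}.%{k8s.pod.pod_name}.%{k8s.container.name}".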
+ collector: "{{ .Values.sumologic.collectorName | default .Values.sumologic.clusterName | quote }}" + exclude: + k8s.container.name: "{{ .Values.otelcolInstrumentation.sourceMetadata.excludeContainerRegex | quote }}" + k8s.host.name: "{{ .Values.otelcolInstrumentation.sourceMetadata.excludeHostRegex | quote }}" + k8s.namespace.name: "{{ .Values.otelcolInstrumentation.sourceMetadata.excludeNamespaceRegex | quote }}" + k8s.pod.name: "{{ .Values.otelcolInstrumentation.sourceMetadata.excludePodRegex | quote }}" + pod_key: k8s.pod.name + pod_name_key: k8s.pod.pod_name + pod_template_hash_key: k8s.pod.label.pod-template-hash + source_category: "{{ .Values.otelcolInstrumentation.sourceMetadata.sourceCategory | quote }}" + source_category_prefix: "{{ .Values.otelcolInstrumentation.sourceMetadata.sourceCategoryPrefix | quote }}" + source_category_replace_dash: "{{ .Values.otelcolInstrumentation.sourceMetadata.sourceCategoryReplaceDash | quote }}" + source_host: "%{k8s.pod.hostname}" + source_name: "{{ .Values.otelcolInstrumentation.sourceMetadata.sourceName | quote }}" + ## Resource processor sets the associated cluster attribute + resource: + attributes: + - action: upsert + key: k8s.cluster.name + value: '{{ include "sumologic.clusterNameReplaceSpaceWithDash" . }}' + resourcedetection: + detectors: + - system + override: false + timeout: 10s + ## Tags spans with K8S metadata, based on the context IP + k8s_tagger: + ## When true, only IP is assigned and passed (so it could be tagged on another collector) + passthrough: false + ## When true, additional fields, such as serviceName, are also extracted + owner_lookup_enabled: true + ## Extracted fields and assigned names + extract: + ## extract the following well-known metadata fields + metadata: + - containerId + - containerName + - daemonSetName + - deploymentName + - hostName + - namespace + - nodeName + - podId + - podName + - replicaSetName + - serviceName + - statefulSetName + annotations: + - key: "*" + tag_name: k8s.pod.annotation.%s + namespace_labels: + - key: "*" + tag_name: k8s.namespace.label.%s + labels: + - key: "*" + tag_name: k8s.pod.label.%s + ## The memory_limiter processor is used to prevent out of memory situations on the collector. + memory_limiter: + ## check_interval is the time between measurements of memory usage for the + ## purposes of avoiding going over the limits. Defaults to zero, so no + ## checks will be performed. Values below 1 second are not recommended since + ## it can result in unnecessary CPU consumption. + ## + check_interval: 5s + ## Maximum amount of memory, in %, targeted to be allocated by the process heap. + ## Note that typically the total memory usage of process will be about 50MiB higher + ## than this value. + ## + limit_percentage: 75 + spike_limit_percentage: 20 + ## The batch processor accepts spans and places them into batches grouped by node and resource + batch: + ## Number of spans after which a batch will be sent regardless of time + send_batch_size: 256 + ## Never more than this many spans are being sent in a batch + send_batch_max_size: 512 + ## Time duration after which a batch will be sent regardless of size + timeout: 5s + extensions: + health_check: + memory_ballast: + ## Memory Ballast size should be max 1/3 to 1/2 of memory. + size_mib: 250 + pprof: + exporters: + sumologic/metrics: + endpoint: ${SUMO_ENDPOINT_DEFAULT_METRICS_SOURCE} + ## Compression encoding format, either empty string (""), gzip or deflate (default gzip).
+ ## Empty string means no compression + ## + compress_encoding: gzip + ## Max HTTP request body size in bytes before compression (if applied). By default 1_048_576 (1MB) is used. + max_request_body_size: 1048576 + ## Format to use when sending logs to Sumo. (default json) (possible values: json, text) + log_format: text + ## Format of the metrics to be sent (default is prometheus) (possible values: carbon2, prometheus) + ## graphite is going to be supported soon. + ## + metric_format: prometheus + ## Timeout for every attempt to send data to Sumo Logic backend. Maximum connection timeout is 55s. + timeout: 5s + retry_on_failure: + enabled: true + ## Time to wait after the first failure before retrying + initial_interval: 5s + ## Upper bound on backoff + max_interval: 30s + ## Maximum amount of time spent trying to send a batch + max_elapsed_time: 120s + sending_queue: + enabled: false + ## Number of consumers that dequeue batches + num_consumers: 10 + ## Maximum number of batches kept in memory before dropping data + ## User should calculate this as num_seconds * requests_per_second where: + ## num_seconds is the number of seconds to buffer in case of a backend outage + ## requests_per_second is the average number of requests per second. + ## + queue_size: 5000 + otlphttp/traces: + endpoint: http://{{ include "otelcolinstrumentation.exporter.endpoint" . }}:4318 + service: + extensions: + - health_check + - memory_ballast + - pprof + pipelines: + traces: + receivers: + - jaeger + - opencensus + - otlp + - otlp/deprecated + - zipkin + processors: + - memory_limiter + - k8s_tagger + - source + - resource + - batch + exporters: + - otlphttp/traces + metrics: + receivers: + - otlp + - otlp/deprecated + processors: + - memory_limiter + - k8s_tagger + - source + - resource + - batch + exporters: + - sumologic/metrics +## Configure traces-sampler +## ref: https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/opentelemetry-collector/traces.md +## +tracesSampler: + deployment: + nodeSelector: + tolerations: [] + replicas: 1 + resources: + limits: + memory: 4Gi + cpu: 2000m + requests: + memory: 384Mi + cpu: 200m + ## Option to define priorityClassName to assign a priority class to pods. + priorityClassName: "" + ## Add custom labels only to traces-sampler deployment. + podLabels: + ## Add custom annotations only to traces-sampler deployment. + podAnnotations: + image: + # repository: '' + # tag: '' + pullPolicy: IfNotPresent + ## Extra Environment Values - allows yaml definitions + # extraEnvVars: + # - name: VALUE_FROM_SECRET + # valueFrom: + # secretKeyRef: + # key: secret_key + # name: secret_name + # extraVolumes: + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs + # extraVolumeMounts: + # - mountPath: /certs + # name: es-certs + # readOnly: true + ## To enable collecting all logs, set to false + # logLevelFilter: false + ## Collector configuration + config: + receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + processors: + ## The memory_limiter processor is used to prevent out of memory situations on the collector. + memory_limiter: + ## check_interval is the time between measurements of memory usage for the + ## purposes of avoiding going over the limits. Defaults to zero, so no + ## checks will be performed. Values below 1 second are not recommended since + ## it can result in unnecessary CPU consumption.
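+ ## As a rough worked example for the percentages below (assuming the 4Gi memory limit + ## set for this deployment above): limit_percentage: 75 gives a hard limit of ~3Gi and + ## spike_limit_percentage: 20 reserves ~0.8Gi, so the collector starts refusing data at + ## a soft limit of ~2.2Gi.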
+ ## + check_interval: 5s + ## Maximum amount of memory, in %, targeted to be allocated by the process heap. + ## Note that typically the total memory usage of process will be about 50MiB higher + ## than this value. + ## + limit_percentage: 75 + ## Maximum spike expected between the measurements of memory usage, in %. + spike_limit_percentage: 20 + ## Smart cascading filtering rules with preset limits. + ## Please see https://github.com/SumoLogic/sumologic-otel-collector/tree/v0.85.0-sumo-0/pkg/processor/cascadingfilterprocessor + ## for details. + ## + cascading_filter: + ## Max number of traces for which decisions are kept in memory + num_traces: 200000 + ## The batch processor accepts spans and places them into batches grouped by node and resource + batch: + ## Number of spans after which a batch will be sent regardless of time + send_batch_size: 256 + ## Never more than this many spans are being sent in a batch + send_batch_max_size: 512 + ## Time duration after which a batch will be sent regardless of size + timeout: 5s + extensions: + health_check: + memory_ballast: + ## Memory Ballast size should be max 1/3 to 1/2 of memory. + size_mib: 683 + pprof: + exporters: + ## Following generates verbose logs with span content, useful to verify what + ## metadata is being tagged. To enable, uncomment and add "logging" to exporters below. + ## There are two levels that could be used: `debug` and `info` with the former + ## being much more verbose and including (sampled) spans content + ## + # logging: + # loglevel: debug + otlphttp: + traces_endpoint: ${SUMO_ENDPOINT_DEFAULT_TRACES_SOURCE} + compression: gzip + service: + extensions: + - health_check + - memory_ballast + - pprof + pipelines: + traces: + receivers: + - otlp + processors: + - memory_limiter + - cascading_filter + - batch + exporters: + - otlphttp +metadata: + ## Configure image for Opentelemetry Collector (for logs and metrics) + image: + # repository: '' + # tag: '' + pullPolicy: IfNotPresent + securityContext: + ## The group ID of all processes in the statefulset containers. This can be anything, but it does need to be set. + ## The default is 0 (root), and containers don't have write permissions for volumes in that case. + ## + fsGroup: 999 + ## Add custom labels to all otelcol sts pods(logs and metrics) + podLabels: + ## Add custom annotations to all otelcol sts pods(logs and metrics) + podAnnotations: + ## Add custom labels to all otelcol svc (logs and metrics) + serviceLabels: + ## Configure persistence for Opentelemetry Collector + persistence: + enabled: true + # storageClass: '' + accessMode: ReadWriteOnce + size: 10Gi + ## Add custom labels to all otelcol statefulset PVC (logs and metrics) + pvcLabels: + ## Configure metrics pipeline. + ## This section affects only otelcol provider. + ## + metrics: + enabled: true + logLevel: info + config: + ## Directly alter the OT configuration. The value of this key should be a dictionary, that will + ## be directly merged with the generated configuration, overriding existing values. + ## For example: + ## override: + ## processors: + ## batch: + ## send_batch_size: 512 + ## will change the batch size of the pipeline. + ## ## WARNING: This field is not subject to backwards-compatibility guarantees offered by the rest + ## of this chart. It involves implementation details that may change even in minor versions. + ## Use with caution, and consider opening an issue, so your customization can be added in a safer way. 
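+ ## E.g. the batch-size change from the example above, expressed through the merge key that follows: + ## merge: + ## processors: + ## batch: + ## send_batch_size: 512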
+ ## + merge: + ## Completely override existing config and replace it with the contents of this value. + ## The value of this key should be a dictionary, that will replace the normal configuration. + ## This is an advanced feature, use with caution, and review the generated configuration first. + ## + override: + ## List of additional endpoints to be handled by Metrics Metadata Pods + additionalEndpoints: [] + statefulset: + nodeSelector: + tolerations: [] + topologySpreadConstraints: [] + affinity: + ## Acceptable values for podAntiAffinity: + ## soft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default) + ## hard: specifies rules that must be met for a pod to be scheduled onto a node + ## + podAntiAffinity: soft + replicaCount: 3 + resources: + limits: + memory: 1Gi + cpu: 1000m + requests: + memory: 768Mi + cpu: 500m + ## Option to define priorityClassName to assign a priority class to pods. + priorityClassName: "" + ## Add custom labels only to metrics sts pods + podLabels: + ## Add custom annotations only to metrics sts pods + podAnnotations: + ## Set securityContext for containers running in pods in metrics statefulset. + containers: + otelcol: + securityContext: + livenessProbe: + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 10 + failureThreshold: 3 + readinessProbe: + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 3 + startupProbe: + periodSeconds: 3 + failureThreshold: 60 + ## Extra Environment Values - allows yaml definitions + # extraEnvVars: + # - name: VALUE_FROM_SECRET + # valueFrom: + # secretKeyRef: + # key: secret_key + # name: secret_name + # extraVolumes: + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs + # extraVolumeMounts: + # - mountPath: /certs + # name: es-certs + # readOnly: true + ## Option to turn autoscaling on for metrics and specify params for HPA. + ## Autoscaling needs metrics-server to access cpu metrics. + ## + autoscaling: + enabled: false + minReplicas: 3 + maxReplicas: 10 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 50 + ## Option to specify PodDisruptionBudgets + ## You can specify only one of maxUnavailable and minAvailable in a single PodDisruptionBudget + ## + podDisruptionBudget: + minAvailable: 2 + ## To use maxUnavailable, set minAvailable to null and uncomment the below: + # maxUnavailable: 1 + ## Configure logs pipeline. + ## This section affects only otelcol provider. + ## + logs: + enabled: true + logLevel: info + config: + ## Directly alter the OT configuration. The value of this key should be a dictionary, that will + ## be directly merged with the generated configuration, overriding existing values. + ## For example: + ## override: + ## processors: + ## batch: + ## send_batch_size: 512 + ## will change the batch size of the pipeline. + ## ## WARNING: This field is not subject to backwards-compatibility guarantees offered by the rest + ## of this chart. It involves implementation details that may change even in minor versions. + ## Use with caution, and consider opening an issue, so your customization can be added in a safer way. + ## + merge: + ## Completely override existing config and replace it with the contents of this value. + ## The value of this key should be a dictionary, that will replace the normal configuration. + ## This is an advanced feature, use with caution, and review the generated configuration first.
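+ ## One way to review the generated configuration is to render the chart locally first + ## (a sketch, assuming the chart repo was added under the alias "sumologic" and that a + ## local my-values.yaml holds the overrides): + ## helm template my-release sumologic/sumologic -f my-values.yaml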
+ ## + override: + statefulset: + nodeSelector: + tolerations: [] + topologySpreadConstraints: [] + affinity: + ## Acceptable values for podAntiAffinity: + ## soft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default) + ## hard: specifies rules that must be met for a pod to be scheduled onto a node + ## + podAntiAffinity: soft + replicaCount: 3 + resources: + limits: + memory: 1Gi + cpu: 1000m + requests: + memory: 768Mi + cpu: 500m + ## Option to define priorityClassName to assign a priority class to pods. + priorityClassName: "" + ## Add custom labels only to logs sts pods + podLabels: + ## Add custom annotations only to logs sts pods + podAnnotations: + ## Set securityContext for containers running in pods in logs statefulset. + containers: + otelcol: + securityContext: + livenessProbe: + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 10 + failureThreshold: 3 + readinessProbe: + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 3 + startupProbe: + periodSeconds: 3 + failureThreshold: 60 + ## Extra Environment Values - allows yaml definitions + # extraEnvVars: + # - name: VALUE_FROM_SECRET + # valueFrom: + # secretKeyRef: + # key: secret_key + # name: secret_name + # extraVolumes: + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs + # extraVolumeMounts: + # - mountPath: /certs + # name: es-certs + # readOnly: true + # extraPorts: + # - containerPort: 4319 + # name: otlphttp2 + # protocol: TCP + # extraArgs: [] + ## Option to turn autoscaling on for logs and specify params for HPA. + ## Autoscaling needs metrics-server to access cpu metrics. + ## + autoscaling: + enabled: false + minReplicas: 3 + maxReplicas: 10 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 50 + ## Option to specify PodDisruptionBudgets + ## You can specify only one of maxUnavailable and minAvailable in a single PodDisruptionBudget + ## + podDisruptionBudget: + minAvailable: 2 + ## To use maxUnavailable, set minAvailable to null and uncomment the below: + # maxUnavailable: 1 +## Configure traces-gateway +## ref: https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/opentelemetry-collector/traces.md +## +tracesGateway: + enabled: true + ## Option to turn autoscaling on for otelcol and specify params for HPA. + ## Autoscaling needs metrics-server to access cpu metrics. + ## + autoscaling: + enabled: false + minReplicas: 3 + maxReplicas: 10 + targetCPUUtilizationPercentage: 100 + # targetMemoryUtilizationPercentage: 50 + deployment: + replicas: 1 + nodeSelector: + tolerations: [] + resources: + limits: + memory: 2Gi + cpu: 1000m + requests: + memory: 196Mi + cpu: 50m + ## Add custom labels only to traces-gateway deployment. + podLabels: + ## Add custom annotations only to traces-gateway deployment.
+ podAnnotations: + image: + # repository: '' + # tag: '' + pullPolicy: IfNotPresent + livenessProbe: + periodSeconds: 15 + timeoutSeconds: 10 + failureThreshold: 3 + readinessProbe: + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 3 + startupProbe: + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 60 + ## Extra Environment Values - allows yaml definitions + # extraEnvVars: + # - name: VALUE_FROM_SECRET + # valueFrom: + # secretKeyRef: + # key: secret_key + # name: secret_name + # extraVolumes: + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs + # extraVolumeMounts: + # - mountPath: /certs + # name: es-certs + # readOnly: true + ## Option to define priorityClassName to assign a priority class to pods. + priorityClassName: "" + ## To enable collecting all logs, set to false + logLevelFilter: false + config: + receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + processors: + ## The memory_limiter processor is used to prevent out of memory situations on the collector. + memory_limiter: + ## check_interval is the time between measurements of memory usage for the + ## purposes of avoiding going over the limits. Defaults to zero, so no + ## checks will be performed. Values below 1 second are not recommended since + ## it can result in unnecessary CPU consumption. + ## + check_interval: 5s + ## Maximum amount of memory, in %, targeted to be allocated by the process heap. + ## Note that typically the total memory usage of process will be about 50MiB higher + ## than this value. + ## + limit_percentage: 75 + ## Maximum spike expected between the measurements of memory usage, in %. + spike_limit_percentage: 20 + ## The batch processor accepts spans and places them into batches grouped by node and resource + batch: + ## Number of spans after which a batch will be sent regardless of time + send_batch_size: 256 + ## Maximum number of spans sent at once + send_batch_max_size: 512 + ## Time duration after which a batch will be sent regardless of size + timeout: 5s + extensions: + health_check: + memory_ballast: + ## Memory Ballast size should be max 1/3 to 1/2 of memory. + size_mib: 250 + pprof: + exporters: + loadbalancing: + protocol: + otlp: + timeout: 10s + tls: + insecure: true + resolver: + dns: + hostname: '{{ include "tracesgateway.exporter.loadbalancing.endpoint" . }}' + port: 4317 + service: + extensions: + - health_check + - memory_ballast + - pprof + pipelines: + traces: + receivers: + - otlp + processors: + - memory_limiter + - batch + exporters: + - loadbalancing +## Configuration of the OpenTelemetry Collector that collects Kubernetes events. +## See https://github.com/SumoLogic/sumologic-kubernetes-collection/deploy/docs/collecting-kubernetes-events.md. +## +otelevents: + ## Configure image for Opentelemetry Collector + image: + # repository: '' + # tag: '' + pullPolicy: IfNotPresent + logLevel: info + ## Customize the Opentelemetry Collector configuration beyond the exposed options + config: + ## Directly alter the OT configuration. The value of this key should be a dictionary, that will + ## be directly merged with the generated configuration, overriding existing values. + ## For example: + ## override: + ## processors: + ## batch: + ## send_batch_size: 512 + ## will change the batch size of the pipeline. + ## ## WARNING: This field is not subject to backwards-compatibility guarantees offered by the rest + ## of this chart. 
It involves implementation details that may change even in minor versions. + ## Use with caution, and consider opening an issue, so your customization can be added in a safer way. + ## + merge: + ## Completely override existing config and replace it with the contents of this value. + ## The value of this key should be a dictionary, that will replace the normal configuration. + ## This is an advanced feature, use with caution, and review the generated configuration first. + ## + override: + statefulset: + nodeSelector: + tolerations: [] + topologySpreadConstraints: [] + affinity: + ## Acceptable values for podAntiAffinity: + ## soft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default) + ## hard: specifies rules that must be met for a pod to be scheduled onto a node + ## + podAntiAffinity: soft + resources: + limits: + memory: 2Gi + cpu: 2000m + requests: + memory: 500Mi + cpu: 200m + ## Option to define priorityClassName to assign a priority class to pods. + priorityClassName: "" + ## Add custom labels only to events sts pods + podLabels: + ## Add custom annotations only to events sts pods + podAnnotations: + ## The group ID of all processes in the statefulset containers. This can be anything, but it does need to be set. + ## The default is 0 (root), and containers don't have write permissions for volumes in that case. + ## + securityContext: + fsGroup: 999 + ## Set securityContext for containers running in pods in events statefulset. + containers: + otelcol: + securityContext: + livenessProbe: + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 10 + failureThreshold: 3 + readinessProbe: + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 3 + startupProbe: + periodSeconds: 3 + failureThreshold: 60 + ## Extra Environment Values - allows yaml definitions + # extraEnvVars: + # - name: VALUE_FROM_SECRET + # valueFrom: + # secretKeyRef: + # key: secret_key + # name: secret_name + # extraVolumes: + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs + # extraVolumeMounts: + # - mountPath: /certs + # name: es-certs + # readOnly: true +## Configure cloudwatch collection with Otelcol +otelcloudwatch: + statefulset: + nodeSelector: + tolerations: [] + topologySpreadConstraints: [] + affinity: + ## Acceptable values for podAntiAffinity: + ## soft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default) + ## hard: specifies rules that must be met for a pod to be scheduled onto a node + ## + podAntiAffinity: soft + replicaCount: 1 + resources: + limits: + memory: 1Gi + cpu: 1000m + requests: + memory: 768Mi + cpu: 500m + ## Option to define priorityClassName to assign a priority class to pods. + priorityClassName: "" + ## Add custom labels only to logs otel sts pods + podLabels: + ## Add custom annotations only to logs otel sts pods + podAnnotations: + ## Set securityContext for containers running in pods in the cloudwatch collection statefulset.
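+ ## E.g. a hardened container securityContext (illustrative, not a chart default) could set: + ## containers: + ## otelcol: + ## securityContext: + ## readOnlyRootFilesystem: true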
+ containers: + otelcol: + securityContext: + livenessProbe: + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 10 + failureThreshold: 3 + readinessProbe: + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 3 + startupProbe: + periodSeconds: 3 + failureThreshold: 60 +## Configure log collection with Otelcol +otellogs: + ## Metrics from Collector + metrics: + enabled: true + ## Add custom labels to otelcol svc + serviceLabels: + ## Configure image for Opentelemetry Collector + image: + # repository: '' + # tag: '' + pullPolicy: IfNotPresent + logLevel: info + config: + ## Directly alter the OT configuration. The value of this key should be a dictionary, that will + ## be directly merged with the generated configuration, overriding existing values. + ## For example: + ## override: + ## processors: + ## batch: + ## send_batch_size: 512 + ## will change the batch size of the pipeline. + ## ## WARNING: This field is not subject to backwards-compatibility guarantees offered by the rest + ## of this chart. It involves implementation details that may change even in minor versions. + ## Use with caution, and consider opening an issue, so your customization can be added in a safer way. + ## + merge: + ## Completely override existing config and replace it with the contents of this value. + ## The value of this key should be a dictionary, that will replace the normal configuration. + ## This is an advanced feature, use with caution, and review the generated configuration first. + ## + override: + ## Set securityContext for containers running in pods in log collector daemonset + daemonset: + securityContext: + ## In order to reliably read logs from mounted node logging paths, we need to run as root + fsGroup: 0 + runAsUser: 0 + runAsGroup: 0 + ## Add custom labels to the otelcol daemonset + labels: + ## Add custom annotations to the otelcol daemonset + annotations: + ## Add custom labels to all otelcol daemonset pods + podLabels: + ## Add custom annotations to all otelcol daemonset pods + podAnnotations: + resources: + limits: + memory: 1Gi + cpu: 1000m + requests: + memory: 32Mi + cpu: 100m + ## Option to define priorityClassName to assign a priority class to pods. + ## If not set then templates/priorityclass.yaml is used.
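+ ## E.g. to reuse a priority class that already exists in the cluster (name illustrative): + ## priorityClassName: logging-high-priority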
+ ## + priorityClassName: "" + ## Set securityContext for containers running in pods in log collector daemonset + containers: + otelcol: + securityContext: + capabilities: + drop: + - ALL + ## Set securityContext and image for initContainers running in pods in log collector daemonset + initContainers: + changeowner: + image: + repository: public.ecr.aws/docker/library/busybox + tag: 1.36.0 + pullPolicy: IfNotPresent + securityContext: + capabilities: + drop: + - ALL + add: + - CAP_CHOWN + nodeSelector: + tolerations: [] + affinity: + ## Extra Environment Values - allows yaml definitions + # extraEnvVars: + # - name: VALUE_FROM_SECRET + # valueFrom: + # secretKeyRef: + # key: secret_key + # name: secret_name + # extraVolumes: + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs + # extraVolumeMounts: + # - mountPath: /certs + # name: es-certs + # readOnly: true + ## additionalDaemonSets allows setting daemonsets with affinity, nodeSelector and resources + ## different than the main DaemonSet + ## Be careful and set nodeAffinity for the main DaemonSet, + ## as we do not support multiple pods of otellogs on the same node + ## ## e.g: + ## additionalDaemonSets: + ## linux: + ## nodeSelector: + ## kubernetes.io/os: linux + ## resources: + ## limits: + ## memory: 1Gi + ## cpu: 6 + ## requests: + ## memory: 32Mi + ## cpu: 2 + ## daemonset: + ## affinity: + ## nodeAffinity: + ## requiredDuringSchedulingIgnoredDuringExecution: + ## nodeSelectorTerms: + ## - matchExpressions: + ## - key: kubernetes.io/os + ## operator: NotIn + ## values: + ## - linux + ## + additionalDaemonSets: +## Configure telegraf-operator +## ref: https://github.com/influxdata/helm-charts/blob/master/charts/telegraf-operator/values.yaml +## +telegraf-operator: + enabled: false + ## Put here the new name if you want to override the full name used for Telegraf Operator components. + # fullnameOverride: '' + image: + sidecarImage: public.ecr.aws/sumologic/telegraf:1.21.2 + replicaCount: 1 + classes: + secretName: telegraf-operator-classes + default: sumologic-prometheus + data: + sumologic-prometheus: "[[outputs.prometheus_client]]\nConfiguration + details:\nhttps://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client#configuration\n\ + \ listen = \":9273\"\n metric_version = 2\n ## Disable the default collectors\n\ + \ collectors_exclude = [\"gocollector\", \"process\"]\n ## Telegraf operator adds\ + \ the internal plugin by default, and the Helm Chart doesn't let us disable it\n\ + \ ## Instead, drop the metrics at the output\n namedrop = [\"internal*\"]\n" + # imagePullSecrets: [] +## Configure Falco +## Please note that Falco is embedded in this Helm Chart for user convenience only - Sumo Logic does not provide production support for it +## This is an experimental configuration and shouldn't be used in a production environment +## https://github.com/falcosecurity/charts/tree/master/falco +## +falco: + enabled: false + ## Put here the new name if you want to override the full name used for Falco components.
+ # fullnameOverride: '' + # imagePullSecrets: [] + image: + registry: public.ecr.aws + # repository: falcosecurity/falco-no-driver + ## Add kernel-devel package through MachineConfig, required to enable building of missing falco modules (only for OpenShift) + addKernelDevel: true + extra: + ## Add initContainer to wait until kernel-devel is installed on host + initContainers: + - command: + - sh + - -c + - 'while [ -f /host/etc/redhat-release ] && [ -z "$(ls /host/usr/src/kernels)" ] ; do + + echo "waiting for kernel headers to be installed" + + sleep 3 + + done + + ' + image: public.ecr.aws/docker/library/busybox:1.36.0 + name: init-falco + volumeMounts: + - mountPath: /host/usr + name: usr-fs + readOnly: true + - mountPath: /host/etc + name: etc-fs + readOnly: true + driver: + ## Set to ebpf to enable eBPF support for Falco instead of falco-probe kernel module. + ## https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/troubleshoot-collection.md#falco-and-google-kubernetes-engine-gke + ## + kind: module + loader: + initContainer: + image: + registry: public.ecr.aws + # repository: falcosecurity/falco-driver-loader + falco: + load_plugins: + - json + - k8saudit + json_output: true + ## The location of the rules file(s). This can contain one or more paths to + ## separate rules files. + ## Explicitly add missing /etc/falco/rules.available/application_rules.yaml + ## before https://github.com/falcosecurity/charts/issues/230 gets resolved. + ## + rules_file: + - /etc/falco/falco_rules.yaml + - /etc/falco/falco_rules.local.yaml + - /etc/falco/k8s_audit_rules.yaml + - /etc/falco/rules.d + - /etc/falco/rules.available/application_rules.yaml + falcoctl: + artifact: + follow: + enabled: false + install: + enabled: false + customRules: + ## Mark the following as known k8s api callers: + ## * prometheus + ## * prometheus operator + ## * telegraf operator + ## * grafana sidecar + ## + rules_user_known_k8s_api_callers.yaml: "- macro: user_known_contact_k8s_api_server_activities\n\ + \ condition: >\n (container.image.repository = \"quay.io/prometheus/prometheus\"\ + ) or\n (container.image.repository = \"quay.io/coreos/prometheus-operator\")\ + \ or\n (container.image.repository = \"quay.io/influxdb/telegraf-operator\")\ + \ or\n (container.image.repository = \"kiwigrid/k8s-sidecar\")" + rules_user_sensitive_mount_containers.yaml: "- macro: user_sensitive_mount_containers\n\ + \ condition: >\n (container.image.repository = \"falcosecurity/falco\") or\n\ + \ (container.image.repository = \"quay.io/prometheus/node-exporter\")" + ## NOTE: kube-proxy is not matched exactly because of regional ECR, e.g. + ## 602401143452.dkr.ecr.us-west-1.amazonaws.com/eks/kube-proxy + ## + rules_user_privileged_containers.yaml: "- macro: user_privileged_containers\n condition:\ + \ >\n (container.image.repository endswith \".amazonaws.com/eks/kube-proxy\")" +## Configure Tailing Sidecar Operator +## ref: https://github.com/SumoLogic/tailing-sidecar/blob/main/helm/tailing-sidecar-operator/values.yaml +## +tailing-sidecar-operator: + enabled: false + ## Put here the new name if you want to override the full name used for tailing-sidecar-operator components.
+ # fullnameOverride: '' + ## creation of Security Context Constraints in Openshift + scc: + create: false +## Configure OpenTelemetry Operator - Instrumentation +## ref: https://github.com/open-telemetry/opentelemetry-helm-charts/tree/main/charts/opentelemetry-operator +## +opentelemetry-operator: + enabled: false + ## Specific for Sumo Logic chart - Instrumentation resource creation + instrumentationJobImage: + image: + repository: sumologic/kubernetes-tools + tag: 2.14.0 + createDefaultInstrumentation: false + instrumentationNamespaces: "" + ## Current instrumentation doesn't support customization + ## for nodejs. Traces are always enabled. + ## nodejs: + ## traces: + ## enabled: true + ## + instrumentation: + dotnet: + traces: + enabled: true + metrics: + enabled: true + java: + traces: + enabled: true + metrics: + enabled: true + python: + traces: + enabled: true + metrics: + enabled: true + ## Specific for OpenTelemetry Operator chart values + admissionWebhooks: + failurePolicy: Fail + enabled: true + ## skip admission webhook on our own OpenTelemetryCollector object to avoid having to wait for operator to start + objectSelector: + matchExpressions: + - key: sumologic.com/component + operator: NotIn + values: + - metrics + certManager: + enabled: false + issuerRef: + autoGenerateCert: true + manager: + collectorImage: + repository: public.ecr.aws/sumologic/sumologic-otel-collector + tag: 0.85.0-sumo-0 + env: {} + resources: + limits: + cpu: 250m + memory: 512Mi + requests: + cpu: 150m + memory: 256Mi +## pvcCleaner deletes unused PVCs +pvcCleaner: + metrics: + enabled: false + logs: + enabled: false + job: + image: + repository: public.ecr.aws/sumologic/kubernetes-tools-kubectl + tag: 2.20.0 + pullPolicy: IfNotPresent + resources: + limits: + memory: 256Mi + cpu: 2000m + requests: + memory: 64Mi + cpu: 100m + nodeSelector: + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + ## Affinity and anti-affinity + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: + podLabels: + ## Add custom annotations + podAnnotations: + ## Schedule for cronJobs + schedule: "*/15 * * * *" + ## securityContext for pvcCleaner pods + securityContext: + runAsUser: 1000 diff --git a/deploy/helm/sumologic/values.schema.json b/deploy/helm/sumologic/values.schema.json new file mode 100644 index 0000000000..497c536fce --- /dev/null +++ b/deploy/helm/sumologic/values.schema.json @@ -0,0 +1,7037 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "comment": "Sumo Logic Kubernetes Collection configuration file\nAll the comments start with two or more # characters", + "properties": { + "nameOverride": { + "type": "string", + "description": "Used to override the Chart name.", + "default": "" + }, + "fullnameOverride": { + "type": "string", + "description": "Used to override the chart's full name.", + "default": "" + }, + "namespaceOverride": { + "type": "string", + "description": "Used to override the chart's default target namespace.", + "default": "", + "comment": "Use the same namespace as namespaceOverride in 'kube-prometheus-stack.namespaceOverride' if Prometheus setup is also enabled" + }, + "sumologic": { + "type": "object", + "description": "", + "properties": { + "setupEnabled": { + "type": "boolean", + "description": "If enabled, a pre-install hook will create Collector and Sources in Sumo Logic.",
"default": true, + "comment": "If enabled, a pre-install hook will create Collector and Sources in Sumo Logic" + }, + "cleanupEnabled": { + "type": "boolean", + "description": "If enabled, a pre-delete hook will destroy Kubernetes secret and Sumo Logic Collector.", + "default": false, + "comment": "If enabled, a pre-delete hook will destroy Collector in Sumo Logic" + }, + "envFromSecret": { + "type": "string", + "description": "If enabled, accessId and accessKey will be sourced from Secret Name given. Be sure to include at least the following env variables in your secret (1) SUMOLOGIC_ACCESSID, (2) SUMOLOGIC_ACCESSKEY", + "default": "sumo-api-secret", + "commented": true, + "comment": "If enabled, accessId and accessKey will be sourced from Secret Name given\nBe sure to include at least the following env variables in your secret\n(1) SUMOLOGIC_ACCESSID, (2) SUMOLOGIC_ACCESSKEY" + }, + "accessId": { + "type": "string", + "description": "Sumo access ID.", + "default": "", + "commented": true, + "comment": "Sumo access ID" + }, + "accessKey": { + "type": "string", + "description": "Sumo access key.", + "default": "", + "comment": "Sumo access key", + "commented": true + }, + "endpoint": { + "type": "string", + "description": "Sumo API endpoint; Leave blank for automatic endpoint discovery and redirection.", + "default": "", + "comment": "Sumo API endpoint; Leave blank for automatic endpoint discovery and redirection\nref: https://help.sumologic.com/docs/api/getting-started#sumo-logic-endpoints-by-deployment-and-firewall-security" + }, + "httpProxy": { + "type": "string", + "description": "HTTP proxy URL", + "default": "", + "comment": "proxy urls" + }, + "httpsProxy": { + "type": "string", + "description": "HTTPS proxy URL", + "default": "" + }, + "noProxy": { + "type": "string", + "description": "List of comma separated hostnames which should be excluded from the proxy", + "default": "kubernetes.default.svc", + "comment": "Exclude Kubernetes internal traffic from proxy" + }, + "collectorName": { + "type": "string", + "description": "The name of the Sumo Logic collector that will be created in the SetUp job. Defaults to `clusterName` if not specified.", + "default": "", + "commented": true, + "comment": "Collector name" + }, + "clusterName": { + "type": "string", + "description": "An identifier for the Kubernetes cluster. Whitespaces in the cluster name will be replaced with dashes.", + "default": "kubernetes", + "comment": "Cluster name: Note spaces are not allowed and will be replaced with dashes." 
+ }, + "cluster": { + "type": "object", + "description": "Configuration of Kubernetes for [Terraform client](https://www.terraform.io/docs/providers/kubernetes/index.html#argument-reference).", + "comment": "Configuration of Kubernetes for Terraform client\nhttps://www.terraform.io/docs/providers/kubernetes/index.html#argument-reference\nAll double quotes should be escaped here regarding Terraform syntax", + "properties": { + "host": { + "type": "string", + "description": "", + "default": "https://kubernetes.default.svc" + }, + "username": { + "type": "string", + "description": "", + "default": "", + "commented": true + }, + "password": { + "type": "string", + "description": "", + "default": "", + "commented": true + }, + "insecure": { + "type": "string", + "description": "", + "default": "", + "commented": true + }, + "client_certificate": { + "type": "string", + "description": "", + "default": "", + "commented": true + }, + "client_key": { + "type": "string", + "description": "", + "default": "", + "commented": true + }, + "cluster_ca_certificate": { + "type": "string", + "description": "", + "default": "${file(\"/var/run/secrets/kubernetes.io/serviceaccount/ca.crt\")}" + }, + "config_path": { + "type": "string", + "description": "", + "default": "", + "commented": true + }, + "config_context": { + "type": "string", + "description": "", + "default": "", + "commented": true + }, + "config_context_auth_info": { + "type": "string", + "description": "", + "default": "", + "commented": true + }, + "config_context_cluster": { + "type": "string", + "description": "", + "default": "", + "commented": true + }, + "token": { + "type": "string", + "description": "", + "default": "${file(\"/var/run/secrets/kubernetes.io/serviceaccount/token\")}" + }, + "exec": { + "type": "object", + "description": "", + "commented": true, + "properties": { + "api_version": { + "type": "string", + "description": "", + "default": "" + }, + "command": { + "type": "string", + "description": "", + "default": "" + }, + "args": { + "type": "string", + "description": "", + "default": "" + }, + "env": { + "type": "object", + "description": "", + "default": {} + } + } + } + } + }, + "collectionMonitoring": { + "type": "boolean", + "description": "If you set it to false, it would set EXCLUDE_NAMESPACE= and not add the Otelcol logs and Prometheus remotestorage metrics.", + "default": true, + "comment": "If you set it to false, it would set EXCLUDE_NAMESPACE=\nand not add the Otelcol logs and Prometheus remotestorage metrics." 
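Because every leaf in this generated schema carries a `default`, and entries that ship commented out in values.yaml are flagged with `"commented": true`, the file can be walked back into a flat key-to-default listing. A small sketch of such a traversal (array defaults expressed via `items` are skipped here for brevity):

```python
#!/usr/bin/env python3
# Sketch: flatten the schema into dotted key paths and their defaults,
# skipping subtrees marked "commented": true (values that ship
# commented out in values.yaml).

import json


def iter_defaults(node: dict, prefix: str = ''):
    if node.get('commented'):
        return
    props = node.get('properties')
    if props:
        for key, child in props.items():
            yield from iter_defaults(child, f'{prefix}{key}.')
    elif 'default' in node:
        yield prefix.rstrip('.'), node['default']


with open('deploy/helm/sumologic/values.schema.json', encoding='utf-8') as f:
    schema = json.load(f)

for path, default in iter_defaults(schema):
    print(f'{path} = {json.dumps(default)}')
```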
+ }, + "pullSecrets": { + "type": "array", + "description": "Optional list of secrets that will be used for pulling images for Sumo Logic's deployments and statefulsets.", + "default": [ + { + "name": "myRegistryKeySecretName" + } + ], + "comment": "Optionally specify an array of pullSecrets.\nThey will be added to serviceaccount that is used for Sumo Logic's\ndeployments and statefulsets.\n\nSecrets must be manually created in the namespace.\nref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\n", + "commented": true + }, + "podLabels": { + "type": "object", + "description": "Additional labels for the pods.", + "default": {}, + "comment": "Add custom labels to the following sumologic resources(otelcol sts, setup job, otelcol deployment)" + }, + "podAnnotations": { + "type": "object", + "description": "Additional annotations for the pods.", + "default": {}, + "comment": "Add custom annotations to the following sumologic resources(otelcol sts, setup job, otelcol deployment)" + }, + "serviceAccount": { + "type": "object", + "description": "", + "comment": "Add custom annotations to sumologic serviceAccounts", + "properties": { + "annotations": { + "type": "object", + "description": "Add custom annotations to sumologic serviceAccounts", + "default": {} + } + } + }, + "scc": { + "type": "object", + "description": "", + "comment": "creation of Security Context Constraints in Openshift", + "properties": { + "create": { + "type": "boolean", + "description": "Create OpenShift's Security Context Constraint", + "default": false + } + } + }, + "setup": { + "type": "object", + "description": "", + "properties": { + "force": { + "type": "boolean", + "description": "Force collection installation (disables k8s version verification)", + "default": true, + "commented": true, + "comment": "uncomment to force collection installation (disables k8s version verification)" + }, + "job": { + "type": "object", + "description": "", + "properties": { + "image": { + "type": "object", + "description": "", + "properties": { + "repository": { + "type": "string", + "description": "Image repository for Sumo Logic setup job docker container.", + "default": "public.ecr.aws/sumologic/kubernetes-setup" + }, + "tag": { + "type": "string", + "description": "Image tag for Sumo Logic setup job docker container.", + "default": "3.10.0" + }, + "pullPolicy": { + "type": "string", + "description": "Image pullPolicy for Sumo Logic docker container.", + "default": "IfNotPresent" + } + } + }, + "pullSecrets": { + "type": "array", + "description": "Optional list of secrets that will be used for pulling images for Sumo Logic's setup job.", + "commented": true, + "comment": "Optionally specify an array of pullSecrets.\nThey will be added to serviceaccount that is used for Sumo Logic's\nsetup job.\nSecrets must be manually created in the namespace.\nref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\n", + "default": [ + { + "name": "myRegistryKeySecretName" + } + ] + }, + "resources": { + "type": "object", + "description": "Resource requests and limits for the setup Job.", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "256Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "2000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + 
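The resource defaults in this schema use standard Kubernetes quantity strings (`Mi`/`Gi` for memory, `m` for millicores). A small illustrative check, hypothetical and limited to the suffixes that actually occur in these defaults, that requests never exceed limits:

```python
#!/usr/bin/env python3
# Sketch: parse the Kubernetes quantity strings used in these defaults
# and assert that requests stay within limits.


def parse_quantity(q: str) -> float:
    """Convert a quantity string to a base unit (bytes or cores)."""
    if q.endswith('Gi'):
        return float(q[:-2]) * 1024 ** 3
    if q.endswith('Mi'):
        return float(q[:-2]) * 1024 ** 2
    if q.endswith('m'):
        return float(q[:-1]) / 1000.0
    return float(q)


# The setup job defaults from this schema.
resources = {'limits': {'memory': '256Mi', 'cpu': '2000m'},
             'requests': {'memory': '64Mi', 'cpu': '200m'}}

for resource in ('memory', 'cpu'):
    request = parse_quantity(resources['requests'][resource])
    limit = parse_quantity(resources['limits'][resource])
    assert request <= limit, f'{resource}: request exceeds limit'
print('requests are within limits')
```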
"default": "64Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "200m" + } + } + } + } + }, + "nodeSelector": { + "type": "object", + "description": "Node selector for sumologic setup job. [See docs/best-practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "Add tolerations for the setup Job.", + "comment": "Node tolerations for server scheduling to nodes with taints\nRef: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\n", + "default": [], + "example": [ + { + "key": null, + "operator": "Exists", + "effect": "NoSchedule" + } + ] + }, + "affinity": { + "type": "object", + "description": "Add affinity and anti-affinity for the setup Job.", + "default": {}, + "comment": "Affinity and anti-affinity\nRef: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity\n" + }, + "podLabels": { + "type": "object", + "description": "Additional labels for the setup Job pod.", + "default": {}, + "comment": "Add custom labels only to setup job pod" + }, + "podAnnotations": { + "type": "object", + "description": "Additional annotations for the setup Job pod.", + "default": {}, + "comment": "Add custom annotations only to setup job pod" + } + } + }, + "debug": { + "type": "boolean", + "description": "Enable debug mode (disables the automatic execution of the setup.sh script)", + "default": true, + "commented": true, + "comment": "uncomment for the debug mode (disables the automatic run of the setup.sh script)" + }, + "monitors": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "comment": "If enabled, a pre-install hook will create k8s monitors in Sumo Logic", + "description": "If enabled, a pre-install hook will create k8s monitors in Sumo Logic.", + "default": true + }, + "monitorStatus": { + "type": "string", + "description": "The installed monitors default status: enabled/disabled.", + "default": "enabled", + "comment": "The installed monitors default status: enabled/disabled" + }, + "notificationEmails": { + "type": [ + "array", + "string" + ], + "description": "A list of emails to send notifications from monitors.", + "default": [], + "comment": "A list of emails to send notifications from monitors" + } + } + }, + "dashboards": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "If enabled, a pre-install hook will install k8s dashboards in Sumo Logic.", + "default": true, + "comment": "If enabled, a pre-install hook will install k8s dashboards in Sumo Logic" + } + } + } + } + }, + "collector": { + "type": "object", + "description": "", + "properties": { + "fields": { + "type": "object", + "description": "Configuration of Sumo Logic fields. [See Sumo Logic Terraform Plugin documentation for more information](https://registry.terraform.io/providers/SumoLogic/sumologic/latest/docs/resources/collector#fields). All double quotes should be escaped here regarding Terraform syntax.", + "default": {}, + "comment": "Configuration of additional collector fields\nhttps://help.sumologic.com/docs/manage/fields/#http-source-fields" + }, + "sources": { + "type": "object", + "description": "Configuration of HTTP sources. [See docs/Terraform.md for more information](/docs/terraform.md). 
All double quotes should be escaped here regarding Terraform syntax.", + "comment": "Configuration of http sources\nSee docs/Terraform.md for more information\nname: source name visible in sumologic platform\nconfig-name: This is mostly for backward compatibility", + "properties": { + "metrics": { + "type": "object", + "description": "", + "properties": { + "default": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "(default-metrics)" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-metrics" + } + } + }, + "default-otlp": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "metrics-otlp" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-metrics-otlp" + }, + "properties": { + "type": "object", + "description": "", + "properties": { + "content_type": { + "type": "string", + "description": "", + "default": "Otlp" + } + } + } + } + }, + "apiserver": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "apiserver-metrics" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-metrics-apiserver" + } + } + }, + "controller": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "kube-controller-manager-metrics" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-metrics-kube-controller-manager" + } + } + }, + "scheduler": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "kube-scheduler-metrics" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-metrics-kube-scheduler" + } + } + }, + "state": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "kube-state-metrics" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-metrics-kube-state" + } + } + }, + "kubelet": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "kubelet-metrics" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-metrics-kubelet" + } + } + }, + "node": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "node-exporter-metrics" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-metrics-node-exporter" + } + } + }, + "control-plane": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "control-plane-metrics" + } + } + } + } + }, + "logs": { + "type": "object", + "description": "", + "properties": { + "default": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "logs" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-logs" + }, + "properties": { + "type": "object", + "description": "", + "comment": "Properties can be used to extend default settings, such as processing rules, fields etc", + "properties": { + "default_date_formats": { + "type": "array", + "description": 
"", + "items": [ + { + "comment": "Ensures that timestamp key has precedence over timestamp auto discovery", + "default": { + "format": "epoch", + "locator": "\\\"timestamp\\\":(\\\\d+)" + } + } + ] + } + } + }, + "filters": { + "type": "array", + "description": "", + "commented": true, + "default": [ + { + "name": "Test Exclude Debug", + "filter_type": "Exclude", + "regexp": ".*DEBUG.*" + } + ] + } + } + }, + "default-otlp": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "logs-otlp" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-logs-otlp" + }, + "properties": { + "type": "object", + "description": "", + "properties": { + "content_type": { + "type": "string", + "description": "", + "default": "Otlp" + } + } + } + } + } + } + }, + "events": { + "type": "object", + "description": "", + "properties": { + "default": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "events" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-events" + }, + "properties": { + "type": "object", + "description": "", + "properties": { + "default_date_formats": { + "type": "array", + "description": "", + "items": [ + { + "comment": "Ensures that timestamp key has precedence over timestamp auto discovery", + "default": { + "format": "epoch", + "locator": "\\\"timestamp\\\":(\\\\d+)" + } + } + ] + } + } + } + } + }, + "default-otlp": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "events-otlp" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-events-otlp" + }, + "properties": { + "type": "object", + "description": "", + "properties": { + "content_type": { + "type": "string", + "description": "", + "default": "Otlp" + } + } + } + } + } + } + }, + "traces": { + "type": "object", + "description": "", + "properties": { + "default": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "traces" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-traces" + }, + "properties": { + "type": "object", + "description": "", + "properties": { + "content_type": { + "type": "string", + "description": "", + "default": "Zipkin" + } + } + } + } + }, + "default-otlp": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "traces-otlp" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-traces-otlp" + }, + "properties": { + "type": "object", + "description": "", + "properties": { + "content_type": { + "type": "string", + "description": "", + "default": "Otlp" + } + } + } + } + } + } + } + } + } + } + }, + "otelcolImage": { + "type": "object", + "description": "", + "comment": "Global configuration for OpenTelemetry Collector", + "properties": { + "repository": { + "type": "string", + "description": "Default image repository for OpenTelemetry Collector. This can be overridden for specific components.", + "default": "public.ecr.aws/sumologic/sumologic-otel-collector" + }, + "tag": { + "type": "string", + "description": "Default image tag for OpenTelemetry Collector. 
This can be overridden for specific components.", + "default": "0.85.0-sumo-0" + }, + "addFipsSuffix": { + "type": "boolean", + "description": "Add a `-fips` suffix to all image tags. See [docs/security-best-practices.md](/docs/security-best-practices.md) for more information.", + "default": false, + "comment": "Add a -fips suffix to all image tags. With default tags, this results in FIPS-compliant otel images.\nSee https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/fips.md for more information." + } + } + }, + "events": { + "type": "object", + "description": "", + "comment": "Configuration for collection of Kubernetes events", + "properties": { + "enabled": { + "type": "boolean", + "description": "Defines whether collection of Kubernetes events is enabled.", + "default": true + }, + "sourceName": { + "type": "string", + "description": "Source name for the Events source.", + "default": "events", + "comment": "Source name for the Events source. Default: \"events\"" + }, + "sourceCategory": { + "type": "string", + "description": "Source category for the Events source.", + "default": "kubernetes/events", + "commented": true, + "comment": "Source category for the Events source. Default: \"\" which is resolved to \"{clusterName}/events\"" + }, + "sourceCategoryReplaceDash": { + "type": "string", + "description": "Used to replace - with another character.", + "default": "/", + "comment": "Used to replace '-' with another character." + }, + "persistence": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable persistence for the event collector. Persistence lets the collector avoid reingesting events on restart and buffer them locally if unable to reach the backend.", + "default": true + }, + "size": { + "type": "string", + "description": "Size of the persistent storage volume", + "default": "10Gi" + }, + "persistentVolume": { + "type": "object", + "description": "", + "comment": "Configuration for the Persistent Volume and Persistent Volume Claim\nwhere the storage is kept", + "properties": { + "path": { + "type": "string", + "description": "Local filesystem path the persistent storage volume will be mounted at.", + "default": "/var/lib/storage/events" + }, + "accessMode": { + "type": "string", + "description": "The accessMode for the persistent storage volume", + "default": "ReadWriteOnce" + }, + "pvcLabels": { + "type": "object", + "description": "Additional PersistentVolumeClaim labels for persistent storage volumes", + "default": {}, + "comment": "Add custom labels to otelcol event statefulset PVC" + }, + "storageClass": { + "type": "string", + "description": "The storageClassName for the persistent storage volume", + "default": "", + "commented": true + } + } + } + } + }, + "sourceType": { + "type": "string", + "description": "The type of the Sumo Logic source being used for events ingestion. 
Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/logs-metrics/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/).", + "default": "http" + } + } + }, + "logs": { + "type": "object", + "comment": "Logs configuration\nSet the enabled flag to false for disabling logs ingestion altogether.", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Set the enabled flag to false for disabling logs ingestion altogether.", + "default": true + }, + "collector": { + "type": "object", + "description": "", + "properties": { + "otelcol": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable OpenTelemetry logs collector.", + "default": true + } + } + }, + "otelcloudwatch": { + "comment": "Experimental", + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to enable CloudWatch Collection", + "default": false + }, + "roleArn": { + "type": "string", + "description": "AWS role ARN, to authenticate with CloudWatch", + "default": "" + }, + "persistence": { + "type": "object", + "comment": "Configure persistence for the cloudwatch collector", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control persistence for the CloudWatch collector", + "default": true + } + } + }, + "region": { + "type": "string", + "description": "EKS Fargate cluster region", + "default": "" + }, + "pollInterval": { + "type": "string", + "description": "CloudWatch poll interval", + "default": "1m" + }, + "logGroups": { + "comment": "A map of log group and stream prefixes\nThis is a map of log group and stream prefix, for example:\nlogGroups:\n fluent-bit:\n names: [fluent-bit]", + "type": "object", + "description": "Log Groups configuration for AWS CloudWatch receiver", + "default": {} + } + } + } + } + }, + "multiline": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable multiline detection for Kubernetes container logs.", + "default": true + }, + "first_line_regex": { + "type": "string", + "description": "Regular expression to match first line of multiline logs.", + "default": "^\\[?\\d{4}-\\d{1,2}-\\d{1,2}.\\d{2}:\\d{2}:\\d{2}" + }, + "additional": { + "type": "array", + "description": "List of additional conditions and expressions to match first line of multiline logs. 
See [Multiline](/docs/collecting-container-logs.md#conditional-multiline-log-parsing) for more information.", + "default": [], + "comment": "Additional configuration takes precedence over first_line_regex and are executed only for first matching condition\n\nExample:\n- first_line_regex: \"^@@@@ First Line\"\n condition: 'attributes[\"k8s.namespace.name\"] == \"foo\"'\n- first_line_regex: \"^--- First Line\"\n condition: 'attributes[\"k8s.container.name\"] matches \"^bar-.*\"\n\nNOTE: See below link for full reference:\nhttps://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/collecting-container-logs.md#conditional-multiline-log-parsing" + } + } + }, + "container": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable collecting logs from Kubernetes containers.", + "default": true + }, + "format": { + "type": "string", + "comment": "Format to post logs into Sumo: fields, json, json_merge, or text.\nNOTE: json is an alias for fields\nNOTE: Multiline log detection works differently for `text` format. See below link for full reference:\nhttps://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/collecting-container-logs.md#text-log-format", + "description": "Format for container logs.", + "default": "fields" + }, + "keep_time_attribute": { + "comment": "When set to `true`, preserves the `time` attribute, which is a string representation of the `timestamp` attribute.", + "type": "boolean", + "description": "When set to `true`, preserves the `time` attribute, which is a string representation of the `timestamp` attribute.", + "default": false + }, + "otelcol": { + "type": "object", + "description": "", + "properties": { + "extraProcessors": { + "comment": "Extra processors for container logs. See [/docs/collecting-container-logs.md](/docs/collecting-container-logs.md) for details.", + "type": "array", + "description": "Extra processors for container logs. See [/docs/collecting-container-logs.md](/docs/collecting-container-logs.md) for details.", + "default": [] + } + } + }, + "sourceHost": { + "comment": "Set the _sourceHost metadata field in Sumo Logic.", + "type": "string", + "description": "Set the \\_sourceHost metadata field in Sumo Logic.", + "default": "" + }, + "sourceName": { + "type": "string", + "comment": "Set the _sourceName metadata field in Sumo Logic.", + "description": "Set the \\_sourceName metadata field in Sumo Logic.", + "default": "%{namespace}.%{pod}.%{container}" + }, + "sourceCategory": { + "comment": "Set the _sourceCategory metadata field in Sumo Logic.", + "type": "string", + "description": "Set the \\_sourceCategory metadata field in Sumo Logic.", + "default": "%{namespace}/%{pod_name}" + }, + "sourceCategoryPrefix": { + "comment": "Set the prefix, for _sourceCategory metadata.", + "type": "string", + "description": "Set the prefix, for \\_sourceCategory metadata.", + "default": "kubernetes/" + }, + "sourceCategoryReplaceDash": { + "comment": "Used to replace - with another character.", + "type": "string", + "description": "Used to replace - with another character.", + "default": "/" + }, + "excludeContainerRegex": { + "type": "string", + "comment": "A regular expression for containers.\nMatching containers will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for container names. 
Logs from matching containers will not be sent to Sumo.", + "default": "" + }, + "excludeHostRegex": { + "type": "string", + "description": "A regular expression for Kubernetes node names. Logs from pods running on matching nodes will not be sent to Sumo.", + "default": "", + "comment": "A regular expression for hosts.\nMatching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol)." + }, + "excludeNamespaceRegex": { + "type": "string", + "comment": "A regular expression for namespaces.\nMatching namespaces will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for Kubernetes namespace names. Logs from pods running in matching namespaces will not be sent to Sumo.", + "default": "" + }, + "excludePodRegex": { + "type": "string", + "comment": "A regular expression for pods.\nMatching pods will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for pod names. Logs from matching pods will not be sent to Sumo.", + "default": "" + }, + "perContainerAnnotationsEnabled": { + "type": "boolean", + "description": "Enable container-level pod annotations.", + "comment": "Defines whether container-level pod annotations are enabled.", + "default": false + }, + "perContainerAnnotationPrefixes": { + "type": "array", + "description": "Defines the list of prefixes of container-level pod annotations.", + "comment": "Defines the list of prefixes of container-level pod annotations.", + "default": [] + } + } + }, + "systemd": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable collecting systemd logs from Kubernetes nodes.", + "default": true + }, + "units": { + "type": "array", + "comment": "systemd units to collect logs from", + "commented": true, + "description": "List of systemd units to collect logs from.", + "default": [ + "docker.service" + ] + }, + "otelcol": { + "type": "object", + "description": "", + "properties": { + "extraProcessors": { + "type": "array", + "comment": "Extra processors for systemd logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details.", + "description": "Extra processors for systemd logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details.", + "default": [] + } + } + }, + "sourceName": { + "comment": "Set the _sourceName metadata field in Sumo Logic.", + "type": "string", + "description": "Set the \\_sourceName metadata field in Sumo Logic.", + "default": "%{_sourceName}" + }, + "sourceCategory": { + "comment": "Set the _sourceCategory metadata field in Sumo Logic.", + "type": "string", + "description": "Set the \\_sourceCategory metadata field in Sumo Logic.", + "default": "system" + }, + "sourceCategoryPrefix": { + "comment": "Set the prefix, for _sourceCategory metadata.", + "type": "string", + "description": "Set the prefix, for \\_sourceCategory metadata.", + "default": "kubernetes/" + }, + "sourceCategoryReplaceDash": { + "comment": "Used to replace - with another character.", + "type": "string", + "description": "Used to replace - with another character.", + "default": "/" + }, + "excludeFacilityRegex": { + "type": "string", + "comment": "A regular expression for facility.\nMatching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for facility.
Matching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "default": "" + }, + "excludeHostRegex": { + "type": "string", + "comment": "A regular expression for hosts.\nMatching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "default": "" + }, + "excludePriorityRegex": { + "type": "string", + "comment": "A regular expression for priority.\nMatching priority will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for priority. Matching priority will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "default": "" + }, + "excludeUnitRegex": { + "type": "string", + "comment": "A regular expression for unit.\nMatching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for unit. Matching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "default": "" + } + } + }, + "kubelet": { + "type": "object", + "description": "", + "properties": { + "otelcol": { + "type": "object", + "description": "", + "properties": { + "extraProcessors": { + "type": "array", + "comment": "Extra processors for kubelet logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details.", + "description": "Extra processors for systemd logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details.", + "default": [] + } + } + }, + "sourceName": { + "type": "string", + "comment": "Set the _sourceName metadata field in Sumo Logic.", + "description": "Set the \\_sourceName metadata field in Sumo Logic.", + "default": "k8s_kubelet" + }, + "sourceCategory": { + "type": "string", + "comment": "Set the _sourceCategory metadata field in Sumo Logic.", + "description": "Set the \\_sourceCategory metadata field in Sumo Logic.", + "default": "kubelet" + }, + "sourceCategoryPrefix": { + "type": "string", + "comment": "Set the prefix, for _sourceCategory metadata.", + "description": "Set the prefix, for \\_sourceCategory metadata.", + "default": "kubernetes/" + }, + "sourceCategoryReplaceDash": { + "type": "string", + "comment": "Used to replace - with another character.", + "description": "Used to replace - with another character.", + "default": "/" + }, + "excludeFacilityRegex": { + "type": "string", + "comment": "A regular expression for facility.\nMatching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for facility. Matching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "default": "" + }, + "excludeHostRegex": { + "type": "string", + "comment": "A regular expression for hosts.\nMatching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "default": "" + }, + "excludePriorityRegex": { + "type": "string", + "comment": "A regular expression for priority.\nMatching priority will be excluded from Sumo. 
The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for priority. Matching priority will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "default": "" + }, + "excludeUnitRegex": { + "type": "string", + "comment": "A regular expression for unit.\nMatching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for unit. Matching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "default": "" + } + } + }, + "fields": { + "type": "array", + "comment": "Fields to be created at Sumo Logic to ensure logs are tagged with\nrelevant metadata.\nhttps://help.sumologic.com/docs/manage/fields/#manage-fields", + "description": "Fields to be created at Sumo Logic to ensure logs are tagged with relevant metadata. [Sumo Logic help](https://help.sumologic.com/docs/manage/fields/#manage-fields)", + "default": [ + "cluster", + "container", + "daemonset", + "deployment", + "host", + "namespace", + "node", + "pod", + "service", + "statefulset" + ] + }, + "additionalFields": { + "type": "array", + "comment": "Additional fields to be created in Sumo Logic.\nhttps://help.sumologic.com/docs/manage/fields/#manage-fields", + "description": "Additional Fields to be created in Sumo Logic. [Sumo Logic help](https://help.sumologic.com/docs/manage/fields/#manage-fields)", + "default": [] + }, + "sourceType": { + "type": "string", + "description": "The type of the Sumo Logic source being used for logs ingestion. Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/logs-metrics/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/).", + "default": "http" + } + } + }, + "metrics": { + "type": "object", + "description": "", + "comment": "Metrics configuration\nSet the enabled flag to false for disabling metrics ingestion altogether.", + "properties": { + "enabled": { + "type": "boolean", + "description": "Set the enabled flag to false for disabling metrics ingestion altogether.", + "default": true + }, + "collector": { + "type": "object", + "description": "", + "comment": "Otel metrics collector. Replaces Prometheus.\nTo enable, you need opentelemetry-operator enabled as well.", + "properties": { + "otelcol": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable experimental otelcol metrics collector", + "default": false + }, + "scrapeInterval": { + "type": "string", + "description": "The default scrape interval for the collector.", + "comment": "Default scrape interval", + "default": "30s" + }, + "autoscaling": { + "comment": "Option to turn autoscaling on for otelcol and specify params for HPA.\nAutoscaling needs metrics-server to access cpu metrics.", + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Option to turn autoscaling on for the experimental otelcol metrics collector and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics.", + "default": false + }, + "minReplicas": { + "type": "integer", + "description": "Default min replicas for autoscaling of the experimental otelcol metrics collector.", + "default": 3 + }, + "maxReplicas": { + "type": "integer", + "description": "Default max replicas for autoscaling of the experimental otelcol metrics collector.", + "default": 10 + }, + "targetCPUUtilizationPercentage": { + "type": "integer", + "description": "The desired target CPU utilization for autoscaling.", + "default": 70 + }, + "targetMemoryUtilizationPercentage": { + "type": "integer", + "description": "The desired target memory utilization for autoscaling.", + "default": 70 + } + } + }, + "nodeSelector": { + "type": "object", + "description": "Node selector for the experimental otelcol metrics. [See docs/best-practices.md for more information.](/docs/best-practices.md).", + "default": {} + }, + "podAnnotations": { + "comment": "Add custom annotations only to metrics otelcol sts pods", + "type": "object", + "description": "Additional annotations for the experimental otelcol metrics pods.", + "default": {} + }, + "podLabels": { + "comment": "Add custom labels only to metrics otelcol sts pods", + "type": "object", + "description": "Additional labels for the experimental otelcol metrics pods.", + "default": {} + }, + "priorityClassName": { + "comment": "Option to define priorityClassName to assign a priority class to pods.", + "type": "string", + "description": "Priority class name for the experimental otelcol metrics.", + "default": "" + }, + "replicaCount": { + "type": "integer", + "description": "Replica count for the experimental otelcol metrics collector", + "default": 1 + }, + "resources": { + "type": "object", + "description": "Resource requests and limits for the experimental otelcol metrics collector", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "2Gi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "1000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "768Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "100m" + } + } + } + } + }, + "serviceMonitorSelector": { + "commented": true, + "comment": "Selector for ServiceMonitors used for target discovery. By default, this selects resources created by this Chart.\nSee https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr", + "type": "object", + "description": "Selector for ServiceMonitors used for target discovery. By default, we select ServiceMonitors created by the Chart. See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr", + "default": {} + }, + "podMonitorSelector": { + "comment": "Selector for PodMonitors used for target discovery. By default, this selects resources created by this Chart.\nSee https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr", + "commented": true, + "type": "object", + "description": "Selector for PodMonitors used for target discovery. By default, we select PodMonitors created by the Chart. See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr", + "default": {} + }, + "securityContext": { + "type": "object", + "description": "The securityContext configuration for the experimental otelcol metrics.", + "properties": { + "fsGroup": { + "type": "integer", + "comment": "The group ID of all processes in the statefulset containers.
This can be anything, but it does need to be set.\nThe default is 0 (root), and containers don't have write permissions for volumes in that case.", + "description": "", + "default": 999 + } + } + }, + "tolerations": { + "type": "array", + "description": "Tolerations for the experimental otelcol metrics.", + "default": [] + }, + "kubelet": { + "type": "object", + "description": "", + "comment": "Configuration for kubelet metrics", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable collection of kubelet metrics.", + "default": true + } + } + }, + "cAdvisor": { + "type": "object", + "comment": "Configuration for cAdvisor metrics", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable collection of cAdvisor metrics.", + "default": true + } + } + }, + "annotatedPods": { + "type": "object", + "comment": "Enable collection of metrics from Pods annotated with prometheus.io/* keys.\nSee https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/collecting-application-metrics.md#application-metrics-are-exposed-one-endpoint-scenario for more information.", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable collection of metrics from Pods annotated with prometheus.io/\\* keys. See [docs/collecting-application-metrics.md](/docs/collecting-application-metrics.md#application-metrics-are-exposed-one-endpoint-scenario) for more information.", + "default": true + } + } + }, + "allocationStrategy": { + "comment": "Allocation strategy for the scrape target allocator. Valid values are: least-weighted and consistent-hashing.\nSee: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocator", + "type": "string", + "description": "Allocation strategy for the scrape target allocator. Valid values are: least-weighted and consistent-hashing. See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocator", + "default": "least-weighted" + } + } + } + } + }, + "enableDefaultFilters": { + "type": "boolean", + "comment": "Default metric filters for Sumo Apps", + "description": "Enable default metric filters for Sumo Apps.", + "default": false + }, + "dropHistogramBuckets": { + "comment": "By default, the Helm Chart collects some high-cardinality histogram metrics, as Sumo Apps make use of the sum and count components.\nThis setting causes the metrics collector to drop the actual histogram buckets, keeping only the sum and the count.\nThis affects the following metrics:\n- apiserver_request_duration_seconds\n- coredns_dns_request_duration_seconds\n- kubelet_runtime_operations_duration_seconds", + "type": "boolean", + "description": "Drop buckets from select high-cardinality histogram metrics, leaving only the sum and count components.", + "default": true + }, + "otelcol": { + "type": "object", + "description": "", + "properties": { + "extraProcessors": { + "comment": "Includes additional processors into pipelines.\nIt can be used for filtering metrics, renaming, changing metadata and so on.\nThis is list of objects, for example:\nextraProcessors:\n- filterprocessor:\n exclude:\n match_type: strict\n metric_names:\n - hello_world\n - hello/world", + "type": "array", + "description": "Extra processors configuration for metrics pipeline. 
See [/docs/collecting-application-metrics.md#metrics-modifications](/docs/collecting-application-metrics.md#metrics-modifications) for more information.", + "default": [] + } + } + }, + "remoteWriteProxy": { + "comment": "Enable a load balancing proxy for Prometheus remote writes.\nPrometheus remote write uses a single persistent HTTP connection per target,\nwhich interacts poorly with TCP load balancing with iptables that K8s Services do.\nUse a real HTTP load balancer for this instead.\nThis is an advanced feature, enable only if you're experiencing performance\nissues with metrics metadata enrichment.", + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable a load balancing proxy for Prometheus remote writes. [See docs for more information.](/docs/prometheus.md#using-a-load-balancing-proxy-for-prometheus-remote-write)", + "default": true + }, + "config": { + "type": "object", + "description": "", + "properties": { + "clientBodyBufferSize": { + "comment": "Increase this if you've increased samples_per_send in Prometheus to prevent nginx\nfrom spilling proxied request bodies to disk", + "type": "string", + "description": "See the [nginx documentation](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size). Increase if you've also increased samples per send in Prometheus remote write.", + "default": "64k" + }, + "workerCountAutotune": { + "comment": "This feature autodetects how much CPU is assigned to the nginx instance and sets\nthe right amount of workers based on that. Disable to use the default of 8 workers.", + "type": "boolean", + "description": "This feature autodetects how much CPU is assigned to the nginx instance and sets the right amount of workers based on that.
Disable to use the default of 8 workers.", + "default": true + }, + "port": { + "comment": "Nginx listen port", + "type": "integer", + "description": "Port on which remote write proxy is going to be exposed", + "default": 8080 + }, + "enableAccessLogs": { + "comment": "Nginx access logs", + "type": "boolean", + "description": "Enable nginx access logs.", + "default": false + } + } + }, + "replicaCount": { + "type": "integer", + "description": "Number of replicas in the remote write proxy deployment.", + "default": 3 + }, + "image": { + "type": "object", + "description": "Nginx docker image for the remote write proxy.", + "properties": { + "repository": { + "type": "string", + "description": "", + "default": "public.ecr.aws/sumologic/nginx-unprivileged" + }, + "tag": { + "type": "string", + "description": "", + "default": "1.25.2-alpine" + }, + "pullPolicy": { + "type": "string", + "description": "", + "default": "IfNotPresent" + } + } + }, + "resources": { + "type": "object", + "description": "Resource requests and limits for the remote write proxy container.", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "cpu": { + "type": "string", + "description": "", + "default": "1000m" + }, + "memory": { + "type": "string", + "description": "", + "default": "256Mi" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "cpu": { + "type": "string", + "description": "", + "default": "100m" + }, + "memory": { + "type": "string", + "description": "", + "default": "128Mi" + } + } + } + } + }, + "livenessProbe": { + "type": "object", + "description": "Liveness probe settings for the remote write proxy container.", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 30 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 5 + }, + "successThreshold": { + "type": "integer", + "description": "", + "default": 1 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 6 + } + } + }, + "readinessProbe": { + "type": "object", + "description": "Readiness probe settings for the remote write proxy container.", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 5 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 5 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "successThreshold": { + "type": "integer", + "description": "", + "default": 1 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "securityContext": { + "type": "object", + "description": "The securityContext configuration for the remote write proxy.", + "default": {} + }, + "nodeSelector": { + "type": "object", + "description": "Node selector for the remote write proxy deployment. 
[See docs/best-practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "Tolerations for the remote write proxy deployment.", + "default": [] + }, + "affinity": { + "type": "object", + "description": "Affinity for the remote write proxy deployment.", + "default": {} + }, + "priorityClassName": { + "type": "string", + "comment": "Option to define priorityClassName to assign a priority class to pods.", + "description": "Priority class name for the remote write proxy deployment.", + "default": "" + }, + "podLabels": { + "type": "object", + "comment": "Add custom labels only to metrics sts pods", + "description": "Additional labels for the remote write proxy container.", + "default": {} + }, + "podAnnotations": { + "comment": "Add custom annotations only to metrics sts pods", + "type": "object", + "description": "Additional annotations for the remote write proxy container.", + "default": {} + } + } + }, + "serviceMonitors": { + "type": "array", + "comment": "Prometheus serviceMonitors related to Sumo Logic services\nThey are applied only if kube-prometheus-stack is enabled", + "description": "Configuration of Sumo Logic Kubernetes Collection components serviceMonitors", + "default": [ + { + "name": "collection-sumologic-otelcol-logs", + "additionalLabels": { + "sumologic.com/app": "otelcol-logs" + }, + "endpoints": [ + { + "port": "otelcol-metrics" + } + ], + "selector": { + "matchLabels": { + "sumologic.com/app": "otelcol-logs", + "sumologic.com/scrape": "true" + } + } + }, + { + "name": "collection-sumologic-otelcol-metrics", + "additionalLabels": { + "sumologic.com/app": "otelcol-metrics" + }, + "endpoints": [ + { + "port": "otelcol-metrics" + } + ], + "selector": { + "matchLabels": { + "sumologic.com/app": "otelcol-metrics", + "sumologic.com/scrape": "true" + } + } + }, + { + "name": "collection-sumologic-metrics-collector", + "additionalLabels": { + "sumologic.com/app": "otelcol-metrics" + }, + "endpoints": [ + { + "port": "monitoring" + } + ], + "selector": { + "matchLabels": { + "sumologic.com/app": "otelcol", + "sumologic.com/component": "metrics", + "sumologic.com/scrape": "true" + } + } + }, + { + "name": "collection-sumologic-otelcol-logs-collector", + "additionalLabels": { + "sumologic.com/app": "otelcol-logs-collector" + }, + "endpoints": [ + { + "port": "metrics" + } + ], + "selector": { + "matchLabels": { + "sumologic.com/app": "otelcol-logs-collector", + "sumologic.com/scrape": "true" + } + } + }, + { + "name": "collection-sumologic-otelcol-events", + "additionalLabels": { + "sumologic.com/app": "otelcol-events" + }, + "endpoints": [ + { + "port": "otelcol-metrics" + } + ], + "selector": { + "matchLabels": { + "sumologic.com/app": "otelcol-events", + "sumologic.com/scrape": "true" + } + } + }, + { + "name": "collection-sumologic-otelcol-traces", + "additionalLabels": { + "sumologic.com/app": "otelcol" + }, + "endpoints": [ + { + "port": "metrics" + } + ], + "selector": { + "matchLabels": { + "sumologic.com/component": "instrumentation", + "sumologic.com/scrape": "true" + } + } + }, + { + "name": "collection-sumologic-prometheus", + "endpoints": [ + { + "port": "http-web", + "path": "/metrics", + "metricRelabelings": [ + { + "action": "keep", + "regex": "prometheus_remote_storage_.*", + "sourceLabels": [ + "__name__" + ] + } + ] + } + ], + "selector": { + "matchLabels": { + "app": "kube-prometheus-stack-prometheus" + } + } + } + ] + }, + "sourceType": { + "type": "string", + "comment": "The
type of source we send to in Sumo. The possible values are http and otlp.\nConsult the documentation for more information.", + "description": "The type of the Sumo Logic source being used for metrics ingestion. Can be `http` or `otlp`.", + "default": "http" + } + } + }, + "traces": { + "type": "object", + "comment": "Traces configuration\nSet the enabled flag to false to disable traces ingestion.", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Set the enabled flag to true to enable tracing ingestion. _Tracing must be enabled for the account first. Please contact your Sumo representative for activation details_", + "default": true + }, + "spans_per_request": { + "type": "integer", + "comment": "How many spans per request should be sent to the receiver", + "description": "Maximum number of spans sent in a single batch", + "default": 100 + }, + "sourceType": { + "type": "string", + "description": "The type of the Sumo Logic source being used for traces ingestion. Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/traces/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/).", + "default": "http" + } + } + } + } + }, + "metrics-server": { + "type": "object", + "comment": "Configure metrics-server\nref: https://github.com/bitnami/charts/blob/master/bitnami/metrics-server/values.yaml", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "comment": "Set the enabled flag to true for enabling metrics-server.\nThis is required before enabling autoscaling unless you have an existing metrics-server in the cluster.", + "description": "Set the enabled flag to true for enabling metrics-server. This is required before enabling autoscaling unless you have an existing metrics-server in the cluster.", + "default": false + }, + "fullnameOverride": { + "type": "string", + "comment": "Put here the new name if you want to override the full name used for metrics-server components.", + "commented": true, + "description": "Used to override the chart's full name.", + "default": "" + }, + "apiService": { + "type": "object", + "description": "", + "properties": { + "create": { + "type": "boolean", + "description": "Specifies whether the v1beta1.metrics.k8s.io API service should be created.", + "default": true + } + } + }, + "extraArgs": { + "type": "array", + "description": "Extra arguments to pass to metrics-server on start up.", + "default": [ + "--kubelet-insecure-tls=true", + "--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname" + ] + }, + "image": { + "type": "object", + "comment": "Optionally specify image options for metrics-server", + "commented": true, + "description": "", + "properties": { + "pullSecrets": { + "type": "array", + "comment": "Optionally specify an array of imagePullSecrets.\nSecrets must be manually created in the namespace.\nref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\n", + "description": "Pull secrets for metrics-server images.
For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config).", + "default": [ + "imagepullsecret" + ] + } + } + } + } + }, + "kube-prometheus-stack": { + "type": "object", + "comment": "Configure kube-prometheus-stack\nref: https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "comment": "Uncomment the flag below to not install kube-prometheus-stack helm chart\nas a dependency along with this helm chart.\nThis is needed e.g. if you want to use a different version of kube-prometheus-stack -\nsee https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/best-practices.md#using-newer-kube-prometheus-stack.\nTo disable metrics collection, set `sumologic.metrics.enabled: false` and leave this flag commented out or set it to `false`.\nDo not set this flag explicitly to `true` while at the same time setting `sumologic.metrics.enabled: false`,\nas this will make Prometheus try to write to a non-existent metrics enrichment service.", + "commented": true, + "description": "Flag to control deploying Prometheus Operator Helm sub-chart.", + "default": false + }, + "global": { + "type": "object", + "commented": true, + "description": "", + "properties": { + "imagePullSecrets": { + "type": "array", + "comment": "Reference to one or more secrets to be used when pulling images\nref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\n", + "description": "Pull secrets for Kube Prometheus Stack images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config).", + "default": [ + { + "name": "image-pull-secret" + } + ] + } + } + }, + "fullnameOverride": { + "type": "string", + "comment": "Put here the new name if you want to override the full name used for Kube Prometheus Stack components.", + "commented": true, + "description": "Used to override the chart's full name.", + "default": "" + }, + "namespaceOverride": { + "type": "string", + "comment": "Put here the new namespace if you want to override the namespace used for Kube Prometheus Stack components.", + "commented": true, + "description": "Used to override the chart's default namespace.", + "default": "" + }, + "kubeTargetVersionOverride": { + "type": "string", + "comment": "Provide a target gitVersion of K8S, in case .Capabilities.KubeVersion is not available (e.g. helm template).\nChanging this may break Sumo Logic apps.", + "commented": true, + "description": "Provide a target gitVersion of K8S, in case .Capabilities.KubeVersion is not available (e.g. helm template).
Changing this may break Sumo Logic apps.", + "default": "" + }, + "commonLabels": { + "type": "object", + "comment": "Labels to apply to all kube-prometheus-stack resources", + "description": "Labels to apply to all Kube Prometheus Stack resources", + "default": {} + }, + "defaultRules": { + "type": "object", + "description": "", + "properties": { + "rules": { + "type": "object", + "description": "Control which default recording and alerting rules are enabled.", + "properties": { + "alertmanager": { + "type": "boolean", + "description": "", + "default": false + }, + "etcd": { + "type": "boolean", + "description": "", + "default": false + }, + "configReloaders": { + "type": "boolean", + "description": "", + "default": false + }, + "general": { + "type": "boolean", + "description": "", + "default": false + }, + "k8s": { + "type": "boolean", + "description": "", + "default": false + }, + "kubeApiserverAvailability": { + "type": "boolean", + "description": "", + "default": false + }, + "kubeApiserverBurnrate": { + "type": "boolean", + "description": "", + "default": false + }, + "kubeApiserverHistogram": { + "type": "boolean", + "description": "", + "default": false + }, + "kubeApiserverSlos": { + "type": "boolean", + "description": "", + "default": false + }, + "kubeControllerManager": { + "type": "boolean", + "description": "", + "default": false + }, + "kubelet": { + "type": "boolean", + "description": "", + "default": false + }, + "kubeProxy": { + "type": "boolean", + "description": "", + "default": false + }, + "kubePrometheusGeneral": { + "type": "boolean", + "description": "", + "default": false + }, + "kubePrometheusNodeRecording": { + "type": "boolean", + "description": "", + "default": true + }, + "kubernetesApps": { + "type": "boolean", + "description": "", + "default": false + }, + "kubernetesResources": { + "type": "boolean", + "description": "", + "default": false + }, + "kubernetesStorage": { + "type": "boolean", + "description": "", + "default": false + }, + "kubernetesSystem": { + "type": "boolean", + "description": "", + "default": false + }, + "kubeSchedulerAlerting": { + "type": "boolean", + "description": "", + "default": false + }, + "kubeSchedulerRecording": { + "type": "boolean", + "description": "", + "default": false + }, + "kubeStateMetrics": { + "type": "boolean", + "description": "", + "default": false + }, + "network": { + "type": "boolean", + "description": "", + "default": false + }, + "node": { + "type": "boolean", + "description": "", + "default": true + }, + "nodeExporterAlerting": { + "type": "boolean", + "description": "", + "default": false + }, + "nodeExporterRecording": { + "type": "boolean", + "description": "", + "default": false + }, + "prometheus": { + "type": "boolean", + "description": "", + "default": false + }, + "prometheusOperator": { + "type": "boolean", + "description": "", + "default": false + }, + "windows": { + "type": "boolean", + "description": "", + "default": false + } + } + } + } + }, + "kubeApiServer": { + "type": "object", + "comment": "NOTE changing the serviceMonitor scrape interval to be >1m can result in metrics from recording\nrules to be missing and empty panels in Sumo Logic Kubernetes apps.", + "description": "", + "properties": { + "serviceMonitor": { + "type": "object", + "description": "", + "properties": { + "interval": { + "type": "string", + "comment": "Scrape interval. If not set, the Prometheus default scrape interval is used.", + "description": "Kubernetes API Server metrics scrape interval. 
If not set, the Prometheus default scrape interval is used.", + "default": "" + }, + "metricRelabelings": { + "type": "array", + "comment": "see docs/scraped_metrics.md\napiserver_request_count\napiserver_request_total\napiserver_request_duration_seconds_count\napiserver_request_duration_seconds_sum", + "description": "Kubernetes API Server MetricRelabelConfigs", + "default": [ + { + "action": "keep", + "regex": "(?:apiserver_request_(?:count|total)|apiserver_request_(?:duration_seconds)_(?:count|sum))", + "sourceLabels": [ + "__name__" + ] + } + ] + } + } + } + } + }, + "kubelet": { + "type": "object", + "description": "", + "properties": { + "serviceMonitor": { + "type": "object", + "description": "", + "properties": { + "interval": { + "type": "string", + "comment": "Scrape interval. If not set, the Prometheus default scrape interval is used.", + "description": "Kubelet metrics scrape interval. If not set, the Prometheus default scrape interval is used.", + "default": "" + }, + "probes": { + "type": "boolean", + "comment": "Enable scraping /metrics/probes from kubelet's service", + "description": "Enable scraping /metrics/probes from kubelet's service", + "default": false + }, + "resource": { + "type": "boolean", + "comment": "Enable scraping /metrics/resource/v1alpha1 from kubelet's service", + "description": "Enable scraping /metrics/resource from kubelet's service", + "default": false + }, + "metricRelabelings": { + "type": "array", + "comment": "see docs/scraped_metrics.md\nkubelet metrics:\nkubelet_docker_operations_errors\nkubelet_docker_operations_errors_total\nkubelet_docker_operations_duration_seconds_count\nkubelet_docker_operations_duration_seconds_sum\nkubelet_runtime_operations_duration_seconds_count\nkubelet_runtime_operations_duration_seconds_sum\nkubelet_running_container_count\nkubelet_running_containers\nkubelet_running_pod_count\nkubelet_running_pods\nkubelet_docker_operations_latency_microseconds\nkubelet_docker_operations_latency_microseconds_count\nkubelet_docker_operations_latency_microseconds_sum\nkubelet_runtime_operations_latency_microseconds\nkubelet_runtime_operations_latency_microseconds_count\nkubelet_runtime_operations_latency_microseconds_sum", + "description": "Kubelet MetricRelabelConfigs", + "default": [ + { + "action": "keep", + "regex": "(?:kubelet_docker_operations_errors(?:|_total)|kubelet_(?:docker|runtime)_operations_duration_seconds_(?:count|sum)|kubelet_running_(?:container|pod)(?:_count|s)|kubelet_(:?docker|runtime)_operations_latency_microseconds(?:|_count|_sum))", + "sourceLabels": [ + "__name__" + ] + }, + { + "action": "labeldrop", + "regex": "id" + } + ] + }, + "cAdvisorMetricRelabelings": { + "type": "array", + "comment": "see docs/scraped_metrics.md\ncadvisor container metrics\ncontainer_cpu_usage_seconds_total\ncontainer_fs_limit_bytes\ncontainer_fs_usage_bytes\ncontainer_memory_working_set_bytes\ncontainer_cpu_cfs_throttled_seconds_total\ncadvisor aggregate container metrics\ncontainer_network_receive_bytes_total\ncontainer_network_transmit_bytes_total", + "description": "Kubelet CAdvisor MetricRelabelConfigs", + "items": [ + { + "default": { + "action": "keep", + "regex": "(?:container_cpu_usage_seconds_total|container_memory_working_set_bytes|container_fs_usage_bytes|container_fs_limit_bytes|container_cpu_cfs_throttled_seconds_total|container_network_receive_bytes_total|container_network_transmit_bytes_total)", + "sourceLabels": [ + "__name__" + ] + } + }, + { + "comment": "Drop container metrics with container tag set to an empty 
string:\nthese are the pod aggregated container metrics which can be aggregated\nin Sumo anyway. There's also some cgroup-specific time series we also\ndo not need.", + "default": { + "action": "drop", + "sourceLabels": [ + "__name__", + "container" + ], + "regex": "(?:container_cpu_usage_seconds_total|container_memory_working_set_bytes|container_fs_usage_bytes|container_fs_limit_bytes);$" + } + }, + { + "default": { + "action": "labelmap", + "regex": "container_name", + "replacement": "container" + } + }, + { + "default": { + "action": "drop", + "sourceLabels": [ + "container" + ], + "regex": "POD" + } + }, + { + "default": { + "action": "labeldrop", + "regex": "(id|name)" + } + } + ] + } + } + } + } + }, + "kubeControllerManager": { + "type": "object", + "description": "", + "properties": { + "serviceMonitor": { + "type": "object", + "description": "", + "properties": { + "interval": { + "type": "string", + "comment": "Scrape interval. If not set, the Prometheus default scrape interval is used.", + "description": "Kubernetes Controller Manager metrics scrape interval. If not set, the Prometheus default scrape interval is used.", + "default": "" + }, + "metricRelabelings": { + "type": "array", + "comment": "see docs/scraped_metrics.md\ncontroller manager metrics\nhttps://kubernetes.io/docs/concepts/cluster-administration/monitoring/#kube-controller-manager-metrics\ne.g.\ncloudprovider_aws_api_request_duration_seconds_bucket\ncloudprovider_aws_api_request_duration_seconds_count\ncloudprovider_aws_api_request_duration_seconds_sum", + "description": "Kubernetes Controller Manager MetricRelabelConfigs", + "default": [ + { + "action": "keep", + "regex": "(?:cloudprovider_.*_api_request_duration_seconds.*)", + "sourceLabels": [ + "__name__" + ] + } + ] + } + } + } + } + }, + "coreDns": { + "type": "object", + "description": "", + "properties": { + "serviceMonitor": { + "type": "object", + "description": "", + "properties": { + "interval": { + "type": "string", + "comment": "Scrape interval. If not set, the Prometheus default scrape interval is used.", + "description": "Core DNS metrics scrape interval. If not set, the Prometheus default scrape interval is used.", + "default": "" + }, + "metricRelabelings": { + "type": "array", + "comment": "see docs/scraped_metrics.md\ncoredns:\ncoredns_cache_entries\ncoredns_cache_hits_total\ncoredns_cache_misses_total\ncoredns_dns_request_duration_seconds_count\ncoredns_dns_request_duration_seconds_sum\ncoredns_dns_requests_total\ncoredns_dns_responses_total\ncoredns_forward_requests_total\nprocess_cpu_seconds_total\nprocess_open_fds\nprocess_resident_memory_bytes\nprocess_cpu_seconds_total\nprocess_open_fds\nprocess_resident_memory_bytes", + "description": "Core DNS MetricRelabelConfigs", + "default": [ + { + "action": "keep", + "regex": "(?:coredns_cache_(entries|(hits|misses)_total)|coredns_dns_request_duration_seconds_(count|sum)|coredns_(forward_requests|dns_requests|dns_responses)_total|process_(cpu_seconds_total|open_fds|resident_memory_bytes))", + "sourceLabels": [ + "__name__" + ] + } + ] + } + } + } + } + }, + "kubeEtcd": { + "type": "object", + "description": "", + "properties": { + "serviceMonitor": { + "type": "object", + "description": "", + "properties": { + "interval": { + "type": "string", + "comment": "Scrape interval. If not set, the Prometheus default scrape interval is used.", + "description": "Kubernetes Etcd metrics scrape interval. 
If not set, the Prometheus default scrape interval is used.", + "default": "" + }, + "metricRelabelings": { + "type": "array", + "comment": "see docs/scraped_metrics.md\netcd_request_cache_get_duration_seconds_count\netcd_request_cache_get_duration_seconds_sum\netcd_request_cache_add_duration_seconds_count\netcd_request_cache_add_duration_seconds_sum\netcd_request_cache_add_latencies_summary_count\netcd_request_cache_add_latencies_summary_sum\netcd_request_cache_get_latencies_summary_count\netcd_request_cache_get_latencies_summary_sum\netcd_helper_cache_hit_count\netcd_helper_cache_hit_total\netcd_helper_cache_miss_count\netcd_helper_cache_miss_total\netcd server:\netcd_mvcc_db_total_size_in_bytes\netcd_debugging_store_expires_total\netcd_debugging_store_watchers\netcd_disk_backend_commit_duration_seconds_bucket\netcd_disk_wal_fsync_duration_seconds_bucket\netcd_grpc_proxy_cache_hits_total\netcd_grpc_proxy_cache_misses_total\netcd_network_client_grpc_received_bytes_total\netcd_network_client_grpc_sent_bytes_total\netcd_server_has_leader\netcd_server_leader_changes_seen_total\netcd_server_proposals_applied_total\netcd_server_proposals_committed_total\netcd_server_proposals_failed_total\netcd_server_proposals_pending\nprocess_cpu_seconds_total\nprocess_open_fds\nprocess_resident_memory_bytes", + "description": "Kubernetes Etcd MetricRelabelConfigs", + "default": [ + { + "action": "keep", + "regex": "(?:etcd_request_cache_(?:add|get)_(?:duration_seconds|latencies_summary)_(?:count|sum)|etcd_helper_cache_(?:hit|miss)_(?:count|total)|etcd_mvcc_db_total_size_in_bytes|etcd_debugging_(store_(expires_total|watchers))|etcd_disk_(backend_commit|wal_fsync)_duration_seconds_.*|etcd_grpc_proxy_cache_(hits|misses)_total|etcd_network_client_grpc_(received|sent)_bytes_total|etcd_server_(has_leader|leader_changes_seen_total)|etcd_server_proposals_(pending|(applied|committed|failed)_total)|process_(cpu_seconds_total|open_fds|resident_memory_bytes))", + "sourceLabels": [ + "__name__" + ] + } + ] + } + } + } + } + }, + "kubeScheduler": { + "type": "object", + "description": "", + "properties": { + "serviceMonitor": { + "type": "object", + "description": "", + "properties": { + "interval": { + "type": "string", + "comment": "Scrape interval. If not set, the Prometheus default scrape interval is used.", + "description": "Kubernetes Scheduler metrics scrape interval. 
If not set, the Prometheus default scrape interval is used.", + "default": "" + }, + "metricRelabelings": { + "type": "array", + "comment": "see docs/scraped_metrics.md\nscheduler_e2e_* is present for K8s <1.23\nscheduler_e2e_scheduling_duration_seconds_bucket\nscheduler_e2e_scheduling_duration_seconds_count\nscheduler_e2e_scheduling_duration_seconds_sum\nscheduler_scheduling_attempt_duration_seconds is present for K8s >=1.23\nscheduler_scheduling_attempt_duration_seconds_bucket\nscheduler_scheduling_attempt_duration_seconds_count\nscheduler_scheduling_attempt_duration_seconds_sum\nscheduler_framework_extension_point_duration_seconds_bucket\nscheduler_framework_extension_point_duration_seconds_count\nscheduler_framework_extension_point_duration_seconds_sum\nscheduler_scheduling_algorithm_duration_seconds_bucket\nscheduler_scheduling_algorithm_duration_seconds_count\nscheduler_scheduling_algorithm_duration_seconds_sum", + "description": "Kubernetes Scheduler MetricRelabelConfigs", + "default": [ + { + "action": "keep", + "regex": "(?:scheduler_(?:e2e_scheduling|scheduling_attempt|framework_extension_point|scheduling_algorithm)_duration_seconds.*)", + "sourceLabels": [ + "__name__" + ] + } + ] + } + } + } + } + }, + "alertmanager": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Deploy alertmanager.", + "default": false + } + } + }, + "grafana": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "If true, deploy the grafana sub-chart.", + "default": false + }, + "defaultDashboardsEnabled": { + "type": "boolean", + "description": "Deploy default dashboards. These are loaded using the sidecar.", + "default": false + } + } + }, + "prometheusOperator": { + "type": "object", + "description": "", + "properties": { + "podLabels": { + "type": "object", + "comment": "Labels to add to the operator pod", + "description": "Additional labels for prometheus operator pods.", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Annotations to add to the operator pod", + "description": "Additional annotations for prometheus operator pods.", + "default": {} + }, + "resources": { + "type": "object", + "comment": "Resource limits for prometheus operator", + "description": "Resource limits for prometheus operator. Uses sub-chart defaults.", + "default": {}, + "example": { + "limits": { + "cpu": "200m", + "memory": "200Mi" + }, + "requests": { + "cpu": "100m", + "memory": "100Mi" + } + } + }, + "admissionWebhooks": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Create PrometheusRules admission webhooks. Mutating webhook will patch PrometheusRules objects indicating they were validated. 
Validating webhook will check the rules syntax.", + "default": false + } + } + }, + "tls": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable TLS in prometheus operator.", + "default": false + } + } + } + } + }, + "kube-state-metrics": { + "type": "object", + "comment": "Resource limits for kube-state-metrics", + "description": "", + "properties": { + "fullnameOverride": { + "type": "string", + "comment": "Put here the new name if you want to override the full name used for Kube State Metrics components.", + "commented": true, + "description": "Used to override the chart's full name.", + "default": "" + }, + "nodeSelector": { + "type": "object", + "description": "Node selector for kube-state-metrics. [See docs/best-practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "customLabels": { + "type": "object", + "comment": "Custom labels to apply to service, deployment and pods", + "description": "Custom labels to apply to service, deployment and pods. Uses sub-chart defaults.", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Additional annotations for pods in the DaemonSet", + "description": "Additional annotations for pods in the DaemonSet. Uses sub-chart defaults.", + "default": {} + }, + "resources": { + "type": "object", + "description": "Resource limits for kube state metrics. Uses sub-chart defaults.", + "default": {}, + "example": { + "limits": { + "cpu": "100m", + "memory": "64Mi" + }, + "requests": { + "cpu": "10m", + "memory": "32Mi" + } + } + }, + "image": { + "type": "object", + "comment": "latest kube-prometheus-stack version that is supported on OpenShift 4.8-4.10\nuses version 2.6.0 of kube-state-metrics, but this version has some critical vulnerabilities,\nso we bump the image manually.", + "description": "", + "properties": { + "tag": { + "type": "string", + "description": "Tag for kube-state-metrics Docker image.", + "default": "v2.7.0" + } + } + }, + "prometheus": { + "type": "object", + "description": "", + "properties": { + "monitor": { + "type": "object", + "description": "", + "properties": { + "interval": { + "type": "string", + "comment": "Scrape interval. If not set, the Prometheus default scrape interval is used.", + "description": "Kubernetes State Metrics scrape interval. 
If not set, the Prometheus default scrape interval is used.", + "default": "" + }, + "metricRelabelings": { + "type": "array", + "comment": "see docs/scraped_metrics.md\nkube_daemonset_status_current_number_scheduled\nkube_daemonset_status_desired_number_scheduled\nkube_daemonset_status_number_misscheduled\nkube_daemonset_status_number_unavailable\nkube_deployment_spec_replicas\nkube_deployment_status_replicas_available\nkube_deployment_status_replicas_unavailable\nkube_node_info\nkube_node_status_allocatable\nkube_node_status_capacity\nkube_node_status_condition\nkube_statefulset_metadata_generation\nkube_statefulset_replicas\nkube_statefulset_status_observed_generation\nkube_statefulset_status_replicas\nkube_hpa_spec_max_replicas\nkube_hpa_spec_min_replicas\nkube_hpa_status_condition\nkube_hpa_status_current_replicas\nkube_hpa_status_desired_replicas\nkube pod state metrics\nkube_pod_container_info\nkube_pod_container_resource_limits\nkube_pod_container_resource_requests\nkube_pod_container_status_ready\nkube_pod_container_status_restarts_total\nkube_pod_container_status_terminated_reason\nkube_pod_container_status_waiting_reason\nkube_pod_status_phase\nkube_pod_info\nkube_service_info\nkube_service_spec_external_ip\nkube_service_spec_type\nkube_service_status_load_balancer_ingress\nDrop unnecessary labels Prometheus adds to these metrics\nWe don't want container=kube-state-metrics on everything", + "description": "Kubernetes State Metrics MetricRelabelConfigs", + "default": [ + { + "action": "keep", + "regex": "(?:kube_statefulset_status_observed_generation|kube_statefulset_status_replicas|kube_statefulset_replicas|kube_statefulset_metadata_generation|kube_daemonset_status_current_number_scheduled|kube_daemonset_status_desired_number_scheduled|kube_daemonset_status_number_misscheduled|kube_daemonset_status_number_unavailable|kube_deployment_spec_replicas|kube_deployment_status_replicas_available|kube_deployment_status_replicas_unavailable|kube_node_info|kube_node_status_allocatable|kube_node_status_capacity|kube_node_status_condition|kube_hpa_spec_max_replicas|kube_hpa_spec_min_replicas|kube_hpa_status_(condition|(current|desired)_replicas)|kube_pod_container_info|kube_pod_container_resource_requests|kube_pod_container_resource_limits|kube_pod_container_status_ready|kube_pod_container_status_terminated_reason|kube_pod_container_status_waiting_reason|kube_pod_container_status_restarts_total|kube_pod_status_phase|kube_pod_info|kube_service_info|kube_service_spec_external_ip|kube_service_spec_type|kube_service_status_load_balancer_ingress)", + "sourceLabels": [ + "__name__" + ] + }, + { + "action": "labeldrop", + "regex": "service" + }, + { + "action": "replace", + "sourceLabels": [ + "container" + ], + "regex": "kube-state-metrics", + "targetLabel": "container", + "replacement": "" + }, + { + "action": "replace", + "sourceLabels": [ + "pod" + ], + "regex": ".*kube-state-metrics.*", + "targetLabel": "pod", + "replacement": "" + }, + { + "action": "labelmap", + "regex": "(pod|service)", + "replacement": "service_discovery_${1}" + } + ] + } + } + } + } + } + } + }, + "prometheus-node-exporter": { + "type": "object", + "comment": "Resource limits for prometheus node exporter", + "description": "", + "properties": { + "fullnameOverride": { + "type": "string", + "comment": " Put here the new name if you want to override the full name used for Prometheus Node exporter components.", + "commented": true, + "description": "Used to override the chart's full name.", + "default": "" + }, + 
"nodeSelector": { + "type": "object", + "description": "Node selector for prometheus node exporter. [See docs/best-practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "podLabels": { + "type": "object", + "comment": "Additional labels for pods in the DaemonSet", + "description": "Additional labels for prometheus-node-exporter pods.", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Additional annotations for pods in the DaemonSet", + "description": "Additional annotations for prometheus-node-exporter pods.", + "default": {} + }, + "resources": { + "type": "object", + "description": "Resource limits for node exporter. Uses sub-chart defaults.", + "default": {}, + "example": { + "limits": { + "cpu": "200m", + "memory": "50Mi" + }, + "requests": { + "cpu": "100m", + "memory": "30Mi" + } + } + }, + "prometheus": { + "type": "object", + "description": "", + "properties": { + "monitor": { + "type": "object", + "description": "", + "properties": { + "interval": { + "type": "string", + "comment": "Scrape interval. If not set, the Prometheus default scrape interval is used.", + "description": "Node Exporter scrape interval. If not set, the Prometheus default scrape interval is used.", + "default": "" + }, + "metricRelabelings": { + "type": "array", + "comment": "see docs/scraped_metrics.md\nnode exporter metrics\nnode_cpu_seconds_total\nnode_load1\nnode_load5\nnode_load15\nnode_disk_io_time_weighted_seconds_total\nnode_disk_io_time_seconds_total\nnode_vmstat_pgpgin\nnode_vmstat_pgpgout\nnode_memory_MemFree_bytes\nnode_memory_Cached_bytes\nnode_memory_Buffers_bytes\nnode_memory_MemTotal_bytes\nnode_network_receive_drop_total\nnode_network_transmit_drop_total\nnode_network_receive_bytes_total\nnode_network_transmit_bytes_total\nnode_filesystem_avail_bytes\nnode_filesystem_size_bytes\nnode_filesystem_files_free\nnode_filesystem_files", + "description": "Node Exporter MetricRelabelConfigs", + "default": [ + { + "action": "keep", + "regex": "(?:node_load1|node_load5|node_load15|node_cpu_seconds_total|node_disk_io_time_weighted_seconds_total|node_disk_io_time_seconds_total|node_vmstat_pgpgin|node_vmstat_pgpgout|node_memory_MemFree_bytes|node_memory_Cached_bytes|node_memory_Buffers_bytes|node_memory_MemTotal_bytes|node_network_receive_drop_total|node_network_transmit_drop_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_filesystem_avail_bytes|node_filesystem_size_bytes)", + "sourceLabels": [ + "__name__" + ] + } + ] + } + } + } + } + } + } + }, + "prometheus": { + "type": "object", + "description": "", + "properties": { + "additionalServiceMonitors": { + "type": "array", + "description": "List of ServiceMonitor objects to create.", + "default": [] + }, + "prometheusSpec": { + "type": "object", + "description": "", + "properties": { + "scrapeInterval": { + "type": "string", + "comment": "Prometheus default scrape interval, default from upstream Kube Prometheus Stack Helm chart\nNOTE changing the scrape interval to be >1m can result in metrics\nfrom recording rules to be missing and empty panels in Sumo Logic Kubernetes apps.", + "description": "Prometheus metrics scrape interval. 
If not set, the Prometheus default scrape interval is used.", + "default": "30s" + }, + "retention": { + "type": "string", + "comment": "Prometheus data retention period", + "description": "How long to retain metrics in Prometheus", + "default": "1d" + }, + "podMetadata": { + "type": "object", + "comment": "Add custom pod annotations and labels to prometheus pods", + "description": "", + "properties": { + "labels": { + "type": "object", + "description": "Add custom pod labels to prometheus pods", + "default": {} + }, + "annotations": { + "type": "object", + "description": "Add custom pod annotations to prometheus pods", + "default": {} + } + } + }, + "nodeSelector": { + "type": "object", + "description": "Node selector for prometheus. [See docs/Best_Practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "resources": { + "type": "object", + "comment": "Define resources requests and limits for single Pods.", + "description": "Resource limits for prometheus. Uses sub-chart defaults.", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "cpu": { + "type": "string", + "description": "", + "default": "2000m" + }, + "memory": { + "type": "string", + "description": "", + "default": "8Gi" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "cpu": { + "type": "string", + "description": "", + "default": "500m" + }, + "memory": { + "type": "string", + "description": "", + "default": "1Gi" + } + } + } + } + }, + "initContainers": { + "type": "array", + "description": "InitContainers allows injecting additional Prometheus initContainers.", + "default": [ + { + "name": "init-config-reloader", + "env": [ + { + "name": "METADATA_METRICS_SVC", + "valueFrom": { + "configMapKeyRef": { + "name": "sumologic-configmap", + "key": "metadataMetrics" + } + } + }, + { + "name": "NAMESPACE", + "valueFrom": { + "configMapKeyRef": { + "name": "sumologic-configmap", + "key": "metadataNamespace" + } + } + } + ] + } + ] + }, + "containers": { + "type": "array", + "description": "Containers allows injecting additional containers. 
This is meant to allow adding an authentication proxy to a Prometheus pod.", + "default": [ + { + "name": "config-reloader", + "env": [ + { + "name": "METADATA_METRICS_SVC", + "valueFrom": { + "configMapKeyRef": { + "name": "sumologic-configmap", + "key": "metadataMetrics" + } + } + }, + { + "name": "NAMESPACE", + "valueFrom": { + "configMapKeyRef": { + "name": "sumologic-configmap", + "key": "metadataNamespace" + } + } + } + ] + } + ] + }, + "walCompression": { + "type": "boolean", + "comment": "Enable WAL compression to reduce Prometheus memory consumption", + "description": "Enables walCompression in Prometheus", + "default": true + }, + "additionalScrapeConfigs": { + "type": "array", + "comment": "prometheus scrape config\nref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config\nscraping metrics based on annotations:\n- prometheus.io/scrape: true - to scrape metrics from the pod\n- prometheus.io/path: /metrics - path which the metric should be scraped from\n- prometheus.io/port: 9113 - port which the metric should be scraped from\nref: https://github.com/prometheus-operator/kube-prometheus/pull/16#issuecomment-424318647", + "description": "Additional Prometheus scrape configurations", + "default": [ + { + "job_name": "pod-annotations", + "kubernetes_sd_configs": [ + { + "role": "pod" + } + ], + "relabel_configs": [ + { + "source_labels": [ + "__meta_kubernetes_pod_annotation_prometheus_io_scrape" + ], + "action": "keep", + "regex": true + }, + { + "source_labels": [ + "__meta_kubernetes_pod_annotation_prometheus_io_path" + ], + "action": "replace", + "target_label": "__metrics_path__", + "regex": "(.+)" + }, + { + "source_labels": [ + "__address__", + "__meta_kubernetes_pod_annotation_prometheus_io_port" + ], + "action": "replace", + "regex": "([^:]+)(?::\\d+)?;(\\d+)", + "replacement": "$1:$2", + "target_label": "__address__" + }, + { + "source_labels": [ + "__metrics_path__" + ], + "separator": ";", + "regex": "(.*)", + "target_label": "endpoint", + "replacement": "$1", + "action": "replace" + }, + { + "source_labels": [ + "__meta_kubernetes_namespace" + ], + "action": "replace", + "target_label": "namespace" + }, + { + "action": "labelmap", + "regex": "__meta_kubernetes_pod_label_(.+)" + }, + { + "source_labels": [ + "__meta_kubernetes_pod_name" + ], + "separator": ";", + "regex": "(.*)", + "target_label": "pod", + "replacement": "$1", + "action": "replace" + } + ] + } + ] + }, + "remoteWrite": { + "type": "array", + "description": "If specified, the remote_write spec.", + "items": [ + { + "comment": "infrastructure metrics", + "default": { + "remoteTimeout": "5s", + "url": "http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics" + } + } + ] + } + }, + "serviceMonitor": { + "type": "object", + "description": "", + "properties": { + "selfMonitor": { + "type": "boolean", + "description": "Enable scraping Prometheus metrics", + "default": false + } + } + } + } + } + } + } + }, + "otelcolInstrumentation": { + "type": "object", + "description": "", + "comment": "Configure otelcol-instrumentation - Sumo OTel Distro Collector\nref: https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/opentelemetry-collector/traces.md", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enables Sumo Otel Distro Collector StatefulSet to collect telemetry data. 
[See docs for more information.](/docs/opentelemetry-collector/traces.md)", + "default": true + }, + "sourceMetadata": { + "type": "object", + "description": "", + "properties": { + "sourceName": { + "type": "string", + "comment": "Set the _sourceName metadata field in Sumo Logic.", + "description": "Set the \\_sourceName metadata field in Sumo Logic.", + "default": "%{k8s.namespace.name}.%{k8s.pod.pod_name}.%{k8s.container.name}" + }, + "sourceCategory": { + "type": "string", + "comment": "Set the _sourceCategory metadata field in Sumo Logic.", + "description": "Set the \\_sourceCategory metadata field in Sumo Logic.", + "default": "%{k8s.namespace.name}/%{k8s.pod.pod_name}" + }, + "sourceCategoryPrefix": { + "type": "string", + "comment": "Set the prefix, for _sourceCategory metadata.", + "description": "Set the prefix, for \\_sourceCategory metadata.", + "default": "kubernetes/" + }, + "sourceCategoryReplaceDash": { + "type": "string", + "comment": "Used to replace - with another character.", + "description": "Used to replace - with another character.", + "default": "/" + }, + "excludeContainerRegex": { + "type": "string", + "comment": "A regular expression for containers.\nMatching containers will be excluded from Sumo. The logs will still be sent to otelcol.", + "description": "A regular expression for containers. Matching containers will be excluded from Sumo.", + "default": "" + }, + "excludeHostRegex": { + "type": "string", + "comment": "A regular expression for hosts.\nMatching hosts will be excluded from Sumo. The logs will still be sent to otelcol.", + "description": "A regular expression for hosts. Matching hosts will be excluded from Sumo.", + "default": "" + }, + "excludeNamespaceRegex": { + "type": "string", + "comment": "A regular expression for namespaces.\nMatching namespaces will be excluded from Sumo. The logs will still be sent to otelcol.", + "description": "A regular expression for namespaces. Matching namespaces will be excluded from Sumo.", + "default": "" + }, + "excludePodRegex": { + "type": "string", + "comment": "A regular expression for pods.\nMatching pods will be excluded from Sumo. The logs will still be sent to otelcol.", + "description": "A regular expression for pods. Matching pods will be excluded from Sumo.", + "default": "" + } + } + }, + "autoscaling": { + "type": "object", + "description": "", + "comment": "Option to turn autoscaling on for otelcol and specify params for HPA.\nAutoscaling needs metrics-server to access cpu metrics.", + "properties": { + "enabled": { + "type": "boolean", + "description": "Option to turn autoscaling on for Sumo Otel Distro Collector StatefulSet and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics.", + "default": false + }, + "minReplicas": { + "type": "integer", + "description": "Default min replicas for autoscaling.", + "default": 3 + }, + "maxReplicas": { + "type": "integer", + "description": "Default max replicas for autoscaling", + "default": 10 + }, + "targetCPUUtilizationPercentage": { + "type": "integer", + "description": "The desired target CPU utilization for autoscaling.", + "default": 100 + }, + "targetMemoryUtilizationPercentage": { + "type": "integer", + "commented": true, + "description": "The desired target memory utilization for autoscaling.", + "default": 50 + } + } + }, + "statefulset": { + "type": "object", + "description": "", + "properties": { + "nodeSelector": { + "type": "object", + "description": "Node selector for otelcol-instrumentation statefulset. 
[See docs/best-practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "Tolerations for otelcol-instrumentation statefulset.", + "default": [] + }, + "topologySpreadConstraints": { + "type": "array", + "description": "TopologySpreadConstraints for otelcol-instrumentation statefulset.", + "default": [] + }, + "affinity": { + "type": "object", + "description": "Affinity for otelcol-instrumentation statefulset.", + "default": {} + }, + "podAntiAffinity": { + "type": "string", + "comment": "Acceptable values for podAntiAffinity:\nsoft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default)\nhard: specifies rules that must be met for a pod to be scheduled onto a node", + "description": "PodAntiAffinity for otelcol-instrumentation statefulset.", + "default": "soft" + }, + "replicaCount": { + "type": "integer", + "description": "Set the number of otelcol-instrumentation replicasets.", + "default": 3 + }, + "resources": { + "type": "object", + "description": "Resources for otelcol-instrumentation statefulset.", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "4Gi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "2000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "768Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "500m" + } + } + } + } + }, + "priorityClassName": { + "type": "string", + "comment": "Option to define priorityClassName to assign a priority class to pods.", + "description": "Priority class name for otelcol-instrumentation pods.", + "default": "" + }, + "podLabels": { + "type": "object", + "comment": "Add custom labels only to metrics sts pods", + "description": "Additional labels for otelcol-instrumentation pods.", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Add custom annotations only to metrics sts pods", + "description": "Additional annotations for otelcol-instrumentation pods.", + "default": {} + }, + "image": { + "type": "object", + "description": "", + "properties": { + "repository": { + "type": "string", + "commented": true, + "description": "Image repository for otelcol-instrumentation docker container.", + "default": "" + }, + "tag": { + "type": "string", + "commented": true, + "description": "Image tag for otelcol-instrumentation docker container.", + "default": "" + }, + "pullPolicy": { + "type": "string", + "description": "Image pullPolicy for otelcol-instrumentation docker container.", + "default": "IfNotPresent" + } + } + }, + "containers": { + "type": "object", + "comment": "Set securityContext for containers running in pods in otelcol-instrumentation statefulset.", + "description": "", + "properties": { + "otelcol": { + "type": "object", + "description": "", + "properties": { + "securityContext": { + "type": "object", + "description": "The securityContext configuration for the otelcol-instrumentation container.", + "default": {} + }, + "livenessProbe": { + "type": "object", + "description": "Liveness probe settings for the otelcol-instrumentation container.", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + 
"timeoutSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "readinessProbe": { + "type": "object", + "description": "Readiness probe settings for the otelcol-instrumentation container.", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 5 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "startupProbe": { + "type": "object", + "description": "Startup probe configuration for the otelcol-instrumentation container.", + "properties": { + "periodSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 60 + } + } + } + } + } + } + }, + "extraEnvVars": { + "type": "array", + "comment": "Extra Environment Values - allows yaml definitions", + "commented": true, + "description": "Additional environment variables for otelcol-instrumentation pods.", + "default": [ + { + "name": "VALUE_FROM_SECRET", + "valueFrom": { + "secretKeyRef": { + "name": "secret_name", + "key": "secret_key" + } + } + } + ] + }, + "extraVolumes": { + "type": "array", + "commented": true, + "description": "Additional volumes for otelcol-instrumentation pods.", + "default": [ + { + "name": "es-certs", + "secret": { + "defaultMode": 420, + "secretName": "es-certs" + } + } + ] + }, + "extraVolumeMounts": { + "type": "array", + "commented": true, + "description": "Additional volume mounts for otelcol-instrumentation pods.", + "default": [ + { + "name": "es-certs", + "mountPath": "/certs", + "readOnly": true + } + ] + } + } + }, + "logLevelFilter": { + "type": "boolean", + "comment": "To enable collecting all logs, set to false", + "description": "Do not send otelcol-instrumentation logs if `true`.", + "default": false + }, + "config": { + "type": "object", + "description": "Configuration for otelcol-instrumentation", + "properties": { + "receivers": { + "type": "object", + "description": "", + "properties": { + "jaeger": { + "type": "object", + "description": "", + "properties": { + "protocols": { + "type": "object", + "description": "", + "properties": { + "thrift_compact": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:6831" + } + } + }, + "thrift_binary": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:6832" + } + } + }, + "grpc": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:14250" + } + } + }, + "thrift_http": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:14268" + } + } + } + } + } + } + }, + "opencensus": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:55678" + } + } + }, + "otlp": { + "type": "object", + "description": "", + "properties": { + "protocols": { + "type": "object", + "description": "", + "properties": { + "grpc": { + "type": "object", + "description": "", + "properties": { + 
"endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:4317" + } + } + }, + "http": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:4318" + } + } + } + } + } + } + }, + "otlp/deprecated": { + "type": "object", + "description": "", + "properties": { + "protocols": { + "type": "object", + "description": "", + "properties": { + "http": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:55681" + } + } + } + } + } + } + }, + "zipkin": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:9411" + } + } + } + } + }, + "processors": { + "type": "object", + "description": "", + "properties": { + "source": { + "type": "object", + "comment": "Source processor adds Sumo Logic related metadata", + "description": "", + "properties": { + "annotation_prefix": { + "type": "string", + "description": "", + "default": "k8s.pod.annotation." + }, + "collector": { + "type": "string", + "description": "", + "default": "{{ .Values.sumologic.collectorName | default .Values.sumologic.clusterName | quote }}" + }, + "exclude": { + "type": "object", + "description": "", + "properties": { + "k8s.container.name": { + "type": "string", + "description": "", + "default": "{{ .Values.otelcolInstrumentation.sourceMetadata.excludeContainerRegex | quote }}" + }, + "k8s.host.name": { + "type": "string", + "description": "", + "default": "{{ .Values.otelcolInstrumentation.sourceMetadata.excludeHostRegex | quote }}" + }, + "k8s.namespace.name": { + "type": "string", + "description": "", + "default": "{{ .Values.otelcolInstrumentation.sourceMetadata.excludeNamespaceRegex | quote }}" + }, + "k8s.pod.name": { + "type": "string", + "description": "", + "default": "{{ .Values.otelcolInstrumentation.sourceMetadata.excludePodRegex| quote }}" + } + } + }, + "pod_key": { + "type": "string", + "description": "", + "default": "k8s.pod.name" + }, + "pod_name_key": { + "type": "string", + "description": "", + "default": "k8s.pod.pod_name" + }, + "pod_template_hash_key": { + "type": "string", + "description": "", + "default": "k8s.pod.label.pod-template-hash" + }, + "source_category": { + "type": "string", + "description": "", + "default": "{{ .Values.otelcolInstrumentation.sourceMetadata.sourceCategory | quote }}" + }, + "source_category_prefix": { + "type": "string", + "description": "", + "default": "{{ .Values.otelcolInstrumentation.sourceMetadata.sourceCategoryPrefix | quote }}" + }, + "source_category_replace_dash": { + "type": "string", + "description": "", + "default": "{{ .Values.otelcolInstrumentation.sourceMetadata.sourceCategoryReplaceDash | quote }}" + }, + "source_host": { + "type": "string", + "description": "", + "default": "%{k8s.pod.hostname}" + }, + "source_name": { + "type": "string", + "description": "", + "default": "{{ .Values.otelcolInstrumentation.sourceMetadata.sourceName | quote }}" + } + } + }, + "resource": { + "type": "object", + "comment": "Resource processor sets the associted cluster attribute", + "description": "", + "properties": { + "attributes": { + "type": "array", + "description": "", + "default": [ + { + "key": "k8s.cluster.name", + "value": "{{ include \"sumologic.clusterNameReplaceSpaceWithDash\" . 
}}", + "action": "upsert" + } + ] + } + } + }, + "resourcedetection": { + "type": "object", + "description": "", + "properties": { + "detectors": { + "type": "array", + "description": "", + "default": [ + "system" + ] + }, + "override": { + "type": "boolean", + "description": "", + "default": false + }, + "timeout": { + "type": "string", + "description": "", + "default": "10s" + } + } + }, + "k8s_tagger": { + "type": "object", + "comment": "Tags spans with K8S metadata, basing on the context IP", + "description": "", + "properties": { + "passthrough": { + "type": "boolean", + "comment": "When true, only IP is assigned and passed (so it could be tagged on another collector)", + "description": "", + "default": false + }, + "owner_lookup_enabled": { + "type": "boolean", + "comment": "When true, additional fields, such as serviceName are being also extracted", + "description": "", + "default": true + }, + "extract": { + "type": "object", + "comment": "Extracted fields and assigned names", + "description": "", + "properties": { + "metadata": { + "type": "array", + "comment": "extract the following well-known metadata fields", + "description": "", + "default": [ + "containerId", + "containerName", + "daemonSetName", + "deploymentName", + "hostName", + "namespace", + "nodeName", + "podId", + "podName", + "replicaSetName", + "serviceName", + "statefulSetName" + ] + }, + "annotations": { + "type": "array", + "description": "", + "default": [ + { + "tag_name": "k8s.pod.annotation.%s", + "key": "*" + } + ] + }, + "namespace_labels": { + "type": "array", + "description": "", + "default": [ + { + "tag_name": "k8s.namespace.label.%s", + "key": "*" + } + ] + }, + "labels": { + "type": "array", + "description": "", + "default": [ + { + "tag_name": "k8s.pod.label.%s", + "key": "*" + } + ] + } + } + } + } + }, + "memory_limiter": { + "type": "object", + "comment": "The memory_limiter processor is used to prevent out of memory situations on the collector.", + "description": "", + "properties": { + "check_interval": { + "type": "string", + "comment": "check_interval is the time between measurements of memory usage for the\npurposes of avoiding going over the limits. Defaults to zero, so no\nchecks will be performed. 
Values below 1 second are not recommended since\nit can result in unnecessary CPU consumption.", + "description": "", + "default": "5s" + }, + "limit_percentage": { + "type": "integer", + "comment": "Maximum amount of memory, in %, targeted to be allocated by the process heap.\nNote that typically the total memory usage of process will be about 50MiB higher\nthan this value.", + "description": "", + "default": 75 + }, + "spike_limit_percentage": { + "type": "integer", + "comment": "Maximum spike expected between the measurements of memory usage, in %.", + "description": "", + "default": 20 + } + } + }, + "batch": { + "type": "object", + "comment": "The batch processor accepts spans and places them into batches grouped by node and resource", + "description": "", + "properties": { + "send_batch_size": { + "type": "integer", + "comment": "Number of spans after which a batch will be sent regardless of time", + "description": "", + "default": 256 + }, + "send_batch_max_size": { + "type": "integer", + "comment": "Never more than this many spans are being sent in a batch", + "description": "", + "default": 512 + }, + "timeout": { + "type": "string", + "comment": "Time duration after which a batch will be sent regardless of size", + "description": "", + "default": "5s" + } + } + } + } + }, + "extensions": { + "type": "object", + "description": "", + "properties": { + "health_check": { + "type": "object", + "description": "", + "default": {} + }, + "memory_ballast": { + "type": "object", + "description": "", + "properties": { + "size_mib": { + "type": "integer", + "comment": "Memory Ballast size should be max 1/3 to 1/2 of memory.", + "description": "", + "default": 250 + } + } + }, + "pprof": { + "type": "object", + "description": "", + "default": {} + } + } + }, + "exporters": { + "type": "object", + "description": "", + "properties": { + "sumologic/metrics": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "${SUMO_ENDPOINT_DEFAULT_METRICS_SOURCE}" + }, + "compress_encoding": { + "type": "string", + "comment": "Compression encoding format, either empty string (\"\"), gzip or deflate (default gzip).\nEmpty string means no compression", + "description": "", + "default": "gzip" + }, + "max_request_body_size": { + "type": "integer", + "comment": "Max HTTP request body size in bytes before compression (if applied). By default 1_048_576 (1MB) is used.", + "description": "", + "default": 1048576 + }, + "log_format": { + "type": "string", + "comment": "Format to use when sending logs to Sumo. (default json) (possible values: json, text)", + "description": "", + "default": "text" + }, + "metric_format": { + "type": "string", + "comment": "Format of the metrics to be sent (default is prometheus) (possible values: carbon2, prometheus)\ncarbon2 and graphite are going to be supported soon.", + "description": "", + "default": "prometheus" + }, + "timeout": { + "type": "string", + "comment": "Timeout for every attempt to send data to Sumo Logic backend. 
Maximum connection timeout is 55s.", + "description": "", + "default": "5s" + }, + "retry_on_failure": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "", + "default": true + }, + "initial_interval": { + "type": "string", + "comment": "Time to wait after the first failure before retrying", + "description": "", + "default": "5s" + }, + "max_interval": { + "type": "string", + "comment": "Upper bound on backoff", + "description": "", + "default": "30s" + }, + "max_elapsed_time": { + "type": "string", + "comment": "Maximum amount of time spent trying to send a batch", + "description": "", + "default": "120s" + } + } + }, + "sending_queue": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "", + "default": false + }, + "num_consumers": { + "type": "integer", + "comment": "Number of consumers that dequeue batches", + "description": "", + "default": 10 + }, + "queue_size": { + "type": "integer", + "comment": "Maximum number of batches kept in memory before data\nUser should calculate this as num_seconds * requests_per_second where:\nnum_seconds is the number of seconds to buffer in case of a backend outage\nrequests_per_second is the average number of requests per seconds.", + "description": "", + "default": 5000 + } + } + } + } + }, + "otlphttp/traces": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "http://{{ include \"otelcolinstrumentation.exporter.endpoint\" . }}:4318" + } + } + } + } + }, + "service": { + "type": "object", + "description": "", + "properties": { + "extensions": { + "type": "array", + "description": "", + "default": [ + "health_check", + "memory_ballast", + "pprof" + ] + }, + "pipelines": { + "type": "object", + "description": "", + "properties": { + "traces": { + "type": "object", + "description": "", + "properties": { + "receivers": { + "type": "array", + "description": "", + "default": [ + "jaeger", + "opencensus", + "otlp", + "otlp/deprecated", + "zipkin" + ] + }, + "processors": { + "type": "array", + "description": "", + "default": [ + "memory_limiter", + "k8s_tagger", + "source", + "resource", + "batch" + ] + }, + "exporters": { + "type": "array", + "description": "", + "default": [ + "otlphttp/traces" + ] + } + } + }, + "metrics": { + "type": "object", + "description": "", + "properties": { + "receivers": { + "type": "array", + "description": "", + "default": [ + "otlp", + "otlp/deprecated" + ] + }, + "processors": { + "type": "array", + "description": "", + "default": [ + "memory_limiter", + "k8s_tagger", + "source", + "resource", + "batch" + ] + }, + "exporters": { + "type": "array", + "description": "", + "default": [ + "sumologic/metrics" + ] + } + } + } + } + } + } + } + } + } + } + }, + "tracesSampler": { + "type": "object", + "comment": "Configure traces-sampler\nref: https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/opentelemetry-collector/traces.md", + "description": "", + "properties": { + "deployment": { + "type": "object", + "description": "", + "properties": { + "nodeSelector": { + "type": "object", + "description": "Node selector for otelcol deployment. 
[See docs/best-practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "Tolerations for traces-sampler deployment.", + "default": [] + }, + "replicas": { + "type": "integer", + "description": "Set the number of OpenTelemetry Collector replicas.", + "default": 1 + }, + "resources": { + "type": "object", + "description": "Resources for traces-sampler deployment.", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "4Gi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "2000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "384Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "200m" + } + } + } + } + }, + "priorityClassName": { + "type": "string", + "comment": "Option to define priorityClassName to assign a priority class to pods.", + "description": "Priority class name for traces-sampler pods.", + "default": "" + }, + "podLabels": { + "type": "object", + "comment": "Add custom labels only to traces-sampler deployment.", + "description": "Additional labels for traces-sampler pods.", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Add custom annotations only to traces-sampler deployment.", + "description": "Additional annotations for traces-sampler pods.", + "default": {} + }, + "image": { + "type": "object", + "description": "", + "properties": { + "repository": { + "type": "string", + "commented": true, + "description": "Image repository for traces-sampler docker container.", + "default": "" + }, + "tag": { + "type": "string", + "commented": true, + "description": "Image tag for traces-sampler docker container.", + "default": "" + }, + "pullPolicy": { + "type": "string", + "description": "Image pullPolicy for traces-sampler docker container.", + "default": "IfNotPresent" + } + } + }, + "extraEnvVars": { + "type": "array", + "comment": "Extra Environment Values - allows yaml definitions", + "commented": true, + "description": "Additional environment variables for traces-sampler pods.", + "default": [ + { + "name": "VALUE_FROM_SECRET", + "valueFrom": { + "secretKeyRef": { + "name": "secret_name", + "key": "secret_key" + } + } + } + ] + }, + "extraVolumes": { + "type": "array", + "commented": true, + "description": "Additional volumes for traces-sampler pods.", + "default": [ + { + "name": "es-certs", + "secret": { + "defaultMode": 420, + "secretName": "es-certs" + } + } + ] + }, + "extraVolumeMounts": { + "type": "array", + "commented": true, + "description": "Additional volume mounts for traces-sampler pods.", + "default": [ + { + "name": "es-certs", + "mountPath": "/certs", + "readOnly": true + } + ] + } + } + }, + "logLevelFilter": { + "type": "boolean", + "comment": "To enable collecting all logs, set to false", + "commented": true, + "description": "Do not send traces-sampler logs if `true`.", + "default": false + }, + "config": { + "type": "object", + "comment": "Collector configuration", + "description": "Configuration for traces-sampler.", + "properties": { + "receivers": { + "type": "object", + "description": "", + "properties": { + "otlp": { + "type": "object", + "description": "", + "properties": { + "protocols": { + "type": "object", + "description": "", + "properties": { + "grpc": { + "type": 
"object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:4317" + } + } + }, + "http": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:4318" + } + } + } + } + } + } + } + } + }, + "processors": { + "type": "object", + "description": "", + "properties": { + "memory_limiter": { + "type": "object", + "comment": "The memory_limiter processor is used to prevent out of memory situations on the collector.", + "description": "", + "properties": { + "check_interval": { + "type": "string", + "comment": "check_interval is the time between measurements of memory usage for the\npurposes of avoiding going over the limits. Defaults to zero, so no\nchecks will be performed. Values below 1 second are not recommended since\nit can result in unnecessary CPU consumption.", + "description": "", + "default": "5s" + }, + "limit_percentage": { + "type": "integer", + "comment": "Maximum amount of memory, in %, targeted to be allocated by the process heap.\nNote that typically the total memory usage of process will be about 50MiB higher\nthan this value.", + "description": "", + "default": 75 + }, + "spike_limit_percentage": { + "type": "integer", + "comment": "Maximum spike expected between the measurements of memory usage, in %.", + "description": "", + "default": 20 + } + } + }, + "cascading_filter": { + "type": "object", + "comment": "Smart cascading filtering rules with preset limits.\nPlease see https://github.com/SumoLogic/sumologic-otel-collector/tree/v0.85.0-sumo-0/pkg/processor/cascadingfilterprocessor\nfor details.", + "description": "", + "properties": { + "num_traces": { + "type": "integer", + "comment": "Max number of traces for which decisions are kept in memory", + "description": "", + "default": 200000 + } + } + }, + "batch": { + "type": "object", + "comment": "The batch processor accepts spans and places them into batches grouped by node and resource", + "description": "", + "properties": { + "send_batch_size": { + "type": "integer", + "comment": "Number of spans after which a batch will be sent regardless of time", + "description": "", + "default": 256 + }, + "send_batch_max_size": { + "type": "integer", + "comment": "Never more than this many spans are being sent in a batch", + "description": "", + "default": 512 + }, + "timeout": { + "type": "string", + "comment": "Time duration after which a batch will be sent regardless of size", + "description": "", + "default": "5s" + } + } + } + } + }, + "extensions": { + "type": "object", + "description": "", + "properties": { + "health_check": { + "type": "object", + "description": "", + "default": {} + }, + "memory_ballast": { + "type": "object", + "description": "", + "properties": { + "size_mib": { + "type": "integer", + "comment": "Memory Ballast size should be max 1/3 to 1/2 of memory.", + "description": "", + "default": 683 + } + } + }, + "pprof": { + "type": "object", + "description": "", + "default": {} + } + } + }, + "exporters": { + "type": "object", + "description": "", + "properties": { + "logging": { + "type": "object", + "comment": "Following generates verbose logs with span content, useful to verify what\nmetadata is being tagged. 
To enable, uncomment and add \"logging\" to exporters below.\nThere are two levels that could be used: `debug` and `info` with the former\nbeing much more verbose and including (sampled) spans content", + "commented": true, + "description": "", + "properties": { + "loglevel": { + "type": "string", + "description": "", + "default": "debug" + } + } + }, + "otlphttp": { + "type": "object", + "description": "", + "properties": { + "traces_endpoint": { + "type": "string", + "description": "", + "default": "${SUMO_ENDPOINT_DEFAULT_TRACES_SOURCE}" + }, + "compression": { + "type": "string", + "description": "", + "default": "gzip" + } + } + } + } + }, + "service": { + "type": "object", + "description": "", + "properties": { + "extensions": { + "type": "array", + "description": "", + "default": [ + "health_check", + "memory_ballast", + "pprof" + ] + }, + "pipelines": { + "type": "object", + "description": "", + "properties": { + "traces": { + "type": "object", + "description": "", + "properties": { + "receivers": { + "type": "array", + "description": "", + "default": [ + "otlp" + ] + }, + "processors": { + "type": "array", + "description": "", + "default": [ + "memory_limiter", + "cascading_filter", + "batch" + ] + }, + "exporters": { + "type": "array", + "description": "", + "default": [ + "otlphttp" + ] + } + } + } + } + } + } + } + } + } + } + }, + "metadata": { + "type": "object", + "description": "", + "properties": { + "image": { + "type": "object", + "comment": "Configure image for Opentelemetry Collector (for logs and metrics)", + "description": "", + "properties": { + "repository": { + "type": "string", + "commented": true, + "description": "Image repository for otelcol docker container.", + "default": "" + }, + "tag": { + "type": "string", + "commented": true, + "description": "Image tag for otelcol docker container.", + "default": "" + }, + "pullPolicy": { + "type": "string", + "description": "Image pullPolicy for otelcol docker container.", + "default": "IfNotPresent" + } + } + }, + "securityContext": { + "type": "object", + "description": "The securityContext configuration for otelcol.", + "properties": { + "fsGroup": { + "type": "integer", + "comment": "The group ID of all processes in the statefulset containers. 
This can be anything, but it does need to be set.\nThe default is 0 (root), and containers don't have write permissions for volumes in that case.", + "description": "", + "default": 999 + } + } + }, + "podLabels": { + "type": "object", + "comment": "Add custom labels to all otelcol sts pods(logs and metrics)", + "description": "Additional labels for all otelcol pods.", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Add custom annotations to all otelcol sts pods(logs and metrics)", + "description": "Additional annotations for all otelcol pods.", + "default": {} + }, + "serviceLabels": { + "type": "object", + "comment": "Add custom labels to all otelcol svc (logs and metrics)", + "description": "Additional labels for all otelcol pods.", + "default": {} + }, + "persistence": { + "type": "object", + "comment": "Configure persistence for Opentelemetry Collector", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control persistence for OpenTelemetry Collector.", + "default": true + }, + "storageClass": { + "type": "string", + "commented": true, + "description": "Defines storageClassName for the PersistentVolumeClaim which is used to provide persistence for OpenTelemetry Collector.", + "default": "" + }, + "accessMode": { + "type": "string", + "description": "The accessMode for the volume which is used to provide persistence for OpenTelemetry Collector.", + "default": "ReadWriteOnce" + }, + "size": { + "type": "string", + "description": "Size of the volume which is used to provide persistence for OpenTelemetry Collector.", + "default": "10Gi" + }, + "pvcLabels": { + "type": "object", + "comment": "Add custom labels to all otelcol statefulset PVC (logs and metrics)", + "description": "Additional PersistentVolumeClaim labels for all OpenTelemetry Collector pods.", + "default": {} + } + } + }, + "metrics": { + "type": "object", + "comment": "Configure metrics pipeline.\nThis section affects only otelcol provider.", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control deploying the otelcol metrics statefulsets.", + "default": true + }, + "logLevel": { + "type": "string", + "description": "Flag to control logging level for OpenTelemetry Collector for metrics. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`.", + "default": "info" + }, + "config": { + "type": "object", + "description": "", + "properties": { + "merge": { + "comment": "Directly alter the OT configuration. The value of this key should be a dictionary, that will\nbe directly merged with the generated configuration, overriding existing values.\nFor example:\noverride:\n processors:\n batch:\n send_batch_size: 512\nwill change the batch size of the pipeline.\nWARNING: This field is not subject to backwards-compatibility guarantees offered by the rest\nof this chart. It involves implementation details that may change even in minor versions.\nUse with caution, and consider opening an issue, so your customization can be added in a safer way.", + "type": "object", + "description": "Configuration for metrics metadata otelcol, merged with defaults. 
See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md.", + "default": {} + }, + "override": { + "type": "object", + "comment": "Completely override existing config and replace it with the contents of this value.\nThe value of this key should be a dictionary, that will replace the normal configuration.\nThis is an advanced feature, use with caution, and review the generated configuration first.", + "description": "Configuration for metrics metadata otelcol, replaces defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md.", + "default": {} + }, + "additionalEndpoints": { + "comment": "List of additional endpoints to be handled by Metrics Metadata Pods", + "type": "array", + "description": "List of additional endpoints for Open Telemetry Metadata Pod.", + "default": [] + } + } + }, + "statefulset": { + "type": "object", + "description": "", + "properties": { + "nodeSelector": { + "type": "object", + "description": "Node selector for metrics metadata enrichment (otelcol) statefulset. [See docs/best-practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "Tolerations for metrics metadata enrichment (otelcol) statefulset.", + "default": [] + }, + "topologySpreadConstraints": { + "type": "array", + "description": "TopologySpreadConstraints for metrics metadata enrichment (otelcol) statefulset.", + "default": [] + }, + "affinity": { + "type": "object", + "description": "Affinity for metrics metadata enrichment (otelcol) statefulset.", + "default": {} + }, + "podAntiAffinity": { + "type": "string", + "comment": "Acceptable values for podAntiAffinity:\nsoft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default)\nhard: specifies rules that must be met for a pod to be scheduled onto a node", + "description": "PodAntiAffinity for metrics metadata enrichment (otelcol) statefulset.", + "default": "soft" + }, + "replicaCount": { + "type": "integer", + "description": "Replica count for metrics metadata enrichment (otelcol) statefulset.", + "default": 3 + }, + "resources": { + "type": "object", + "description": "Resources for metrics metadata enrichment (otelcol) statefulset.", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "1Gi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "1000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "768Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "500m" + } + } + } + } + }, + "priorityClassName": { + "type": "string", + "comment": "Option to define priorityClassName to assign a priority class to pods.", + "description": "Priority class name for metrics metadata enrichment (otelcol) pods.", + "default": "" + }, + "podLabels": { + "type": "object", + "comment": "Add custom labels only to metrics sts pods", + "description": "Additional labels for metrics metadata enrichment (otelcol) pods.", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Add custom annotations only to metrics sts pods", + "description": "Additional annotations for metrics metadata enrichment (otelcol) pods.", + "default": {} + }, + "containers": { + "type": "object", + "comment": "Set securityContext for
containers running in pods in metrics statefulset.", + "description": "", + "properties": { + "otelcol": { + "type": "object", + "description": "", + "properties": { + "securityContext": { + "type": "object", + "description": "The securityContext configuration for otelcol container for metrics metadata enrichment statefulset.", + "default": {} + }, + "livenessProbe": { + "type": "object", + "description": "Liveness probe settings for the metrics otelcol container.", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "readinessProbe": { + "type": "object", + "description": "Readiness probe settings for the metrics otelcol container.", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 5 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "startupProbe": { + "type": "object", + "description": "Startup probe configuration for metrics otelcol container.", + "properties": { + "periodSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 60 + } + } + } + } + } + } + }, + "extraEnvVars": { + "type": "array", + "comment": "Extra Environment Values - allows yaml definitions", + "commented": true, + "description": "Additional environment variables for metrics metadata enrichment (otelcol) pods.", + "default": [ + { + "name": "VALUE_FROM_SECRET", + "valueFrom": { + "secretKeyRef": { + "name": "secret_name", + "key": "secret_key" + } + } + } + ] + }, + "extraVolumes": { + "type": "array", + "commented": true, + "description": "Additional volumes for metrics metadata enrichment (otelcol) pods.", + "default": [ + { + "name": "es-certs", + "secret": { + "defaultMode": 420, + "secretName": "es-certs" + } + } + ] + }, + "extraVolumeMounts": { + "type": "array", + "commented": true, + "description": "Additional volume mounts for metrics metadata enrichment (otelcol) pods.", + "default": [ + { + "name": "es-certs", + "mountPath": "/certs", + "readOnly": true + } + ] + } + } + }, + "autoscaling": { + "type": "object", + "comment": "Option to turn autoscaling on for metrics and specify params for HPA.\nAutoscaling needs metrics-server to access cpu metrics.", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Option to turn autoscaling on for metrics metadata enrichment (otelcol) and specify params for HPA.
Autoscaling needs metrics-server to access cpu metrics.", + "default": false + }, + "minReplicas": { + "type": "integer", + "description": "Default min replicas for autoscaling.", + "default": 3 + }, + "maxReplicas": { + "type": "integer", + "description": "Default max replicas for autoscaling.", + "default": 10 + }, + "targetCPUUtilizationPercentage": { + "type": "integer", + "description": "The desired target CPU utilization for autoscaling.", + "default": 80 + }, + "targetMemoryUtilizationPercentage": { + "type": "integer", + "commented": true, + "description": "The desired target memory utilization for autoscaling.", + "default": 50 + } + } + }, + "podDisruptionBudget": { + "type": "object", + "comment": "Option to specify PodDisruptionBudgets\nYou can specify only one of maxUnavailable and minAvailable in a single PodDisruptionBudget", + "description": "Pod Disruption Budget for metrics metadata enrichment (otelcol) statefulset and for experimental otelcol metrics collector.", + "properties": { + "minAvailable": { + "type": "integer", + "description": "", + "default": 2 + }, + "maxUnavailable": { + "type": "integer", + "comment": "To use maxUnavailable, set minAvailable to null and uncomment the below:", + "commented": true, + "description": "", + "default": 1 + } + } + } + } + }, + "logs": { + "type": "object", + "comment": "Configure logs pipeline.\nThis section affects only otelcol provider.", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control deploying the otelcol logs statefulsets.", + "default": true + }, + "logLevel": { + "type": "string", + "description": "Flag to control logging level for OpenTelemetry Collector for logs. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`.", + "default": "info" + }, + "config": { + "type": "object", + "description": "", + "properties": { + "merge": { + "type": "object", + "comment": "Directly alter the OT configuration. The value of this key should be a dictionary, that will\nbe directly merged with the generated configuration, overriding existing values.\nFor example:\noverride:\nprocessors:\nbatch:\nsend_batch_size: 512\nwill change the batch size of the pipeline.\nWARNING: This field is not subject to backwards-compatibility guarantees offered by the rest\nof this chart. It involves implementation details that may change even in minor versions.\nUse with caution, and consider opening an issue, so your customization can be added in a safer way.", + "description": "Configuration for logs metadata otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md.", + "default": {} + }, + "override": { + "type": "object", + "comment": "Completely override existing config and replace it with the contents of this value.\nThe value of this key should be a dictionary, that will replace the normal configuration.\nThis is an advanced feature, use with caution, and review the generated configuration first.", + "description": "Configuration for logs metadata otelcol, replaces defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md.", + "default": {} + } + } + }, + "statefulset": { + "type": "object", + "description": "", + "properties": { + "nodeSelector": { + "type": "object", + "description": "Node selector for logs metadata enrichment (otelcol) statefulset.
[See docs/best-practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "Tolerations for logs metadata enrichment (otelcol) statefulset.", + "default": [] + }, + "topologySpreadConstraints": { + "type": "array", + "description": "TopologySpreadConstraints for logs metadata enrichment (otelcol) statefulset.", + "default": [] + }, + "affinity": { + "type": "object", + "description": "Affinity for logs metadata enrichment (otelcol) statefulset.", + "default": {} + }, + "podAntiAffinity": { + "type": "string", + "comment": "Acceptable values for podAntiAffinity:\nsoft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default)\nhard: specifies rules that must be met for a pod to be scheduled onto a node", + "description": "PodAntiAffinity for logs metadata enrichment (otelcol) statefulset.", + "default": "soft" + }, + "replicaCount": { + "type": "integer", + "description": "Replica count for logs metadata enrichment (otelcol) statefulset.", + "default": 3 + }, + "resources": { + "type": "object", + "description": "Resources for logs metadata enrichment (otelcol) statefulset.", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "1Gi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "1000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "768Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "500m" + } + } + } + } + }, + "priorityClassName": { + "type": "string", + "comment": "Option to define priorityClassName to assign a priority class to pods.", + "description": "Priority class name for logs metadata enrichment (otelcol) pods.", + "default": "" + }, + "podLabels": { + "type": "object", + "comment": "Add custom labels only to logs sts pods", + "description": "Additional labels for logs metadata enrichment (otelcol) pods.", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Add custom annotations only to logs sts pods", + "description": "Additional annotations for logs metadata enrichment (otelcol) pods.", + "default": {} + }, + "containers": { + "type": "object", + "comment": "Set securityContext for containers running in pods in logs statefulset.", + "description": "", + "properties": { + "otelcol": { + "type": "object", + "description": "", + "properties": { + "securityContext": { + "type": "object", + "description": "The securityContext configuration for the logs otelcol container.", + "default": {} + }, + "livenessProbe": { + "type": "object", + "description": "Liveness probe settings for the logs otelcol container.", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "readinessProbe": { + "type": "object", + "description": "Readiness probe settings for the logs otelcol container.", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 5 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + 
"timeoutSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "startupProbe": { + "type": "object", + "description": "Startup probe configuration for the logs otelcol container.", + "properties": { + "periodSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 60 + } + } + } + } + } + } + }, + "extraEnvVars": { + "type": "array", + "comment": "Extra Environment Values - allows yaml definitions", + "commented": true, + "description": "Additional environment variables for logs metadata enrichment (otelcol) pods.", + "default": [ + { + "name": "VALUE_FROM_SECRET", + "valueFrom": { + "secretKeyRef": { + "name": "secret_name", + "key": "secret_key" + } + } + } + ] + }, + "extraVolumes": { + "type": "array", + "commented": true, + "description": "Additional volumes for logs metadata enrichment (otelcol) pods.", + "default": [ + { + "name": "es-certs", + "secret": { + "defaultMode": 420, + "secretName": "es-certs" + } + } + ] + }, + "extraVolumeMounts": { + "type": "array", + "commented": true, + "description": "Additional volume mounts for logs metadata enrichment (otelcol) pods.", + "default": [ + { + "name": "es-certs", + "mountPath": "/certs", + "readOnly": true + } + ] + }, + "extraPorts": { + "type": "array", + "commented": true, + "description": "Additional exposed ports in logs metadata enrichment (otelcol) pods and service.", + "default": [ + { + "name": "otlphttp2", + "containerPort": 4319, + "protocol": "TCP" + } + ] + }, + "extraArgs": { + "type": "array", + "commented": true, + "description": "Additional arguments to otelcol container.", + "default": [] + } + } + }, + "autoscaling": { + "type": "object", + "comment": "Option to turn autoscaling on for logs and specify params for HPA.\nAutoscaling needs metrics-server to access cpu metrics.", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Option to turn autoscaling on for logs metadata enrichment (otelcol) and specify params for HPA. 
Autoscaling needs metrics-server to access cpu metrics.", + "default": false + }, + "minReplicas": { + "type": "integer", + "description": "Default min replicas for autoscaling.", + "default": 3 + }, + "maxReplicas": { + "type": "integer", + "description": "Default max replicas for autoscaling.", + "default": 10 + }, + "targetCPUUtilizationPercentage": { + "type": "integer", + "description": "The desired target CPU utilization for autoscaling.", + "default": 80 + }, + "targetMemoryUtilizationPercentage": { + "type": "integer", + "commented": true, + "description": "The desired target memory utilization for autoscaling.", + "default": 50 + } + } + }, + "podDisruptionBudget": { + "type": "object", + "comment": "Option to specify PodDisruptionBudgets\nYou can specify only one of maxUnavailable and minAvailable in a single PodDisruptionBudget", + "description": "Pod Disruption Budget for logs metadata enrichment (otelcol) statefulset.", + "properties": { + "minAvailable": { + "type": "integer", + "description": "", + "default": 2 + }, + "maxUnavailable": { + "type": "integer", + "comment": "To use maxUnavailable, set minAvailable to null and uncomment the below:", + "commented": true, + "description": "", + "default": 1 + } + } + } + } + } + } + }, + "tracesGateway": { + "type": "object", + "comment": "Configure traces-gateway\nref: https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/opentelemetry-collector/traces.md", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control deploying traces-gateway. [See docs for more information.](/docs/opentelemetry-collector/traces.md)", + "default": true + }, + "autoscaling": { + "type": "object", + "comment": "Option to turn autoscaling on for otelcol and specify params for HPA.\nAutoscaling needs metrics-server to access cpu metrics.", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Option to turn autoscaling on for traces-gateway and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics.", + "default": false + }, + "minReplicas": { + "type": "integer", + "description": "Default min replicas for autoscaling.", + "default": 3 + }, + "maxReplicas": { + "type": "integer", + "description": "Default max replicas for autoscaling.", + "default": 10 + }, + "targetCPUUtilizationPercentage": { + "type": "integer", + "description": "The desired target CPU utilization for autoscaling.", + "default": 100 + }, + "targetMemoryUtilizationPercentage": { + "type": "integer", + "commented": true, + "description": "The desired target memory utilization for autoscaling.", + "default": 50 + } + } + }, + "deployment": { + "type": "object", + "description": "", + "properties": { + "replicas": { + "type": "integer", + "description": "Set the number of OpenTelemetry Collector replicas.", + "default": 1 + }, + "nodeSelector": { + "type": "object", + "description": "Node selector for otelcol deployment.
[See docs/best-practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "Tolerations for traces-gateway statefulset.", + "default": [] + }, + "resources": { + "type": "object", + "description": "Resources for traces-gateway statefulset.", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "2Gi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "1000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "196Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "50m" + } + } + } + } + }, + "podLabels": { + "type": "object", + "comment": "Add custom labels only to traces-gateway deployment.", + "description": "Additional labels for traces-gateway pods.", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Add custom annotations only to traces-gateway deployment.", + "description": "Additional annotations for traces-gateway pods.", + "default": {} + }, + "image": { + "type": "object", + "description": "", + "properties": { + "repository": { + "type": "string", + "commented": true, + "description": "Image repository for traces-gateway docker container.", + "default": "" + }, + "tag": { + "type": "string", + "commented": true, + "description": "Image tag for traces-gateway docker container.", + "default": "" + }, + "pullPolicy": { + "type": "string", + "description": "Image pullPolicy for traces-gateway docker container.", + "default": "IfNotPresent" + } + } + }, + "livenessProbe": { + "type": "object", + "description": "Liveness probe settings for the traces-gateway container.", + "properties": { + "periodSeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "readinessProbe": { + "type": "object", + "description": "Readiness probe settings for the traces-gateway container.", + "properties": { + "periodSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "startupProbe": { + "type": "object", + "description": "Startup probe configuration for the traces-gateway container.", + "properties": { + "periodSeconds": { + "type": "integer", + "description": "", + "default": 5 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 60 + } + } + }, + "extraEnvVars": { + "type": "array", + "comment": "Extra Environment Values - allows yaml definitions", + "commented": true, + "description": "Additional environment variables for traces-gateway pods.", + "default": [ + { + "name": "VALUE_FROM_SECRET", + "valueFrom": { + "secretKeyRef": { + "name": "secret_name", + "key": "secret_key" + } + } + } + ] + }, + "extraVolumes": { + "type": "array", + "commented": true, + "description": "Additional volumes for traces-gateway pods.", + "default": [ + { + "name": "es-certs", + "secret": { + "defaultMode": 420, + "secretName": "es-certs" + } + } + ] + }, + 
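The probe blocks above translate directly into values.yaml overrides, and checking such overrides is the purpose of the generated schema. A minimal sketch of that check; the `jsonschema` and `PyYAML` packages, the `values.schema.json` output path, and the override values are assumptions, not part of this change:

```python
#!/usr/bin/env python3
# Minimal sketch: validate user values against the schema emitted by
# ci/generate-schema.py. `jsonschema` is a third-party validator that
# ignores non-standard keywords such as `comment`; the schema path and
# the override below are hypothetical.
import json

import jsonschema
import yaml

with open('values.schema.json', encoding='utf-8') as schema_file:
    schema = json.load(schema_file)

# Override of the startupProbe defaults shown above (periodSeconds 5 and
# failureThreshold 60 allow roughly five minutes for startup).
user_values = yaml.safe_load("""
tracesGateway:
  deployment:
    startupProbe:
      periodSeconds: 10
      failureThreshold: 30
""")

jsonschema.validate(instance=user_values, schema=schema)
print('values.yaml overrides are structurally valid')
```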
"extraVolumeMounts": { + "type": "array", + "commented": true, + "description": "Additional volume mounts for traces-gateway pods.", + "default": [ + { + "name": "es-certs", + "mountPath": "/certs", + "readOnly": true + } + ] + }, + "priorityClassName": { + "type": "string", + "comment": "Option to define priorityClassName to assign a priority class to pods.", + "description": "Priority class name for OpenTelemetry Collector log pods.", + "default": "" + } + } + }, + "logLevelFilter": { + "type": "boolean", + "comment": "To enable collecting all logs, set to false", + "description": "Do not send traces-gateway logs if `true`.", + "default": false + }, + "config": { + "type": "object", + "description": "Configuration for traces-gateway.", + "properties": { + "receivers": { + "type": "object", + "description": "", + "properties": { + "otlp": { + "type": "object", + "description": "", + "properties": { + "protocols": { + "type": "object", + "description": "", + "properties": { + "grpc": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:4317" + } + } + }, + "http": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:4318" + } + } + } + } + } + } + } + } + }, + "processors": { + "type": "object", + "description": "", + "properties": { + "memory_limiter": { + "type": "object", + "comment": "The memory_limiter processor is used to prevent out of memory situations on the collector.", + "description": "", + "properties": { + "check_interval": { + "type": "string", + "comment": "check_interval is the time between measurements of memory usage for the\npurposes of avoiding going over the limits. Defaults to zero, so no\nchecks will be performed. 
Values below 1 second are not recommended since\nit can result in unnecessary CPU consumption.", + "description": "", + "default": "5s" + }, + "limit_percentage": { + "type": "integer", + "comment": "Maximum amount of memory, in %, targeted to be allocated by the process heap.\nNote that typically the total memory usage of process will be about 50MiB higher\nthan this value.", + "description": "", + "default": 75 + }, + "spike_limit_percentage": { + "type": "integer", + "comment": "Maximum spike expected between the measurements of memory usage, in %.", + "description": "", + "default": 20 + } + } + }, + "batch": { + "type": "object", + "comment": "The batch processor accepts spans and places them into batches grouped by node and resource", + "description": "", + "properties": { + "send_batch_size": { + "type": "integer", + "comment": "Number of spans after which a batch will be sent regardless of time", + "description": "", + "default": 256 + }, + "send_batch_max_size": { + "type": "integer", + "comment": "Maximum number of spans sent at once", + "description": "", + "default": 512 + }, + "timeout": { + "type": "string", + "comment": "Time duration after which a batch will be sent regardless of size", + "description": "", + "default": "5s" + } + } + } + } + }, + "extensions": { + "type": "object", + "description": "", + "properties": { + "health_check": { + "type": "object", + "description": "", + "default": {} + }, + "memory_ballast": { + "type": "object", + "description": "", + "properties": { + "size_mib": { + "type": "integer", + "comment": "Memory Ballast size should be max 1/3 to 1/2 of memory.", + "description": "", + "default": 250 + } + } + }, + "pprof": { + "type": "object", + "description": "", + "default": {} + } + } + }, + "exporters": { + "type": "object", + "description": "", + "properties": { + "loadbalancing": { + "type": "object", + "description": "", + "properties": { + "protocol": { + "type": "object", + "description": "", + "properties": { + "otlp": { + "type": "object", + "description": "", + "properties": { + "timeout": { + "type": "string", + "description": "", + "default": "10s" + }, + "tls": { + "type": "object", + "description": "", + "properties": { + "insecure": { + "type": "boolean", + "description": "", + "default": true + } + } + } + } + } + } + }, + "resolver": { + "type": "object", + "description": "", + "properties": { + "dns": { + "type": "object", + "description": "", + "properties": { + "hostname": { + "type": "string", + "description": "", + "default": "{{ include \"tracesgateway.exporter.loadbalancing.endpoint\" . 
}}" + }, + "port": { + "type": "integer", + "description": "", + "default": 4317 + } + } + } + } + } + } + } + } + }, + "service": { + "type": "object", + "description": "", + "properties": { + "extensions": { + "type": "array", + "description": "", + "default": [ + "health_check", + "memory_ballast", + "pprof" + ] + }, + "pipelines": { + "type": "object", + "description": "", + "properties": { + "traces": { + "type": "object", + "description": "", + "properties": { + "receivers": { + "type": "array", + "description": "", + "default": [ + "otlp" + ] + }, + "processors": { + "type": "array", + "description": "", + "default": [ + "memory_limiter", + "batch" + ] + }, + "exporters": { + "type": "array", + "description": "", + "default": [ + "loadbalancing" + ] + } + } + } + } + } + } + } + } + } + } + }, + "otelevents": { + "type": "object", + "comment": "Configuration of the OpenTelemetry Collector that collects Kubernetes events.\nSee https://github.com/SumoLogic/sumologic-kubernetes-collection/deploy/docs/collecting-kubernetes-events.md.", + "description": "", + "properties": { + "image": { + "type": "object", + "comment": "Configure image for Opentelemetry Collector", + "description": "", + "properties": { + "repository": { + "type": "string", + "commented": true, + "description": "Image repository for otelcol docker container.", + "default": "" + }, + "tag": { + "type": "string", + "commented": true, + "description": "Image tag for otelcol docker container.", + "default": "" + }, + "pullPolicy": { + "type": "string", + "description": "Image pullPolicy for otelcol docker container.", + "default": "IfNotPresent" + } + } + }, + "logLevel": { + "type": "string", + "description": "Log level for the OpenTelemtry Collector. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`.", + "default": "info" + }, + "config": { + "type": "object", + "comment": "Customize the Opentelemetry Collector configuration beyond the exposed options", + "description": "", + "properties": { + "merge": { + "type": "object", + "comment": "Directly alter the OT configuration. The value of this key should be a dictionary, that will\nbe directly merged with the generated configuration, overriding existing values.\nFor example:\noverride:\nprocessors:\nbatch:\nsend_batch_size: 512\nwill change the batch size of the pipeline.\nWARNING: This field is not subject to backwards-compatibility guarantees offered by the rest\nof this chart. It involves implementation details that may change even in minor versions.\nUse with caution, and consider opening an issue, so your customization can be added in a safer way.", + "description": "Configuration for events otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md.", + "default": {} + }, + "override": { + "type": "object", + "comment": "Completely override existing config and replace it with the contents of this value.\nThe value of this key should be a dictionary, that will replace the normal configuration.\nThis is an advanced feature, use with caution, and review the generated configuration first.", + "description": "Configuration for events otelcol, replaces defaults.See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md.", + "default": {} + } + } + }, + "statefulset": { + "type": "object", + "description": "OpenTelemetry Collector StatefulSet customization options. 
See values.yaml for more details.", + "properties": { + "nodeSelector": { + "type": "object", + "description": "", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "", + "default": [] + }, + "topologySpreadConstraints": { + "type": "array", + "description": "", + "default": [] + }, + "affinity": { + "type": "object", + "description": "", + "default": {} + }, + "podAntiAffinity": { + "type": "string", + "comment": "Acceptable values for podAntiAffinity:\nsoft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default)\nhard: specifies rules that must be met for a pod to be scheduled onto a node", + "description": "", + "default": "soft" + }, + "resources": { + "type": "object", + "description": "", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "2Gi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "2000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "500Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "200m" + } + } + } + } + }, + "priorityClassName": { + "type": "string", + "comment": "Option to define priorityClassName to assign a priority class to pods.", + "description": "", + "default": "" + }, + "podLabels": { + "type": "object", + "comment": "Add custom labels only to events sts pods", + "description": "", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Add custom annotations only to events sts pods", + "description": "", + "default": {} + }, + "securityContext": { + "type": "object", + "comment": "The group ID of all processes in the statefulset containers. 
This can be anything, but it does need to be set.\nThe default is 0 (root), and containers don't have write permissions for volumes in that case.", + "description": "", + "properties": { + "fsGroup": { + "type": "integer", + "description": "", + "default": 999 + } + } + }, + "containers": { + "type": "object", + "comment": "Set securityContext for containers running in pods in events statefulset.", + "description": "", + "properties": { + "otelcol": { + "type": "object", + "description": "", + "properties": { + "securityContext": { + "type": "object", + "description": "", + "default": {} + }, + "livenessProbe": { + "type": "object", + "description": "", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "readinessProbe": { + "type": "object", + "description": "", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 5 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "startupProbe": { + "type": "object", + "description": "", + "properties": { + "periodSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 60 + } + } + } + } + } + } + }, + "extraEnvVars": { + "type": "array", + "comment": "Extra Environment Values - allows yaml definitions", + "commented": true, + "description": "", + "default": [ + { + "name": "VALUE_FROM_SECRET", + "valueFrom": { + "secretKeyRef": { + "name": "secret_name", + "key": "secret_key" + } + } + } + ] + }, + "extraVolumes": { + "type": "array", + "commented": true, + "description": "", + "default": [ + { + "name": "es-certs", + "secret": { + "defaultMode": 420, + "secretName": "es-certs" + } + } + ] + }, + "extraVolumeMounts": { + "type": "array", + "commented": true, + "description": "", + "default": [ + { + "name": "es-certs", + "mountPath": "/certs", + "readOnly": true + } + ] + } + } + } + } + }, + "otelcloudwatch": { + "type": "object", + "comment": "Configure cloudwatch collection with Otelcol", + "description": "", + "properties": { + "statefulset": { + "type": "object", + "description": "OpenTelemetry Cloudwatch Collector statefulset customization options. 
See [values.yaml] for more details.", + "properties": { + "nodeSelector": { + "type": "object", + "description": "", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "", + "default": [] + }, + "topologySpreadConstraints": { + "type": "array", + "description": "", + "default": [] + }, + "affinity": { + "type": "object", + "description": "", + "default": {} + }, + "podAntiAffinity": { + "type": "string", + "comment": "Acceptable values for podAntiAffinity:\nsoft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default)\nhard: specifies rules that must be met for a pod to be scheduled onto a node", + "description": "", + "default": "soft" + }, + "replicaCount": { + "type": "integer", + "description": "", + "default": 1 + }, + "resources": { + "type": "object", + "description": "", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "1Gi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "1000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "768Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "500m" + } + } + } + } + }, + "priorityClassName": { + "type": "string", + "comment": "Option to define priorityClassName to assign a priority class to pods.", + "description": "", + "default": "" + }, + "podLabels": { + "type": "object", + "comment": "Add custom labels only to logs otel sts pods", + "description": "", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Add custom annotations only to logs otel sts pods", + "description": "", + "default": {} + }, + "containers": { + "type": "object", + "comment": "Set securityContext for containers running in pods in otelcol-instrumentation statefulset.", + "description": "", + "properties": { + "otelcol": { + "type": "object", + "description": "", + "properties": { + "securityContext": { + "type": "object", + "description": "", + "default": {} + }, + "livenessProbe": { + "type": "object", + "description": "", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "readinessProbe": { + "type": "object", + "description": "", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 5 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "startupProbe": { + "type": "object", + "description": "", + "properties": { + "periodSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 60 + } + } + } + } + } + } + } + } + } + } + }, + "otellogs": { + "comment": "Configure log collection with Otelcol", + "type": "object", + "description": "", + "properties": { + "metrics": { + "comment": "Metrics from Collector", + "type": "object", + "description": "", + 
"properties": { + "enabled": { + "type": "boolean", + "description": "Enable OpenTelemetry Collector metrics", + "default": true + } + } + }, + "serviceLabels": { + "type": "object", + "description": "Add custom labels to OpenTelemetry Collector Service", + "default": {}, + "comment": "Add custom labels to otelcol svc" + }, + "image": { + "comment": "Configure image for Opentelemetry Collector", + "type": "object", + "description": "", + "properties": { + "repository": { + "type": "string", + "description": "Image repository for otelcol docker container.", + "default": "", + "commented": true + }, + "tag": { + "type": "string", + "description": "Image tag for otelcol docker container.", + "default": "", + "commented": true + }, + "pullPolicy": { + "type": "string", + "description": "Image pullPolicy for otelcol docker container.", + "default": "IfNotPresent" + } + } + }, + "logLevel": { + "type": "string", + "description": "Log level for the OpenTelemtry Collector. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`.", + "default": "info" + }, + "config": { + "type": "object", + "description": "", + "properties": { + "merge": { + "type": "object", + "description": "Configuration for log collector otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md.", + "default": {}, + "comment": "Directly alter the OT configuration. The value of this key should be a dictionary, that will\nbe directly merged with the generated configuration, overriding existing values.\nFor example:\noverride:\nprocessors:\nbatch:\nsend_batch_size: 512\nwill change the batch size of the pipeline.\nWARNING: This field is not subject to backwards-compatibility guarantees offered by the rest\nof this chart. It involves implementation details that may change even in minor versions.\nUse with caution, and consider opening an issue, so your customization can be added in a safer way." + }, + "override": { + "type": "object", + "description": "Configuration for log collector otelcol, replaces defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md.", + "default": {}, + "comment": "Completely override existing config and replace it with the contents of this value.\nThe value of this key should be a dictionary, that will replace the normal configuration.\nThis is an advanced feature, use with caution, and review the generated configuration first." + } + } + }, + "daemonset": { + "type": "object", + "description": "OpenTelemetry Collector Daemonset customization options. 
See [values.yaml] for more details.", + "comment": "Set securityContext for containers running in pods in log collector daemonset", + "properties": { + "securityContext": { + "type": "object", + "description": "", + "properties": { + "fsGroup": { + "comment": "In order to reliably read logs from mounted node logging paths, we need to run as root", + "type": "integer", + "description": "", + "default": 0 + }, + "runAsUser": { + "type": "integer", + "description": "", + "default": 0 + }, + "runAsGroup": { + "type": "integer", + "description": "", + "default": 0 + } + } + }, + "labels": { + "comment": "Add custom labels to the otelcol daemonset", + "type": "object", + "description": "", + "default": {} + }, + "annotations": { + "comment": "Add custom annotations to the otelcol daemonset", + "type": "object", + "description": "", + "default": {} + }, + "podLabels": { + "comment": "Add custom labels to all otelcol daemonset pods", + "type": "object", + "description": "", + "default": {} + }, + "podAnnotations": { + "comment": "Add custom annotations to all otelcol daemonset pods", + "type": "object", + "description": "", + "default": {} + }, + "resources": { + "type": "object", + "description": "", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "1Gi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "1000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "32Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "100m" + } + } + } + } + }, + "priorityClassName": { + "type": "string", + "description": "", + "default": "", + "comment": "Option to define priorityClassName to assign a priority class to pods.\nIf not set then templates/priorityclass.yaml is used."
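The `config.merge` comments repeated throughout this schema describe a deep merge of the user dictionary over the generated configuration. A minimal sketch of that semantics, using the documented `send_batch_size` example; the `deep_merge` helper is illustrative only, not the chart's actual template code:

```python
# Minimal sketch of the `config.merge` semantics described above: the
# user dictionary is merged into the generated configuration, replacing
# scalar values while keeping keys that the user did not touch.
def deep_merge(base: dict, override: dict) -> dict:
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            # Recurse into nested dictionaries instead of replacing them.
            merged[key] = deep_merge(merged[key], value)
        else:
            merged[key] = value
    return merged

# Defaults as shown in the batch processor blocks of this schema.
generated = {'processors': {'batch': {'send_batch_size': 256, 'timeout': '5s'}}}
# The override documented in the `merge` comment.
user_merge = {'processors': {'batch': {'send_batch_size': 512}}}

assert deep_merge(generated, user_merge) == {
    'processors': {'batch': {'send_batch_size': 512, 'timeout': '5s'}}
}
```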
+ }, + "containers": { + "comment": "Set securityContext for containers running in pods in log collector daemonset", + "type": "object", + "description": "", + "properties": { + "otelcol": { + "type": "object", + "description": "", + "properties": { + "securityContext": { + "type": "object", + "description": "", + "properties": { + "capabilities": { + "type": "object", + "description": "", + "properties": { + "drop": { + "type": "array", + "description": "", + "default": [ + "ALL" + ] + } + } + } + } + } + } + } + } + }, + "initContainers": { + "comment": "Set securityContext and image for initContainers running in pods in log collector daemonset", + "type": "object", + "description": "", + "properties": { + "changeowner": { + "type": "object", + "description": "", + "properties": { + "image": { + "type": "object", + "description": "", + "properties": { + "repository": { + "type": "string", + "description": "", + "default": "public.ecr.aws/docker/library/busybox" + }, + "tag": { + "type": "string", + "description": "", + "default": "1.36.0" + }, + "pullPolicy": { + "type": "string", + "description": "", + "default": "IfNotPresent" + } + } + }, + "securityContext": { + "type": "object", + "description": "", + "properties": { + "capabilities": { + "type": "object", + "description": "", + "properties": { + "drop": { + "type": "array", + "description": "", + "default": [ + "ALL" + ] + }, + "add": { + "type": "array", + "description": "", + "default": [ + "CAP_CHOWN" + ] + } + } + } + } + } + } + } + } + }, + "nodeSelector": { + "type": "object", + "description": "", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "", + "default": [] + }, + "affinity": { + "type": "object", + "description": "", + "default": {} + }, + "extraEnvVars": { + "type": "array", + "description": "", + "commented": true, + "comment": "Extra Environment Values - allows yaml definitions", + "default": [ + { + "name": "VALUE_FROM_SECRET", + "valueFrom": { + "secretKeyRef": { + "name": "secret_name", + "key": "secret_key" + } + } + } + ] + }, + "extraVolumes": { + "type": "array", + "description": "", + "commented": true, + "default": [ + { + "name": "es-certs", + "secret": { + "defaultMode": 420, + "secretName": "es-certs" + } + } + ] + }, + "extraVolumeMounts": { + "type": "array", + "description": "", + "commented": true, + "default": [ + { + "name": "es-certs", + "mountPath": "/certs", + "readOnly": true + } + ] + } + } + }, + "additionalDaemonSets": { + "type": "object", + "description": "OpenTelemetry Collector Daemonset per node customization options. 
See [Best Practices](/docs/best-practices.md#setting-different-resources-on-different-nodes-for-logs-collector).", + "default": {}, + "comment": "additionalDaemonSets allows to set daemonsets with affinity, nodeSelector and resources\ndifferent than the main DaemonSet\nBe careful and set nodeAffinity for the main DaemonSet,\nas we do not support multiple pods of otellogs on the same node\ne.g:\nadditionalDaemonSets:\nlinux:\nnodeSelector:\nkubernetes.io/os: linux\nresources:\nlimits:\nmemory: 1Gi\ncpu: 6\nrequests:\nmemory: 32Mi\ncpu: 2\ndaemonset:\naffinity:\nnodeAffinity:\nrequiredDuringSchedulingIgnoredDuringExecution:\nnodeSelectorTerms:\n- matchExpressions:\n- key: kubernetes.io/os\noperator: NotIn\nvalues:\n- linux" + } + } + }, + "telegraf-operator": { + "type": "object", + "comment": "Configure telegraf-operator\nref: https://github.com/influxdata/helm-charts/blob/master/charts/telegraf-operator/values.yaml", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control deploying Telegraf Operator Helm sub-chart.", + "default": false + }, + "fullnameOverride": { + "type": "string", + "description": "Used to override the chart's full name.", + "default": "", + "comment": "Put here the new name if you want to override the full name used for Telegraf Operator components.", + "commented": true + }, + "image": { + "type": "object", + "description": "", + "properties": { + "sidecarImage": { + "type": "string", + "description": "Telegraf Operator sidecar image.", + "default": "public.ecr.aws/sumologic/telegraf:1.21.2" + } + } + }, + "replicaCount": { + "type": "integer", + "description": "Replica count for Telegraf Operator pods.", + "default": 1 + }, + "classes": { + "type": "object", + "description": "", + "properties": { + "secretName": { + "type": "string", + "description": "Secret name in which the Telegraf Operator configuration will be stored.", + "default": "telegraf-operator-classes" + }, + "default": { + "type": "string", + "description": "Name of the default output configuration.", + "default": "sumologic-prometheus" + }, + "data": { + "type": "object", + "description": "Telegraf sidecar configuration.", + "properties": { + "sumologic-prometheus": { + "type": "string", + "description": "", + "default": "[[outputs.prometheus_client]]\nConfiguration details:\nhttps://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client#configuration\n listen = \":9273\"\n metric_version = 2\n ## Disable the default collectors\n collectors_exclude = [\"gocollector\", \"process\"]\n ## Telegraf operator adds the internal plugin by default, and the Helm Chart doesn't let us disable it\n ## Instead, drop the metrics at the output\n namedrop = [\"internal*\"]" + } + } + } + } + }, + "imagePullSecrets": { + "type": "array", + "description": "Pull secrets for Telegraf Operator images. 
For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config).", + "default": [], + "commented": true + } + } + }, + "falco": { + "type": "object", + "comment": "Configure Falco\nPlease note that Falco is embedded in this Helm Chart for user convenience only - Sumo Logic does not provide production support for it\nThis is an experimental configuration and shouldn't be used in production environment\nhttps://github.com/falcosecurity/charts/tree/master/falco", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control deploying Falco Helm sub-chart.", + "default": false + }, + "fullnameOverride": { + "type": "string", + "description": "Used to override the chart's full name.", + "default": "", + "commented": true, + "comment": "Put here the new name if you want to override the full name used for Falco components." + }, + "imagePullSecrets": { + "type": "array", + "description": "Pull secrets for falco images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config).", + "default": [], + "commented": true + }, + "image": { + "type": "object", + "description": "", + "properties": { + "registry": { + "type": "string", + "description": "Image registry for falco docker container.", + "default": "public.ecr.aws" + }, + "repository": { + "type": "string", + "description": "Image repository for falco docker container.", + "default": "falcosecurity/falco-no-driver", + "commented": true + } + } + }, + "addKernelDevel": { + "comment": "Add kernel-devel package through MachineConfig, required to enable building of missing falco modules (only for OpenShift)", + "type": "boolean", + "description": "Flag to control installation of `kernel-devel` on nodes using MachineConfig, required to build falco modules (only for OpenShift)", + "default": true + }, + "extra": { + "type": "object", + "description": "", + "properties": { + "initContainers": { + "comment": "Add initContainer to wait until kernel-devel is installed on host", + "type": "array", + "description": "InitContainers for Falco pod", + "default": [ + { + "name": "init-falco", + "image": "public.ecr.aws/docker/library/busybox:1.36.0", + "command": [ + "sh", + "-c", + "while [ -f /host/etc/redhat-release ] && [ -z \"$(ls /host/usr/src/kernels)\" ] ; do\necho \"waiting for kernel headers to be installed\"\nsleep 3\ndone" + ], + "volumeMounts": [ + { + "mountPath": "/host/usr", + "name": "usr-fs", + "readOnly": true + }, + { + "mountPath": "/host/etc", + "name": "etc-fs", + "readOnly": true + } + ] + } + ] + } + } + }, + "driver": { + "type": "object", + "description": "", + "properties": { + "kind": { + "type": "string", + "description": "Tell Falco which driver to use. Available options: module (kernel driver) and ebpf (eBPF probe). 
Set to `ebpf` for GKE", + "default": "module", + "comment": "Set to ebpf to enable eBPF support for Falco instead of falco-probe kernel module.\nhttps://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/troubleshoot-collection.md#falco-and-google-kubernetes-engine-gke" + }, + "loader": { + "type": "object", + "description": "", + "properties": { + "initContainer": { + "type": "object", + "description": "", + "properties": { + "image": { + "type": "object", + "description": "Init container image configuration for falco driver loader.", + "properties": { + "registry": { + "type": "string", + "description": "", + "default": "public.ecr.aws" + }, + "repository": { + "type": "string", + "description": "", + "default": "falcosecurity/falco-driver-loader", + "commented": true + } + } + } + } + } + } + } + } + }, + "falco": { + "type": "object", + "description": "", + "properties": { + "load_plugins": { + "type": "array", + "description": "Names of the plugins to be loaded by Falco.", + "default": [ + "json", + "k8saudit" + ] + }, + "json_output": { + "type": "boolean", + "description": "Output events in json.", + "default": true + }, + "rules_file": { + "comment": "The location of the rules file(s). This can contain one or more paths to\nseparate rules files.\nExplicitly add missing /etc/falco/rules.available/application_rules.yaml\nbefore https://github.com/falcosecurity/charts/issues/230 gets resolved.", + "type": "array", + "description": "The location of the rules files that will be consumed by Falco.", + "default": [ + "/etc/falco/falco_rules.yaml", + "/etc/falco/falco_rules.local.yaml", + "/etc/falco/k8s_audit_rules.yaml", + "/etc/falco/rules.d", + "/etc/falco/rules.available/application_rules.yaml" + ] + } + } + }, + "falcoctl": { + "type": "object", + "description": "Falcoctl configuration. We don't use it for now due to breaking changes.
[See this issue](https://github.com/SumoLogic/sumologic-kubernetes-collection/issues/3144).", + "properties": { + "artifact": { + "type": "object", + "description": "", + "properties": { + "follow": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "", + "default": false + } + } + }, + "install": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "", + "default": false + } + } + } + } + } + } + }, + "customRules": { + "type": "object", + "description": "Additional falco rules related to Sumo Logic Kubernetes Collection", + "properties": { + "rules_user_known_k8s_api_callers.yaml": { + "comment": "Mark the following as known k8s api callers:\n* prometheus\n* prometheus operator\n* telegraf operator\n* grafana sidecar", + "type": "string", + "description": "", + "default": "- macro: user_known_contact_k8s_api_server_activities\n condition: >\n (container.image.repository = \"quay.io/prometheus/prometheus\") or\n (container.image.repository = \"quay.io/coreos/prometheus-operator\") or\n (container.image.repository = \"quay.io/influxdb/telegraf-operator\") or\n (container.image.repository = \"kiwigrid/k8s-sidecar\")" + }, + "rules_user_sensitive_mount_containers.yaml": { + "type": "string", + "description": "", + "default": "- macro: user_sensitive_mount_containers\n condition: >\n (container.image.repository = \"falcosecurity/falco\") or\n (container.image.repository = \"quay.io/prometheus/node-exporter\")" + }, + "rules_user_privileged_containers.yaml": { + "comment": "NOTE: kube-proxy not exact matching because of regional ecr e.g.\n602401143452.dkr.ecr.us-west-1.amazonaws.com/eks/kube-proxy", + "type": "string", + "description": "", + "default": "- macro: user_privileged_containers\n condition: >\n (container.image.repository endswith \".amazonaws.com/eks/kube-proxy\")" + } + } + } + } + }, + "tailing-sidecar-operator": { + "comment": "Configure Tailing Sidecar Operator\nref: https://github.com/SumoLogic/tailing-sidecar/blob/main/helm/tailing-sidecar-operator/values.yaml", + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control deploying Tailing Sidecar Operator Helm sub-chart.", + "default": false + }, + "fullnameOverride": { + "type": "string", + "description": "Used to override the chart's full name.", + "default": "", + "commented": true, + "comment": "Put here the new name if you want to override the full name used for tailing-sidecar-operator components." 
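The three custom rule files above ship as single JSON strings with escaped newlines. As a quick sanity check, here is a minimal sketch (using PyYAML, which the CI scripts in this change already depend on) showing that one of those defaults, with its `\n` sequences expanded, parses as ordinary Falco rule YAML; the string below is copied from the `rules_user_privileged_containers.yaml` default:

```python
import yaml

# rules_user_privileged_containers.yaml default from the schema above, with the
# escaped "\n" sequences expanded back into real newlines.
rule = (
    "- macro: user_privileged_containers\n"
    "  condition: >\n"
    '    (container.image.repository endswith ".amazonaws.com/eks/kube-proxy")'
)

parsed = yaml.safe_load(rule)
assert parsed[0]["macro"] == "user_privileged_containers"
assert "kube-proxy" in parsed[0]["condition"]
```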
+ }, + "scc": { + "comment": "creation of Security Context Constraints in Openshift", + "type": "object", + "description": "", + "properties": { + "create": { + "type": "boolean", + "description": "Create OpenShift's Security Context Constraint", + "default": false + } + } + } + } + }, + "opentelemetry-operator": { + "comment": "Configure OpenTelemetry Operator - Instrumentation\nref: https://github.com/open-telemetry/opentelemetry-helm-charts/tree/main/charts/opentelemetry-operator", + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control deploying OpenTelemetry Operator Helm sub-chart.", + "default": true + }, + "instrumentationJobImage": { + "comment": "Specific for Sumo Logic chart - Instrumentation resource creation", + "type": "object", + "description": "", + "properties": { + "image": { + "type": "object", + "description": "", + "properties": { + "repository": { + "type": "string", + "description": "Name of the image repository used to apply Instrumentation resource", + "default": "sumologic/kubernetes-tools" + }, + "tag": { + "type": "string", + "description": "Name of the image tag used to apply Instrumentation resource", + "default": "2.14.0" + } + } + } + } + }, + "createDefaultInstrumentation": { + "type": "boolean", + "description": "Flag to control creation of default Instrumentation object", + "default": false + }, + "instrumentationNamespaces": { + "type": "string", + "description": "Used to create `Instrumentation` resources in specified namespaces.", + "default": "" + }, + "instrumentation": { + "comment": "Current instrumentation doesn't support customization\nfor nodejs. Traces are always enabled.\nnodejs:\ntraces:\nenabled: true", + "type": "object", + "description": "", + "properties": { + "dotnet": { + "type": "object", + "description": "", + "properties": { + "traces": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control traces export from DotNet instrumentation in `Instrumentation` resource.", + "default": true + } + } + }, + "metrics": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control metrics export from DotNet instrumentation in `Instrumentation` resource.", + "default": true + } + } + } + } + }, + "java": { + "type": "object", + "description": "", + "properties": { + "traces": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control traces export from Java instrumentation in `Instrumentation` resource.", + "default": true + } + } + }, + "metrics": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control metrics export from Java instrumentation in `Instrumentation` resource.", + "default": true + } + } + } + } + }, + "python": { + "type": "object", + "description": "", + "properties": { + "traces": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control traces export from Python instrumentation in `Instrumentation` resource.", + "default": true + } + } + }, + "metrics": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control metrics export from Python instrumentation in `Instrumentation` resource.", + "default": true + } + } + } + } + } + } + 
}, + "admissionWebhooks": { + "comment": "Specific for OpenTelemetry Operator chart values", + "type": "object", + "description": "Admission webhooks make sure only requests with correctly formatted rules will get into the Operator. They also enable the sidecar injection for OpenTelemetryCollector and Instrumentation CR's.", + "properties": { + "failurePolicy": { + "type": "string", + "description": "", + "default": "Fail" + }, + "enabled": { + "type": "boolean", + "description": "", + "default": true + }, + "objectSelector": { + "comment": "skip admission webhook on our own OpenTelemetryCollector object to avoid having to wait for operator to start", + "type": "object", + "description": "", + "properties": { + "matchExpressions": { + "type": "array", + "description": "", + "default": [ + { + "key": "sumologic.com/component", + "operator": "NotIn", + "values": [ + "metrics" + ] + } + ] + } + } + }, + "certManager": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "", + "default": false + }, + "issuerRef": { + "type": "object", + "description": "", + "default": {} + } + } + }, + "autoGenerateCert": { + "type": "boolean", + "description": "", + "default": true + } + } + }, + "manager": { + "type": "object", + "description": "", + "properties": { + "collectorImage": { + "type": "object", + "description": "", + "properties": { + "repository": { + "type": "string", + "description": "The default collector image repository for OpenTelemetryCollector CRDs.", + "default": "public.ecr.aws/sumologic/sumologic-otel-collector" + }, + "tag": { + "type": "string", + "description": "The default collector image tag for OpenTelemetryCollector CRDs.", + "default": "0.85.0-sumo-0" + } + } + }, + "env": { + "type": "object", + "description": "Additional environment variables for opentelemetry-operator helm chart.", + "default": {} + }, + "resources": { + "type": "object", + "description": "", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "cpu": { + "type": "string", + "description": "Used to set limit CPU for OpenTelemetry-Operator Manager.", + "default": "250m" + }, + "memory": { + "type": "string", + "description": "Used to set limit Memory for OpenTelemetry-Operator Manager.", + "default": "512Mi" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "cpu": { + "type": "string", + "description": "Used to set requested CPU for OpenTelemetry-Operator Manager.", + "default": "150m" + }, + "memory": { + "type": "string", + "description": "Used to set requested Memory for OpenTelemetry-Operator Manager.", + "default": "256Mi" + } + } + } + } + } + } + } + } + }, + "pvcCleaner": { + "type": "object", + "description": "", + "comment": "pvcCleaner deletes unused PVCs", + "properties": { + "metrics": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to enable cleaning unused PVCs for otelcol metrics statefulsets.", + "default": false + } + } + }, + "logs": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to enable cleaning unused PVCs for otelcol logs statefulsets.", + "default": false + } + } + }, + "job": { + "type": "object", + "description": "", + "properties": { + "image": { + "type": "object", + "description": "", + "properties": { + "repository": { + "type": "string", + "description": "Image repository for pvcCleaner docker 
containers.", + "default": "public.ecr.aws/sumologic/kubernetes-tools-kubectl" + }, + "tag": { + "type": "string", + "description": "Image tag for pvcCleaner docker containers.", + "default": "2.20.0" + }, + "pullPolicy": { + "type": "string", + "description": "Image pullPolicy for pvcCleaner docker containers.", + "default": "IfNotPresent" + } + } + }, + "resources": { + "type": "object", + "description": "Resource requests and limits for the pvcCleaner containers.", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "256Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "2000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "64Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "100m" + } + } + } + } + }, + "nodeSelector": { + "type": "object", + "description": "Node selector for pvcCleaner job. [See docs/best-practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "Add tolerations for the pvcCleaner job.", + "default": [], + "comment": "Node tolerations for server scheduling to nodes with taints\nRef: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\n" + }, + "affinity": { + "type": "object", + "description": "Add affinity and anti-affinity for the pvcCleaner job.", + "default": {}, + "comment": "Affinity and anti-affinity\nRef: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity\n" + }, + "podLabels": { + "type": "object", + "description": "Additional labels for the pvcCleaner container.", + "default": {} + }, + "podAnnotations": { + "type": "object", + "description": "Additional annotations for for the pvcCleaner container.", + "default": {}, + "comment": "Add custom annotations" + }, + "schedule": { + "type": "string", + "description": "Schedule for cronJobs", + "default": "*/15 * * * *", + "comment": "Schedule for cronJobs" + }, + "securityContext": { + "type": "object", + "description": "The securityContext configuration for the pvcCleaner.", + "comment": "securityContext for pvcCleaner pods", + "properties": { + "runAsUser": { + "type": "integer", + "description": "", + "default": 1000 + } + } + } + } + } + } + } + } +} \ No newline at end of file diff --git a/deploy/helm/sumologic/values.yaml b/deploy/helm/sumologic/values.yaml index a1c6db792e..d7acba8ccc 100644 --- a/deploy/helm/sumologic/values.yaml +++ b/deploy/helm/sumologic/values.yaml @@ -1,97 +1,77 @@ ## Sumo Logic Kubernetes Collection configuration file ## All the comments start with two or more # characters - -nameOverride: "" -fullnameOverride: "" +nameOverride: '' +fullnameOverride: '' ## Use the same namespace as namespaceOverride in 'kube-prometheus-stack.namespaceOverride' if Prometheus setup is also enabled -namespaceOverride: "" - +namespaceOverride: '' sumologic: - ### Setup - ## If enabled, a pre-install hook will create Collector and Sources in Sumo Logic setupEnabled: true - ## If enabled, a pre-delete hook will destroy Collector in Sumo Logic cleanupEnabled: false - ## If enabled, accessId and accessKey will be sourced from Secret Name given ## Be sure to include at least the following env variables in your secret ## (1) SUMOLOGIC_ACCESSID, (2) SUMOLOGIC_ACCESSKEY # envFromSecret: sumo-api-secret - ## Sumo 
access ID - # accessId: "" - + # accessId: '' ## Sumo access key - # accessKey: "" - + # accessKey: '' ## Sumo API endpoint; Leave blank for automatic endpoint discovery and redirection ## ref: https://help.sumologic.com/docs/api/getting-started#sumo-logic-endpoints-by-deployment-and-firewall-security - endpoint: "" - + endpoint: '' ## proxy urls - httpProxy: "" - httpsProxy: "" + httpProxy: '' + httpsProxy: '' ## Exclude Kubernetes internal traffic from proxy noProxy: kubernetes.default.svc - ## Collector name - # collectorName: "" - + # collectorName: '' ## Cluster name: Note spaces are not allowed and will be replaced with dashes. - clusterName: "kubernetes" - + clusterName: kubernetes ## Configuration of Kubernetes for Terraform client ## https://www.terraform.io/docs/providers/kubernetes/index.html#argument-reference ## All double quotes should be escaped here regarding Terraform syntax cluster: - host: "https://kubernetes.default.svc" - # username: - # password: - # insecure: - # client_certificate: - # client_key: - cluster_ca_certificate: '${file("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt")}' - # config_path: - # config_context: - # config_context_auth_info: - # config_context_cluster: - token: '${file("/var/run/secrets/kubernetes.io/serviceaccount/token")}' + host: https://kubernetes.default.svc + # username: '' + # password: '' + # insecure: '' + # client_certificate: '' + # client_key: '' + cluster_ca_certificate: ${file("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt")} + # config_path: '' + # config_context: '' + # config_context_auth_info: '' + # config_context_cluster: '' + token: ${file("/var/run/secrets/kubernetes.io/serviceaccount/token")} # exec: - # api_version: - # command: - # args: [] + # api_version: '' + # command: '' + # args: '' # env: {} - ## If you set it to false, it would set EXCLUDE_NAMESPACE= ## and not add the Otelcol logs and Prometheus remotestorage metrics. collectionMonitoring: true - ## Optionally specify an array of pullSecrets. ## They will be added to serviceaccount that is used for Sumo Logic's ## deployments and statefulsets. - + ## ## Secrets must be manually created in the namespace. 
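Most of this values.yaml hunk only changes quoting style (double quotes to single quotes or plain scalars). A minimal sketch showing that the old and new spellings of the Terraform-style `cluster_ca_certificate` reference load identically, so nothing changes semantically:

```python
import yaml

# Old (quoted) vs. new (plain scalar) spelling of the same Terraform reference.
old = """cluster_ca_certificate: '${file("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt")}'"""
new = """cluster_ca_certificate: ${file("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt")}"""

# Both parse to the identical mapping, so Helm sees an unchanged value.
assert yaml.safe_load(old) == yaml.safe_load(new)
```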
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: - # - name: myRegistryKeySecretName - + # - name: myRegistryKeySecretName ## Add custom labels to the following sumologic resources(otelcol sts, setup job, otelcol deployment) podLabels: {} - ## Add custom annotations to the following sumologic resources(otelcol sts, setup job, otelcol deployment) podAnnotations: {} - ## Add custom annotations to sumologic serviceAccounts serviceAccount: annotations: {} - ## creation of Security Context Constraints in Openshift scc: create: false - setup: ## uncomment to force collection installation (disables k8s version verification) # force: true @@ -107,7 +87,7 @@ sumologic: ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: - # - name: myRegistryKeySecretName + # - name: myRegistryKeySecretName resources: limits: memory: 256Mi @@ -116,47 +96,37 @@ sumologic: memory: 64Mi cpu: 200m nodeSelector: {} - ## Add custom labels only to setup job pod - ## Node tolerations for server scheduling to nodes with taints ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ ## tolerations: [] - # - key: null + # - effect: NoSchedule + # key: null # operator: Exists - # effect: "NoSchedule" - ## Affinity and anti-affinity ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## affinity: {} - + ## Add custom labels only to setup job pod podLabels: {} ## Add custom annotations only to setup job pod podAnnotations: {} - ## uncomment for the debug mode (disables the automatic run of the setup.sh script) # debug: true - monitors: ## If enabled, a pre-install hook will create k8s monitors in Sumo Logic enabled: true - ## The installed monitors default status: enabled/disabled monitorStatus: enabled - ## A list of emails to send notifications from monitors notificationEmails: [] - dashboards: ## If enabled, a pre-install hook will install k8s dashboards in Sumo Logic enabled: true - collector: ## Configuration of additional collector fields ## https://help.sumologic.com/docs/manage/fields/#http-source-fields fields: {} - ## Configuration of http sources ## See docs/Terraform.md for more information ## name: source name visible in sumologic platform @@ -198,14 +168,13 @@ sumologic: ## Properties can be used to extend default settings, such as processing rules, fields etc properties: default_date_formats: - ## Ensures that timestamp key has precedence over timestamp auto discovery - - format: epoch - locator: '\"timestamp\":(\\d+)' - + ## Ensures that timestamp key has precedence over timestamp auto discovery + - format: epoch + locator: \"timestamp\":(\\d+) # filters: - # - name: "Test Exclude Debug" - # filter_type: "Exclude" - # regexp: ".*DEBUG.*" + # - filter_type: Exclude + # name: Test Exclude Debug + # regexp: .*DEBUG.* default-otlp: name: logs-otlp config-name: endpoint-logs-otlp @@ -217,9 +186,9 @@ sumologic: config-name: endpoint-events properties: default_date_formats: - ## Ensures that timestamp key has precedence over timestamp auto discovery - - format: epoch - locator: '\"timestamp\":(\\d+)' + ## Ensures that timestamp key has precedence over timestamp auto discovery + - format: epoch + locator: \"timestamp\":(\\d+) default-otlp: name: events-otlp config-name: endpoint-events-otlp @@ -236,33 +205,25 @@ sumologic: config-name: endpoint-traces-otlp properties: content_type: Otlp - - ### Global configuration for OpenTelemetry 
Collector + otelcolImage: - repository: "public.ecr.aws/sumologic/sumologic-otel-collector" - tag: "0.85.0-sumo-0" - + repository: public.ecr.aws/sumologic/sumologic-otel-collector + tag: 0.85.0-sumo-0 ## Add a -fips suffix to all image tags. With default tags, this results in FIPS-compliant otel images. ## See https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/fips.md for more information. addFipsSuffix: false - - ### Configuration for collection of Kubernetes events + ## Configuration for collection of Kubernetes events events: enabled: true - ## Source name for the Events source. Default: "events" - sourceName: "events" - + sourceName: events ## Source category for the Events source. Default: "" which is resolved to "{clusterName}/events" - # sourceCategory: "kubernetes/events" - + # sourceCategory: kubernetes/events ## Used to replace '-' with another character. - sourceCategoryReplaceDash: "/" - + sourceCategoryReplaceDash: / persistence: enabled: true size: 10Gi - ## Configuration for the Persistent Volume and Persistent Volume Claim ## where the storage is kept persistentVolume: @@ -270,26 +231,23 @@ sumologic: accessMode: ReadWriteOnce ## Add custom labels to otelcol event statefulset PVC pvcLabels: {} - # storageClass: - + # storageClass: '' sourceType: http - - ### Logs configuration + ## Logs configuration ## Set the enabled flag to false for disabling logs ingestion altogether. logs: enabled: true - collector: otelcol: enabled: true ## Experimental otelcloudwatch: enabled: false - roleArn: "" + roleArn: '' ## Configure persistence for the cloudwatch collector persistence: enabled: true - region: "" + region: '' pollInterval: 1m ## A map of log group and stream prefixes ## This is a map of log group and stream prefix, for example: @@ -297,22 +255,20 @@ ## fluent-bit: ## names: [fluent-bit] logGroups: {} - multiline: enabled: true - first_line_regex: "^\\[?\\d{4}-\\d{1,2}-\\d{1,2}.\\d{2}:\\d{2}:\\d{2}" + first_line_regex: ^\[?\d{4}-\d{1,2}-\d{1,2}.\d{2}:\d{2}:\d{2} ## Additional configurations take precedence over first_line_regex and are executed only for the first matching condition ## ## Example: ## - first_line_regex: "^@@@@ First Line" ## condition: 'attributes["k8s.namespace.name"] == "foo"' ## - first_line_regex: "^--- First Line" - ## condition: 'attributes["k8s.container.name"] matches "^bar-.*' + ## condition: 'attributes["k8s.container.name"] matches "^bar-.*" ## ## NOTE: See below link for full reference: ## https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/collecting-container-logs.md#conditional-multiline-log-parsing additional: [] - container: enabled: true ## Format to post logs into Sumo: fields, json, json_merge, or text. @@ -320,137 +276,118 @@ ## NOTE: Multiline log detection works differently for `text` format. See below link for full reference: ## https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/collecting-container-logs.md#text-log-format format: fields - ## When set to `true`, preserves the `time` attribute, which is a string representation of the `timestamp` attribute. keep_time_attribute: false - otelcol: ## Extra processors for container logs. See [/docs/collecting-container-logs.md](/docs/collecting-container-logs.md) for details. extraProcessors: [] - ## Set the _sourceHost metadata field in Sumo Logic. - sourceHost: "" + sourceHost: '' ## Set the _sourceName metadata field in Sumo Logic.
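The `first_line_regex` above becomes a plain scalar in this change, so the backslashes are no longer doubled. A small sketch of what the pattern matches; the sample log lines are invented for illustration:

```python
import re

# first_line_regex from sumologic.logs.multiline above. As a plain YAML scalar
# the backslashes are literal, so the pattern reaches the collector unchanged.
first_line = re.compile(r"^\[?\d{4}-\d{1,2}-\d{1,2}.\d{2}:\d{2}:\d{2}")

assert first_line.match("2023-09-21 14:02:11 starting up")            # new log record
assert first_line.match("[2023-09-21 14:02:11] starting up")          # bracketed variant
assert not first_line.match("  at java.lang.Thread.run(Thread.java)") # continuation line
```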
- sourceName: "%{namespace}.%{pod}.%{container}" + sourceName: '%{namespace}.%{pod}.%{container}' ## Set the _sourceCategory metadata field in Sumo Logic. - sourceCategory: "%{namespace}/%{pod_name}" + sourceCategory: '%{namespace}/%{pod_name}' ## Set the prefix, for _sourceCategory metadata. - sourceCategoryPrefix: "kubernetes/" + sourceCategoryPrefix: kubernetes/ ## Used to replace - with another character. - sourceCategoryReplaceDash: "/" - + sourceCategoryReplaceDash: / ## A regular expression for containers. ## Matching containers will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). - excludeContainerRegex: "" + excludeContainerRegex: '' ## A regular expression for hosts. ## Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). - excludeHostRegex: "" + excludeHostRegex: '' ## A regular expression for namespaces. ## Matching namespaces will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). - excludeNamespaceRegex: "" + excludeNamespaceRegex: '' ## A regular expression for pods. ## Matching pods will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). - excludePodRegex: "" - + excludePodRegex: '' ## Defines whether container-level pod annotations are enabled. perContainerAnnotationsEnabled: false ## Defines the list of prefixes of container-level pod annotations. perContainerAnnotationPrefixes: [] - systemd: enabled: true ## systemd units to collect logs from # units: - # - docker.service - + # - docker.service otelcol: ## Extra processors for systemd logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details. extraProcessors: [] - ## Set the _sourceName metadata field in Sumo Logic. - sourceName: "%{_sourceName}" + sourceName: '%{_sourceName}' ## Set the _sourceCategory metadata field in Sumo Logic. - sourceCategory: "system" + sourceCategory: system ## Set the prefix, for _sourceCategory metadata. - sourceCategoryPrefix: "kubernetes/" + sourceCategoryPrefix: kubernetes/ ## Used to replace - with another character. - sourceCategoryReplaceDash: "/" - + sourceCategoryReplaceDash: / ## A regular expression for facility. ## Matching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). - excludeFacilityRegex: "" + excludeFacilityRegex: '' ## A regular expression for hosts. ## Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). - excludeHostRegex: "" + excludeHostRegex: '' ## A regular expression for priority. ## Matching priority will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). - excludePriorityRegex: "" + excludePriorityRegex: '' ## A regular expression for unit. ## Matching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). - excludeUnitRegex: "" - + excludeUnitRegex: '' kubelet: otelcol: ## Extra processors for kubelet logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details. extraProcessors: [] - ## Set the _sourceName metadata field in Sumo Logic. - sourceName: "k8s_kubelet" + sourceName: k8s_kubelet ## Set the _sourceCategory metadata field in Sumo Logic. - sourceCategory: "kubelet" + sourceCategory: kubelet ## Set the prefix, for _sourceCategory metadata. 
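The container-log settings above compose the final `_sourceCategory` from a template, a prefix, and a dash replacement. A rough illustration of that composition follows; it approximates the chart's behavior, and the function name and sample pod are made up for the example:

```python
# Approximation of how sourceCategory, sourceCategoryPrefix and
# sourceCategoryReplaceDash combine: fill the template, prepend the prefix,
# then replace dashes.
def source_category(namespace, pod_name,
                    template="%{namespace}/%{pod_name}",
                    prefix="kubernetes/", replace_dash="/"):
    filled = template.replace("%{namespace}", namespace).replace("%{pod_name}", pod_name)
    return (prefix + filled).replace("-", replace_dash)

assert source_category("sumologic", "otelcol-logs-0") == "kubernetes/sumologic/otelcol/logs/0"
```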
- sourceCategoryPrefix: "kubernetes/" + sourceCategoryPrefix: kubernetes/ ## Used to replace - with another character. - sourceCategoryReplaceDash: "/" - + sourceCategoryReplaceDash: / ## A regular expression for facility. ## Matching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). - excludeFacilityRegex: "" + excludeFacilityRegex: '' ## A regular expression for hosts. ## Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). - excludeHostRegex: "" + excludeHostRegex: '' ## A regular expression for priority. ## Matching priority will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). - excludePriorityRegex: "" + excludePriorityRegex: '' ## A regular expression for unit. ## Matching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). - excludeUnitRegex: "" - + excludeUnitRegex: '' ## Fields to be created at Sumo Logic to ensure logs are tagged with ## relevant metadata. ## https://help.sumologic.com/docs/manage/fields/#manage-fields fields: - - cluster - - container - - daemonset - - deployment - - host - - namespace - - node - - pod - - service - - statefulset - + - cluster + - container + - daemonset + - deployment + - host + - namespace + - node + - pod + - service + - statefulset ## Additional fields to be created in Sumo Logic. ## https://help.sumologic.com/docs/manage/fields/#manage-fields additionalFields: [] - sourceType: http - - ### Metrics configuration + ## Metrics configuration ## Set the enabled flag to false for disabling metrics ingestion altogether. metrics: enabled: true - + ## Otel metrics collector. Replaces Prometheus. + ## To enable, you need opentelemetry-operator enabled as well. collector: - ### Otel metrics collector. Replaces Prometheus. - ## To enable, you need opentelemetry-operator enabled as well. otelcol: - enabled: true - + enabled: false ## Default scrape interval scrapeInterval: 30s - ## Option to turn autoscaling on for otelcol and specify params for HPA. ## Autoscaling needs metrics-server to access cpu metrics. autoscaling: @@ -459,19 +396,14 @@ maxReplicas: 10 targetCPUUtilizationPercentage: 70 targetMemoryUtilizationPercentage: 70 - nodeSelector: {} - ## Add custom annotations only to metrics otelcol sts pods podAnnotations: {} ## Add custom labels only to metrics otelcol sts pods podLabels: {} - ## Option to define priorityClassName to assign a priority class to pods. - priorityClassName: - + priorityClassName: '' replicaCount: 1 - resources: limits: memory: 2Gi @@ -479,41 +411,32 @@ requests: memory: 768Mi cpu: 100m - ## Selector for ServiceMonitors used for target discovery. By default, this selects resources created by this Chart. ## See https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr - # serviceMonitorSelector: - + # serviceMonitorSelector: {} ## Selector for PodMonitors used for target discovery. By default, this selects resources created by this Chart. ## See https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr - # podMonitorSelector: - + # podMonitorSelector: {} securityContext: ## The group ID of all processes in the statefulset containers. This can be anything, but it does need to be set.
## The default is 0 (root), and containers don't have write permissions for volumes in that case. fsGroup: 999 tolerations: [] - ## Configuration for kubelet metrics kubelet: enabled: true - ## Configuration for cAdvisor metrics cAdvisor: enabled: true - ## Enable collection of metrics from Pods annotated with prometheus.io/* keys. ## See https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/collecting-application-metrics.md#application-metrics-are-exposed-one-endpoint-scenario for more information. annotatedPods: enabled: true - ## Allocation strategy for the scrape target allocator. Valid values are: least-weighted and consistent-hashing. ## See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocator - # allocationStrategy: least-weighted - + allocationStrategy: least-weighted ## Default metric filters for Sumo Apps enableDefaultFilters: false - ## By default, the Helm Chart collects some high-cardinality histogram metrics, as Sumo Apps make use of the sum and count components. ## This setting causes the metrics collector to drop the actual histogram buckets, keeping only the sum and the count. ## This affects the following metrics: @@ -521,32 +444,30 @@ ## - coredns_dns_request_duration_seconds ## - kubelet_runtime_operations_duration_seconds dropHistogramBuckets: true - otelcol: ## Includes additional processors into pipelines. ## It can be used for filtering metrics, renaming, changing metadata and so on. ## This is a list of objects, for example: ## extraProcessors: - ## - filterprocessor: - ## exclude: - ## match_type: strict - ## metric_names: - ## - hello_world - ## - hello/world + ## - filterprocessor: + ## exclude: + ## match_type: strict + ## metric_names: + ## - hello_world + ## - hello/world extraProcessors: [] - - ### Enable a load balancing proxy for Prometheus remote writes. + ## Enable a load balancing proxy for Prometheus remote writes. ## Prometheus remote write uses a single persistent HTTP connection per target, ## which interacts poorly with TCP load balancing with iptables that K8s Services do. ## Use a real HTTP load balancer for this instead. ## This is an advanced feature, enable only if you're experiencing performance ## issues with metrics metadata enrichment. remoteWriteProxy: - enabled: false + enabled: true config: ## Increase this if you've increased samples_per_send in Prometheus to prevent nginx ## from spilling proxied request bodies to disk - clientBodyBufferSize: "64k" + clientBodyBufferSize: 64k ## This feature autodetects how much CPU is assigned to the nginx instance and sets ## the right amount of workers based on that. Disable to use the default of 8 workers. workerCountAutotune: true @@ -578,102 +499,96 @@ timeoutSeconds: 3 successThreshold: 1 failureThreshold: 3 - securityContext: {} nodeSelector: {} tolerations: [] affinity: {} ## Option to define priorityClassName to assign a priority class to pods.
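The metrics collector's `autoscaling` block above drives a standard Kubernetes HPA. A sketch of the scaling rule using the `targetCPUUtilizationPercentage: 70` and `maxReplicas: 10` values shown in this hunk; `minReplicas: 3` is assumed for the example, since the actual value falls outside the hunk:

```python
import math

# Standard HPA rule: desiredReplicas = ceil(current * usage / target),
# clamped to the configured bounds.
def desired_replicas(current, cpu_utilization, target=70, min_r=3, max_r=10):
    return min(max_r, max(min_r, math.ceil(current * cpu_utilization / target)))

assert desired_replicas(3, 140) == 6   # sustained load doubles the fleet
assert desired_replicas(6, 35) == 3    # back down to minReplicas when idle
```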
- priorityClassName: - + priorityClassName: '' ## Add custom labels only to metrics sts pods podLabels: {} ## Add custom annotations only to metrics sts pods podAnnotations: {} - ## Prometheus serviceMonitors related to Sumo Logic services ## They are applied only if kube-prometheus-stack is enabled serviceMonitors: - - name: collection-sumologic-otelcol-logs - additionalLabels: + - additionalLabels: + sumologic.com/app: otelcol-logs + endpoints: + - port: otelcol-metrics + name: collection-sumologic-otelcol-logs + selector: + matchLabels: sumologic.com/app: otelcol-logs - endpoints: - - port: otelcol-metrics - selector: - matchLabels: - sumologic.com/app: otelcol-logs - sumologic.com/scrape: "true" - - name: collection-sumologic-otelcol-metrics - additionalLabels: + sumologic.com/scrape: 'true' + - additionalLabels: + sumologic.com/app: otelcol-metrics + endpoints: + - port: otelcol-metrics + name: collection-sumologic-otelcol-metrics + selector: + matchLabels: sumologic.com/app: otelcol-metrics - endpoints: - - port: otelcol-metrics - selector: - matchLabels: - sumologic.com/app: otelcol-metrics - sumologic.com/scrape: "true" - - name: collection-sumologic-metrics-collector - additionalLabels: - sumologic.com/app: otelcol-metrics - endpoints: - - port: monitoring - selector: - matchLabels: - sumologic.com/app: otelcol - sumologic.com/component: metrics - sumologic.com/scrape: "true" - - name: collection-sumologic-otelcol-logs-collector - additionalLabels: + sumologic.com/scrape: 'true' + - additionalLabels: + sumologic.com/app: otelcol-metrics + endpoints: + - port: monitoring + name: collection-sumologic-metrics-collector + selector: + matchLabels: + sumologic.com/app: otelcol + sumologic.com/component: metrics + sumologic.com/scrape: 'true' + - additionalLabels: + sumologic.com/app: otelcol-logs-collector + endpoints: + - port: metrics + name: collection-sumologic-otelcol-logs-collector + selector: + matchLabels: sumologic.com/app: otelcol-logs-collector - endpoints: - - port: metrics - selector: - matchLabels: - sumologic.com/app: otelcol-logs-collector - sumologic.com/scrape: "true" - - name: collection-sumologic-otelcol-events - additionalLabels: + sumologic.com/scrape: 'true' + - additionalLabels: + sumologic.com/app: otelcol-events + endpoints: + - port: otelcol-metrics + name: collection-sumologic-otelcol-events + selector: + matchLabels: sumologic.com/app: otelcol-events - endpoints: - - port: otelcol-metrics - selector: - matchLabels: - sumologic.com/app: otelcol-events - sumologic.com/scrape: "true" - - name: collection-sumologic-otelcol-traces - additionalLabels: - sumologic.com/app: otelcol - endpoints: - - port: metrics - selector: - matchLabels: - sumologic.com/component: instrumentation - sumologic.com/scrape: "true" - - name: collection-sumologic-prometheus - endpoints: - - port: http-web - path: /metrics - metricRelabelings: - - action: keep - regex: prometheus_remote_storage_.* - sourceLabels: [__name__] - selector: - matchLabels: - app: kube-prometheus-stack-prometheus - + sumologic.com/scrape: 'true' + - additionalLabels: + sumologic.com/app: otelcol + endpoints: + - port: metrics + name: collection-sumologic-otelcol-traces + selector: + matchLabels: + sumologic.com/component: instrumentation + sumologic.com/scrape: 'true' + - endpoints: + - metricRelabelings: + - action: keep + regex: prometheus_remote_storage_.* + sourceLabels: + - __name__ + path: /metrics + port: http-web + name: collection-sumologic-prometheus + selector: + matchLabels: + app: 
kube-prometheus-stack-prometheus ## The type of source we send to in Sumo. The possible values are http and otlp. ## Consult the documentation for more information. sourceType: http - - ### Traces configuration + ## Traces configuration ## Set the enabled flag to false to disable traces ingestion. traces: enabled: true ## How many spans per request should be sent to the receiver spans_per_request: 100 - sourceType: http - ## Configure metrics-server ## ref: https://github.com/bitnami/charts/blob/master/bitnami/metrics-server/values.yaml metrics-server: @@ -681,22 +596,20 @@ metrics-server: ## This is required before enabling autoscaling unless you have an existing metrics-server in the cluster. enabled: false ## Put here the new name if you want to override the full name used for metrics-server components. - # fullnameOverride: "" - + # fullnameOverride: '' apiService: create: true extraArgs: - - --kubelet-insecure-tls=true - - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-insecure-tls=true + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname ## Optionally specify image options for metrics-server # image: - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## + # ## Optionally specify an array of imagePullSecrets. + # ## Secrets must be manually created in the namespace. + # ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + # ## # pullSecrets: - # - imagepullsecret - + # - imagepullsecret ## Configure kube-prometheus-stack ## ref: https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml kube-prometheus-stack: @@ -708,27 +621,21 @@ kube-prometheus-stack: ## Do not set this flag explicitly to `true` while at the same time setting `sumologic.metrics.enabled: false`, ## as this will make Prometheus try to write to a non-existent metrics enrichment service. # enabled: false - # global: - ## Reference to one or more secrets to be used when pulling images - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## + # ## Reference to one or more secrets to be used when pulling images + # ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + # ## # imagePullSecrets: - # - name: "image-pull-secret" - + # - name: image-pull-secret ## Put here the new name if you want to override the full name used for Kube Prometheus Stack components. - # fullnameOverride: "" - + # fullnameOverride: '' ## Put here the new namespace if you want to override the namespace used for Kube Prometheus Stack components. - # namespaceOverride: "" - + # namespaceOverride: '' ## Provide a target gitVersion of K8S, in case .Capabilities.KubeVersion is not available (e.g. helm template). ## Changing this may break Sumo Logic apps.
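The `serviceMonitors` entries above were re-emitted with keys in alphabetical order (`additionalLabels` before `endpoints` before `name`, and so on). YAML mappings are unordered, so a quick check that the reordering is lossless, using a trimmed copy of the first entry:

```python
import yaml

# First serviceMonitors entry, in its old and new key order.
before = """
- name: collection-sumologic-otelcol-logs
  additionalLabels:
    sumologic.com/app: otelcol-logs
  endpoints:
    - port: otelcol-metrics
"""
after = """
- additionalLabels:
    sumologic.com/app: otelcol-logs
  endpoints:
    - port: otelcol-metrics
  name: collection-sumologic-otelcol-logs
"""
assert yaml.safe_load(before) == yaml.safe_load(after)
```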
- # kubeTargetVersionOverride: "" - + # kubeTargetVersionOverride: '' ## Labels to apply to all kube-prometheus-stack resources commonLabels: {} - defaultRules: rules: alertmanager: false @@ -744,7 +651,7 @@ kube-prometheus-stack: kubelet: false kubeProxy: false kubePrometheusGeneral: false - kubePrometheusNodeRecording: false + kubePrometheusNodeRecording: true kubernetesApps: false kubernetesResources: false kubernetesStorage: false @@ -753,32 +660,32 @@ kube-prometheus-stack: kubeSchedulerRecording: false kubeStateMetrics: false network: false - node: false + node: true nodeExporterAlerting: false nodeExporterRecording: false prometheus: false prometheusOperator: false windows: false - ## NOTE changing the serviceMonitor scrape interval to be >1m can result in metrics from recording ## rules to be missing and empty panels in Sumo Logic Kubernetes apps. kubeApiServer: serviceMonitor: ## Scrape interval. If not set, the Prometheus default scrape interval is used. - interval: + interval: '' ## see docs/scraped_metrics.md ## apiserver_request_count ## apiserver_request_total ## apiserver_request_duration_seconds_count ## apiserver_request_duration_seconds_sum metricRelabelings: - - action: keep - regex: (?:apiserver_request_(?:count|total)|apiserver_request_(?:duration_seconds)_(?:count|sum)) - sourceLabels: [__name__] + - action: keep + regex: (?:apiserver_request_(?:count|total)|apiserver_request_(?:duration_seconds)_(?:count|sum)) + sourceLabels: + - __name__ kubelet: serviceMonitor: ## Scrape interval. If not set, the Prometheus default scrape interval is used. - interval: + interval: '' ## Enable scraping /metrics/probes from kubelet's service probes: false ## Enable scraping /metrics/resource/v1alpha1 from kubelet's service @@ -802,11 +709,12 @@ kube-prometheus-stack: ## kubelet_runtime_operations_latency_microseconds_count ## kubelet_runtime_operations_latency_microseconds_sum metricRelabelings: - - action: keep - regex: (?:kubelet_docker_operations_errors(?:|_total)|kubelet_(?:docker|runtime)_operations_duration_seconds_(?:count|sum)|kubelet_running_(?:container|pod)(?:_count|s)|kubelet_(:?docker|runtime)_operations_latency_microseconds(?:|_count|_sum)) - sourceLabels: [__name__] - - action: labeldrop - regex: id + - action: keep + regex: (?:kubelet_docker_operations_errors(?:|_total)|kubelet_(?:docker|runtime)_operations_duration_seconds_(?:count|sum)|kubelet_running_(?:container|pod)(?:_count|s)|kubelet_(:?docker|runtime)_operations_latency_microseconds(?:|_count|_sum)) + sourceLabels: + - __name__ + - action: labeldrop + regex: id ## see docs/scraped_metrics.md ## cadvisor container metrics ## container_cpu_usage_seconds_total @@ -814,33 +722,36 @@ kube-prometheus-stack: ## container_fs_usage_bytes ## container_memory_working_set_bytes ## container_cpu_cfs_throttled_seconds_total - ## ## cadvisor aggregate container metrics ## container_network_receive_bytes_total ## container_network_transmit_bytes_total cAdvisorMetricRelabelings: - - action: keep - regex: (?:container_cpu_usage_seconds_total|container_memory_working_set_bytes|container_fs_usage_bytes|container_fs_limit_bytes|container_cpu_cfs_throttled_seconds_total|container_network_receive_bytes_total|container_network_transmit_bytes_total) - sourceLabels: [__name__] - ## Drop container metrics with container tag set to an empty string: - ## these are the pod aggregated container metrics which can be aggregated - ## in Sumo anyway. There's also some cgroup-specific time series we also - ## do not need. 
- - action: drop - sourceLabels: [__name__, container] - regex: (?:container_cpu_usage_seconds_total|container_memory_working_set_bytes|container_fs_usage_bytes|container_fs_limit_bytes);$ - - action: labelmap - regex: container_name - replacement: container - - action: drop - sourceLabels: [container] - regex: POD - - action: labeldrop - regex: (id|name) + - action: keep + regex: (?:container_cpu_usage_seconds_total|container_memory_working_set_bytes|container_fs_usage_bytes|container_fs_limit_bytes|container_cpu_cfs_throttled_seconds_total|container_network_receive_bytes_total|container_network_transmit_bytes_total) + sourceLabels: + - __name__ + ## Drop container metrics with container tag set to an empty string: + ## these are the pod aggregated container metrics which can be aggregated + ## in Sumo anyway. There's also some cgroup-specific time series we also + ## do not need. + - action: drop + regex: (?:container_cpu_usage_seconds_total|container_memory_working_set_bytes|container_fs_usage_bytes|container_fs_limit_bytes);$ + sourceLabels: + - __name__ + - container + - action: labelmap + regex: container_name + replacement: container + - action: drop + regex: POD + sourceLabels: + - container + - action: labeldrop + regex: (id|name) kubeControllerManager: serviceMonitor: ## Scrape interval. If not set, the Prometheus default scrape interval is used. - interval: + interval: '' ## see docs/scraped_metrics.md ## controller manager metrics ## https://kubernetes.io/docs/concepts/cluster-administration/monitoring/#kube-controller-manager-metrics @@ -849,13 +760,14 @@ kube-prometheus-stack: ## cloudprovider_aws_api_request_duration_seconds_count ## cloudprovider_aws_api_request_duration_seconds_sum metricRelabelings: - - action: keep - regex: (?:cloudprovider_.*_api_request_duration_seconds.*) - sourceLabels: [__name__] + - action: keep + regex: (?:cloudprovider_.*_api_request_duration_seconds.*) + sourceLabels: + - __name__ coreDns: serviceMonitor: ## Scrape interval. If not set, the Prometheus default scrape interval is used. - interval: + interval: '' ## see docs/scraped_metrics.md ## coredns: ## coredns_cache_entries @@ -873,13 +785,14 @@ kube-prometheus-stack: ## process_open_fds ## process_resident_memory_bytes metricRelabelings: - - action: keep - regex: (?:coredns_cache_(entries|(hits|misses)_total)|coredns_dns_request_duration_seconds_(count|sum)|coredns_(forward_requests|dns_requests|dns_responses)_total|process_(cpu_seconds_total|open_fds|resident_memory_bytes)) - sourceLabels: [__name__] + - action: keep + regex: (?:coredns_cache_(entries|(hits|misses)_total)|coredns_dns_request_duration_seconds_(count|sum)|coredns_(forward_requests|dns_requests|dns_responses)_total|process_(cpu_seconds_total|open_fds|resident_memory_bytes)) + sourceLabels: + - __name__ kubeEtcd: serviceMonitor: ## Scrape interval. If not set, the Prometheus default scrape interval is used. 
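The cAdvisor `drop` rule above relies on Prometheus joining `sourceLabels` with `;` before matching, so the trailing `;$` in the regex only fires when the `container` label is empty. A minimal sketch of that evaluation, with the regex broken across lines for readability:

```python
import re

# Joined source labels ("<__name__>;<container>") matched against the drop
# regex above. Prometheus anchors relabel regexes, which re.fullmatch mirrors.
DROP = (r"(?:container_cpu_usage_seconds_total|container_memory_working_set_bytes"
        r"|container_fs_usage_bytes|container_fs_limit_bytes);$")

def should_drop(name, container):
    return re.fullmatch(DROP, f"{name};{container}") is not None

assert should_drop("container_fs_usage_bytes", "")         # pod-level aggregate: dropped
assert not should_drop("container_fs_usage_bytes", "app")  # real container: kept
```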
- interval: + interval: '' ## see docs/scraped_metrics.md ## etcd_request_cache_get_duration_seconds_count ## etcd_request_cache_get_duration_seconds_sum @@ -913,25 +826,23 @@ kube-prometheus-stack: ## process_open_fds ## process_resident_memory_bytes metricRelabelings: - - action: keep - regex: (?:etcd_request_cache_(?:add|get)_(?:duration_seconds|latencies_summary)_(?:count|sum)|etcd_helper_cache_(?:hit|miss)_(?:count|total)|etcd_mvcc_db_total_size_in_bytes|etcd_debugging_(store_(expires_total|watchers))|etcd_disk_(backend_commit|wal_fsync)_duration_seconds_.*|etcd_grpc_proxy_cache_(hits|misses)_total|etcd_network_client_grpc_(received|sent)_bytes_total|etcd_server_(has_leader|leader_changes_seen_total)|etcd_server_proposals_(pending|(applied|committed|failed)_total)|process_(cpu_seconds_total|open_fds|resident_memory_bytes)) - sourceLabels: [__name__] + - action: keep + regex: (?:etcd_request_cache_(?:add|get)_(?:duration_seconds|latencies_summary)_(?:count|sum)|etcd_helper_cache_(?:hit|miss)_(?:count|total)|etcd_mvcc_db_total_size_in_bytes|etcd_debugging_(store_(expires_total|watchers))|etcd_disk_(backend_commit|wal_fsync)_duration_seconds_.*|etcd_grpc_proxy_cache_(hits|misses)_total|etcd_network_client_grpc_(received|sent)_bytes_total|etcd_server_(has_leader|leader_changes_seen_total)|etcd_server_proposals_(pending|(applied|committed|failed)_total)|process_(cpu_seconds_total|open_fds|resident_memory_bytes)) + sourceLabels: + - __name__ kubeScheduler: serviceMonitor: ## Scrape interval. If not set, the Prometheus default scrape interval is used. - interval: + interval: '' ## see docs/scraped_metrics.md - ## ## scheduler_e2e_* is present for K8s <1.23 ## scheduler_e2e_scheduling_duration_seconds_bucket ## scheduler_e2e_scheduling_duration_seconds_count ## scheduler_e2e_scheduling_duration_seconds_sum - ## ## scheduler_scheduling_attempt_duration_seconds is present for K8s >=1.23 ## scheduler_scheduling_attempt_duration_seconds_bucket ## scheduler_scheduling_attempt_duration_seconds_count ## scheduler_scheduling_attempt_duration_seconds_sum - ## ## scheduler_framework_extension_point_duration_seconds_bucket ## scheduler_framework_extension_point_duration_seconds_count ## scheduler_framework_extension_point_duration_seconds_sum @@ -939,33 +850,28 @@ kube-prometheus-stack: ## scheduler_scheduling_algorithm_duration_seconds_count ## scheduler_scheduling_algorithm_duration_seconds_sum metricRelabelings: - - action: keep - regex: (?:scheduler_(?:e2e_scheduling|scheduling_attempt|framework_extension_point|scheduling_algorithm)_duration_seconds.*) - sourceLabels: [__name__] - + - action: keep + regex: (?:scheduler_(?:e2e_scheduling|scheduling_attempt|framework_extension_point|scheduling_algorithm)_duration_seconds.*) + sourceLabels: + - __name__ alertmanager: enabled: false grafana: enabled: false defaultDashboardsEnabled: false prometheusOperator: - enabled: false ## Labels to add to the operator pod podLabels: {} ## Annotations to add to the operator pod podAnnotations: {} ## Resource limits for prometheus operator - resources: - {} - # limits: - # cpu: 200m - # memory: 200Mi - # requests: - # cpu: 100m - # memory: 100Mi - ## ServiceMonitor for the Prometheus operator - serviceMonitor: - selfMonitor: false + resources: {} + # limits: + # cpu: 200m + # memory: 200Mi + # requests: + # cpu: 100m + # memory: 100Mi admissionWebhooks: enabled: false tls: @@ -973,32 +879,22 @@ kube-prometheus-stack: ## Resource limits for kube-state-metrics kube-state-metrics: ## Put here the new name if you want to 
override the full name used for Kube State Metrics components. - # fullnameOverride: "" - + # fullnameOverride: '' nodeSelector: {} - ## Custom labels to apply to service, deployment and pods customLabels: {} ## Additional annotations for pods in the DaemonSet podAnnotations: {} - resources: - {} - # limits: - # cpu: 100m - # memory: 64Mi - # requests: - # cpu: 10m - # memory: 32Mi - + resources: {} ## latest kube-prometheus-stack version that is supported on OpenShift 4.8-4.10 ## uses version 2.6.0 of kube-state-metrics, but this version has some critical vulnerabilities, ## so we bump the image manually. image: - tag: "v2.7.0" + tag: v2.7.0 prometheus: monitor: ## Scrape interval. If not set, the Prometheus default scrape interval is used. - interval: + interval: '' ## see docs/scraped_metrics.md ## kube_daemonset_status_current_number_scheduled ## kube_daemonset_status_desired_number_scheduled @@ -1034,50 +930,50 @@ kube-prometheus-stack: ## kube_service_spec_external_ip ## kube_service_spec_type ## kube_service_status_load_balancer_ingress + ## Drop unnecessary labels Prometheus adds to these metrics + ## We don't want container=kube-state-metrics on everything metricRelabelings: - - action: keep - regex: (?:kube_statefulset_status_observed_generation|kube_statefulset_status_replicas|kube_statefulset_replicas|kube_statefulset_metadata_generation|kube_daemonset_status_current_number_scheduled|kube_daemonset_status_desired_number_scheduled|kube_daemonset_status_number_misscheduled|kube_daemonset_status_number_unavailable|kube_deployment_spec_replicas|kube_deployment_status_replicas_available|kube_deployment_status_replicas_unavailable|kube_node_info|kube_node_status_allocatable|kube_node_status_capacity|kube_node_status_condition|kube_hpa_spec_max_replicas|kube_hpa_spec_min_replicas|kube_hpa_status_(condition|(current|desired)_replicas)|kube_pod_container_info|kube_pod_container_resource_requests|kube_pod_container_resource_limits|kube_pod_container_status_ready|kube_pod_container_status_terminated_reason|kube_pod_container_status_waiting_reason|kube_pod_container_status_restarts_total|kube_pod_status_phase|kube_pod_info|kube_service_info|kube_service_spec_external_ip|kube_service_spec_type|kube_service_status_load_balancer_ingress) - sourceLabels: [__name__] - ## Drop unnecessary labels Prometheus adds to these metrics - ## We don't want container=kube-state-metrics on everything - - action: labeldrop - regex: service - - action: replace - sourceLabels: [container] - regex: kube-state-metrics - targetLabel: container - replacement: "" - - action: replace - sourceLabels: [pod] - regex: ".*kube-state-metrics.*" - targetLabel: pod - replacement: "" - - action: labelmap - regex: (pod|service) - replacement: service_discovery_${1} + - action: keep + regex: 
(?:kube_statefulset_status_observed_generation|kube_statefulset_status_replicas|kube_statefulset_replicas|kube_statefulset_metadata_generation|kube_daemonset_status_current_number_scheduled|kube_daemonset_status_desired_number_scheduled|kube_daemonset_status_number_misscheduled|kube_daemonset_status_number_unavailable|kube_deployment_spec_replicas|kube_deployment_status_replicas_available|kube_deployment_status_replicas_unavailable|kube_node_info|kube_node_status_allocatable|kube_node_status_capacity|kube_node_status_condition|kube_hpa_spec_max_replicas|kube_hpa_spec_min_replicas|kube_hpa_status_(condition|(current|desired)_replicas)|kube_pod_container_info|kube_pod_container_resource_requests|kube_pod_container_resource_limits|kube_pod_container_status_ready|kube_pod_container_status_terminated_reason|kube_pod_container_status_waiting_reason|kube_pod_container_status_restarts_total|kube_pod_status_phase|kube_pod_info|kube_service_info|kube_service_spec_external_ip|kube_service_spec_type|kube_service_status_load_balancer_ingress) + sourceLabels: + - __name__ + - action: labeldrop + regex: service + - action: replace + regex: kube-state-metrics + replacement: '' + sourceLabels: + - container + targetLabel: container + - action: replace + regex: .*kube-state-metrics.* + replacement: '' + sourceLabels: + - pod + targetLabel: pod + - action: labelmap + regex: (pod|service) + replacement: service_discovery_${1} ## Resource limits for prometheus node exporter prometheus-node-exporter: - ## Put here the new name if you want to override the full name used for Prometheus Node exporter components. - # fullnameOverride: "" - + ## Put here the new name if you want to override the full name used for Prometheus Node exporter components. + # fullnameOverride: '' nodeSelector: {} - ## Additional labels for pods in the DaemonSet podLabels: {} ## Additional annotations for pods in the DaemonSet podAnnotations: {} - resources: - {} - # limits: - # cpu: 200m - # memory: 50Mi - # requests: - # cpu: 100m - # memory: 30Mi + resources: {} + # limits: + # cpu: 200m + # memory: 50Mi + # requests: + # cpu: 100m + # memory: 30Mi prometheus: monitor: ## Scrape interval. If not set, the Prometheus default scrape interval is used. 
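The final `labelmap` rule above copies the `pod` and `service` labels to `service_discovery_pod` / `service_discovery_service` after the originals are blanked for kube-state-metrics series. A sketch of labelmap semantics; note Prometheus spells the replacement `${1}`, while Python's equivalent backreference is `\1`:

```python
import re

# labelmap: copy each label whose name fully matches the regex to a new name
# built from the replacement, keeping the original label.
def labelmap(labels, regex=r"(pod|service)", replacement=r"service_discovery_\1"):
    out = dict(labels)
    for name, value in labels.items():
        match = re.fullmatch(regex, name)
        if match:
            out[match.expand(replacement)] = value
    return out

relabeled = labelmap({"pod": "kube-state-metrics-0", "namespace": "sumologic"})
assert relabeled["service_discovery_pod"] == "kube-state-metrics-0"
assert "service_discovery_namespace" not in relabeled
```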
- interval: + interval: '' ## see docs/scraped_metrics.md ## node exporter metrics ## node_cpu_seconds_total @@ -1101,19 +997,19 @@ kube-prometheus-stack: ## node_filesystem_files_free ## node_filesystem_files metricRelabelings: - - action: keep - regex: (?:node_load1|node_load5|node_load15|node_cpu_seconds_total|node_disk_io_time_weighted_seconds_total|node_disk_io_time_seconds_total|node_vmstat_pgpgin|node_vmstat_pgpgout|node_memory_MemFree_bytes|node_memory_Cached_bytes|node_memory_Buffers_bytes|node_memory_MemTotal_bytes|node_network_receive_drop_total|node_network_transmit_drop_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_filesystem_avail_bytes|node_filesystem_size_bytes) - sourceLabels: [__name__] + - action: keep + regex: (?:node_load1|node_load5|node_load15|node_cpu_seconds_total|node_disk_io_time_weighted_seconds_total|node_disk_io_time_seconds_total|node_vmstat_pgpgin|node_vmstat_pgpgout|node_memory_MemFree_bytes|node_memory_Cached_bytes|node_memory_Buffers_bytes|node_memory_MemTotal_bytes|node_network_receive_drop_total|node_network_transmit_drop_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_filesystem_avail_bytes|node_filesystem_size_bytes) + sourceLabels: + - __name__ prometheus: - enabled: false additionalServiceMonitors: [] prometheusSpec: ## Prometheus default scrape interval, default from upstream Kube Prometheus Stack Helm chart ## NOTE changing the scrape interval to be >1m can result in metrics ## from recording rules to be missing and empty panels in Sumo Logic Kubernetes apps. - scrapeInterval: "30s" + scrapeInterval: 30s ## Prometheus data retention period - retention: "1d" + retention: 1d ## Add custom pod annotations and labels to prometheus pods podMetadata: labels: {} @@ -1128,111 +1024,110 @@ kube-prometheus-stack: cpu: 500m memory: 1Gi initContainers: - - name: "init-config-reloader" - env: - - name: METADATA_METRICS_SVC - valueFrom: - configMapKeyRef: - name: sumologic-configmap - key: metadataMetrics - - name: NAMESPACE - valueFrom: - configMapKeyRef: - name: sumologic-configmap - key: metadataNamespace + - env: + - name: METADATA_METRICS_SVC + valueFrom: + configMapKeyRef: + key: metadataMetrics + name: sumologic-configmap + - name: NAMESPACE + valueFrom: + configMapKeyRef: + key: metadataNamespace + name: sumologic-configmap + name: init-config-reloader containers: - - name: "config-reloader" - env: - - name: METADATA_METRICS_SVC - valueFrom: - configMapKeyRef: - name: sumologic-configmap - key: metadataMetrics - - name: NAMESPACE - valueFrom: - configMapKeyRef: - name: sumologic-configmap - key: metadataNamespace - + - env: + - name: METADATA_METRICS_SVC + valueFrom: + configMapKeyRef: + key: metadataMetrics + name: sumologic-configmap + - name: NAMESPACE + valueFrom: + configMapKeyRef: + key: metadataNamespace + name: sumologic-configmap + name: config-reloader ## Enable WAL compression to reduce Prometheus memory consumption walCompression: true - ## prometheus scrape config ## rel: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config + ## scraping metrics based on annotations: + ## - prometheus.io/scrape: true - to scrape metrics from the pod + ## - prometheus.io/path: /metrics - path which the metric should be scraped from + ## - prometheus.io/port: 9113 - port which the metric should be scraped from + ## rel: https://github.com/prometheus-operator/kube-prometheus/pull/16#issuecomment-424318647 additionalScrapeConfigs: - ## scraping metrics basing on
annotations: - ## - prometheus.io/scrape: true - to scrape metrics from the pod - ## - prometheus.io/path: /metrics - path which the metric should be scrape from - ## - prometheus.io/port: 9113 - port which the metric should be scrape from - ## rel: https://github.com/prometheus-operator/kube-prometheus/pull/16#issuecomment-424318647 - - job_name: "pod-annotations" - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] - action: replace - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - target_label: __address__ - - source_labels: [__metrics_path__] - separator: ; - regex: (.*) - target_label: endpoint - replacement: $1 - action: replace - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - - source_labels: [__meta_kubernetes_pod_name] - separator: ; - regex: (.*) - target_label: pod - replacement: $1 - action: replace + - job_name: pod-annotations + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: true + source_labels: + - __meta_kubernetes_pod_annotation_prometheus_io_scrape + - action: replace + regex: (.+) + source_labels: + - __meta_kubernetes_pod_annotation_prometheus_io_path + target_label: __metrics_path__ + - action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + source_labels: + - __address__ + - __meta_kubernetes_pod_annotation_prometheus_io_port + target_label: __address__ + - action: replace + regex: (.*) + replacement: $1 + separator: ; + source_labels: + - __metrics_path__ + target_label: endpoint + - action: replace + source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - action: replace + regex: (.*) + replacement: $1 + separator: ; + source_labels: + - __meta_kubernetes_pod_name + target_label: pod remoteWrite: - ## infrastructure metrics - - url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics - remoteTimeout: 5s - - serviceMonitor: - selfMonitor: false - + ## infrastructure metrics + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics ## Configure otelcol-instrumentation - Sumo OTel Distro Collector ## ref: https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/opentelemetry-collector/traces.md otelcolInstrumentation: enabled: true sourceMetadata: ## Set the _sourceName metadata field in Sumo Logic. - sourceName: "%{k8s.namespace.name}.%{k8s.pod.pod_name}.%{k8s.container.name}" + sourceName: '%{k8s.namespace.name}.%{k8s.pod.pod_name}.%{k8s.container.name}' ## Set the _sourceCategory metadata field in Sumo Logic. - sourceCategory: "%{k8s.namespace.name}/%{k8s.pod.pod_name}" + sourceCategory: '%{k8s.namespace.name}/%{k8s.pod.pod_name}' ## Set the prefix, for _sourceCategory metadata. - sourceCategoryPrefix: "kubernetes/" + sourceCategoryPrefix: kubernetes/ ## Used to replace - with another character. - sourceCategoryReplaceDash: "/" - + sourceCategoryReplaceDash: / ## A regular expression for containers. ## Matching containers will be excluded from Sumo. 
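Stepping back to the `pod-annotations` scrape job above: it makes scraping opt-in per pod. The first relabel rule keeps only targets whose pod carries `prometheus.io/scrape: "true"`, and the next two rewrite the metrics path and the target address from the other two annotations. A pod would opt in like this (hypothetical pod; annotation values illustrative):

    apiVersion: v1
    kind: Pod
    metadata:
      name: nginx-exporter          ## hypothetical
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/path: /metrics
        prometheus.io/port: "9113"
    spec:
      containers:
        - name: exporter
          image: nginx/nginx-prometheus-exporter:0.11   ## illustrative image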
The logs will still be sent to otelcol. - excludeContainerRegex: "" + excludeContainerRegex: '' ## A regular expression for hosts. ## Matching hosts will be excluded from Sumo. The logs will still be sent to otelcol. - excludeHostRegex: "" + excludeHostRegex: '' ## A regular expression for namespaces. ## Matching namespaces will be excluded from Sumo. The logs will still be sent to otelcol. - excludeNamespaceRegex: "" + excludeNamespaceRegex: '' ## A regular expression for pods. ## Matching pods will be excluded from Sumo. The logs will still be sent to otelcol. - excludePodRegex: "" - + excludePodRegex: '' ## Option to turn autoscaling on for otelcol and specify params for HPA. ## Autoscaling needs metrics-server to access cpu metrics. autoscaling: @@ -1241,7 +1136,6 @@ otelcolInstrumentation: maxReplicas: 10 targetCPUUtilizationPercentage: 100 # targetMemoryUtilizationPercentage: 50 - statefulset: nodeSelector: {} tolerations: [] @@ -1250,7 +1144,7 @@ otelcolInstrumentation: ## Acceptable values for podAntiAffinity: ## soft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default) ## hard: specifies rules that must be met for a pod to be scheduled onto a node - podAntiAffinity: "soft" + podAntiAffinity: soft replicaCount: 3 resources: limits: @@ -1260,18 +1154,15 @@ otelcolInstrumentation: memory: 768Mi cpu: 500m ## Option to define priorityClassName to assign a priority class to pods. - priorityClassName: - + priorityClassName: '' ## Add custom labels only to metrics sts pods podLabels: {} ## Add custom annotations only to metrics sts pods podAnnotations: {} - image: - # repository: "" - # tag: "" + # repository: '' + # tag: '' pullPolicy: IfNotPresent - ## Set securityContext for containers running in pods in otelcol-instrumentation statefulset. containers: otelcol: @@ -1289,86 +1180,88 @@ otelcolInstrumentation: startupProbe: periodSeconds: 3 failureThreshold: 60 - ## Extra Environment Values - allows yaml definitions # extraEnvVars: - # - name: VALUE_FROM_SECRET - # valueFrom: - # secretKeyRef: - # name: secret_name - # key: secret_key - + # - name: VALUE_FROM_SECRET + # valueFrom: + # secretKeyRef: + # key: secret_key + # name: secret_name # extraVolumes: - # - name: es-certs - # secret: - # defaultMode: 420 - # secretName: es-certs + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs # extraVolumeMounts: - # - name: es-certs - # mountPath: /certs - # readOnly: true - + # - mountPath: /certs + # name: es-certs + # readOnly: true ## To enable collecting all logs, set to false logLevelFilter: false - config: receivers: jaeger: protocols: thrift_compact: - endpoint: "0.0.0.0:6831" + endpoint: 0.0.0.0:6831 thrift_binary: - endpoint: "0.0.0.0:6832" + endpoint: 0.0.0.0:6832 grpc: - endpoint: "0.0.0.0:14250" + endpoint: 0.0.0.0:14250 thrift_http: - endpoint: "0.0.0.0:14268" + endpoint: 0.0.0.0:14268 opencensus: - endpoint: "0.0.0.0:55678" + endpoint: 0.0.0.0:55678 otlp: protocols: grpc: - endpoint: "0.0.0.0:4317" + endpoint: 0.0.0.0:4317 http: - endpoint: "0.0.0.0:4318" + endpoint: 0.0.0.0:4318 otlp/deprecated: protocols: http: - endpoint: "0.0.0.0:55681" + endpoint: 0.0.0.0:55681 zipkin: - endpoint: "0.0.0.0:9411" + endpoint: 0.0.0.0:9411 processors: ## Source processor adds Sumo Logic related metadata source: - annotation_prefix: "k8s.pod.annotation." - collector: "{{ .Values.sumologic.collectorName | default .Values.sumologic.clusterName | quote }}" + annotation_prefix: k8s.pod.annotation. 
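## Worked example for the sourceMetadata templates above (values illustrative):
## for a pod 'nginx-a1b2' in namespace 'prod' with container 'nginx', the
## defaults render roughly to _sourceName 'prod.nginx-a1b2.nginx' and, with the
## 'kubernetes/' prefix applied and sourceCategoryReplaceDash turning '-' into
## '/', _sourceCategory 'kubernetes/prod/nginx/a1b2'; exactly how
## %{k8s.pod.pod_name} is derived from the raw pod name is up to the source
## processor, so treat these renderings as approximate.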
+ collector: '{{ .Values.sumologic.collectorName | default .Values.sumologic.clusterName + | quote }}' exclude: - k8s.container.name: "{{ .Values.otelcolInstrumentation.sourceMetadata.excludeContainerRegex | quote }}" - k8s.host.name: "{{ .Values.otelcolInstrumentation.sourceMetadata.excludeHostRegex | quote }}" - k8s.namespace.name: "{{ .Values.otelcolInstrumentation.sourceMetadata.excludeNamespaceRegex | quote }}" - k8s.pod.name: "{{ .Values.otelcolInstrumentation.sourceMetadata.excludePodRegex| quote }}" - pod_key: "k8s.pod.name" - pod_name_key: "k8s.pod.pod_name" - pod_template_hash_key: "k8s.pod.label.pod-template-hash" - source_category: "{{ .Values.otelcolInstrumentation.sourceMetadata.sourceCategory | quote }}" - source_category_prefix: "{{ .Values.otelcolInstrumentation.sourceMetadata.sourceCategoryPrefix | quote }}" - source_category_replace_dash: "{{ .Values.otelcolInstrumentation.sourceMetadata.sourceCategoryReplaceDash | quote }}" - source_host: "%{k8s.pod.hostname}" - source_name: "{{ .Values.otelcolInstrumentation.sourceMetadata.sourceName | quote }}" - + k8s.container.name: '{{ .Values.otelcolInstrumentation.sourceMetadata.excludeContainerRegex | + quote }}' + k8s.host.name: '{{ .Values.otelcolInstrumentation.sourceMetadata.excludeHostRegex + | quote }}' + k8s.namespace.name: '{{ .Values.otelcolInstrumentation.sourceMetadata.excludeNamespaceRegex + | quote }}' + k8s.pod.name: '{{ .Values.otelcolInstrumentation.sourceMetadata.excludePodRegex + | quote }}' + pod_key: k8s.pod.name + pod_name_key: k8s.pod.pod_name + pod_template_hash_key: k8s.pod.label.pod-template-hash + source_category: '{{ .Values.otelcolInstrumentation.sourceMetadata.sourceCategory + | quote }}' + source_category_prefix: '{{ .Values.otelcolInstrumentation.sourceMetadata.sourceCategoryPrefix + | quote }}' + source_category_replace_dash: '{{ .Values.otelcolInstrumentation.sourceMetadata.sourceCategoryReplaceDash + | quote }}' + source_host: '%{k8s.pod.hostname}' + source_name: '{{ .Values.otelcolInstrumentation.sourceMetadata.sourceName | quote + }}' ## Resource processor sets the associated cluster attribute resource: attributes: - - key: k8s.cluster.name - value: '{{ include "sumologic.clusterNameReplaceSpaceWithDash" . }}' - action: upsert - + - action: upsert + key: k8s.cluster.name + value: '{{ include "sumologic.clusterNameReplaceSpaceWithDash" . 
}}' resourcedetection: detectors: - - system + - system override: false timeout: 10s - ## Tags spans with K8S metadata, based on the context IP k8s_tagger: ## When true, only IP is assigned and passed (so it could be tagged on another collector) @@ -1377,30 +1270,29 @@ otelcolInstrumentation: owner_lookup_enabled: true ## Extracted fields and assigned names extract: + ## extract the following well-known metadata fields metadata: - ## extract the following well-known metadata fields - - containerId - - containerName - - daemonSetName - - deploymentName - - hostName - - namespace - - nodeName - - podId - - podName - - replicaSetName - - serviceName - - statefulSetName + - containerId + - containerName + - daemonSetName + - deploymentName + - hostName + - namespace + - nodeName + - podId + - podName + - replicaSetName + - serviceName + - statefulSetName annotations: - - tag_name: "k8s.pod.annotation.%s" - key: "*" + - key: '*' + tag_name: k8s.pod.annotation.%s namespace_labels: - - tag_name: "k8s.namespace.label.%s" - key: "*" + - key: '*' + tag_name: k8s.namespace.label.%s labels: - - tag_name: "k8s.pod.label.%s" - key: "*" - + - key: '*' + tag_name: k8s.pod.label.%s ## The memory_limiter processor is used to prevent out of memory situations on the collector. memory_limiter: ## check_interval is the time between measurements of memory usage for the @@ -1408,15 +1300,11 @@ otelcolInstrumentation: ## checks will be performed. Values below 1 second are not recommended since ## it can result in unnecessary CPU consumption. check_interval: 5s - ## Maximum amount of memory, in %, targeted to be allocated by the process heap. ## Note that typically the total memory usage of process will be about 50MiB higher ## than this value. limit_percentage: 75 - - ## Maximum spike expected between the measurements of memory usage, in %. spike_limit_percentage: 20 - ## The batch processor accepts spans and places them into batches grouped by node and resource batch: ## Number of spans after which a batch will be sent regardless of time @@ -1438,7 +1326,7 @@ otelcolInstrumentation: ## Empty string means no compression compress_encoding: gzip ## Max HTTP request body size in bytes before compression (if applied). By default 1_048_576 (1MB) is used. - max_request_body_size: 1_048_576 # 1MB + max_request_body_size: 1048576 ## Format to use when sending logs to Sumo. (default json) (possible values: json, text) log_format: text ## Format of the metrics to be sent (default is prometheus) (possible values: carbon2, prometheus) @@ -1464,19 +1352,40 @@ otelcolInstrumentation: ## requests_per_second is the average number of requests per second. queue_size: 5000 otlphttp/traces: - endpoint: 'http://{{ include "otelcolinstrumentation.exporter.endpoint" . }}:4318' + endpoint: http://{{ include "otelcolinstrumentation.exporter.endpoint" . 
}}:4318 service: - extensions: [health_check, memory_ballast, pprof] + extensions: + - health_check + - memory_ballast + - pprof pipelines: traces: - receivers: [jaeger, opencensus, otlp, otlp/deprecated, zipkin] - processors: [memory_limiter, k8s_tagger, source, resource, batch] - exporters: [otlphttp/traces] + receivers: + - jaeger + - opencensus + - otlp + - otlp/deprecated + - zipkin + processors: + - memory_limiter + - k8s_tagger + - source + - resource + - batch + exporters: + - otlphttp/traces metrics: - receivers: [otlp, otlp/deprecated] - processors: [memory_limiter, k8s_tagger, source, resource, batch] - exporters: [sumologic/metrics] - + receivers: + - otlp + - otlp/deprecated + processors: + - memory_limiter + - k8s_tagger + - source + - resource + - batch + exporters: + - sumologic/metrics ## Configure traces-sampler ## ref: https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/opentelemetry-collector/traces.md tracesSampler: @@ -1492,47 +1401,42 @@ tracesSampler: memory: 384Mi cpu: 200m ## Option to define priorityClassName to assign a priority class to pods. - priorityClassName: - + priorityClassName: '' ## Add custom labels only to traces-sampler deployment. podLabels: {} ## Add custom annotations only to traces-sampler deployment. podAnnotations: {} image: - # repository: "" - # tag: "" + # repository: '' + # tag: '' pullPolicy: IfNotPresent - ## Extra Environment Values - allows yaml definitions # extraEnvVars: - # - name: VALUE_FROM_SECRET - # valueFrom: - # secretKeyRef: - # name: secret_name - # key: secret_key - + # - name: VALUE_FROM_SECRET + # valueFrom: + # secretKeyRef: + # key: secret_key + # name: secret_name # extraVolumes: - # - name: es-certs - # secret: - # defaultMode: 420 - # secretName: es-certs + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs # extraVolumeMounts: - # - name: es-certs - # mountPath: /certs - # readOnly: true - + # - mountPath: /certs + # name: es-certs + # readOnly: true ## To enable collecting all logs, set to false - logLevelFilter: false - + logLevelFilter: false ## Collector configuration config: receivers: otlp: protocols: grpc: - endpoint: "0.0.0.0:4317" + endpoint: 0.0.0.0:4317 http: - endpoint: "0.0.0.0:4318" + endpoint: 0.0.0.0:4318 processors: ## The memory_limiter processor is used to prevent out of memory situations on the collector. memory_limiter: @@ -1541,22 +1445,18 @@ tracesSampler: ## checks will be performed. Values below 1 second are not recommended since ## it can result in unnecessary CPU consumption. check_interval: 5s - ## Maximum amount of memory, in %, targeted to be allocated by the process heap. ## Note that typically the total memory usage of process will be about 50MiB higher ## than this value. limit_percentage: 75 - ## Maximum spike expected between the measurements of memory usage, in %. spike_limit_percentage: 20 - ## Smart cascading filtering rules with preset limits. ## Please see https://github.com/SumoLogic/sumologic-otel-collector/tree/v0.85.0-sumo-0/pkg/processor/cascadingfilterprocessor ## for details.
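## Beyond num_traces below, the processor's README (linked above) also
## documents rate and probabilistic controls; a hedged sketch of what a fuller
## setup could look like (field names taken from that README -- verify them
## against the pinned version before use):
##   cascading_filter:
##     decision_wait: 30s
##     probabilistic_filtering_ratio: 0.2
##     spans_per_second: 1600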
cascading_filter: ## Max number of traces for which decisions are kept in memory num_traces: 200000 - ## The batch processor accepts spans and places them into batches grouped by node and resource batch: ## Number of spans after which a batch will be sent regardless of time @@ -1565,7 +1465,6 @@ tracesSampler: send_batch_max_size: 512 ## Time duration after which a batch will be sent regardless of size timeout: 5s - extensions: health_check: {} memory_ballast: @@ -1583,43 +1482,44 @@ tracesSampler: traces_endpoint: ${SUMO_ENDPOINT_DEFAULT_TRACES_SOURCE} compression: gzip service: - extensions: [health_check, memory_ballast, pprof] + extensions: + - health_check + - memory_ballast + - pprof pipelines: traces: - receivers: [otlp] - processors: [memory_limiter, cascading_filter, batch] - exporters: [otlphttp] - + receivers: + - otlp + processors: + - memory_limiter + - cascading_filter + - batch + exporters: + - otlphttp metadata: ## Configure image for Opentelemetry Collector (for logs and metrics) image: - # repository: "" - # tag: "" + # repository: '' + # tag: '' pullPolicy: IfNotPresent - securityContext: ## The group ID of all processes in the statefulset containers. This can be anything, but it does need to be set. ## The default is 0 (root), and containers don't have write permissions for volumes in that case. fsGroup: 999 - ## Add custom labels to all otelcol sts pods(logs and metrics) podLabels: {} - ## Add custom annotations to all otelcol sts pods(logs and metrics) podAnnotations: {} - ## Add custom labels to all otelcol svc (logs and metrics) serviceLabels: {} - ## Configure persistence for Opentelemetry Collector persistence: enabled: true - # storageClass: "" + # storageClass: '' accessMode: ReadWriteOnce size: 10Gi ## Add custom labels to all otelcol statefulset PVC (logs and metrics) pvcLabels: {} - ## Configure metrics pipeline. ## This section affects only otelcol provider. metrics: @@ -1629,12 +1529,11 @@ metadata: ## Directly alter the OT configuration. The value of this key should be a dictionary, that will ## be directly merged with the generated configuration, overriding existing values. ## For example: - # override: - # processors: - # batch: - # send_batch_size: 512 + ## override: + ## processors: + ## batch: + ## send_batch_size: 512 ## will change the batch size of the pipeline. - ## ## WARNING: This field is not subject to backwards-compatibility guarantees offered by the rest ## of this chart. It involves implementation details that may change even in minor versions. ## Use with caution, and consider opening an issue, so your customization can be added in a safer way. @@ -1643,10 +1542,8 @@ metadata: ## The value of this key should be a dictionary, that will replace the normal configuration. ## This is an advanced feature, use with caution, and review the generated configuration first. override: {} - ## List of additional endpoints to be handled by Metrics Metadata Pods additionalEndpoints: [] - statefulset: nodeSelector: {} tolerations: [] @@ -1655,7 +1552,7 @@ metadata: ## Acceptable values for podAntiAffinity: ## soft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default) ## hard: specifies rules that must be met for a pod to be scheduled onto a node - podAntiAffinity: "soft" + podAntiAffinity: soft replicaCount: 3 resources: limits: @@ -1665,13 +1562,11 @@ metadata: memory: 768Mi cpu: 500m ## Option to define priorityClassName to assign a priority class to pods. 
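## For example (hypothetical class name; the PriorityClass must already exist
## in the cluster):
##   priorityClassName: sumologic-high
## with the class itself created beforehand as:
##   apiVersion: scheduling.k8s.io/v1
##   kind: PriorityClass
##   metadata:
##     name: sumologic-high
##   value: 1000000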
- priorityClassName: - + priorityClassName: '' ## Add custom labels only to metrics sts pods podLabels: {} ## Add custom annotations only to metrics sts pods podAnnotations: {} - ## Set securityContext for containers running in pods in metrics statefulset. containers: otelcol: @@ -1689,25 +1584,22 @@ metadata: startupProbe: periodSeconds: 3 failureThreshold: 60 - ## Extra Environment Values - allows yaml definitions # extraEnvVars: - # - name: VALUE_FROM_SECRET - # valueFrom: - # secretKeyRef: - # name: secret_name - # key: secret_key - + # - name: VALUE_FROM_SECRET + # valueFrom: + # secretKeyRef: + # key: secret_key + # name: secret_name # extraVolumes: - # - name: es-certs - # secret: - # defaultMode: 420 - # secretName: es-certs + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs # extraVolumeMounts: - # - name: es-certs - # mountPath: /certs - # readOnly: true - + # - mountPath: /certs + # name: es-certs + # readOnly: true ## Option to turn autoscaling on for metrics and specify params for HPA. ## Autoscaling needs metrics-server to access cpu metrics. autoscaling: @@ -1716,14 +1608,12 @@ metadata: maxReplicas: 10 targetCPUUtilizationPercentage: 80 # targetMemoryUtilizationPercentage: 50 - ## Option to specify PodDisrutionBudgets ## You can specify only one of maxUnavailable and minAvailable in a single PodDisruptionBudget podDisruptionBudget: minAvailable: 2 - ## To use maxUnavailable, set minAvailable to null and uncomment the below: - # maxUnavailable: 1 - + ## To use maxUnavailable, set minAvailable to null and uncomment the below: + # maxUnavailable: 1 ## Configure logs pipeline. ## This section affects only otelcol provider. logs: @@ -1733,12 +1623,11 @@ metadata: ## Directly alter the OT configuration. The value of this key should be a dictionary, that will ## be directly merged with the generated configuration, overriding existing values. ## For example: - # override: - # processors: - # batch: - # send_batch_size: 512 + ## override: + ## processors: + ## batch: + ## send_batch_size: 512 ## will change the batch size of the pipeline. - ## ## WARNING: This field is not subject to backwards-compatibility guarantees offered by the rest ## of this chart. It involves implementation details that may change even in minor versions. ## Use with caution, and consider opening an issue, so your customization can be added in a safer way. @@ -1755,7 +1644,7 @@ metadata: ## Acceptable values for podAntiAffinity: ## soft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default) ## hard: specifies rules that must be met for a pod to be scheduled onto a node - podAntiAffinity: "soft" + podAntiAffinity: soft replicaCount: 3 resources: limits: @@ -1765,13 +1654,11 @@ memory: 768Mi cpu: 500m ## Option to define priorityClassName to assign a priority class to pods. - priorityClassName: - + priorityClassName: '' ## Add custom labels only to logs sts pods podLabels: {} ## Add custom annotations only to logs sts pods podAnnotations: {} - ## Set securityContext for containers running in pods in logs statefulset.
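## For instance, to harden the collector container (illustrative; merged under
## containers.otelcol.securityContext):
##   containers:
##     otelcol:
##       securityContext:
##         readOnlyRootFilesystem: true
##         allowPrivilegeEscalation: false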
containers: otelcol: @@ -1789,30 +1676,27 @@ metadata: startupProbe: periodSeconds: 3 failureThreshold: 60 - ## Extra Environment Values - allows yaml definitions # extraEnvVars: - # - name: VALUE_FROM_SECRET - # valueFrom: - # secretKeyRef: - # name: secret_name - # key: secret_key - + # - name: VALUE_FROM_SECRET + # valueFrom: + # secretKeyRef: + # key: secret_key + # name: secret_name # extraVolumes: - # - name: es-certs - # secret: - # defaultMode: 420 - # secretName: es-certs + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs # extraVolumeMounts: - # - name: es-certs - # mountPath: /certs - # readOnly: true + # - mountPath: /certs + # name: es-certs + # readOnly: true # extraPorts: - # - name: otlphttp2 - # containerPort: 4319 - # protocol: TCP - # extraArgs: - + # - containerPort: 4319 + # name: otlphttp2 + # protocol: TCP + # extraArgs: [] ## Option to turn autoscaling on for logs and specify params for HPA. ## Autoscaling needs metrics-server to access cpu metrics. autoscaling: @@ -1821,19 +1705,16 @@ metadata: maxReplicas: 10 targetCPUUtilizationPercentage: 80 # targetMemoryUtilizationPercentage: 50 - ## Option to specify PodDisrutionBudgets ## You can specify only one of maxUnavailable and minAvailable in a single PodDisruptionBudget podDisruptionBudget: minAvailable: 2 ## To use maxUnavailable, set minAvailable to null and uncomment the below: # maxUnavailable: 1 - ## Configure traces-gateway ## ref: https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/opentelemetry-collector/traces.md tracesGateway: enabled: true - ## Option to turn autoscaling on for otelcol and specify params for HPA. ## Autoscaling needs metrics-server to access cpu metrics. autoscaling: @@ -1842,7 +1723,6 @@ tracesGateway: maxReplicas: 10 targetCPUUtilizationPercentage: 100 # targetMemoryUtilizationPercentage: 50 - deployment: replicas: 1 nodeSelector: {} @@ -1854,14 +1734,13 @@ tracesGateway: requests: memory: 196Mi cpu: 50m - ## Add custom labels only to traces-gateway deployment. podLabels: {} ## Add custom annotations only to traces-gateway deployment. podAnnotations: {} image: - # repository: "" - # tag: "" + # repository: '' + # tag: '' pullPolicy: IfNotPresent livenessProbe: periodSeconds: 15 @@ -1875,39 +1754,34 @@ tracesGateway: periodSeconds: 5 timeoutSeconds: 3 failureThreshold: 60 - ## Extra Environment Values - allows yaml definitions # extraEnvVars: - # - name: VALUE_FROM_SECRET - # valueFrom: - # secretKeyRef: - # name: secret_name - # key: secret_key - + # - name: VALUE_FROM_SECRET + # valueFrom: + # secretKeyRef: + # key: secret_key + # name: secret_name # extraVolumes: - # - name: es-certs - # secret: - # defaultMode: 420 - # secretName: es-certs + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs # extraVolumeMounts: - # - name: es-certs - # mountPath: /certs - # readOnly: true - + # - mountPath: /certs + # name: es-certs + # readOnly: true ## Option to define priorityClassName to assign a priority class to pods. - priorityClassName: - + priorityClassName: '' ## To enable collecting all logs, set to false logLevelFilter: false - config: receivers: otlp: protocols: grpc: - endpoint: "0.0.0.0:4317" + endpoint: 0.0.0.0:4317 http: - endpoint: "0.0.0.0:4318" + endpoint: 0.0.0.0:4318 processors: ## The memory_limiter processor is used to prevent out of memory situations on the collector. memory_limiter: @@ -1916,15 +1790,12 @@ tracesGateway: ## checks will be performed. 
Values below 1 second are not recommended since ## it can result in unnecessary CPU consumption. check_interval: 5s - ## Maximum amount of memory, in %, targeted to be allocated by the process heap. ## Note that typically the total memory usage of process will be about 50MiB higher ## than this value. limit_percentage: 75 - ## Maximum spike expected between the measurements of memory usage, in %. spike_limit_percentage: 20 - ## The batch processor accepts spans and places them into batches grouped by node and resource batch: ## Number of spans after which a batch will be sent regardless of time @@ -1933,8 +1804,6 @@ tracesGateway: send_batch_max_size: 512 ## Time duration after which a batch will be sent regardless of size timeout: 5s - ## Never more than this many spans are being sent in a batch - # send_batch_max_size: 512 extensions: health_check: {} memory_ballast: @@ -1953,35 +1822,38 @@ tracesGateway: hostname: '{{ include "tracesgateway.exporter.loadbalancing.endpoint" . }}' port: 4317 service: - extensions: [health_check, memory_ballast, pprof] + extensions: + - health_check + - memory_ballast + - pprof pipelines: traces: - receivers: [otlp] - processors: [memory_limiter, batch] - exporters: [loadbalancing] - + receivers: + - otlp + processors: + - memory_limiter + - batch + exporters: + - loadbalancing ## Configuration of the OpenTelemetry Collector that collects Kubernetes events. ## See https://github.com/SumoLogic/sumologic-kubernetes-collection/deploy/docs/collecting-kubernetes-events.md. otelevents: ## Configure image for Opentelemetry Collector image: - # repository: "" - # tag: "" + # repository: '' + # tag: '' pullPolicy: IfNotPresent - logLevel: info - ## Customize the Opentelemetry Collector configuration beyond the exposed options config: ## Directly alter the OT configuration. The value of this key should be a dictionary, that will ## be directly merged with the generated configuration, overriding existing values. ## For example: - # override: - # processors: - # batch: - # send_batch_size: 512 + ## override: + ## processors: + ## batch: + ## send_batch_size: 512 ## will change the batch size of the pipeline. - ## ## WARNING: This field is not subject to backwards-compatibility guarantees offered by the rest ## of this chart. It involves implementation details that may change even in minor versions. ## Use with caution, and consider opening an issue, so your customization can be added in a safer way. @@ -1990,7 +1862,6 @@ otelevents: ## The value of this key should be a dictionary, that will replace the normal configuration. ## This is an advanced feature, use with caution, and review the generated configuration first. override: {} - statefulset: nodeSelector: {} tolerations: [] @@ -1999,7 +1870,7 @@ otelevents: ## Acceptable values for podAntiAffinity: ## soft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default) ## hard: specifies rules that must be met for a pod to be scheduled onto a node - podAntiAffinity: "soft" + podAntiAffinity: soft resources: limits: memory: 2Gi @@ -2008,18 +1879,15 @@ otelevents: memory: 500Mi cpu: 200m ## Option to define priorityClassName to assign a priority class to pods. - priorityClassName: - + priorityClassName: '' ## Add custom labels only to events sts pods podLabels: {} ## Add custom annotations only to events sts pods podAnnotations: {} - + ## The group ID of all processes in the statefulset containers. This can be anything, but it does need to be set. 
+ ## The default is 0 (root), and containers don't have write permissions for volumes in that case. securityContext: - ## The group ID of all processes in the statefulset containers. This can be anything, but it does need to be set. - ## The default is 0 (root), and containers don't have write permissions for volumes in that case. fsGroup: 999 - ## Set securityContext for containers running in pods in events statefulset. containers: otelcol: @@ -2037,25 +1905,22 @@ otelevents: startupProbe: periodSeconds: 3 failureThreshold: 60 - ## Extra Environment Values - allows yaml definitions # extraEnvVars: - # - name: VALUE_FROM_SECRET - # valueFrom: - # secretKeyRef: - # name: secret_name - # key: secret_key - + # - name: VALUE_FROM_SECRET + # valueFrom: + # secretKeyRef: + # key: secret_key + # name: secret_name # extraVolumes: - # - name: es-certs - # secret: - # defaultMode: 420 - # secretName: es-certs + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs # extraVolumeMounts: - # - name: es-certs - # mountPath: /certs - # readOnly: true - + # - mountPath: /certs + # name: es-certs + # readOnly: true ## Configure cloudwatch collection with Otelcol otelcloudwatch: statefulset: @@ -2066,7 +1931,7 @@ otelcloudwatch: ## Acceptable values for podAntiAffinity: ## soft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default) ## hard: specifies rules that must be met for a pod to be scheduled onto a node - podAntiAffinity: "soft" + podAntiAffinity: soft replicaCount: 1 resources: limits: @@ -2076,8 +1941,7 @@ otelcloudwatch: memory: 768Mi cpu: 500m ## Option to define priorityClassName to assign a priority class to pods. - priorityClassName: - + priorityClassName: '' ## Add custom labels only to logs otel sts pods podLabels: {} ## Add custom annotations only to logs otel sts pods @@ -2099,34 +1963,28 @@ otelcloudwatch: startupProbe: periodSeconds: 3 failureThreshold: 60 - ## Configure log collection with Otelcol otellogs: ## Metrics from Collector metrics: enabled: true - ## Add custom labels to otelcol svc serviceLabels: {} - ## Configure image for Opentelemetry Collector image: - # repository: "" - # tag: "" + # repository: '' + # tag: '' pullPolicy: IfNotPresent - logLevel: info - config: ## Directly alter the OT configuration. The value of this key should be a dictionary, that will ## be directly merged with the generated configuration, overriding existing values. ## For example: - # override: - # processors: - # batch: - # send_batch_size: 512 + ## override: + ## processors: + ## batch: + ## send_batch_size: 512 ## will change the batch size of the pipeline. - ## ## WARNING: This field is not subject to backwards-compatibility guarantees offered by the rest ## of this chart. It involves implementation details that may change even in minor versions. ## Use with caution, and consider opening an issue, so your customization can be added in a safer way. @@ -2135,27 +1993,21 @@ otellogs: ## The value of this key should be a dictionary, that will replace the normal configuration. ## This is an advanced feature, use with caution, and review the generated configuration first. 
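## Unlike the merge-style override further up, this full override replaces the
## generated configuration wholesale, so it has to carry a complete collector
## config; a hedged, minimal shape (keys illustrative, not a working pipeline):
##   override:
##     receivers:
##       otlp:
##         protocols:
##           grpc:
##             endpoint: 0.0.0.0:4317
##     service:
##       pipelines:
##         logs:
##           receivers:
##             - otlp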
override: {} - + ## Set securityContext for containers running in pods in log collector daemonset daemonset: - ## Set securityContext for containers running in pods in log collector daemonset securityContext: ## In order to reliably read logs from mounted node logging paths, we need to run as root fsGroup: 0 runAsUser: 0 runAsGroup: 0 - ## Add custom labels to the otelcol daemonset labels: {} - ## Add custom annotations to the otelcol daemonset annotations: {} - ## Add custom labels to all otelcol daemonset pods podLabels: {} - ## Add custom annotations to all otelcol daemonset pods podAnnotations: {} - resources: limits: memory: 1Gi @@ -2165,147 +2017,129 @@ otellogs: cpu: 100m ## Option to define priorityClassName to assign a priority class to pods. ## If not set then templates/priorityclass.yaml is used. - priorityClassName: - + priorityClassName: '' ## Set securityContext for containers running in pods in log collector daemonset containers: otelcol: securityContext: capabilities: drop: - - ALL - + - ALL ## Set securityContext and image for initContainers running in pods in log collector daemonset initContainers: changeowner: image: - repository: "public.ecr.aws/docker/library/busybox" - tag: "1.36.0" + repository: public.ecr.aws/docker/library/busybox + tag: 1.36.0 pullPolicy: IfNotPresent securityContext: capabilities: drop: - - ALL + - ALL add: - - CAP_CHOWN - + - CAP_CHOWN nodeSelector: {} tolerations: [] affinity: {} - ## Extra Environment Values - allows yaml definitions # extraEnvVars: - # - name: VALUE_FROM_SECRET - # valueFrom: - # secretKeyRef: - # name: secret_name - # key: secret_key - + # - name: VALUE_FROM_SECRET + # valueFrom: + # secretKeyRef: + # key: secret_key + # name: secret_name # extraVolumes: - # - name: es-certs - # secret: - # defaultMode: 420 - # secretName: es-certs + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs # extraVolumeMounts: - # - name: es-certs - # mountPath: /certs - # readOnly: true - + # - mountPath: /certs + # name: es-certs + # readOnly: true ## additionalDaemonSets allows setting daemonsets with affinity, nodeSelector and resources ## different than the main DaemonSet ## Be careful and set nodeAffinity for the main DaemonSet, ## as we do not support multiple pods of otellogs on the same node - ## ## e.g: ## additionalDaemonSets: - ## linux: - ## nodeSelector: - ## kubernetes.io/os: linux - ## resources: - ## limits: - ## memory: 1Gi - ## cpu: 6 - ## requests: - ## memory: 32Mi - ## cpu: 2 + ## linux: + ## nodeSelector: + ## kubernetes.io/os: linux + ## resources: + ## limits: + ## memory: 1Gi + ## cpu: 6 + ## requests: + ## memory: 32Mi + ## cpu: 2 ## daemonset: - ## affinity: - ## nodeAffinity: - ## requiredDuringSchedulingIgnoredDuringExecution: - ## nodeSelectorTerms: - ## - matchExpressions: - ## - key: kubernetes.io/os - ## operator: NotIn - ## values: - ## - linux + ## affinity: + ## nodeAffinity: + ## requiredDuringSchedulingIgnoredDuringExecution: + ## nodeSelectorTerms: + ## - matchExpressions: + ## - key: kubernetes.io/os + ## operator: NotIn + ## values: + ## - linux additionalDaemonSets: {} - ## Configure telegraf-operator ## ref: https://github.com/influxdata/helm-charts/blob/master/charts/telegraf-operator/values.yaml telegraf-operator: enabled: false ## Put here the new name if you want to override the full name used for Telegraf Operator components.
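A note on how the `classes` just below are consumed: telegraf-operator injects a Telegraf sidecar into pods that request a class via annotations, e.g. (annotation keys as documented in the telegraf-operator README; the inputs block is illustrative):

    metadata:
      annotations:
        telegraf.influxdata.com/class: sumologic-prometheus
        telegraf.influxdata.com/inputs: |
          [[inputs.redis]]
            servers = ["tcp://localhost:6379"]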
- # fullnameOverride: "" + # fullnameOverride: '' image: sidecarImage: public.ecr.aws/sumologic/telegraf:1.21.2 replicaCount: 1 classes: - secretName: "telegraf-operator-classes" - default: "sumologic-prometheus" + secretName: telegraf-operator-classes + default: sumologic-prometheus data: - sumologic-prometheus: | - [[outputs.prometheus_client]] - ## Configuration details: - ## https://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client#configuration - listen = ":9273" - metric_version = 2 - ## Disable the default collectors - collectors_exclude = ["gocollector", "process"] - ## Telegraf operator adds the internal plugin by default, and the Helm Chart doesn't let us disable it - ## Instead, drop the metrics at the output - namedrop = ["internal*"] + sumologic-prometheus: "[[outputs.prometheus_client]]\n## Configuration details:\n## https://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client#configuration\n\ \ listen = \":9273\"\n metric_version = 2\n ## Disable the default collectors\n\ \ collectors_exclude = [\"gocollector\", \"process\"]\n ## Telegraf operator adds\ \ the internal plugin by default, and the Helm Chart doesn't let us disable it\n\ \ ## Instead, drop the metrics at the output\n namedrop = [\"internal*\"]" # imagePullSecrets: [] - ## Configure Falco ## Please note that Falco is embedded in this Helm Chart for user convenience only - Sumo Logic does not provide production support for it ## This is an experimental configuration and shouldn't be used in production environment ## https://github.com/falcosecurity/charts/tree/master/falco falco: enabled: false - ## Put here the new name if you want to override the full name used for Falco components. - # fullnameOverride: "" - + ## Put here the new name if you want to override the full name used for Falco components. + # fullnameOverride: '' # imagePullSecrets: [] - image: registry: public.ecr.aws # repository: falcosecurity/falco-no-driver - ## Add kernel-devel package through MachineConfig, required to enable building of missing falco modules (only for OpenShift) addKernelDevel: true - extra: + ## Add initContainer to wait until kernel-devel is installed on host initContainers: - ## Add initContainer to wait until kernel-devel is installed on host - - name: init-falco - image: public.ecr.aws/docker/library/busybox:1.36.0 - command: - - "sh" - - "-c" - - | - while [ -f /host/etc/redhat-release ] && [ -z "$(ls /host/usr/src/kernels)" ] ; do - echo "waiting for kernel headers to be installed" - sleep 3 - done - volumeMounts: - - mountPath: /host/usr - name: usr-fs - readOnly: true - - mountPath: /host/etc - name: etc-fs - readOnly: true - + - command: + - sh + - -c + - 'while [ -f /host/etc/redhat-release ] && [ -z "$(ls /host/usr/src/kernels)" ] + ; do + + echo "waiting for kernel headers to be installed" + + sleep 3 + + done' + image: public.ecr.aws/docker/library/busybox:1.36.0 + name: init-falco + volumeMounts: + - mountPath: /host/usr + name: usr-fs + readOnly: true + - mountPath: /host/etc + name: etc-fs + readOnly: true driver: ## Set to ebpf to enable eBPF support for Falco instead of falco-probe kernel module. ## https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/troubleshoot-collection.md#falco-and-google-kubernetes-engine-gke @@ -2317,78 +2151,68 @@ falco: # repository: falcosecurity/falco-driver-loader falco: load_plugins: - - json - - k8saudit + - json + - k8saudit json_output: true ## The location of the rules file(s). This can contain one or more paths to ## separate rules files.
## Explicitly add missing /etc/falco/rules.available/application_rules.yaml ## before https://github.com/falcosecurity/charts/issues/230 gets resolved. rules_file: - - /etc/falco/falco_rules.yaml - - /etc/falco/falco_rules.local.yaml - - /etc/falco/k8s_audit_rules.yaml - - /etc/falco/rules.d - - /etc/falco/rules.available/application_rules.yaml - + - /etc/falco/falco_rules.yaml + - /etc/falco/falco_rules.local.yaml + - /etc/falco/k8s_audit_rules.yaml + - /etc/falco/rules.d + - /etc/falco/rules.available/application_rules.yaml falcoctl: artifact: follow: enabled: false install: enabled: false - customRules: ## Mark the following as known k8s api callers: ## * prometheus ## * prometheus operator ## * telegraf operator ## * grafana sidecar - rules_user_known_k8s_api_callers.yaml: |- - - macro: user_known_contact_k8s_api_server_activities - condition: > - (container.image.repository = "quay.io/prometheus/prometheus") or - (container.image.repository = "quay.io/coreos/prometheus-operator") or - (container.image.repository = "quay.io/influxdb/telegraf-operator") or - (container.image.repository = "kiwigrid/k8s-sidecar") - rules_user_sensitive_mount_containers.yaml: |- - - macro: user_sensitive_mount_containers - condition: > - (container.image.repository = "falcosecurity/falco") or - (container.image.repository = "quay.io/prometheus/node-exporter") + rules_user_known_k8s_api_callers.yaml: "- macro: user_known_contact_k8s_api_server_activities\n\ + \ condition: >\n (container.image.repository = \"quay.io/prometheus/prometheus\"\ + ) or\n (container.image.repository = \"quay.io/coreos/prometheus-operator\")\ + \ or\n (container.image.repository = \"quay.io/influxdb/telegraf-operator\")\ + \ or\n (container.image.repository = \"kiwigrid/k8s-sidecar\")" + rules_user_sensitive_mount_containers.yaml: "- macro: user_sensitive_mount_containers\n\ + \ condition: >\n (container.image.repository = \"falcosecurity/falco\") or\n\ + \ (container.image.repository = \"quay.io/prometheus/node-exporter\")" ## NOTE: kube-proxy not exact matching because of regional ecr e.g. ## 602401143452.dkr.ecr.us-west-1.amazonaws.com/eks/kube-proxy - rules_user_privileged_containers.yaml: |- - - macro: user_privileged_containers - condition: > - (container.image.repository endswith ".amazonaws.com/eks/kube-proxy") - + rules_user_privileged_containers.yaml: "- macro: user_privileged_containers\n condition:\ + \ >\n (container.image.repository endswith \".amazonaws.com/eks/kube-proxy\")" ## Configure Tailing Sidecar Operator ## ref: https://github.com/SumoLogic/tailing-sidecar/blob/main/helm/tailing-sidecar-operator/values.yaml tailing-sidecar-operator: enabled: false - ## Put here the new name if you want to override the full name used for tailing-sidecar-operator components. - # fullnameOverride: "" - + # fullnameOverride: '' ## creation of Security Context Constraints in Openshift scc: create: false - ## Configure OpenTelemetry Operator - Instrumentation ## ref: https://github.com/open-telemetry/opentelemetry-helm-charts/tree/main/charts/opentelemetry-operator opentelemetry-operator: enabled: true - ## Specific for Sumo Logic chart - Instrumentation resource creation instrumentationJobImage: image: repository: sumologic/kubernetes-tools tag: 2.14.0 - createDefaultInstrumentation: false - instrumentationNamespaces: "" - + instrumentationNamespaces: '' + ## Current instrumentation doesn't support customization + ## for nodejs. Traces are always enabled. 
+ ## nodejs: + ## traces: + ## enabled: true instrumentation: dotnet: traces: @@ -2405,35 +2229,26 @@ opentelemetry-operator: enabled: true metrics: enabled: true - ## Current instrumentation doesn't support customization - ## for nodejs. Traces are always enabled. - ##nodejs: - ## traces: - ## enabled: true - ## Specific for OpenTelemetry Operator chart values admissionWebhooks: failurePolicy: Fail enabled: true - ## skip admission webhook on our own OpenTelemetryCollector object to avoid having to wait for operator to start objectSelector: matchExpressions: - - key: sumologic.com/component - operator: NotIn - values: ["metrics"] - + - key: sumologic.com/component + operator: NotIn + values: + - metrics certManager: enabled: false issuerRef: {} - autoGenerateCert: true - manager: collectorImage: - repository: "public.ecr.aws/sumologic/sumologic-otel-collector" - tag: "0.85.0-sumo-0" - env: + repository: public.ecr.aws/sumologic/sumologic-otel-collector + tag: 0.85.0-sumo-0 + env: {} resources: limits: cpu: 250m @@ -2441,20 +2256,17 @@ opentelemetry-operator: requests: cpu: 150m memory: 256Mi - ## pvcCleaner deletes unused PVCs pvcCleaner: metrics: enabled: false logs: enabled: false - job: image: repository: public.ecr.aws/sumologic/kubernetes-tools-kubectl tag: 2.20.0 pullPolicy: IfNotPresent - resources: limits: memory: 256Mi @@ -2462,27 +2274,20 @@ pvcCleaner: requests: memory: 64Mi cpu: 100m - nodeSelector: {} - ## Add custom labels - ## Node tolerations for server scheduling to nodes with taints ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ ## tolerations: [] - ## Affinity and anti-affinity ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## affinity: {} - podLabels: {} ## Add custom annotations podAnnotations: {} - ## Schedule for cronJobs - schedule: "*/15 * * * *" - + schedule: '*/15 * * * *' ## securityContext for pvcCleaner pods securityContext: runAsUser: 1000
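Finally, the pvcCleaner `schedule` above uses the standard five-field cron syntax of Kubernetes CronJobs, so `*/15 * * * *` runs the cleanup every 15 minutes; to run it hourly instead, for example (illustrative value):

    pvcCleaner:
      schedule: '0 * * * *'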