From 32a7da3b9c07fff42f08051041719eeed0962478 Mon Sep 17 00:00:00 2001 From: Dominik Rosiek Date: Wed, 13 Sep 2023 15:54:27 +0200 Subject: [PATCH] feat: add schema to helm chart Signed-off-by: Dominik Rosiek --- .gitignore | 3 + ci/check_configuration_keys.py | 21 +- ci/generate-schema.py | 118 + ci/generate_readme.py | 106 + ci/generate_values.py | 133 + ci/test_generate_values.py | 145 + deploy/helm/sumologic/README.md | 601 +- deploy/helm/sumologic/_values.yaml | 2307 +++++++ deploy/helm/sumologic/values.schema.json | 7078 ++++++++++++++++++++++ deploy/helm/sumologic/values.yaml | 2 +- vagrant/scripts/diff_values.py | 143 +- vagrant/scripts/test_diff_values.py | 40 + 12 files changed, 10307 insertions(+), 390 deletions(-) create mode 100755 ci/generate-schema.py create mode 100755 ci/generate_readme.py create mode 100755 ci/generate_values.py create mode 100755 ci/test_generate_values.py create mode 100644 deploy/helm/sumologic/_values.yaml create mode 100644 deploy/helm/sumologic/values.schema.json create mode 100755 vagrant/scripts/test_diff_values.py diff --git a/.gitignore b/.gitignore index 6daf8c0dc2..740f82812c 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,6 @@ go.work.sum # Local values files values.local.yaml + +# Python cache +__pycache__ diff --git a/ci/check_configuration_keys.py b/ci/check_configuration_keys.py index 2c0a2db96d..b8612d4fba 100755 --- a/ci/check_configuration_keys.py +++ b/ci/check_configuration_keys.py @@ -12,21 +12,13 @@ DESCRIPTION = 'This program verifies if all configuration from values.yaml has been documented.' SKIP_DEFAULTS = { - 'kube-prometheus-stack.enabled', - 'kube-prometheus-stack.global.imagePullSecrets', - 'metadata.logs.autoscaling.targetMemoryUtilizationPercentage', - 'metadata.logs.podDisruptionBudget', 'metadata.logs.statefulset.extraEnvVars', 'metadata.logs.statefulset.extraVolumeMounts', 'metadata.logs.statefulset.extraVolumes', 'metadata.logs.statefulset.extraPorts', - 'metadata.metrics.podDisruptionBudget', - 'metadata.metrics.autoscaling.targetMemoryUtilizationPercentage', 'metadata.metrics.statefulset.extraEnvVars', 'metadata.metrics.statefulset.extraVolumeMounts', 'metadata.metrics.statefulset.extraVolumes', - 'metadata.persistence.storageClass', - 'otelcolInstrumentation.statefulset.priorityClassName', 'otelcolInstrumentation.statefulset.extraEnvVars', 'otelcolInstrumentation.statefulset.extraVolumeMounts', 'otelcolInstrumentation.statefulset.extraVolumes', @@ -37,12 +29,8 @@ 'tracesSampler.deployment.extraVolumeMounts', 'tracesSampler.deployment.extraVolumes', 'sumologic.setup.job.tolerations', - 'sumologic.setup.job.pullSecrets', - 'sumologic.pullSecrets', - 'sumologic.setup.force', - 'sumologic.setup.debug', - 'metrics-server.image.pullSecrets', - 'sumologic.events.sourceCategory', + 'kube-prometheus-stack.prometheus-node-exporter.resources', + 'kube-prometheus-stack.prometheusOperator.resources', } def main(values_path: str, readme_path: str, full_diff=False) -> None: @@ -241,6 +229,9 @@ def compare_values(readme: dict, values_keys: list[str], values: dict) -> dict: if compare_keys(this_key, other_key): other_value = get_value(this_key, values) if this_value != other_value: + if this_value.replace("\\\\", "\\").replace("\\|", "|") == other_value: + # yaml contains both `'` and `"` strings and readme is always `"` string + continue # Skip configuration linked to values.yaml if this_value == 'See [values.yaml]': @@ -270,7 +261,7 @@ def get_value(key: str, dictionary: dict) -> str: value = value[subkey] if 
isinstance(value, str): - return value + return value.replace("\n", "\\n") return json.dumps(value) diff --git a/ci/generate-schema.py b/ci/generate-schema.py new file mode 100755 index 0000000000..f408fd7d74 --- /dev/null +++ b/ci/generate-schema.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 + +import argparse +import json +import re +import sys + +import yaml +from yaml.loader import SafeLoader + +DESCRIPTION = 'This program generates JSON schema from README.md table' + +def values_to_dictionary(path: str) -> dict: + """Reads given path as values.yaml and returns it as dict + + Args: + path (str): path to the value.yaml + + Returns: + dict: values.yaml as dict + """ + with open(path, encoding='utf-8') as file: + values_yaml = file.read() + values_yaml = re.sub(r'(\[\]|\{\})\n(\s+# )', r'\n\2', values_yaml, flags=re.M) + values_yaml = re.sub(r'^(\s+)# ', r'\1', values_yaml, flags=re.M) + return yaml.load(values_yaml, Loader=SafeLoader) + +def set_properties(values): + properties = { + 'type': '', + # 'required': [], + # 'properties': {}, + # 'default': '', + 'description': '', + } + + if isinstance(values, dict): + properties['type'] = 'object' + properties['properties'] = {} + for key in values.keys(): + properties['properties'][key] = set_properties(values[key]) + else: + properties['default'] = values + if isinstance(values, bool): + properties['type'] = 'boolean' + elif isinstance(values, int): + properties['type'] = 'integer' + elif isinstance(values, (list, set)): + properties['type'] = 'array' + elif isinstance(values, str): + properties['type'] = 'string' + else: + properties['type'] = 'string' + if not properties['default']: + properties['default'] = "" + + return properties + +def extract_description_from_readme(path: str) -> dict: + """Reads given path as README.md and returns dict in the following form: + + ``` + { + configuration_key: configuration_default + } + ``` + + Args: + path (str): path to the README.md + + Returns: + dict: {configuration_key: configuration_default,...} + """ + with open(path, encoding='utf-8') as file: + readme = file.readlines() + + keys = {} + + for line in readme: + match = re.match( + r'^\|\s+`(?P.*?)`\s+\|\s+(?P.*?)\s+\|\s+(?P.*?)\s+\|$', + line) + if match and match.group('key'): + description = match.group('description').strip('`').strip('"') + keys[match.group('key')] = description + + return keys + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + prog = sys.argv[0], + description = DESCRIPTION) + parser.add_argument('--values', required=True) + parser.add_argument('--readme', required=True) + parser.add_argument('--output', required=True) + parser.add_argument('--full-diff', required=False, action='store_true') + args = parser.parse_args() + + values = values_to_dictionary(args.values) + + output = { + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": {}, + } + + for key in values: + output['properties'][key] = set_properties(values[key]) + + descriptions = extract_description_from_readme(args.readme) + for key, description in descriptions.items(): + a = output['properties'] + subkeys = key.split(".") + for i in range(0, len(subkeys)-1): + a = a[subkeys[i]]['properties'] + a[subkeys[-1]]['description'] = description + with open(args.output, "w") as f: + f.write(json.dumps(output, indent=2)) diff --git a/ci/generate_readme.py b/ci/generate_readme.py new file mode 100755 index 0000000000..5314eb6a77 --- /dev/null +++ b/ci/generate_readme.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python3 + +import 
argparse +import json +import re +import sys +import os + +import yaml +from yaml.loader import SafeLoader + +DESCRIPTION = "test" +HEADER = """# Configuration + +To see all available configuration for our sub-charts, please refer to their documentation. + +- [Falco](https://github.com/falcosecurity/charts/tree/master/charts/falco#configuration) - All Falco properties should be prefixed with + `falco.` in our values.yaml to override a property not listed below. +- [Kube-Prometheus-Stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#configuration) - All + Kube Prometheus Stack properties should be prefixed with `kube-prometheus-stack.` in our values.yaml to override a property not listed + below. +- [Metrics Server](https://github.com/bitnami/charts/tree/master/bitnami/metrics-server/#parameters) - All Metrics Server properties should + be prefixed with `metrics-server.` in our values.yaml to override a property not listed below. +- [Tailing Sidecar Operator](https://github.com/SumoLogic/tailing-sidecar/tree/main/helm/tailing-sidecar-operator#configuration) - All + Tailing Sidecar Operator properties should be prefixed with `tailing-sidecar-operator` in our values.yaml to override a property not + listed below. +- [OpenTelemetry Operator](https://github.com/open-telemetry/opentelemetry-helm-charts/tree/main/charts/opentelemetry-operator#opentelemetry-operator-helm-chart) - + All OpenTelemetry Operator properties should be prefixed with `opentelemetry-operator` in our values.yaml to override a property listed + below. + +The following table lists the configurable parameters of the Sumo Logic chart and their default values. + +| Parameter | Description | Default | +| --- | --- | --- |""" + +FOOTER = """ +[values.yaml]: values.yaml""" + +def build_default(data): + return_value = {} + if 'properties' in data: + for key in data['properties']: + return_value[key] = build_default(data['properties'][key]) + return return_value + elif 'items' in data: + return [item['default'] for item in data['items']] + else: + return data['default'] + +def get_description(prefix, data): + return_value = [] + prefix = prefix.strip('.') + description = data["description"] if 'description' in data else "" + built_default = None + + if 'properties' in data: + if not description: + for key in data['properties']: + if prefix == "": + pref = key + else: + if "." 
in key: + pref = f"{prefix}[{key}]" + else: + pref = f"{prefix}.{key}" + return_value += get_description(pref, data['properties'][key]) + return return_value + else: + built_default = build_default(data) + + if 'items' in data: + built_default = build_default(data) + + default = json.dumps(built_default if built_default is not None else data['default']).strip('"').replace("|", "\|") + if len(default) > 180: + default = "See [values.yaml]" + + if default == "": + default = "Nil" + return_value.append(f'| `{prefix}` | {data["description"]} | `{default}` |') + + return return_value + +def main(schema, directory): + readme = [HEADER] + with open(schema) as f: + data = json.loads(f.read()) + readme += get_description("", data) + readme.append(FOOTER) + + readme = "\n".join(readme) + + with open(os.path.join(directory, "README.md"), "w") as f: + f.write(readme) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + prog = sys.argv[0], + description = DESCRIPTION) + parser.add_argument('--schema', required=True) + parser.add_argument('--dir', required=True) + parser.add_argument('--full-diff', required=False, action='store_true') + args = parser.parse_args() + + main(args.schema, args.dir) diff --git a/ci/generate_values.py b/ci/generate_values.py new file mode 100755 index 0000000000..feae261670 --- /dev/null +++ b/ci/generate_values.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python3 + +import argparse +import json +import re +import sys +import os + +import yaml +from yaml.loader import SafeLoader + +DESCRIPTION = "test" + + +def generate_values(indent, data): + return_value = [] + if 'properties' in data: + for key, value in data['properties'].items(): + commented = '' + if 'comment' in value: + for line in value['comment'].split('\n'): + if not line.strip(): + return_value.append(f"{indent}##") + else: + return_value.append(f"{indent}## {line}") + if 'commented' in value and value['commented']: + commented = '# ' + if 'properties' in value: + return_value.append(f"{indent}{commented}{key}:") + elif 'items' in value: + return_value.append(f"{indent}{commented}{key}:") + for item in value['items']: + commented = '' + if 'commented' in item and item['commented']: + commented = '# ' + if 'comment' in item: + for line in item['comment'].split('\n'): + if '#' in indent: + return_value.append(f"{indent.replace('# ', '##')} {line.rstrip()}") + else: + return_value.append(f"{indent}## {line.rstrip()}") + dumped = dump_to_string(item['default']).strip() + first = True + for line in dumped.split("\n"): + if first: + return_value.append(f"{indent}{commented}- {line}") + first = False + continue + return_value.append(f"{indent}{commented} {line}") + else: + dumped = dump_to_string({key: value['default']}).strip() + for line in dumped.split("\n"): + if not line.strip(): + return_value.append(f"{indent}{commented.rstrip()}") + else: + return_value.append(f"{indent}{commented}{line.rstrip()}") + if 'example' in value: + dumped = dump_to_string({key: value['example']}).strip() + for line in dumped.split("\n")[1:]: + if not line.strip(): + return_value.append(f"{indent}#") + else: + return_value.append(f"{indent}# {line}") + return_value += generate_values(f"{indent}{commented} ", data['properties'][key]) + return return_value + +def main(schema, directory): + with open(schema) as f: + data = json.loads(f.read()) + values = ['## Sumo Logic Kubernetes Collection configuration file', +'## All the comments start with two or more # characters'] + generate_values('', data) + + print('\n'.join(values)) + + 
# with open(os.path.join(directory, "_values.yaml"), "w") as f: + # f.write(yaml.dump(values)) + + +def dump_to_string(obj): + """ + Dump value to string. This is custom serializer to yaml. Examples are covered in test_generate_values.yaml + """ + if isinstance(obj, str): + if '\n' in obj: + # Dump to mulitline string and remove `key` + return remove_prefix(yaml.dump({'key': obj}, default_style='|').strip().removeprefix('"key": '), ' ') + else: + # dump to string and remove `key` + return yaml.dump({'key': obj}).strip().removeprefix('key: ') + elif isinstance(obj, dict): + if not len(obj): + return '{}' + ret = '' + for key, value in obj.items(): + # figure out separator, may be `:\n` or `: ` + sep = ':\n ' if isinstance(value, (dict, list)) and len(value) else ': ' + ret += f'{key}{sep}{add_prefix(dump_to_string(value), " ").lstrip()}\n' + return ret.rstrip() + elif isinstance(obj, list): + if not len(obj): + return '[]' + ret = '' + for value in obj: + ret += f'- {add_prefix(dump_to_string(value), " ").lstrip()}\n' + return ret.rstrip() + else: + return yaml.dump({'key': obj}).strip().removeprefix('key: ') + + +def add_prefix(text, prefix): + """ + adds prefix to every line of text + """ + return '\n'.join(f'{prefix}{line}' for line in text.split('\n')) + + +def remove_prefix(text, prefix): + """ + removes prefix from every line of text + """ + return '\n'.join(f'{line.removeprefix(prefix)}' for line in text.split('\n')) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + prog = sys.argv[0], + description = DESCRIPTION) + parser.add_argument('--schema', required=True) + parser.add_argument('--dir', required=True) + parser.add_argument('--full-diff', required=False, action='store_true') + args = parser.parse_args() + + main(args.schema, args.dir) diff --git a/ci/test_generate_values.py b/ci/test_generate_values.py new file mode 100755 index 0000000000..90a86f48c5 --- /dev/null +++ b/ci/test_generate_values.py @@ -0,0 +1,145 @@ +#!/usr/bin/env python3 + +import unittest +from generate_values import dump_to_string, generate_values + +class DumpCase: + def __init__(self, object, expected): + self.object = object + self.expected = expected + +class ValuesCase: + def __init__(self, object, expected): + self.object = object + self.expected = expected + +class TestDump(unittest.TestCase): + cases = [ + DumpCase('test', 'test'), + DumpCase({ + 'a': 'b', + 'c': 'd' + }, + '''a: b +c: d'''), + DumpCase({'e': { + 'a': 'b', + 'c': 'd' + }}, + '''e: + a: b + c: d'''), + DumpCase([{ + 'a': 'b', + 'c': 'd' + }], + '''- a: b + c: d'''), + DumpCase({'a': {}}, + '''a: {}'''), + DumpCase({'a': []}, + '''a: []'''), + DumpCase({'a': ''}, + '''a:'''), + DumpCase([ + { + "name": "init-falco", + "image": "public.ecr.aws/docker/library/busybox:1.36.0", + "command": [ + "sh", + "-c", + "while [ -f /host/etc/redhat-release ] && [ -z \"$(ls /host/usr/src/kernels)\" ] ; do\necho \"waiting for kernel headers to be installed\"\nsleep 3\ndone\n" + ], + "volumeMounts": [ + { + "mountPath": "/host/usr", + "name": "usr-fs", + "readOnly": True + }, + { + "mountPath": "/host/etc", + "name": "etc-fs", + "readOnly": True + } + ] + } + ], + '''- name: init-falco + image: public.ecr.aws/docker/library/busybox:1.36.0 + command: + - sh + - -c + - | + while [ -f /host/etc/redhat-release ] && [ -z "$(ls /host/usr/src/kernels)" ] ; do + echo "waiting for kernel headers to be installed" + sleep 3 + done + volumeMounts: + - mountPath: /host/usr + name: usr-fs + readOnly: true + - mountPath: /host/etc + name: etc-fs + 
readOnly: true'''), + ] + + def test(self): + self.maxDiff = None + for case in self.cases: + self.assertEqual(case.expected, dump_to_string(case.object)) + +class TestDump(unittest.TestCase): + cases = [ + ValuesCase({ + "properties": { + "cAdvisorMetricRelabelings": { + "type": "array", + "comment": "see docs/scraped_metrics.md\ncadvisor container metrics", + "description": "Kubelet CAdvisor MetricRelabelConfigs", + "items": [ + { + "default": { + "action": "keep", + "regex": "(?:container_cpu_usage_seconds_total|container_memory_working_set_bytes)", + "sourceLabels": [ + "__name__" + ] + } + }, + { + "comment": "Drop container metrics with container tag set to an empty string:\nthese are the pod aggregated container metrics which can be aggregated\nin Sumo anyway. There's also some cgroup-specific time series we also\ndo not need.", + "default": { + "action": "drop", + "sourceLabels": [ + "__name__", + "container" + ], + } + } + ] + } + } + }, '''## see docs/scraped_metrics.md +## cadvisor container metrics +cAdvisorMetricRelabelings: +- action: keep + regex: (?:container_cpu_usage_seconds_total|container_memory_working_set_bytes) + sourceLabels: + - __name__ +## Drop container metrics with container tag set to an empty string: +## these are the pod aggregated container metrics which can be aggregated +## in Sumo anyway. There's also some cgroup-specific time series we also +## do not need. +- action: drop + sourceLabels: + - __name__ + - container''') + ] + + def test(self): + self.maxDiff = None + for case in self.cases: + self.assertEqual(case.expected, '\n'.join(generate_values('', case.object))) + +if __name__ == '__main__': + unittest.main() diff --git a/deploy/helm/sumologic/README.md b/deploy/helm/sumologic/README.md index a7ba679522..10f6422e67 100644 --- a/deploy/helm/sumologic/README.md +++ b/deploy/helm/sumologic/README.md @@ -25,71 +25,132 @@ The following table lists the configurable parameters of the Sumo Logic chart an | `namespaceOverride` | Used to override the chart's default target namepace. | `Nil` | | `sumologic.setupEnabled` | If enabled, a pre-install hook will create Collector and Sources in Sumo Logic. | `true` | | `sumologic.cleanupEnabled` | If enabled, a pre-delete hook will destroy Kubernetes secret and Sumo Logic Collector. | `false` | +| `sumologic.envFromSecret` | If enabled, accessId and accessKey will be sourced from Secret Name given. Be sure to include at least the following env variables in your secret (1) SUMOLOGIC_ACCESSID, (2) SUMOLOGIC_ACCESSKEY | `sumo-api-secret` | +| `sumologic.accessId` | Sumo access ID. | `Nil` | +| `sumologic.accessKey` | Sumo access key. | `Nil` | +| `sumologic.endpoint` | Sumo API endpoint; Leave blank for automatic endpoint discovery and redirection. | `Nil` | +| `sumologic.httpProxy` | HTTP proxy URL | `Nil` | +| `sumologic.httpsProxy` | HTTPS proxy URL | `Nil` | +| `sumologic.noProxy` | List of comma separated hostnames which should be excluded from the proxy | `kubernetes.default.svc` | +| `sumologic.collectorName` | The name of the Sumo Logic collector that will be created in the SetUp job. Defaults to `clusterName` if not specified. | `Nil` | +| `sumologic.clusterName` | An identifier for the Kubernetes cluster. Whitespaces in the cluster name will be replaced with dashes. | `kubernetes` | +| `sumologic.cluster` | Configuration of Kubernetes for [Terraform client](https://www.terraform.io/docs/providers/kubernetes/index.html#argument-reference). 
| `See [values.yaml]` | +| `sumologic.collectionMonitoring` | If you set it to false, it would set EXCLUDE_NAMESPACE= and not add the Otelcol logs and Prometheus remotestorage metrics. | `true` | +| `sumologic.pullSecrets` | Optional list of secrets that will be used for pulling images for Sumo Logic's deployments and statefulsets. | `[{"name": "myRegistryKeySecretName"}]` | +| `sumologic.podLabels` | Additional labels for the pods. | `{}` | +| `sumologic.podAnnotations` | Additional annotations for the pods. | `{}` | +| `sumologic.serviceAccount.annotations` | Add custom annotations to sumologic serviceAccounts | `{}` | +| `sumologic.scc.create` | Create OpenShift's Security Context Constraint | `false` | +| `sumologic.setup.force` | Force collection installation (disables k8s version verification) | `true` | +| `sumologic.setup.job.image.repository` | Image repository for Sumo Logic setup job docker container. | `public.ecr.aws/sumologic/kubernetes-setup` | +| `sumologic.setup.job.image.tag` | Image tag for Sumo Logic setup job docker container. | `3.10.0` | +| `sumologic.setup.job.image.pullPolicy` | Image pullPolicy for Sumo Logic docker container. | `IfNotPresent` | +| `sumologic.setup.job.pullSecrets` | Optional list of secrets that will be used for pulling images for Sumo Logic's setup job. | `[{"name": "myRegistryKeySecretName"}]` | +| `sumologic.setup.job.resources` | Resource requests and limits for the setup Job. | `{"limits": {"memory": "256Mi", "cpu": "2000m"}, "requests": {"memory": "64Mi", "cpu": "200m"}}` | +| `sumologic.setup.job.nodeSelector` | Node selector for sumologic setup job. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | +| `sumologic.setup.job.tolerations` | Add tolerations for the setup Job. | `[]` | +| `sumologic.setup.job.affinity` | Add affinity and anti-affinity for the setup Job. | `{}` | +| `sumologic.setup.job.podLabels` | Additional labels for the setup Job pod. | `{}` | +| `sumologic.setup.job.podAnnotations` | Additional annotations for the setup Job pod. | `{}` | +| `sumologic.setup.debug` | Enable debug mode (disables the automatic execution of the setup.sh script) | `true` | +| `sumologic.setup.monitors.enabled` | If enabled, a pre-install hook will create k8s monitors in Sumo Logic. | `true` | +| `sumologic.setup.monitors.monitorStatus` | The installed monitors default status: enabled/disabled. | `enabled` | +| `sumologic.setup.monitors.notificationEmails` | A list of emails to send notifications from monitors. | `[]` | +| `sumologic.setup.dashboards.enabled` | If enabled, a pre-install hook will install k8s dashboards in Sumo Logic. | `true` | +| `sumologic.collector.fields` | Configuration of Sumo Logic fields. [See Sumo Logic Terraform Plugin documentation for more information](https://registry.terraform.io/providers/SumoLogic/sumologic/latest/docs/resources/collector#fields). All double quotes should be escaped here regarding Terraform syntax. | `{}` | +| `sumologic.collector.sources` | Configuration of HTTP sources. [See docs/Terraform.md for more information](/docs/terraform.md). All double quotes should be escaped here regarding Terraform syntax. | `See [values.yaml]` | +| `sumologic.otelcolImage.repository` | Default image repository for OpenTelemetry Collector. This can be overridden for specific components. | `public.ecr.aws/sumologic/sumologic-otel-collector` | +| `sumologic.otelcolImage.tag` | Default image tag for OpenTelemetry Collector. This can be overridden for specific components. 
| `0.85.0-sumo-0` | +| `sumologic.otelcolImage.addFipsSuffix` | Add a `-fips` suffix to all image tags. See [docs/security-best-practices.md](/docs/security-best-practices.md) for more information. | `false` | | `sumologic.events.enabled` | Defines whether collection of Kubernetes events is enabled. | `true` | | `sumologic.events.sourceName` | Source name for the Events source. | `events` | -| `sumologic.events.sourceCategory` | Source category for the Events source. | `{clusterName}/events` | -| `sumologic.events.sourceCategoryReplaceDash` | Used to replace - with another character. | `"/"` | +| `sumologic.events.sourceCategory` | Source category for the Events source. | `kubernetes/events` | +| `sumologic.events.sourceCategoryReplaceDash` | Used to replace - with another character. | `/` | | `sumologic.events.persistence.enabled` | Enable persistence for the event collector. Persistence lets the collector avoid reingesting events on restart and buffer them locally if unable to reach the backend. | `true` | -| `sumologic.events.persistence.persistentVolume.path` | Local filesystem path the persistent storage volume will be mounted at. | `/var/lib/storage/events` | | `sumologic.events.persistence.size` | Size of the persistent storage volume | `10Gi` | -| `sumologic.events.persistence.persistentVolume.storageClass` | The storageClassName for the persistent storage volume | `Nil` | +| `sumologic.events.persistence.persistentVolume.path` | Local filesystem path the persistent storage volume will be mounted at. | `/var/lib/storage/events` | | `sumologic.events.persistence.persistentVolume.accessMode` | The accessMode for the persistent storage volume | `ReadWriteOnce` | | `sumologic.events.persistence.persistentVolume.pvcLabels` | Additional PersistentVolumeClaim labels for persistent storage volumes | `{}` | -| `sumologic.events.sourceType` | The type of the Sumo Logic source being used for events ingestion. Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/logs-metrics/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/). | `otlp` | +| `sumologic.events.persistence.persistentVolume.storageClass` | The storageClassName for the persistent storage volume | `Nil` | +| `sumologic.events.sourceType` | The type of the Sumo Logic source being used for events ingestion. Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/logs-metrics/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/). | `http` | | `sumologic.logs.enabled` | Set the enabled flag to false for disabling logs ingestion altogether. | `true` | | `sumologic.logs.collector.otelcol.enabled` | Enable OpenTelemetry logs collector. 
| `true` | | `sumologic.logs.collector.otelcloudwatch.enabled` | Flag to enable CloudWatch Collection | `false` | -| `sumologic.logs.collector.otelcloudwatch.logGroups` | Log Groups configuration for AWS CloudWatch receiver | `{}` | +| `sumologic.logs.collector.otelcloudwatch.roleArn` | AWS role ARN, to authenticate with CloudWatch | `Nil` | | `sumologic.logs.collector.otelcloudwatch.persistence.enabled` | Flag to control persistence for the CloudWatch collector | `true` | +| `sumologic.logs.collector.otelcloudwatch.region` | EKS Fargate cluster region | `Nil` | | `sumologic.logs.collector.otelcloudwatch.pollInterval` | CloudWatch poll interval | `1m` | -| `sumologic.logs.collector.otelcloudwatch.region` | EKS Fargate cluster region | `""` | -| `sumologic.logs.collector.otelcloudwatch.roleArn` | AWS role ARN, to authenticate with CloudWatch | `""` | -| `sumologic.logs.container.enabled` | Enable collecting logs from Kubernetes containers. | `true` | -| `sumologic.logs.container.format` | Format for container logs. | `fields` | +| `sumologic.logs.collector.otelcloudwatch.logGroups` | Log Groups configuration for AWS CloudWatch receiver | `{}` | | `sumologic.logs.multiline.enabled` | Enable multiline detection for Kubernetes container logs. | `true` | -| `sumologic.logs.multiline.first_line_regex` | Regular expression to match first line of multiline logs. | `^\[?\d{4}-\d{1,2}-\d{1,2}.\d{2}:\d{2}:\d{2}` | +| `sumologic.logs.multiline.first_line_regex` | Regular expression to match first line of multiline logs. | `^\\[?\\d{4}-\\d{1,2}-\\d{1,2}.\\d{2}:\\d{2}:\\d{2}` | | `sumologic.logs.multiline.additional` | List of additional conditions and expressions to match first line of multiline logs. See [Multiline](/docs/collecting-container-logs.md#conditional-multiline-log-parsing) for more information. | `[]` | -| `sumologic.logs.systemd.enabled` | Enable collecting systemd logs from Kubernets nodes. | `true` | -| `sumologic.logs.systemd.units` | List of systemd units to collect logs from. | See [values.yaml] | +| `sumologic.logs.container.enabled` | Enable collecting logs from Kubernetes containers. | `true` | +| `sumologic.logs.container.format` | Format for container logs. | `fields` | | `sumologic.logs.container.keep_time_attribute` | When set to `true`, preserves the `time` attribute, which is a string representation of the `timestamp` attribute. | `false` | -| `sumologic.logs.container.sourceHost` | Set the \_sourceHost metadata field in Sumo Logic. | `""` | -| `sumologic.logs.container.sourceName` | Set the \_sourceName metadata field in Sumo Logic. | `"%{namespace}.%{pod}.%{container}"` | -| `sumologic.logs.container.sourceCategory` | Set the \_sourceCategory metadata field in Sumo Logic. | `"%{namespace}/%{pod_name}"` | -| `sumologic.logs.container.sourceCategoryPrefix` | Set the prefix, for \_sourceCategory metadata. | `"kubernetes/"` | -| `sumologic.logs.container.sourceCategoryReplaceDash` | Used to replace - with another character. | `"/"` | -| `sumologic.logs.container.excludeContainerRegex` | A regular expression for container names. Logs from matching containers will not be sent to Sumo. | `""` | -| `sumologic.logs.container.excludeHostRegex` | A regular expression for Kubernetes node names. Logs from pods running on matching nodes will not be sent to Sumo. | `""` | -| `sumologic.logs.container.excludeNamespaceRegex` | A regular expression for Kubernetes namespace names. Logs from pods running in matching namespaces will not be sent to Sumo. 
| `""` | -| `sumologic.logs.container.excludePodRegex` | A regular expression for pod names. Logs from matching pods will not be sent to Sumo. | `""` | | `sumologic.logs.container.otelcol.extraProcessors` | Extra processors for container logs. See [/docs/collecting-container-logs.md](/docs/collecting-container-logs.md) for details. | `[]` | +| `sumologic.logs.container.sourceHost` | Set the \_sourceHost metadata field in Sumo Logic. | `Nil` | +| `sumologic.logs.container.sourceName` | Set the \_sourceName metadata field in Sumo Logic. | `%{namespace}.%{pod}.%{container}` | +| `sumologic.logs.container.sourceCategory` | Set the \_sourceCategory metadata field in Sumo Logic. | `%{namespace}/%{pod_name}` | +| `sumologic.logs.container.sourceCategoryPrefix` | Set the prefix, for \_sourceCategory metadata. | `kubernetes/` | +| `sumologic.logs.container.sourceCategoryReplaceDash` | Used to replace - with another character. | `/` | +| `sumologic.logs.container.excludeContainerRegex` | A regular expression for container names. Logs from matching containers will not be sent to Sumo. | `Nil` | +| `sumologic.logs.container.excludeHostRegex` | A regular expression for Kubernetes node names. Logs from pods running on matching nodes will not be sent to Sumo. | `Nil` | +| `sumologic.logs.container.excludeNamespaceRegex` | A regular expression for Kubernetes namespace names. Logs from pods running in matching namespaces will not be sent to Sumo. | `Nil` | +| `sumologic.logs.container.excludePodRegex` | A regular expression for pod names. Logs from matching pods will not be sent to Sumo. | `Nil` | | `sumologic.logs.container.perContainerAnnotationsEnabled` | Enable container-level pod annotations. | `false` | | `sumologic.logs.container.perContainerAnnotationPrefixes` | Defines the list of prefixes of container-level pod annotations. | `[]` | -| `sumologic.logs.systemd.sourceName` | Set the \_sourceName metadata field in Sumo Logic. | `"%{_sourceName}"` | -| `sumologic.logs.systemd.sourceCategory` | Set the \_sourceCategory metadata field in Sumo Logic. | `"system"` | -| `sumologic.logs.systemd.sourceCategoryPrefix` | Set the prefix, for \_sourceCategory metadata. | `"kubernetes/"` | -| `sumologic.logs.systemd.sourceCategoryReplaceDash` | Used to replace - with another character. | `"/"` | -| `sumologic.logs.systemd.excludeFacilityRegex` | A regular expression for facility. Matching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `""` | -| `sumologic.logs.systemd.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `Nil` | -| `sumologic.logs.systemd.excludePriorityRegex` | A regular expression for priority. Matching priority will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `""` | -| `sumologic.logs.systemd.excludeUnitRegex` | A regular expression for unit. Matching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `""` | +| `sumologic.logs.systemd.enabled` | Enable collecting systemd logs from Kubernets nodes. | `true` | +| `sumologic.logs.systemd.units` | List of systemd units to collect logs from. | `["docker.service"]` | | `sumologic.logs.systemd.otelcol.extraProcessors` | Extra processors for systemd logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details. 
| `[]` | -| `sumologic.logs.kubelet.sourceName` | Set the \_sourceName metadata field in Sumo Logic. | `"k8s_kubelet"` | -| `sumologic.logs.kubelet.sourceCategory` | Set the \_sourceCategory metadata field in Sumo Logic. | `"kubelet"` | -| `sumologic.logs.kubelet.sourceCategoryPrefix` | Set the prefix, for \_sourceCategory metadata. | `"kubernetes/"` | -| `sumologic.logs.kubelet.sourceCategoryReplaceDash` | Used to replace - with another character. | `"/"` | -| `sumologic.logs.kubelet.excludeFacilityRegex` | A regular expression for facility. Matching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `""` | -| `sumologic.logs.kubelet.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `""` | -| `sumologic.logs.kubelet.excludePriorityRegex` | A regular expression for priority. Matching priority will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `""` | -| `sumologic.logs.kubelet.excludeUnitRegex` | A regular expression for unit. Matching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `""` | +| `sumologic.logs.systemd.sourceName` | Set the \_sourceName metadata field in Sumo Logic. | `%{_sourceName}` | +| `sumologic.logs.systemd.sourceCategory` | Set the \_sourceCategory metadata field in Sumo Logic. | `system` | +| `sumologic.logs.systemd.sourceCategoryPrefix` | Set the prefix, for \_sourceCategory metadata. | `kubernetes/` | +| `sumologic.logs.systemd.sourceCategoryReplaceDash` | Used to replace - with another character. | `/` | +| `sumologic.logs.systemd.excludeFacilityRegex` | A regular expression for facility. Matching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `Nil` | +| `sumologic.logs.systemd.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `Nil` | +| `sumologic.logs.systemd.excludePriorityRegex` | A regular expression for priority. Matching priority will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `Nil` | +| `sumologic.logs.systemd.excludeUnitRegex` | A regular expression for unit. Matching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `Nil` | | `sumologic.logs.kubelet.otelcol.extraProcessors` | Extra processors for systemd logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details. | `[]` | +| `sumologic.logs.kubelet.sourceName` | Set the \_sourceName metadata field in Sumo Logic. | `k8s_kubelet` | +| `sumologic.logs.kubelet.sourceCategory` | Set the \_sourceCategory metadata field in Sumo Logic. | `kubelet` | +| `sumologic.logs.kubelet.sourceCategoryPrefix` | Set the prefix, for \_sourceCategory metadata. | `kubernetes/` | +| `sumologic.logs.kubelet.sourceCategoryReplaceDash` | Used to replace - with another character. | `/` | +| `sumologic.logs.kubelet.excludeFacilityRegex` | A regular expression for facility. Matching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `Nil` | +| `sumologic.logs.kubelet.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). 
| `Nil` | +| `sumologic.logs.kubelet.excludePriorityRegex` | A regular expression for priority. Matching priority will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `Nil` | +| `sumologic.logs.kubelet.excludeUnitRegex` | A regular expression for unit. Matching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). | `Nil` | | `sumologic.logs.fields` | Fields to be created at Sumo Logic to ensure logs are tagged with relevant metadata. [Sumo Logic help](https://help.sumologic.com/docs/manage/fields/#manage-fields) | `["cluster", "container", "daemonset", "deployment", "host", "namespace", "node", "pod", "service", "statefulset"]` | | `sumologic.logs.additionalFields` | Additional Fields to be created in Sumo Logic. [Sumo Logic help](https://help.sumologic.com/docs/manage/fields/#manage-fields) | `[]` | -| `sumologic.logs.sourceType` | The type of the Sumo Logic source being used for logs ingestion. Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/logs-metrics/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/). | `otlp` | +| `sumologic.logs.sourceType` | The type of the Sumo Logic source being used for logs ingestion. Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/logs-metrics/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/). | `http` | | `sumologic.metrics.enabled` | Set the enabled flag to false for disabling metrics ingestion altogether. | `true` | +| `sumologic.metrics.collector.otelcol.enabled` | Enable experimental otelcol metrics collector | `false` | +| `sumologic.metrics.collector.otelcol.scrapeInterval` | The default scrape interval for the collector. | `30s` | +| `sumologic.metrics.collector.otelcol.autoscaling.enabled` | Option to turn autoscaling on for the experimental otelcol metrics and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. collector | `false` | +| `sumologic.metrics.collector.otelcol.autoscaling.minReplicas` | Default min replicas for autoscaling. collector | `3` | +| `sumologic.metrics.collector.otelcol.autoscaling.maxReplicas` | Default max replicas for autoscaling. collector | `10` | +| `sumologic.metrics.collector.otelcol.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `70` | +| `sumologic.metrics.collector.otelcol.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `70` | +| `sumologic.metrics.collector.otelcol.nodeSelector` | Node selector for the experimental otelcol metrics. [See docs/best-practices.md for more information.](/docs/best-practices.md). | `{}` | +| `sumologic.metrics.collector.otelcol.podAnnotations` | Additional annotations for the experimental otelcol metrics pods. | `{}` | +| `sumologic.metrics.collector.otelcol.podLabels` | Additional labels for the experimental otelcol metrics pods. | `{}` | +| `sumologic.metrics.collector.otelcol.priorityClassName` | Priority class name for the experimental otelcol metrics. 
| `Nil` | +| `sumologic.metrics.collector.otelcol.replicaCount` | Replica count for the experimental otelcol metrics collector | `1` | +| `sumologic.metrics.collector.otelcol.resources` | Resource requests and limits for the experimental otelcol metrics collector | `{"limits": {"memory": "2Gi", "cpu": "1000m"}, "requests": {"memory": "768Mi", "cpu": "100m"}}` | +| `sumologic.metrics.collector.otelcol.serviceMonitorSelector` | Selector for ServiceMonitors used for target discovery. By default, we select ServiceMonitors created by the Chart. See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr | `{}` | +| `sumologic.metrics.collector.otelcol.podMonitorSelector` | Selector for PodMonitors used for target discovery. By default, we select PodMonitors created by the Chart. See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr | `{}` | +| `sumologic.metrics.collector.otelcol.securityContext` | The securityContext configuration for the experimental otelcol metrics. | `{"fsGroup": 999}` | +| `sumologic.metrics.collector.otelcol.tolerations` | Tolerations for the experimental otelcol metrics. | `[]` | +| `sumologic.metrics.collector.otelcol.kubelet.enabled` | Enable collection of kubelet metrics. | `true` | +| `sumologic.metrics.collector.otelcol.cAdvisor.enabled` | Enable collection of cAdvisor metrics. | `true` | +| `sumologic.metrics.collector.otelcol.annotatedPods.enabled` | Enable collection of metrics from Pods annotated with prometheus.io/\* keys. See [docs/collecting-application-metrics.md](/docs/collecting-application-metrics.md#application-metrics-are-exposed-one-endpoint-scenario) for more information. | `true` | +| `sumologic.metrics.collector.otelcol.allocationStrategy` | Allocation strategy for the scrape target allocator. Valid values are: least-weighted and consistent-hashing. See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocator | `least-weighted` | +| `sumologic.metrics.enableDefaultFilters` | Enable default metric filters for Sumo Apps. | `false` | +| `sumologic.metrics.dropHistogramBuckets` | Drop buckets from select high-cardinality histogram metrics, leaving only the sum and count components. | `true` | | `sumologic.metrics.otelcol.extraProcessors` | Extra processors configuration for metrics pipeline. See [/docs/collecting-application-metrics.md#metrics-modifications](/docs/collecting-application-metrics.md#metrics-modifications) for more information. | `[]` | -| `sumologic.metrics.remoteWriteProxy.enabled` | Enable a load balancing proxy for Prometheus remote writes. [See docs for more information.](/docs/prometheus.md#using-a-load-balancing-proxy-for-prometheus-remote-write) | `false` | +| `sumologic.metrics.remoteWriteProxy.enabled` | Enable a load balancing proxy for Prometheus remote writes. [See docs for more information.](/docs/prometheus.md#using-a-load-balancing-proxy-for-prometheus-remote-write) | `true` | | `sumologic.metrics.remoteWriteProxy.config.clientBodyBufferSize` | See the [nginx documentation](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size). Increase if you've also increased samples per send in Prometheus remote write. 
| `64k` | | `sumologic.metrics.remoteWriteProxy.config.workerCountAutotune` | This feature autodetects how much CPU is assigned to the nginx instance and setsthe right amount of workers based on that. Disable to use the default of 8 workers. | `true` | +| `sumologic.metrics.remoteWriteProxy.config.port` | Port on which remote write proxy is going to be exposed | `8080` | | `sumologic.metrics.remoteWriteProxy.config.enableAccessLogs` | Enable nginx access logs. | `false` | | `sumologic.metrics.remoteWriteProxy.replicaCount` | Number of replicas in the remote write proxy deployment. | `3` | | `sumologic.metrics.remoteWriteProxy.image` | Nginx docker image for the remote write proxy. | `{"repository": "public.ecr.aws/sumologic/nginx-unprivileged", "tag": "1.25.2-alpine", "pullPolicy": "IfNotPresent"}` | @@ -103,283 +164,126 @@ The following table lists the configurable parameters of the Sumo Logic chart an | `sumologic.metrics.remoteWriteProxy.priorityClassName` | Priority class name for the remote write proxy deployment. | `Nil` | | `sumologic.metrics.remoteWriteProxy.podLabels` | Additional labels for the remote write proxy container. | `{}` | | `sumologic.metrics.remoteWriteProxy.podAnnotations` | Additional annotations for for the remote write proxy container. | `{}` | -| `sumologic.metrics.remoteWriteProxy.config.port` | Port on which remote write proxy is going to be exposed | `8080` | -| `sumologic.metrics.serviceMonitors` | Configuration of Sumo Logic Kubernetes Collection components serviceMonitors | See [values.yaml] | -| `sumologic.metrics.additionalServiceMonitors` | List of ServiceMonitor objects to create. | See [values.yaml] | -| `sumologic.metrics.collector.otelcol.enabled` | Enable experimental otelcol metrics collector | See [values.yaml] | -| `sumologic.metrics.collector.otelcol.scrapeInterval` | The default scrape interval for the collector. | `30s` | -| `sumologic.metrics.collector.otelcol.replicaCount` | Replica count for the experimental otelcol metrics collector | `1` | -| `sumologic.metrics.collector.otelcol.resources` | Resource requests and limits for the experimental otelcol metrics collector | See [values.yaml] | -| `sumologic.metrics.collector.otelcol.autoscaling.enabled` | Option to override the default autoscaling parameter (sumologic.autoscaling.enabled) for the otelcol metrics and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. collector | `false` | -| `sumologic.metrics.collector.otelcol.autoscaling.maxReplicas` | Default max replicas for autoscaling. collector | `10` | -| `sumologic.metrics.collector.otelcol.autoscaling.minReplicas` | Default min replicas for autoscaling. collector | `1` | -| `sumologic.metrics.collector.otelcol.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `70` | -| `sumologic.metrics.collector.otelcol.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `70` | -| `sumologic.metrics.collector.otelcol.serviceMonitorSelector` | Selector for ServiceMonitors used for target discovery. By default, we select ServiceMonitors created by the Chart. See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr | `Nil` | -| `sumologic.metrics.collector.otelcol.podMonitorSelector` | Selector for PodMonitors used for target discovery. By default, we select PodMonitors created by the Chart. 
See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr | `Nil` | -| `sumologic.metrics.collector.otelcol.nodeSelector` | Node selector for the experimental otelcol metrics. [See docs/best-practices.md for more information.](/docs/best-practices.md). | `{}` | -| `sumologic.metrics.collector.otelcol.podAnnotations` | Additional annotations for the experimental otelcol metrics pods. | `{}` | -| `sumologic.metrics.collector.otelcol.podLabels` | Additional labels for the experimental otelcol metrics pods. | `{}` | -| `sumologic.metrics.collector.otelcol.priorityClassName` | Priority class name for the experimental otelcol metrics. | `null` | -| `sumologic.metrics.collector.otelcol.securityContext` | The securityContext configuration for the experimental otelcol metrics. | `{"fsGroup": 999}` | -| `sumologic.metrics.collector.otelcol.tolerations` | Tolerations for the experimental otelcol metrics. | `[]` | -| `sumologic.metrics.enableDefaultFilters` | Enable default metric filters for Sumo Apps. | `false` | -| `sumologic.metrics.collector.otelcol.kubelet.enabled` | Enable collection of kubelet metrics. | `true` | -| `sumologic.metrics.collector.otelcol.cAdvisor.enabled` | Enable collection of cAdvisor metrics. | `true` | -| `sumologic.metrics.collector.otelcol.annotatedPods.enabled` | Enable collection of metrics from Pods annotated with prometheus.io/\* keys. See [docs/collecting-application-metrics.md](/docs/collecting-application-metrics.md#application-metrics-are-exposed-one-endpoint-scenario) for more information. | `true` | -| `sumologic.metrics.collector.otelcol.allocationStrategy` | Allocation strategy for the scrape target allocator. Valid values are: least-weighted and consistent-hashing. See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocator | `least-weighted` | -| `sumologic.metrics.collector.otelcol.config.merge` | Configuration for otelcol metrics collector, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | {} | -| `sumologic.metrics.collector.otelcol.config.override` | Configuration for otelcol metrics collector, replaces defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | {} | -| `sumologic.metrics.dropHistogramBuckets` | Drop buckets from select high-cardinality histogram metrics, leaving only the sum and count components. | `true` | -| `sumologic.metrics.sourceType` | The type of the Sumo Logic source being used for metrics ingestion. Can be `http` or `otlp`. | `otlp` | +| `sumologic.metrics.serviceMonitors` | Configuration of Sumo Logic Kubernetes Collection components serviceMonitors | `See [values.yaml]` | +| `sumologic.metrics.sourceType` | The type of the Sumo Logic source being used for metrics ingestion. Can be `http` or `otlp`. | `http` | | `sumologic.traces.enabled` | Set the enabled flag to true to enable tracing ingestion. _Tracing must be enabled for the account first. Please contact your Sumo representative for activation details_ | `true` | | `sumologic.traces.spans_per_request` | Maximum number of spans sent in single batch | `100` | -| `sumologic.traces.sourceType` | The type of the Sumo Logic source being used for traces ingestion. 
Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/traces/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/). | `otlp` | -| `sumologic.envFromSecret` | If enabled, accessId and accessKey will be sourced from Secret Name given. Be sure to include at least the following env variables in your secret (1) SUMOLOGIC_ACCESSID, (2) SUMOLOGIC_ACCESSKEY | `sumo-api-secret` | -| `sumologic.accessId` | Sumo access ID. | `Nil` | -| `sumologic.accessKey` | Sumo access key. | `Nil` | -| `sumologic.endpoint` | Sumo API endpoint; Leave blank for automatic endpoint discovery and redirection. | `Nil` | -| `sumologic.collectionMonitoring` | If you set it to false, it would set `EXCLUDE_NAMESPACE=` and not add the Prometheus remotestorage metrics. | `true` | -| `sumologic.collectorName` | The name of the Sumo Logic collector that will be created in the SetUp job. Defaults to `clusterName` if not specified. | `Nil` | -| `sumologic.clusterName` | An identifier for the Kubernetes cluster. Whitespaces in the cluster name will be replaced with dashes. | `kubernetes` | -| `sumologic.clusterDNSDomain` | The DNS domain for the cluster. Change this if you're using a custom domain. | `cluster.local` | -| `sumologic.cluster` | Configuration of Kubernetes for [Terraform client](https://www.terraform.io/docs/providers/kubernetes/index.html#argument-reference). | See [values.yaml] | -| `sumologic.collector.sources` | Configuration of HTTP sources. [See docs/Terraform.md for more information](/docs/terraform.md). All double quotes should be escaped here regarding Terraform syntax. | See [values.yaml] | -| `sumologic.collector.fields` | Configuration of Sumo Logic fields. [See Sumo Logic Terraform Plugin documentation for more information](https://registry.terraform.io/providers/SumoLogic/sumologic/latest/docs/resources/collector#fields). All double quotes should be escaped here regarding Terraform syntax. | See [values.yaml] | -| `sumologic.httpProxy` | HTTP proxy URL | `Nil` | -| `sumologic.httpsProxy` | HTTPS proxy URL | `Nil` | -| `sumologic.noProxy` | List of comma separated hostnames which should be excluded from the proxy | `kubernetes.default.svc` | -| `sumologic.pullSecrets` | Optional list of secrets that will be used for pulling images for Sumo Logic's deployments and statefulsets. | `Nil` | -| `sumologic.otelcolImage.repository` | Default image repository for OpenTelemetry Collector. This can be overridden for specific components. | `public.ecr.aws/sumologic/sumologic-otel-collector` | -| `sumologic.otelcolImage.tag` | Default image tag for OpenTelemetry Collector. This can be overridden for specific components. | `0.86.0-sumo-1` | -| `sumologic.otelcolImage.addFipsSuffix` | Add a `-fips` suffix to all image tags. See [docs/security-best-practices.md](/docs/security-best-practices.md) for more information. | `false` | -| `sumologic.podLabels` | Additional labels for the pods. | `{}` | -| `sumologic.podAnnotations` | Additional annotations for the pods. | `{}` | -| `sumologic.scc.create` | Create OpenShift's Security Context Constraint | `false` | -| `sumologic.serviceAccount.annotations` | Add custom annotations to sumologic serviceAccounts | `{}` | -| `sumologic.setup.job.pullSecrets` | Optional list of secrets that will be used for pulling images for Sumo Logic's setup job. | `Nil` | -| `sumologic.setup.job.podLabels` | Additional labels for the setup Job pod. 
| `{}` | -| `sumologic.setup.job.podAnnotations` | Additional annotations for the setup Job pod. | `{}` | -| `sumologic.setup.job.image.repository` | Image repository for Sumo Logic setup job docker container. | `public.ecr.aws/sumologic/kubernetes-setup` | -| `sumologic.setup.job.image.tag` | Image tag for Sumo Logic setup job docker container. | `3.11.0` | -| `sumologic.setup.job.image.pullPolicy` | Image pullPolicy for Sumo Logic docker container. | `IfNotPresent` | -| `sumologic.setup.job.nodeSelector` | Node selector for sumologic setup job. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | -| `sumologic.setup.job.tolerations` | Add tolerations for the setup Job. | `[]` | -| `sumologic.setup.job.affinity` | Add affinity and anti-affinity for the setup Job. | `{}` | -| `sumologic.setup.debug` | Enable debug mode (disables the automatic execution of the setup.sh script) | `Nil` | -| `sumologic.setup.force` | Force collection installation (disables k8s version verification) | `Nil` | -| `sumologic.setup.job.resources` | Resource requests and limits for the setup Job. | `{"limits": {"memory": "256Mi", "cpu": "2000m"}, "requests": {"memory": "64Mi", "cpu": "200m"}}` | -| `sumologic.setup.monitors.enabled` | If enabled, a pre-install hook will create k8s monitors in Sumo Logic. | `true` | -| `sumologic.setup.monitors.monitorStatus` | The installed monitors default status: enabled/disabled. | `enabled` | -| `sumologic.setup.monitors.notificationEmails` | A list of emails to send notifications from monitors. | `[]` | -| `sumologic.setup.dashboards.enabled` | If enabled, a pre-install hook will install k8s dashboards in Sumo Logic. | `true` | -| `sumologic.autoscaling.enabled` | Enable autoscaling for components that support it: logs metadata, metrics metadata, metrics collector, otelcol instrumentation, and traces gateway | `true` | -| `metrics-server.enabled` | Set the enabled flag to true for enabling metrics-server. This is required before enabling fluentd autoscaling unless you have an existing metrics-server in the cluster. | `false` | +| `sumologic.traces.sourceType` | The type of the Sumo Logic source being used for traces ingestion. Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/traces/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/). | `http` | +| `metrics-server.enabled` | Set the enabled flag to true for enabling metrics-server. This is required before enabling autoscaling unless you have an existing metrics-server in the cluster. | `false` | | `metrics-server.fullnameOverride` | Used to override the chart's full name. | `Nil` | | `metrics-server.apiService.create` | Specifies whether the v1beta1.metrics.k8s.io API service should be created. | `true` | | `metrics-server.extraArgs` | Extra arguments to pass to metrics-server on start up. | `["--kubelet-insecure-tls=true", "--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname"]` | -| `metrics-server.image.pullSecrets` | Pull secrets for metrics-server images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config). | `Nil` | -| `kube-prometheus-stack.kubeTargetVersionOverride` | Provide a target gitVersion of K8S, in case .Capabilites.KubeVersion is not available (e.g. 
helm template). Changing this may break Sumo Logic apps. | `Nil` |
-| `kube-prometheus-stack.enabled` | Flag to control deploying Prometheus Operator Helm sub-chart. | `true` |
+| `metrics-server.image.pullSecrets` | Pull secrets for metrics-server images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config). | `["imagepullsecret"]` |
+| `kube-prometheus-stack.enabled` | Flag to control deploying Prometheus Operator Helm sub-chart. | `false` |
+| `kube-prometheus-stack.global.imagePullSecrets` | Pull secrets for Kube Prometheus Stack images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config). | `[{"name": "image-pull-secret"}]` |
 | `kube-prometheus-stack.fullnameOverride` | Used to override the chart's full name. | `Nil` |
 | `kube-prometheus-stack.namespaceOverride` | Used to override the chart's default namespace. | `Nil` |
-| `kube-prometheus-stack.defaultRules.rules` | Control which default recording and alerting rules are enabled. | See [values.yaml] |
+| `kube-prometheus-stack.kubeTargetVersionOverride` | Provide a target gitVersion of K8S, in case .Capabilities.KubeVersion is not available (e.g. helm template). Changing this may break Sumo Logic apps. | `Nil` |
+| `kube-prometheus-stack.commonLabels` | Labels to apply to all Kube Prometheus Stack resources | `{}` |
+| `kube-prometheus-stack.defaultRules.rules` | Control which default recording and alerting rules are enabled. | `See [values.yaml]` |
+| `kube-prometheus-stack.kubeApiServer.serviceMonitor.interval` | Kubernetes API Server metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` |
+| `kube-prometheus-stack.kubeApiServer.serviceMonitor.metricRelabelings` | Kubernetes API Server MetricRelabelConfigs | `[{"action": "keep", "regex": "(?:apiserver_request_(?:count\|total)\|apiserver_request_(?:duration_seconds)_(?:count\|sum))", "sourceLabels": ["__name__"]}]` |
+| `kube-prometheus-stack.kubelet.serviceMonitor.interval` | Kubelet metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` |
+| `kube-prometheus-stack.kubelet.serviceMonitor.probes` | Enable scraping /metrics/probes from kubelet's service | `false` |
+| `kube-prometheus-stack.kubelet.serviceMonitor.resource` | Enable scraping /metrics/resource from kubelet's service | `false` |
+| `kube-prometheus-stack.kubelet.serviceMonitor.metricRelabelings` | Kubelet MetricRelabelConfigs | `See [values.yaml]` |
+| `kube-prometheus-stack.kubelet.serviceMonitor.cAdvisorMetricRelabelings` | Kubelet CAdvisor MetricRelabelConfigs | `See [values.yaml]` |
+| `kube-prometheus-stack.kubeControllerManager.serviceMonitor.interval` | Kubernetes Controller Manager metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` |
+| `kube-prometheus-stack.kubeControllerManager.serviceMonitor.metricRelabelings` | Kubernetes Controller Manager MetricRelabelConfigs | `[{"action": "keep", "regex": "(?:cloudprovider_.*_api_request_duration_seconds.*)", "sourceLabels": ["__name__"]}]` |
+| `kube-prometheus-stack.coreDns.serviceMonitor.interval` | Core DNS metrics scrape interval.
If not set, the Prometheus default scrape interval is used. | `Nil` | +| `kube-prometheus-stack.coreDns.serviceMonitor.metricRelabelings` | Core DNS MetricRelabelConfigs | `See [values.yaml]` | +| `kube-prometheus-stack.kubeEtcd.serviceMonitor.interval` | Kubernetes Etcd metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | +| `kube-prometheus-stack.kubeEtcd.serviceMonitor.metricRelabelings` | Kubernetes Etcd MetricRelabelConfigs | `See [values.yaml]` | +| `kube-prometheus-stack.kubeScheduler.serviceMonitor.interval` | Kubernetes Scheduler metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | +| `kube-prometheus-stack.kubeScheduler.serviceMonitor.metricRelabelings` | Kubernetes Scheduler MetricRelabelConfigs | `See [values.yaml]` | | `kube-prometheus-stack.alertmanager.enabled` | Deploy alertmanager. | `false` | | `kube-prometheus-stack.grafana.enabled` | If true, deploy the grafana sub-chart. | `false` | | `kube-prometheus-stack.grafana.defaultDashboardsEnabled` | Deploy default dashboards. These are loaded using the sidecar. | `false` | -| `kube-prometheus-stack.prometheusOperator.enabled` | Enable prometheus-operator | `false` | | `kube-prometheus-stack.prometheusOperator.podLabels` | Additional labels for prometheus operator pods. | `{}` | | `kube-prometheus-stack.prometheusOperator.podAnnotations` | Additional annotations for prometheus operator pods. | `{}` | -| `kube-prometheus-stack.prometheusOperator.resources` | Resource limits for prometheus operator. Uses sub-chart defaults. | `{"limits": {"cpu": "200m", "memory": "200Mi"}, "requests": {"cpu": "100m", "memory": "100Mi"}}` | -| `kube-prometheus-stack.prometheusOperator.serviceMonitor` | Prometheus operator ServiceMonitor | `{"selfMonitor": false}` | +| `kube-prometheus-stack.prometheusOperator.resources` | Resource limits for prometheus operator. Uses sub-chart defaults. | `{}` | | `kube-prometheus-stack.prometheusOperator.admissionWebhooks.enabled` | Create PrometheusRules admission webhooks. Mutating webhook will patch PrometheusRules objects indicating they were validated. Validating webhook will check the rules syntax. | `false` | | `kube-prometheus-stack.prometheusOperator.tls.enabled` | Enable TLS in prometheus operator. | `false` | | `kube-prometheus-stack.kube-state-metrics.fullnameOverride` | Used to override the chart's full name. | `Nil` | -| `kube-prometheus-stack.kube-state-metrics.resources` | Resource limits for kube state metrics. Uses sub-chart defaults. | `{"limits": {"cpu": "100m", "memory": "64Mi"}, "requests": {"cpu": "10m", "memory": "32Mi"}}` | +| `kube-prometheus-stack.kube-state-metrics.nodeSelector` | Node selector for kube-state-metrics. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | | `kube-prometheus-stack.kube-state-metrics.customLabels` | Custom labels to apply to service, deployment and pods. Uses sub-chart defaults. | `{}` | | `kube-prometheus-stack.kube-state-metrics.podAnnotations` | Additional annotations for pods in the DaemonSet. Uses sub-chart defaults. | `{}` | -| `kube-prometheus-stack.prometheus.enabled` | Enable Prometheus | `false` | -| `kube-prometheus-stack.prometheus.additionalServiceMonitors` | List of ServiceMonitor objects to create. | See [values.yaml] | -| `kube-prometheus-stack.prometheus.prometheusSpec.resources` | Resource limits for prometheus. Uses sub-chart defaults. 
| `{"limits": {"cpu": "2000m", "memory": "8Gi"}, "requests": {"cpu": "500m", "memory": "1Gi"}}` | -| `kube-prometheus-stack.prometheus.prometheusSpec.containers` | Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod. | See [values.yaml] | -| `kube-prometheus-stack.prometheus.prometheusSpec.podMetadata.labels` | Add custom pod labels to prometheus pods | `{}` | -| `kube-prometheus-stack.prometheus.prometheusSpec.podMetadata.annotations` | Add custom pod annotations to prometheus pods | `{}` | -| `kube-prometheus-stack.prometheus.prometheusSpec.remoteWrite` | If specified, the remote_write spec. | See [values.yaml] | -| `kube-prometheus-stack.prometheus.prometheusSpec.walCompression` | Enables walCompression in Prometheus | `true` | +| `kube-prometheus-stack.kube-state-metrics.resources` | Resource limits for kube state metrics. Uses sub-chart defaults. | `{}` | +| `kube-prometheus-stack.kube-state-metrics.image.tag` | Tag for kube-state-metrics Docker image. | `v2.7.0` | +| `kube-prometheus-stack.kube-state-metrics.prometheus.monitor.interval` | Kubernetes State Metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | +| `kube-prometheus-stack.kube-state-metrics.prometheus.monitor.metricRelabelings` | Kubernetes State Metrics MetricRelabelConfigs | `See [values.yaml]` | | `kube-prometheus-stack.prometheus-node-exporter.fullnameOverride` | Used to override the chart's full name. | `Nil` | +| `kube-prometheus-stack.prometheus-node-exporter.nodeSelector` | Node selector for prometheus node exporter. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | | `kube-prometheus-stack.prometheus-node-exporter.podLabels` | Additional labels for prometheus-node-exporter pods. | `{}` | | `kube-prometheus-stack.prometheus-node-exporter.podAnnotations` | Additional annotations for prometheus-node-exporter pods. | `{}` | -| `kube-prometheus-stack.prometheus-node-exporter.resources` | Resource limits for node exporter. Uses sub-chart defaults. | `{"limits": {"cpu": "200m", "memory": "50Mi"}, "requests": {"cpu": "100m", "memory": "30Mi"}}` | -| `kube-prometheus-stack.prometheus-node-exporter.nodeSelector` | Node selector for prometheus node exporter. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | -| `kube-prometheus-stack.kube-state-metrics.nodeSelector` | Node selector for kube-state-metrics. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | -| `kube-prometheus-stack.kube-state-metrics.image.tag` | Tag for kube-state-metrics Docker image. | `v2.7.0` | -| `kube-prometheus-stack.commonLabels` | Labels to apply to all Kube Prometheus Stack resources | `{}` | -| `kube-prometheus-stack.coreDns.serviceMonitor.interval` | Core DNS metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | -| `kube-prometheus-stack.coreDns.serviceMonitor.metricRelabelings` | Core DNS MetricRelabelConfigs | See [values.yaml] | -| `kube-prometheus-stack.global.imagePullSecrets` | Pull secrets for Kube Prometheus Stack images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config). | `[]` | -| `kube-prometheus-stack.kubeApiServer.serviceMonitor.interval` | Kubernetes API Server metrics scrape interval. 
If not set, the Prometheus default scrape interval is used. | `Nil` | -| `kube-prometheus-stack.kubeApiServer.serviceMonitor.metricRelabelings` | Kubernetes API Server MetricRelabelConfigs | See [values.yaml] | -| `kube-prometheus-stack.kubeControllerManager.serviceMonitor.interval` | Kubernetes Controller Manager metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | -| `kube-prometheus-stack.kubeControllerManager.serviceMonitor.metricRelabelings` | Kubernetes Controller Manager MetricRelabelConfigs | See [values.yaml] | -| `kube-prometheus-stack.kubeEtcd.serviceMonitor.interval` | Kubernetes Etcd metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | -| `kube-prometheus-stack.kubeEtcd.serviceMonitor.metricRelabelings` | Kubernetes Etcd MetricRelabelConfigs | See [values.yaml] | -| `kube-prometheus-stack.kubeScheduler.serviceMonitor.interval` | Kubernetes Scheduler metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | -| `kube-prometheus-stack.kubeScheduler.serviceMonitor.metricRelabelings` | Kubernetes Scheduler MetricRelabelConfigs | See [values.yaml] | -| `kube-prometheus-stack.kube-state-metrics.prometheus.monitor.interval` | Kubernetes State Metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | -| `kube-prometheus-stack.kube-state-metrics.prometheus.monitor.metricRelabelings` | Kubernetes State Metrics MetricRelabelConfigs | See [values.yaml] | -| `kube-prometheus-stack.kubelet.serviceMonitor.cAdvisorMetricRelabelings` | Kubelet CAdvisor MetricRelabelConfigs | See [values.yaml] | -| `kube-prometheus-stack.kubelet.serviceMonitor.interval` | Kubelet metrics scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | -| `kube-prometheus-stack.kubelet.serviceMonitor.metricRelabelings` | Kubelet MetricRelabelConfigs | See [values.yaml] | -| `kube-prometheus-stack.kubelet.serviceMonitor.probes` | Enable scraping /metrics/probes from kubelet's service | `false` | -| `kube-prometheus-stack.kubelet.serviceMonitor.resource` | Enable scraping /metrics/resource from kubelet's service | `false` | +| `kube-prometheus-stack.prometheus-node-exporter.resources` | Resource limits for node exporter. Uses sub-chart defaults. | `{}` | | `kube-prometheus-stack.prometheus-node-exporter.prometheus.monitor.interval` | Node Exporter scrape interval. If not set, the Prometheus default scrape interval is used. | `Nil` | -| `kube-prometheus-stack.prometheus-node-exporter.prometheus.monitor.metricRelabelings` | Node Exporter MetricRelabelConfigs | See [values.yaml] | -| `kube-prometheus-stack.prometheus.prometheusSpec.additionalScrapeConfigs` | Additional Prometheus scrape configurations | See [values.yaml] | -| `kube-prometheus-stack.prometheus.prometheusSpec.initContainers` | InitContainers allows injecting additional Prometheus initContainers. | See [values.yaml] | -| `kube-prometheus-stack.prometheus.prometheusSpec.retention` | How long to retain metrics in Prometheus | `1d` | +| `kube-prometheus-stack.prometheus-node-exporter.prometheus.monitor.metricRelabelings` | Node Exporter MetricRelabelConfigs | `See [values.yaml]` | +| `kube-prometheus-stack.prometheus.additionalServiceMonitors` | List of ServiceMonitor objects to create. | `[]` | | `kube-prometheus-stack.prometheus.prometheusSpec.scrapeInterval` | Prometheus metrics scrape interval. If not set, the Prometheus default scrape interval is used. 
| `30s` | -| `kube-prometheus-stack.prometheus.serviceMonitor.selfMonitor` | Enable scraping Prometheus metrics | `false` | -| `falco.enabled` | Flag to control deploying Falco Helm sub-chart. | `false` | -| `falco.fullnameOverride` | Used to override the chart's full name. | `Nil` | -| `falco.addKernelDevel` | Flag to control installation of `kernel-devel` on nodes using MachineConfig, required to build falco modules (only for OpenShift) | `true` | -| `falco.extra.initContainers` | InitContainers for Falco pod | See [values.yaml] | -| `falco.falco.json_output` | Output events in json. | `true` | -| `falco.imagePullSecrets` | Pull secrets for falco images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config). | `[]` | -| `falco.customRules` | Additional falco rules related to Sumo Logic Kubernetes Collection | See [values.yaml] | -| `falco.driver.kind` | Tell Falco which driver to use. Available options: module (kernel driver) and ebpf (eBPF probe). Set to `ebpf` for GKE | `module` | -| `falco.driver.loader.initContainer.image` | Init container image configuration for falco driver loader. | `{"registry": "public.ecr.aws", "repository": "falcosecurity/falco-driver-loader"}` | -| `falco.falco.load_plugins` | Names of the plugins to be loaded by Falco. | `["json", "k8saudit"]` | -| `falco.falco.rules_file` | The location of the rules files that will be consumed by Falco. | `["/etc/falco/falco_rules.yaml", "/etc/falco/falco_rules.local.yaml", "/etc/falco/k8s_audit_rules.yaml", "/etc/falco/rules.d", "/etc/falco/rules.available/application_rules.yaml"]` | -| `falco.image.registry` | Image registry for falco docker container. | `public.ecr.aws` | -| `falco.image.repository` | Image repository for falco docker container. | `falcosecurity/falco-no-driver` | -| `falco.falcoctl` | Falcoctl configuration. We don't use it for now due to breaking changes. [See this issue](https://github.com/SumoLogic/sumologic-kubernetes-collection/issues/3144). | `{"artifact": {"follow": {"enabled": false}, "install": {"enabled": false}}}` | -| `telegraf-operator.enabled` | Flag to control deploying Telegraf Operator Helm sub-chart. | `false` | -| `telegraf-operator.fullnameOverride` | Used to override the chart's full name. | `Nil` | -| `telegraf-operator.replicaCount` | Replica count for Telegraf Operator pods. | 1 | -| `telegraf-operator.classes.secretName` | Secret name in which the Telegraf Operator configuration will be stored. | `telegraf-operator-classes` | -| `telegraf-operator.classes.data` | Telegraf sidecar configuration. | See [values.yaml] | -| `telegraf-operator.classes.default` | Name of the default output configuration. | `sumologic-prometheus` | -| `telegraf-operator.image.sidecarImage` | Telegraf Operator sidecar image. | `public.ecr.aws/sumologic/telegraf:1.21.2` | -| `telegraf-operator.imagePullSecrets` | Pull secrets for Telegraf Operator images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config). | `[]` | -| `opentelemetry-operator.enabled` | Flag to control deploying OpenTelemetry Operator Helm sub-chart. 
| `true` | -| `opentelemetry-operator.createDefaultInstrumentation` | Flag to control creation of default Instrumentation object | `false` | -| `opentelemetry-operator.instrumentation.dotnet.metrics.enabled` | Flag to control metrics export from DotNet instrumentation in `Instrumentation` resource. | `true` | -| `opentelemetry-operator.instrumentation.dotnet.traces.enabled` | Flag to control traces export from DotNet instrumentation in `Instrumentation` resource. | `true` | -| `opentelemetry-operator.instrumentation.java.metrics.enabled` | Flag to control metrics export from Java instrumentation in `Instrumentation` resource. | `true` | -| `opentelemetry-operator.instrumentation.java.traces.enabled` | Flag to control traces export from Java instrumentation in `Instrumentation` resource. | `true` | -| `opentelemetry-operator.instrumentation.python.metrics.enabled` | Flag to control metrics export from Python instrumentation in `Instrumentation` resource. | `true` | -| `opentelemetry-operator.instrumentation.python.traces.enabled` | Flag to control traces export from Python instrumentation in `Instrumentation` resource. | `true` | -| `opentelemetry-operator.manager.collectorImage.repository` | The default collector image repository for OpenTelemetryCollector CRDs. | `public.ecr.aws/sumologic/sumologic-otel-collector` | -| `opentelemetry-operator.manager.collectorImage.tag` | The default collector image tag for OpenTelemetryCollector CRDs. | `0.86.0-sumo-1` | -| `opentelemetry-operator.manager.resources.limits.cpu` | Used to set limit CPU for OpenTelemetry-Operator Manager. | `250m` | -| `opentelemetry-operator.manager.resources.limits.memory` | Used to set limit Memory for OpenTelemetry-Operator Manager. | `512Mi` | -| `opentelemetry-operator.manager.resources.requests.cpu` | Used to set requested CPU for OpenTelemetry-Operator Manager. | `150m` | -| `opentelemetry-operator.manager.resources.requests.memory` | Used to set requested Memory for OpenTelemetry-Operator Manager. | `256Mi` | -| `opentelemetry-operator.instrumentationNamespaces` | Used to create `Instrumentation` resources in specified namespaces. | `Nil` | -| `opentelemetry-operator.instrumentationJobImage.image.repository` | Name of the image repository used to apply Instrumentation resource | `sumologic/kubernetes-tools` | -| `opentelemetry-operator.instrumentationJobImage.image.tag` | Name of the image tag used to apply Instrumentation resource | `2.14.0` | -| `opentelemetry-operator.admissionWebhooks` | Admission webhooks make sure only requests with correctly formatted rules will get into the Operator. They also enable the sidecar injection for OpenTelemetryCollector and Instrumentation CR's. | See [values.yaml] | -| `opentelemetry-operator.manager.env` | Additional environment variables for opentelemetry-operator helm chart. | `{"ENABLE_WEBHOOKS": "true"}` | +| `kube-prometheus-stack.prometheus.prometheusSpec.retention` | How long to retain metrics in Prometheus | `1d` | +| `kube-prometheus-stack.prometheus.prometheusSpec.podMetadata.labels` | Add custom pod labels to prometheus pods | `{}` | +| `kube-prometheus-stack.prometheus.prometheusSpec.podMetadata.annotations` | Add custom pod annotations to prometheus pods | `{}` | +| `kube-prometheus-stack.prometheus.prometheusSpec.nodeSelector` | Node selector for prometheus. [See docs/Best_Practices.md for more information.](/docs/best-practices.md) | `{}` | +| `kube-prometheus-stack.prometheus.prometheusSpec.resources` | Resource limits for prometheus. Uses sub-chart defaults. 
| `{"limits": {"cpu": "2000m", "memory": "8Gi"}, "requests": {"cpu": "500m", "memory": "1Gi"}}` | +| `kube-prometheus-stack.prometheus.prometheusSpec.initContainers` | InitContainers allows injecting additional Prometheus initContainers. | `See [values.yaml]` | +| `kube-prometheus-stack.prometheus.prometheusSpec.containers` | Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod. | `See [values.yaml]` | +| `kube-prometheus-stack.prometheus.prometheusSpec.walCompression` | Enables walCompression in Prometheus | `true` | +| `kube-prometheus-stack.prometheus.prometheusSpec.additionalScrapeConfigs` | Additional Prometheus scrape configurations | `See [values.yaml]` | +| `kube-prometheus-stack.prometheus.prometheusSpec.remoteWrite` | If specified, the remote_write spec. | `[{"remoteTimeout": "5s", "url": "http://$(METADATA_METRICS_SVC).$(NAMESPACE).svc.cluster.local.:9888/prometheus.metrics"}]` | | `otelcolInstrumentation.enabled` | Enables Sumo Otel Distro Collector StatefulSet to collect telemetry data. [See docs for more information.](/docs/opentelemetry-collector/traces.md) | `true` | -| `otelcolInstrumentation.autoscaling.enabled` | Option to override the default autoscaling parameter (sumologic.autoscaling.enabled) for Sumo Otel Distro Collector StatefulSet and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | +| `otelcolInstrumentation.sourceMetadata.sourceName` | Set the \_sourceName metadata field in Sumo Logic. | `%{k8s.namespace.name}.%{k8s.pod.pod_name}.%{k8s.container.name}` | +| `otelcolInstrumentation.sourceMetadata.sourceCategory` | Set the \_sourceCategory metadata field in Sumo Logic. | `%{k8s.namespace.name}/%{k8s.pod.pod_name}` | +| `otelcolInstrumentation.sourceMetadata.sourceCategoryPrefix` | Set the prefix, for \_sourceCategory metadata. | `kubernetes/` | +| `otelcolInstrumentation.sourceMetadata.sourceCategoryReplaceDash` | Used to replace - with another character. | `/` | +| `otelcolInstrumentation.sourceMetadata.excludeContainerRegex` | A regular expression for containers. Matching containers will be excluded from Sumo. | `Nil` | +| `otelcolInstrumentation.sourceMetadata.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. | `Nil` | +| `otelcolInstrumentation.sourceMetadata.excludeNamespaceRegex` | A regular expression for namespaces. Matching namespaces will be excluded from Sumo. | `Nil` | +| `otelcolInstrumentation.sourceMetadata.excludePodRegex` | A regular expression for pods. Matching pods will be excluded from Sumo. | `Nil` | +| `otelcolInstrumentation.autoscaling.enabled` | Option to turn autoscaling on for Sumo Otel Distro Collector StatefulSet and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | | `otelcolInstrumentation.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` | | `otelcolInstrumentation.autoscaling.maxReplicas` | Default max replicas for autoscaling | `10` | | `otelcolInstrumentation.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `100` | | `otelcolInstrumentation.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `50` | -| `otelcolInstrumentation.statefulset.replicaCount` | Set the number of otelcol-instrumentation replicasets. 
| `3` | | `otelcolInstrumentation.statefulset.nodeSelector` | Node selector for otelcol-instrumentation statefulset. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | -| `otelcolInstrumentation.statefulset.priorityClassName` | Priority class name for otelcol-instrumentation pods. | If not provided then set to `RELEASE-NAME-sumologic-priorityclass`. | +| `otelcolInstrumentation.statefulset.tolerations` | Tolerations for otelcol-instrumentation statefulset. | `[]` | +| `otelcolInstrumentation.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for otelcol-instrumentation statefulset. | `[]` | | `otelcolInstrumentation.statefulset.affinity` | Affinity for otelcol-instrumentation statefulset. | `{}` | -| `otelcolInstrumentation.statefulset.extraEnvVars` | Additional environment variables for otelcol-instrumentation pods. | `{}` | -| `otelcolInstrumentation.statefulset.extraVolumeMounts` | Additional volume mounts for otelcol-instrumentation pods. | `{}` | -| `otelcolInstrumentation.statefulset.extraVolumes` | Additional volumes for otelcol-instrumentation pods. | `{}` | -| `otelcolInstrumentation.statefulset.image.pullPolicy` | Image pullPolicy for otelcol-instrumentation docker container. | `IfNotPresent` | -| `otelcolInstrumentation.statefulset.image.repository` | Image repository for otelcol-instrumentation docker container. | `` | -| `otelcolInstrumentation.statefulset.image.tag` | Image tag for otelcol-instrumentation docker container. | `` | -| `otelcolInstrumentation.statefulset.podAnnotations` | Additional annotations for otelcol-instrumentation pods. | `{}` | | `otelcolInstrumentation.statefulset.podAntiAffinity` | PodAntiAffinity for otelcol-instrumentation statefulset. | `soft` | -| `otelcolInstrumentation.statefulset.podLabels` | Additional labels for otelcol-instrumentation pods. | `{}` | +| `otelcolInstrumentation.statefulset.replicaCount` | Set the number of otelcol-instrumentation replicasets. | `3` | | `otelcolInstrumentation.statefulset.resources` | Resources for otelcol-instrumentation statefulset. | `{"limits": {"memory": "4Gi", "cpu": "2000m"}, "requests": {"memory": "768Mi", "cpu": "500m"}}` | -| `otelcolInstrumentation.statefulset.tolerations` | Tolerations for otelcol-instrumentation statefulset. | `[]` | -| `otelcolInstrumentation.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for otelcol-instrumentation statefulset. | `[]` | -| `otelcolInstrumentation.sourceMetadata.sourceName` | Set the \_sourceName metadata field in Sumo Logic. | `"%{k8s.namespace.name}.%{k8s.pod.pod_name}.%{k8s.container.name}"` | -| `otelcolInstrumentation.sourceMetadata.sourceCategory` | Set the \_sourceCategory metadata field in Sumo Logic. | `"%{k8s.namespace.name}/%{k8s.pod.pod_name}"` | -| `otelcolInstrumentation.sourceMetadata.sourceCategoryPrefix` | Set the prefix, for \_sourceCategory metadata. | `"kubernetes/"` | -| `otelcolInstrumentation.sourceMetadata.sourceCategoryReplaceDash` | Used to replace - with another character. | `"/"` | -| `otelcolInstrumentation.sourceMetadata.excludeContainerRegex` | A regular expression for containers. Matching containers will be excluded from Sumo. | `""` | -| `otelcolInstrumentation.sourceMetadata.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. | `""` | -| `otelcolInstrumentation.sourceMetadata.excludeNamespaceRegex` | A regular expression for namespaces. Matching namespaces will be excluded from Sumo. 
| `""` | -| `otelcolInstrumentation.sourceMetadata.excludePodRegex` | A regular expression for pods. Matching pods will be excluded from Sumo. | `""` | -| `otelcolInstrumentation.logLevelFilter` | Do not send otelcol-instrumentation logs if `true`. | `false` | -| `otelcolInstrumentation.config.processors.batch.send_batch_size` | Sets the preferred size of batch. | `256` | -| `otelcolInstrumentation.config.processors.batch.send_batch_max_size` | Sets the maximum allowed size of a batch. Use with caution, setting too large value might cause 413 Payload Too Large errors. | `512` | -| `otelcolInstrumentation.config.processors.memory_limiter.limit_percentage` | Sets the maximum amount of memory, in %, targeted to be allocated by the process heap. | `75` | -| `otelcolInstrumentation.config.processors.memory_limiter.spike_limit_percentage` | Sets the maximum spike expected between the measurements of memory usage, in %. | `20` | -| `otelcolInstrumentation.config` | Configuration for otelcol-instrumentation | See [values.yaml] | -| `otelcolInstrumentation.statefulset.containers.otelcol.startupProbe` | Startup probe configuration for the otelcol-instrumentation container. | `{"periodSeconds": 3, "failureThreshold": 60}` | +| `otelcolInstrumentation.statefulset.priorityClassName` | Priority class name for otelcol-instrumentation pods. | `Nil` | +| `otelcolInstrumentation.statefulset.podLabels` | Additional labels for otelcol-instrumentation pods. | `{}` | +| `otelcolInstrumentation.statefulset.podAnnotations` | Additional annotations for otelcol-instrumentation pods. | `{}` | +| `otelcolInstrumentation.statefulset.image.repository` | Image repository for otelcol-instrumentation docker container. | `Nil` | +| `otelcolInstrumentation.statefulset.image.tag` | Image tag for otelcol-instrumentation docker container. | `Nil` | +| `otelcolInstrumentation.statefulset.image.pullPolicy` | Image pullPolicy for otelcol-instrumentation docker container. | `IfNotPresent` | +| `otelcolInstrumentation.statefulset.containers.otelcol.securityContext` | The securityContext configuration for the otelcol-instrumentation container. | `{}` | | `otelcolInstrumentation.statefulset.containers.otelcol.livenessProbe` | Liveness probe settings for the otelcol-instrumentation container. | `{"initialDelaySeconds": 15, "periodSeconds": 15, "timeoutSeconds": 10, "failureThreshold": 3}` | | `otelcolInstrumentation.statefulset.containers.otelcol.readinessProbe` | Readiness probe settings for the otelcol-instrumentation container. | `{"initialDelaySeconds": 5, "periodSeconds": 10, "timeoutSeconds": 3, "failureThreshold": 3}` | -| `otelcolInstrumentation.statefulset.containers.otelcol.securityContext` | The securityContext configuration for the otelcol-instrumentation container. | `{}` | -| `tracesGateway.enabled` | Flag to control deploying traces-gateway. [See docs for more information.](/docs/opentelemetry-collector/traces.md) | `true` | -| `tracesGateway.autoscaling.enabled` | Option to override the default autoscaling parameter (sumologic.autoscaling.enabled) for traces-gateway and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | -| `tracesGateway.autoscaling.minReplicas` | Default min replicas for autoscaling. | `1` | -| `tracesGateway.autoscaling.maxReplicas` | Default max replicas for autoscaling | `10` | -| `tracesGateway.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. 
| `100` | -| `tracesGateway.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `50` | -| `tracesGateway.deployment.replicas` | Set the number of OpenTelemetry Collector replicas. | `1` | -| `tracesGateway.deployment.nodeSelector` | Node selector for otelcol deployment. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | -| `tracesGateway.deployment.priorityClassName` | Priority class name for OpenTelemetry Collector log pods. | `Nil` | -| `tracesGateway.logLevelFilter` | Do not send traces-gateway logs if `true`. | `false` | -| `tracesGateway.config.processors.batch.send_batch_size` | Sets the preferred size of batch. | `256` | -| `tracesGateway.config.processors.batch.send_batch_max_size` | Sets the maximum allowed size of a batch. Use with caution, setting too large value might cause 413 Payload Too Large errors. | `512` | -| `tracesGateway.config.processors.memory_limiter.limit_percentage` | Sets the maximum amount of memory, in %, targeted to be allocated by the process heap. | `75` | -| `tracesGateway.config.processors.memory_limiter.spike_limit_percentage` | Sets the maximum spike expected between the measurements of memory usage, in %. | `20` | -| `tracesGateway.config` | Configuration for traces-gateway. | See [values.yaml] | -| `tracesGateway.deployment.extraEnvVars` | Additional environment variables for traces-gateway pods. | `{}` | -| `tracesGateway.deployment.extraVolumeMounts` | Additional volume mounts for traces-gateway pods. | `{}` | -| `tracesGateway.deployment.extraVolumes` | Additional volumes for traces-gateway pods. | `{}` | -| `tracesGateway.deployment.image.pullPolicy` | Image pullPolicy for traces-gateway docker container. | `IfNotPresent` | -| `tracesGateway.deployment.image.repository` | Image repository for traces-gateway docker container. | `` | -| `tracesGateway.deployment.image.tag` | Image tag for traces-gateway docker container. | `` | -| `tracesGateway.deployment.livenessProbe` | Liveness probe settings for the traces-gateway container. | `{"periodSeconds": 15, "timeoutSeconds": 10, "failureThreshold": 3}` | -| `tracesGateway.deployment.podAnnotations` | Additional annotations for traces-gateway pods. | `{}` | -| `tracesGateway.deployment.podLabels` | Additional labels for traces-gateway pods. | `{}` | -| `tracesGateway.deployment.readinessProbe` | Readiness probe settings for the traces-gateway container. | `{"periodSeconds": 10, "timeoutSeconds": 3, "failureThreshold": 3}` | -| `tracesGateway.deployment.resources` | Resources for traces-gateway statefulset. | `{"limits": {"memory": "2Gi", "cpu": "1000m"}, "requests": {"memory": "196Mi", "cpu": "50m"}}` | -| `tracesGateway.deployment.startupProbe` | Startup probe configuration for the traces-gateway container. | `{"periodSeconds": 5, "timeoutSeconds": 3, "failureThreshold": 60}` | -| `tracesGateway.deployment.tolerations` | Tolerations for traces-gateway statefulset. | `[]` | -| `tracesSampler.deployment.replicas` | Set the number of OpenTelemetry Collector replicas. | `1` | +| `otelcolInstrumentation.statefulset.containers.otelcol.startupProbe` | Startup probe configuration for the otelcol-instrumentation container. | `{"periodSeconds": 3, "failureThreshold": 60}` | +| `otelcolInstrumentation.statefulset.extraEnvVars` | Additional environment variables for otelcol-instrumentation pods. | `[]` | +| `otelcolInstrumentation.statefulset.extraVolumes` | Additional volumes for otelcol-instrumentation pods. 
| `[]` | +| `otelcolInstrumentation.statefulset.extraVolumeMounts` | Additional volume mounts for otelcol-instrumentation pods. | `[]` | +| `otelcolInstrumentation.logLevelFilter` | Do not send otelcol-instrumentation logs if `true`. | `false` | +| `otelcolInstrumentation.config` | Configuration for otelcol-instrumentation | `See [values.yaml]` | | `tracesSampler.deployment.nodeSelector` | Node selector for otelcol deployment. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | +| `tracesSampler.deployment.tolerations` | Tolerations for traces-sampler statefulset. | `[]` | +| `tracesSampler.deployment.replicas` | Set the number of OpenTelemetry Collector replicas. | `1` | +| `tracesSampler.deployment.resources` | Resources for traces-sampler statefulset. | `{"limits": {"memory": "4Gi", "cpu": "2000m"}, "requests": {"memory": "384Mi", "cpu": "200m"}}` | | `tracesSampler.deployment.priorityClassName` | Priority class name for OpenTelemetry Collector log pods. | `Nil` | -| `tracesSampler.logLevelFilter` | Do not send traces-sampler logs if `true`. | `false` | -| `tracesSampler.config.processors.batch.send_batch_size` | Sets the preferred size of batch. | `256` | -| `tracesSampler.config.processors.batch.send_batch_max_size` | Sets the maximum allowed size of a batch. Use with caution, setting too large value might cause 413 Payload Too Large errors. | `512` | -| `tracesSampler.config.processors.memory_limiter.limit_percentage` | Sets the maximum amount of memory, in %, targeted to be allocated by the process heap. | `75` | -| `tracesSampler.config.processors.memory_limiter.spike_limit_percentage` | Sets the maximum spike expected between the measurements of memory usage, in %. | `20` | -| `tracesSampler.config` | Configuration for traces-sampler. | See [values.yaml] | -| `tracesSampler.deployment.extraEnvVars` | Additional environment variables for traces-sampler pods. | `{}` | -| `tracesSampler.deployment.extraVolumeMounts` | Additional volume mounts for traces-sampler pods. | `{}` | -| `tracesSampler.deployment.extraVolumes` | Additional volumes for traces-sampler pods. | `{}` | -| `tracesSampler.deployment.image.pullPolicy` | Image pullPolicy for traces-sampler docker container. | `IfNotPresent` | -| `tracesSampler.deployment.image.repository` | Image repository for traces-sampler docker container. | `` | -| `tracesSampler.deployment.image.tag` | Image tag for traces-sampler docker container. | `` | -| `tracesSampler.deployment.podAnnotations` | Additional annotations for traces-sampler pods. | `{}` | | `tracesSampler.deployment.podLabels` | Additional labels for traces-sampler pods. | `{}` | -| `tracesSampler.deployment.resources` | Resources for traces-sampler statefulset. | `{"limits": {"memory": "4Gi", "cpu": "2000m"}, "requests": {"memory": "384Mi", "cpu": "200m"}}` | -| `tracesSampler.deployment.tolerations` | Tolerations for traces-sampler statefulset. | `[]` | -| `otellogs.image.repository` | Image repository for otelcol docker container. | `` | -| `otellogs.image.tag` | Image tag for otelcol docker container. | `` | -| `otellogs.image.pullPolicy` | Image pullPolicy for otelcol docker container. | `IfNotPresent` | -| `otellogs.logLevel` | Log level for the OpenTelemtry Collector. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` | -| `otellogs.config.merge` | Configuration for log collector otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. 
| {} | -| `otellogs.config.override` | Configuration for log collector otelcol, replaces defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | {} | -| `otellogs.daemonset` | OpenTelemetry Collector Daemonset customization options. See [values.yaml] for more details. | See [values.yaml] | -| `otelcloudwatch.statefulset` | OpenTelemetry Cloudwatch Collector statefulset customization options. See [values.yaml] for more details. | See [values.yaml] | -| `otellogs.additionalDaemonSets` | OpenTelemetry Collector Daemonset per node customization options. See [Best Practices](/docs/best-practices.md#setting-different-resources-on-different-nodes-for-logs-collector). | `{}` | -| `otellogs.metrics.enabled` | Enable OpenTelemetry Collector metrics | `true` | -| `otellogs.serviceLabels` | Add custom labels to OpenTelemetry Collector Service | `{}` | -| `metadata.image.repository` | Image repository for otelcol docker container. | `` | -| `metadata.image.tag` | Image tag for otelcol docker container. | `` | +| `tracesSampler.deployment.podAnnotations` | Additional annotations for traces-sampler pods. | `{}` | +| `tracesSampler.deployment.image.repository` | Image repository for traces-sampler docker container. | `Nil` | +| `tracesSampler.deployment.image.tag` | Image tag for traces-sampler docker container. | `Nil` | +| `tracesSampler.deployment.image.pullPolicy` | Image pullPolicy for traces-sampler docker container. | `IfNotPresent` | +| `tracesSampler.deployment.extraEnvVars` | Additional environment variables for traces-sampler pods. | `[]` | +| `tracesSampler.deployment.extraVolumes` | Additional volumes for traces-sampler pods. | `[]` | +| `tracesSampler.deployment.extraVolumeMounts` | Additional volume mounts for traces-sampler pods. | `[]` | +| `tracesSampler.logLevelFilter` | Do not send traces-sampler logs if `true`. | `false` | +| `tracesSampler.config` | Configuration for traces-sampler. | `See [values.yaml]` | +| `metadata.image.repository` | Image repository for otelcol docker container. | `Nil` | +| `metadata.image.tag` | Image tag for otelcol docker container. | `Nil` | | `metadata.image.pullPolicy` | Image pullPolicy for otelcol docker container. | `IfNotPresent` | | `metadata.securityContext` | The securityContext configuration for otelcol. | `{"fsGroup": 999}` | | `metadata.podLabels` | Additional labels for all otelcol pods. | `{}` | @@ -392,72 +296,147 @@ The following table lists the configurable parameters of the Sumo Logic chart an | `metadata.persistence.pvcLabels` | Additional PersistentVolumeClaim labels for all OpenTelemetry Collector pods. | `{}` | | `metadata.metrics.enabled` | Flag to control deploying the otelcol metrics statefulsets. | `true` | | `metadata.metrics.logLevel` | Flag to control logging level for OpenTelemetry Collector for metrics. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` | -| `metadata.metrics.config.merge` | Configuration for metrics metadata otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | {} | -| `metadata.metrics.config.override` | Configuration for metrics metadata otelcol, replaces defaults.See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | {} | +| `metadata.metrics.config.merge` | Configuration for metrics metadata otelcol, merged with defaults. 
See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | `{}` |
+| `metadata.metrics.config.override` | Configuration for metrics metadata otelcol, replaces defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | `{}` |
 | `metadata.metrics.config.additionalEndpoints` | List of additional endpoints for Open Telemetry Metadata Pod. | `[]` |
-| `metadata.metrics.statefulset.containers.otelcol.startupProbe` | Startup probe configuration for metrics otelcol container. | `{"periodSeconds": 3, "failureThreshold": 60}` |
 | `metadata.metrics.statefulset.nodeSelector` | Node selector for metrics metadata enrichment (otelcol) statefulset. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` |
 | `metadata.metrics.statefulset.tolerations` | Tolerations for metrics metadata enrichment (otelcol) statefulset. | `[]` |
+| `metadata.metrics.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for metrics metadata enrichment (otelcol) statefulset. | `[]` |
 | `metadata.metrics.statefulset.affinity` | Affinity for metrics metadata enrichment (otelcol) statefulset. | `{}` |
 | `metadata.metrics.statefulset.podAntiAffinity` | PodAntiAffinity for metrics metadata enrichment (otelcol) statefulset. | `soft` |
-| `metadata.metrics.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for metrics metadata enrichment (otelcol) statefulset. | `[]` |
 | `metadata.metrics.statefulset.replicaCount` | Replica count for metrics metadata enrichment (otelcol) statefulset. | `3` |
 | `metadata.metrics.statefulset.resources` | Resources for metrics metadata enrichment (otelcol) statefulset. | `{"limits": {"memory": "1Gi", "cpu": "1000m"}, "requests": {"memory": "768Mi", "cpu": "500m"}}` |
 | `metadata.metrics.statefulset.priorityClassName` | Priority class name for metrics metadata enrichment (otelcol) pods. | `Nil` |
 | `metadata.metrics.statefulset.podLabels` | Additional labels for metrics metadata enrichment (otelcol) pods. | `{}` |
 | `metadata.metrics.statefulset.podAnnotations` | Additional annotations for metrics metadata enrichment (otelcol) pods. | `{}` |
+| `metadata.metrics.statefulset.containers.otelcol.securityContext` | The securityContext configuration for otelcol container for metrics metadata enrichment statefulset. | `{}` |
 | `metadata.metrics.statefulset.containers.otelcol.livenessProbe` | Liveness probe settings for the logs otelcol container. | `{"initialDelaySeconds": 15, "periodSeconds": 15, "timeoutSeconds": 10, "failureThreshold": 3}` |
 | `metadata.metrics.statefulset.containers.otelcol.readinessProbe` | Readiness probe settings for the logs otelcol container. | `{"initialDelaySeconds": 5, "periodSeconds": 10, "timeoutSeconds": 3, "failureThreshold": 3}` |
-| `metadata.metrics.statefulset.containers.otelcol.securityContext` | The securityContext configuration for otelcol container for metrics metadata enrichment statefulset. | `{}` |
-| `metadata.metrics.statefulset.extraEnvVars` | Additional environment variables for metrics metadata enrichment (otelcol) pods. | `Nil` |
-| `metadata.metrics.statefulset.extraVolumes` | Additional volumes for metrics metadata enrichment (otelcol) pods. | `Nil` |
-| `metadata.metrics.statefulset.extraVolumeMounts` | Additional volume mounts for metrics metadata enrichment (otelcol) pods.
| `Nil` | -| `metadata.metrics.autoscaling.enabled` | Option to override the default autoscaling parameter (sumologic.autoscaling.enabled) for metrics metadata enrichment (otelcol) and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | +| `metadata.metrics.statefulset.containers.otelcol.startupProbe` | Startup probe configuration for metrics otelcol container. | `{"periodSeconds": 3, "failureThreshold": 60}` | +| `metadata.metrics.statefulset.extraEnvVars` | Additional environment variables for metrics metadata enrichment (otelcol) pods. | `[]` | +| `metadata.metrics.statefulset.extraVolumes` | Additional volumes for metrics metadata enrichment (otelcol) pods. | `[]` | +| `metadata.metrics.statefulset.extraVolumeMounts` | Additional volume mounts for metrics metadata enrichment (otelcol) pods. | `[]` | +| `metadata.metrics.autoscaling.enabled` | Option to turn autoscaling on for metrics metadata enrichment (otelcol) and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | | `metadata.metrics.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` | | `metadata.metrics.autoscaling.maxReplicas` | Default max replicas for autoscaling | `10` | | `metadata.metrics.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `80` | -| `metadata.metrics.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `Nil` | -| `metadata.metrics.podDisruptionBudget` | Pod Disruption Budget for metrics metadata enrichment (otelcol) statefulset and for experimental otelcol metrics collector. | `{"minAvailable": 2}` | +| `metadata.metrics.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `50` | +| `metadata.metrics.podDisruptionBudget` | Pod Disruption Budget for metrics metadata enrichment (otelcol) statefulset and for experimental otelcol metrics collector. | `{"minAvailable": 2, "maxUnavailable": 1}` | | `metadata.logs.enabled` | Flag to control deploying the otelcol logs statefulsets. | `true` | | `metadata.logs.logLevel` | Flag to control logging level for OpenTelemetry Collector for logs. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` | -| `metadata.logs.config.merge` | Configuration for logs metadata otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | {} | -| `metadata.logs.config.override` | Configuration for logs metadata otelcol, replaces defaults.See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | {} | -| `metadata.logs.statefulset.containers.otelcol.startupProbe` | Startup probe configuration for the logs otelcol container. | `{"periodSeconds": 3, "failureThreshold": 60}` | -| `metadata.logs.statefulset.containers.otelcol.livenessProbe` | Liveness probe settings for the logs otelcol container. | `{"initialDelaySeconds": 15, "periodSeconds": 15, "timeoutSeconds": 10, "failureThreshold": 3}` | -| `metadata.logs.statefulset.containers.otelcol.readinessProbe` | Readiness probe settings for the logs otelcol container. | `{"initialDelaySeconds": 5, "periodSeconds": 10, "timeoutSeconds": 3, "failureThreshold": 3}` | -| `metadata.logs.statefulset.containers.otelcol.securityContext` | The securityContext configuration for the logs otelcol container. 
| `{}` |
+| `metadata.logs.config.merge` | Configuration for logs metadata otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | `{}` |
+| `metadata.logs.config.override` | Configuration for logs metadata otelcol, replaces defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | `{}` |
 | `metadata.logs.statefulset.nodeSelector` | Node selector for logs metadata enrichment (otelcol) statefulset. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` |
 | `metadata.logs.statefulset.tolerations` | Tolerations for logs metadata enrichment (otelcol) statefulset. | `[]` |
+| `metadata.logs.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for logs metadata enrichment (otelcol) statefulset. | `[]` |
 | `metadata.logs.statefulset.affinity` | Affinity for logs metadata enrichment (otelcol) statefulset. | `{}` |
 | `metadata.logs.statefulset.podAntiAffinity` | PodAntiAffinity for logs metadata enrichment (otelcol) statefulset. | `soft` |
-| `metadata.logs.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for logs metadata enrichment (otelcol) statefulset. | `[]` |
 | `metadata.logs.statefulset.replicaCount` | Replica count for logs metadata enrichment (otelcol) statefulset. | `3` |
 | `metadata.logs.statefulset.resources` | Resources for logs metadata enrichment (otelcol) statefulset. | `{"limits": {"memory": "1Gi", "cpu": "1000m"}, "requests": {"memory": "768Mi", "cpu": "500m"}}` |
 | `metadata.logs.statefulset.priorityClassName` | Priority class name for logs metadata enrichment (otelcol) pods. | `Nil` |
 | `metadata.logs.statefulset.podLabels` | Additional labels for logs metadata enrichment (otelcol) pods. | `{}` |
 | `metadata.logs.statefulset.podAnnotations` | Additional annotations for logs metadata enrichment (otelcol) pods. | `{}` |
+| `metadata.logs.statefulset.containers.otelcol.securityContext` | The securityContext configuration for the logs otelcol container. | `{}` |
+| `metadata.logs.statefulset.containers.otelcol.livenessProbe` | Liveness probe settings for the logs otelcol container. | `{"initialDelaySeconds": 15, "periodSeconds": 15, "timeoutSeconds": 10, "failureThreshold": 3}` |
+| `metadata.logs.statefulset.containers.otelcol.readinessProbe` | Readiness probe settings for the logs otelcol container. | `{"initialDelaySeconds": 5, "periodSeconds": 10, "timeoutSeconds": 3, "failureThreshold": 3}` |
+| `metadata.logs.statefulset.containers.otelcol.startupProbe` | Startup probe configuration for the logs otelcol container.
| `{"periodSeconds": 3, "failureThreshold": 60}` | +| `metadata.logs.statefulset.extraEnvVars` | Additional environment variables for logs metadata enrichment (otelcol) pods. | `[]` | +| `metadata.logs.statefulset.extraVolumes` | Additional volumes for logs metadata enrichment (otelcol) pods. | `[]` | +| `metadata.logs.statefulset.extraVolumeMounts` | Additional volume mounts for logs metadata enrichment (otelcol) pods. | `[]` | +| `metadata.logs.statefulset.extraPorts` | Additional exposed ports in logs metadata enrichment (otelcol) pods and service. | `[]` | +| `metadata.logs.statefulset.extraArgs` | Additional arguments to otelcol container. | `[]` | +| `metadata.logs.autoscaling.enabled` | Option to turn autoscaling on for logs metadata enrichment (otelcol) and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | | `metadata.logs.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` | | `metadata.logs.autoscaling.maxReplicas` | Default max replicas for autoscaling | `10` | | `metadata.logs.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `80` | -| `metadata.logs.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `Nil` | -| `metadata.logs.podDisruptionBudget` | Pod Disruption Budget for logs metadata enrichment (otelcol) statefulset. | `{"minAvailable": 2}` | -| `otelevents.image.repository` | Image repository for otelcol docker container. | `` | -| `otelevents.image.tag` | Image tag for otelcol docker container. | `` | +| `metadata.logs.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `50` | +| `metadata.logs.podDisruptionBudget` | Pod Disruption Budget for logs metadata enrichment (otelcol) statefulset. | `{"minAvailable": 2, "maxUnavailable": 1}` | +| `tracesGateway.enabled` | Flag to control deploying traces-gateway. [See docs for more information.](/docs/opentelemetry-collector/traces.md) | `true` | +| `tracesGateway.autoscaling.enabled` | Option to turn autoscaling on for traces-gateway and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | +| `tracesGateway.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` | +| `tracesGateway.autoscaling.maxReplicas` | Default max replicas for autoscaling | `10` | +| `tracesGateway.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `100` | +| `tracesGateway.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `50` | +| `tracesGateway.deployment.replicas` | Set the number of OpenTelemetry Collector replicas. | `1` | +| `tracesGateway.deployment.nodeSelector` | Node selector for otelcol deployment. [See docs/best-practices.md for more information.](/docs/best-practices.md) | `{}` | +| `tracesGateway.deployment.tolerations` | Tolerations for traces-gateway statefulset. | `[]` | +| `tracesGateway.deployment.resources` | Resources for traces-gateway statefulset. | `{"limits": {"memory": "2Gi", "cpu": "1000m"}, "requests": {"memory": "196Mi", "cpu": "50m"}}` | +| `tracesGateway.deployment.podLabels` | Additional labels for traces-gateway pods. | `{}` | +| `tracesGateway.deployment.podAnnotations` | Additional annotations for traces-gateway pods. | `{}` | +| `tracesGateway.deployment.image.repository` | Image repository for traces-gateway docker container. 
| `Nil` |
+| `tracesGateway.deployment.image.tag` | Image tag for traces-gateway docker container. | `Nil` |
+| `tracesGateway.deployment.image.pullPolicy` | Image pullPolicy for traces-gateway docker container. | `IfNotPresent` |
+| `tracesGateway.deployment.livenessProbe` | Liveness probe settings for the traces-gateway container. | `{"periodSeconds": 15, "timeoutSeconds": 10, "failureThreshold": 3}` |
+| `tracesGateway.deployment.readinessProbe` | Readiness probe settings for the traces-gateway container. | `{"periodSeconds": 10, "timeoutSeconds": 3, "failureThreshold": 3}` |
+| `tracesGateway.deployment.startupProbe` | Startup probe configuration for the traces-gateway container. | `{"periodSeconds": 5, "timeoutSeconds": 3, "failureThreshold": 60}` |
+| `tracesGateway.deployment.extraEnvVars` | Additional environment variables for traces-gateway pods. | `[]` |
+| `tracesGateway.deployment.extraVolumes` | Additional volumes for traces-gateway pods. | `[]` |
+| `tracesGateway.deployment.extraVolumeMounts` | Additional volume mounts for traces-gateway pods. | `[]` |
+| `tracesGateway.deployment.priorityClassName` | Priority class name for OpenTelemetry Collector log pods. | `Nil` |
+| `tracesGateway.logLevelFilter` | Do not send traces-gateway logs if `true`. | `false` |
+| `tracesGateway.config` | Configuration for traces-gateway. | `See [values.yaml]` |
+| `otelevents.image.repository` | Image repository for otelcol docker container. | `Nil` |
+| `otelevents.image.tag` | Image tag for otelcol docker container. | `Nil` |
 | `otelevents.image.pullPolicy` | Image pullPolicy for otelcol docker container. | `IfNotPresent` |
 | `otelevents.logLevel` | Log level for the OpenTelemtry Collector. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` |
 | `otelevents.config.merge` | Configuration for events otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | `{}` |
 | `otelevents.config.override` | Configuration for events otelcol, replaces defaults.See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | `{}` |
-| `otelevents.statefulset` | OpenTelemetry Collector StatefulSet customization options. See values.yaml for more details. | See [values.yaml] |
+| `otelevents.statefulset` | OpenTelemetry Collector StatefulSet customization options. See values.yaml for more details. | `See [values.yaml]` |
+| `otelcloudwatch.statefulset` | OpenTelemetry Cloudwatch Collector statefulset customization options. See [values.yaml] for more details. | `See [values.yaml]` |
+| `otellogs.metrics.enabled` | Enable OpenTelemetry Collector metrics | `true` |
+| `otellogs.serviceLabels` | Add custom labels to OpenTelemetry Collector Service | `{}` |
+| `otellogs.image.repository` | Image repository for otelcol docker container. | `Nil` |
+| `otellogs.image.tag` | Image tag for otelcol docker container. | `Nil` |
+| `otellogs.image.pullPolicy` | Image pullPolicy for otelcol docker container. | `IfNotPresent` |
+| `otellogs.logLevel` | Log level for the OpenTelemetry Collector. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` |
+| `otellogs.config.merge` | Configuration for log collector otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | `{}` |
+| `otellogs.config.override` | Configuration for log collector otelcol, replaces defaults.
See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | `{}` | +| `otellogs.daemonset` | OpenTelemetry Collector Daemonset customization options. See [values.yaml] for more details. | `See [values.yaml]` | +| `otellogs.additionalDaemonSets` | OpenTelemetry Collector Daemonset per node customization options. See [Best Practices](/docs/best-practices.md#setting-different-resources-on-different-nodes-for-logs-collector). | `{}` | +| `telegraf-operator.enabled` | Flag to control deploying Telegraf Operator Helm sub-chart. | `false` | +| `telegraf-operator.fullnameOverride` | Used to override the chart's full name. | `Nil` | +| `telegraf-operator.image.sidecarImage` | Telegraf Operator sidecar image. | `public.ecr.aws/sumologic/telegraf:1.21.2` | +| `telegraf-operator.replicaCount` | Replica count for Telegraf Operator pods. | `1` | +| `telegraf-operator.classes.secretName` | Secret name in which the Telegraf Operator configuration will be stored. | `telegraf-operator-classes` | +| `telegraf-operator.classes.default` | Name of the default output configuration. | `sumologic-prometheus` | +| `telegraf-operator.classes.data` | Telegraf sidecar configuration. | `See [values.yaml]` | +| `telegraf-operator.imagePullSecrets` | Pull secrets for Telegraf Operator images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config). | `[]` | +| `falco.enabled` | Flag to control deploying Falco Helm sub-chart. | `false` | +| `falco.fullnameOverride` | Used to override the chart's full name. | `Nil` | +| `falco.imagePullSecrets` | Pull secrets for falco images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config). | `[]` | +| `falco.image.registry` | Image registry for falco docker container. | `public.ecr.aws` | +| `falco.image.repository` | Image repository for falco docker container. | `falcosecurity/falco-no-driver` | +| `falco.addKernelDevel` | Flag to control installation of `kernel-devel` on nodes using MachineConfig, required to build falco modules (only for OpenShift) | `true` | +| `falco.extra.initContainers` | InitContainers for Falco pod | `See [values.yaml]` | +| `falco.driver.kind` | Tell Falco which driver to use. Available options: module (kernel driver) and ebpf (eBPF probe). Set to `ebpf` for GKE | `module` | +| `falco.driver.loader.initContainer.image` | Init container image configuration for falco driver loader. | `{"registry": "public.ecr.aws", "repository": "falcosecurity/falco-driver-loader"}` | +| `falco.falco.load_plugins` | Names of the plugins to be loaded by Falco. | `["json", "k8saudit"]` | +| `falco.falco.json_output` | Output events in json. | `true` | +| `falco.falco.rules_file` | The location of the rules files that will be consumed by Falco. | `["/etc/falco/falco_rules.yaml", "/etc/falco/falco_rules.local.yaml", "/etc/falco/k8s_audit_rules.yaml", "/etc/falco/rules.d", "/etc/falco/rules.available/application_rules.yaml"]` | +| `falco.falcoctl` | Falcoctl configuration. We don't use it for now due to breaking changes. [See this issue](https://github.com/SumoLogic/sumologic-kubernetes-collection/issues/3144). 
| `{"artifact": {"follow": {"enabled": false}, "install": {"enabled": false}}}` | +| `falco.customRules` | Additional falco rules related to Sumo Logic Kubernetes Collection | `See [values.yaml]` | | `tailing-sidecar-operator.enabled` | Flag to control deploying Tailing Sidecar Operator Helm sub-chart. | `false` | | `tailing-sidecar-operator.fullnameOverride` | Used to override the chart's full name. | `Nil` | | `tailing-sidecar-operator.scc.create` | Create OpenShift's Security Context Constraint | `false` | -| `kube-prometheus-stack.prometheus.prometheusSpec.nodeSelector` | Node selector for prometheus. [See docs/Best_Practices.md for more information.](/docs/best-practices.md) | `{}` | +| `opentelemetry-operator.enabled` | Flag to control deploying OpenTelemetry Operator Helm sub-chart. | `true` | +| `opentelemetry-operator.instrumentationJobImage.image.repository` | Name of the image repository used to apply Instrumentation resource | `sumologic/kubernetes-tools` | +| `opentelemetry-operator.instrumentationJobImage.image.tag` | Name of the image tag used to apply Instrumentation resource | `2.14.0` | +| `opentelemetry-operator.createDefaultInstrumentation` | Flag to control creation of default Instrumentation object | `false` | +| `opentelemetry-operator.instrumentationNamespaces` | Used to create `Instrumentation` resources in specified namespaces. | `Nil` | +| `opentelemetry-operator.instrumentation.dotnet.traces.enabled` | Flag to control traces export from DotNet instrumentation in `Instrumentation` resource. | `true` | +| `opentelemetry-operator.instrumentation.dotnet.metrics.enabled` | Flag to control metrics export from DotNet instrumentation in `Instrumentation` resource. | `true` | +| `opentelemetry-operator.instrumentation.java.traces.enabled` | Flag to control traces export from Java instrumentation in `Instrumentation` resource. | `true` | +| `opentelemetry-operator.instrumentation.java.metrics.enabled` | Flag to control metrics export from Java instrumentation in `Instrumentation` resource. | `true` | +| `opentelemetry-operator.instrumentation.python.traces.enabled` | Flag to control traces export from Python instrumentation in `Instrumentation` resource. | `true` | +| `opentelemetry-operator.instrumentation.python.metrics.enabled` | Flag to control metrics export from Python instrumentation in `Instrumentation` resource. | `true` | +| `opentelemetry-operator.admissionWebhooks` | Admission webhooks make sure only requests with correctly formatted rules will get into the Operator. They also enable the sidecar injection for OpenTelemetryCollector and Instrumentation CR's. | `See [values.yaml]` | +| `opentelemetry-operator.manager.collectorImage.repository` | The default collector image repository for OpenTelemetryCollector CRDs. | `public.ecr.aws/sumologic/sumologic-otel-collector` | +| `opentelemetry-operator.manager.collectorImage.tag` | The default collector image tag for OpenTelemetryCollector CRDs. | `0.85.0-sumo-0` | +| `opentelemetry-operator.manager.env` | Additional environment variables for opentelemetry-operator helm chart. | `{}` | +| `opentelemetry-operator.manager.resources.limits.cpu` | Used to set limit CPU for OpenTelemetry-Operator Manager. | `250m` | +| `opentelemetry-operator.manager.resources.limits.memory` | Used to set limit Memory for OpenTelemetry-Operator Manager. | `512Mi` | +| `opentelemetry-operator.manager.resources.requests.cpu` | Used to set requested CPU for OpenTelemetry-Operator Manager. 
| `150m` | +| `opentelemetry-operator.manager.resources.requests.memory` | Used to set requested Memory for OpenTelemetry-Operator Manager. | `256Mi` | | `pvcCleaner.metrics.enabled` | Flag to enable cleaning unused PVCs for otelcol metrics statefulsets. | `false` | | `pvcCleaner.logs.enabled` | Flag to enable cleaning unused PVCs for otelcol logs statefulsets. | `false` | | `pvcCleaner.job.image.repository` | Image repository for pvcCleaner docker containers. | `public.ecr.aws/sumologic/kubernetes-tools-kubectl` | diff --git a/deploy/helm/sumologic/_values.yaml b/deploy/helm/sumologic/_values.yaml new file mode 100644 index 0000000000..c413cc1e90 --- /dev/null +++ b/deploy/helm/sumologic/_values.yaml @@ -0,0 +1,2307 @@ +## Sumo Logic Kubernetes Collection configuration file +## All the comments start with two or more # characters +nameOverride: '' +fullnameOverride: '' +## Use the same namespace as namespaceOverride in 'kube-prometheus-stack.namespaceOverride' if Prometheus setup is also enabled +namespaceOverride: '' +sumologic: + ## If enabled, a pre-install hook will create Collector and Sources in Sumo Logic + setupEnabled: true + ## If enabled, a pre-delete hook will destroy Collector in Sumo Logic + cleanupEnabled: false + ## If enabled, accessId and accessKey will be sourced from Secret Name given + ## Be sure to include at least the following env variables in your secret + ## (1) SUMOLOGIC_ACCESSID, (2) SUMOLOGIC_ACCESSKEY + # envFromSecret: sumo-api-secret + ## Sumo access ID + # accessId: '' + ## Sumo access key + # accessKey: '' + ## Sumo API endpoint; Leave blank for automatic endpoint discovery and redirection + ## ref: https://help.sumologic.com/docs/api/getting-started#sumo-logic-endpoints-by-deployment-and-firewall-security + endpoint: '' + ## proxy urls + httpProxy: '' + httpsProxy: '' + ## Exclude Kubernetes internal traffic from proxy + noProxy: kubernetes.default.svc + ## Collector name + # collectorName: '' + ## Cluster name: Note spaces are not allowed and will be replaced with dashes. + clusterName: kubernetes + ## Cluster DNS Domain + ## We use the DNS domain in internal urls to speed up DNS resolution, see https://github.com/kubernetes/kubernetes/issues/56903 + ## Change this if you have set a non-default DNS domain in your cluster + clusterDNSDomain: cluster.local + ## Configuration of Kubernetes for Terraform client + ## https://www.terraform.io/docs/providers/kubernetes/index.html#argument-reference + ## All double quotes should be escaped here regarding Terraform syntax + cluster: + host: https://kubernetes.default.svc + # username: '' + # password: '' + # insecure: '' + # client_certificate: '' + # client_key: '' + cluster_ca_certificate: ${file("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt")} + # config_path: '' + # config_context: '' + # config_context_auth_info: '' + # config_context_cluster: '' + token: ${file("/var/run/secrets/kubernetes.io/serviceaccount/token")} + # exec: + # api_version: '' + # command: '' + # args: '' + # env: {} + ## Enable autoscaling for components that support it: logs metadata, metrics metadata, metrics collector, otelcol instrumentation, and traces gateway + autoscaling: + enabled: true + ## If you set it to false, it would set EXCLUDE_NAMESPACE= + ## and not add the Otelcol logs and Prometheus remotestorage metrics. + collectionMonitoring: true + ## Optionally specify an array of pullSecrets. + ## They will be added to serviceaccount that is used for Sumo Logic's + ## deployments and statefulsets. 
+ ## + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistryKeySecretName + ## Add custom labels to the following sumologic resources(otelcol sts, setup job, otelcol deployment) + podLabels: {} + ## Add custom annotations to the following sumologic resources(otelcol sts, setup job, otelcol deployment) + podAnnotations: {} + ## Add custom annotations to sumologic serviceAccounts + serviceAccount: + annotations: {} + ## creation of Security Context Constraints in Openshift + scc: + create: false + setup: + ## uncomment to force collection installation (disables k8s version verification) + # force: true + job: + image: + repository: public.ecr.aws/sumologic/kubernetes-setup + tag: 3.11.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of pullSecrets. + ## They will be added to serviceaccount that is used for Sumo Logic's + ## setup job. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistryKeySecretName + resources: + limits: + memory: 256Mi + cpu: 2000m + requests: + memory: 64Mi + cpu: 200m + nodeSelector: {} + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: null + # operator: Exists + # effect: NoSchedule + ## Affinity and anti-affinity + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + ## Add custom labels only to setup job pod + podLabels: {} + ## Add custom annotations only to setup job pod + podAnnotations: {} + ## uncomment for the debug mode (disables the automatic run of the setup.sh script) + # debug: true + monitors: + ## If enabled, a pre-install hook will create k8s monitors in Sumo Logic + enabled: true + ## The installed monitors default status: enabled/disabled + monitorStatus: enabled + ## A list of emails to send notifications from monitors + notificationEmails: [] + dashboards: + ## If enabled, a pre-install hook will install k8s dashboards in Sumo Logic + enabled: true + collector: + ## Configuration of additional collector fields + ## https://help.sumologic.com/docs/manage/fields/#http-source-fields + fields: {} + ## Configuration of http sources + ## See docs/Terraform.md for more information + ## name: source name visible in sumologic platform + ## config-name: This is mostly for backward compatibility + sources: + metrics: + default: + name: (default-metrics) + config-name: endpoint-metrics + default-otlp: + name: metrics-otlp + config-name: endpoint-metrics-otlp + properties: + content_type: Otlp + apiserver: + name: apiserver-metrics + config-name: endpoint-metrics-apiserver + controller: + name: kube-controller-manager-metrics + config-name: endpoint-metrics-kube-controller-manager + scheduler: + name: kube-scheduler-metrics + config-name: endpoint-metrics-kube-scheduler + state: + name: kube-state-metrics + config-name: endpoint-metrics-kube-state + kubelet: + name: kubelet-metrics + config-name: endpoint-metrics-kubelet + node: + name: node-exporter-metrics + config-name: endpoint-metrics-node-exporter + control-plane: + name: control-plane-metrics + logs: + default: + name: logs + config-name: endpoint-logs + ## Properties can be used to extend default settings, such as 
processing rules, fields etc + properties: + default_date_formats: + ## Ensures that timestamp key has precedence over timestamp auto discovery + - format: epoch + locator: \"timestamp\":(\\d+) + # filters: + # - name: Test Exclude Debug + # filter_type: Exclude + # regexp: .*DEBUG.* + default-otlp: + name: logs-otlp + config-name: endpoint-logs-otlp + properties: + content_type: Otlp + events: + default: + name: events + config-name: endpoint-events + properties: + default_date_formats: + ## Ensures that timestamp key has precedence over timestamp auto discovery + - format: epoch + locator: \"timestamp\":(\\d+) + default-otlp: + name: events-otlp + config-name: endpoint-events-otlp + properties: + content_type: Otlp + traces: + default: + name: traces + config-name: endpoint-traces + properties: + content_type: Zipkin + default-otlp: + name: traces-otlp + config-name: endpoint-traces-otlp + properties: + content_type: Otlp + ## Global configuration for OpenTelemetry Collector + otelcolImage: + repository: public.ecr.aws/sumologic/sumologic-otel-collector + tag: 0.86.0-sumo-1 + ## Add a -fips suffix to all image tags. With default tags, this results in FIPS-compliant otel images. + ## See https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/fips.md for more information. + addFipsSuffix: false + ## Configuration for collection of Kubernetes events + events: + enabled: true + ## Source name for the Events source. Default: "events" + sourceName: events + ## Source category for the Events source. Default: "" which is resolved to "{clusterName}/events" + # sourceCategory: kubernetes/events + ## Used to replace '-' with another character. + sourceCategoryReplaceDash: / + persistence: + enabled: true + size: 10Gi + ## Configuration for the Persistent Volume and Persistent Volume Claim + ## where the storage is kept + persistentVolume: + path: /var/lib/storage/events + accessMode: ReadWriteOnce + ## Add custom labels to otelcol event statefulset PVC + pvcLabels: {} + # storageClass: '' + sourceType: otlp + ## Logs configuration + ## Set the enabled flag to false for disabling logs ingestion altogether. + logs: + enabled: true + collector: + otelcol: + enabled: true + ## Experimental + otelcloudwatch: + enabled: false + roleArn: '' + ## Configure persistence for the cloudwatch collector + persistence: + enabled: true + region: '' + pollInterval: 1m + ## A map of log group and stream prefixes + ## This is a map of log group and stream prefix, for example: + ## logGroups: + ## fluent-bit: + ## names: [fluent-bit] + logGroups: {} + multiline: + enabled: true + first_line_regex: ^\[?\d{4}-\d{1,2}-\d{1,2}.\d{2}:\d{2}:\d{2} + ## Additional configuration takes precedence over first_line_regex and are executed only for first matching condition + ## + ## Example: + ## - first_line_regex: "^@@@@ First Line" + ## condition: 'attributes["k8s.namespace.name"] == "foo"' + ## - first_line_regex: "^--- First Line" + ## condition: 'attributes["k8s.container.name"] matches "^bar-.*" + ## + ## NOTE: See below link for full reference: + ## https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/collecting-container-logs.md#conditional-multiline-log-parsing + additional: [] + container: + enabled: true + ## Format to post logs into Sumo: fields, json, json_merge, or text. + ## NOTE: json is an alias for fields + ## NOTE: Multiline log detection works differently for `text` format. 
See below link for full reference: + ## https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/collecting-container-logs.md#text-log-format + format: fields + ## When set to `true`, preserves the `time` attribute, which is a string representation of the `timestamp` attribute. + keep_time_attribute: false + otelcol: + ## Extra processors for container logs. See [/docs/collecting-container-logs.md](/docs/collecting-container-logs.md) for details. + extraProcessors: [] + ## Set the _sourceHost metadata field in Sumo Logic. + sourceHost: '' + ## Set the _sourceName metadata field in Sumo Logic. + sourceName: '%{namespace}.%{pod}.%{container}' + ## Set the _sourceCategory metadata field in Sumo Logic. + sourceCategory: '%{namespace}/%{pod_name}' + ## Set the prefix, for _sourceCategory metadata. + sourceCategoryPrefix: kubernetes/ + ## Used to replace - with another character. + sourceCategoryReplaceDash: / + ## A regular expression for containers. + ## Matching containers will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). + excludeContainerRegex: '' + ## A regular expression for hosts. + ## Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). + excludeHostRegex: '' + ## A regular expression for namespaces. + ## Matching namespaces will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). + excludeNamespaceRegex: '' + ## A regular expression for pods. + ## Matching pods will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). + excludePodRegex: '' + ## Defines whether container-level pod annotations are enabled. + perContainerAnnotationsEnabled: false + ## Defines the list of prefixes of container-level pod annotations. + perContainerAnnotationPrefixes: [] + systemd: + enabled: true + ## systemd units to collect logs from + # units: + # - docker.service + otelcol: + ## Extra processors for systemd logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details. + extraProcessors: [] + ## Set the _sourceName metadata field in Sumo Logic. + sourceName: '%{_sourceName}' + ## Set the _sourceCategory metadata field in Sumo Logic. + sourceCategory: system + ## Set the prefix, for _sourceCategory metadata. + sourceCategoryPrefix: kubernetes/ + ## Used to replace - with another character. + sourceCategoryReplaceDash: / + ## A regular expression for facility. + ## Matching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). + excludeFacilityRegex: '' + ## A regular expression for hosts. + ## Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). + excludeHostRegex: '' + ## A regular expression for priority. + ## Matching priority will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). + excludePriorityRegex: '' + ## A regular expression for unit. + ## Matching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). + excludeUnitRegex: '' + kubelet: + otelcol: + ## Extra processors for kubelet logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details. + extraProcessors: [] + ## Set the _sourceName metadata field in Sumo Logic. + sourceName: k8s_kubelet + ## Set the _sourceCategory metadata field in Sumo Logic. 
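+ ## A rough worked example, assuming the default sourceCategoryPrefix and sourceCategoryReplaceDash configured below:
+ ## with sourceCategory: kubelet these logs get _sourceCategory kubernetes/kubelet, and a value such as
+ ## kubelet-host (hypothetical) would become kubernetes/kubelet/host, since the prefix is prepended and each '-' is replaced with '/'.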
+ sourceCategory: kubelet + ## Set the prefix, for _sourceCategory metadata. + sourceCategoryPrefix: kubernetes/ + ## Used to replace - with another character. + sourceCategoryReplaceDash: / + ## A regular expression for facility. + ## Matching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). + excludeFacilityRegex: '' + ## A regular expression for hosts. + ## Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). + excludeHostRegex: '' + ## A regular expression for priority. + ## Matching priority will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). + excludePriorityRegex: '' + ## A regular expression for unit. + ## Matching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol). + excludeUnitRegex: '' + ## Fields to be created at Sumo Logic to ensure logs are tagged with + ## relevant metadata. + ## https://help.sumologic.com/docs/manage/fields/#manage-fields + fields: + - cluster + - container + - daemonset + - deployment + - host + - namespace + - node + - pod + - service + - statefulset + ## Additional fields to be created in Sumo Logic. + ## https://help.sumologic.com/docs/manage/fields/#manage-fields + additionalFields: [] + sourceType: otlp + ## Metrics configuration + ## Set the enabled flag to false for disabling metrics ingestion altogether. + metrics: + enabled: true + ## Otel metrics collector. Replaces Prometheus. + ## To enable, you need opentelemetry-operator enabled as well. + collector: + otelcol: + enabled: true + ## Default scrape interval + scrapeInterval: 30s + ## Option to turn autoscaling on for otelcol and specify params for HPA. + ## Autoscaling needs metrics-server to access cpu metrics. + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 10 + targetCPUUtilizationPercentage: 70 + targetMemoryUtilizationPercentage: 70 + nodeSelector: {} + ## Add custom annotations only to merics otelcol sts pods + podAnnotations: {} + ## Add custom labels only to metrics otelcol sts pods + podLabels: {} + ## Option to define priorityClassName to assign a priority class to pods. + priorityClassName: '' + replicaCount: 1 + resources: + limits: + memory: 2Gi + cpu: 1000m + requests: + memory: 768Mi + cpu: 100m + ## Selector for ServiceMonitors used for target discovery. By default, this selects resources created by this Chart. + ## See https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr + # serviceMonitorSelector: {} + ## Selector for PodMonitors used for target discovery. By default, this selects resources created by this Chart. + ## See https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr + # podMonitorSelector: {} + securityContext: + ## The group ID of all processes in the statefulset containers. This can be anything, but it does need to be set. + ## The default is 0 (root), and containers don't have write permissions for volumes in that case. + fsGroup: 999 + tolerations: [] + ## Configuration for kubelet metrics + kubelet: + enabled: true + ## Configuration for cAdvisor metrics + cAdvisor: + enabled: true + ## Enable collection of metrics from Pods annotated with prometheus.io/* keys. 
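+ ## As an illustration (port and path are hypothetical), such a Pod could be annotated with:
+ ## metadata:
+ ##   annotations:
+ ##     prometheus.io/scrape: "true"
+ ##     prometheus.io/path: /metrics
+ ##     prometheus.io/port: "8000"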
+ ## See https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/collecting-application-metrics.md#application-metrics-are-exposed-one-endpoint-scenario for more information. + annotatedPods: + enabled: true + ## Allocation strategy for the scrape target allocator. Valid values are: least-weighted and consistent-hashing. + ## See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocator + # allocationStrategy: least-weighted + ## Default metric filters for Sumo Apps + enableDefaultFilters: false + ## By default, the Helm Chart collects some high-cardinality histogram metrics, as Sumo Apps make use of the sum and count components. + ## This setting causes the metrics collector to drop the actual histogram buckets, keeping only the sum and the count. + ## This affects the following metrics: + ## - apiserver_request_duration_seconds + ## - coredns_dns_request_duration_seconds + ## - kubelet_runtime_operations_duration_seconds + dropHistogramBuckets: true + otelcol: + ## Includes additional processors into pipelines. + ## It can be used for filtering metrics, renaming, changing metadata and so on. + ## This is list of objects, for example: + ## extraProcessors: + ## - filterprocessor: + ## exclude: + ## match_type: strict + ## metric_names: + ## - hello_world + ## - hello/world + extraProcessors: [] + ## Enable a load balancing proxy for Prometheus remote writes. + ## Prometheus remote write uses a single persistent HTTP connection per target, + ## which interacts poorly with TCP load balancing with iptables that K8s Services do. + ## Use a real HTTP load balancer for this instead. + ## This is an advanced feature, enable only if you're experiencing performance + ## issues with metrics metadata enrichment. + remoteWriteProxy: + enabled: false + config: + ## Increase this if you've increased samples_per_send in Prometheus to prevent nginx + ## from spilling proxied request bodies to disk + clientBodyBufferSize: 64k + ## This feature autodetects how much CPU is assigned to the nginx instance and sets + ## the right amount of workers based on that. Disable to use the default of 8 workers. + workerCountAutotune: true + ## Nginx listen port + port: 8080 + ## Nginx access logs + enableAccessLogs: false + replicaCount: 3 + image: + repository: public.ecr.aws/sumologic/nginx-unprivileged + tag: 1.25.2-alpine + pullPolicy: IfNotPresent + resources: + limits: + cpu: 1000m + memory: 256Mi + requests: + cpu: 100m + memory: 128Mi + livenessProbe: + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + readinessProbe: + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 3 + successThreshold: 1 + failureThreshold: 3 + securityContext: {} + nodeSelector: {} + tolerations: [] + affinity: {} + ## Option to define priorityClassName to assign a priority class to pods. 
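+ ## For example (hypothetical name, the PriorityClass must already exist in the cluster):
+ ## priorityClassName: sumologic-remote-write-proxy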
+ priorityClassName: '' + ## Add custom labels only to metrics sts pods + podLabels: {} + ## Add custom annotations only to metrics sts pods + podAnnotations: {} + ## Prometheus serviceMonitors related to Sumo Logic services + ## They are applied only if kube-prometheus-stack is enabled + serviceMonitors: + - name: collection-sumologic-otelcol-logs + additionalLabels: + sumologic.com/app: otelcol-logs + endpoints: + - port: otelcol-metrics + selector: + matchLabels: + sumologic.com/app: otelcol-logs + sumologic.com/scrape: 'true' + - name: collection-sumologic-otelcol-metrics + additionalLabels: + sumologic.com/app: otelcol-metrics + endpoints: + - port: otelcol-metrics + selector: + matchLabels: + sumologic.com/app: otelcol-metrics + sumologic.com/scrape: 'true' + - name: collection-sumologic-metrics-collector + additionalLabels: + sumologic.com/app: otelcol-metrics + endpoints: + - port: monitoring + selector: + matchLabels: + sumologic.com/app: otelcol + sumologic.com/component: metrics + sumologic.com/scrape: 'true' + - name: collection-sumologic-otelcol-logs-collector + additionalLabels: + sumologic.com/app: otelcol-logs-collector + endpoints: + - port: metrics + selector: + matchLabels: + sumologic.com/app: otelcol-logs-collector + sumologic.com/scrape: 'true' + - name: collection-sumologic-otelcol-events + additionalLabels: + sumologic.com/app: otelcol-events + endpoints: + - port: otelcol-metrics + selector: + matchLabels: + sumologic.com/app: otelcol-events + sumologic.com/scrape: 'true' + - name: collection-sumologic-otelcol-traces + additionalLabels: + sumologic.com/app: otelcol + endpoints: + - port: metrics + selector: + matchLabels: + sumologic.com/component: instrumentation + sumologic.com/scrape: 'true' + - name: collection-sumologic-prometheus + endpoints: + - port: http-web + path: /metrics + metricRelabelings: + - action: keep + regex: prometheus_remote_storage_.* + sourceLabels: + - __name__ + selector: + matchLabels: + app: kube-prometheus-stack-prometheus + ## The type of source we send to in Sumo. The possible values are http and otlp. + ## Consult the documentation for more information. + sourceType: otlp + ## Traces configuration + ## Set the enabled flag to false to disable traces ingestion. + traces: + enabled: true + ## How many spans per request should be send to receiver + spans_per_request: 100 + sourceType: otlp +## Configure metrics-server +## ref: https://github.com/bitnami/charts/blob/master/bitnami/metrics-server/values.yaml +metrics-server: + ## Set the enabled flag to true for enabling metrics-server. + ## This is required before enabling autoscaling unless you have an existing metrics-server in the cluster. + enabled: false + ## Put here the new name if you want to override the full name used for metrics-server components. + # fullnameOverride: '' + apiService: + create: true + extraArgs: + - --kubelet-insecure-tls=true + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + ## Optionally specify image options for metrics-server + # image: + # ## Optionally specify an array of imagePullSecrets. + # ## Secrets must be manually created in the namespace. 
+ # ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + # ## + # pullSecrets: + # - imagepullsecret +## Configure kube-prometheus-stack +## ref: https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml +kube-prometheus-stack: + ## Uncomment the flag below to not install kube-prometheus-stack helm chart + ## as a dependency along with this helm chart. + ## This is needed e.g. if you want to use a different version of kube-prometheus-stack - + ## see https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/best-practices.md#using-newer-kube-prometheus-stack. + ## To disable metrics collection, set `sumologic.metrics.enabled: false` and leave this flag commented out or set it to `false`. + ## Do not set this flag explicitly to `true` while at the same time setting `sumologic.metrics.enabled: false`, + ## as this will make Prometheus try to write to an non-existent metrics enrichment service. + # enabled: false + # global: + # ## Reference to one or more secrets to be used when pulling images + # ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + # ## + # imagePullSecrets: + # - name: image-pull-secret + ## Put here the new name if you want to override the full name used for Kube Prometheus Stack components. + # fullnameOverride: '' + ## Put here the new namespace if you want to override the namespace used for Kube Prometheus Stack components. + # namespaceOverride: '' + ## Provide a target gitVersion of K8S, in case .Capabilites.KubeVersion is not available (e.g. helm template). + ## Changing this may break Sumo Logic apps. + # kubeTargetVersionOverride: '' + ## Labels to apply to all kube-prometheus-stack resources + commonLabels: {} + defaultRules: + rules: + alertmanager: false + etcd: false + configReloaders: false + general: false + k8s: false + kubeApiserverAvailability: false + kubeApiserverBurnrate: false + kubeApiserverHistogram: false + kubeApiserverSlos: false + kubeControllerManager: false + kubelet: false + kubeProxy: false + kubePrometheusGeneral: false + kubePrometheusNodeRecording: false + kubernetesApps: false + kubernetesResources: false + kubernetesStorage: false + kubernetesSystem: false + kubeSchedulerAlerting: false + kubeSchedulerRecording: false + kubeStateMetrics: false + network: false + node: false + nodeExporterAlerting: false + nodeExporterRecording: false + prometheus: false + prometheusOperator: false + windows: false + ## NOTE changing the serviceMonitor scrape interval to be >1m can result in metrics from recording + ## rules to be missing and empty panels in Sumo Logic Kubernetes apps. + kubeApiServer: + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + interval: '' + ## see docs/scraped_metrics.md + ## apiserver_request_count + ## apiserver_request_total + ## apiserver_request_duration_seconds_count + ## apiserver_request_duration_seconds_sum + metricRelabelings: + - action: keep + regex: (?:apiserver_request_(?:count|total)|apiserver_request_(?:duration_seconds)_(?:count|sum)) + sourceLabels: + - __name__ + kubelet: + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. 
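+ ## For example, interval: 30s matches the Prometheus scrapeInterval set further down in this file;
+ ## per the NOTE above, values longer than 1m can leave recording-rule based panels empty.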
+ interval: '' + ## Enable scraping /metrics/probes from kubelet's service + probes: false + ## Enable scraping /metrics/resource/v1alpha1 from kubelet's service + resource: false + ## see docs/scraped_metrics.md + ## kubelet metrics: + ## kubelet_docker_operations_errors + ## kubelet_docker_operations_errors_total + ## kubelet_docker_operations_duration_seconds_count + ## kubelet_docker_operations_duration_seconds_sum + ## kubelet_runtime_operations_duration_seconds_count + ## kubelet_runtime_operations_duration_seconds_sum + ## kubelet_running_container_count + ## kubelet_running_containers + ## kubelet_running_pod_count + ## kubelet_running_pods + ## kubelet_docker_operations_latency_microseconds + ## kubelet_docker_operations_latency_microseconds_count + ## kubelet_docker_operations_latency_microseconds_sum + ## kubelet_runtime_operations_latency_microseconds + ## kubelet_runtime_operations_latency_microseconds_count + ## kubelet_runtime_operations_latency_microseconds_sum + metricRelabelings: + - action: keep + regex: (?:kubelet_docker_operations_errors(?:|_total)|kubelet_(?:docker|runtime)_operations_duration_seconds_(?:count|sum)|kubelet_running_(?:container|pod)(?:_count|s)|kubelet_(:?docker|runtime)_operations_latency_microseconds(?:|_count|_sum)) + sourceLabels: + - __name__ + - action: labeldrop + regex: id + ## see docs/scraped_metrics.md + ## cadvisor container metrics + ## container_cpu_usage_seconds_total + ## container_fs_limit_bytes + ## container_fs_usage_bytes + ## container_memory_working_set_bytes + ## container_cpu_cfs_throttled_seconds_total + ## cadvisor aggregate container metrics + ## container_network_receive_bytes_total + ## container_network_transmit_bytes_total + cAdvisorMetricRelabelings: + - action: keep + regex: (?:container_cpu_usage_seconds_total|container_memory_working_set_bytes|container_fs_usage_bytes|container_fs_limit_bytes|container_cpu_cfs_throttled_seconds_total|container_network_receive_bytes_total|container_network_transmit_bytes_total) + sourceLabels: + - __name__ + ## Drop container metrics with container tag set to an empty string: + ## these are the pod aggregated container metrics which can be aggregated + ## in Sumo anyway. There's also some cgroup-specific time series we also + ## do not need. + - action: drop + sourceLabels: + - __name__ + - container + regex: (?:container_cpu_usage_seconds_total|container_memory_working_set_bytes|container_fs_usage_bytes|container_fs_limit_bytes);$ + - action: labelmap + regex: container_name + replacement: container + - action: drop + sourceLabels: + - container + regex: POD + - action: labeldrop + regex: (id|name) + kubeControllerManager: + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + interval: '' + ## see docs/scraped_metrics.md + ## controller manager metrics + ## https://kubernetes.io/docs/concepts/cluster-administration/monitoring/#kube-controller-manager-metrics + ## e.g. + ## cloudprovider_aws_api_request_duration_seconds_bucket + ## cloudprovider_aws_api_request_duration_seconds_count + ## cloudprovider_aws_api_request_duration_seconds_sum + metricRelabelings: + - action: keep + regex: (?:cloudprovider_.*_api_request_duration_seconds.*) + sourceLabels: + - __name__ + coreDns: + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. 
+ interval: '' + ## see docs/scraped_metrics.md + ## coredns: + ## coredns_cache_entries + ## coredns_cache_hits_total + ## coredns_cache_misses_total + ## coredns_dns_request_duration_seconds_count + ## coredns_dns_request_duration_seconds_sum + ## coredns_dns_requests_total + ## coredns_dns_responses_total + ## coredns_forward_requests_total + ## process_cpu_seconds_total + ## process_open_fds + ## process_resident_memory_bytes + ## process_cpu_seconds_total + ## process_open_fds + ## process_resident_memory_bytes + metricRelabelings: + - action: keep + regex: (?:coredns_cache_(entries|(hits|misses)_total)|coredns_dns_request_duration_seconds_(count|sum)|coredns_(forward_requests|dns_requests|dns_responses)_total|process_(cpu_seconds_total|open_fds|resident_memory_bytes)) + sourceLabels: + - __name__ + kubeEtcd: + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + interval: '' + ## see docs/scraped_metrics.md + ## etcd_request_cache_get_duration_seconds_count + ## etcd_request_cache_get_duration_seconds_sum + ## etcd_request_cache_add_duration_seconds_count + ## etcd_request_cache_add_duration_seconds_sum + ## etcd_request_cache_add_latencies_summary_count + ## etcd_request_cache_add_latencies_summary_sum + ## etcd_request_cache_get_latencies_summary_count + ## etcd_request_cache_get_latencies_summary_sum + ## etcd_helper_cache_hit_count + ## etcd_helper_cache_hit_total + ## etcd_helper_cache_miss_count + ## etcd_helper_cache_miss_total + ## etcd server: + ## etcd_mvcc_db_total_size_in_bytes + ## etcd_debugging_store_expires_total + ## etcd_debugging_store_watchers + ## etcd_disk_backend_commit_duration_seconds_bucket + ## etcd_disk_wal_fsync_duration_seconds_bucket + ## etcd_grpc_proxy_cache_hits_total + ## etcd_grpc_proxy_cache_misses_total + ## etcd_network_client_grpc_received_bytes_total + ## etcd_network_client_grpc_sent_bytes_total + ## etcd_server_has_leader + ## etcd_server_leader_changes_seen_total + ## etcd_server_proposals_applied_total + ## etcd_server_proposals_committed_total + ## etcd_server_proposals_failed_total + ## etcd_server_proposals_pending + ## process_cpu_seconds_total + ## process_open_fds + ## process_resident_memory_bytes + metricRelabelings: + - action: keep + regex: (?:etcd_request_cache_(?:add|get)_(?:duration_seconds|latencies_summary)_(?:count|sum)|etcd_helper_cache_(?:hit|miss)_(?:count|total)|etcd_mvcc_db_total_size_in_bytes|etcd_debugging_(store_(expires_total|watchers))|etcd_disk_(backend_commit|wal_fsync)_duration_seconds_.*|etcd_grpc_proxy_cache_(hits|misses)_total|etcd_network_client_grpc_(received|sent)_bytes_total|etcd_server_(has_leader|leader_changes_seen_total)|etcd_server_proposals_(pending|(applied|committed|failed)_total)|process_(cpu_seconds_total|open_fds|resident_memory_bytes)) + sourceLabels: + - __name__ + kubeScheduler: + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. 
+ interval: '' + ## see docs/scraped_metrics.md + ## scheduler_e2e_* is present for K8s <1.23 + ## scheduler_e2e_scheduling_duration_seconds_bucket + ## scheduler_e2e_scheduling_duration_seconds_count + ## scheduler_e2e_scheduling_duration_seconds_sum + ## scheduler_scheduling_attempt_duration_seconds is present for K8s >=1.23 + ## scheduler_scheduling_attempt_duration_seconds_bucket + ## scheduler_scheduling_attempt_duration_seconds_count + ## scheduler_scheduling_attempt_duration_seconds_sum + ## scheduler_framework_extension_point_duration_seconds_bucket + ## scheduler_framework_extension_point_duration_seconds_count + ## scheduler_framework_extension_point_duration_seconds_sum + ## scheduler_scheduling_algorithm_duration_seconds_bucket + ## scheduler_scheduling_algorithm_duration_seconds_count + ## scheduler_scheduling_algorithm_duration_seconds_sum + metricRelabelings: + - action: keep + regex: (?:scheduler_(?:e2e_scheduling|scheduling_attempt|framework_extension_point|scheduling_algorithm)_duration_seconds.*) + sourceLabels: + - __name__ + alertmanager: + enabled: false + grafana: + enabled: false + defaultDashboardsEnabled: false + prometheusOperator: + ## Labels to add to the operator pod + podLabels: {} + ## Annotations to add to the operator pod + podAnnotations: {} + ## Resource limits for prometheus operator + resources: {} + # limits: + # cpu: 200m + # memory: 200Mi + # requests: + # cpu: 100m + # memory: 100Mi + admissionWebhooks: + enabled: false + tls: + enabled: false + ## Resource limits for kube-state-metrics + kube-state-metrics: + ## Put here the new name if you want to override the full name used for Kube State Metrics components. + # fullnameOverride: '' + nodeSelector: {} + ## Custom labels to apply to service, deployment and pods + customLabels: {} + ## Additional annotations for pods in the DaemonSet + podAnnotations: {} + resources: {} + ## latest kube-prometheus-stack version that is supported on OpenShift 4.8-4.10 + ## uses version 2.6.0 of kube-state-metrics, but this version has some critical vulnerabilities, + ## so we bump the image manually. + image: + tag: v2.7.0 + prometheus: + monitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. 
+ interval: '' + ## see docs/scraped_metrics.md + ## kube_daemonset_status_current_number_scheduled + ## kube_daemonset_status_desired_number_scheduled + ## kube_daemonset_status_number_misscheduled + ## kube_daemonset_status_number_unavailable + ## kube_deployment_spec_replicas + ## kube_deployment_status_replicas_available + ## kube_deployment_status_replicas_unavailable + ## kube_node_info + ## kube_node_status_allocatable + ## kube_node_status_capacity + ## kube_node_status_condition + ## kube_statefulset_metadata_generation + ## kube_statefulset_replicas + ## kube_statefulset_status_observed_generation + ## kube_statefulset_status_replicas + ## kube_hpa_spec_max_replicas + ## kube_hpa_spec_min_replicas + ## kube_hpa_status_condition + ## kube_hpa_status_current_replicas + ## kube_hpa_status_desired_replicas + ## kube pod state metrics + ## kube_pod_container_info + ## kube_pod_container_resource_limits + ## kube_pod_container_resource_requests + ## kube_pod_container_status_ready + ## kube_pod_container_status_restarts_total + ## kube_pod_container_status_terminated_reason + ## kube_pod_container_status_waiting_reason + ## kube_pod_status_phase + ## kube_pod_info + ## kube_service_info + ## kube_service_spec_external_ip + ## kube_service_spec_type + ## kube_service_status_load_balancer_ingress + ## Drop unnecessary labels Prometheus adds to these metrics + ## We don't want container=kube-state-metrics on everything + metricRelabelings: + - action: keep + regex: (?:kube_statefulset_status_observed_generation|kube_statefulset_status_replicas|kube_statefulset_replicas|kube_statefulset_metadata_generation|kube_daemonset_status_current_number_scheduled|kube_daemonset_status_desired_number_scheduled|kube_daemonset_status_number_misscheduled|kube_daemonset_status_number_unavailable|kube_deployment_spec_replicas|kube_deployment_status_replicas_available|kube_deployment_status_replicas_unavailable|kube_node_info|kube_node_status_allocatable|kube_node_status_capacity|kube_node_status_condition|kube_hpa_spec_max_replicas|kube_hpa_spec_min_replicas|kube_hpa_status_(condition|(current|desired)_replicas)|kube_pod_container_info|kube_pod_container_resource_requests|kube_pod_container_resource_limits|kube_pod_container_status_ready|kube_pod_container_status_terminated_reason|kube_pod_container_status_waiting_reason|kube_pod_container_status_restarts_total|kube_pod_status_phase|kube_pod_info|kube_service_info|kube_service_spec_external_ip|kube_service_spec_type|kube_service_status_load_balancer_ingress) + sourceLabels: + - __name__ + - action: labeldrop + regex: service + - action: replace + sourceLabels: + - container + - uid + regex: kube-state-metrics; + targetLabel: container + replacement: '' + - action: replace + sourceLabels: + - pod + - uid + regex: .*kube-state-metrics.*; + targetLabel: pod + replacement: '' + - action: labelmap + regex: (pod|service) + replacement: service_discovery_${1} + ## Resource limits for prometheus node exporter + prometheus-node-exporter: + ## Put here the new name if you want to override the full name used for Prometheus Node exporter components. + # fullnameOverride: '' + nodeSelector: {} + ## Additional labels for pods in the DaemonSet + podLabels: {} + ## Additional annotations for pods in the DaemonSet + podAnnotations: {} + resources: {} + # limits: + # cpu: 200m + # memory: 50Mi + # requests: + # cpu: 100m + # memory: 30Mi + prometheus: + monitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. 
+ interval: '' + ## see docs/scraped_metrics.md + ## node exporter metrics + ## node_cpu_seconds_total + ## node_load1 + ## node_load5 + ## node_load15 + ## node_disk_io_time_weighted_seconds_total + ## node_disk_io_time_seconds_total + ## node_vmstat_pgpgin + ## node_vmstat_pgpgout + ## node_memory_MemFree_bytes + ## node_memory_Cached_bytes + ## node_memory_Buffers_bytes + ## node_memory_MemTotal_bytes + ## node_network_receive_drop_total + ## node_network_transmit_drop_total + ## node_network_receive_bytes_total + ## node_network_transmit_bytes_total + ## node_filesystem_avail_bytes + ## node_filesystem_size_bytes + ## node_filesystem_files_free + ## node_filesystem_files + metricRelabelings: + - action: keep + regex: (?:node_load1|node_load5|node_load15|node_cpu_seconds_total|node_disk_io_time_weighted_seconds_total|node_disk_io_time_seconds_total|node_vmstat_pgpgin|node_vmstat_pgpgout|node_memory_MemFree_bytes|node_memory_MemAvailable_bytes|node_memory_Cached_bytes|node_memory_Buffers_bytes|node_memory_MemTotal_bytes|node_network_receive_drop_total|node_network_transmit_drop_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_filesystem_avail_bytes|node_filesystem_size_bytes) + sourceLabels: + - __name__ + prometheus: + additionalServiceMonitors: [] + prometheusSpec: + ## Prometheus default scrape interval, default from upstream Kube Prometheus Stack Helm chart + ## NOTE changing the scrape interval to be >1m can result in metrics + ## from recording rules to be missing and empty panels in Sumo Logic Kubernetes apps. + scrapeInterval: 30s + ## Prometheus data retention period + retention: 1d + ## Add custom pod annotations and labels to prometheus pods + podMetadata: + labels: {} + annotations: {} + nodeSelector: {} + ## Define resources requests and limits for single Pods. 
+ resources: + limits: + cpu: 2000m + memory: 8Gi + requests: + cpu: 500m + memory: 1Gi + initContainers: + - name: init-config-reloader + env: + - name: METADATA_METRICS_SVC + valueFrom: + configMapKeyRef: + name: sumologic-configmap + key: metadataMetrics + - name: NAMESPACE + valueFrom: + configMapKeyRef: + name: sumologic-configmap + key: metadataNamespace + containers: + - name: config-reloader + env: + - name: METADATA_METRICS_SVC + valueFrom: + configMapKeyRef: + name: sumologic-configmap + key: metadataMetrics + - name: NAMESPACE + valueFrom: + configMapKeyRef: + name: sumologic-configmap + key: metadataNamespace + ## Enable WAL compression to reduce Prometheus memory consumption + walCompression: true + ## prometheus scrape config + ## rel: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config + ## scraping metrics basing on annotations: + ## - prometheus.io/scrape: true - to scrape metrics from the pod + ## - prometheus.io/path: /metrics - path which the metric should be scrape from + ## - prometheus.io/port: 9113 - port which the metric should be scrape from + ## rel: https://github.com/prometheus-operator/kube-prometheus/pull/16#issuecomment-424318647 + additionalScrapeConfigs: + - job_name: pod-annotations + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_annotation_prometheus_io_scrape + action: keep + regex: true + - source_labels: + - __meta_kubernetes_pod_annotation_prometheus_io_path + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: + - __address__ + - __meta_kubernetes_pod_annotation_prometheus_io_port + action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + target_label: __address__ + - source_labels: + - __metrics_path__ + separator: ; + regex: (.*) + target_label: endpoint + replacement: $1 + action: replace + - source_labels: + - __meta_kubernetes_namespace + action: replace + target_label: namespace + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: + - __meta_kubernetes_pod_name + separator: ; + regex: (.*) + target_label: pod + replacement: $1 + action: replace + remoteWrite: + ## infrastructure metrics + - remoteTimeout: 5s + url: http://$(METADATA_METRICS_SVC).$(NAMESPACE):9888/prometheus.metrics +## Configure otelcol-instrumentation - Sumo OTel Distro Collector +## ref: https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/opentelemetry-collector/traces.md +otelcolInstrumentation: + enabled: true + sourceMetadata: + ## Set the _sourceName metadata field in Sumo Logic. + sourceName: '%{k8s.namespace.name}.%{k8s.pod.pod_name}.%{k8s.container.name}' + ## Set the _sourceCategory metadata field in Sumo Logic. + sourceCategory: '%{k8s.namespace.name}/%{k8s.pod.pod_name}' + ## Set the prefix, for _sourceCategory metadata. + sourceCategoryPrefix: kubernetes/ + ## Used to replace - with another character. + sourceCategoryReplaceDash: / + ## A regular expression for containers. + ## Matching containers will be excluded from Sumo. The logs will still be sent to otelcol. + excludeContainerRegex: '' + ## A regular expression for hosts. + ## Matching hosts will be excluded from Sumo. The logs will still be sent to otelcol. + excludeHostRegex: '' + ## A regular expression for namespaces. + ## Matching namespaces will be excluded from Sumo. The logs will still be sent to otelcol. + excludeNamespaceRegex: '' + ## A regular expression for pods. + ## Matching pods will be excluded from Sumo. 
The logs will still be sent to otelcol. + excludePodRegex: '' + ## Option to turn autoscaling on for otelcol and specify params for HPA. + ## Autoscaling needs metrics-server to access cpu metrics. + autoscaling: + enabled: false + minReplicas: 3 + maxReplicas: 10 + targetCPUUtilizationPercentage: 100 + # targetMemoryUtilizationPercentage: 50 + statefulset: + nodeSelector: {} + tolerations: [] + topologySpreadConstraints: [] + affinity: {} + ## Acceptable values for podAntiAffinity: + ## soft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default) + ## hard: specifies rules that must be met for a pod to be scheduled onto a node + podAntiAffinity: soft + replicaCount: 3 + resources: + limits: + memory: 4Gi + cpu: 2000m + requests: + memory: 768Mi + cpu: 500m + ## Option to define priorityClassName to assign a priority class to pods. + priorityClassName: '' + ## Add custom labels only to metrics sts pods + podLabels: {} + ## Add custom annotations only to metrics sts pods + podAnnotations: {} + image: + # repository: '' + # tag: '' + pullPolicy: IfNotPresent + ## Set securityContext for containers running in pods in otelcol-instrumentation statefulset. + containers: + otelcol: + securityContext: {} + livenessProbe: + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 10 + failureThreshold: 3 + readinessProbe: + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 3 + startupProbe: + periodSeconds: 3 + failureThreshold: 60 + ## Extra Environment Values - allows yaml definitions + # extraEnvVars: [] + # - name: VALUE_FROM_SECRET + # valueFrom: + # secretKeyRef: + # name: secret_name + # key: secret_key + # extraVolumes: [] + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs + # extraVolumeMounts: [] + # - name: es-certs + # mountPath: /certs + # readOnly: true + ## To enable collecting all logs, set to false + logLevelFilter: false + config: + receivers: + jaeger: + protocols: + thrift_compact: + endpoint: 0.0.0.0:6831 + thrift_binary: + endpoint: 0.0.0.0:6832 + grpc: + endpoint: 0.0.0.0:14250 + thrift_http: + endpoint: 0.0.0.0:14268 + opencensus: + endpoint: 0.0.0.0:55678 + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + otlp/deprecated: + protocols: + http: + endpoint: 0.0.0.0:55681 + zipkin: + endpoint: 0.0.0.0:9411 + processors: + ## Source processor adds Sumo Logic related metadata + source: + annotation_prefix: k8s.pod.annotation. 
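+ ## (Illustrative note: the k8s_tagger processor below exports pod annotations as attributes named
+ ## k8s.pod.annotation.<key>, so this prefix tells the source processor which attributes to treat as
+ ## pod annotations.)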
+ collector: '{{ .Values.sumologic.collectorName | default .Values.sumologic.clusterName | + quote }}' + exclude: + k8s.container.name: '{{ .Values.otelcolInstrumentation.sourceMetadata.excludeContainerRegex | quote + }}' + k8s.host.name: '{{ .Values.otelcolInstrumentation.sourceMetadata.excludeHostRegex | quote }}' + k8s.namespace.name: '{{ .Values.otelcolInstrumentation.sourceMetadata.excludeNamespaceRegex | quote + }}' + k8s.pod.name: '{{ .Values.otelcolInstrumentation.sourceMetadata.excludePodRegex| quote }}' + pod_key: k8s.pod.name + pod_name_key: k8s.pod.pod_name + pod_template_hash_key: k8s.pod.label.pod-template-hash + source_category: '{{ .Values.otelcolInstrumentation.sourceMetadata.sourceCategory | quote }}' + source_category_prefix: '{{ .Values.otelcolInstrumentation.sourceMetadata.sourceCategoryPrefix | quote + }}' + source_category_replace_dash: '{{ .Values.otelcolInstrumentation.sourceMetadata.sourceCategoryReplaceDash | + quote }}' + source_host: '%{k8s.pod.hostname}' + source_name: '{{ .Values.otelcolInstrumentation.sourceMetadata.sourceName | quote }}' + ## Resource processor sets the associted cluster attribute + resource: + attributes: + - key: k8s.cluster.name + value: '{{ include "sumologic.clusterNameReplaceSpaceWithDash" . }}' + action: upsert + resourcedetection: + detectors: + - system + override: false + timeout: 10s + ## Tags spans with K8S metadata, basing on the context IP + k8s_tagger: + ## When true, only IP is assigned and passed (so it could be tagged on another collector) + passthrough: false + ## When true, additional fields, such as serviceName are being also extracted + owner_lookup_enabled: true + ## Extracted fields and assigned names + extract: + ## extract the following well-known metadata fields + metadata: + - containerId + - containerName + - daemonSetName + - deploymentName + - hostName + - namespace + - nodeName + - podId + - podName + - replicaSetName + - serviceName + - statefulSetName + annotations: + - tag_name: k8s.pod.annotation.%s + key: '*' + namespace_labels: + - tag_name: k8s.namespace.label.%s + key: '*' + labels: + - tag_name: k8s.pod.label.%s + key: '*' + ## The memory_limiter processor is used to prevent out of memory situations on the collector. + memory_limiter: + ## check_interval is the time between measurements of memory usage for the + ## purposes of avoiding going over the limits. Defaults to zero, so no + ## checks will be performed. Values below 1 second are not recommended since + ## it can result in unnecessary CPU consumption. + check_interval: 5s + ## Maximum amount of memory, in %, targeted to be allocated by the process heap. + ## Note that typically the total memory usage of process will be about 50MiB higher + ## than this value. + limit_percentage: 75 + spike_limit_percentage: 20 + ## The batch processor accepts spans and places them into batches grouped by node and resource + batch: + ## Number of spans after which a batch will be sent regardless of time + send_batch_size: 256 + ## Never more than this many spans are being sent in a batch + send_batch_max_size: 512 + ## Time duration after which a batch will be sent regardless of size + timeout: 5s + extensions: + health_check: {} + memory_ballast: + ## Memory Ballast size should be max 1/3 to 1/2 of memory. + size_mib: 250 + pprof: {} + exporters: + sumologic/metrics: + endpoint: ${SUMO_ENDPOINT_DEFAULT_METRICS_SOURCE} + ## Compression encoding format, either empty string (""), gzip or deflate (default gzip). 
+ ## Empty string means no compression + compress_encoding: gzip + ## Max HTTP request body size in bytes before compression (if applied). By default 1_048_576 (1MB) is used. + max_request_body_size: 1048576 + ## Format to use when sending logs to Sumo. (default json) (possible values: json, text) + log_format: text + ## Format of the metrics to be sent (default is prometheus) (possible values: carbon2, prometheus) + ## carbon2 and graphite are going to be supported soon. + metric_format: prometheus + ## Timeout for every attempt to send data to Sumo Logic backend. Maximum connection timeout is 55s. + timeout: 5s + retry_on_failure: + enabled: true + ## Time to wait after the first failure before retrying + initial_interval: 5s + ## Upper bound on backoff + max_interval: 30s + ## Maximum amount of time spent trying to send a batch + max_elapsed_time: 120s + sending_queue: + enabled: false + ## Number of consumers that dequeue batches + num_consumers: 10 + ## Maximum number of batches kept in memory before data + ## User should calculate this as num_seconds * requests_per_second where: + ## num_seconds is the number of seconds to buffer in case of a backend outage + ## requests_per_second is the average number of requests per seconds. + queue_size: 5000 + otlphttp/traces: + endpoint: http://{{ include "otelcolinstrumentation.exporter.endpoint" . }}:4318 + service: + extensions: + - health_check + - memory_ballast + - pprof + pipelines: + traces: + receivers: + - jaeger + - opencensus + - otlp + - otlp/deprecated + - zipkin + processors: + - memory_limiter + - k8s_tagger + - source + - resource + - batch + exporters: + - otlphttp/traces + metrics: + receivers: + - otlp + - otlp/deprecated + processors: + - memory_limiter + - k8s_tagger + - source + - resource + - batch + exporters: + - sumologic/metrics +## Configure traces-sampler +## ref: https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/opentelemetry-collector/traces.md +tracesSampler: + deployment: + nodeSelector: {} + tolerations: [] + replicas: 1 + resources: + limits: + memory: 4Gi + cpu: 2000m + requests: + memory: 384Mi + cpu: 200m + ## Option to define priorityClassName to assign a priority class to pods. + priorityClassName: '' + ## Add custom labels only to traces-sampler deployment. + podLabels: {} + ## Add custom annotations only to traces-sampler deployment. + podAnnotations: {} + image: + # repository: '' + # tag: '' + pullPolicy: IfNotPresent + ## Extra Environment Values - allows yaml definitions + # extraEnvVars: [] + # - name: VALUE_FROM_SECRET + # valueFrom: + # secretKeyRef: + # name: secret_name + # key: secret_key + # extraVolumes: [] + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs + # extraVolumeMounts: [] + # - name: es-certs + # mountPath: /certs + # readOnly: true + ## To enable collecting all logs, set to false + # logLevelFilter: false + ## Collector configuration + config: + receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + processors: + ## The memory_limiter processor is used to prevent out of memory situations on the collector. + memory_limiter: + ## check_interval is the time between measurements of memory usage for the + ## purposes of avoiding going over the limits. Defaults to zero, so no + ## checks will be performed. Values below 1 second are not recommended since + ## it can result in unnecessary CPU consumption. 
+ check_interval: 5s + ## Maximum amount of memory, in %, targeted to be allocated by the process heap. + ## Note that typically the total memory usage of process will be about 50MiB higher + ## than this value. + limit_percentage: 75 + ## Maximum spike expected between the measurements of memory usage, in %. + spike_limit_percentage: 20 + ## Smart cascading filtering rules with preset limits. + ## Please see https://github.com/SumoLogic/sumologic-otel-collector/tree/v0.86.0-sumo-1/pkg/processor/cascadingfilterprocessor + ## for details. + cascading_filter: + ## Max number of traces for which decisions are kept in memory + num_traces: 200000 + ## The batch processor accepts spans and places them into batches grouped by node and resource + batch: + ## Number of spans after which a batch will be sent regardless of time + send_batch_size: 256 + ## Never more than this many spans are being sent in a batch + send_batch_max_size: 512 + ## Time duration after which a batch will be sent regardless of size + timeout: 5s + extensions: + health_check: {} + memory_ballast: + ## Memory Ballast size should be max 1/3 to 1/2 of memory. + size_mib: 683 + pprof: {} + exporters: + ## Following generates verbose logs with span content, useful to verify what + ## metadata is being tagged. To enable, uncomment and add "logging" to exporters below. + ## There are two levels that could be used: `debug` and `info` with the former + ## being much more verbose and including (sampled) spans content + # logging: + # loglevel: debug + otlphttp: + traces_endpoint: ${SUMO_ENDPOINT_DEFAULT_OTLP_TRACES_SOURCE}/v1/traces + compression: gzip + service: + extensions: + - health_check + - memory_ballast + - pprof + pipelines: + traces: + receivers: + - otlp + processors: + - memory_limiter + - cascading_filter + - batch + exporters: + - otlphttp +metadata: + ## Configure image for Opentelemetry Collector (for logs and metrics) + image: + # repository: '' + # tag: '' + pullPolicy: IfNotPresent + securityContext: + ## The group ID of all processes in the statefulset containers. This can be anything, but it does need to be set. + ## The default is 0 (root), and containers don't have write permissions for volumes in that case. + fsGroup: 999 + ## Add custom labels to all otelcol sts pods(logs and metrics) + podLabels: {} + ## Add custom annotations to all otelcol sts pods(logs and metrics) + podAnnotations: {} + ## Add custom labels to all otelcol svc (logs and metrics) + serviceLabels: {} + ## Configure persistence for Opentelemetry Collector + persistence: + enabled: true + # storageClass: '' + accessMode: ReadWriteOnce + size: 10Gi + ## Add custom labels to all otelcol statefulset PVC (logs and metrics) + pvcLabels: {} + ## Configure metrics pipeline. + ## This section affects only otelcol provider. + metrics: + enabled: true + logLevel: info + config: + ## Directly alter the OT configuration. The value of this key should be a dictionary, that will + ## be directly merged with the generated configuration, overriding existing values. + ## For example: + ## override: + ## processors: + ## batch: + ## send_batch_size: 512 + ## will change the batch size of the pipeline. + ## WARNING: This field is not subject to backwards-compatibility guarantees offered by the rest + ## of this chart. It involves implementation details that may change even in minor versions. + ## Use with caution, and consider opening an issue, so your customization can be added in a safer way. 
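+ ## As a further illustration only (values below are assumptions, not chart defaults),
+ ## the same merge mechanism could be used to relax the memory_limiter of this pipeline:
+ ## merge:
+ ##   processors:
+ ##     memory_limiter:
+ ##       limit_percentage: 85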
+ merge: {}
+ ## Completely override existing config and replace it with the contents of this value.
+ ## The value of this key should be a dictionary, that will replace the normal configuration.
+ ## This is an advanced feature, use with caution, and review the generated configuration first.
+ override: {}
+ ## List of additional endpoints to be handled by Metrics Metadata Pods
+ additionalEndpoints: []
+ statefulset:
+ nodeSelector: {}
+ tolerations: []
+ topologySpreadConstraints: []
+ affinity: {}
+ ## Acceptable values for podAntiAffinity:
+ ## soft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default)
+ ## hard: specifies rules that must be met for a pod to be scheduled onto a node
+ podAntiAffinity: soft
+ replicaCount: 3
+ resources:
+ limits:
+ memory: 1Gi
+ cpu: 1000m
+ requests:
+ memory: 768Mi
+ cpu: 500m
+ ## Option to define priorityClassName to assign a priority class to pods.
+ priorityClassName: ''
+ ## Add custom labels only to metrics sts pods
+ podLabels: {}
+ ## Add custom annotations only to metrics sts pods
+ podAnnotations: {}
+ ## Set securityContext for containers running in pods in metrics statefulset.
+ containers:
+ otelcol:
+ securityContext: {}
+ livenessProbe:
+ initialDelaySeconds: 15
+ periodSeconds: 15
+ timeoutSeconds: 10
+ failureThreshold: 3
+ readinessProbe:
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 3
+ failureThreshold: 3
+ startupProbe:
+ periodSeconds: 3
+ failureThreshold: 60
+ ## Extra Environment Values - allows yaml definitions
+ # extraEnvVars: []
+ # - name: VALUE_FROM_SECRET
+ # valueFrom:
+ # secretKeyRef:
+ # name: secret_name
+ # key: secret_key
+ # extraVolumes: []
+ # - name: es-certs
+ # secret:
+ # defaultMode: 420
+ # secretName: es-certs
+ # extraVolumeMounts: []
+ # - name: es-certs
+ # mountPath: /certs
+ # readOnly: true
+ ## Option to turn autoscaling on for metrics and specify params for HPA.
+ ## Autoscaling needs metrics-server to access cpu metrics.
+ autoscaling:
+ enabled: false
+ minReplicas: 3
+ maxReplicas: 10
+ targetCPUUtilizationPercentage: 80
+ # targetMemoryUtilizationPercentage: 50
+ ## Option to specify PodDisruptionBudgets
+ ## You can specify only one of maxUnavailable and minAvailable in a single PodDisruptionBudget
+ podDisruptionBudget:
+ minAvailable: 2
+ ## To use maxUnavailable, set minAvailable to null and uncomment the below:
+ # maxUnavailable: 1
+ ## Configure logs pipeline.
+ ## This section affects only otelcol provider.
+ logs:
+ enabled: true
+ logLevel: info
+ config:
+ ## Directly alter the OT configuration. The value of this key should be a dictionary, that will
+ ## be directly merged with the generated configuration, overriding existing values.
+ ## For example:
+ ## override:
+ ## processors:
+ ## batch:
+ ## send_batch_size: 512
+ ## will change the batch size of the pipeline.
+ ## WARNING: This field is not subject to backwards-compatibility guarantees offered by the rest
+ ## of this chart. It involves implementation details that may change even in minor versions.
+ ## Use with caution, and consider opening an issue, so your customization can be added in a safer way.
+ merge: {}
+ ## Completely override existing config and replace it with the contents of this value.
+ ## The value of this key should be a dictionary, that will replace the normal configuration.
+ ## This is an advanced feature, use with caution, and review the generated configuration first.
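+ ## As a sketch only (the shape below is an assumption, not a chart default), a full
+ ## override replaces the generated configuration, so it needs to spell out the whole pipeline:
+ ## override:
+ ##   receivers: {...}
+ ##   processors: {...}
+ ##   exporters: {...}
+ ##   service:
+ ##     pipelines:
+ ##       logs: {...}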
+ override: {}
+ statefulset:
+ nodeSelector: {}
+ tolerations: []
+ topologySpreadConstraints: []
+ affinity: {}
+ ## Acceptable values for podAntiAffinity:
+ ## soft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default)
+ ## hard: specifies rules that must be met for a pod to be scheduled onto a node
+ podAntiAffinity: soft
+ replicaCount: 3
+ resources:
+ limits:
+ memory: 1Gi
+ cpu: 1000m
+ requests:
+ memory: 768Mi
+ cpu: 500m
+ ## Option to define priorityClassName to assign a priority class to pods.
+ priorityClassName: ''
+ ## Add custom labels only to logs sts pods
+ podLabels: {}
+ ## Add custom annotations only to logs sts pods
+ podAnnotations: {}
+ ## Set securityContext for containers running in pods in logs statefulset.
+ containers:
+ otelcol:
+ securityContext: {}
+ livenessProbe:
+ initialDelaySeconds: 15
+ periodSeconds: 15
+ timeoutSeconds: 10
+ failureThreshold: 3
+ readinessProbe:
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 3
+ failureThreshold: 3
+ startupProbe:
+ periodSeconds: 3
+ failureThreshold: 60
+ ## Extra Environment Values - allows yaml definitions
+ # extraEnvVars: []
+ # - name: VALUE_FROM_SECRET
+ # valueFrom:
+ # secretKeyRef:
+ # name: secret_name
+ # key: secret_key
+ # extraVolumes: []
+ # - name: es-certs
+ # secret:
+ # defaultMode: 420
+ # secretName: es-certs
+ # extraVolumeMounts: []
+ # - name: es-certs
+ # mountPath: /certs
+ # readOnly: true
+ # extraPorts: []
+ # - name: otlphttp2
+ # containerPort: 4319
+ # protocol: TCP
+ # extraArgs: []
+ ## Option to turn autoscaling on for logs and specify params for HPA.
+ ## Autoscaling needs metrics-server to access cpu metrics.
+ autoscaling:
+ enabled: false
+ minReplicas: 3
+ maxReplicas: 10
+ targetCPUUtilizationPercentage: 80
+ # targetMemoryUtilizationPercentage: 50
+ ## Option to specify PodDisruptionBudgets
+ ## You can specify only one of maxUnavailable and minAvailable in a single PodDisruptionBudget
+ podDisruptionBudget:
+ minAvailable: 2
+ ## To use maxUnavailable, set minAvailable to null and uncomment the below:
+ # maxUnavailable: 1
+## Configure traces-gateway
+## ref: https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/opentelemetry-collector/traces.md
+tracesGateway:
+ enabled: true
+ ## Option to turn autoscaling on for otelcol and specify params for HPA.
+ ## Autoscaling needs metrics-server to access cpu metrics.
+ autoscaling:
+ enabled: false
+ minReplicas: 1
+ maxReplicas: 10
+ targetCPUUtilizationPercentage: 100
+ # targetMemoryUtilizationPercentage: 50
+ deployment:
+ replicas: 1
+ nodeSelector: {}
+ tolerations: []
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: 1000m
+ requests:
+ memory: 196Mi
+ cpu: 50m
+ ## Add custom labels only to traces-gateway deployment.
+ podLabels: {}
+ ## Add custom annotations only to traces-gateway deployment.
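+ ## For example (illustrative annotation only, not a default):
+ # podAnnotations:
+ #   sidecar.istio.io/inject: "false"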
+ podAnnotations: {} + image: + # repository: '' + # tag: '' + pullPolicy: IfNotPresent + livenessProbe: + periodSeconds: 15 + timeoutSeconds: 10 + failureThreshold: 3 + readinessProbe: + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 3 + startupProbe: + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 60 + ## Extra Environment Values - allows yaml definitions + # extraEnvVars: [] + # - name: VALUE_FROM_SECRET + # valueFrom: + # secretKeyRef: + # name: secret_name + # key: secret_key + # extraVolumes: [] + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs + # extraVolumeMounts: [] + # - name: es-certs + # mountPath: /certs + # readOnly: true + ## Option to define priorityClassName to assign a priority class to pods. + priorityClassName: '' + ## To enable collecting all logs, set to false + logLevelFilter: false + config: + receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + processors: + ## The memory_limiter processor is used to prevent out of memory situations on the collector. + memory_limiter: + ## check_interval is the time between measurements of memory usage for the + ## purposes of avoiding going over the limits. Defaults to zero, so no + ## checks will be performed. Values below 1 second are not recommended since + ## it can result in unnecessary CPU consumption. + check_interval: 5s + ## Maximum amount of memory, in %, targeted to be allocated by the process heap. + ## Note that typically the total memory usage of process will be about 50MiB higher + ## than this value. + limit_percentage: 75 + ## Maximum spike expected between the measurements of memory usage, in %. + spike_limit_percentage: 20 + ## The batch processor accepts spans and places them into batches grouped by node and resource + batch: + ## Number of spans after which a batch will be sent regardless of time + send_batch_size: 256 + ## Maximum number of spans sent at once + send_batch_max_size: 512 + ## Time duration after which a batch will be sent regardless of size + timeout: 5s + extensions: + health_check: {} + memory_ballast: + ## Memory Ballast size should be max 1/3 to 1/2 of memory. + size_mib: 250 + pprof: {} + exporters: + loadbalancing: + protocol: + otlp: + timeout: 10s + tls: + insecure: true + resolver: + dns: + hostname: '{{ include "tracesgateway.exporter.loadbalancing.endpoint" . }}' + port: 4317 + service: + extensions: + - health_check + - memory_ballast + - pprof + pipelines: + traces: + receivers: + - otlp + processors: + - memory_limiter + - batch + exporters: + - loadbalancing +## Configuration of the OpenTelemetry Collector that collects Kubernetes events. +## See https://github.com/SumoLogic/sumologic-kubernetes-collection/deploy/docs/collecting-kubernetes-events.md. +otelevents: + ## Configure image for Opentelemetry Collector + image: + # repository: '' + # tag: '' + pullPolicy: IfNotPresent + logLevel: info + ## Customize the Opentelemetry Collector configuration beyond the exposed options + config: + ## Directly alter the OT configuration. The value of this key should be a dictionary, that will + ## be directly merged with the generated configuration, overriding existing values. + ## For example: + ## override: + ## processors: + ## batch: + ## send_batch_size: 512 + ## will change the batch size of the pipeline. + ## WARNING: This field is not subject to backwards-compatibility guarantees offered by the rest + ## of this chart. 
It involves implementation details that may change even in minor versions. + ## Use with caution, and consider opening an issue, so your customization can be added in a safer way. + merge: {} + ## Completely override existing config and replace it with the contents of this value. + ## The value of this key should be a dictionary, that will replace the normal configuration. + ## This is an advanced feature, use with caution, and review the generated configuration first. + override: {} + statefulset: + nodeSelector: {} + tolerations: [] + topologySpreadConstraints: [] + affinity: {} + ## Acceptable values for podAntiAffinity: + ## soft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default) + ## hard: specifies rules that must be met for a pod to be scheduled onto a node + podAntiAffinity: soft + resources: + limits: + memory: 2Gi + cpu: 2000m + requests: + memory: 500Mi + cpu: 200m + ## Option to define priorityClassName to assign a priority class to pods. + priorityClassName: '' + ## Add custom labels only to events sts pods + podLabels: {} + ## Add custom annotations only to events sts pods + podAnnotations: {} + ## The group ID of all processes in the statefulset containers. This can be anything, but it does need to be set. + ## The default is 0 (root), and containers don't have write permissions for volumes in that case. + securityContext: + fsGroup: 999 + ## Set securityContext for containers running in pods in events statefulset. + containers: + otelcol: + securityContext: {} + livenessProbe: + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 10 + failureThreshold: 3 + readinessProbe: + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 3 + startupProbe: + periodSeconds: 3 + failureThreshold: 60 + ## Extra Environment Values - allows yaml definitions + # extraEnvVars: [] + # - name: VALUE_FROM_SECRET + # valueFrom: + # secretKeyRef: + # name: secret_name + # key: secret_key + # extraVolumes: [] + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs + # extraVolumeMounts: [] + # - name: es-certs + # mountPath: /certs + # readOnly: true +## Configure cloudwatch collection with Otelcol +otelcloudwatch: + statefulset: + nodeSelector: {} + tolerations: [] + topologySpreadConstraints: [] + affinity: {} + ## Acceptable values for podAntiAffinity: + ## soft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default) + ## hard: specifies rules that must be met for a pod to be scheduled onto a node + podAntiAffinity: soft + replicaCount: 1 + resources: + limits: + memory: 1Gi + cpu: 1000m + requests: + memory: 768Mi + cpu: 500m + ## Option to define priorityClassName to assign a priority class to pods. + priorityClassName: '' + ## Add custom labels only to logs otel sts pods + podLabels: {} + ## Add custom annotations only to logs otel sts pods + podAnnotations: {} + ## Set securityContext for containers running in pods in otelcol-instrumentation statefulset. 
+ containers:
+ otelcol:
+ securityContext: {}
+ livenessProbe:
+ initialDelaySeconds: 15
+ periodSeconds: 15
+ timeoutSeconds: 10
+ failureThreshold: 3
+ readinessProbe:
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 3
+ failureThreshold: 3
+ startupProbe:
+ periodSeconds: 3
+ failureThreshold: 60
+## Configure log collection with Otelcol
+otellogs:
+ ## Metrics from Collector
+ metrics:
+ enabled: true
+ ## Add custom labels to otelcol svc
+ serviceLabels: {}
+ ## Configure image for Opentelemetry Collector
+ image:
+ # repository: ''
+ # tag: ''
+ pullPolicy: IfNotPresent
+ logLevel: info
+ config:
+ ## Directly alter the OT configuration. The value of this key should be a dictionary, that will
+ ## be directly merged with the generated configuration, overriding existing values.
+ ## For example:
+ ## override:
+ ## processors:
+ ## batch:
+ ## send_batch_size: 512
+ ## will change the batch size of the pipeline.
+ ## WARNING: This field is not subject to backwards-compatibility guarantees offered by the rest
+ ## of this chart. It involves implementation details that may change even in minor versions.
+ ## Use with caution, and consider opening an issue, so your customization can be added in a safer way.
+ merge: {}
+ ## Completely override existing config and replace it with the contents of this value.
+ ## The value of this key should be a dictionary, that will replace the normal configuration.
+ ## This is an advanced feature, use with caution, and review the generated configuration first.
+ override: {}
+ ## Set securityContext for containers running in pods in log collector daemonset
+ daemonset:
+ securityContext:
+ ## In order to reliably read logs from mounted node logging paths, we need to run as root
+ fsGroup: 0
+ runAsUser: 0
+ runAsGroup: 0
+ ## Add custom labels to the otelcol daemonset
+ labels: {}
+ ## Add custom annotations to the otelcol daemonset
+ annotations: {}
+ ## Add custom labels to all otelcol daemonset pods
+ podLabels: {}
+ ## Add custom annotations to all otelcol daemonset pods
+ podAnnotations: {}
+ resources:
+ limits:
+ memory: 1Gi
+ cpu: 1000m
+ requests:
+ memory: 32Mi
+ cpu: 100m
+ ## Option to define priorityClassName to assign a priority class to pods.
+ ## If not set then templates/priorityclass.yaml is used.
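+ ## For example (illustrative value, not a default):
+ # priorityClassName: "otellogs-high-priority"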
+ priorityClassName: '' + ## Set securityContext for containers running in pods in log collector daemonset + containers: + otelcol: + securityContext: + capabilities: + drop: + - ALL + ## Set securityContext and image for initContainers running in pods in log collector daemonset + initContainers: + changeowner: + image: + repository: public.ecr.aws/docker/library/busybox + tag: 1.36.0 + pullPolicy: IfNotPresent + securityContext: + capabilities: + drop: + - ALL + add: + - CAP_CHOWN + nodeSelector: {} + tolerations: [] + affinity: {} + ## Extra Environment Values - allows yaml definitions + # extraEnvVars: [] + # - name: VALUE_FROM_SECRET + # valueFrom: + # secretKeyRef: + # name: secret_name + # key: secret_key + # extraVolumes: [] + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs + # extraVolumeMounts: [] + # - name: es-certs + # mountPath: /certs + # readOnly: true + ## additionalDaemonSets allows to set daemonsets with affinity, nodeSelector and resources + ## different than the main DaemonSet + ## Be careful and set nodeAffinity for the main DaemonSet, + ## as we do not support multiple pods of otellogs on the same node + ## e.g: + ## additionalDaemonSets: + ## linux: + ## nodeSelector: + ## kubernetes.io/os: linux + ## resources: + ## limits: + ## memory: 1Gi + ## cpu: 6 + ## requests: + ## memory: 32Mi + ## cpu: 2 + ## daemonset: + ## affinity: + ## nodeAffinity: + ## requiredDuringSchedulingIgnoredDuringExecution: + ## nodeSelectorTerms: + ## - matchExpressions: + ## - key: kubernetes.io/os + ## operator: NotIn + ## values: + ## - linux + additionalDaemonSets: {} +## Configure telegraf-operator +## ref: https://github.com/influxdata/helm-charts/blob/master/charts/telegraf-operator/values.yaml +telegraf-operator: + enabled: false + ## Put here the new name if you want to override the full name used for Telegraf Operator components. + # fullnameOverride: '' + image: + sidecarImage: public.ecr.aws/sumologic/telegraf:1.21.2 + replicaCount: 1 + classes: + secretName: telegraf-operator-classes + default: sumologic-prometheus + data: + sumologic-prometheus: | + [[outputs.prometheus_client]] + ## Configuration details: + ## https://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client#configuration + listen = ":9273" + metric_version = 2 + ## Disable the default collectors + collectors_exclude = ["gocollector", "process"] + ## Telegraf operator adds the internal plugin by default, and the Helm Chart doesn't let us disable it + ## Instead, drop the metrics at the output + namedrop = ["internal*"] + # imagePullSecrets: [] +## Configure Falco +## Please note that Falco is embedded in this Helm Chart for user convenience only - Sumo Logic does not provide production support for it +## This is an experimental configuration and shouldn't be used in production environment +## https://github.com/falcosecurity/charts/tree/master/falco +falco: + enabled: false + ## Put here the new name if you want to override the full name used for Falco components. 
+ # fullnameOverride: ''
+ # imagePullSecrets: []
+ image:
+ registry: public.ecr.aws
+ # repository: falcosecurity/falco-no-driver
+ ## Add kernel-devel package through MachineConfig, required to enable building of missing falco modules (only for OpenShift)
+ addKernelDevel: true
+ extra:
+ ## Add initContainer to wait until kernel-devel is installed on host
+ initContainers:
+ - name: init-falco
+ image: public.ecr.aws/docker/library/busybox:1.36.0
+ command:
+ - sh
+ - -c
+ - |
+ while [ -f /host/etc/redhat-release ] && [ -z "$(ls /host/usr/src/kernels)" ] ; do
+ echo "waiting for kernel headers to be installed"
+ sleep 3
+ done
+ volumeMounts:
+ - mountPath: /host/usr
+ name: usr-fs
+ readOnly: true
+ - mountPath: /host/etc
+ name: etc-fs
+ readOnly: true
+ driver:
+ ## Set to ebpf to enable eBPF support for Falco instead of falco-probe kernel module.
+ ## https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/troubleshoot-collection.md#falco-and-google-kubernetes-engine-gke
+ kind: module
+ loader:
+ initContainer:
+ image:
+ registry: public.ecr.aws
+ # repository: falcosecurity/falco-driver-loader
+ falco:
+ load_plugins:
+ - json
+ - k8saudit
+ json_output: true
+ ## The location of the rules file(s). This can contain one or more paths to
+ ## separate rules files.
+ ## Explicitly add missing /etc/falco/rules.available/application_rules.yaml
+ ## before https://github.com/falcosecurity/charts/issues/230 gets resolved.
+ rules_file:
+ - /etc/falco/falco_rules.yaml
+ - /etc/falco/falco_rules.local.yaml
+ - /etc/falco/k8s_audit_rules.yaml
+ - /etc/falco/rules.d
+ - /etc/falco/rules.available/application_rules.yaml
+ falcoctl:
+ artifact:
+ follow:
+ enabled: false
+ install:
+ enabled: false
+ customRules:
+ ## Mark the following as known k8s api callers:
+ ## * prometheus
+ ## * prometheus operator
+ ## * telegraf operator
+ ## * grafana sidecar
+ rules_user_known_k8s_api_callers.yaml: |-
+ - macro: user_known_contact_k8s_api_server_activities
+ condition: >
+ (container.image.repository = "quay.io/prometheus/prometheus") or
+ (container.image.repository = "quay.io/coreos/prometheus-operator") or
+ (container.image.repository = "quay.io/influxdb/telegraf-operator") or
+ (container.image.repository = "kiwigrid/k8s-sidecar")
+ rules_user_sensitive_mount_containers.yaml: |-
+ - macro: user_sensitive_mount_containers
+ condition: >
+ (container.image.repository = "falcosecurity/falco") or
+ (container.image.repository = "quay.io/prometheus/node-exporter")
+ ## NOTE: kube-proxy not exact matching because of regional ecr e.g.
+ ## 602401143452.dkr.ecr.us-west-1.amazonaws.com/eks/kube-proxy
+ rules_user_privileged_containers.yaml: |-
+ - macro: user_privileged_containers
+ condition: >
+ (container.image.repository endswith ".amazonaws.com/eks/kube-proxy")
+## Configure Tailing Sidecar Operator
+## ref: https://github.com/SumoLogic/tailing-sidecar/blob/main/helm/tailing-sidecar-operator/values.yaml
+tailing-sidecar-operator:
+ enabled: false
+ ## Put here the new name if you want to override the full name used for tailing-sidecar-operator components.
+ # fullnameOverride: '' + ## creation of Security Context Constraints in Openshift + scc: + create: false +## Configure OpenTelemetry Operator - Instrumentation +## ref: https://github.com/open-telemetry/opentelemetry-helm-charts/tree/main/charts/opentelemetry-operator +opentelemetry-operator: + enabled: true + ## Specific for Sumo Logic chart - Instrumentation resource creation + instrumentationJobImage: + image: + repository: sumologic/kubernetes-tools + tag: 2.14.0 + createDefaultInstrumentation: false + instrumentationNamespaces: '' + ## Current instrumentation doesn't support customization + ## for nodejs. Traces are always enabled. + ## nodejs: + ## traces: + ## enabled: true + instrumentation: + dotnet: + traces: + enabled: true + metrics: + enabled: true + java: + traces: + enabled: true + metrics: + enabled: true + python: + traces: + enabled: true + metrics: + enabled: true + ## Specific for OpenTelemetry Operator chart values + admissionWebhooks: + failurePolicy: Fail + enabled: true + ## skip admission webhook on our own OpenTelemetryCollector object to avoid having to wait for operator to start + objectSelector: + matchExpressions: + - key: sumologic.com/component + operator: NotIn + values: + - metrics + certManager: + enabled: false + issuerRef: {} + autoGenerateCert: true + manager: + collectorImage: + repository: public.ecr.aws/sumologic/sumologic-otel-collector + tag: 0.86.0-sumo-1 + env: {} + resources: + limits: + cpu: 250m + memory: 512Mi + requests: + cpu: 150m + memory: 256Mi +## pvcCleaner deletes unused PVCs +pvcCleaner: + metrics: + enabled: false + logs: + enabled: false + job: + image: + repository: public.ecr.aws/sumologic/kubernetes-tools-kubectl + tag: 2.20.0 + pullPolicy: IfNotPresent + resources: + limits: + memory: 256Mi + cpu: 2000m + requests: + memory: 64Mi + cpu: 100m + nodeSelector: {} + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + ## Affinity and anti-affinity + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + podLabels: {} + ## Add custom annotations + podAnnotations: {} + ## Schedule for cronJobs + schedule: '*/15 * * * *' + ## securityContext for pvcCleaner pods + securityContext: + runAsUser: 1000 diff --git a/deploy/helm/sumologic/values.schema.json b/deploy/helm/sumologic/values.schema.json new file mode 100644 index 0000000000..5871a4a45f --- /dev/null +++ b/deploy/helm/sumologic/values.schema.json @@ -0,0 +1,7078 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "comment": "Sumo Logic Kubernetes Collection configuration file\nAll the comments start with two or more # characters", + "properties": { + "nameOverride": { + "type": "string", + "description": "Used to override the Chart name.", + "default": "" + }, + "fullnameOverride": { + "type": "string", + "description": "Used to override the chart's full name. 
Names longer than 22 characters will be truncated.",
+ "default": ""
+ },
+ "namespaceOverride": {
+ "type": "string",
+ "description": "Used to override the chart's default target namespace.",
+ "default": "",
+ "comment": "Use the same namespace as namespaceOverride in 'kube-prometheus-stack.namespaceOverride' if Prometheus setup is also enabled"
+ },
+ "sumologic": {
+ "type": "object",
+ "description": "",
+ "properties": {
+ "setupEnabled": {
+ "type": "boolean",
+ "description": "If enabled, a pre-install hook will create Collector and Sources in Sumo Logic.",
+ "default": true,
+ "comment": "If enabled, a pre-install hook will create Collector and Sources in Sumo Logic"
+ },
+ "cleanupEnabled": {
+ "type": "boolean",
+ "description": "If enabled, a pre-delete hook will destroy Kubernetes secret and Sumo Logic Collector.",
+ "default": false,
+ "comment": "If enabled, a pre-delete hook will destroy Collector in Sumo Logic"
+ },
+ "envFromSecret": {
+ "type": "string",
+ "description": "If enabled, accessId and accessKey will be sourced from Secret Name given. Be sure to include at least the following env variables in your secret (1) SUMOLOGIC_ACCESSID, (2) SUMOLOGIC_ACCESSKEY",
+ "default": "sumo-api-secret",
+ "commented": true,
+ "comment": "If enabled, accessId and accessKey will be sourced from Secret Name given\nBe sure to include at least the following env variables in your secret\n(1) SUMOLOGIC_ACCESSID, (2) SUMOLOGIC_ACCESSKEY"
+ },
+ "accessId": {
+ "type": "string",
+ "description": "Sumo access ID.",
+ "default": "",
+ "commented": true,
+ "comment": "Sumo access ID"
+ },
+ "accessKey": {
+ "type": "string",
+ "description": "Sumo access key.",
+ "default": "",
+ "comment": "Sumo access key",
+ "commented": true
+ },
+ "endpoint": {
+ "type": "string",
+ "description": "Sumo API endpoint; Leave blank for automatic endpoint discovery and redirection.",
+ "default": "",
+ "comment": "Sumo API endpoint; Leave blank for automatic endpoint discovery and redirection\nref: https://help.sumologic.com/docs/api/getting-started#sumo-logic-endpoints-by-deployment-and-firewall-security"
+ },
+ "httpProxy": {
+ "type": "string",
+ "description": "HTTP proxy URL",
+ "default": "",
+ "comment": "proxy urls"
+ },
+ "httpsProxy": {
+ "type": "string",
+ "description": "HTTPS proxy URL",
+ "default": ""
+ },
+ "noProxy": {
+ "type": "string",
+ "description": "List of comma separated hostnames which should be excluded from the proxy",
+ "default": "kubernetes.default.svc",
+ "comment": "Exclude Kubernetes internal traffic from proxy"
+ },
+ "collectorName": {
+ "type": "string",
+ "description": "The name of the Sumo Logic collector that will be created in the SetUp job. Defaults to `clusterName` if not specified.",
+ "default": "",
+ "commented": true,
+ "comment": "Collector name"
+ },
+ "clusterName": {
+ "type": "string",
+ "description": "An identifier for the Kubernetes cluster. Whitespaces in the cluster name will be replaced with dashes.",
+ "default": "kubernetes",
+ "comment": "Cluster name: Note spaces are not allowed and will be replaced with dashes."
+ }, + "clusterDNSDomain": { + "type": "string", + "default": "cluster.local", + "comment": "Cluster DNS Domain\nWe use the DNS domain in internal urls to speed up DNS resolution, see https://github.com/kubernetes/kubernetes/issues/56903\nChange this if you have set a non-default DNS domain in your cluster" + }, + "cluster": { + "type": "object", + "description": "Configuration of Kubernetes for [Terraform client](https://www.terraform.io/docs/providers/kubernetes/index.html#argument-reference).", + "comment": "Configuration of Kubernetes for Terraform client\nhttps://www.terraform.io/docs/providers/kubernetes/index.html#argument-reference\nAll double quotes should be escaped here regarding Terraform syntax", + "properties": { + "host": { + "type": "string", + "description": "", + "default": "https://kubernetes.default.svc" + }, + "username": { + "type": "string", + "description": "", + "default": "", + "commented": true + }, + "password": { + "type": "string", + "description": "", + "default": "", + "commented": true + }, + "insecure": { + "type": "string", + "description": "", + "default": "", + "commented": true + }, + "client_certificate": { + "type": "string", + "description": "", + "default": "", + "commented": true + }, + "client_key": { + "type": "string", + "description": "", + "default": "", + "commented": true + }, + "cluster_ca_certificate": { + "type": "string", + "description": "", + "default": "${file(\"/var/run/secrets/kubernetes.io/serviceaccount/ca.crt\")}" + }, + "config_path": { + "type": "string", + "description": "", + "default": "", + "commented": true + }, + "config_context": { + "type": "string", + "description": "", + "default": "", + "commented": true + }, + "config_context_auth_info": { + "type": "string", + "description": "", + "default": "", + "commented": true + }, + "config_context_cluster": { + "type": "string", + "description": "", + "default": "", + "commented": true + }, + "token": { + "type": "string", + "description": "", + "default": "${file(\"/var/run/secrets/kubernetes.io/serviceaccount/token\")}" + }, + "exec": { + "type": "object", + "description": "", + "commented": true, + "properties": { + "api_version": { + "type": "string", + "description": "", + "default": "" + }, + "command": { + "type": "string", + "description": "", + "default": "" + }, + "args": { + "type": "array", + "description": "", + "default": "" + }, + "env": { + "type": "object", + "description": "", + "default": {} + } + } + } + } + }, + "autoscaling": { + "type": "object", + "description": "If you set it to false, it would set EXCLUDE_NAMESPACE= and not add the Otelcol logs and Prometheus remotestorage metrics.", + "comment": "Enable autoscaling for components that support it: logs metadata, metrics metadata, metrics collector, otelcol instrumentation, and traces gateway", + "properties": { + "enabled": { + "type": "boolean", + "default": true + } + } + }, + "collectionMonitoring": { + "type": "boolean", + "description": "If you set it to false, it would set EXCLUDE_NAMESPACE= and not add the Otelcol logs and Prometheus remotestorage metrics.", + "default": true, + "comment": "If you set it to false, it would set EXCLUDE_NAMESPACE=\nand not add the Otelcol logs and Prometheus remotestorage metrics." 
+ }, + "pullSecrets": { + "type": "array", + "description": "Optional list of secrets that will be used for pulling images for Sumo Logic's deployments and statefulsets.", + "default": [ + { + "name": "myRegistryKeySecretName" + } + ], + "comment": "Optionally specify an array of pullSecrets.\nThey will be added to serviceaccount that is used for Sumo Logic's\ndeployments and statefulsets.\n\nSecrets must be manually created in the namespace.\nref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\n", + "commented": true + }, + "podLabels": { + "type": "object", + "description": "Additional labels for the pods.", + "default": {}, + "comment": "Add custom labels to the following sumologic resources(otelcol sts, setup job, otelcol deployment)" + }, + "podAnnotations": { + "type": "object", + "description": "Additional annotations for the pods.", + "default": {}, + "comment": "Add custom annotations to the following sumologic resources(otelcol sts, setup job, otelcol deployment)" + }, + "serviceAccount": { + "type": "object", + "description": "", + "comment": "Add custom annotations to sumologic serviceAccounts", + "properties": { + "annotations": { + "type": "object", + "description": "Add custom annotations to sumologic serviceAccounts", + "default": {} + } + } + }, + "scc": { + "type": "object", + "description": "", + "comment": "creation of Security Context Constraints in Openshift", + "properties": { + "create": { + "type": "boolean", + "description": "Create OpenShift's Security Context Constraint", + "default": false + } + } + }, + "setup": { + "type": "object", + "description": "", + "properties": { + "force": { + "type": "boolean", + "description": "Force collection installation (disables k8s version verification)", + "default": true, + "commented": true, + "comment": "uncomment to force collection installation (disables k8s version verification)" + }, + "job": { + "type": "object", + "description": "", + "properties": { + "image": { + "type": "object", + "description": "", + "properties": { + "repository": { + "type": "string", + "description": "Image repository for Sumo Logic setup job docker container.", + "default": "public.ecr.aws/sumologic/kubernetes-setup" + }, + "tag": { + "type": "string", + "description": "Image tag for Sumo Logic setup job docker container.", + "default": "3.11.0" + }, + "pullPolicy": { + "type": "string", + "description": "Image pullPolicy for Sumo Logic docker container.", + "default": "IfNotPresent" + } + } + }, + "pullSecrets": { + "type": "array", + "description": "Optional list of secrets that will be used for pulling images for Sumo Logic's setup job.", + "commented": true, + "comment": "Optionally specify an array of pullSecrets.\nThey will be added to serviceaccount that is used for Sumo Logic's\nsetup job.\nSecrets must be manually created in the namespace.\nref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\n", + "default": [ + { + "name": "myRegistryKeySecretName" + } + ] + }, + "resources": { + "type": "object", + "description": "Resource requests and limits for the setup Job.", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "256Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "2000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + 
"default": "64Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "200m" + } + } + } + } + }, + "nodeSelector": { + "type": "object", + "description": "Node selector for sumologic setup job. [See docs/best-practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "Add tolerations for the setup Job.", + "comment": "Node tolerations for server scheduling to nodes with taints\nRef: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\n", + "default": [], + "example": [ + { + "key": null, + "operator": "Exists", + "effect": "NoSchedule" + } + ] + }, + "affinity": { + "type": "object", + "description": "Add affinity and anti-affinity for the setup Job.", + "default": {}, + "comment": "Affinity and anti-affinity\nRef: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity\n" + }, + "podLabels": { + "type": "object", + "description": "Additional labels for the setup Job pod.", + "default": {}, + "comment": "Add custom labels only to setup job pod" + }, + "podAnnotations": { + "type": "object", + "description": "Additional annotations for the setup Job pod.", + "default": {}, + "comment": "Add custom annotations only to setup job pod" + } + } + }, + "debug": { + "type": "boolean", + "description": "Enable debug mode (disables the automatic execution of the setup.sh script)", + "default": true, + "commented": true, + "comment": "uncomment for the debug mode (disables the automatic run of the setup.sh script)" + }, + "monitors": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "comment": "If enabled, a pre-install hook will create k8s monitors in Sumo Logic", + "description": "If enabled, a pre-install hook will create k8s monitors in Sumo Logic.", + "default": true + }, + "monitorStatus": { + "type": "string", + "description": "The installed monitors default status: enabled/disabled.", + "default": "enabled", + "comment": "The installed monitors default status: enabled/disabled" + }, + "notificationEmails": { + "type": [ + "array", + "string" + ], + "description": "A list of emails to send notifications from monitors.", + "default": [], + "comment": "A list of emails to send notifications from monitors" + } + } + }, + "dashboards": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "If enabled, a pre-install hook will install k8s dashboards in Sumo Logic.", + "default": true, + "comment": "If enabled, a pre-install hook will install k8s dashboards in Sumo Logic" + } + } + } + } + }, + "collector": { + "type": "object", + "description": "", + "properties": { + "fields": { + "type": "object", + "description": "Configuration of Sumo Logic fields. [See Sumo Logic Terraform Plugin documentation for more information](https://registry.terraform.io/providers/SumoLogic/sumologic/latest/docs/resources/collector#fields). All double quotes should be escaped here regarding Terraform syntax.", + "default": {}, + "comment": "Configuration of additional collector fields\nhttps://help.sumologic.com/docs/manage/fields/#http-source-fields" + }, + "sources": { + "type": "object", + "description": "Configuration of HTTP sources. [See docs/Terraform.md for more information](/docs/terraform.md). 
All double quotes should be escaped here regarding Terraform syntax.", + "comment": "Configuration of http sources\nSee docs/Terraform.md for more information\nname: source name visible in sumologic platform\nconfig-name: This is mostly for backward compatibility", + "properties": { + "metrics": { + "type": "object", + "description": "", + "properties": { + "default": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "(default-metrics)" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-metrics" + } + } + }, + "default-otlp": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "metrics-otlp" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-metrics-otlp" + }, + "properties": { + "type": "object", + "description": "", + "properties": { + "content_type": { + "type": "string", + "description": "", + "default": "Otlp" + } + } + } + } + }, + "apiserver": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "apiserver-metrics" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-metrics-apiserver" + } + } + }, + "controller": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "kube-controller-manager-metrics" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-metrics-kube-controller-manager" + } + } + }, + "scheduler": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "kube-scheduler-metrics" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-metrics-kube-scheduler" + } + } + }, + "state": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "kube-state-metrics" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-metrics-kube-state" + } + } + }, + "kubelet": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "kubelet-metrics" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-metrics-kubelet" + } + } + }, + "node": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "node-exporter-metrics" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-metrics-node-exporter" + } + } + }, + "control-plane": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "control-plane-metrics" + } + } + } + } + }, + "logs": { + "type": "object", + "description": "", + "properties": { + "default": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "logs" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-logs" + }, + "properties": { + "type": "object", + "description": "", + "comment": "Properties can be used to extend default settings, such as processing rules, fields etc", + "properties": { + "default_date_formats": { + "type": "array", + "description": 
"", + "items": [ + { + "comment": "Ensures that timestamp key has precedence over timestamp auto discovery", + "default": { + "format": "epoch", + "locator": "\\\"timestamp\\\":(\\\\d+)" + } + } + ] + } + } + }, + "filters": { + "type": "array", + "description": "", + "commented": true, + "default": [ + { + "name": "Test Exclude Debug", + "filter_type": "Exclude", + "regexp": ".*DEBUG.*" + } + ] + } + } + }, + "default-otlp": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "logs-otlp" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-logs-otlp" + }, + "properties": { + "type": "object", + "description": "", + "properties": { + "content_type": { + "type": "string", + "description": "", + "default": "Otlp" + } + } + } + } + } + } + }, + "events": { + "type": "object", + "description": "", + "properties": { + "default": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "events" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-events" + }, + "properties": { + "type": "object", + "description": "", + "properties": { + "default_date_formats": { + "type": "array", + "description": "", + "items": [ + { + "comment": "Ensures that timestamp key has precedence over timestamp auto discovery", + "default": { + "format": "epoch", + "locator": "\\\"timestamp\\\":(\\\\d+)" + } + } + ] + } + } + } + } + }, + "default-otlp": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "events-otlp" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-events-otlp" + }, + "properties": { + "type": "object", + "description": "", + "properties": { + "content_type": { + "type": "string", + "description": "", + "default": "Otlp" + } + } + } + } + } + } + }, + "traces": { + "type": "object", + "description": "", + "properties": { + "default": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "traces" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-traces" + }, + "properties": { + "type": "object", + "description": "", + "properties": { + "content_type": { + "type": "string", + "description": "", + "default": "Zipkin" + } + } + } + } + }, + "default-otlp": { + "type": "object", + "description": "", + "properties": { + "name": { + "type": "string", + "description": "", + "default": "traces-otlp" + }, + "config-name": { + "type": "string", + "description": "", + "default": "endpoint-traces-otlp" + }, + "properties": { + "type": "object", + "description": "", + "properties": { + "content_type": { + "type": "string", + "description": "", + "default": "Otlp" + } + } + } + } + } + } + } + } + } + } + }, + "otelcolImage": { + "type": "object", + "description": "", + "comment": "Global configuration for OpenTelemetry Collector", + "properties": { + "repository": { + "type": "string", + "description": "Default image repository for OpenTelemetry Collector. This can be overridden for specific components.", + "default": "public.ecr.aws/sumologic/sumologic-otel-collector" + }, + "tag": { + "type": "string", + "description": "Default image tag for OpenTelemetry Collector. 
This can be overridden for specific components.", + "default": "0.86.0-sumo-1" + }, + "addFipsSuffix": { + "type": "boolean", + "description": "Add a `-fips` suffix to all image tags. See [docs/security-best-practices.md](/docs/security-best-practices.md) for more information.", + "default": false, + "comment": "Add a -fips suffix to all image tags. With default tags, this results in FIPS-compliant otel images.\nSee https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/fips.md for more information." + } + } + }, + "events": { + "type": "object", + "description": "", + "comment": "Configuration for collection of Kubernetes events", + "properties": { + "enabled": { + "type": "boolean", + "description": "Defines whether collection of Kubernetes events is enabled.", + "default": true + }, + "sourceName": { + "type": "string", + "description": "Source name for the Events source.", + "default": "events", + "comment": "Source name for the Events source. Default: \"events\"" + }, + "sourceCategory": { + "type": "string", + "description": "Source category for the Events source.", + "default": "kubernetes/events", + "commented": true, + "comment": "Source category for the Events source. Default: \"\" which is resolved to \"{clusterName}/events\"" + }, + "sourceCategoryReplaceDash": { + "type": "string", + "description": "Used to replace - with another character.", + "default": "/", + "comment": "Used to replace '-' with another character." + }, + "persistence": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable persistence for the event collector. Persistence lets the collector avoid reingesting events on restart and buffer them locally if unable to reach the backend.", + "default": true + }, + "size": { + "type": "string", + "description": "Size of the persistent storage volume", + "default": "10Gi" + }, + "persistentVolume": { + "type": "object", + "description": "", + "comment": "Configuration for the Persistent Volume and Persistent Volume Claim\nwhere the storage is kept", + "properties": { + "path": { + "type": "string", + "description": "Local filesystem path the persistent storage volume will be mounted at.", + "default": "/var/lib/storage/events" + }, + "accessMode": { + "type": "string", + "description": "The accessMode for the persistent storage volume", + "default": "ReadWriteOnce" + }, + "pvcLabels": { + "type": "object", + "description": "Additional PersistentVolumeClaim labels for persistent storage volumes", + "default": {}, + "comment": "Add custom labels to otelcol event statefulset PVC" + }, + "storageClass": { + "type": "string", + "description": "The storageClassName for the persistent storage volume", + "default": "", + "commented": true + } + } + } + } + }, + "sourceType": { + "type": "string", + "description": "The type of the Sumo Logic source being used for events ingestion. 
Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/logs-metrics/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/).", + "default": "otlp" + } + } + }, + "logs": { + "type": "object", + "comment": "Logs configuration\nSet the enabled flag to false for disabling logs ingestion altogether.", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Set the enabled flag to false for disabling logs ingestion altogether.", + "default": true + }, + "collector": { + "type": "object", + "description": "", + "properties": { + "otelcol": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable OpenTelemetry logs collector.", + "default": true + } + } + }, + "otelcloudwatch": { + "comment": "Experimental", + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to enable CloudWatch Collection", + "default": false + }, + "roleArn": { + "type": "string", + "description": "AWS role ARN, to authenticate with CloudWatch", + "default": "" + }, + "persistence": { + "type": "object", + "comment": "Configure persistence for the cloudwatch collector", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control persistence for the CloudWatch collector", + "default": true + } + } + }, + "region": { + "type": "string", + "description": "EKS Fargate cluster region", + "default": "" + }, + "pollInterval": { + "type": "string", + "description": "CloudWatch poll interval", + "default": "1m" + }, + "logGroups": { + "comment": "A map of log group and stream prefixes\nThis is a map of log group and stream prefix, for example:\nlogGroups:\n fluent-bit:\n names: [fluent-bit]", + "type": "object", + "description": "Log Groups configuration for AWS CloudWatch receiver", + "default": {} + } + } + } + } + }, + "multiline": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable multiline detection for Kubernetes container logs.", + "default": true + }, + "first_line_regex": { + "type": "string", + "description": "Regular expression to match first line of multiline logs.", + "default": "^\\[?\\d{4}-\\d{1,2}-\\d{1,2}.\\d{2}:\\d{2}:\\d{2}" + }, + "additional": { + "type": "array", + "description": "List of additional conditions and expressions to match first line of multiline logs. 
See [Multiline](/docs/collecting-container-logs.md#conditional-multiline-log-parsing) for more information.", + "default": [], + "comment": "Additional configuration takes precedence over first_line_regex and are executed only for first matching condition\n\nExample:\n- first_line_regex: \"^@@@@ First Line\"\n condition: 'attributes[\"k8s.namespace.name\"] == \"foo\"'\n- first_line_regex: \"^--- First Line\"\n condition: 'attributes[\"k8s.container.name\"] matches \"^bar-.*\"'\n\nNOTE: See below link for full reference:\nhttps://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/collecting-container-logs.md#conditional-multiline-log-parsing" + } + } + }, + "container": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable collecting logs from Kubernetes containers.", + "default": true + }, + "format": { + "type": "string", + "comment": "Format to post logs into Sumo: fields, json, json_merge, or text.\nNOTE: json is an alias for fields\nNOTE: Multiline log detection works differently for `text` format. See below link for full reference:\nhttps://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/collecting-container-logs.md#text-log-format", + "description": "Format for container logs.", + "default": "fields" + }, + "keep_time_attribute": { + "comment": "When set to `true`, preserves the `time` attribute, which is a string representation of the `timestamp` attribute.", + "type": "boolean", + "description": "When set to `true`, preserves the `time` attribute, which is a string representation of the `timestamp` attribute.", + "default": false + }, + "otelcol": { + "type": "object", + "description": "", + "properties": { + "extraProcessors": { + "comment": "Extra processors for container logs. See [/docs/collecting-container-logs.md](/docs/collecting-container-logs.md) for details.", + "type": "array", + "description": "Extra processors for container logs. See [/docs/collecting-container-logs.md](/docs/collecting-container-logs.md) for details.", + "default": [] + } + } + }, + "sourceHost": { + "comment": "Set the _sourceHost metadata field in Sumo Logic.", + "type": "string", + "description": "Set the \\_sourceHost metadata field in Sumo Logic.", + "default": "" + }, + "sourceName": { + "type": "string", + "comment": "Set the _sourceName metadata field in Sumo Logic.", + "description": "Set the \\_sourceName metadata field in Sumo Logic.", + "default": "%{namespace}.%{pod}.%{container}" + }, + "sourceCategory": { + "comment": "Set the _sourceCategory metadata field in Sumo Logic.", + "type": "string", + "description": "Set the \\_sourceCategory metadata field in Sumo Logic.", + "default": "%{namespace}/%{pod_name}" + }, + "sourceCategoryPrefix": { + "comment": "Set the prefix, for _sourceCategory metadata.", + "type": "string", + "description": "Set the prefix, for \\_sourceCategory metadata.", + "default": "kubernetes/" + }, + "sourceCategoryReplaceDash": { + "comment": "Used to replace - with another character.", + "type": "string", + "description": "Used to replace - with another character.", + "default": "/" + }, + "excludeContainerRegex": { + "type": "string", + "comment": "A regular expression for containers.\nMatching containers will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for container names. 
Logs from matching containers will not be sent to Sumo.", + "default": "" + }, + "excludeHostRegex": { + "type": "string", + "description": "A regular expression for Kubernetes node names. Logs from pods running on matching nodes will not be sent to Sumo.", + "default": "", + "comment": "A regular expression for hosts.\nMatching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol)." + }, + "excludeNamespaceRegex": { + "type": "string", + "comment": "A regular expression for namespaces.\nMatching namespaces will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for Kubernetes namespace names. Logs from pods running in matching namespaces will not be sent to Sumo.", + "default": "" + }, + "excludePodRegex": { + "type": "string", + "comment": "A regular expression for pods.\nMatching pods will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for pod names. Logs from matching pods will not be sent to Sumo.", + "default": "" + }, + "perContainerAnnotationsEnabled": { + "type": "boolean", + "description": "Enable container-level pod annotations.", + "comment": "Defines whether container-level pod annotations are enabled.", + "default": false + }, + "perContainerAnnotationPrefixes": { + "type": "array", + "description": "Defines the list of prefixes of container-level pod annotations.", + "comment": "Defines the list of prefixes of container-level pod annotations.", + "default": [] + } + } + }, + "systemd": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable collecting systemd logs from Kubernets nodes.", + "default": true + }, + "units": { + "type": "array", + "comment": "systemd units to collect logs from", + "commented": true, + "description": "List of systemd units to collect logs from.", + "default": [ + "docker.service" + ] + }, + "otelcol": { + "type": "object", + "description": "", + "properties": { + "extraProcessors": { + "type": "array", + "comment": "Extra processors for systemd logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details.", + "description": "Extra processors for systemd logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details.", + "default": [] + } + } + }, + "sourceName": { + "comment": "Set the _sourceName metadata field in Sumo Logic.", + "type": "string", + "description": "Set the \\_sourceName metadata field in Sumo Logic.", + "default": "%{_sourceName}" + }, + "sourceCategory": { + "comment": "Set the _sourceCategory metadata field in Sumo Logic.", + "type": "string", + "description": "Set the \\_sourceCategory metadata field in Sumo Logic.", + "default": "system" + }, + "sourceCategoryPrefix": { + "comment": "Set the prefix, for _sourceCategory metadata.", + "type": "string", + "description": "Set the prefix, for \\_sourceCategory metadata.", + "default": "kubernetes/" + }, + "sourceCategoryReplaceDash": { + "comment": "Used to replace - with another character.", + "type": "string", + "description": "Used to replace - with another character.", + "default": "/" + }, + "excludeFacilityRegex": { + "type": "string", + "comment": "A regular expression for facility.\nMatching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for facility. 
Matching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "default": "" + }, + "excludeHostRegex": { + "type": "string", + "comment": "A regular expression for hosts.\nMatching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "default": "" + }, + "excludePriorityRegex": { + "type": "string", + "comment": "A regular expression for priority.\nMatching priority will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for priority. Matching priority will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "default": "" + }, + "excludeUnitRegex": { + "type": "string", + "comment": "A regular expression for unit.\nMatching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for unit. Matching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "default": "" + } + } + }, + "kubelet": { + "type": "object", + "description": "", + "properties": { + "otelcol": { + "type": "object", + "description": "", + "properties": { + "extraProcessors": { + "type": "array", + "comment": "Extra processors for kubelet logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details.", + "description": "Extra processors for systemd logs. See [/docs/collecting-systemd-logs.md](/docs/collecting-systemd-logs.md) for details.", + "default": [] + } + } + }, + "sourceName": { + "type": "string", + "comment": "Set the _sourceName metadata field in Sumo Logic.", + "description": "Set the \\_sourceName metadata field in Sumo Logic.", + "default": "k8s_kubelet" + }, + "sourceCategory": { + "type": "string", + "comment": "Set the _sourceCategory metadata field in Sumo Logic.", + "description": "Set the \\_sourceCategory metadata field in Sumo Logic.", + "default": "kubelet" + }, + "sourceCategoryPrefix": { + "type": "string", + "comment": "Set the prefix, for _sourceCategory metadata.", + "description": "Set the prefix, for \\_sourceCategory metadata.", + "default": "kubernetes/" + }, + "sourceCategoryReplaceDash": { + "type": "string", + "comment": "Used to replace - with another character.", + "description": "Used to replace - with another character.", + "default": "/" + }, + "excludeFacilityRegex": { + "type": "string", + "comment": "A regular expression for facility.\nMatching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for facility. Matching facility will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "default": "" + }, + "excludeHostRegex": { + "type": "string", + "comment": "A regular expression for hosts.\nMatching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "default": "" + }, + "excludePriorityRegex": { + "type": "string", + "comment": "A regular expression for priority.\nMatching priority will be excluded from Sumo. 
The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for priority. Matching priority will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "default": "" + }, + "excludeUnitRegex": { + "type": "string", + "comment": "A regular expression for unit.\nMatching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "description": "A regular expression for unit. Matching unit will be excluded from Sumo. The logs will still be sent to logs metadata provider (otelcol).", + "default": "" + } + } + }, + "fields": { + "type": "array", + "comment": "Fields to be created at Sumo Logic to ensure logs are tagged with\nrelevant metadata.\nhttps://help.sumologic.com/docs/manage/fields/#manage-fields", + "description": "Fields to be created at Sumo Logic to ensure logs are tagged with relevant metadata. [Sumo Logic help](https://help.sumologic.com/docs/manage/fields/#manage-fields)", + "default": [ + "cluster", + "container", + "daemonset", + "deployment", + "host", + "namespace", + "node", + "pod", + "service", + "statefulset" + ] + }, + "additionalFields": { + "type": "array", + "comment": "Additional fields to be created in Sumo Logic.\nhttps://help.sumologic.com/docs/manage/fields/#manage-fields", + "description": "Additional Fields to be created in Sumo Logic. [Sumo Logic help](https://help.sumologic.com/docs/manage/fields/#manage-fields)", + "default": [] + }, + "sourceType": { + "type": "string", + "description": "The type of the Sumo Logic source being used for logs ingestion. Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/logs-metrics/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/).", + "default": "otlp" + } + } + }, + "metrics": { + "type": "object", + "description": "", + "comment": "Metrics configuration\nSet the enabled flag to false for disabling metrics ingestion altogether.", + "properties": { + "enabled": { + "type": "boolean", + "description": "Set the enabled flag to false for disabling metrics ingestion altogether.", + "default": true + }, + "collector": { + "type": "object", + "description": "", + "comment": "Otel metrics collector. Replaces Prometheus.\nTo enable, you need opentelemetry-operator enabled as well.", + "properties": { + "otelcol": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable experimental otelcol metrics collector", + "default": true + }, + "scrapeInterval": { + "type": "string", + "description": "The default scrape interval for the collector.", + "comment": "Default scrape interval", + "default": "30s" + }, + "autoscaling": { + "comment": "Option to turn autoscaling on for otelcol and specify params for HPA.\nAutoscaling needs metrics-server to access cpu metrics.", + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Option to turn autoscaling on for the experimental otelcol metrics and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. collector", + "default": false + }, + "minReplicas": { + "type": "integer", + "description": "Default min replicas for autoscaling. collector", + "default": 1 + }, + "maxReplicas": { + "type": "integer", + "description": "Default max replicas for autoscaling. 
collector", + "default": 10 + }, + "targetCPUUtilizationPercentage": { + "type": "integer", + "description": "The desired target CPU utilization for autoscaling.", + "default": 70 + }, + "targetMemoryUtilizationPercentage": { + "type": "integer", + "description": "The desired target memory utilization for autoscaling.", + "default": 70 + } + } + }, + "nodeSelector": { + "type": "object", + "description": "Node selector for the experimental otelcol metrics. [See docs/best-practices.md for more information.](/docs/best-practices.md).", + "default": {} + }, + "podAnnotations": { + "comment": "Add custom annotations only to merics otelcol sts pods", + "type": "object", + "description": "Additional annotations for the experimental otelcol metrics pods.", + "default": {} + }, + "podLabels": { + "comment": "Add custom labels only to metrics otelcol sts pods", + "type": "object", + "description": "Additional labels for the experimental otelcol metrics pods.", + "default": {} + }, + "priorityClassName": { + "comment": "Option to define priorityClassName to assign a priority class to pods.", + "type": "string", + "description": "Priority class name for the experimental otelcol metrics.", + "default": "" + }, + "replicaCount": { + "type": "integer", + "description": "Replica count for the experimental otelcol metrics collector", + "default": 1 + }, + "resources": { + "type": "object", + "description": "Resource requests and limits for the experimental otelcol metrics collector", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "2Gi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "1000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "768Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "100m" + } + } + } + } + }, + "serviceMonitorSelector": { + "commented": true, + "comment": "Selector for ServiceMonitors used for target discovery. By default, this selects resources created by this Chart.\nSee https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr", + "type": "object", + "description": "Selector for ServiceMonitors used for target discovery. By default, we select ServiceMonitors created by the Chart. See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr", + "default": {} + }, + "podMonitorSelector": { + "comment": "Selector for PodMonitors used for target discovery. By default, this selects resources created by this Chart.\nSee https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr", + "commented": true, + "type": "object", + "description": "Selector for PodMonitors used for target discovery. By default, we select PodMonitors created by the Chart. See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr", + "default": {} + }, + "securityContext": { + "type": "object", + "description": "The securityContext configuration for the experimental otelcol metrics.", + "properties": { + "fsGroup": { + "type": "integer", + "comment": "The group ID of all processes in the statefulset containers. 
This can be anything, but it does need to be set.\nThe default is 0 (root), and containers don't have write permissions for volumes in that case.", + "description": "", + "default": 999 + } + } + }, + "tolerations": { + "type": "array", + "description": "Tolerations for the experimental otelcol metrics.", + "default": [] + }, + "kubelet": { + "type": "object", + "description": "", + "comment": "Configuration for kubelet metrics", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable collection of kubelet metrics.", + "default": true + } + } + }, + "cAdvisor": { + "type": "object", + "comment": "Configuration for cAdvisor metrics", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable collection of cAdvisor metrics.", + "default": true + } + } + }, + "annotatedPods": { + "type": "object", + "comment": "Enable collection of metrics from Pods annotated with prometheus.io/* keys.\nSee https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/collecting-application-metrics.md#application-metrics-are-exposed-one-endpoint-scenario for more information.", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable collection of metrics from Pods annotated with prometheus.io/\\* keys. See [docs/collecting-application-metrics.md](/docs/collecting-application-metrics.md#application-metrics-are-exposed-one-endpoint-scenario) for more information.", + "default": true + } + } + }, + "allocationStrategy": { + "commented": true, + "comment": "Allocation strategy for the scrape target allocator. Valid values are: least-weighted and consistent-hashing.\nSee: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocator", + "type": "string", + "description": "Allocation strategy for the scrape target allocator. Valid values are: least-weighted and consistent-hashing. See: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocator", + "default": "least-weighted" + } + } + } + } + }, + "enableDefaultFilters": { + "type": "boolean", + "comment": "Default metric filters for Sumo Apps", + "description": "Enable default metric filters for Sumo Apps.", + "default": false + }, + "dropHistogramBuckets": { + "comment": "By default, the Helm Chart collects some high-cardinality histogram metrics, as Sumo Apps make use of the sum and count components.\nThis setting causes the metrics collector to drop the actual histogram buckets, keeping only the sum and the count.\nThis affects the following metrics:\n- apiserver_request_duration_seconds\n- coredns_dns_request_duration_seconds\n- kubelet_runtime_operations_duration_seconds", + "type": "boolean", + "description": "Drop buckets from select high-cardinality histogram metrics, leaving only the sum and count components.", + "default": true + }, + "otelcol": { + "type": "object", + "description": "", + "properties": { + "extraProcessors": { + "comment": "Includes additional processors into pipelines.\nIt can be used for filtering metrics, renaming, changing metadata and so on.\nThis is list of objects, for example:\nextraProcessors:\n- filterprocessor:\n exclude:\n match_type: strict\n metric_names:\n - hello_world\n - hello/world", + "type": "array", + "description": "Extra processors configuration for metrics pipeline. 
See [/docs/collecting-application-metrics.md#metrics-modifications](/docs/collecting-application-metrics.md#metrics-modifications) for more information.", +            "default": [] +          } +        } +      }, +      "remoteWriteProxy": { +        "comment": "Enable a load balancing proxy for Prometheus remote writes.\nPrometheus remote write uses a single persistent HTTP connection per target,\nwhich interacts poorly with TCP load balancing with iptables that K8s Services do.\nUse a real HTTP load balancer for this instead.\nThis is an advanced feature, enable only if you're experiencing performance\nissues with metrics metadata enrichment.", +        "type": "object", +        "description": "", +        "properties": { +          "enabled": { +            "type": "boolean", +            "description": "Enable a load balancing proxy for Prometheus remote writes. [See docs for more information.](/docs/prometheus.md#using-a-load-balancing-proxy-for-prometheus-remote-write)", +            "default": false +          }, +          "config": { +            "type": "object", +            "description": "", +            "properties": { +              "clientBodyBufferSize": { +                "comment": "Increase this if you've increased samples_per_send in Prometheus to prevent nginx\nfrom spilling proxied request bodies to disk", +                "type": "string", +                "description": "See the [nginx documentation](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size). Increase if you've also increased samples per send in Prometheus remote write.", +                "default": "64k" +              }, +              "workerCountAutotune": { +                "comment": "This feature autodetects how much CPU is assigned to the nginx instance and sets\nthe right amount of workers based on that. Disable to use the default of 8 workers.", +                "type": "boolean", +                "description": "This feature autodetects how much CPU is assigned to the nginx instance and sets the right amount of workers based on that. 
Disable to use the default of 8 workers.", + "default": true + }, + "port": { + "comment": "Nginx listen port", + "type": "integer", + "description": "Port on which remote write proxy is going to be exposed", + "default": 8080 + }, + "enableAccessLogs": { + "comment": "Nginx access logs", + "type": "boolean", + "description": "Enable nginx access logs.", + "default": false + } + } + }, + "replicaCount": { + "type": "integer", + "description": "Number of replicas in the remote write proxy deployment.", + "default": 3 + }, + "image": { + "type": "object", + "description": "Nginx docker image for the remote write proxy.", + "properties": { + "repository": { + "type": "string", + "description": "", + "default": "public.ecr.aws/sumologic/nginx-unprivileged" + }, + "tag": { + "type": "string", + "description": "", + "default": "1.25.2-alpine" + }, + "pullPolicy": { + "type": "string", + "description": "", + "default": "IfNotPresent" + } + } + }, + "resources": { + "type": "object", + "description": "Resource requests and limits for the remote write proxy container.", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "cpu": { + "type": "string", + "description": "", + "default": "1000m" + }, + "memory": { + "type": "string", + "description": "", + "default": "256Mi" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "cpu": { + "type": "string", + "description": "", + "default": "100m" + }, + "memory": { + "type": "string", + "description": "", + "default": "128Mi" + } + } + } + } + }, + "livenessProbe": { + "type": "object", + "description": "Liveness probe settings for the remote write proxy container.", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 30 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 5 + }, + "successThreshold": { + "type": "integer", + "description": "", + "default": 1 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 6 + } + } + }, + "readinessProbe": { + "type": "object", + "description": "Readiness probe settings for the remote write proxy container.", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 5 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 5 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "successThreshold": { + "type": "integer", + "description": "", + "default": 1 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "securityContext": { + "type": "object", + "description": "The securityContext configuration for the remote write proxy.", + "default": {} + }, + "nodeSelector": { + "type": "object", + "description": "Node selector for the remote write proxy deployment. 
[See docs/best-practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "Tolerations for the remote write proxy deployment.", + "default": [] + }, + "affinity": { + "type": "object", + "description": "Affinity for the remote write proxy deployment.", + "default": {} + }, + "priorityClassName": { + "type": "string", + "comment": "Option to define priorityClassName to assign a priority class to pods.", + "description": "Priority class name for the remote write proxy deployment.", + "default": "" + }, + "podLabels": { + "type": "object", + "comment": "Add custom labels only to metrics sts pods", + "description": "Additional labels for the remote write proxy container.", + "default": {} + }, + "podAnnotations": { + "comment": "Add custom annotations only to metrics sts pods", + "type": "object", + "description": "Additional annotations for for the remote write proxy container.", + "default": {} + } + } + }, + "serviceMonitors": { + "type": "array", + "comment": "Prometheus serviceMonitors related to Sumo Logic services\nThey are applied only if kube-prometheus-stack is enabled", + "description": "Configuration of Sumo Logic Kubernetes Collection components serviceMonitors", + "default": [ + { + "name": "collection-sumologic-otelcol-logs", + "additionalLabels": { + "sumologic.com/app": "otelcol-logs" + }, + "endpoints": [ + { + "port": "otelcol-metrics" + } + ], + "selector": { + "matchLabels": { + "sumologic.com/app": "otelcol-logs", + "sumologic.com/scrape": "true" + } + } + }, + { + "name": "collection-sumologic-otelcol-metrics", + "additionalLabels": { + "sumologic.com/app": "otelcol-metrics" + }, + "endpoints": [ + { + "port": "otelcol-metrics" + } + ], + "selector": { + "matchLabels": { + "sumologic.com/app": "otelcol-metrics", + "sumologic.com/scrape": "true" + } + } + }, + { + "name": "collection-sumologic-metrics-collector", + "additionalLabels": { + "sumologic.com/app": "otelcol-metrics" + }, + "endpoints": [ + { + "port": "monitoring" + } + ], + "selector": { + "matchLabels": { + "sumologic.com/app": "otelcol", + "sumologic.com/component": "metrics", + "sumologic.com/scrape": "true" + } + } + }, + { + "name": "collection-sumologic-otelcol-logs-collector", + "additionalLabels": { + "sumologic.com/app": "otelcol-logs-collector" + }, + "endpoints": [ + { + "port": "metrics" + } + ], + "selector": { + "matchLabels": { + "sumologic.com/app": "otelcol-logs-collector", + "sumologic.com/scrape": "true" + } + } + }, + { + "name": "collection-sumologic-otelcol-events", + "additionalLabels": { + "sumologic.com/app": "otelcol-events" + }, + "endpoints": [ + { + "port": "otelcol-metrics" + } + ], + "selector": { + "matchLabels": { + "sumologic.com/app": "otelcol-events", + "sumologic.com/scrape": "true" + } + } + }, + { + "name": "collection-sumologic-otelcol-traces", + "additionalLabels": { + "sumologic.com/app": "otelcol" + }, + "endpoints": [ + { + "port": "metrics" + } + ], + "selector": { + "matchLabels": { + "sumologic.com/component": "instrumentation", + "sumologic.com/scrape": "true" + } + } + }, + { + "name": "collection-sumologic-prometheus", + "endpoints": [ + { + "port": "http-web", + "path": "/metrics", + "metricRelabelings": [ + { + "action": "keep", + "regex": "prometheus_remote_storage_.*", + "sourceLabels": [ + "__name__" + ] + } + ] + } + ], + "selector": { + "matchLabels": { + "app": "kube-prometheus-stack-prometheus" + } + } + } + ] + }, + "sourceType": { + "type": "string", + "comment": "The 
type of source we send to in Sumo. The possible values are http and otlp.\nConsult the documentation for more information.", + "description": "The type of the Sumo Logic source being used for metrics ingestion. Can be `http` or `otlp`.", + "default": "otlp" + } + } + }, + "traces": { + "type": "object", + "comment": "Traces configuration\nSet the enabled flag to false to disable traces ingestion.", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Set the enabled flag to true to enable tracing ingestion. _Tracing must be enabled for the account first. Please contact your Sumo representative for activation details_", + "default": true + }, + "spans_per_request": { + "type": "integer", + "comment": "How many spans per request should be send to receiver", + "description": "Maximum number of spans sent in single batch", + "default": 100 + }, + "sourceType": { + "type": "string", + "description": "The type of the Sumo Logic source being used for traces ingestion. Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/traces/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/).", + "default": "otlp" + } + } + } + } + }, + "metrics-server": { + "type": "object", + "comment": "Configure metrics-server\nref: https://github.com/bitnami/charts/blob/master/bitnami/metrics-server/values.yaml", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "comment": "Set the enabled flag to true for enabling metrics-server.\nThis is required before enabling autoscaling unless you have an existing metrics-server in the cluster.", + "description": "Set the enabled flag to true for enabling metrics-server. This is required before enabling autoscaling unless you have an existing metrics-server in the cluster.", + "default": false + }, + "fullnameOverride": { + "type": "string", + "comment": "Put here the new name if you want to override the full name used for metrics-server components.", + "commented": true, + "description": "Used to override the chart's full name.", + "default": "" + }, + "apiService": { + "type": "object", + "description": "", + "properties": { + "create": { + "type": "boolean", + "description": "Specifies whether the v1beta1.metrics.k8s.io API service should be created.", + "default": true + } + } + }, + "extraArgs": { + "type": "array", + "description": "Extra arguments to pass to metrics-server on start up.", + "default": [ + "--kubelet-insecure-tls=true", + "--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname" + ] + }, + "image": { + "type": "object", + "comment": "Optionally specify image options for metrics-server", + "commented": true, + "description": "", + "properties": { + "pullSecrets": { + "type": "array", + "comment": "Optionally specify an array of imagePullSecrets.\nSecrets must be manually created in the namespace.\nref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\n", + "description": "Pull secrets for metrics-server images. 
For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config).", + "default": [ + "imagepullsecret" + ] + } + } + } + } + }, + "kube-prometheus-stack": { + "type": "object", + "comment": "Configure kube-prometheus-stack\nref: https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "comment": "Uncomment the flag below to not install kube-prometheus-stack helm chart\nas a dependency along with this helm chart.\nThis is needed e.g. if you want to use a different version of kube-prometheus-stack -\nsee https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/best-practices.md#using-newer-kube-prometheus-stack.\nTo disable metrics collection, set `sumologic.metrics.enabled: false` and leave this flag commented out or set it to `false`.\nDo not set this flag explicitly to `true` while at the same time setting `sumologic.metrics.enabled: false`,\nas this will make Prometheus try to write to an non-existent metrics enrichment service.", + "commented": true, + "description": "Flag to control deploying Prometheus Operator Helm sub-chart.", + "default": false + }, + "global": { + "type": "object", + "commented": true, + "description": "", + "properties": { + "imagePullSecrets": { + "type": "array", + "comment": "Reference to one or more secrets to be used when pulling images\nref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\n", + "description": "Pull secrets for Kube Prometheus Stack images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config).", + "default": [ + { + "name": "image-pull-secret" + } + ] + } + } + }, + "fullnameOverride": { + "type": "string", + "comment": "Put here the new name if you want to override the full name used for Kube Prometheus Stack components.", + "commented": true, + "description": "Used to override the chart's full name.", + "default": "" + }, + "namespaceOverride": { + "type": "string", + "comment": "Put here the new namespace if you want to override the namespace used for Kube Prometheus Stack components.", + "commented": true, + "description": "Used to override the chart's default namespace.", + "default": "" + }, + "kubeTargetVersionOverride": { + "type": "string", + "comment": "Provide a target gitVersion of K8S, in case .Capabilites.KubeVersion is not available (e.g. helm template).\nChanging this may break Sumo Logic apps.", + "commented": true, + "description": "Provide a target gitVersion of K8S, in case .Capabilites.KubeVersion is not available (e.g. helm template). 
Changing this may break Sumo Logic apps.", + "default": "" + }, + "commonLabels": { + "type": "object", + "comment": "Labels to apply to all kube-prometheus-stack resources", + "description": "Labels to apply to all Kube Prometheus Stack resources", + "default": {} + }, + "defaultRules": { + "type": "object", + "description": "", + "properties": { + "rules": { + "type": "object", + "description": "Control which default recording and alerting rules are enabled.", + "properties": { + "alertmanager": { + "type": "boolean", + "description": "", + "default": false + }, + "etcd": { + "type": "boolean", + "description": "", + "default": false + }, + "configReloaders": { + "type": "boolean", + "description": "", + "default": false + }, + "general": { + "type": "boolean", + "description": "", + "default": false + }, + "k8s": { + "type": "boolean", + "description": "", + "default": false + }, + "kubeApiserverAvailability": { + "type": "boolean", + "description": "", + "default": false + }, + "kubeApiserverBurnrate": { + "type": "boolean", + "description": "", + "default": false + }, + "kubeApiserverHistogram": { + "type": "boolean", + "description": "", + "default": false + }, + "kubeApiserverSlos": { + "type": "boolean", + "description": "", + "default": false + }, + "kubeControllerManager": { + "type": "boolean", + "description": "", + "default": false + }, + "kubelet": { + "type": "boolean", + "description": "", + "default": false + }, + "kubeProxy": { + "type": "boolean", + "description": "", + "default": false + }, + "kubePrometheusGeneral": { + "type": "boolean", + "description": "", + "default": false + }, + "kubePrometheusNodeRecording": { + "type": "boolean", + "description": "", + "default": false + }, + "kubernetesApps": { + "type": "boolean", + "description": "", + "default": false + }, + "kubernetesResources": { + "type": "boolean", + "description": "", + "default": false + }, + "kubernetesStorage": { + "type": "boolean", + "description": "", + "default": false + }, + "kubernetesSystem": { + "type": "boolean", + "description": "", + "default": false + }, + "kubeSchedulerAlerting": { + "type": "boolean", + "description": "", + "default": false + }, + "kubeSchedulerRecording": { + "type": "boolean", + "description": "", + "default": false + }, + "kubeStateMetrics": { + "type": "boolean", + "description": "", + "default": false + }, + "network": { + "type": "boolean", + "description": "", + "default": false + }, + "node": { + "type": "boolean", + "description": "", + "default": false + }, + "nodeExporterAlerting": { + "type": "boolean", + "description": "", + "default": false + }, + "nodeExporterRecording": { + "type": "boolean", + "description": "", + "default": false + }, + "prometheus": { + "type": "boolean", + "description": "", + "default": false + }, + "prometheusOperator": { + "type": "boolean", + "description": "", + "default": false + }, + "windows": { + "type": "boolean", + "description": "", + "default": false + } + } + } + } + }, + "kubeApiServer": { + "type": "object", + "comment": "NOTE changing the serviceMonitor scrape interval to be >1m can result in metrics from recording\nrules to be missing and empty panels in Sumo Logic Kubernetes apps.", + "description": "", + "properties": { + "serviceMonitor": { + "type": "object", + "description": "", + "properties": { + "interval": { + "type": "string", + "comment": "Scrape interval. If not set, the Prometheus default scrape interval is used.", + "description": "Kubernetes API Server metrics scrape interval. 
If not set, the Prometheus default scrape interval is used.", + "default": "" + }, + "metricRelabelings": { + "type": "array", + "comment": "see docs/scraped_metrics.md\napiserver_request_count\napiserver_request_total\napiserver_request_duration_seconds_count\napiserver_request_duration_seconds_sum", + "description": "Kubernetes API Server MetricRelabelConfigs", + "default": [ + { + "action": "keep", + "regex": "(?:apiserver_request_(?:count|total)|apiserver_request_(?:duration_seconds)_(?:count|sum))", + "sourceLabels": [ + "__name__" + ] + } + ] + } + } + } + } + }, + "kubelet": { + "type": "object", + "description": "", + "properties": { + "serviceMonitor": { + "type": "object", + "description": "", + "properties": { + "interval": { + "type": "string", + "comment": "Scrape interval. If not set, the Prometheus default scrape interval is used.", + "description": "Kubelet metrics scrape interval. If not set, the Prometheus default scrape interval is used.", + "default": "" + }, + "probes": { + "type": "boolean", + "comment": "Enable scraping /metrics/probes from kubelet's service", + "description": "Enable scraping /metrics/probes from kubelet's service", + "default": false + }, + "resource": { + "type": "boolean", + "comment": "Enable scraping /metrics/resource/v1alpha1 from kubelet's service", + "description": "Enable scraping /metrics/resource from kubelet's service", + "default": false + }, + "metricRelabelings": { + "type": "array", + "comment": "see docs/scraped_metrics.md\nkubelet metrics:\nkubelet_docker_operations_errors\nkubelet_docker_operations_errors_total\nkubelet_docker_operations_duration_seconds_count\nkubelet_docker_operations_duration_seconds_sum\nkubelet_runtime_operations_duration_seconds_count\nkubelet_runtime_operations_duration_seconds_sum\nkubelet_running_container_count\nkubelet_running_containers\nkubelet_running_pod_count\nkubelet_running_pods\nkubelet_docker_operations_latency_microseconds\nkubelet_docker_operations_latency_microseconds_count\nkubelet_docker_operations_latency_microseconds_sum\nkubelet_runtime_operations_latency_microseconds\nkubelet_runtime_operations_latency_microseconds_count\nkubelet_runtime_operations_latency_microseconds_sum", + "description": "Kubelet MetricRelabelConfigs", + "default": [ + { + "action": "keep", + "regex": "(?:kubelet_docker_operations_errors(?:|_total)|kubelet_(?:docker|runtime)_operations_duration_seconds_(?:count|sum)|kubelet_running_(?:container|pod)(?:_count|s)|kubelet_(:?docker|runtime)_operations_latency_microseconds(?:|_count|_sum))", + "sourceLabels": [ + "__name__" + ] + }, + { + "action": "labeldrop", + "regex": "id" + } + ] + }, + "cAdvisorMetricRelabelings": { + "type": "array", + "comment": "see docs/scraped_metrics.md\ncadvisor container metrics\ncontainer_cpu_usage_seconds_total\ncontainer_fs_limit_bytes\ncontainer_fs_usage_bytes\ncontainer_memory_working_set_bytes\ncontainer_cpu_cfs_throttled_seconds_total\ncadvisor aggregate container metrics\ncontainer_network_receive_bytes_total\ncontainer_network_transmit_bytes_total", + "description": "Kubelet CAdvisor MetricRelabelConfigs", + "items": [ + { + "default": { + "action": "keep", + "regex": "(?:container_cpu_usage_seconds_total|container_memory_working_set_bytes|container_fs_usage_bytes|container_fs_limit_bytes|container_cpu_cfs_throttled_seconds_total|container_network_receive_bytes_total|container_network_transmit_bytes_total)", + "sourceLabels": [ + "__name__" + ] + } + }, + { + "comment": "Drop container metrics with container tag set to an empty 
string:\nthese are the pod aggregated container metrics which can be aggregated\nin Sumo anyway. There's also some cgroup-specific time series we also\ndo not need.", + "default": { + "action": "drop", + "sourceLabels": [ + "__name__", + "container" + ], + "regex": "(?:container_cpu_usage_seconds_total|container_memory_working_set_bytes|container_fs_usage_bytes|container_fs_limit_bytes);$" + } + }, + { + "default": { + "action": "labelmap", + "regex": "container_name", + "replacement": "container" + } + }, + { + "default": { + "action": "drop", + "sourceLabels": [ + "container" + ], + "regex": "POD" + } + }, + { + "default": { + "action": "labeldrop", + "regex": "(id|name)" + } + } + ] + } + } + } + } + }, + "kubeControllerManager": { + "type": "object", + "description": "", + "properties": { + "serviceMonitor": { + "type": "object", + "description": "", + "properties": { + "interval": { + "type": "string", + "comment": "Scrape interval. If not set, the Prometheus default scrape interval is used.", + "description": "Kubernetes Controller Manager metrics scrape interval. If not set, the Prometheus default scrape interval is used.", + "default": "" + }, + "metricRelabelings": { + "type": "array", + "comment": "see docs/scraped_metrics.md\ncontroller manager metrics\nhttps://kubernetes.io/docs/concepts/cluster-administration/monitoring/#kube-controller-manager-metrics\ne.g.\ncloudprovider_aws_api_request_duration_seconds_bucket\ncloudprovider_aws_api_request_duration_seconds_count\ncloudprovider_aws_api_request_duration_seconds_sum", + "description": "Kubernetes Controller Manager MetricRelabelConfigs", + "default": [ + { + "action": "keep", + "regex": "(?:cloudprovider_.*_api_request_duration_seconds.*)", + "sourceLabels": [ + "__name__" + ] + } + ] + } + } + } + } + }, + "coreDns": { + "type": "object", + "description": "", + "properties": { + "serviceMonitor": { + "type": "object", + "description": "", + "properties": { + "interval": { + "type": "string", + "comment": "Scrape interval. If not set, the Prometheus default scrape interval is used.", + "description": "Core DNS metrics scrape interval. If not set, the Prometheus default scrape interval is used.", + "default": "" + }, + "metricRelabelings": { + "type": "array", + "comment": "see docs/scraped_metrics.md\ncoredns:\ncoredns_cache_entries\ncoredns_cache_hits_total\ncoredns_cache_misses_total\ncoredns_dns_request_duration_seconds_count\ncoredns_dns_request_duration_seconds_sum\ncoredns_dns_requests_total\ncoredns_dns_responses_total\ncoredns_forward_requests_total\nprocess_cpu_seconds_total\nprocess_open_fds\nprocess_resident_memory_bytes\nprocess_cpu_seconds_total\nprocess_open_fds\nprocess_resident_memory_bytes", + "description": "Core DNS MetricRelabelConfigs", + "default": [ + { + "action": "keep", + "regex": "(?:coredns_cache_(entries|(hits|misses)_total)|coredns_dns_request_duration_seconds_(count|sum)|coredns_(forward_requests|dns_requests|dns_responses)_total|process_(cpu_seconds_total|open_fds|resident_memory_bytes))", + "sourceLabels": [ + "__name__" + ] + } + ] + } + } + } + } + }, + "kubeEtcd": { + "type": "object", + "description": "", + "properties": { + "serviceMonitor": { + "type": "object", + "description": "", + "properties": { + "interval": { + "type": "string", + "comment": "Scrape interval. If not set, the Prometheus default scrape interval is used.", + "description": "Kubernetes Etcd metrics scrape interval. 
If not set, the Prometheus default scrape interval is used.", + "default": "" + }, + "metricRelabelings": { + "type": "array", + "comment": "see docs/scraped_metrics.md\netcd_request_cache_get_duration_seconds_count\netcd_request_cache_get_duration_seconds_sum\netcd_request_cache_add_duration_seconds_count\netcd_request_cache_add_duration_seconds_sum\netcd_request_cache_add_latencies_summary_count\netcd_request_cache_add_latencies_summary_sum\netcd_request_cache_get_latencies_summary_count\netcd_request_cache_get_latencies_summary_sum\netcd_helper_cache_hit_count\netcd_helper_cache_hit_total\netcd_helper_cache_miss_count\netcd_helper_cache_miss_total\netcd server:\netcd_mvcc_db_total_size_in_bytes\netcd_debugging_store_expires_total\netcd_debugging_store_watchers\netcd_disk_backend_commit_duration_seconds_bucket\netcd_disk_wal_fsync_duration_seconds_bucket\netcd_grpc_proxy_cache_hits_total\netcd_grpc_proxy_cache_misses_total\netcd_network_client_grpc_received_bytes_total\netcd_network_client_grpc_sent_bytes_total\netcd_server_has_leader\netcd_server_leader_changes_seen_total\netcd_server_proposals_applied_total\netcd_server_proposals_committed_total\netcd_server_proposals_failed_total\netcd_server_proposals_pending\nprocess_cpu_seconds_total\nprocess_open_fds\nprocess_resident_memory_bytes", + "description": "Kubernetes Etcd MetricRelabelConfigs", + "default": [ + { + "action": "keep", + "regex": "(?:etcd_request_cache_(?:add|get)_(?:duration_seconds|latencies_summary)_(?:count|sum)|etcd_helper_cache_(?:hit|miss)_(?:count|total)|etcd_mvcc_db_total_size_in_bytes|etcd_debugging_(store_(expires_total|watchers))|etcd_disk_(backend_commit|wal_fsync)_duration_seconds_.*|etcd_grpc_proxy_cache_(hits|misses)_total|etcd_network_client_grpc_(received|sent)_bytes_total|etcd_server_(has_leader|leader_changes_seen_total)|etcd_server_proposals_(pending|(applied|committed|failed)_total)|process_(cpu_seconds_total|open_fds|resident_memory_bytes))", + "sourceLabels": [ + "__name__" + ] + } + ] + } + } + } + } + }, + "kubeScheduler": { + "type": "object", + "description": "", + "properties": { + "serviceMonitor": { + "type": "object", + "description": "", + "properties": { + "interval": { + "type": "string", + "comment": "Scrape interval. If not set, the Prometheus default scrape interval is used.", + "description": "Kubernetes Scheduler metrics scrape interval. 
If not set, the Prometheus default scrape interval is used.", + "default": "" + }, + "metricRelabelings": { + "type": "array", + "comment": "see docs/scraped_metrics.md\nscheduler_e2e_* is present for K8s <1.23\nscheduler_e2e_scheduling_duration_seconds_bucket\nscheduler_e2e_scheduling_duration_seconds_count\nscheduler_e2e_scheduling_duration_seconds_sum\nscheduler_scheduling_attempt_duration_seconds is present for K8s >=1.23\nscheduler_scheduling_attempt_duration_seconds_bucket\nscheduler_scheduling_attempt_duration_seconds_count\nscheduler_scheduling_attempt_duration_seconds_sum\nscheduler_framework_extension_point_duration_seconds_bucket\nscheduler_framework_extension_point_duration_seconds_count\nscheduler_framework_extension_point_duration_seconds_sum\nscheduler_scheduling_algorithm_duration_seconds_bucket\nscheduler_scheduling_algorithm_duration_seconds_count\nscheduler_scheduling_algorithm_duration_seconds_sum", + "description": "Kubernetes Scheduler MetricRelabelConfigs", + "default": [ + { + "action": "keep", + "regex": "(?:scheduler_(?:e2e_scheduling|scheduling_attempt|framework_extension_point|scheduling_algorithm)_duration_seconds.*)", + "sourceLabels": [ + "__name__" + ] + } + ] + } + } + } + } + }, + "alertmanager": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Deploy alertmanager.", + "default": false + } + } + }, + "grafana": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "If true, deploy the grafana sub-chart.", + "default": false + }, + "defaultDashboardsEnabled": { + "type": "boolean", + "description": "Deploy default dashboards. These are loaded using the sidecar.", + "default": false + } + } + }, + "prometheusOperator": { + "type": "object", + "description": "", + "properties": { + "podLabels": { + "type": "object", + "comment": "Labels to add to the operator pod", + "description": "Additional labels for prometheus operator pods.", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Annotations to add to the operator pod", + "description": "Additional annotations for prometheus operator pods.", + "default": {} + }, + "resources": { + "type": "object", + "comment": "Resource limits for prometheus operator", + "description": "Resource limits for prometheus operator. Uses sub-chart defaults.", + "default": {}, + "example": { + "limits": { + "cpu": "200m", + "memory": "200Mi" + }, + "requests": { + "cpu": "100m", + "memory": "100Mi" + } + } + }, + "admissionWebhooks": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Create PrometheusRules admission webhooks. Mutating webhook will patch PrometheusRules objects indicating they were validated. 
Validating webhook will check the rules syntax.", +                "default": false +              } +            } +          }, +          "tls": { +            "type": "object", +            "description": "", +            "properties": { +              "enabled": { +                "type": "boolean", +                "description": "Enable TLS in prometheus operator.", +                "default": false +              } +            } +          } +        } +      }, +      "kube-state-metrics": { +        "type": "object", +        "comment": "Resource limits for kube-state-metrics", +        "description": "", +        "properties": { +          "fullnameOverride": { +            "type": "string", +            "comment": "Put here the new name if you want to override the full name used for Kube State Metrics components.", +            "commented": true, +            "description": "Used to override the chart's full name.", +            "default": "" +          }, +          "nodeSelector": { +            "type": "object", +            "description": "Node selector for kube-state-metrics. [See docs/best-practices.md for more information.](/docs/best-practices.md)", +            "default": {} +          }, +          "customLabels": { +            "type": "object", +            "comment": "Custom labels to apply to service, deployment and pods", +            "description": "Custom labels to apply to service, deployment and pods. Uses sub-chart defaults.", +            "default": {} +          }, +          "podAnnotations": { +            "type": "object", +            "comment": "Additional annotations for pods in the DaemonSet", +            "description": "Additional annotations for pods in the DaemonSet. Uses sub-chart defaults.", +            "default": {} +          }, +          "resources": { +            "type": "object", +            "description": "Resource limits for kube state metrics. Uses sub-chart defaults.", +            "default": {}, +            "example": { +              "limits": { +                "cpu": "100m", +                "memory": "64Mi" +              }, +              "requests": { +                "cpu": "10m", +                "memory": "32Mi" +              } +            } +          }, +          "image": { +            "type": "object", +            "comment": "latest kube-prometheus-stack version that is supported on OpenShift 4.8-4.10\nuses version 2.6.0 of kube-state-metrics, but this version has some critical vulnerabilities,\nso we bump the image manually.", +            "description": "", +            "properties": { +              "tag": { +                "type": "string", +                "description": "Tag for kube-state-metrics Docker image.", +                "default": "v2.7.0" +              } +            } +          }, +          "prometheus": { +            "type": "object", +            "description": "", +            "properties": { +              "monitor": { +                "type": "object", +                "description": "", +                "properties": { +                  "interval": { +                    "type": "string", +                    "comment": "Scrape interval. If not set, the Prometheus default scrape interval is used.", +                    "description": "Kubernetes State Metrics scrape interval. 
If not set, the Prometheus default scrape interval is used.", + "default": "" + }, + "metricRelabelings": { + "type": "array", + "comment": "see docs/scraped_metrics.md\nkube_daemonset_status_current_number_scheduled\nkube_daemonset_status_desired_number_scheduled\nkube_daemonset_status_number_misscheduled\nkube_daemonset_status_number_unavailable\nkube_deployment_spec_replicas\nkube_deployment_status_replicas_available\nkube_deployment_status_replicas_unavailable\nkube_node_info\nkube_node_status_allocatable\nkube_node_status_capacity\nkube_node_status_condition\nkube_statefulset_metadata_generation\nkube_statefulset_replicas\nkube_statefulset_status_observed_generation\nkube_statefulset_status_replicas\nkube_hpa_spec_max_replicas\nkube_hpa_spec_min_replicas\nkube_hpa_status_condition\nkube_hpa_status_current_replicas\nkube_hpa_status_desired_replicas\nkube pod state metrics\nkube_pod_container_info\nkube_pod_container_resource_limits\nkube_pod_container_resource_requests\nkube_pod_container_status_ready\nkube_pod_container_status_restarts_total\nkube_pod_container_status_terminated_reason\nkube_pod_container_status_waiting_reason\nkube_pod_status_phase\nkube_pod_info\nkube_service_info\nkube_service_spec_external_ip\nkube_service_spec_type\nkube_service_status_load_balancer_ingress\nDrop unnecessary labels Prometheus adds to these metrics\nWe don't want container=kube-state-metrics on everything", + "description": "Kubernetes State Metrics MetricRelabelConfigs", + "default": [ + { + "action": "keep", + "regex": "(?:kube_statefulset_status_observed_generation|kube_statefulset_status_replicas|kube_statefulset_replicas|kube_statefulset_metadata_generation|kube_daemonset_status_current_number_scheduled|kube_daemonset_status_desired_number_scheduled|kube_daemonset_status_number_misscheduled|kube_daemonset_status_number_unavailable|kube_deployment_spec_replicas|kube_deployment_status_replicas_available|kube_deployment_status_replicas_unavailable|kube_node_info|kube_node_status_allocatable|kube_node_status_capacity|kube_node_status_condition|kube_hpa_spec_max_replicas|kube_hpa_spec_min_replicas|kube_hpa_status_(condition|(current|desired)_replicas)|kube_pod_container_info|kube_pod_container_resource_requests|kube_pod_container_resource_limits|kube_pod_container_status_ready|kube_pod_container_status_terminated_reason|kube_pod_container_status_waiting_reason|kube_pod_container_status_restarts_total|kube_pod_status_phase|kube_pod_info|kube_service_info|kube_service_spec_external_ip|kube_service_spec_type|kube_service_status_load_balancer_ingress)", + "sourceLabels": [ + "__name__" + ] + }, + { + "action": "labeldrop", + "regex": "service" + }, + { + "action": "replace", + "sourceLabels": [ + "container", + "uid" + ], + "regex": "kube-state-metrics;", + "targetLabel": "container", + "replacement": "" + }, + { + "action": "replace", + "sourceLabels": [ + "pod", + "uid" + ], + "regex": ".*kube-state-metrics.*;", + "targetLabel": "pod", + "replacement": "" + }, + { + "action": "labelmap", + "regex": "(pod|service)", + "replacement": "service_discovery_${1}" + } + ] + } + } + } + } + } + } + }, + "prometheus-node-exporter": { + "type": "object", + "comment": "Resource limits for prometheus node exporter", + "description": "", + "properties": { + "fullnameOverride": { + "type": "string", + "comment": " Put here the new name if you want to override the full name used for Prometheus Node exporter components.", + "commented": true, + "description": "Used to override the chart's full name.", + "default": 
"" + }, + "nodeSelector": { + "type": "object", + "description": "Node selector for prometheus node exporter. [See docs/best-practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "podLabels": { + "type": "object", + "comment": "Additional labels for pods in the DaemonSet", + "description": "Additional labels for prometheus-node-exporter pods.", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Additional annotations for pods in the DaemonSet", + "description": "Additional annotations for prometheus-node-exporter pods.", + "default": {} + }, + "resources": { + "type": "object", + "description": "Resource limits for node exporter. Uses sub-chart defaults.", + "default": {}, + "example": { + "limits": { + "cpu": "200m", + "memory": "50Mi" + }, + "requests": { + "cpu": "100m", + "memory": "30Mi" + } + } + }, + "prometheus": { + "type": "object", + "description": "", + "properties": { + "monitor": { + "type": "object", + "description": "", + "properties": { + "interval": { + "type": "string", + "comment": "Scrape interval. If not set, the Prometheus default scrape interval is used.", + "description": "Node Exporter scrape interval. If not set, the Prometheus default scrape interval is used.", + "default": "" + }, + "metricRelabelings": { + "type": "array", + "comment": "see docs/scraped_metrics.md\nnode exporter metrics\nnode_cpu_seconds_total\nnode_load1\nnode_load5\nnode_load15\nnode_disk_io_time_weighted_seconds_total\nnode_disk_io_time_seconds_total\nnode_vmstat_pgpgin\nnode_vmstat_pgpgout\nnode_memory_MemFree_bytes\nnode_memory_Cached_bytes\nnode_memory_Buffers_bytes\nnode_memory_MemTotal_bytes\nnode_network_receive_drop_total\nnode_network_transmit_drop_total\nnode_network_receive_bytes_total\nnode_network_transmit_bytes_total\nnode_filesystem_avail_bytes\nnode_filesystem_size_bytes\nnode_filesystem_files_free\nnode_filesystem_files", + "description": "Node Exporter MetricRelabelConfigs", + "default": [ + { + "action": "keep", + "regex": "(?:node_load1|node_load5|node_load15|node_cpu_seconds_total|node_disk_io_time_weighted_seconds_total|node_disk_io_time_seconds_total|node_vmstat_pgpgin|node_vmstat_pgpgout|node_memory_MemFree_bytes|node_memory_MemAvailable_bytes|node_memory_Cached_bytes|node_memory_Buffers_bytes|node_memory_MemTotal_bytes|node_network_receive_drop_total|node_network_transmit_drop_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_filesystem_avail_bytes|node_filesystem_size_bytes)", + "sourceLabels": [ + "__name__" + ] + } + ] + } + } + } + } + } + } + }, + "prometheus": { + "type": "object", + "description": "", + "properties": { + "additionalServiceMonitors": { + "type": "array", + "description": "List of ServiceMonitor objects to create.", + "default": [] + }, + "prometheusSpec": { + "type": "object", + "description": "", + "properties": { + "scrapeInterval": { + "type": "string", + "comment": "Prometheus default scrape interval, default from upstream Kube Prometheus Stack Helm chart\nNOTE changing the scrape interval to be >1m can result in metrics\nfrom recording rules to be missing and empty panels in Sumo Logic Kubernetes apps.", + "description": "Prometheus metrics scrape interval. 
If not set, the Prometheus default scrape interval is used.", + "default": "30s" + }, + "retention": { + "type": "string", + "comment": "Prometheus data retention period", + "description": "How long to retain metrics in Prometheus", + "default": "1d" + }, + "podMetadata": { + "type": "object", + "comment": "Add custom pod annotations and labels to prometheus pods", + "description": "", + "properties": { + "labels": { + "type": "object", + "description": "Add custom pod labels to prometheus pods", + "default": {} + }, + "annotations": { + "type": "object", + "description": "Add custom pod annotations to prometheus pods", + "default": {} + } + } + }, + "nodeSelector": { + "type": "object", + "description": "Node selector for prometheus. [See docs/Best_Practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "resources": { + "type": "object", + "comment": "Define resources requests and limits for single Pods.", + "description": "Resource limits for prometheus. Uses sub-chart defaults.", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "cpu": { + "type": "string", + "description": "", + "default": "2000m" + }, + "memory": { + "type": "string", + "description": "", + "default": "8Gi" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "cpu": { + "type": "string", + "description": "", + "default": "500m" + }, + "memory": { + "type": "string", + "description": "", + "default": "1Gi" + } + } + } + } + }, + "initContainers": { + "type": "array", + "description": "InitContainers allows injecting additional Prometheus initContainers.", + "default": [ + { + "name": "init-config-reloader", + "env": [ + { + "name": "METADATA_METRICS_SVC", + "valueFrom": { + "configMapKeyRef": { + "name": "sumologic-configmap", + "key": "metadataMetrics" + } + } + }, + { + "name": "NAMESPACE", + "valueFrom": { + "configMapKeyRef": { + "name": "sumologic-configmap", + "key": "metadataNamespace" + } + } + } + ] + } + ] + }, + "containers": { + "type": "array", + "description": "Containers allows injecting additional containers. 
This is meant to allow adding an authentication proxy to a Prometheus pod.", + "default": [ + { + "name": "config-reloader", + "env": [ + { + "name": "METADATA_METRICS_SVC", + "valueFrom": { + "configMapKeyRef": { + "name": "sumologic-configmap", + "key": "metadataMetrics" + } + } + }, + { + "name": "NAMESPACE", + "valueFrom": { + "configMapKeyRef": { + "name": "sumologic-configmap", + "key": "metadataNamespace" + } + } + } + ] + } + ] + }, + "walCompression": { + "type": "boolean", + "comment": "Enable WAL compression to reduce Prometheus memory consumption", + "description": "Enables walCompression in Prometheus", + "default": true + }, + "additionalScrapeConfigs": { + "type": "array", + "comment": "prometheus scrape config\nrel: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config\nscraping metrics basing on annotations:\n- prometheus.io/scrape: true - to scrape metrics from the pod\n- prometheus.io/path: /metrics - path which the metric should be scrape from\n- prometheus.io/port: 9113 - port which the metric should be scrape from\nrel: https://github.com/prometheus-operator/kube-prometheus/pull/16#issuecomment-424318647", + "description": "Additional Prometheus scrape configurations", + "default": [ + { + "job_name": "pod-annotations", + "kubernetes_sd_configs": [ + { + "role": "pod" + } + ], + "relabel_configs": [ + { + "source_labels": [ + "__meta_kubernetes_pod_annotation_prometheus_io_scrape" + ], + "action": "keep", + "regex": true + }, + { + "source_labels": [ + "__meta_kubernetes_pod_annotation_prometheus_io_path" + ], + "action": "replace", + "target_label": "__metrics_path__", + "regex": "(.+)" + }, + { + "source_labels": [ + "__address__", + "__meta_kubernetes_pod_annotation_prometheus_io_port" + ], + "action": "replace", + "regex": "([^:]+)(?::\\d+)?;(\\d+)", + "replacement": "$1:$2", + "target_label": "__address__" + }, + { + "source_labels": [ + "__metrics_path__" + ], + "separator": ";", + "regex": "(.*)", + "target_label": "endpoint", + "replacement": "$1", + "action": "replace" + }, + { + "source_labels": [ + "__meta_kubernetes_namespace" + ], + "action": "replace", + "target_label": "namespace" + }, + { + "action": "labelmap", + "regex": "__meta_kubernetes_pod_label_(.+)" + }, + { + "source_labels": [ + "__meta_kubernetes_pod_name" + ], + "separator": ";", + "regex": "(.*)", + "target_label": "pod", + "replacement": "$1", + "action": "replace" + } + ] + } + ] + }, + "remoteWrite": { + "type": "array", + "description": "If specified, the remote_write spec.", + "items": [ + { + "comment": "infrastructure metrics", + "default": { + "remoteTimeout": "5s", + "url": "http://$(METADATA_METRICS_SVC).$(NAMESPACE):9888/prometheus.metrics" + } + } + ] + } + }, + "serviceMonitor": { + "type": "object", + "description": "", + "properties": { + "selfMonitor": { + "type": "boolean", + "description": "Enable scraping Prometheus metrics", + "default": false + } + } + } + } + } + } + } + }, + "otelcolInstrumentation": { + "type": "object", + "description": "", + "comment": "Configure otelcol-instrumentation - Sumo OTel Distro Collector\nref: https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/opentelemetry-collector/traces.md", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enables Sumo Otel Distro Collector StatefulSet to collect telemetry data. 
[See docs for more information.](/docs/opentelemetry-collector/traces.md)", + "default": true + }, + "sourceMetadata": { + "type": "object", + "description": "", + "properties": { + "sourceName": { + "type": "string", + "comment": "Set the _sourceName metadata field in Sumo Logic.", + "description": "Set the \\_sourceName metadata field in Sumo Logic.", + "default": "%{k8s.namespace.name}.%{k8s.pod.pod_name}.%{k8s.container.name}" + }, + "sourceCategory": { + "type": "string", + "comment": "Set the _sourceCategory metadata field in Sumo Logic.", + "description": "Set the \\_sourceCategory metadata field in Sumo Logic.", + "default": "%{k8s.namespace.name}/%{k8s.pod.pod_name}" + }, + "sourceCategoryPrefix": { + "type": "string", + "comment": "Set the prefix, for _sourceCategory metadata.", + "description": "Set the prefix, for \\_sourceCategory metadata.", + "default": "kubernetes/" + }, + "sourceCategoryReplaceDash": { + "type": "string", + "comment": "Used to replace - with another character.", + "description": "Used to replace - with another character.", + "default": "/" + }, + "excludeContainerRegex": { + "type": "string", + "comment": "A regular expression for containers.\nMatching containers will be excluded from Sumo. The logs will still be sent to otelcol.", + "description": "A regular expression for containers. Matching containers will be excluded from Sumo.", + "default": "" + }, + "excludeHostRegex": { + "type": "string", + "comment": "A regular expression for hosts.\nMatching hosts will be excluded from Sumo. The logs will still be sent to otelcol.", + "description": "A regular expression for hosts. Matching hosts will be excluded from Sumo.", + "default": "" + }, + "excludeNamespaceRegex": { + "type": "string", + "comment": "A regular expression for namespaces.\nMatching namespaces will be excluded from Sumo. The logs will still be sent to otelcol.", + "description": "A regular expression for namespaces. Matching namespaces will be excluded from Sumo.", + "default": "" + }, + "excludePodRegex": { + "type": "string", + "comment": "A regular expression for pods.\nMatching pods will be excluded from Sumo. The logs will still be sent to otelcol.", + "description": "A regular expression for pods. Matching pods will be excluded from Sumo.", + "default": "" + } + } + }, + "autoscaling": { + "type": "object", + "description": "", + "comment": "Option to turn autoscaling on for otelcol and specify params for HPA.\nAutoscaling needs metrics-server to access cpu metrics.", + "properties": { + "enabled": { + "type": "boolean", + "description": "Option to turn autoscaling on for Sumo Otel Distro Collector StatefulSet and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics.", + "default": false + }, + "minReplicas": { + "type": "integer", + "description": "Default min replicas for autoscaling.", + "default": 3 + }, + "maxReplicas": { + "type": "integer", + "description": "Default max replicas for autoscaling", + "default": 10 + }, + "targetCPUUtilizationPercentage": { + "type": "integer", + "description": "The desired target CPU utilization for autoscaling.", + "default": 100 + }, + "targetMemoryUtilizationPercentage": { + "type": "integer", + "commented": true, + "description": "The desired target memory utilization for autoscaling.", + "default": 50 + } + } + }, + "statefulset": { + "type": "object", + "description": "", + "properties": { + "nodeSelector": { + "type": "object", + "description": "Node selector for otelcol-instrumentation statefulset. 
[See docs/best-practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "Tolerations for otelcol-instrumentation statefulset.", + "default": [] + }, + "topologySpreadConstraints": { + "type": "array", + "description": "TopologySpreadConstraints for otelcol-instrumentation statefulset.", + "default": [] + }, + "affinity": { + "type": "object", + "description": "Affinity for otelcol-instrumentation statefulset.", + "default": {} + }, + "podAntiAffinity": { + "type": "string", + "comment": "Acceptable values for podAntiAffinity:\nsoft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default)\nhard: specifies rules that must be met for a pod to be scheduled onto a node", + "description": "PodAntiAffinity for otelcol-instrumentation statefulset.", + "default": "soft" + }, + "replicaCount": { + "type": "integer", + "description": "Set the number of otelcol-instrumentation replicasets.", + "default": 3 + }, + "resources": { + "type": "object", + "description": "Resources for otelcol-instrumentation statefulset.", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "4Gi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "2000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "768Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "500m" + } + } + } + } + }, + "priorityClassName": { + "type": "string", + "comment": "Option to define priorityClassName to assign a priority class to pods.", + "description": "Priority class name for otelcol-instrumentation pods.", + "default": "" + }, + "podLabels": { + "type": "object", + "comment": "Add custom labels only to metrics sts pods", + "description": "Additional labels for otelcol-instrumentation pods.", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Add custom annotations only to metrics sts pods", + "description": "Additional annotations for otelcol-instrumentation pods.", + "default": {} + }, + "image": { + "type": "object", + "description": "", + "properties": { + "repository": { + "type": "string", + "commented": true, + "description": "Image repository for otelcol-instrumentation docker container.", + "default": "" + }, + "tag": { + "type": "string", + "commented": true, + "description": "Image tag for otelcol-instrumentation docker container.", + "default": "" + }, + "pullPolicy": { + "type": "string", + "description": "Image pullPolicy for otelcol-instrumentation docker container.", + "default": "IfNotPresent" + } + } + }, + "containers": { + "type": "object", + "comment": "Set securityContext for containers running in pods in otelcol-instrumentation statefulset.", + "description": "", + "properties": { + "otelcol": { + "type": "object", + "description": "", + "properties": { + "securityContext": { + "type": "object", + "description": "The securityContext configuration for the otelcol-instrumentation container.", + "default": {} + }, + "livenessProbe": { + "type": "object", + "description": "Liveness probe settings for the otelcol-instrumentation container.", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + 
"timeoutSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "readinessProbe": { + "type": "object", + "description": "Readiness probe settings for the otelcol-instrumentation container.", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 5 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "startupProbe": { + "type": "object", + "description": "Startup probe configuration for the otelcol-instrumentation container.", + "properties": { + "periodSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 60 + } + } + } + } + } + } + }, + "extraEnvVars": { + "type": "array", + "comment": "Extra Environment Values - allows yaml definitions", + "commented": true, + "description": "Additional environment variables for otelcol-instrumentation pods.", + "example": [ + { + "name": "VALUE_FROM_SECRET", + "valueFrom": { + "secretKeyRef": { + "name": "secret_name", + "key": "secret_key" + } + } + } + ], + "default": [] + }, + "extraVolumes": { + "type": "array", + "commented": true, + "description": "Additional volumes for otelcol-instrumentation pods.", + "example": [ + { + "name": "es-certs", + "secret": { + "defaultMode": 420, + "secretName": "es-certs" + } + } + ], + "default": [] + }, + "extraVolumeMounts": { + "type": "array", + "commented": true, + "description": "Additional volume mounts for otelcol-instrumentation pods.", + "example": [ + { + "name": "es-certs", + "mountPath": "/certs", + "readOnly": true + } + ], + "default": [] + } + } + }, + "logLevelFilter": { + "type": "boolean", + "comment": "To enable collecting all logs, set to false", + "description": "Do not send otelcol-instrumentation logs if `true`.", + "default": false + }, + "config": { + "type": "object", + "description": "Configuration for otelcol-instrumentation", + "properties": { + "receivers": { + "type": "object", + "description": "", + "properties": { + "jaeger": { + "type": "object", + "description": "", + "properties": { + "protocols": { + "type": "object", + "description": "", + "properties": { + "thrift_compact": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:6831" + } + } + }, + "thrift_binary": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:6832" + } + } + }, + "grpc": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:14250" + } + } + }, + "thrift_http": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:14268" + } + } + } + } + } + } + }, + "opencensus": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:55678" + } + } + }, + "otlp": { + "type": "object", + "description": "", + "properties": { + "protocols": { + "type": "object", + "description": "", + "properties": { + "grpc": { + "type": 
"object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:4317" + } + } + }, + "http": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:4318" + } + } + } + } + } + } + }, + "otlp/deprecated": { + "type": "object", + "description": "", + "properties": { + "protocols": { + "type": "object", + "description": "", + "properties": { + "http": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:55681" + } + } + } + } + } + } + }, + "zipkin": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:9411" + } + } + } + } + }, + "processors": { + "type": "object", + "description": "", + "properties": { + "source": { + "type": "object", + "comment": "Source processor adds Sumo Logic related metadata", + "description": "", + "properties": { + "annotation_prefix": { + "type": "string", + "description": "", + "default": "k8s.pod.annotation." + }, + "collector": { + "type": "string", + "description": "", + "default": "{{ .Values.sumologic.collectorName | default .Values.sumologic.clusterName | quote }}" + }, + "exclude": { + "type": "object", + "description": "", + "properties": { + "k8s.container.name": { + "type": "string", + "description": "", + "default": "{{ .Values.otelcolInstrumentation.sourceMetadata.excludeContainerRegex | quote }}" + }, + "k8s.host.name": { + "type": "string", + "description": "", + "default": "{{ .Values.otelcolInstrumentation.sourceMetadata.excludeHostRegex | quote }}" + }, + "k8s.namespace.name": { + "type": "string", + "description": "", + "default": "{{ .Values.otelcolInstrumentation.sourceMetadata.excludeNamespaceRegex | quote }}" + }, + "k8s.pod.name": { + "type": "string", + "description": "", + "default": "{{ .Values.otelcolInstrumentation.sourceMetadata.excludePodRegex| quote }}" + } + } + }, + "pod_key": { + "type": "string", + "description": "", + "default": "k8s.pod.name" + }, + "pod_name_key": { + "type": "string", + "description": "", + "default": "k8s.pod.pod_name" + }, + "pod_template_hash_key": { + "type": "string", + "description": "", + "default": "k8s.pod.label.pod-template-hash" + }, + "source_category": { + "type": "string", + "description": "", + "default": "{{ .Values.otelcolInstrumentation.sourceMetadata.sourceCategory | quote }}" + }, + "source_category_prefix": { + "type": "string", + "description": "", + "default": "{{ .Values.otelcolInstrumentation.sourceMetadata.sourceCategoryPrefix | quote }}" + }, + "source_category_replace_dash": { + "type": "string", + "description": "", + "default": "{{ .Values.otelcolInstrumentation.sourceMetadata.sourceCategoryReplaceDash | quote }}" + }, + "source_host": { + "type": "string", + "description": "", + "default": "%{k8s.pod.hostname}" + }, + "source_name": { + "type": "string", + "description": "", + "default": "{{ .Values.otelcolInstrumentation.sourceMetadata.sourceName | quote }}" + } + } + }, + "resource": { + "type": "object", + "comment": "Resource processor sets the associted cluster attribute", + "description": "", + "properties": { + "attributes": { + "type": "array", + "description": "", + "default": [ + { + "key": "k8s.cluster.name", + "value": "{{ include \"sumologic.clusterNameReplaceSpaceWithDash\" . 
}}", + "action": "upsert" + } + ] + } + } + }, + "resourcedetection": { + "type": "object", + "description": "", + "properties": { + "detectors": { + "type": "array", + "description": "", + "default": [ + "system" + ] + }, + "override": { + "type": "boolean", + "description": "", + "default": false + }, + "timeout": { + "type": "string", + "description": "", + "default": "10s" + } + } + }, + "k8s_tagger": { + "type": "object", + "comment": "Tags spans with K8S metadata, basing on the context IP", + "description": "", + "properties": { + "passthrough": { + "type": "boolean", + "comment": "When true, only IP is assigned and passed (so it could be tagged on another collector)", + "description": "", + "default": false + }, + "owner_lookup_enabled": { + "type": "boolean", + "comment": "When true, additional fields, such as serviceName are being also extracted", + "description": "", + "default": true + }, + "extract": { + "type": "object", + "comment": "Extracted fields and assigned names", + "description": "", + "properties": { + "metadata": { + "type": "array", + "comment": "extract the following well-known metadata fields", + "description": "", + "default": [ + "containerId", + "containerName", + "daemonSetName", + "deploymentName", + "hostName", + "namespace", + "nodeName", + "podId", + "podName", + "replicaSetName", + "serviceName", + "statefulSetName" + ] + }, + "annotations": { + "type": "array", + "description": "", + "default": [ + { + "tag_name": "k8s.pod.annotation.%s", + "key": "*" + } + ] + }, + "namespace_labels": { + "type": "array", + "description": "", + "default": [ + { + "tag_name": "k8s.namespace.label.%s", + "key": "*" + } + ] + }, + "labels": { + "type": "array", + "description": "", + "default": [ + { + "tag_name": "k8s.pod.label.%s", + "key": "*" + } + ] + } + } + } + } + }, + "memory_limiter": { + "type": "object", + "comment": "The memory_limiter processor is used to prevent out of memory situations on the collector.", + "description": "", + "properties": { + "check_interval": { + "type": "string", + "comment": "check_interval is the time between measurements of memory usage for the\npurposes of avoiding going over the limits. Defaults to zero, so no\nchecks will be performed. 
Values below 1 second are not recommended since\nit can result in unnecessary CPU consumption.", + "description": "", + "default": "5s" + }, + "limit_percentage": { + "type": "integer", + "comment": "Maximum amount of memory, in %, targeted to be allocated by the process heap.\nNote that typically the total memory usage of process will be about 50MiB higher\nthan this value.", + "description": "", + "default": 75 + }, + "spike_limit_percentage": { + "type": "integer", + "comment": "Maximum spike expected between the measurements of memory usage, in %.", + "description": "", + "default": 20 + } + } + }, + "batch": { + "type": "object", + "comment": "The batch processor accepts spans and places them into batches grouped by node and resource", + "description": "", + "properties": { + "send_batch_size": { + "type": "integer", + "comment": "Number of spans after which a batch will be sent regardless of time", + "description": "", + "default": 256 + }, + "send_batch_max_size": { + "type": "integer", + "comment": "Never more than this many spans are being sent in a batch", + "description": "", + "default": 512 + }, + "timeout": { + "type": "string", + "comment": "Time duration after which a batch will be sent regardless of size", + "description": "", + "default": "5s" + } + } + } + } + }, + "extensions": { + "type": "object", + "description": "", + "properties": { + "health_check": { + "type": "object", + "description": "", + "default": {} + }, + "memory_ballast": { + "type": "object", + "description": "", + "properties": { + "size_mib": { + "type": "integer", + "comment": "Memory Ballast size should be max 1/3 to 1/2 of memory.", + "description": "", + "default": 250 + } + } + }, + "pprof": { + "type": "object", + "description": "", + "default": {} + } + } + }, + "exporters": { + "type": "object", + "description": "", + "properties": { + "sumologic/metrics": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "${SUMO_ENDPOINT_DEFAULT_METRICS_SOURCE}" + }, + "compress_encoding": { + "type": "string", + "comment": "Compression encoding format, either empty string (\"\"), gzip or deflate (default gzip).\nEmpty string means no compression", + "description": "", + "default": "gzip" + }, + "max_request_body_size": { + "type": "integer", + "comment": "Max HTTP request body size in bytes before compression (if applied). By default 1_048_576 (1MB) is used.", + "description": "", + "default": 1048576 + }, + "log_format": { + "type": "string", + "comment": "Format to use when sending logs to Sumo. (default json) (possible values: json, text)", + "description": "", + "default": "text" + }, + "metric_format": { + "type": "string", + "comment": "Format of the metrics to be sent (default is prometheus) (possible values: carbon2, prometheus)\ncarbon2 and graphite are going to be supported soon.", + "description": "", + "default": "prometheus" + }, + "timeout": { + "type": "string", + "comment": "Timeout for every attempt to send data to Sumo Logic backend. 
Maximum connection timeout is 55s.", + "description": "", + "default": "5s" + }, + "retry_on_failure": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "", + "default": true + }, + "initial_interval": { + "type": "string", + "comment": "Time to wait after the first failure before retrying", + "description": "", + "default": "5s" + }, + "max_interval": { + "type": "string", + "comment": "Upper bound on backoff", + "description": "", + "default": "30s" + }, + "max_elapsed_time": { + "type": "string", + "comment": "Maximum amount of time spent trying to send a batch", + "description": "", + "default": "120s" + } + } + }, + "sending_queue": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "", + "default": false + }, + "num_consumers": { + "type": "integer", + "comment": "Number of consumers that dequeue batches", + "description": "", + "default": 10 + }, + "queue_size": { + "type": "integer", + "comment": "Maximum number of batches kept in memory before data\nUser should calculate this as num_seconds * requests_per_second where:\nnum_seconds is the number of seconds to buffer in case of a backend outage\nrequests_per_second is the average number of requests per seconds.", + "description": "", + "default": 5000 + } + } + } + } + }, + "otlphttp/traces": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "http://{{ include \"otelcolinstrumentation.exporter.endpoint\" . }}:4318" + } + } + } + } + }, + "service": { + "type": "object", + "description": "", + "properties": { + "extensions": { + "type": "array", + "description": "", + "default": [ + "health_check", + "memory_ballast", + "pprof" + ] + }, + "pipelines": { + "type": "object", + "description": "", + "properties": { + "traces": { + "type": "object", + "description": "", + "properties": { + "receivers": { + "type": "array", + "description": "", + "default": [ + "jaeger", + "opencensus", + "otlp", + "otlp/deprecated", + "zipkin" + ] + }, + "processors": { + "type": "array", + "description": "", + "default": [ + "memory_limiter", + "k8s_tagger", + "source", + "resource", + "batch" + ] + }, + "exporters": { + "type": "array", + "description": "", + "default": [ + "otlphttp/traces" + ] + } + } + }, + "metrics": { + "type": "object", + "description": "", + "properties": { + "receivers": { + "type": "array", + "description": "", + "default": [ + "otlp", + "otlp/deprecated" + ] + }, + "processors": { + "type": "array", + "description": "", + "default": [ + "memory_limiter", + "k8s_tagger", + "source", + "resource", + "batch" + ] + }, + "exporters": { + "type": "array", + "description": "", + "default": [ + "sumologic/metrics" + ] + } + } + } + } + } + } + } + } + } + } + }, + "tracesSampler": { + "type": "object", + "comment": "Configure traces-sampler\nref: https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/opentelemetry-collector/traces.md", + "description": "", + "properties": { + "deployment": { + "type": "object", + "description": "", + "properties": { + "nodeSelector": { + "type": "object", + "description": "Node selector for otelcol deployment. 
[See docs/best-practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "Tolerations for traces-sampler statefulset.", + "default": [] + }, + "replicas": { + "type": "integer", + "description": "Set the number of OpenTelemetry Collector replicas.", + "default": 1 + }, + "resources": { + "type": "object", + "description": "Resources for traces-sampler statefulset.", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "4Gi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "2000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "384Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "200m" + } + } + } + } + }, + "priorityClassName": { + "type": "string", + "comment": "Option to define priorityClassName to assign a priority class to pods.", + "description": "Priority class name for OpenTelemetry Collector log pods.", + "default": "" + }, + "podLabels": { + "type": "object", + "comment": "Add custom labels only to traces-sampler deployment.", + "description": "Additional labels for traces-sampler pods.", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Add custom annotations only to traces-sampler deployment.", + "description": "Additional annotations for traces-sampler pods.", + "default": {} + }, + "image": { + "type": "object", + "description": "", + "properties": { + "repository": { + "type": "string", + "commented": true, + "description": "Image repository for traces-sampler docker container.", + "default": "" + }, + "tag": { + "type": "string", + "commented": true, + "description": "Image tag for traces-sampler docker container.", + "default": "" + }, + "pullPolicy": { + "type": "string", + "description": "Image pullPolicy for traces-sampler docker container.", + "default": "IfNotPresent" + } + } + }, + "extraEnvVars": { + "type": "array", + "comment": "Extra Environment Values - allows yaml definitions", + "commented": true, + "description": "Additional environment variables for traces-sampler pods.", + "example": [ + { + "name": "VALUE_FROM_SECRET", + "valueFrom": { + "secretKeyRef": { + "name": "secret_name", + "key": "secret_key" + } + } + } + ], + "default": [] + }, + "extraVolumes": { + "type": "array", + "commented": true, + "description": "Additional volumes for traces-sampler pods.", + "example": [ + { + "name": "es-certs", + "secret": { + "defaultMode": 420, + "secretName": "es-certs" + } + } + ], + "default": [] + }, + "extraVolumeMounts": { + "type": "array", + "commented": true, + "description": "Additional volume mounts for traces-sampler pods.", + "example": [ + { + "name": "es-certs", + "mountPath": "/certs", + "readOnly": true + } + ], + "default": [] + } + } + }, + "logLevelFilter": { + "type": "boolean", + "comment": "To enable collecting all logs, set to false", + "commented": true, + "description": "Do not send traces-sampler logs if `true`.", + "default": false + }, + "config": { + "type": "object", + "comment": "Collector configuration", + "description": "Configuration for traces-sampler.", + "properties": { + "receivers": { + "type": "object", + "description": "", + "properties": { + "otlp": { + "type": "object", + "description": "", + "properties": { + "protocols": { + "type": "object", + "description": 
"", + "properties": { + "grpc": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:4317" + } + } + }, + "http": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:4318" + } + } + } + } + } + } + } + } + }, + "processors": { + "type": "object", + "description": "", + "properties": { + "memory_limiter": { + "type": "object", + "comment": "The memory_limiter processor is used to prevent out of memory situations on the collector.", + "description": "", + "properties": { + "check_interval": { + "type": "string", + "comment": "check_interval is the time between measurements of memory usage for the\npurposes of avoiding going over the limits. Defaults to zero, so no\nchecks will be performed. Values below 1 second are not recommended since\nit can result in unnecessary CPU consumption.", + "description": "", + "default": "5s" + }, + "limit_percentage": { + "type": "integer", + "comment": "Maximum amount of memory, in %, targeted to be allocated by the process heap.\nNote that typically the total memory usage of process will be about 50MiB higher\nthan this value.", + "description": "", + "default": 75 + }, + "spike_limit_percentage": { + "type": "integer", + "comment": "Maximum spike expected between the measurements of memory usage, in %.", + "description": "", + "default": 20 + } + } + }, + "cascading_filter": { + "type": "object", + "comment": "Smart cascading filtering rules with preset limits.\nPlease see https://github.com/SumoLogic/sumologic-otel-collector/tree/v0.86.0-sumo-1/pkg/processor/cascadingfilterprocessor\nfor details.", + "description": "", + "properties": { + "num_traces": { + "type": "integer", + "comment": "Max number of traces for which decisions are kept in memory", + "description": "", + "default": 200000 + } + } + }, + "batch": { + "type": "object", + "comment": "The batch processor accepts spans and places them into batches grouped by node and resource", + "description": "", + "properties": { + "send_batch_size": { + "type": "integer", + "comment": "Number of spans after which a batch will be sent regardless of time", + "description": "", + "default": 256 + }, + "send_batch_max_size": { + "type": "integer", + "comment": "Never more than this many spans are being sent in a batch", + "description": "", + "default": 512 + }, + "timeout": { + "type": "string", + "comment": "Time duration after which a batch will be sent regardless of size", + "description": "", + "default": "5s" + } + } + } + } + }, + "extensions": { + "type": "object", + "description": "", + "properties": { + "health_check": { + "type": "object", + "description": "", + "default": {} + }, + "memory_ballast": { + "type": "object", + "description": "", + "properties": { + "size_mib": { + "type": "integer", + "comment": "Memory Ballast size should be max 1/3 to 1/2 of memory.", + "description": "", + "default": 683 + } + } + }, + "pprof": { + "type": "object", + "description": "", + "default": {} + } + } + }, + "exporters": { + "type": "object", + "description": "", + "properties": { + "logging": { + "type": "object", + "comment": "Following generates verbose logs with span content, useful to verify what\nmetadata is being tagged. 
To enable, uncomment and add \"logging\" to exporters below.\nThere are two levels that could be used: `debug` and `info` with the former\nbeing much more verbose and including (sampled) spans content", + "commented": true, + "description": "", + "properties": { + "loglevel": { + "type": "string", + "description": "", + "default": "debug" + } + } + }, + "otlphttp": { + "type": "object", + "description": "", + "properties": { + "traces_endpoint": { + "type": "string", + "description": "", + "default": "${SUMO_ENDPOINT_DEFAULT_OTLP_TRACES_SOURCE}/v1/traces" + }, + "compression": { + "type": "string", + "description": "", + "default": "gzip" + } + } + } + } + }, + "service": { + "type": "object", + "description": "", + "properties": { + "extensions": { + "type": "array", + "description": "", + "default": [ + "health_check", + "memory_ballast", + "pprof" + ] + }, + "pipelines": { + "type": "object", + "description": "", + "properties": { + "traces": { + "type": "object", + "description": "", + "properties": { + "receivers": { + "type": "array", + "description": "", + "default": [ + "otlp" + ] + }, + "processors": { + "type": "array", + "description": "", + "default": [ + "memory_limiter", + "cascading_filter", + "batch" + ] + }, + "exporters": { + "type": "array", + "description": "", + "default": [ + "otlphttp" + ] + } + } + } + } + } + } + } + } + } + } + }, + "metadata": { + "type": "object", + "description": "", + "properties": { + "image": { + "type": "object", + "comment": "Configure image for Opentelemetry Collector (for logs and metrics)", + "description": "", + "properties": { + "repository": { + "type": "string", + "commented": true, + "description": "Image repository for otelcol docker container.", + "default": "" + }, + "tag": { + "type": "string", + "commented": true, + "description": "Image tag for otelcol docker container.", + "default": "" + }, + "pullPolicy": { + "type": "string", + "description": "Image pullPolicy for otelcol docker container.", + "default": "IfNotPresent" + } + } + }, + "securityContext": { + "type": "object", + "description": "The securityContext configuration for otelcol.", + "properties": { + "fsGroup": { + "type": "integer", + "comment": "The group ID of all processes in the statefulset containers. 
This can be anything, but it does need to be set.\nThe default is 0 (root), and containers don't have write permissions for volumes in that case.", + "description": "", + "default": 999 + } + } + }, + "podLabels": { + "type": "object", + "comment": "Add custom labels to all otelcol sts pods(logs and metrics)", + "description": "Additional labels for all otelcol pods.", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Add custom annotations to all otelcol sts pods(logs and metrics)", + "description": "Additional annotations for all otelcol pods.", + "default": {} + }, + "serviceLabels": { + "type": "object", + "comment": "Add custom labels to all otelcol svc (logs and metrics)", + "description": "Additional labels for all otelcol pods.", + "default": {} + }, + "persistence": { + "type": "object", + "comment": "Configure persistence for Opentelemetry Collector", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control persistence for OpenTelemetry Collector.", + "default": true + }, + "storageClass": { + "type": "string", + "commented": true, + "description": "Defines storageClassName for the PersistentVolumeClaim which is used to provide persistence for OpenTelemetry Collector.", + "default": "" + }, + "accessMode": { + "type": "string", + "description": "The accessMode for the volume which is used to provide persistence for OpenTelemetry Collector.", + "default": "ReadWriteOnce" + }, + "size": { + "type": "string", + "description": "Size of the volume which is used to provide persistence for OpenTelemetry Collector.", + "default": "10Gi" + }, + "pvcLabels": { + "type": "object", + "comment": "Add custom labels to all otelcol statefulset PVC (logs and metrics)", + "description": "Additional PersistentVolumeClaim labels for all OpenTelemetry Collector pods.", + "default": {} + } + } + }, + "metrics": { + "type": "object", + "comment": "Configure metrics pipeline.\nThis section affects only otelcol provider.", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control deploying the otelcol metrics statefulsets.", + "default": true + }, + "logLevel": { + "type": "string", + "description": "Flag to control logging level for OpenTelemetry Collector for metrics. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`.", + "default": "info" + }, + "config": { + "type": "object", + "description": "", + "properties": { + "merge": { + "comment": "Directly alter the OT configuration. The value of this key should be a dictionary, that will\nbe directly merged with the generated configuration, overriding existing values.\nFor example:\noverride:\n processors:\n batch:\n send_batch_size: 512\nwill change the batch size of the pipeline.\nWARNING: This field is not subject to backwards-compatibility guarantees offered by the rest\nof this chart. It involves implementation details that may change even in minor versions.\nUse with caution, and consider opening an issue, so your customization can be added in a safer way.", + "type": "object", + "description": "Configuration for metrics metadata otelcol, merged with defaults. 
See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md.", + "default": {} + }, + "override": { + "type": "object", + "comment": "Completely override existing config and replace it with the contents of this value.\nThe value of this key should be a dictionary, that will replace the normal configuration.\nThis is an advanced feature, use with caution, and review the generated configuration first.", + "description": "Configuration for metrics metadata otelcol, replaces defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md.", + "default": {} + }, + "additionalEndpoints": { + "comment": "List of additional endpoints to be handled by Metrics Metadata Pods", + "type": "array", + "description": "List of additional endpoints for Open Telemetry Metadata Pod.", + "default": [] + } + } + }, + "statefulset": { + "type": "object", + "description": "", + "properties": { + "nodeSelector": { + "type": "object", + "description": "Node selector for metrics metadata enrichment (otelcol) statefulset. [See docs/best-practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "Tolerations for metrics metadata enrichment (otelcol) statefulset.", + "default": [] + }, + "topologySpreadConstraints": { + "type": "array", + "description": "TopologySpreadConstraints for metrics metadata enrichment (otelcol) statefulset.", + "default": [] + }, + "affinity": { + "type": "object", + "description": "Affinity for metrics metadata enrichment (otelcol) statefulset.", + "default": {} + }, + "podAntiAffinity": { + "type": "string", + "comment": "Acceptable values for podAntiAffinity:\nsoft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default)\nhard: specifies rules that must be met for a pod to be scheduled onto a node", + "description": "PodAntiAffinity for metrics metadata enrichment (otelcol) statefulset.", + "default": "soft" + }, + "replicaCount": { + "type": "integer", + "description": "Replica count for metrics metadata enrichment (otelcol) statefulset.", + "default": 3 + }, + "resources": { + "type": "object", + "description": "Resources for metrics metadata enrichment (otelcol) statefulset.", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "1Gi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "1000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "768Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "500m" + } + } + } + } + }, + "priorityClassName": { + "type": "string", + "comment": "Option to define priorityClassName to assign a priority class to pods.", + "description": "Priority class name for metrics metadata enrichment (otelcol) pods.", + "default": "" + }, + "podLabels": { + "type": "object", + "comment": "Add custom labels only to metrics sts pods", + "description": "Additional labels for metrics metadata enrichment (otelcol) pods.", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Add custom annotations only to metrics sts pods", + "description": "Additional annotations for metrics metadata enrichment (otelcol) pods.", + "default": {} + }, + "containers": { + "type": "object", + "comment": "Set securityContext for 
containers running in pods in metrics statefulset.", + "description": "", + "properties": { + "otelcol": { + "type": "object", + "description": "", + "properties": { + "securityContext": { + "type": "object", + "description": "The securityContext configuration for otelcol container for metrics metadata enrichment statefulset.", + "default": {} + }, + "livenessProbe": { + "type": "object", + "description": "Liveness probe settings for the logs otelcol container.", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "readinessProbe": { + "type": "object", + "description": "Readiness probe settings for the logs otelcol container.", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 5 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "startupProbe": { + "type": "object", + "description": "Startup probe configuration for metrics otelcol container.", + "properties": { + "periodSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 60 + } + } + } + } + } + } + }, + "extraEnvVars": { + "type": "array", + "comment": "Extra Environment Values - allows yaml definitions", + "commented": true, + "description": "Additional environment variables for metrics metadata enrichment (otelcol) pods.", + "example": [ + { + "name": "VALUE_FROM_SECRET", + "valueFrom": { + "secretKeyRef": { + "name": "secret_name", + "key": "secret_key" + } + } + } + ], + "default": [] + }, + "extraVolumes": { + "type": "array", + "commented": true, + "description": "Additional volumes for metrics metadata enrichment (otelcol) pods.", + "example": [ + { + "name": "es-certs", + "secret": { + "defaultMode": 420, + "secretName": "es-certs" + } + } + ], + "default": [] + }, + "extraVolumeMounts": { + "type": "array", + "commented": true, + "description": "Additional volume mounts for metrics metadata enrichment (otelcol) pods.", + "example": [ + { + "name": "es-certs", + "mountPath": "/certs", + "readOnly": true + } + ], + "default": [] + } + } + }, + "autoscaling": { + "type": "object", + "comment": "Option to turn autoscaling on for metrics and specify params for HPA.\nAutoscaling needs metrics-server to access cpu metrics.", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Option to turn autoscaling on for metrics metadata enrichment (otelcol) and specify params for HPA. 
Autoscaling needs metrics-server to access cpu metrics.", + "default": false + }, + "minReplicas": { + "type": "integer", + "description": "Default min replicas for autoscaling.", + "default": 3 + }, + "maxReplicas": { + "type": "integer", + "description": "Default max replicas for autoscaling", + "default": 10 + }, + "targetCPUUtilizationPercentage": { + "type": "integer", + "description": "The desired target CPU utilization for autoscaling.", + "default": 80 + }, + "targetMemoryUtilizationPercentage": { + "type": "integer", + "commented": true, + "description": "The desired target memory utilization for autoscaling.", + "default": 50 + } + } + }, + "podDisruptionBudget": { + "type": "object", + "comment": "Option to specify PodDisruptionBudgets\nYou can specify only one of maxUnavailable and minAvailable in a single PodDisruptionBudget", + "description": "Pod Disruption Budget for metrics metadata enrichment (otelcol) statefulset and for experimental otelcol metrics collector.", + "properties": { + "minAvailable": { + "type": "integer", + "description": "", + "default": 2 + }, + "maxUnavailable": { + "type": "integer", + "comment": "To use maxUnavailable, set minAvailable to null and uncomment the below:", + "commented": true, + "description": "", + "default": 1 + } + } + } + } + }, + "logs": { + "type": "object", + "comment": "Configure logs pipeline.\nThis section affects only otelcol provider.", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control deploying the otelcol logs statefulsets.", + "default": true + }, + "logLevel": { + "type": "string", + "description": "Flag to control logging level for OpenTelemetry Collector for logs. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`.", + "default": "info" + }, + "config": { + "type": "object", + "description": "", + "properties": { + "merge": { + "type": "object", + "comment": "Directly alter the OT configuration. The value of this key should be a dictionary, that will\nbe directly merged with the generated configuration, overriding existing values.\nFor example:\noverride:\nprocessors:\nbatch:\nsend_batch_size: 512\nwill change the batch size of the pipeline.\nWARNING: This field is not subject to backwards-compatibility guarantees offered by the rest\nof this chart. It involves implementation details that may change even in minor versions.\nUse with caution, and consider opening an issue, so your customization can be added in a safer way.", + "description": "Configuration for logs metadata otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md.", + "default": {} + }, + "override": { + "type": "object", + "comment": "Completely override existing config and replace it with the contents of this value.\nThe value of this key should be a dictionary, that will replace the normal configuration.\nThis is an advanced feature, use with caution, and review the generated configuration first.", + "description": "Configuration for logs metadata otelcol, replaces defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md.", + "default": {} + } + } + }, + "statefulset": { + "type": "object", + "description": "", + "properties": { + "nodeSelector": { + "type": "object", + "description": "Node selector for logs metadata enrichment (otelcol) statefulset. 
[See docs/best-practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "Tolerations for logs metadata enrichment (otelcol) statefulset.", + "default": [] + }, + "topologySpreadConstraints": { + "type": "array", + "description": "TopologySpreadConstraints for logs metadata enrichment (otelcol) statefulset.", + "default": [] + }, + "affinity": { + "type": "object", + "description": "Affinity for logs metadata enrichment (otelcol) statefulset.", + "default": {} + }, + "podAntiAffinity": { + "type": "string", + "comment": "Acceptable values for podAntiAffinity:\nsoft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default)\nhard: specifies rules that must be met for a pod to be scheduled onto a node", + "description": "PodAntiAffinity for logs metadata enrichment (otelcol) statefulset.", + "default": "soft" + }, + "replicaCount": { + "type": "integer", + "description": "Replica count for logs metadata enrichment (otelcol) statefulset.", + "default": 3 + }, + "resources": { + "type": "object", + "description": "Resources for logs metadata enrichment (otelcol) statefulset.", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "1Gi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "1000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "768Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "500m" + } + } + } + } + }, + "priorityClassName": { + "type": "string", + "comment": "Option to define priorityClassName to assign a priority class to pods.", + "description": "Priority class name for logs metadata enrichment (otelcol) pods.", + "default": "" + }, + "podLabels": { + "type": "object", + "comment": "Add custom labels only to logs sts pods", + "description": "Additional labels for logs metadata enrichment (otelcol) pods.", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Add custom annotations only to logs sts pods", + "description": "Additional annotations for logs metadata enrichment (otelcol) pods.", + "default": {} + }, + "containers": { + "type": "object", + "comment": "Set securityContext for containers running in pods in logs statefulset.", + "description": "", + "properties": { + "otelcol": { + "type": "object", + "description": "", + "properties": { + "securityContext": { + "type": "object", + "description": "The securityContext configuration for the logs otelcol container.", + "default": {} + }, + "livenessProbe": { + "type": "object", + "description": "Liveness probe settings for the logs otelcol container.", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "readinessProbe": { + "type": "object", + "description": "Readiness probe settings for the logs otelcol container.", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 5 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + 
"timeoutSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "startupProbe": { + "type": "object", + "description": "Startup probe configuration for the logs otelcol container.", + "properties": { + "periodSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 60 + } + } + } + } + } + } + }, + "extraEnvVars": { + "type": "array", + "comment": "Extra Environment Values - allows yaml definitions", + "commented": true, + "description": "Additional environment variables for logs metadata enrichment (otelcol) pods.", + "example": [ + { + "name": "VALUE_FROM_SECRET", + "valueFrom": { + "secretKeyRef": { + "name": "secret_name", + "key": "secret_key" + } + } + } + ], + "default": [] + }, + "extraVolumes": { + "type": "array", + "commented": true, + "description": "Additional volumes for logs metadata enrichment (otelcol) pods.", + "example": [ + { + "name": "es-certs", + "secret": { + "defaultMode": 420, + "secretName": "es-certs" + } + } + ], + "default": [] + }, + "extraVolumeMounts": { + "type": "array", + "commented": true, + "description": "Additional volume mounts for logs metadata enrichment (otelcol) pods.", + "example": [ + { + "name": "es-certs", + "mountPath": "/certs", + "readOnly": true + } + ], + "default": [] + }, + "extraPorts": { + "type": "array", + "commented": true, + "description": "Additional exposed ports in logs metadata enrichment (otelcol) pods and service.", + "example": [ + { + "name": "otlphttp2", + "containerPort": 4319, + "protocol": "TCP" + } + ], + "default": [] + }, + "extraArgs": { + "type": "array", + "commented": true, + "description": "Additional arguments to otelcol container.", + "default": [] + } + } + }, + "autoscaling": { + "type": "object", + "comment": "Option to turn autoscaling on for logs and specify params for HPA.\nAutoscaling needs metrics-server to access cpu metrics.", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Option to turn autoscaling on for logs metadata enrichment (otelcol) and specify params for HPA. 
Autoscaling needs metrics-server to access cpu metrics.", + "default": false + }, + "minReplicas": { + "type": "integer", + "description": "Default min replicas for autoscaling.", + "default": 3 + }, + "maxReplicas": { + "type": "integer", + "description": "Default max replicas for autoscaling", + "default": 10 + }, + "targetCPUUtilizationPercentage": { + "type": "integer", + "description": "The desired target CPU utilization for autoscaling.", + "default": 80 + }, + "targetMemoryUtilizationPercentage": { + "type": "integer", + "commented": true, + "description": "The desired target memory utilization for autoscaling.", + "default": 50 + } + } + }, + "podDisruptionBudget": { + "type": "object", + "comment": "Option to specify PodDisrutionBudgets\nYou can specify only one of maxUnavailable and minAvailable in a single PodDisruptionBudget", + "description": "Pod Disruption Budget for logs metadata enrichment (otelcol) statefulset.", + "properties": { + "minAvailable": { + "type": "integer", + "description": "", + "default": 2 + }, + "maxUnavailable": { + "type": "integer", + "comment": "To use maxUnavailable, set minAvailable to null and uncomment the below:", + "commented": true, + "description": "", + "default": 1 + } + } + } + } + } + } + }, + "tracesGateway": { + "type": "object", + "comment": "Configure traces-gateway\nref: https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/opentelemetry-collector/traces.md", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control deploying traces-gateway. [See docs for more information.](/docs/opentelemetry-collector/traces.md)", + "default": true + }, + "autoscaling": { + "type": "object", + "comment": "Option to turn autoscaling on for otelcol and specify params for HPA.\nAutoscaling needs metrics-server to access cpu metrics.", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Option to turn autoscaling on for traces-gateway and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics.", + "default": false + }, + "minReplicas": { + "type": "integer", + "description": "Default min replicas for autoscaling.", + "default": 1 + }, + "maxReplicas": { + "type": "integer", + "description": "Default max replicas for autoscaling", + "default": 10 + }, + "targetCPUUtilizationPercentage": { + "type": "integer", + "description": "The desired target CPU utilization for autoscaling.", + "default": 100 + }, + "targetMemoryUtilizationPercentage": { + "type": "integer", + "commented": true, + "description": "The desired target memory utilization for autoscaling.", + "default": 50 + } + } + }, + "deployment": { + "type": "object", + "description": "", + "properties": { + "replicas": { + "type": "integer", + "description": "Set the number of OpenTelemetry Collector replicas.", + "default": 1 + }, + "nodeSelector": { + "type": "object", + "description": "Node selector for otelcol deployment. 
[See docs/best-practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "Tolerations for traces-gateway statefulset.", + "default": [] + }, + "resources": { + "type": "object", + "description": "Resources for traces-gateway statefulset.", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "2Gi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "1000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "196Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "50m" + } + } + } + } + }, + "podLabels": { + "type": "object", + "comment": "Add custom labels only to traces-gateway deployment.", + "description": "Additional labels for traces-gateway pods.", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Add custom annotations only to traces-gateway deployment.", + "description": "Additional annotations for traces-gateway pods.", + "default": {} + }, + "image": { + "type": "object", + "description": "", + "properties": { + "repository": { + "type": "string", + "commented": true, + "description": "Image repository for traces-gateway docker container.", + "default": "" + }, + "tag": { + "type": "string", + "commented": true, + "description": "Image tag for traces-gateway docker container.", + "default": "" + }, + "pullPolicy": { + "type": "string", + "description": "Image pullPolicy for traces-gateway docker container.", + "default": "IfNotPresent" + } + } + }, + "livenessProbe": { + "type": "object", + "description": "Liveness probe settings for the traces-gateway container.", + "properties": { + "periodSeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "readinessProbe": { + "type": "object", + "description": "Readiness probe settings for the traces-gateway container.", + "properties": { + "periodSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "startupProbe": { + "type": "object", + "description": "Startup probe configuration for the traces-gateway container.", + "properties": { + "periodSeconds": { + "type": "integer", + "description": "", + "default": 5 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 60 + } + } + }, + "extraEnvVars": { + "type": "array", + "comment": "Extra Environment Values - allows yaml definitions", + "commented": true, + "description": "Additional environment variables for traces-gateway pods.", + "example": [ + { + "name": "VALUE_FROM_SECRET", + "valueFrom": { + "secretKeyRef": { + "name": "secret_name", + "key": "secret_key" + } + } + } + ], + "default": [] + }, + "extraVolumes": { + "type": "array", + "commented": true, + "description": "Additional volumes for traces-gateway pods.", + "example": [ + { + "name": "es-certs", + "secret": { + "defaultMode": 420, + "secretName": "es-certs" + } + } 
+ ], + "default": [] + }, + "extraVolumeMounts": { + "type": "array", + "commented": true, + "description": "Additional volume mounts for traces-gateway pods.", + "example": [ + { + "name": "es-certs", + "mountPath": "/certs", + "readOnly": true + } + ], + "default": [] + }, + "priorityClassName": { + "type": "string", + "comment": "Option to define priorityClassName to assign a priority class to pods.", + "description": "Priority class name for OpenTelemetry Collector log pods.", + "default": "" + } + } + }, + "logLevelFilter": { + "type": "boolean", + "comment": "To enable collecting all logs, set to false", + "description": "Do not send traces-gateway logs if `true`.", + "default": false + }, + "config": { + "type": "object", + "description": "Configuration for traces-gateway.", + "properties": { + "receivers": { + "type": "object", + "description": "", + "properties": { + "otlp": { + "type": "object", + "description": "", + "properties": { + "protocols": { + "type": "object", + "description": "", + "properties": { + "grpc": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:4317" + } + } + }, + "http": { + "type": "object", + "description": "", + "properties": { + "endpoint": { + "type": "string", + "description": "", + "default": "0.0.0.0:4318" + } + } + } + } + } + } + } + } + }, + "processors": { + "type": "object", + "description": "", + "properties": { + "memory_limiter": { + "type": "object", + "comment": "The memory_limiter processor is used to prevent out of memory situations on the collector.", + "description": "", + "properties": { + "check_interval": { + "type": "string", + "comment": "check_interval is the time between measurements of memory usage for the\npurposes of avoiding going over the limits. Defaults to zero, so no\nchecks will be performed. 
Values below 1 second are not recommended since\nit can result in unnecessary CPU consumption.", + "description": "", + "default": "5s" + }, + "limit_percentage": { + "type": "integer", + "comment": "Maximum amount of memory, in %, targeted to be allocated by the process heap.\nNote that typically the total memory usage of process will be about 50MiB higher\nthan this value.", + "description": "", + "default": 75 + }, + "spike_limit_percentage": { + "type": "integer", + "comment": "Maximum spike expected between the measurements of memory usage, in %.", + "description": "", + "default": 20 + } + } + }, + "batch": { + "type": "object", + "comment": "The batch processor accepts spans and places them into batches grouped by node and resource", + "description": "", + "properties": { + "send_batch_size": { + "type": "integer", + "comment": "Number of spans after which a batch will be sent regardless of time", + "description": "", + "default": 256 + }, + "send_batch_max_size": { + "type": "integer", + "comment": "Maximum number of spans sent at once", + "description": "", + "default": 512 + }, + "timeout": { + "type": "string", + "comment": "Time duration after which a batch will be sent regardless of size", + "description": "", + "default": "5s" + } + } + } + } + }, + "extensions": { + "type": "object", + "description": "", + "properties": { + "health_check": { + "type": "object", + "description": "", + "default": {} + }, + "memory_ballast": { + "type": "object", + "description": "", + "properties": { + "size_mib": { + "type": "integer", + "comment": "Memory Ballast size should be max 1/3 to 1/2 of memory.", + "description": "", + "default": 250 + } + } + }, + "pprof": { + "type": "object", + "description": "", + "default": {} + } + } + }, + "exporters": { + "type": "object", + "description": "", + "properties": { + "loadbalancing": { + "type": "object", + "description": "", + "properties": { + "protocol": { + "type": "object", + "description": "", + "properties": { + "otlp": { + "type": "object", + "description": "", + "properties": { + "timeout": { + "type": "string", + "description": "", + "default": "10s" + }, + "tls": { + "type": "object", + "description": "", + "properties": { + "insecure": { + "type": "boolean", + "description": "", + "default": true + } + } + } + } + } + } + }, + "resolver": { + "type": "object", + "description": "", + "properties": { + "dns": { + "type": "object", + "description": "", + "properties": { + "hostname": { + "type": "string", + "description": "", + "default": "{{ include \"tracesgateway.exporter.loadbalancing.endpoint\" . 
}}" + }, + "port": { + "type": "integer", + "description": "", + "default": 4317 + } + } + } + } + } + } + } + } + }, + "service": { + "type": "object", + "description": "", + "properties": { + "extensions": { + "type": "array", + "description": "", + "default": [ + "health_check", + "memory_ballast", + "pprof" + ] + }, + "pipelines": { + "type": "object", + "description": "", + "properties": { + "traces": { + "type": "object", + "description": "", + "properties": { + "receivers": { + "type": "array", + "description": "", + "default": [ + "otlp" + ] + }, + "processors": { + "type": "array", + "description": "", + "default": [ + "memory_limiter", + "batch" + ] + }, + "exporters": { + "type": "array", + "description": "", + "default": [ + "loadbalancing" + ] + } + } + } + } + } + } + } + } + } + } + }, + "otelevents": { + "type": "object", + "comment": "Configuration of the OpenTelemetry Collector that collects Kubernetes events.\nSee https://github.com/SumoLogic/sumologic-kubernetes-collection/deploy/docs/collecting-kubernetes-events.md.", + "description": "", + "properties": { + "image": { + "type": "object", + "comment": "Configure image for Opentelemetry Collector", + "description": "", + "properties": { + "repository": { + "type": "string", + "commented": true, + "description": "Image repository for otelcol docker container.", + "default": "" + }, + "tag": { + "type": "string", + "commented": true, + "description": "Image tag for otelcol docker container.", + "default": "" + }, + "pullPolicy": { + "type": "string", + "description": "Image pullPolicy for otelcol docker container.", + "default": "IfNotPresent" + } + } + }, + "logLevel": { + "type": "string", + "description": "Log level for the OpenTelemtry Collector. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`.", + "default": "info" + }, + "config": { + "type": "object", + "comment": "Customize the Opentelemetry Collector configuration beyond the exposed options", + "description": "", + "properties": { + "merge": { + "type": "object", + "comment": "Directly alter the OT configuration. The value of this key should be a dictionary, that will\nbe directly merged with the generated configuration, overriding existing values.\nFor example:\noverride:\nprocessors:\nbatch:\nsend_batch_size: 512\nwill change the batch size of the pipeline.\nWARNING: This field is not subject to backwards-compatibility guarantees offered by the rest\nof this chart. It involves implementation details that may change even in minor versions.\nUse with caution, and consider opening an issue, so your customization can be added in a safer way.", + "description": "Configuration for events otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md.", + "default": {} + }, + "override": { + "type": "object", + "comment": "Completely override existing config and replace it with the contents of this value.\nThe value of this key should be a dictionary, that will replace the normal configuration.\nThis is an advanced feature, use with caution, and review the generated configuration first.", + "description": "Configuration for events otelcol, replaces defaults.See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md.", + "default": {} + } + } + }, + "statefulset": { + "type": "object", + "description": "OpenTelemetry Collector StatefulSet customization options. 
See values.yaml for more details.", + "properties": { + "nodeSelector": { + "type": "object", + "description": "", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "", + "default": [] + }, + "topologySpreadConstraints": { + "type": "array", + "description": "", + "default": [] + }, + "affinity": { + "type": "object", + "description": "", + "default": {} + }, + "podAntiAffinity": { + "type": "string", + "comment": "Acceptable values for podAntiAffinity:\nsoft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default)\nhard: specifies rules that must be met for a pod to be scheduled onto a node", + "description": "", + "default": "soft" + }, + "resources": { + "type": "object", + "description": "", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "2Gi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "2000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "500Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "200m" + } + } + } + } + }, + "priorityClassName": { + "type": "string", + "comment": "Option to define priorityClassName to assign a priority class to pods.", + "description": "", + "default": "" + }, + "podLabels": { + "type": "object", + "comment": "Add custom labels only to events sts pods", + "description": "", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Add custom annotations only to events sts pods", + "description": "", + "default": {} + }, + "securityContext": { + "type": "object", + "comment": "The group ID of all processes in the statefulset containers. 
This can be anything, but it does need to be set.\nThe default is 0 (root), and containers don't have write permissions for volumes in that case.", + "description": "", + "properties": { + "fsGroup": { + "type": "integer", + "description": "", + "default": 999 + } + } + }, + "containers": { + "type": "object", + "comment": "Set securityContext for containers running in pods in events statefulset.", + "description": "", + "properties": { + "otelcol": { + "type": "object", + "description": "", + "properties": { + "securityContext": { + "type": "object", + "description": "", + "default": {} + }, + "livenessProbe": { + "type": "object", + "description": "", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "readinessProbe": { + "type": "object", + "description": "", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 5 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "startupProbe": { + "type": "object", + "description": "", + "properties": { + "periodSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 60 + } + } + } + } + } + } + }, + "extraEnvVars": { + "type": "array", + "comment": "Extra Environment Values - allows yaml definitions", + "commented": true, + "description": "", + "example": [ + { + "name": "VALUE_FROM_SECRET", + "valueFrom": { + "secretKeyRef": { + "name": "secret_name", + "key": "secret_key" + } + } + } + ], + "default": [] + }, + "extraVolumes": { + "type": "array", + "commented": true, + "description": "", + "example": [ + { + "name": "es-certs", + "secret": { + "defaultMode": 420, + "secretName": "es-certs" + } + } + ], + "default": [] + }, + "extraVolumeMounts": { + "type": "array", + "commented": true, + "description": "", + "example": [ + { + "name": "es-certs", + "mountPath": "/certs", + "readOnly": true + } + ], + "default": [] + } + } + } + } + }, + "otelcloudwatch": { + "type": "object", + "comment": "Configure cloudwatch collection with Otelcol", + "description": "", + "properties": { + "statefulset": { + "type": "object", + "description": "OpenTelemetry Cloudwatch Collector statefulset customization options. 
See [values.yaml] for more details.", + "properties": { + "nodeSelector": { + "type": "object", + "description": "", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "", + "default": [] + }, + "topologySpreadConstraints": { + "type": "array", + "description": "", + "default": [] + }, + "affinity": { + "type": "object", + "description": "", + "default": {} + }, + "podAntiAffinity": { + "type": "string", + "comment": "Acceptable values for podAntiAffinity:\nsoft: specifies preferences that the scheduler will try to enforce but will not guarantee (Default)\nhard: specifies rules that must be met for a pod to be scheduled onto a node", + "description": "", + "default": "soft" + }, + "replicaCount": { + "type": "integer", + "description": "", + "default": 1 + }, + "resources": { + "type": "object", + "description": "", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "1Gi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "1000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "768Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "500m" + } + } + } + } + }, + "priorityClassName": { + "type": "string", + "comment": "Option to define priorityClassName to assign a priority class to pods.", + "description": "", + "default": "" + }, + "podLabels": { + "type": "object", + "comment": "Add custom labels only to logs otel sts pods", + "description": "", + "default": {} + }, + "podAnnotations": { + "type": "object", + "comment": "Add custom annotations only to logs otel sts pods", + "description": "", + "default": {} + }, + "containers": { + "type": "object", + "comment": "Set securityContext for containers running in pods in otelcol-instrumentation statefulset.", + "description": "", + "properties": { + "otelcol": { + "type": "object", + "description": "", + "properties": { + "securityContext": { + "type": "object", + "description": "", + "default": {} + }, + "livenessProbe": { + "type": "object", + "description": "", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 15 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "readinessProbe": { + "type": "object", + "description": "", + "properties": { + "initialDelaySeconds": { + "type": "integer", + "description": "", + "default": 5 + }, + "periodSeconds": { + "type": "integer", + "description": "", + "default": 10 + }, + "timeoutSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 3 + } + } + }, + "startupProbe": { + "type": "object", + "description": "", + "properties": { + "periodSeconds": { + "type": "integer", + "description": "", + "default": 3 + }, + "failureThreshold": { + "type": "integer", + "description": "", + "default": 60 + } + } + } + } + } + } + } + } + } + } + }, + "otellogs": { + "comment": "Configure log collection with Otelcol", + "type": "object", + "description": "", + "properties": { + "metrics": { + "comment": "Metrics from Collector", + "type": "object", + "description": "", + 
"properties": { + "enabled": { + "type": "boolean", + "description": "Enable OpenTelemetry Collector metrics", + "default": true + } + } + }, + "serviceLabels": { + "type": "object", + "description": "Add custom labels to OpenTelemetry Collector Service", + "default": {}, + "comment": "Add custom labels to otelcol svc" + }, + "image": { + "comment": "Configure image for Opentelemetry Collector", + "type": "object", + "description": "", + "properties": { + "repository": { + "type": "string", + "description": "Image repository for otelcol docker container.", + "default": "", + "commented": true + }, + "tag": { + "type": "string", + "description": "Image tag for otelcol docker container.", + "default": "", + "commented": true + }, + "pullPolicy": { + "type": "string", + "description": "Image pullPolicy for otelcol docker container.", + "default": "IfNotPresent" + } + } + }, + "logLevel": { + "type": "string", + "description": "Log level for the OpenTelemtry Collector. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`.", + "default": "info" + }, + "config": { + "type": "object", + "description": "", + "properties": { + "merge": { + "type": "object", + "description": "Configuration for log collector otelcol, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md.", + "default": {}, + "comment": "Directly alter the OT configuration. The value of this key should be a dictionary, that will\nbe directly merged with the generated configuration, overriding existing values.\nFor example:\noverride:\nprocessors:\nbatch:\nsend_batch_size: 512\nwill change the batch size of the pipeline.\nWARNING: This field is not subject to backwards-compatibility guarantees offered by the rest\nof this chart. It involves implementation details that may change even in minor versions.\nUse with caution, and consider opening an issue, so your customization can be added in a safer way." + }, + "override": { + "type": "object", + "description": "Configuration for log collector otelcol, replaces defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md.", + "default": {}, + "comment": "Completely override existing config and replace it with the contents of this value.\nThe value of this key should be a dictionary, that will replace the normal configuration.\nThis is an advanced feature, use with caution, and review the generated configuration first." + } + } + }, + "daemonset": { + "type": "object", + "description": "OpenTelemetry Collector Daemonset customization options. 
See [values.yaml] for more details.", + "comment": "Set securityContext for containers running in pods in log collector daemonset", + "properties": { + "securityContext": { + "type": "object", + "description": "", + "properties": { + "fsGroup": { + "comment": "In order to reliably read logs from mounted node logging paths, we need to run as root", + "type": "integer", + "description": "", + "default": 0 + }, + "runAsUser": { + "type": "integer", + "description": "", + "default": 0 + }, + "runAsGroup": { + "type": "integer", + "description": "", + "default": 0 + } + } + }, + "labels": { + "comment": "Add custom labels to the otelcol daemonset", + "type": "object", + "description": "", + "default": {} + }, + "annotations": { + "comment": "Add custom annotations to the otelcol daemonset", + "type": "object", + "description": "", + "default": {} + }, + "podLabels": { + "comment": "Add custom labels to all otelcol daemonset pods", + "type": "object", + "description": "", + "default": {} + }, + "podAnnotations": { + "comment": "Add custom annotations to all otelcol daemonset pods", + "type": "object", + "description": "", + "default": {} + }, + "resources": { + "type": "object", + "description": "", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "1Gi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "1000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "32Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "100m" + } + } + } + } + }, + "priorityClassName": { + "type": "string", + "description": "", + "default": "", + "comment": "Option to define priorityClassName to assign a priority class to pods.\nIf not set then temaplates/priorityclass.yaml is used." 
+ }, + "containers": { + "comment": "Set securityContext for containers running in pods in log collector daemonset", + "type": "object", + "description": "", + "properties": { + "otelcol": { + "type": "object", + "description": "", + "properties": { + "securityContext": { + "type": "object", + "description": "", + "properties": { + "capabilities": { + "type": "object", + "description": "", + "properties": { + "drop": { + "type": "array", + "description": "", + "default": [ + "ALL" + ] + } + } + } + } + } + } + } + } + }, + "initContainers": { + "comment": "Set securityContext and image for initContainers running in pods in log collector daemonset", + "type": "object", + "description": "", + "properties": { + "changeowner": { + "type": "object", + "description": "", + "properties": { + "image": { + "type": "object", + "description": "", + "properties": { + "repository": { + "type": "string", + "description": "", + "default": "public.ecr.aws/docker/library/busybox" + }, + "tag": { + "type": "string", + "description": "", + "default": "1.36.0" + }, + "pullPolicy": { + "type": "string", + "description": "", + "default": "IfNotPresent" + } + } + }, + "securityContext": { + "type": "object", + "description": "", + "properties": { + "capabilities": { + "type": "object", + "description": "", + "properties": { + "drop": { + "type": "array", + "description": "", + "default": [ + "ALL" + ] + }, + "add": { + "type": "array", + "description": "", + "default": [ + "CAP_CHOWN" + ] + } + } + } + } + } + } + } + } + }, + "nodeSelector": { + "type": "object", + "description": "", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "", + "default": [] + }, + "affinity": { + "type": "object", + "description": "", + "default": {} + }, + "extraEnvVars": { + "type": "array", + "description": "", + "commented": true, + "comment": "Extra Environment Values - allows yaml definitions", + "example": [ + { + "name": "VALUE_FROM_SECRET", + "valueFrom": { + "secretKeyRef": { + "name": "secret_name", + "key": "secret_key" + } + } + } + ], + "default": [] + }, + "extraVolumes": { + "type": "array", + "description": "", + "commented": true, + "example": [ + { + "name": "es-certs", + "secret": { + "defaultMode": 420, + "secretName": "es-certs" + } + } + ], + "default": [] + }, + "extraVolumeMounts": { + "type": "array", + "description": "", + "commented": true, + "example": [ + { + "name": "es-certs", + "mountPath": "/certs", + "readOnly": true + } + ], + "default": [] + } + } + }, + "additionalDaemonSets": { + "type": "object", + "description": "OpenTelemetry Collector Daemonset per node customization options. 
See [Best Practices](/docs/best-practices.md#setting-different-resources-on-different-nodes-for-logs-collector).", + "default": {}, + "comment": "additionalDaemonSets allows to set daemonsets with affinity, nodeSelector and resources\ndifferent than the main DaemonSet\nBe careful and set nodeAffinity for the main DaemonSet,\nas we do not support multiple pods of otellogs on the same node\ne.g:\nadditionalDaemonSets:\nlinux:\nnodeSelector:\nkubernetes.io/os: linux\nresources:\nlimits:\nmemory: 1Gi\ncpu: 6\nrequests:\nmemory: 32Mi\ncpu: 2\ndaemonset:\naffinity:\nnodeAffinity:\nrequiredDuringSchedulingIgnoredDuringExecution:\nnodeSelectorTerms:\n- matchExpressions:\n- key: kubernetes.io/os\noperator: NotIn\nvalues:\n- linux" + } + } + }, + "telegraf-operator": { + "type": "object", + "comment": "Configure telegraf-operator\nref: https://github.com/influxdata/helm-charts/blob/master/charts/telegraf-operator/values.yaml", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control deploying Telegraf Operator Helm sub-chart.", + "default": false + }, + "fullnameOverride": { + "type": "string", + "description": "Used to override the chart's full name.", + "default": "", + "comment": "Put here the new name if you want to override the full name used for Telegraf Operator components.", + "commented": true + }, + "image": { + "type": "object", + "description": "", + "properties": { + "sidecarImage": { + "type": "string", + "description": "Telegraf Operator sidecar image.", + "default": "public.ecr.aws/sumologic/telegraf:1.21.2" + } + } + }, + "replicaCount": { + "type": "integer", + "description": "Replica count for Telegraf Operator pods.", + "default": 1 + }, + "classes": { + "type": "object", + "description": "", + "properties": { + "secretName": { + "type": "string", + "description": "Secret name in which the Telegraf Operator configuration will be stored.", + "default": "telegraf-operator-classes" + }, + "default": { + "type": "string", + "description": "Name of the default output configuration.", + "default": "sumologic-prometheus" + }, + "data": { + "type": "object", + "description": "Telegraf sidecar configuration.", + "properties": { + "sumologic-prometheus": { + "type": "string", + "description": "", + "default": "[[outputs.prometheus_client]]\n ## Configuration details:\n ## https://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client#configuration\n listen = \":9273\"\n metric_version = 2\n ## Disable the default collectors\n collectors_exclude = [\"gocollector\", \"process\"]\n ## Telegraf operator adds the internal plugin by default, and the Helm Chart doesn't let us disable it\n ## Instead, drop the metrics at the output\n namedrop = [\"internal*\"]\n" + } + } + } + } + }, + "imagePullSecrets": { + "type": "array", + "description": "Pull secrets for Telegraf Operator images. 
For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config).", + "default": [], + "commented": true + } + } + }, + "falco": { + "type": "object", + "comment": "Configure Falco\nPlease note that Falco is embedded in this Helm Chart for user convenience only - Sumo Logic does not provide production support for it\nThis is an experimental configuration and shouldn't be used in production environment\nhttps://github.com/falcosecurity/charts/tree/master/falco", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control deploying Falco Helm sub-chart.", + "default": false + }, + "fullnameOverride": { + "type": "string", + "description": "Used to override the chart's full name.", + "default": "", + "commented": true, + "comment": "Put here the new name if you want to override the full name used for Falco components." + }, + "imagePullSecrets": { + "type": "array", + "description": "Pull secrets for falco images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config).", + "default": [], + "commented": true + }, + "image": { + "type": "object", + "description": "", + "properties": { + "registry": { + "type": "string", + "description": "Image registry for falco docker container.", + "default": "public.ecr.aws" + }, + "repository": { + "type": "string", + "description": "Image repository for falco docker container.", + "default": "falcosecurity/falco-no-driver", + "commented": true + } + } + }, + "addKernelDevel": { + "comment": "Add kernel-devel package through MachineConfig, required to enable building of missing falco modules (only for OpenShift)", + "type": "boolean", + "description": "Flag to control installation of `kernel-devel` on nodes using MachineConfig, required to build falco modules (only for OpenShift)", + "default": true + }, + "extra": { + "type": "object", + "description": "", + "properties": { + "initContainers": { + "comment": "Add initContainer to wait until kernel-devel is installed on host", + "type": "array", + "description": "InitContainers for Falco pod", + "default": [ + { + "name": "init-falco", + "image": "public.ecr.aws/docker/library/busybox:1.36.0", + "command": [ + "sh", + "-c", + "while [ -f /host/etc/redhat-release ] && [ -z \"$(ls /host/usr/src/kernels)\" ] ; do\necho \"waiting for kernel headers to be installed\"\nsleep 3\ndone\n" + ], + "volumeMounts": [ + { + "mountPath": "/host/usr", + "name": "usr-fs", + "readOnly": true + }, + { + "mountPath": "/host/etc", + "name": "etc-fs", + "readOnly": true + } + ] + } + ] + } + } + }, + "driver": { + "type": "object", + "description": "", + "properties": { + "kind": { + "type": "string", + "description": "Tell Falco which driver to use. Available options: module (kernel driver) and ebpf (eBPF probe). 
Set to `ebpf` for GKE", + "default": "module", + "comment": "Set to epbf to enable eBPF support for Falco instead of falco-probe kernel module.\nhttps://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/docs/troubleshoot-collection.md#falco-and-google-kubernetes-engine-gke" + }, + "loader": { + "type": "object", + "description": "", + "properties": { + "initContainer": { + "type": "object", + "description": "", + "properties": { + "image": { + "type": "object", + "description": "Init container image configuration for falco driver loader.", + "properties": { + "registry": { + "type": "string", + "description": "", + "default": "public.ecr.aws" + }, + "repository": { + "type": "string", + "description": "", + "default": "falcosecurity/falco-driver-loader", + "commented": true + } + } + } + } + } + } + } + } + }, + "falco": { + "type": "object", + "description": "", + "properties": { + "load_plugins": { + "type": "array", + "description": "Names of the plugins to be loaded by Falco.", + "default": [ + "json", + "k8saudit" + ] + }, + "json_output": { + "type": "boolean", + "description": "Output events in json.", + "default": true + }, + "rules_file": { + "comment": "The location of the rules file(s). This can contain one or more paths to\nseparate rules files.\nExplicitly add missing /etc/falco/rules.available/application_rules.yaml\nbefore https://github.com/falcosecurity/charts/issues/230 gets resolved.", + "type": "array", + "description": "The location of the rules files that will be consumed by Falco.", + "default": [ + "/etc/falco/falco_rules.yaml", + "/etc/falco/falco_rules.local.yaml", + "/etc/falco/k8s_audit_rules.yaml", + "/etc/falco/rules.d", + "/etc/falco/rules.available/application_rules.yaml" + ] + } + } + }, + "falcoctl": { + "type": "object", + "description": "Falcoctl configuration. We don't use it for now due to breaking changes. 
[See this issue](https://github.com/SumoLogic/sumologic-kubernetes-collection/issues/3144).", + "properties": { + "artifact": { + "type": "object", + "description": "", + "properties": { + "follow": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "", + "default": false + } + } + }, + "install": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "", + "default": false + } + } + } + } + } + } + }, + "customRules": { + "type": "object", + "description": "Additional falco rules related to Sumo Logic Kubernetes Collection", + "properties": { + "rules_user_known_k8s_api_callers.yaml": { + "comment": "Mark the following as known k8s api callers:\n* prometheus\n* prometheus operator\n* telegraf operator\n* grafana sidecar", + "type": "string", + "description": "", + "default": "- macro: user_known_contact_k8s_api_server_activities\n condition: >\n (container.image.repository = \"quay.io/prometheus/prometheus\") or\n (container.image.repository = \"quay.io/coreos/prometheus-operator\") or\n (container.image.repository = \"quay.io/influxdb/telegraf-operator\") or\n (container.image.repository = \"kiwigrid/k8s-sidecar\")" + }, + "rules_user_sensitive_mount_containers.yaml": { + "type": "string", + "description": "", + "default": "- macro: user_sensitive_mount_containers\n condition: >\n (container.image.repository = \"falcosecurity/falco\") or\n (container.image.repository = \"quay.io/prometheus/node-exporter\")" + }, + "rules_user_privileged_containers.yaml": { + "comment": "NOTE: kube-proxy not exact matching because of regional ecr e.g.\n602401143452.dkr.ecr.us-west-1.amazonaws.com/eks/kube-proxy", + "type": "string", + "description": "", + "default": "- macro: user_privileged_containers\n condition: >\n (container.image.repository endswith \".amazonaws.com/eks/kube-proxy\")" + } + } + } + } + }, + "tailing-sidecar-operator": { + "comment": "Configure Tailing Sidecar Operator\nref: https://github.com/SumoLogic/tailing-sidecar/blob/main/helm/tailing-sidecar-operator/values.yaml", + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control deploying Tailing Sidecar Operator Helm sub-chart.", + "default": false + }, + "fullnameOverride": { + "type": "string", + "description": "Used to override the chart's full name.", + "default": "", + "commented": true, + "comment": "Put here the new name if you want to override the full name used for tailing-sidecar-operator components." 
+ }, + "scc": { + "comment": "creation of Security Context Constraints in Openshift", + "type": "object", + "description": "", + "properties": { + "create": { + "type": "boolean", + "description": "Create OpenShift's Security Context Constraint", + "default": false + } + } + } + } + }, + "opentelemetry-operator": { + "comment": "Configure OpenTelemetry Operator - Instrumentation\nref: https://github.com/open-telemetry/opentelemetry-helm-charts/tree/main/charts/opentelemetry-operator", + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control deploying OpenTelemetry Operator Helm sub-chart.", + "default": true + }, + "instrumentationJobImage": { + "comment": "Specific for Sumo Logic chart - Instrumentation resource creation", + "type": "object", + "description": "", + "properties": { + "image": { + "type": "object", + "description": "", + "properties": { + "repository": { + "type": "string", + "description": "Name of the image repository used to apply Instrumentation resource", + "default": "sumologic/kubernetes-tools" + }, + "tag": { + "type": "string", + "description": "Name of the image tag used to apply Instrumentation resource", + "default": "2.14.0" + } + } + } + } + }, + "createDefaultInstrumentation": { + "type": "boolean", + "description": "Flag to control creation of default Instrumentation object", + "default": false + }, + "instrumentationNamespaces": { + "type": "string", + "description": "Used to create `Instrumentation` resources in specified namespaces.", + "default": "" + }, + "instrumentation": { + "comment": "Current instrumentation doesn't support customization\nfor nodejs. Traces are always enabled.\nnodejs:\ntraces:\nenabled: true", + "type": "object", + "description": "", + "properties": { + "dotnet": { + "type": "object", + "description": "", + "properties": { + "traces": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control traces export from DotNet instrumentation in `Instrumentation` resource.", + "default": true + } + } + }, + "metrics": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control metrics export from DotNet instrumentation in `Instrumentation` resource.", + "default": true + } + } + } + } + }, + "java": { + "type": "object", + "description": "", + "properties": { + "traces": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control traces export from Java instrumentation in `Instrumentation` resource.", + "default": true + } + } + }, + "metrics": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control metrics export from Java instrumentation in `Instrumentation` resource.", + "default": true + } + } + } + } + }, + "python": { + "type": "object", + "description": "", + "properties": { + "traces": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control traces export from Python instrumentation in `Instrumentation` resource.", + "default": true + } + } + }, + "metrics": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to control metrics export from Python instrumentation in `Instrumentation` resource.", + "default": true + } + } + } + } + } + } + 
}, + "admissionWebhooks": { + "comment": "Specific for OpenTelemetry Operator chart values", + "type": "object", + "description": "Admission webhooks make sure only requests with correctly formatted rules will get into the Operator. They also enable the sidecar injection for OpenTelemetryCollector and Instrumentation CR's.", + "properties": { + "failurePolicy": { + "type": "string", + "description": "", + "default": "Fail" + }, + "enabled": { + "type": "boolean", + "description": "", + "default": true + }, + "objectSelector": { + "comment": "skip admission webhook on our own OpenTelemetryCollector object to avoid having to wait for operator to start", + "type": "object", + "description": "", + "properties": { + "matchExpressions": { + "type": "array", + "description": "", + "default": [ + { + "key": "sumologic.com/component", + "operator": "NotIn", + "values": [ + "metrics" + ] + } + ] + } + } + }, + "certManager": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "", + "default": false + }, + "issuerRef": { + "type": "object", + "description": "", + "default": {} + } + } + }, + "autoGenerateCert": { + "type": "boolean", + "description": "", + "default": true + } + } + }, + "manager": { + "type": "object", + "description": "", + "properties": { + "collectorImage": { + "type": "object", + "description": "", + "properties": { + "repository": { + "type": "string", + "description": "The default collector image repository for OpenTelemetryCollector CRDs.", + "default": "public.ecr.aws/sumologic/sumologic-otel-collector" + }, + "tag": { + "type": "string", + "description": "The default collector image tag for OpenTelemetryCollector CRDs.", + "default": "0.86.0-sumo-1" + } + } + }, + "env": { + "type": "object", + "description": "Additional environment variables for opentelemetry-operator helm chart.", + "default": {} + }, + "resources": { + "type": "object", + "description": "", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "cpu": { + "type": "string", + "description": "Used to set limit CPU for OpenTelemetry-Operator Manager.", + "default": "250m" + }, + "memory": { + "type": "string", + "description": "Used to set limit Memory for OpenTelemetry-Operator Manager.", + "default": "512Mi" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "cpu": { + "type": "string", + "description": "Used to set requested CPU for OpenTelemetry-Operator Manager.", + "default": "150m" + }, + "memory": { + "type": "string", + "description": "Used to set requested Memory for OpenTelemetry-Operator Manager.", + "default": "256Mi" + } + } + } + } + } + } + } + } + }, + "pvcCleaner": { + "type": "object", + "description": "", + "comment": "pvcCleaner deletes unused PVCs", + "properties": { + "metrics": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to enable cleaning unused PVCs for otelcol metrics statefulsets.", + "default": false + } + } + }, + "logs": { + "type": "object", + "description": "", + "properties": { + "enabled": { + "type": "boolean", + "description": "Flag to enable cleaning unused PVCs for otelcol logs statefulsets.", + "default": false + } + } + }, + "job": { + "type": "object", + "description": "", + "properties": { + "image": { + "type": "object", + "description": "", + "properties": { + "repository": { + "type": "string", + "description": "Image repository for pvcCleaner docker 
containers.", + "default": "public.ecr.aws/sumologic/kubernetes-tools-kubectl" + }, + "tag": { + "type": "string", + "description": "Image tag for pvcCleaner docker containers.", + "default": "2.20.0" + }, + "pullPolicy": { + "type": "string", + "description": "Image pullPolicy for pvcCleaner docker containers.", + "default": "IfNotPresent" + } + } + }, + "resources": { + "type": "object", + "description": "Resource requests and limits for the pvcCleaner containers.", + "properties": { + "limits": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "256Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "2000m" + } + } + }, + "requests": { + "type": "object", + "description": "", + "properties": { + "memory": { + "type": "string", + "description": "", + "default": "64Mi" + }, + "cpu": { + "type": "string", + "description": "", + "default": "100m" + } + } + } + } + }, + "nodeSelector": { + "type": "object", + "description": "Node selector for pvcCleaner job. [See docs/best-practices.md for more information.](/docs/best-practices.md)", + "default": {} + }, + "tolerations": { + "type": "array", + "description": "Add tolerations for the pvcCleaner job.", + "default": [], + "comment": "Node tolerations for server scheduling to nodes with taints\nRef: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\n" + }, + "affinity": { + "type": "object", + "description": "Add affinity and anti-affinity for the pvcCleaner job.", + "default": {}, + "comment": "Affinity and anti-affinity\nRef: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity\n" + }, + "podLabels": { + "type": "object", + "description": "Additional labels for the pvcCleaner container.", + "default": {} + }, + "podAnnotations": { + "type": "object", + "description": "Additional annotations for for the pvcCleaner container.", + "default": {}, + "comment": "Add custom annotations" + }, + "schedule": { + "type": "string", + "description": "Schedule for cronJobs", + "default": "*/15 * * * *", + "comment": "Schedule for cronJobs" + }, + "securityContext": { + "type": "object", + "description": "The securityContext configuration for the pvcCleaner.", + "comment": "securityContext for pvcCleaner pods", + "properties": { + "runAsUser": { + "type": "integer", + "description": "", + "default": 1000 + } + } + } + } + } + } + } + } +} \ No newline at end of file diff --git a/deploy/helm/sumologic/values.yaml b/deploy/helm/sumologic/values.yaml index b33163f058..1bd7c0e811 100644 --- a/deploy/helm/sumologic/values.yaml +++ b/deploy/helm/sumologic/values.yaml @@ -2294,7 +2294,7 @@ telegraf-operator: metric_version = 2 ## Disable the default collectors collectors_exclude = ["gocollector", "process"] - ## Telegraf operator adds the internal plugin by default, and the Helm Chart doesn't let us disable it + ## Telegraf operator adds the internal plugin by default, and the Helm Chart doesnt let us disable it ## Instead, drop the metrics at the output namedrop = ["internal*"] # imagePullSecrets: [] diff --git a/vagrant/scripts/diff_values.py b/vagrant/scripts/diff_values.py index d375dcc4eb..f562d5ca45 100755 --- a/vagrant/scripts/diff_values.py +++ b/vagrant/scripts/diff_values.py @@ -1,84 +1,101 @@ #!/usr/bin/env python3 import argparse +from http import client, HTTPStatus from yaml import load, dump, Loader -import http.client -from http import HTTPStatus 
-REPO='SumoLogic/sumologic-kubernetes-collection' -HOST='github.com' -RAW_HOST='raw.githubusercontent.com' -FILE='deploy/helm/sumologic/values.yaml' -AGENT='Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0' +REPO = "SumoLogic/sumologic-kubernetes-collection" +HOST = "github.com" +RAW_HOST = "raw.githubusercontent.com" +FILE = "deploy/helm/sumologic/values.yaml" +AGENT = "Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0" + def main(): - parser = argparse.ArgumentParser( - prog='SKC values diff', - description='Return customer overrides over default values.yaml') + parser = argparse.ArgumentParser( + prog="SKC values diff", + description="Return customer overrides over default values.yaml", + ) + + parser.add_argument("filename") # positional argument + parser.add_argument("-v", "--version") # on/off flag + + args = parser.parse_args() + default_values = load(get_values(args.version), Loader) + with open(args.filename, encoding="utf-8") as file: + values = load(file.read(), Loader) - parser.add_argument('filename') # positional argument - parser.add_argument('-v', '--version') # on/off flag + print(dump(remove_duplicates(values, default_values))) - args = parser.parse_args() - default_values = load(get_values(args.version), Loader) - with open(args.filename) as f: - values = load(f.read(), Loader) - - print(dump(remove_duplicates(values, default_values))) +def remove_duplicates(override, defaults): + if not isinstance(override, type(defaults)): + return override -def remove_duplicates(obj1, obj2): - if type(obj1) != type(obj2): - return obj1 + if isinstance(override, dict): + to_remove = [] + for key, value in override.items(): + # If values are the same, mark to remove + # '' is not None so we need to compare negations of them + if defaults.get(key) == value or (not defaults.get(key) and not value): + to_remove.append(key) + continue - if isinstance(obj1, dict): - to_remove = [] - for key, value in obj1.items(): - if obj2.get(key) == value: - to_remove.append(key) - continue + # values are different, we need to go deeper + override[key] = remove_duplicates(value, defaults.get(key)) - obj1[key] = remove_duplicates(value, obj2.get(key)) + # no differences + if override[key] in ({}, []): + to_remove.append(key) - if obj1[key] == {}: + to_remove.append(key) + # Remove keys marked to remove + for key in to_remove: + del override[key] + elif isinstance(override, list): + # different length means that list has been overridden + if len(override) != len(defaults): + return override - for key in to_remove: - del obj1[key] - elif isinstance(obj1, list): - to_remove = [] - for key, value in enumerate(obj1): - if key < len(obj2) and obj2[key] == value: - to_remove.append(key) - continue + # if any value differs, return object + for key, value in enumerate(override): + if remove_duplicates(defaults[key], value): + return override - if key < len(obj2): - obj1[key] = remove_duplicates(value, obj2[key]) - - to_remove.reverse() - for key in to_remove: - del obj1[key] + to_remove = list(range(0, len(override))) + to_remove.reverse() + for key in to_remove: + del override[key] - return obj1 + return override def get_values(version: str): - if version is None: - conn = http.client.HTTPSConnection(HOST) - conn.request('GET', f'/{REPO}/releases/latest', headers={'Host': HOST, 'User-Agent': AGENT}) + if version is None: + conn = client.HTTPSConnection(HOST) + conn.request( + "GET", + f"/{REPO}/releases/latest", + headers={"Host": HOST, "User-Agent": AGENT}, + ) + response =
conn.getresponse() + if response.status != HTTPStatus.FOUND: + raise Exception(f"Unexpected response status {response.status}") + version = response.headers["Location"].removeprefix( + f"https://{HOST}/{REPO}/releases/tag/" + ) + + conn = client.HTTPSConnection(RAW_HOST) + conn.request( + "GET", + f"/{REPO}/{version}/{FILE}", + headers={"Host": RAW_HOST, "User-Agent": AGENT}, + ) response = conn.getresponse() - if response.status != HTTPStatus.FOUND: - raise Exception(f'Unexpected response status {response.status}') - version = response.headers['Location'].removeprefix(f'https://{HOST}/{REPO}/releases/tag/') - - conn = http.client.HTTPSConnection(RAW_HOST) - conn.request('GET', f'/{REPO}/{version}/{FILE}', headers={'Host': RAW_HOST, 'User-Agent': AGENT}) - response = conn.getresponse() - if response.status != HTTPStatus.OK: - raise Exception(f'Unexpected response status {response.status}') - - return response.read() - - -if __name__ == '__main__': - main() + if response.status != HTTPStatus.OK: + raise Exception(f"Unexpected response status {response.status}") + + return response.read() + + +if __name__ == "__main__": + main() diff --git a/vagrant/scripts/test_diff_values.py b/vagrant/scripts/test_diff_values.py new file mode 100755 index 0000000000..2dde1e879f --- /dev/null +++ b/vagrant/scripts/test_diff_values.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 + +import unittest +from diff_values import remove_duplicates + + +class Case: + def __init__(self, default, override, expected): + self.default = default + self.override = override + self.expected = expected + + +class TestDump(unittest.TestCase): + cases = [ + Case({"a": ["container", "uid"]}, {"a": ["container"]}, {"a": ["container"]}), + Case( + {"a": ["container"]}, + {"a": ["container", "uid"]}, + {"a": ["container", "uid"]}, + ), + Case( + {"a": ["container", "pod"]}, + {"a": ["container", "uid"]}, + {"a": ["container", "uid"]}, + ), + Case({"a": ["container", "pod"]}, {"a": ["container", "pod"]}, {}), + Case({"a": [{"b": ""}]}, {"a": [{"b": None}]}, {}), + ] + + def test(self): + self.maxDiff = None + for case in self.cases: + self.assertEqual( + case.expected, remove_duplicates(case.override, case.default) + ) + + +if __name__ == "__main__": + unittest.main()
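
A minimal usage sketch of the new diff helper (illustrative only, assuming the corrected type check above and that it is run from vagrant/scripts/ so that diff_values is importable; the values below are hypothetical): remove_duplicates keeps only the keys of a user values file that differ from the chart defaults, which is what the unit tests above exercise.

from diff_values import remove_duplicates

# Hypothetical chart defaults and hypothetical user overrides.
defaults = {"sumologic": {"logs": {"enabled": True}, "traces": {"enabled": True}}}
user_values = {"sumologic": {"logs": {"enabled": True}, "traces": {"enabled": False}}}

# Keys equal to the defaults are dropped; only the genuine overrides remain.
print(remove_duplicates(user_values, defaults))
# {'sumologic': {'traces': {'enabled': False}}}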